Interpreting the PyTorch VGG source code

A hands-on walkthrough of the official PyTorch VGG network implementation.

Let's go through it section by section:

1. Imports

Note the relative import used for the helper: from .utils import load_state_dict_from_url. This works because the file lives inside the torchvision.models package.

import torch
import torch.nn as nn
from .utils import load_state_dict_from_url
from typing import Union, List, Dict, Any, cast
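
If you copy this file outside the torchvision package, the relative import fails. A common workaround (my own sketch, not part of the original file) is to fall back on the same helper exposed publicly by torch.hub:

# Sketch: resolve load_state_dict_from_url both inside and outside the package
try:
    from .utils import load_state_dict_from_url        # works inside torchvision.models
except ImportError:
    from torch.hub import load_state_dict_from_url     # same helper, public location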

2. The names exported to other projects

__all__ declares the public interface of this file: only the names listed here are pulled in when another project does a wildcard import of the module.

__all__ = [
    'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
    'vgg19_bn', 'vgg19',
]
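
For illustration (a hypothetical snippet, not from the torchvision source, assuming torchvision is installed), the effect of __all__ on a wildcard import looks like this:

# Only the names listed in __all__ are bound by a star import.
from torchvision.models.vgg import *

model_fn = vgg16          # exported, so it is available
# helper = make_layers    # NameError: not listed in __all__, so not exported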

3. Pretrained model URLs

This part lists the download URLs of the pretrained models; load_state_dict_from_url fetches a pretrained state dict from the corresponding URL.

model_urls = {
    'vgg11': 'https://download.pytorch.org/models/vgg11-8a719046.pth',
    'vgg13': 'https://download.pytorch.org/models/vgg13-19584684.pth',
    'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
    'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
    'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
    'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
    'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
    'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
}
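
As a quick sketch of my own (it downloads roughly half a gigabyte of weights, so only run it intentionally), the helper returns an ordinary state dict whose keys match the submodule names of the VGG class defined below:

# Download the VGG-16 weights and inspect the first few parameter names.
state_dict = load_state_dict_from_url(model_urls['vgg16'], progress=True)
print(list(state_dict.keys())[:4])
# e.g. ['features.0.weight', 'features.0.bias', 'features.2.weight', 'features.2.bias']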

4. The core implementation of the VGG network

This part is, in essence, the standard process of building a network:

  • The first step is the initialization __init__() (initial assignment of the class members, construction of the basic network blocks)
  • The second step is building the forward pass forward() (the input x goes through every layer of the network and the final output is produced)

class VGG(nn.Module):

    # Initialization __init__() (initial assignment of the class members, construction of the basic network blocks)
    def __init__(self, features: nn.Module, num_classes: int = 1000, init_weights: bool = True):
        # Standard boilerplate: call the parent-class initializer
        super(VGG, self).__init__()
        # Feature-extraction layers
        self.features = features
        # Adaptive pooling so that the feature-extractor output matches the classifier input
        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
        # Use Sequential to combine several layers into a single block <this is the first and most common way of using Sequential>
        self.classifier = nn.Sequential(
            nn.Linear(512 * 7 * 7, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, num_classes),
        )
        # If weight initialization is requested, run the parameter-initialization step
        if init_weights:
            self._initialize_weights()

    # Building the forward pass forward() (the input x goes through every layer of the network and the final output is produced)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Pass the input x through each stage in turn and return the final result
        x = self.features(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.classifier(x)
        return x

    def _initialize_weights(self):
        # Initialize the parameters: Kaiming initialization for conv layers, constant initialization for BN layers, Gaussian initialization for Linear layers
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)
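
To see why the first Linear layer of the classifier takes 512 * 7 * 7 inputs, here is a small sanity check (a sketch with a made-up feature-map size): AdaptiveAvgPool2d((7, 7)) always produces a 7x7 spatial map, so flattening gives 25088 features regardless of the input resolution.

import torch
import torch.nn as nn

pool = nn.AdaptiveAvgPool2d((7, 7))
feat = torch.randn(1, 512, 10, 10)         # hypothetical feature map from the conv stack
pooled = pool(feat)
print(pooled.shape)                        # torch.Size([1, 512, 7, 7])
print(torch.flatten(pooled, 1).shape)      # torch.Size([1, 25088]) == 512 * 7 * 7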

5. Building the VGG convolution-and-pooling module

This whole part builds the convolution-and-pooling module of VGG. Here the Sequential(*list) idiom is used to assemble the module.

def make_layers(cfg: List[Union[str, int]], batch_norm: bool = False) -> nn.Sequential:
    # The list that collects the layers as they are built
    layers: List[nn.Module] = []
    in_channels = 3
    # Everything to build is described in the cfg configuration; the feature extractors of VGG variants A, B, D and E are constructed exactly as in the design table
    for v in cfg:
        # 'M' stands for a max-pooling layer
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        # Convolution layer: note how in_channels is carried forward
        else:
            v = cast(int, v)
            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
            if batch_norm:
                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
            else:
                layers += [conv2d, nn.ReLU(inplace=True)]
            in_channels = v
    return nn.Sequential(*layers)
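
As a quick illustration (using a shortened, hypothetical cfg rather than one of the real configurations listed below), make_layers turns the list into a Sequential of conv/BN/ReLU/pool layers:

tiny_cfg = [64, 'M', 128, 'M']                   # hypothetical two-block configuration
blocks = make_layers(tiny_cfg, batch_norm=True)
print(blocks)
# Conv2d(3, 64) -> BatchNorm2d(64) -> ReLU -> MaxPool2d ->
# Conv2d(64, 128) -> BatchNorm2d(128) -> ReLU -> MaxPool2d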

6. Construction parameters of the different networks

The configuration is a dictionary whose entries are the construction parameters for modes A, B, D and E respectively.

cfgs: Dict[str, List[Union[str, int]]] = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
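
The letters correspond to the columns of the configuration table in the VGG paper. A quick way to check that each entry really has the advertised depth is to count the conv layers and add the three fully connected layers of the classifier:

# Each integer in a cfg is one conv layer; the classifier contributes 3 Linear layers.
for name, cfg in cfgs.items():
    conv_layers = sum(1 for v in cfg if v != 'M')
    print(name, conv_layers + 3)   # A -> 11, B -> 13, D -> 16, E -> 19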

7. The shared pre-construction step

def _vgg(arch: str, cfg: str, batch_norm: bool, pretrained: bool, progress: bool, **kwargs: Any) -> VGG:
    # If the pretrained parameters from the web are wanted, disable the local weight initialization
    if pretrained:
        kwargs['init_weights'] = False
    # Build the network model by combining the feature-extraction layers with the classification layers
    model = VGG(make_layers(cfgs[cfg], batch_norm=batch_norm), **kwargs)
    # When pretrained parameters are needed, use load_state_dict_from_url to download the parameter set from the corresponding URL
    if pretrained:
        # Download the parameters from the corresponding URL; the progress argument toggles the progress bar
        state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
        # Load the parameters into the model
        model.load_state_dict(state_dict)
    return model
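
For illustration, this is what the builder does for VGG-16 with batch normalization when no pretrained weights are requested (nothing is downloaded; printing the parameter count is just a sanity check of my own):

model = _vgg('vgg16_bn', 'D', batch_norm=True, pretrained=False, progress=True)
print(sum(p.numel() for p in model.parameters()))   # total number of trainable parameters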

8. Completing the construction of a specific VGG network

What follows are the builders for VGG at different network depths (the same pattern repeats below).

def vgg11(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
    r"""VGG 11-layer model (configuration "A") from
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_.
    The required minimum input size of the model is 32x32.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # Arguments: the name (which pretrained weights to load), the configuration, whether batch_norm is needed, the pretrained flag, the progress bar, other parameters
    return _vgg('vgg11', 'A', False, pretrained, progress, **kwargs)
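
A usage sketch (my own example, not part of the file): the extra keyword arguments are forwarded to the VGG constructor, so the classifier head can be resized for a different number of classes as long as pretrained=False:

model = vgg11(pretrained=False, num_classes=10)    # 10-class head instead of 1000
out = model(torch.randn(2, 3, 224, 224))           # a batch of two RGB images
print(out.shape)                                   # torch.Size([2, 10])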

The complete source code is as follows:

import torch
import torch.nn as nn
from .utils import load_state_dict_from_url
from typing import Union, List, Dict, Any, cast


__all__ = [
    'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
    'vgg19_bn', 'vgg19',
]

# This part lists the download URLs of the pretrained models; load_state_dict_from_url fetches a pretrained state dict from the corresponding URL
model_urls = {
    'vgg11': 'https://download.pytorch.org/models/vgg11-8a719046.pth',
    'vgg13': 'https://download.pytorch.org/models/vgg13-19584684.pth',
    'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
    'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
    'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
    'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
    'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
    'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
}

# This part is the standard network-building process: step 1, initialization __init__() (initial assignment of the class members, construction of the basic network blocks); step 2, the forward pass forward() (the input x goes through every layer of the network and the final output is produced)
class VGG(nn.Module):

    # Initialization __init__() (initial assignment of the class members, construction of the basic network blocks)
    def __init__(self, features: nn.Module, num_classes: int = 1000, init_weights: bool = True):
        # Standard boilerplate: call the parent-class initializer
        super(VGG, self).__init__()
        # Feature-extraction layers
        self.features = features
        # Adaptive pooling so that the feature-extractor output matches the classifier input
        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
        # Use Sequential to combine several layers into a single block <this is the first and most common way of using Sequential>
        self.classifier = nn.Sequential(
            nn.Linear(512 * 7 * 7, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, num_classes),
        )
        # If weight initialization is requested, run the parameter-initialization step
        if init_weights:
            self._initialize_weights()

    # Building the forward pass forward() (the input x goes through every layer of the network and the final output is produced)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Pass the input x through each stage in turn and return the final result
        x = self.features(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.classifier(x)
        return x

    def _initialize_weights(self):
        # Initialize the parameters: Kaiming initialization for conv layers, constant initialization for BN layers, Gaussian initialization for Linear layers
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)

# This whole part builds the convolution-and-pooling module of VGG, using the Sequential(*list) idiom to assemble the block
def make_layers(cfg: List[Union[str, int]], batch_norm: bool = False) -> nn.Sequential:
    # The list that collects the layers as they are built
    layers: List[nn.Module] = []
    in_channels = 3
    # Everything to build is described in the cfg configuration; the feature extractors of VGG variants A, B, D and E are constructed exactly as in the design table
    for v in cfg:
        # 'M' stands for a max-pooling layer
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        # Convolution layer: note how in_channels is carried forward
        else:
            v = cast(int, v)
            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
            if batch_norm:
                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
            else:
                layers += [conv2d, nn.ReLU(inplace=True)]
            in_channels = v
    return nn.Sequential(*layers)

# The configuration dictionary: the construction parameters corresponding to modes A, B, D and E
cfgs: Dict[str, List[Union[str, int]]] = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}


# The shared pre-construction step
def _vgg(arch: str, cfg: str, batch_norm: bool, pretrained: bool, progress: bool, **kwargs: Any) -> VGG:
    # If the pretrained parameters from the web are wanted, disable the local weight initialization
    if pretrained:
        kwargs['init_weights'] = False
    # Build the network model by combining the feature-extraction layers with the classification layers
    model = VGG(make_layers(cfgs[cfg], batch_norm=batch_norm), **kwargs)
    # When pretrained parameters are needed, use load_state_dict_from_url to download the parameter set from the corresponding URL
    if pretrained:
        # Download the parameters from the corresponding URL; the progress argument toggles the progress bar
        state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
        # Load the parameters into the model
        model.load_state_dict(state_dict)
    return model

# Below are the builders for VGG at different network depths (the same pattern repeats)
def vgg11(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
    r"""VGG 11-layer model (configuration "A") from
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_.
    The required minimum input size of the model is 32x32.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # Arguments: the name (which pretrained weights to load), the configuration, whether batch_norm is needed, the pretrained flag, the progress bar, other parameters
    return _vgg('vgg11', 'A', False, pretrained, progress, **kwargs)


def vgg11_bn(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
    r"""VGG 11-layer model (configuration "A") with batch normalization
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_.
    The required minimum input size of the model is 32x32.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg('vgg11_bn', 'A', True, pretrained, progress, **kwargs)


def vgg13(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
    r"""VGG 13-layer model (configuration "B")
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_.
    The required minimum input size of the model is 32x32.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg('vgg13', 'B', False, pretrained, progress, **kwargs)


def vgg13_bn(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
    r"""VGG 13-layer model (configuration "B") with batch normalization
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_.
    The required minimum input size of the model is 32x32.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg('vgg13_bn', 'B', True, pretrained, progress, **kwargs)


def vgg16(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
    r"""VGG 16-layer model (configuration "D")
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_.
    The required minimum input size of the model is 32x32.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg('vgg16', 'D', False, pretrained, progress, **kwargs)


def vgg16_bn(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
    r"""VGG 16-layer model (configuration "D") with batch normalization
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_.
    The required minimum input size of the model is 32x32.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg('vgg16_bn', 'D', True, pretrained, progress, **kwargs)


def vgg19(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
    r"""VGG 19-layer model (configuration "E")
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_.
    The required minimum input size of the model is 32x32.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg('vgg19', 'E', False, pretrained, progress, **kwargs)


def vgg19_bn(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:
    r"""VGG 19-layer model (configuration 'E') with batch normalization
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_.
    The required minimum input size of the model is 32x32.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    return _vgg('vgg19_bn', 'E', True, pretrained, progress, **kwargs)