MobileNetV2代码复现
时间: 2025-02-10 22:00:30 浏览: 49
### MobileNetV2 的代码实现与教程
#### 使用 PyTorch 实现 MobileNetV2
以下是基于 PyTorch 的 MobileNetV2 完整实现:
```python
import torch.nn as nn
import torch
class ConvBNReLU(nn.Sequential):
    """Conv2d -> BatchNorm2d -> ReLU6 building block used throughout MobileNetV2.

    Args:
        in_planes: number of input channels.
        out_planes: number of output channels.
        kernel_size: convolution kernel size (odd; default 3).
        stride: convolution stride (default 1).
        groups: conv groups; set groups == channels for a depthwise conv.
    """

    # Fix: the self parameter was non-idiomatically named `conv`; renamed to `self`.
    def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):
        # "same" padding for odd kernel sizes, so spatial size only shrinks via stride
        padding = (kernel_size - 1) // 2
        super(ConvBNReLU, self).__init__(
            # bias=False because the following BatchNorm has its own affine shift
            nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding,
                      groups=groups, bias=False),
            nn.BatchNorm2d(out_planes),
            nn.ReLU6(inplace=True),
        )
class InvertedResidual(nn.Module):
    """MobileNetV2 inverted residual block with a linear bottleneck.

    Expands channels by ``expand_ratio`` (1x1 conv), applies a 3x3 depthwise
    conv, then projects back with a 1x1 linear conv (no activation). A skip
    connection is added only when stride == 1 and inp == oup.

    Args:
        inp: input channels.
        oup: output channels.
        stride: depthwise conv stride (1 or 2).
        expand_ratio: channel expansion factor t.
    """

    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        hidden_dim = int(round(inp * expand_ratio))
        # Residual add is valid only when the spatial size and channel count match.
        self.use_res_connect = stride == 1 and inp == oup

        layers = []
        if expand_ratio != 1:
            # pw: 1x1 pointwise expansion
            layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))
        layers.extend([
            # dw: 3x3 depthwise conv (groups == channels)
            ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim),
            # pw-linear: projection without activation (the "linear bottleneck")
            nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
            nn.BatchNorm2d(oup),
        ])
        # Fix: original assigned `conv.block` here — a NameError, since the
        # self parameter is named `self`.
        self.block = nn.Sequential(*layers)

    def forward(self, x):
        # Fix: original read `conv.use_res_connect` / `conv.block` — NameError.
        if self.use_res_connect:
            return x + self.block(x)
        return self.block(x)
def _make_divisible(v, divisor, min_value=None):
    """Round channel count ``v`` to the nearest multiple of ``divisor``.

    Never rounds down by more than 10% (standard MobileNet channel rounding).
    Fix: this helper was called by the original code but never defined.
    """
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v


class MobileNetV2(nn.Module):
    """MobileNetV2 classifier (Sandler et al., 2018).

    Args:
        num_classes: size of the final classification layer.
        width_mult: channel width multiplier.
        inverted_residual_setting: optional list of [t, c, n, s] rows
            (expansion, channels, repeats, first stride); defaults to the
            paper's configuration. Fix: the original accepted this parameter
            but silently ignored it.
        round_nearest: channel counts are rounded to multiples of this value.
        block: block class to use; defaults to InvertedResidual.
    """

    # Fix: self parameter renamed from `mobilenetv2` to the idiomatic `self`.
    def __init__(self,
                 num_classes=1000,
                 width_mult=1.0,
                 inverted_residual_setting=None,
                 round_nearest=8,
                 block=None):
        # Fix: super().__init__() was missing — nn.Module must be initialized
        # before any submodules are assigned.
        super(MobileNetV2, self).__init__()
        last_channel = 1280
        input_channel = 32
        if inverted_residual_setting is None:
            inverted_residual_setting = [
                # t, c, n, s
                [1, 16, 1, 1],
                [6, 24, 2, 2],
                [6, 32, 3, 2],
                [6, 64, 4, 2],
                [6, 96, 3, 1],
                [6, 160, 3, 2],
                [6, 320, 1, 1],
            ]
        if block is None:
            block = InvertedResidual

        # building first layer
        input_channel = _make_divisible(input_channel * width_mult, round_nearest)
        last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
        features = [ConvBNReLU(3, input_channel, stride=2)]
        # building inverted residual blocks
        for t, c, n, s in inverted_residual_setting:
            output_channel = _make_divisible(c * width_mult, round_nearest)
            for i in range(n):
                # only the first block in each group downsamples
                stride = s if i == 0 else 1
                features.append(block(input_channel, output_channel, stride, expand_ratio=t))
                input_channel = output_channel
        # building last several layers
        features.append(ConvBNReLU(input_channel, last_channel, kernel_size=1))
        self.features = nn.Sequential(*features)
        # building classifier
        self.classifier = nn.Sequential(
            nn.Dropout(0.2),
            nn.Linear(last_channel, num_classes),
        )

    def forward(self, x):
        x = self.features(x)
        x = x.mean([2, 3])  # global average pooling over H and W
        x = self.classifier(x)
        return x
```
这段代码展示了如何定义 `ConvBNReLU` 和 `InvertedResidual` 层以及完整的 `MobileNetV2` 类。此网络结构采用了倒残差模块和线性瓶颈设计,这些特性使得模型更加高效[^1]。
对于 TensorFlow 用户来说,官方提供了预训练好的 MobileNetV2 模型可以直接加载并使用:
```python
from tensorflow.keras.applications import MobileNetV2
model = MobileNetV2(weights='imagenet')
```
这行简单的命令即可获取到已经训练好 ImageNet 数据集的 MobileNetV2 模型实例[^2]。
阅读全文
相关推荐
















