PyTorch code for Transformer-based image classification
### Implementing Transformer-Based Image Classification with PyTorch
To implement Transformer-based image classification in PyTorch, you can use an architecture such as the Vision Transformer (ViT) or the Swin Transformer. The example below shows a simple ViT model for image classification.
#### Defining the Vision Transformer classes
```python
import torch
from torch import nn, optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
class PatchEmbedding(nn.Module):
    """Split an image into patches and project each patch to the embedding dimension."""
    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
        super().__init__()
        self.img_size = img_size
        self.patch_size = patch_size
        self.n_patches = (img_size // patch_size) ** 2
        # A conv with kernel_size == stride == patch_size is equivalent to a
        # linear projection of non-overlapping patches.
        self.projection = nn.Conv2d(
            in_channels=in_chans,
            out_channels=embed_dim,
            kernel_size=patch_size,
            stride=patch_size
        )

    def forward(self, x):
        # (B, C, H, W) -> (B, embed_dim, H/P, W/P) -> (B, n_patches, embed_dim)
        x = self.projection(x).flatten(2).transpose(1, 2)
        return x


class AttentionBlock(nn.Module):
    """Multi-head self-attention."""
    def __init__(self, dim, num_heads=12, qkv_bias=False, attn_drop=0., proj_drop=0.):
        super().__init__()
        assert dim % num_heads == 0, "dim should be divisible by num_heads."
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = head_dim ** -0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        B, N, C = x.shape
        # (B, N, 3*C) -> (3, B, num_heads, N, head_dim)
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)
        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)
        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x


class MLP(nn.Module):
    """Position-wise feed-forward network."""
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x


class Block(nn.Module):
    """A single Transformer encoder block: pre-norm attention and MLP with residual connections."""
    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, p=0., attn_p=0.):
        super().__init__()
        self.norm1 = nn.LayerNorm(dim, eps=1e-6)
        self.attn = AttentionBlock(
            dim,
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            attn_drop=attn_p,
            proj_drop=p
        )
        self.norm2 = nn.LayerNorm(dim, eps=1e-6)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = MLP(
            in_features=dim,
            hidden_features=mlp_hidden_dim,
            act_layer=nn.GELU,
            drop=p
        )

    def forward(self, x):
        x = x + self.attn(self.norm1(x))
        x = x + self.mlp(self.norm2(x))
        return x


class VisionTransformer(nn.Module):
    def __init__(self, config, img_size=224, patch_size=16, in_c=3, num_classes=10, vis=False):
        super().__init__()
        self.vis = vis
        self.patch_embed = PatchEmbedding(img_size=img_size, patch_size=patch_size,
                                          in_chans=in_c, embed_dim=config.embed_dim)
        self.cls_token = nn.Parameter(torch.zeros(1, 1, config.embed_dim))
        self.pos_embed = nn.Parameter(torch.zeros(1, 1 + self.patch_embed.n_patches, config.embed_dim))
        self.blocks = nn.Sequential(*[
            Block(
                dim=config.embed_dim,
                num_heads=config.num_heads,
                mlp_ratio=config.mlp_ratio,
                qkv_bias=True,
                p=config.p,
                attn_p=config.attn_p
            ) for _ in range(config.depth)])
        self.norm = nn.LayerNorm(config.embed_dim, eps=1e-6)
        self.head = nn.Linear(config.embed_dim, num_classes)

    def forward(self, x):
        n_samples = x.shape[0]
        cls_tokens = self.cls_token.expand(n_samples, -1, -1)
        x = self.patch_embed(x)
        # Prepend the class token, then add the learnable position embedding.
        x = torch.cat((cls_tokens, x), dim=1)
        x = x + self.pos_embed
        features = []
        for blk in self.blocks:
            x = blk(x)
            if self.vis:
                features.append(x)
        x = self.norm(x)
        # Classify from the class-token representation.
        logits = self.head(x[:, 0])
        if self.vis:
            return logits, features
        return logits
```
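The `VisionTransformer` constructor expects a `config` object exposing `embed_dim`, `num_heads`, `mlp_ratio`, `p`, `attn_p`, and `depth`, but the code above does not define one. As a minimal sketch, a `types.SimpleNamespace` with ViT-Base/16-style values (an assumption, not part of the original) can be used to instantiate the model and run a forward pass:
```python
from types import SimpleNamespace

# Hypothetical config; values roughly correspond to ViT-Base/16.
config = SimpleNamespace(
    embed_dim=768,
    num_heads=12,
    mlp_ratio=4.,
    p=0.1,
    attn_p=0.1,
    depth=12,
)

model = VisionTransformer(config, img_size=224, patch_size=16, num_classes=10)
dummy = torch.randn(2, 3, 224, 224)   # a batch of 2 RGB images
logits = model(dummy)
print(logits.shape)                   # torch.Size([2, 10])
```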
This code defines the complete Vision Transformer architecture, including the patch embedding, the multi-head attention module, the MLP layer, and the encoder blocks[^1].
For more complex scenarios or larger datasets, consider initializing the model with pretrained weights and fine-tuning them for the task at hand; a brief fine-tuning sketch is shown below. Variants such as the Swin Transformer can also be explored to further improve performance[^2].
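As one possible starting point (a sketch under assumed setup, not a recipe from the cited references), torchvision ships a pretrained ViT-B/16 whose classification head can be replaced and fine-tuned; `FakeData` stands in for a real dataset here:
```python
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from torchvision import datasets, models, transforms

# Minimal data pipeline; swap FakeData for e.g. datasets.ImageFolder in real use.
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
])
train_loader = DataLoader(
    datasets.FakeData(size=64, image_size=(3, 224, 224), num_classes=10, transform=transform),
    batch_size=16, shuffle=True,
)

# Load ImageNet-pretrained ViT-B/16 and adapt the classification head to 10 classes.
model = models.vit_b_16(weights=models.ViT_B_16_Weights.IMAGENET1K_V1)
model.heads.head = nn.Linear(model.heads.head.in_features, 10)

device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)
optimizer = optim.AdamW(model.parameters(), lr=1e-4, weight_decay=0.05)
criterion = nn.CrossEntropyLoss()

# One pass over the loader; wrap in an epoch loop for real training.
model.train()
for images, labels in train_loader:
    images, labels = images.to(device), labels.to(device)
    optimizer.zero_grad()
    loss = criterion(model(images), labels)
    loss.backward()
    optimizer.step()
```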