1. Testing a PyTorch UNet implementation on a dataset; 2. A hybrid Transformer + CNN model for image segmentation; 3. A Swin Transformer-based image segmentation architecture


Table of Contents

      • 1. UNet Model Definition
      • 2. Dataset Preparation
      • 3. Test Code
      • Model Architecture Overview
      • PyTorch Implementation
      • Architecture Overview
      • PyTorch Implementation

[Figure 1: the classic UNet encoder-decoder architecture]
UNet is a classic convolutional neural network architecture for image segmentation. It consists of an encoder and a decoder, connected by skip connections that fuse information from different levels. Below is a PyTorch implementation of the UNet model, followed by the workflow for testing it on a dataset.

1. UNet Model Definition

import torch
import torch.nn as nn
import torch.nn.functional as F

class DoubleConv(nn.Module):
    """(convolution => [BN] => ReLU) * 2"""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.double_conv = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        return self.double_conv(x)

class Down(nn.Module):
    """Downscaling with maxpool then double conv"""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.maxpool_conv = nn.Sequential(
            nn.MaxPool2d(2),
            DoubleConv(in_channels, out_channels)
        )

    def forward(self, x):
        return self.maxpool_conv(x)

class Up(nn.Module):
    """Upscaling then double conv"""

    def __init__(self, in_channels, out_channels, bilinear=True):
        super().__init__()

        # if bilinear, use upsampling followed by normal convolutions;
        # out_channels already accounts for the channel reduction applied in UNet.__init__
        if bilinear:
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
            self.conv = DoubleConv(in_channels, out_channels)
        else:
            self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2)
            self.conv = DoubleConv(in_channels, out_channels)

    def forward(self, x1, x2):
        x1 = self.up(x1)
        # input is CHW
        diffY = x2.size()[2] - x1.size()[2]
        diffX = x2.size()[3] - x1.size()[3]

        x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,
                        diffY // 2, diffY - diffY // 2])
        # if you have padding issues, see
        # https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
        # https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
        x = torch.cat([x2, x1], dim=1)
        return self.conv(x)

class OutConv(nn.Module):
    def __init__(self, in_channels, out_channels):
        super(OutConv, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)

    def forward(self, x):
        return self.conv(x)

class UNet(nn.Module):
    def __init__(self, n_channels, n_classes, bilinear=True):
        super(UNet, self).__init__()
        self.n_channels = n_channels
        self.n_classes = n_classes
        self.bilinear = bilinear

        self.inc = DoubleConv(n_channels, 64)
        self.down1 = Down(64, 128)
        self.down2 = Down(128, 256)
        self.down3 = Down(256, 512)
        factor = 2 if bilinear else 1
        self.down4 = Down(512, 1024 // factor)
        self.up1 = Up(1024, 512 // factor, bilinear)
        self.up2 = Up(512, 256 // factor, bilinear)
        self.up3 = Up(256, 128 // factor, bilinear)
        self.up4 = Up(128, 64, bilinear)
        self.outc = OutConv(64, n_classes)

    def forward(self, x):
        x1 = self.inc(x)
        x2 = self.down1(x1)
        x3 = self.down2(x2)
        x4 = self.down3(x3)
        x5 = self.down4(x4)
        x = self.up1(x5, x4)
        x = self.up2(x, x3)
        x = self.up3(x, x2)
        x = self.up4(x, x1)
        logits = self.outc(x)
        return logits

# Instantiate the model (calling .cuda() requires a CUDA-capable GPU)
n_channels = 3
n_classes = 1
model = UNet(n_channels, n_classes).cuda()
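As a quick sanity check (a minimal sketch, not part of the original workflow; the random tensor is just a stand-in for a real image batch), you can run a forward pass and confirm that the output resolution matches the input:

# Dummy batch: 2 RGB images of 256x256 (hypothetical sizes), CPU is fine here
dummy = torch.randn(2, 3, 256, 256)
with torch.no_grad():
    out = UNet(n_channels=3, n_classes=1)(dummy)
print(out.shape)  # expected: torch.Size([2, 1, 256, 256])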

2. Dataset Preparation

Assuming you already have a dataset of images and their corresponding label masks, you can prepare it as follows:

from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from PIL import Image
import os

class CustomDataset(Dataset):
    def __init__(self, img_dir, mask_dir, transform=None):
        self.img_dir = img_dir
        self.mask_dir = mask_dir
        self.transform = transform
        self.images = sorted(os.listdir(img_dir))
        self.masks = sorted(os.listdir(mask_dir))

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        img_path = os.path.join(self.img_dir, self.images[idx])
        mask_path = os.path.join(self.mask_dir, self.masks[idx])
        
        image = Image.open(img_path).convert("RGB")
        mask = Image.open(mask_path).convert("L")

        if self.transform:
            image = self.transform(image)
            mask = self.transform(mask)

        return image, mask

# Transforms (ToTensor scales both images and binary masks to [0, 1])
transform = transforms.Compose([
    transforms.ToTensor(),
])

dataset = CustomDataset(
    img_dir="path/to/your/images",
    mask_dir="path/to/your/masks",
    transform=transform
)

data_loader = DataLoader(dataset, batch_size=4, shuffle=False)
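Before running the model, it helps to pull a single batch and check the tensor shapes (a small sketch; the directory paths above are placeholders, and all images in a batch must share the same size, so add a Resize transform if yours differ):

# Inspect one batch; exact shapes depend on your image sizes
images, masks = next(iter(data_loader))
print(images.shape, masks.shape)  # e.g. torch.Size([4, 3, H, W]) and torch.Size([4, 1, H, W])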

3. Test Code

The following test code loads the model and runs predictions:

def test_model(model, data_loader, device):
    model.eval()
    with torch.no_grad():
        for images, masks in data_loader:
            images = images.to(device)
            masks = masks.to(device)

            outputs = model(images)
            # n_classes == 1 is binary segmentation, so threshold the sigmoid output.
            # For multi-class outputs, use torch.argmax(outputs, dim=1) instead.
            preds = (torch.sigmoid(outputs) > 0.5).squeeze(1).cpu().numpy()

            # Visualize the results
            visualize_results(images.cpu(), masks.cpu().numpy(), preds)

def visualize_results(images, masks, preds, num_samples=3):
    import matplotlib.pyplot as plt
    num_samples = min(num_samples, len(images))
    # squeeze=False keeps axes 2-D even when num_samples == 1
    fig, axes = plt.subplots(num_samples, 3, figsize=(15, 5*num_samples), squeeze=False)

    for i in range(num_samples):
        ax = axes[i]
        ax[0].imshow(images[i].permute(1, 2, 0))
        ax[0].set_title('Image')
        ax[1].imshow(masks[i].squeeze(), cmap='gray')
        ax[1].set_title('Ground Truth')
        ax[2].imshow(preds[i].squeeze(), cmap='gray')
        ax[2].set_title('Prediction')

    plt.show()

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Create the model and move it to the device
model_unet = UNet(n_channels=3, n_classes=1).to(device)

# Assuming the model has already been trained, load its weights
model_unet.load_state_dict(torch.load('path/to/unet_weights.pth', map_location=device))

test_model(model_unet, data_loader, device)
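Visual inspection is hard to compare across runs, so a quantitative metric is usually added. The helper below is a minimal sketch (not part of the original code) that computes the mean Dice coefficient over the loader for the binary n_classes=1 setup; it assumes the masks are 0/1 valued after ToTensor:

def evaluate_dice(model, data_loader, device, eps=1e-6):
    """Mean Dice coefficient over a data loader (binary segmentation)."""
    model.eval()
    scores = []
    with torch.no_grad():
        for images, masks in data_loader:
            images, masks = images.to(device), masks.to(device)
            preds = (torch.sigmoid(model(images)) > 0.5).float()
            inter = (preds * masks).sum(dim=(1, 2, 3))
            union = preds.sum(dim=(1, 2, 3)) + masks.sum(dim=(1, 2, 3))
            scores.append(((2 * inter + eps) / (union + eps)).mean().item())
    return sum(scores) / len(scores)

print('Dice:', evaluate_dice(model_unet, data_loader, device))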

[Figure 2: the hybrid Transformer + CNN segmentation model]
This section implements a hybrid model that combines a Transformer with a CNN, designed for image segmentation. Below is an explanation of the model followed by its PyTorch implementation.

Model Architecture Overview

  1. Input layer: the input is a sequence of patch embeddings \( x_p^1, x_p^2, \ldots, x_p^N \), where each token comes from a patch extracted from the original image (a worked example of the patch count follows this list).

  2. Transformer layers

    • Layer Norm: normalizes the input.
    • Multi-Head Self-Attention (MSA): attends to information at different positions of the input sequence.
    • MLP (multi-layer perceptron): processes the output of the MSA.
  3. CNN blocks

    • Hidden feature extraction: extracts features from the input.
    • Linear projection: maps the hidden features into a lower-dimensional space.
    • Downsampling: reduces the spatial resolution by factors of 2, 4, and 8.
    • Upsampling: increases the spatial resolution by factors of 2, 4, and 8.
    • Feature concatenation: combines features from different levels.
  4. Segmentation head: produces the final segmentation mask.
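As a worked example with the defaults used in the implementation below (img_size=352, patch_size=16, 3 input channels): the patch sequence has \( N = (352 / 16)^2 = 484 \) tokens, each flattened patch carries \( 3 \times 16 \times 16 = 768 \) values, and a linear patch embedding projects these into the embedding dimension (also 768 here, which is only a coincidence of these particular defaults).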

PyTorch Implementation

import torch
import torch.nn as nn
import torch.nn.functional as F

class TransformerBlock(nn.Module):
    def __init__(self, dim, heads=8, mlp_dim=2048):
        super().__init__()
        self.norm1 = nn.LayerNorm(dim)
        # batch_first=True so the block accepts (batch, tokens, dim) tensors
        self.attn = nn.MultiheadAttention(dim, heads, batch_first=True)
        self.norm2 = nn.LayerNorm(dim)
        self.mlp = nn.Sequential(
            nn.Linear(dim, mlp_dim),
            nn.GELU(),
            nn.Linear(mlp_dim, dim)
        )

    def forward(self, x):
        y = self.norm1(x)
        x = x + self.attn(y, y, y)[0]
        x = x + self.mlp(self.norm2(x))
        return x

class CNNBlock(nn.Module):
    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1)
        self.relu = nn.ReLU()
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)

    def forward(self, x):
        x = self.conv(x)
        x = self.relu(x)
        x = self.pool(x)
        return x

class UpsampleBlock(nn.Module):
    def __init__(self, in_channels, skip_channels, out_channels):
        super().__init__()
        self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        # the conv sees the upsampled features concatenated with the skip connection
        self.conv = nn.Conv2d(in_channels + skip_channels, out_channels, kernel_size=3, padding=1)
        self.relu = nn.ReLU()

    def forward(self, x, skip):
        x = self.up(x)
        # match the skip feature map's spatial size (odd sizes appear after repeated pooling)
        x = F.interpolate(x, size=skip.shape[2:], mode='bilinear', align_corners=True)
        x = torch.cat([x, skip], dim=1)
        x = self.conv(x)
        x = self.relu(x)
        return x

class SegmentationHead(nn.Module):
    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)

    def forward(self, x):
        return self.conv(x)

class HybridModel(nn.Module):
    def __init__(self, img_size=352, patch_size=16, in_channels=3, num_classes=1, transformer_layers=12):
        super().__init__()
        self.patch_size = patch_size
        self.num_patches = (img_size // patch_size) ** 2
        self.embed_dim = 768  # example embedding dimension

        # Linear projection of flattened patches into the embedding space
        self.patch_embed = nn.Linear(in_channels * patch_size ** 2, self.embed_dim)
        self.transformer = nn.Sequential(*[TransformerBlock(self.embed_dim) for _ in range(transformer_layers)])

        # The first CNN block consumes the reshaped transformer features (embed_dim channels)
        self.cnn_blocks = nn.ModuleList([
            CNNBlock(self.embed_dim, 64),
            CNNBlock(64, 128),
            CNNBlock(128, 256),
            CNNBlock(256, 512)
        ])

        # (in_channels, skip_channels, out_channels) for each decoder stage
        self.upsample_blocks = nn.ModuleList([
            UpsampleBlock(512, 256, 256),
            UpsampleBlock(256, 128, 128),
            UpsampleBlock(128, 64, 64),
            UpsampleBlock(64, self.embed_dim, 32)
        ])

        self.segmentation_head = SegmentationHead(32, num_classes)

    def forward(self, x):
        B, C, H, W = x.shape

        # Patch embedding: split the image into patches and project them
        patches = x.unfold(2, self.patch_size, self.patch_size).unfold(3, self.patch_size, self.patch_size)
        patches = patches.permute(0, 2, 3, 1, 4, 5).reshape(B, self.num_patches, -1)
        z = self.transformer(self.patch_embed(patches))

        # Reshape the token sequence back into a 2D feature map
        z = z.transpose(1, 2).reshape(B, self.embed_dim, H // self.patch_size, W // self.patch_size)

        # CNN blocks: save each input as a skip connection before downsampling
        skips = []
        for cnn_block in self.cnn_blocks:
            skips.append(z)
            z = cnn_block(z)

        # Upsample and fuse with the matching skip connection
        for upsample_block in self.upsample_blocks:
            z = upsample_block(z, skips.pop())

        # Segmentation head, upsampled back to the input resolution
        z = self.segmentation_head(z)
        z = F.interpolate(z, size=(H, W), mode='bilinear', align_corners=False)
        return z

# Usage example
model = HybridModel()
input_image = torch.randn(1, 3, 352, 352)
output = model(input_image)
print(output.shape)  # torch.Size([1, 1, 352, 352]) with the implementation above

The Transformer blocks and CNN blocks are integrated into a single network for image segmentation. The HybridModel class wraps the whole architecture, including the Transformer layers, CNN blocks, upsampling blocks, and segmentation head; the usage example above shows how to create a model instance and pass an input image through it.

[Figure 3: the Swin Transformer-based segmentation architecture (encoder, bottleneck, decoder)]
The figure shows an image segmentation architecture based on the Swin Transformer, consisting of an encoder, a bottleneck, and a decoder. Below is a detailed breakdown of the architecture and a Python implementation.

Architecture Overview

  1. Input layer: the input image has size \( W \times H \); after Patch Partition it becomes \( \frac{W}{4} \times \frac{H}{4} \times 48 \), since each 4x4x3 patch is flattened into 48 values (a worked example of the feature sizes follows this list).
  2. Encoder
    • Linear Embedding: projects the patches into the embedding dimension.
    • Swin Transformer Block: two Swin Transformer blocks per stage.
    • Patch Merging: merges neighbouring patches, halving the spatial resolution.
  3. Bottleneck
    • Swin Transformer blocks at the lowest resolution.
  4. Decoder
    • Patch Expanding: expands patches, doubling the spatial resolution.
    • Swin Transformer Block: two Swin Transformer blocks per stage.
    • Skip Connection: passes encoder features to the decoder.
  5. Output layer: a linear projection produces the final segmentation mask.
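As a worked example with the defaults in the implementation below (img_size=224, patch_size=4, embed_dim=96): the encoder token grids are \( 56 \times 56 \times 96 \), \( 28 \times 28 \times 192 \), and \( 14 \times 14 \times 384 \); the bottleneck operates at \( 7 \times 7 \times 768 \); and the decoder expands back to \( 56 \times 56 \times 96 \) before the segmentation head restores the full \( 224 \times 224 \) input resolution.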

PyTorch Implementation

import torch
import torch.nn as nn
import torch.nn.functional as F

class SwinTransformerBlock(nn.Module):
    """Simplified Swin block: window partitioning and shifted windows are omitted here,
    so this is effectively a standard pre-norm Transformer block."""

    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, drop=0., attn_drop=0., drop_path=0.):
        super().__init__()
        self.norm1 = nn.LayerNorm(dim)
        self.attn = nn.MultiheadAttention(dim, num_heads, dropout=attn_drop, bias=qkv_bias, batch_first=True)
        self.drop_path = nn.Dropout(drop_path)  # simple stand-in for stochastic depth
        self.norm2 = nn.LayerNorm(dim)
        self.mlp = nn.Sequential(
            nn.Linear(dim, int(dim * mlp_ratio)),
            nn.GELU(),
            nn.Linear(int(dim * mlp_ratio), dim),
            nn.Dropout(drop)
        )

    def forward(self, x):
        # x: B, H*W, C
        y = self.norm1(x)
        x = x + self.drop_path(self.attn(y, y, y)[0])
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x

class PatchMerging(nn.Module):
    def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
        super().__init__()
        self.input_resolution = input_resolution
        self.dim = dim
        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
        self.norm = norm_layer(4 * dim)

    def forward(self, x):
        """
        x: B, H*W, C
        """
        H, W = self.input_resolution
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"
        assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) are not even."

        x = x.view(B, H, W, C)

        x0 = x[:, 0::2, 0::2, :]  # B H/2 W/2 C
        x1 = x[:, 1::2, 0::2, :]  # B H/2 W/2 C
        x2 = x[:, 0::2, 1::2, :]  # B H/2 W/2 C
        x3 = x[:, 1::2, 1::2, :]  # B H/2 W/2 C
        x = torch.cat([x0, x1, x2, x3], -1)  # B H/2 W/2 4*C
        x = x.view(B, -1, 4 * C)  # B H/2*W/2 4*C

        x = self.norm(x)
        x = self.reduction(x)

        return x

class PatchExpanding(nn.Module):
    def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
        super().__init__()
        self.input_resolution = input_resolution
        self.dim = dim
        self.expand = nn.Linear(dim, 2 * dim, bias=False)
        self.norm = norm_layer(dim // 2)

    def forward(self, x):
        """
        x: B, H*W, C  ->  B, (2H)*(2W), C/2
        """
        H, W = self.input_resolution
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"

        x = self.expand(x)            # B, H*W, 2*C
        x = x.view(B, H, W, 2 * C)

        # split each token into a 2x2 neighbourhood of C/2-dimensional tokens
        x = x.view(B, H, W, 2, 2, C // 2)
        x = x.permute(0, 1, 3, 2, 4, 5).reshape(B, 2 * H, 2 * W, C // 2)
        x = x.reshape(B, -1, C // 2)  # B, 4*H*W, C/2

        x = self.norm(x)
        return x

class SwinUNet(nn.Module):
    def __init__(self, img_size=224, patch_size=4, in_chans=3, num_classes=1, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24]):
        super().__init__()
        self.img_size = img_size
        self.patch_size = patch_size
        self.embed_dim = embed_dim
        self.num_stages = len(depths)
        base_res = img_size // patch_size  # token grid side length after Patch Partition

        # Patch Partition + Linear Embedding
        self.patch_partition = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
        self.linear_embedding = nn.Linear(embed_dim, embed_dim)

        # Encoder: Swin blocks at each stage, Patch Merging between stages
        self.encoder_blocks = nn.ModuleList()
        self.downsamples = nn.ModuleList()
        for i in range(self.num_stages):
            dim = embed_dim * (2 ** i)
            self.encoder_blocks.append(nn.Sequential(
                *[SwinTransformerBlock(dim, num_heads[i]) for _ in range(depths[i])]))
            if i < self.num_stages - 1:
                res = base_res // (2 ** i)
                self.downsamples.append(PatchMerging((res, res), dim))

        # Bottleneck at the deepest resolution
        bottleneck_dim = embed_dim * (2 ** (self.num_stages - 1))
        self.bottleneck = nn.Sequential(
            SwinTransformerBlock(bottleneck_dim, num_heads[-1]),
            SwinTransformerBlock(bottleneck_dim, num_heads[-1])
        )

        # Decoder: Patch Expanding, skip concatenation + linear reduction, Swin blocks
        self.upsamples = nn.ModuleList()
        self.concat_back = nn.ModuleList()
        self.decoder_blocks = nn.ModuleList()
        for i in reversed(range(self.num_stages - 1)):
            dim = embed_dim * (2 ** i)           # channels after expanding
            res = base_res // (2 ** (i + 1))     # resolution before expanding
            self.upsamples.append(PatchExpanding((res, res), dim * 2))
            self.concat_back.append(nn.Linear(dim * 2, dim))
            self.decoder_blocks.append(nn.Sequential(
                *[SwinTransformerBlock(dim, num_heads[i]) for _ in range(depths[i])]))

        self.segmentation_head = nn.Conv2d(embed_dim, num_classes, kernel_size=1)

    def forward(self, x):
        B, _, H, W = x.shape

        # Patch Partition + Linear Embedding
        x = self.patch_partition(x)        # B, C, H/4, W/4
        x = x.flatten(2).transpose(1, 2)   # B, L, C
        x = self.linear_embedding(x)

        # Encoder: keep each stage's output as a skip connection before merging
        skips = []
        for i, blocks in enumerate(self.encoder_blocks):
            x = blocks(x)
            if i < self.num_stages - 1:
                skips.append(x)
                x = self.downsamples[i](x)

        # Bottleneck
        x = self.bottleneck(x)

        # Decoder: expand, fuse with the skip at the same resolution, then refine
        for up, reduce, blocks in zip(self.upsamples, self.concat_back, self.decoder_blocks):
            x = up(x)
            skip = skips.pop()
            x = blocks(reduce(torch.cat([x, skip], dim=-1)))

        # Segmentation head on the H/4 x W/4 feature map, upsampled to the input size
        side = self.img_size // self.patch_size
        x = x.transpose(1, 2).reshape(B, self.embed_dim, side, side)
        x = self.segmentation_head(x)
        x = F.interpolate(x, size=(H, W), mode='bilinear', align_corners=False)

        return x

# Usage example
model = SwinUNet()
input_image = torch.randn(1, 3, 224, 224)
output = model(input_image)
print(output.shape)  # torch.Size([1, 1, 224, 224]) with the implementation above

This is a Swin Transformer-based model for image segmentation. The SwinUNet class wraps the whole architecture, including the encoder, bottleneck, and decoder; the usage example above shows how to create a model instance and pass an input image through it.
