>- **This article is a learning-record blog post for the [365天深度学习训练营](https://mp.weixin.qq.com/s/pgg8O9Hv8fiLBc8xbFm4HQ)**
>- **Original author: [K同学啊 | 接辅导、项目定制](https://mtyjkh.blog.csdn.net/)**
In previous cases we mostly used the datasets.ImageFolder function to import a dataset that was already organized into class folders as a Dataset, and then loaded that Dataset with a DataLoader. This article looks at how to handle a dataset that cannot be imported this way because it is not organized into class folders. The dataset used here is a licence-plate dataset.
I. Importing the data
from torchvision.transforms import transforms
from torch.utils.data import DataLoader
from torchvision import datasets
import torchvision.models as models
import torch.nn.functional as F
import torch.nn as nn
import torch,torchvision
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device
device(type='cpu')
1. Getting the class names
import os,PIL,random,pathlib
import matplotlib.pyplot as plt
# support Chinese characters in matplotlib
plt.rcParams['font.sans-serif'] = ['SimHei']   # display Chinese labels correctly
plt.rcParams['axes.unicode_minus'] = False     # display the minus sign correctly
data_dir = './015_licence_plate/'
data_dir = pathlib.Path(data_dir)
data_paths = list(data_dir.glob('*'))
classeNames = [str(path).split("\\")[1].split("_")[1].split(".")[0] for path in data_paths]
print(classeNames)
The `glob('*')` method collects the paths of all files under the given directory, and `list()` stores them in `data_paths`.

- `str(path)`: converts each `Path` object to a string.
- `split("\\")[1]`: splits the string on the backslash `\` and takes the second part (index 1), i.e. the file name.
- `split("_")[1]`: splits the file name on the underscore `_` and takes the second part (index 1), i.e. the class-name portion.
- `split(".")[0]`: splits on the dot `.` and takes the first part (index 0), which is the class name without the file extension.
['川W9BR26', '藏WP66B0', '沪E264UD', '津D8Z15T', '浙E198UJ', '陕Z813VB', '甘G24298', '青SN18Q3', '云HZR899', '辽G46Z9R', '湘G0H422', '蒙D35P2J', '冀Z4K30A', '青Q31F3Y', '京X3U68P', '粤P6W0T1', '浙LD9F20', '黑AQ8U79', '津T0B1L3', '琼D0DK01', '渝V8X77K', '陕H4M02X', '沪K8W7S0', '津L612CY', '琼U2E68N', '鄂YDK772', '赣G3B80M', '陕B4H8M5', '甘J9R5K1', '贵UB312U', '浙R6PA34', '豫P21V72', '冀K3DD99', '黑DU092M', '川CQ816G', '晋N678PK', '川T65HK2', '闽FD24Q5', '桂X2R99V', '皖ZX3N01', '晋AL98Q2', '皖S9Q7H7', '川Q802LX', '琼F21DU3', '浙MZB988', '粤C035ZT', '津T127WB', '黑J92YL9', '津L93B1R', '贵R86A0C', '川C6W88E', '川STQ089', '沪DFS269', '赣K6R6S7', '新CXX059', '藏E6E0J5', '吉TY9Y56', '赣H71Y6P', '甘Y64BV5', '黑AD426J', '云T559R9', '沪W70W0S', '苏MB1B64', '青EYJ193', '苏PK4A85', '鄂W0UC59', '苏V617UX', '鲁FU211P', '川V12X5U', '沪J541UR', '冀]
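Note that `split("\\")` relies on Windows-style path separators. A more portable sketch (my own variant, not from the original post; the name `plate_labels` is hypothetical) uses `pathlib`'s `stem`, which already drops the extension and yields the same strings as `classeNames`:

```python
# Portable alternative for extracting the plate strings
# (assumes file names of the form 000000000_川W9BR26.jpg).
plate_labels = [path.stem.split("_")[1] for path in data_paths]
print(plate_labels[:5])   # e.g. ['川W9BR26', '藏WP66B0', ...]
```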
data_paths = list(data_dir.glob('*'))
data_paths_str = [str(path) for path in data_paths]
data_paths_str
`data_dir.glob('*')`: the `pathlib.Path.glob(pattern)` method returns all paths matching the pattern `pattern`. `'*'` is a wildcard that matches any file name, so `data_dir.glob('*')` yields every entry directly under `data_dir` (it does not recurse into subdirectories).

`list(data_dir.glob('*'))`: `glob('*')` returns a generator, and `list()` turns it into a list, so `data_paths` is a list containing all the file paths.

The list comprehension `[str(path) for path in data_paths]`: `str(path)` converts a `Path` object `path` into a string; iterating over every path in `data_paths` and converting each one produces a new list, `data_paths_str`.
['015_licence_plate\\000000000_川W9BR26.jpg', '015_licence_plate\\000000000_藏WP66B0.jpg', '015_licence_plate\\000000001_沪E264UD.jpg', '015_licence_plate\\000000001_津D8Z15T.jpg', '015_licence_plate\\000000002_浙E198UJ.jpg', '015_licence_plate\\000000002_陕Z813VB.jpg', '015_licence_plate\\000000003_甘G24298.jpg', '015_licence_plate\\000000003_青SN18Q3.jpg', '015_licence_plate\\000000004_云HZR899.jpg', '015_licence_plate\\000000004_辽G46Z9R.jpg', '015_licence_plate\\000000005_湘G0H422.jpg', '015_licence_plate\\000000005_蒙D35P2J.jpg', '015_licence_plate\\000000006_冀Z4K30A.jpg', '015_licence_plate\\000000006_青Q31F3Y.jpg', '015_licence_plate\\000000007_京X3U68P.jpg', '015_licence_plate\\000000007_粤P6W0T1.jpg', ...]
2. Visualizing the data
plt.figure(figsize=(14,5))
plt.suptitle("数据示例", fontsize=15)

for i in range(30):
    plt.subplot(5, 6, i+1)
    # plt.xticks([])
    # plt.yticks([])
    # plt.grid(False)

    # show the image
    images = plt.imread(data_paths_str[i])
    plt.imshow(images)

plt.show()
3. Converting the labels to numbers
import numpy as np

char_enum = ["京","沪","津","渝","冀","晋","蒙","辽","吉","黑","苏","浙","皖","闽","赣","鲁",\
             "豫","鄂","湘","粤","桂","琼","川","贵","云","藏","陕","甘","青","宁","新","军","使"]

number   = [str(i) for i in range(0, 10)]    # digits 0 to 9
alphabet = [chr(i) for i in range(65, 91)]   # letters A to Z

char_set     = char_enum + number + alphabet
char_set_len = len(char_set)
label_name_len = len(classeNames[0])

# convert a label string into a one-hot matrix
def text2vec(text):
    vector = np.zeros([label_name_len, char_set_len])
    for i, c in enumerate(text):
        idx = char_set.index(c)
        vector[i][idx] = 1.0
    return vector
all_labels = [text2vec(i) for i in classeNames]
The `text2vec` function takes a string `text` as input and converts it into a two-dimensional array (matrix) `vector` of shape `[label_name_len, char_set_len]`.

- For each character `c` in `text`, `char_set.index(c)` finds the index `idx` of `c` within `char_set`, and `vector[i][idx]` is set to `1.0`.
- This encoding is known as one-hot encoding: it turns text into vector form, where each character's position in `char_set` determines which entries of the vector are 1.
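To go back from a one-hot matrix to the plate string, for example when decoding predictions, an inverse helper is useful. Below is a minimal sketch (my own addition; the name `vec2text` is not part of the original post) that takes the argmax along the character dimension:

```python
# Inverse of text2vec (assumption: the char_set and one-hot layout defined above).
def vec2text(vector):
    # vector has shape [label_name_len, char_set_len]; pick the most likely
    # character at each label position and look it up in char_set.
    char_indices = vector.argmax(axis=1)
    return ''.join(char_set[idx] for idx in char_indices)

# Round-trip check:
# print(vec2text(text2vec(classeNames[0])))   # should print classeNames[0]
```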
4. Loading the data files
import os
import pandas as pd
from torchvision.io import read_image
from torch.utils.data import Dataset
import torch.utils.data as data
from PIL import Image

class MyDataset(data.Dataset):
    def __init__(self, all_labels, data_paths_str, transform):
        self.img_labels = all_labels     # label information
        self.img_dir = data_paths_str    # image file paths
        self.transform = transform       # transform applied to each image

    def __len__(self):
        return len(self.img_labels)

    def __getitem__(self, index):
        image = Image.open(self.img_dir[index]).convert('RGB')  # read the image with PIL
        label = self.img_labels[index]   # label corresponding to the image
        if self.transform:
            image = self.transform(image)
        return image, label              # return the image and its label
# total_datadir = './03_traffic_sign/'  # leftover from an earlier traffic-sign case; not used in this post

# For more on transforms.Compose see: https://blog.csdn.net/qq_38251616/article/details/124878863
train_transforms = transforms.Compose([
    transforms.Resize([224, 224]),  # resize every input image to the same size
    transforms.ToTensor(),          # convert a PIL Image or numpy.ndarray to a tensor and scale it to [0, 1]
    transforms.Normalize(           # normalize towards a standard normal (Gaussian) distribution, which helps the model converge
        mean=[0.485, 0.456, 0.406],
        std =[0.229, 0.224, 0.225]) # mean and std were computed from a random sample of the dataset
])
total_data = MyDataset(all_labels, data_paths_str, train_transforms)
total_data
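As a quick sanity check (my own addition, not from the original post), indexing the dataset directly confirms that each sample is a 3×224×224 tensor paired with a 7×69 one-hot label matrix:

```python
# Inspect one sample of the custom dataset.
image, label = total_data[0]
print(image.shape)    # torch.Size([3, 224, 224])
print(label.shape)    # (7, 69) -- the label is still a numpy array at this point
```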
5. Splitting into training and test sets
train_size = int(0.8 * len(total_data))
test_size = len(total_data) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(total_data, [train_size, test_size])
train_size,test_size
(10940, 2735)
train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=16,
                                           shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset,
                                          batch_size=16,
                                          shuffle=True)
print("The number of images in a training set is: ", len(train_loader)*16)
print("The number of images in a test set is: ", len(test_loader)*16)
print("The number of batches per epoch is: ", len(train_loader))
The number of images in a training set is:  10944
The number of images in a test set is:  2736
The number of batches per epoch is:  684
for X, y in test_loader:
    print("Shape of X [N, C, H, W]: ", X.shape)
    print("Shape of y: ", y.shape, y.dtype)
    break
Shape of X [N, C, H, W]:  torch.Size([16, 3, 224, 224])
Shape of y:  torch.Size([16, 7, 69]) torch.float64
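The labels come out as `torch.float64` because `text2vec` builds float64 numpy arrays and the default collate function keeps that dtype, while the model's outputs are float32. If your PyTorch version complains about this mismatch when computing the loss, a small cast (my own suggestion, not part of the original code) can be applied to the label:

```python
import torch

# Hypothetical helper: cast a numpy one-hot label matrix to a float32 tensor.
def to_float32_label(label_np):
    return torch.as_tensor(label_np, dtype=torch.float32)

# e.g. inside MyDataset.__getitem__:
#     label = to_float32_label(self.img_labels[index])
```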
II. Building the model
class Network_bn(nn.Module):
    def __init__(self):
        super(Network_bn, self).__init__()
        """
        nn.Conv2d() arguments:
        1st argument (in_channels): number of input channels
        2nd argument (out_channels): number of output channels
        3rd argument (kernel_size): size of the convolution kernel
        4th argument (stride): stride, default 1
        5th argument (padding): padding, default 0
        """
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=12, kernel_size=5, stride=1, padding=0)
        self.bn1 = nn.BatchNorm2d(12)
        self.conv2 = nn.Conv2d(in_channels=12, out_channels=12, kernel_size=5, stride=1, padding=0)
        self.bn2 = nn.BatchNorm2d(12)
        self.pool = nn.MaxPool2d(2,2)
        self.conv4 = nn.Conv2d(in_channels=12, out_channels=24, kernel_size=5, stride=1, padding=0)
        self.bn4 = nn.BatchNorm2d(24)
        self.conv5 = nn.Conv2d(in_channels=24, out_channels=24, kernel_size=5, stride=1, padding=0)
        self.bn5 = nn.BatchNorm2d(24)
        self.fc1 = nn.Linear(24*50*50, label_name_len*char_set_len)
        self.reshape = Reshape([label_name_len, char_set_len])

    def forward(self, x):
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        x = self.pool(x)
        x = F.relu(self.bn4(self.conv4(x)))
        x = F.relu(self.bn5(self.conv5(x)))
        x = self.pool(x)
        x = x.view(-1, 24*50*50)
        x = self.fc1(x)
        # final reshape to [batch, label_name_len, char_set_len]
        x = self.reshape(x)
        return x

# define the Reshape layer
class Reshape(nn.Module):
    def __init__(self, shape):
        super(Reshape, self).__init__()
        self.shape = shape

    def forward(self, x):
        return x.view(x.size(0), *self.shape)
device = "cuda" if torch.cuda.is_available() else "cpu"
print("Using {} device".format(device))
model = Network_bn().to(device)
model
Using cpu device
Network_bn(
  (conv1): Conv2d(3, 12, kernel_size=(5, 5), stride=(1, 1))
  (bn1): BatchNorm2d(12, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (conv2): Conv2d(12, 12, kernel_size=(5, 5), stride=(1, 1))
  (bn2): BatchNorm2d(12, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (pool): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  (conv4): Conv2d(12, 24, kernel_size=(5, 5), stride=(1, 1))
  (bn4): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (conv5): Conv2d(24, 24, kernel_size=(5, 5), stride=(1, 1))
  (bn5): BatchNorm2d(24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
  (fc1): Linear(in_features=60000, out_features=483, bias=True)
  (reshape): Reshape()
)
import torchsummary
''' display the network structure '''
torchsummary.summary(model, (3, 224, 224))
----------------------------------------------------------------
        Layer (type)               Output Shape         Param #
================================================================
            Conv2d-1         [-1, 12, 220, 220]             912
       BatchNorm2d-2         [-1, 12, 220, 220]              24
            Conv2d-3         [-1, 12, 216, 216]           3,612
       BatchNorm2d-4         [-1, 12, 216, 216]              24
         MaxPool2d-5         [-1, 12, 108, 108]               0
            Conv2d-6         [-1, 24, 104, 104]           7,224
       BatchNorm2d-7         [-1, 24, 104, 104]              48
            Conv2d-8         [-1, 24, 100, 100]          14,424
       BatchNorm2d-9         [-1, 24, 100, 100]              48
        MaxPool2d-10           [-1, 24, 50, 50]               0
           Linear-11                  [-1, 483]      28,980,483
          Reshape-12                [-1, 7, 69]                0
================================================================
Total params: 29,006,799
Trainable params: 29,006,799
Non-trainable params: 0
----------------------------------------------------------------
Input size (MB): 0.57
Forward/backward pass size (MB): 26.56
Params size (MB): 110.65
Estimated Total Size (MB): 137.79
----------------------------------------------------------------
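For reference (my own bookkeeping, not from the original post), the `24*50*50 = 60000` input size of `fc1` follows from tracking the spatial size of a 224×224 input through the unpadded 5×5 convolutions and the two 2×2 poolings:

```python
# Spatial-size bookkeeping for a 224x224 input through the layers above.
size = 224
size -= 4        # conv1: 5x5, no padding -> 220
size -= 4        # conv2 -> 216
size //= 2       # first 2x2 max pool -> 108
size -= 4        # conv4 -> 104
size -= 4        # conv5 -> 100
size //= 2       # second 2x2 max pool -> 50
print(24 * size * size)   # 60000, the in_features of fc1
```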
Note the model's output shape `[-1, 7, 69]`. Our earlier networks all produced two-dimensional outputs such as `[-1, 7]`, `[-1, 2]`, or `[-1, 4]`; if you ever need a model whose output is multi-dimensional, this case is a good example.

What does the `-1` in `[-1, 7, 69]` mean? In a neural network, when we are unsure of a dimension's size but want it determined automatically, we can use `-1`. It tells PyTorch to infer that dimension so that the other dimensions keep their specified sizes and the tensor's total number of elements is preserved. For example, `[-1, 7, 69]` describes a three-dimensional tensor whose first dimension is left unspecified, whose second dimension has size 7, and whose third dimension has size 69; the `-1` is resolved so that the total size matches the actual input (here, batch size × 7 × 69). In practice `-1` is usually placed on the batch dimension, because the batch size may vary during training; using `-1` lets the model accept batches of different sizes.
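A tiny standalone illustration (my own, not from the original post) of how `-1` is inferred in `view`/`reshape`:

```python
import torch

t = torch.zeros(16, 483)                        # e.g. a batch of 16 flattened outputs (483 = 7 * 69)
print(t.view(-1, 7, 69).shape)                  # torch.Size([16, 7, 69]); the 16 is inferred from -1
print(t.view(-1, 7, 69).numel() == t.numel())   # True: the total number of elements is unchanged
```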
III. Model training
1. Optimizer and loss function
optimizer = torch.optim.Adam(model.parameters(),
                             lr=1e-4,
                             weight_decay=0.0001)
loss_model = nn.CrossEntropyLoss()
from torch.autograd import Variable

def test(model, test_loader, loss_model):
    size = len(test_loader.dataset)
    num_batches = len(test_loader)

    model.eval()
    test_loss, correct = 0, 0   # note: correct is never updated below, so accuracy is not measured here
    with torch.no_grad():
        for X, y in test_loader:
            X, y = X.to(device), y.to(device)
            pred = model(X)
            test_loss += loss_model(pred, y).item()
    test_loss /= num_batches
    print(f"Avg loss: {test_loss:>8f} \n")
    return correct, test_loss

def train(model, train_loader, loss_model, optimizer):
    model = model.to(device)
    model.train()

    for i, (images, labels) in enumerate(train_loader, 0):  # 0 is the starting index for enumerate
        # Variable is kept from the original code; in modern PyTorch plain tensors behave the same way
        images = Variable(images.to(device))
        labels = Variable(labels.to(device))

        optimizer.zero_grad()
        outputs = model(images)

        loss = loss_model(outputs, labels)
        loss.backward()
        optimizer.step()

        if i % 1000 == 0:
            print('[%5d] loss: %.3f' % (i, loss))
test_acc_list  = []
test_loss_list = []
epochs = 5

for t in range(epochs):
    print(f"Epoch {t+1}\n-------------------------------")
    train(model, train_loader, loss_model, optimizer)
    test_acc, test_loss = test(model, test_loader, loss_model)
    test_acc_list.append(test_acc)
    test_loss_list.append(test_loss)
print("Done!")
Epoch 1
-------------------------------
[    0] loss: 0.037
Avg loss: 0.039178

Epoch 2
-------------------------------
[    0] loss: 0.028
Avg loss: 0.036960

Epoch 3
-------------------------------
[    0] loss: 0.024
Avg loss: 0.035773

Epoch 4
-------------------------------
[    0] loss: 0.020
Avg loss: 0.032440

Epoch 5
-------------------------------
[    0] loss: 0.024
Avg loss: 0.033385

Done!
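The `test` function above only tracks the loss, so `test_acc_list` stays at zero. If a plate-level accuracy is wanted, a minimal sketch (my own addition, assuming the one-hot label layout produced by `text2vec`) can compare the argmax over the character dimension:

```python
import torch

# Hypothetical accuracy check: a plate counts as correct only if all 7 characters match.
def plate_accuracy(model, loader):
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for X, y in loader:
            X, y = X.to(device), y.to(device)
            pred_chars = model(X).argmax(dim=2)   # [batch, 7] predicted character indices
            true_chars = y.argmax(dim=2)          # [batch, 7] ground-truth character indices
            correct += (pred_chars == true_chars).all(dim=1).sum().item()
            total += X.size(0)
    return correct / total

# Example usage:
# print(f"test plate accuracy: {plate_accuracy(model, test_loader):.3f}")
```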
IV. Visualizing the results
import numpy as np
import matplotlib.pyplot as plt
x = [i for i in range(1,6)]
plt.plot(x, test_loss_list, label="Loss", alpha=0.8)
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.grid(True)
plt.legend()
plt.show()
V. Takeaways
1. We learned how to handle a dataset that has not been pre-sorted into class folders.
2. The string labels were converted into numeric form via one-hot encoding.
3. The -1 in the custom model can be understood as a "slack" dimension of the reshape: it is inferred automatically so that the total number of elements stays the same.