一般在进行模型训练之前,都要做一个数据集分析的任务。这个在英文中一般缩写为EDA,也就是Exploratory Data Analysis(探索性数据分析)。
import pandas as pd
import torch
from torchvision.utils import make_grid
import matplotlib.pyplot as plt
# --- EDA: basic statistics of the train/test CSVs ---
train_df = pd.read_csv('/Users/xiaoshunv/Desktop/MNIST_csv/train.csv')
n_train = len(train_df)
# Every column except the leading 'label' column is one pixel (28*28 = 784).
n_pixels = len(train_df.columns) - 1
# De-duplicating the labels with set() yields the number of classes.
n_class = len(set(train_df['label']))
print('Number of training samples: {0}'.format(n_train))
print('Number of training pixels: {0}'.format(n_pixels))
print('Number of classes: {0}'.format(n_class))
# Read the test set. Use a separate variable for its pixel count so the
# train-set n_pixels computed above is not silently overwritten (the test
# CSV has no label column, so its column count is the pixel count itself).
test_df = pd.read_csv('/Users/xiaoshunv/Desktop/MNIST_csv/test.csv')
n_test = len(test_df)
n_test_pixels = len(test_df.columns)
print('Number of test samples: {0}'.format(n_test))
print('Number of test pixels: {0}'.format(n_test_pixels))
import numpy as np
from torchvision.utils import make_grid
import torch
import matplotlib.pyplot as plt
# Visual sanity check: draw 8 random training digits and print their labels.
sample_idx = np.random.randint(len(train_df), size=8)  # 8 random row indices
# Drop column 0 (the label), scale pixels to [0, 1], shape as (N, C, H, W).
images = train_df.iloc[sample_idx, 1:].values.reshape(-1, 1, 28, 28) / 255.
grid = make_grid(torch.Tensor(images), nrow=8)  # tile the 8 digits side by side
plt.rcParams['figure.figsize'] = (16, 2)
# make_grid returns (C, H, W); imshow expects (H, W, C).
plt.imshow(grid.numpy().transpose((1, 2, 0)))
plt.axis('off')
plt.show()
# Labels in the same order as the grid above.
print(*list(train_df.iloc[sample_idx, 0].values), sep=', ')
# Check for class imbalance: plot how many samples each digit class has.
label_counts = train_df['label'].value_counts()  # computed once, reused below
plt.figure(figsize=(8, 5))
plt.bar(label_counts.index, label_counts)
plt.xticks(np.arange(n_class))
plt.xlabel('Class', fontsize=16)
plt.ylabel('Count', fontsize=16)
plt.grid('on', axis='y')
plt.show()
导入需要的包和数据集
import pandas as pd
import torch
from torch.utils.data import Dataset,DataLoader
from torchvision import transforms
# Reload both CSVs and recompute the dataset statistics used below.
train_df = pd.read_csv('/Users/xiaoshunv/Desktop/MNIST_csv/train.csv')
test_df = pd.read_csv('/Users/xiaoshunv/Desktop/MNIST_csv/test.csv')
n_train, n_test = len(train_df), len(test_df)
n_pixels = len(train_df.columns) - 1   # 784 pixel columns after the label
n_class = train_df['label'].nunique()  # number of distinct digit classes
构建一个dataset
class MNIST_data(Dataset):
    """Dataset over a Kaggle-style MNIST CSV file.

    Training CSVs have a leading 'label' column followed by 784 pixel
    columns; test CSVs have only the 784 pixel columns.  ``__getitem__``
    returns ``(image, label)`` for training rows and just ``image`` for
    test rows.
    """

    def __init__(self, file_path, transform=None):
        """
        Args:
            file_path: path to the CSV file to load.
            transform: callable applied to each (28, 28, 1) uint8 image.
                Defaults to ToPILImage -> ToTensor -> Normalize(0.5, 0.5).
                Built lazily here instead of as a default argument, so the
                heavy Compose is not constructed at function-definition time.
        """
        df = pd.read_csv(file_path)
        # A 'label' column marks a training CSV; its absence marks a test
        # CSV.  (More robust than comparing the column count against a
        # module-level n_pixels global, as the original did.)
        if 'label' in df.columns:
            # Training data: column 0 is the label, the rest are pixels.
            self.X = df.iloc[:, 1:].values.reshape((-1, 28, 28)).astype(np.uint8)[:, :, :, None]
            self.y = torch.from_numpy(df.iloc[:, 0].values)
        else:
            # Test data: every column is a pixel; there are no labels.
            self.X = df.values.reshape((-1, 28, 28)).astype(np.uint8)[:, :, :, None]
            self.y = None
        if transform is None:
            transform = transforms.Compose([
                transforms.ToPILImage(),
                transforms.ToTensor(),
                transforms.Normalize(mean=(0.5,), std=(0.5,)),
            ])
        self.transform = transform

    def __len__(self):
        # Number of rows (images) in the CSV.
        return len(self.X)

    def __getitem__(self, idx):
        # Training set: (transformed image, label); test set: image only.
        if self.y is not None:
            return self.transform(self.X[idx]), self.y[idx]
        return self.transform(self.X[idx])
可以看到,这个dataset中,根据是否有标签分成返回两个不同的值。(训练集的话,同时返回数据和标签,测试集中仅仅返回数据)。
# Build the datasets and wrap them in DataLoaders.
batch_size = 64

# Training images get a small random rotation as augmentation before the
# usual ToTensor + Normalize.  NOTE(review): these paths are relative while
# the EDA section above used absolute /Users/... paths — confirm which is right.
train_transform = transforms.Compose([
    transforms.ToPILImage(),
    transforms.RandomRotation(degrees=20),
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.5,), std=(0.5,)),
])
train_dataset = MNIST_data('./MNIST_csv/train.csv', transform=train_transform)
test_dataset = MNIST_data('./MNIST_csv/test.csv')  # default transform, no augmentation

# Shuffle only the training loader; the test order must stay fixed so
# predictions line up with the CSV rows.
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size, shuffle=False)
关于这段代码:
import torch.nn as nn
class Net(nn.Module):
    """Small VGG-style CNN for 28x28 single-channel MNIST digits.

    Two conv stages (32 then 64 channels, each pair of convs followed by
    BatchNorm/ReLU and a 2x2 max-pool) reduce the input to 64x7x7, then a
    three-layer MLP head with dropout produces the 10 class logits.
    """

    def __init__(self):
        super(Net, self).__init__()
        # First conv kept as a separate attribute to mirror the original
        # structure; functionally it is just the first layer of `features`.
        self.features1 = nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1)
        self.features = nn.Sequential(
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),   # 28x28 -> 14x14
            nn.Conv2d(32, 64, kernel_size=3, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.Conv2d(64, 64, kernel_size=3, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),   # 14x14 -> 7x7
        )
        self.classifier = nn.Sequential(
            nn.Dropout(p=0.5),
            nn.Linear(64 * 7 * 7, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5),
            nn.Linear(512, 512),
            nn.BatchNorm1d(512),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5),
            nn.Linear(512, 10),
        )
        # Xavier-init all conv/linear weights; reset every BatchNorm layer
        # to weight=1, bias=0.  The original only reset BatchNorm2d and
        # skipped the classifier's BatchNorm1d layers — included here for
        # consistency (same values as PyTorch's defaults, so no behavior
        # change).
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.Linear)):
                nn.init.xavier_uniform_(m.weight)
            elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def forward(self, x):
        """Return (N, 10) class logits for input x of shape (N, 1, 28, 28)."""
        x = self.features1(x)
        x = self.features(x)
        x = x.view(x.size(0), -1)  # flatten to (N, 64*7*7)
        return self.classifier(x)
这个模型类整体来看中规中矩,都是之前讲到的方法。小测试:还记得xavier初始化是怎么回事吗?xavier初始化方法是一个非常常用的方法,在之前的文章中也详细的推导了这个。
之后呢,我们对模型实例化,然后给模型的参数传到优化器中,然后设置一个学习率衰减的策略,学习率衰减就是训练的epoch越多,学习率就越低的这样一个方法,在后面的文章中会详细讲述 。
import torch.optim as optim
# Instantiate the model, loss, optimizer and learning-rate schedule.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = Net().to(device)
# model = torchvision.models.resnet50(pretrained=True).to(device)
criterion = nn.CrossEntropyLoss().to(device)
optimizer = optim.Adam(model.parameters(), lr=0.003)
# Decay the learning rate by a factor of 10 every 7 epochs.
exp_lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)
print(model)
def train(epoch):
    """Run one training epoch over train_loader and step the LR scheduler.

    Prints progress every 50 batches and appends each batch loss to the
    module-level ``log`` list so the loss curve can be plotted afterwards.
    (The original never filled ``log``, so the later plt.plot(log) drew an
    empty figure.)

    Args:
        epoch: current epoch index, used only for the progress printout.
    """
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        # Move the batch to the training device.
        data = data.to(device)
        target = target.to(device)
        # Forward pass and loss.
        output = model(data)
        loss = criterion(output, target)
        optimizer.zero_grad()   # clear gradients from the previous step
        loss.backward()         # back-propagate the loss
        optimizer.step()        # update the parameters
        log.append(loss.item())  # record for the loss curve
        if (batch_idx + 1) % 50 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, (batch_idx + 1) * len(data), len(train_loader.dataset),
                100. * (batch_idx + 1) / len(train_loader), loss.item()))
    # Advance the LR schedule once per epoch (StepLR counts epochs).
    exp_lr_scheduler.step()
定义了一个训练一个epoch的函数,然后下面是训练10个epoch的主函数代码。
# Main loop: train for n_epochs epochs, collecting per-batch losses in `log`.
log = []
n_epochs = 2
for epoch in range(n_epochs):
    train(epoch)

# Plot the recorded losses as a line chart.
import matplotlib.pyplot as plt
plt.plot(log)
plt.show()
def prediciton(data_loader):
    """Return a LongTensor of predicted class ids for every sample in data_loader.

    Shape is (N, 1) because of keepdim=True in the argmax below.
    NOTE(review): the name is a typo for ``prediction``; kept unchanged
    because the call sites below use this spelling.
    """
    model.eval()
    test_pred = torch.LongTensor()
    # Inference only: no_grad skips autograd bookkeeping and saves memory
    # (the original built a computation graph for every batch for no reason).
    with torch.no_grad():
        for i, data in enumerate(data_loader):
            data = data.to(device)
            output = model(data)
            # argmax over the class dimension; keepdim=True keeps shape (B, 1).
            pred = output.cpu().data.max(1, keepdim=True)[1]
            test_pred = torch.cat((test_pred, pred), dim=0)
    return test_pred
# Predict a class for every test sample (LongTensor of shape (n_test, 1)).
test_pred = prediciton(test_loader)
类似train,写一个预测的函数,返回预测的值。然后像是在EDA中那样,抽取测试集的8个数字,看看图像和预测结果的匹配情况
from torchvision.utils import make_grid
# Sanity-check predictions: show 8 random test digits above their predicted labels.
pick = np.random.randint(len(test_df), size=8)
# Test rows are all pixels (no label column); scale to [0, 1], shape (N, 1, 28, 28).
imgs = test_df.iloc[pick, :].values.reshape(-1, 1, 28, 28) / 255.
grid = make_grid(torch.Tensor(imgs), nrow=8)
plt.rcParams['figure.figsize'] = (16, 2)
# make_grid returns (C, H, W); imshow expects (H, W, C).
plt.imshow(grid.numpy().transpose((1, 2, 0)))
plt.axis('off')
plt.show()
# Predicted labels in the same order as the grid.
print(*list(test_pred[pick].numpy()), sep=', ')