Logistic Regression solves classification problems.
The Logistic function is a saturating function. Sigmoid functions satisfy the following conditions:
1. they are monotonically increasing;
2. they have upper and lower bounds, here 0 and 1;
3. they are saturating functions (the gradient flattens out at both tails).
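As a quick numeric check of these three properties (my addition, not part of the original notes), evaluating torch.sigmoid at a few points shows the monotone rise, the (0, 1) bounds, and the flat tails:

import torch

xs = torch.tensor([-10.0, -1.0, 0.0, 1.0, 10.0])
print(torch.sigmoid(xs))
# approximately [0.000045, 0.2689, 0.5000, 0.7311, 0.99995]:
# steep near 0, nearly flat (saturated) at both tails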
Logistic regression code:

import torch

# Prepare the data
x_data = torch.Tensor([[1.0], [2.0], [3.0]])
y_data = torch.Tensor([[0], [0], [1]])

# Build the logistic regression model
class LogisticRegressionModel(torch.nn.Module):
    def __init__(self):
        super(LogisticRegressionModel, self).__init__()
        # a single linear layer, just as in linear regression
        self.linear = torch.nn.Linear(1, 1)

    def forward(self, x):
        # forward pass: self.linear(x) is the linear transform,
        # sigmoid(...) applies the non-linear squashing on top of it
        y_pred = torch.sigmoid(self.linear(x))
        return y_pred

model = LogisticRegressionModel()

# binary cross-entropy loss; reduction='sum' sums the per-sample losses
# (the deprecated size_average=False did the same)
criterion = torch.nn.BCELoss(reduction='sum')
# build the SGD optimizer
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

# Training
for epoch in range(1000):
    y_pred = model(x_data)
    loss = criterion(y_pred, y_data)
    print(epoch, loss.item())

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

print('progress:', epoch, loss.item())

x_test = torch.Tensor([[8.0]])
y_test = model(x_test)
print('y_pred =', y_test.data)
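The output for x = 8.0 is a probability, not a class label. A small follow-up (my addition) that turns the probability into a hard 0/1 prediction by thresholding at 0.5:

with torch.no_grad():
    prob = model(torch.Tensor([[8.0]]))
    label = (prob >= 0.5).int()  # threshold at 0.5 to get a 0/1 class
    print('probability:', prob.item(), 'class:', label.item())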
Why not simply make the model as powerful as possible?
A model's learning capacity is not "the stronger the better": an overly strong model also learns the noise in the data, which biases its estimates of the true values. What we actually want is capacity paired with good generalization ability.
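One common way to rein in capacity in this PyTorch setup is L2 regularization via the optimizer's weight_decay argument. A minimal sketch (weight_decay is a standard torch.optim.SGD parameter, but its use here is my addition, not part of the original notes), reusing the model defined above:

import torch

# weight_decay adds an L2 penalty on the weights at each update,
# discouraging the model from fitting noise in the training data
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, weight_decay=1e-4)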
The same four steps as before:
1. Prepare the data
2. Design the model
3. Construct the loss function and optimizer
4. Train

Different activation functions can be used between the layers; a ReLU sketch follows the code below.
import torch
import numpy as np

# Prepare the data: diabetes.csv.gz has 8 feature columns and 1 label column
xy = np.loadtxt('diabetes.csv.gz', delimiter=',', dtype=np.float32)
x_data = torch.from_numpy(xy[:, :-1])   # all rows, every column except the last
y_data = torch.from_numpy(xy[:, [-1]])  # last column only; [-1] keeps it a 2-D matrix

class Model(torch.nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        # three linear layers shrinking 8 -> 6 -> 4 -> 1
        self.linear1 = torch.nn.Linear(8, 6)
        self.linear2 = torch.nn.Linear(6, 4)
        self.linear3 = torch.nn.Linear(4, 1)
        # sigmoid supplies the non-linear transform between layers
        self.sigmoid = torch.nn.Sigmoid()

    def forward(self, x):
        x = self.sigmoid(self.linear1(x))
        x = self.sigmoid(self.linear2(x))
        x = self.sigmoid(self.linear3(x))
        return x

model = Model()

# loss function; reduction='mean' averages the loss over the batch
# (the deprecated size_average=True did the same)
criterion = torch.nn.BCELoss(reduction='mean')
# optimizer
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

# Training (full batch)
for epoch in range(100):
    y_pred = model(x_data)
    loss = criterion(y_pred, y_data)
    print(epoch, loss.item())

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
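As flagged under "different activation functions" above, the hidden sigmoids can be swapped for another activation. A minimal sketch (my variant, not the original notes) that uses ReLU in the hidden layers but keeps sigmoid on the output, since BCELoss expects probabilities in (0, 1):

import torch

class ReLUModel(torch.nn.Module):
    def __init__(self):
        super(ReLUModel, self).__init__()
        self.linear1 = torch.nn.Linear(8, 6)
        self.linear2 = torch.nn.Linear(6, 4)
        self.linear3 = torch.nn.Linear(4, 1)
        self.activate = torch.nn.ReLU()  # hidden-layer activation

    def forward(self, x):
        x = self.activate(self.linear1(x))
        x = self.activate(self.linear2(x))
        # the output layer keeps sigmoid so BCELoss receives values in (0, 1)
        return torch.sigmoid(self.linear3(x))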
Mini-Batch
Epoch: one complete pass in which every training sample has been seen once.
Batch-Size: the number of samples used for one forward/backward update.
Iteration: the number of batches, i.e., how many inner-loop steps one epoch performs.
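For example, 1,000 samples with a batch size of 100 give 1000 / 100 = 10 iterations per epoch. A minimal sketch with hypothetical random tensors (not the diabetes data) showing how DataLoader reflects this:

import torch
from torch.utils.data import TensorDataset, DataLoader

# 1000 hypothetical samples, each with 8 features and a binary label
ds = TensorDataset(torch.randn(1000, 8), torch.randint(0, 2, (1000, 1)).float())
loader = DataLoader(ds, batch_size=100, shuffle=True)
print(len(ds), len(loader))  # 1000 samples, 10 iterations per epoch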
import torch
import numpy as np
from torch.utils.data import Dataset
from torch.utils.data import DataLoader

class DiabetesDataset(Dataset):
    def __init__(self, filepath):
        xy = np.loadtxt(filepath, delimiter=',', dtype=np.float32)
        self.len = xy.shape[0]
        self.x_data = torch.from_numpy(xy[:, :-1])
        self.y_data = torch.from_numpy(xy[:, [-1]])

    def __getitem__(self, index):
        return self.x_data[index], self.y_data[index]

    def __len__(self):
        return self.len

dataset = DiabetesDataset('diabetes.csv.gz')
# num_workers=2 loads batches in two subprocesses; on Windows the training
# code below must sit under an `if __name__ == '__main__':` guard
train_loader = DataLoader(dataset=dataset,
                          batch_size=32,
                          shuffle=True,
                          num_workers=2)

class Model(torch.nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.linear1 = torch.nn.Linear(8, 6)
        self.linear2 = torch.nn.Linear(6, 4)
        self.linear3 = torch.nn.Linear(4, 1)
        # sigmoid supplies the non-linear transform between layers
        self.sigmoid = torch.nn.Sigmoid()

    def forward(self, x):
        x = self.sigmoid(self.linear1(x))
        x = self.sigmoid(self.linear2(x))
        x = self.sigmoid(self.linear3(x))
        return x

model = Model()

# loss function (reduction='mean' replaces the deprecated size_average=True)
criterion = torch.nn.BCELoss(reduction='mean')
# optimizer
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

# Training: the inner loop walks over the mini-batches produced by DataLoader
for epoch in range(100):
    for i, data in enumerate(train_loader, 0):
        inputs, labels = data
        y_pred = model(inputs)
        loss = criterion(y_pred, labels)
        print(epoch, i, loss.item())

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
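The notes stop at training. A minimal evaluation sketch (my addition), reusing the dataset and model defined above: disable gradient tracking, threshold the predicted probabilities at 0.5, and measure accuracy on the training set:

with torch.no_grad():
    y_prob = model(dataset.x_data)       # predicted probabilities in (0, 1)
    y_label = (y_prob >= 0.5).float()    # hard 0/1 labels via 0.5 threshold
    acc = (y_label == dataset.y_data).float().mean().item()
    print('train accuracy:', acc)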