Linear Regression
import torch

# Training data: three samples with three features each, and one target per sample.
x_data = torch.tensor([[1.0, 2.0, 0.5],
                       [2.0, 4.0, 0.3],
                       [3.0, 6.0, 0.1]])
y_data = torch.tensor([[1.5], [1.8], [0.9]])

class Model(torch.nn.Module):
    def __init__(self):
        super().__init__()
        # A single linear layer: 3 input features -> 1 output.
        self.linear = torch.nn.Linear(3, 1)

    def forward(self, x):
        return self.linear(x)

model = Model()
# Sum-of-squared-errors loss (reduction='sum' replaces the deprecated size_average=False).
criterion = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

# Full-batch gradient descent; print the loss every 10 epochs.
for epoch in range(100):
    y_pred = model(x_data)
    loss = criterion(y_pred, y_data)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    if epoch % 10 == 0:
        print(epoch, loss.item())
0 34.39658737182617
10 1.4057011604309082
20 1.149032473564148
30 0.9488809704780579
40 0.7927986979484558
50 0.6710829138755798
60 0.5761667490005493
70 0.5021493434906006
80 0.4444289803504944
90 0.3994176983833313
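As an optional inspection step (not part of the original script, and relying on the `model` trained above), the fitted coefficients can be read directly from the linear layer; the exact values depend on the random initialization.

# Inspect the learned weight vector (shape 1x3) and bias (shape 1) of the Linear layer.
with torch.no_grad():
    print("weight:", model.linear.weight)
    print("bias:", model.linear.bias)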
# Switch to evaluation mode and predict the target for an unseen sample.
model.eval()
test_data = torch.tensor([[4.0, 8.0, 0.1]])
with torch.no_grad():
    predicted = model(test_data).numpy()[0, 0]
print(predicted)
1.4094565
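As an illustrative check (again relying on `model` and `test_data` from above), the same prediction can be reproduced by applying the layer's parameters directly, since torch.nn.Linear computes x @ W.T + b.

# Reproduce the prediction by hand: nn.Linear computes x @ W.T + b.
with torch.no_grad():
    manual = test_data @ model.linear.weight.T + model.linear.bias
print(manual[0, 0].item())  # should match the value printed above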