# PyTorch intro example 1: a minimal model that learns a + b = c

import torch
import torch.nn as nn
from torch.optim import SGD
import matplotlib.pyplot as plt

################## Initialize input / target tensors ########################
# Training pairs: each input row [a, b] maps to the target [a + b].
x = [[1, 2], [3, 4], [5, 6], [7, 8]]
y = [[3], [7], [11], [15]]

x = torch.tensor(x, dtype=torch.float32)
y = torch.tensor(y, dtype=torch.float32)

# Train on the GPU when one is available, otherwise fall back to the CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
x, y = x.to(device), y.to(device)

##################定义网络########################

class mynet(nn.Module):
    """Two-layer MLP for the a + b regression: Linear(2->8) -> Sigmoid -> Linear(8->1)."""

    def __init__(self):
        super().__init__()
        # Attribute names are part of the public surface: the script
        # inspects model.in_hidden.weight after construction — keep them.
        self.in_hidden = nn.Linear(2, 8)
        self.sig = nn.Sigmoid()
        self.hidden_out = nn.Linear(8, 1)

    def forward(self, x):
        """Map a (batch, 2) input to a (batch, 1) prediction."""
        return self.hidden_out(self.sig(self.in_hidden(x)))


# Build the model and move it onto the selected device in one step
# (Module.to mutates in place and returns the same module).
model0 = mynet().to(device)

# Printing one layer's weights is a quick sanity check that the
# network was assembled as intended.
print(model0.in_hidden.weight)

################## Loss function and optimizer ########################

# Mean-squared error suits a continuous regression target.
loss_func = nn.MSELoss()
opt = SGD(model0.parameters(), lr=1e-2)

# NOTE(review): name looks like a typo for "loss_value(s)" and nothing in
# this file ever appends to it — confirm before relying on it downstream.
loss_vale = []
##################训练########################
def train(model,x,y,times):
    for _ in range(times):
        opt.zero_grad()
        loss = loss_func(model(x),y)
        loss.backward()
        opt.step()
        print(loss.item())

# Run 1000 full-batch gradient steps on the (x, y) pairs defined above.
train(model0,x,y,1000)





# You may also be interested in: (pytorch, pytorch)