Building a Neural Network: The Basics, Put Together

Phoebe · Updated 2024-11-11


The following notes are based on the official PyTorch documentation and the 莫烦Python (Morvan Python) tutorials.

Basic steps (the full listing below walks through each one):

1. Load the data: training inputs, targets (labels)
2. Build the network, i.e. class Net
3. Instantiate the network: net
4. Create the optimizer
5. Choose the loss function: loss_func
6. Start training; in each step:
   - compute the prediction: predict
   - compute the loss value: loss
   - clear accumulated gradients: optimizer.zero_grad()
   - backpropagate the loss: loss.backward()
   - update the parameters: optimizer.step()

```python
import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt
import torch.utils.data as Data

torch.manual_seed(1)

# Fake data: y = 2.863 * x^2 + 5.652 * x, plus uniform noise scaled by 3.423
x = torch.unsqueeze(torch.linspace(-5, 5, 5000), dim=1)
y = 2.863 * x.pow(2) + 5.652 * x + 3.423 * (torch.rand(x.size()) - 0.5)
# plt.scatter(x.numpy(), y.numpy())
# plt.show()

# Batch training: wrap the data in a Dataset and load it in mini-batches
torch_dataset = Data.TensorDataset(x, y)
loader = Data.DataLoader(
    dataset=torch_dataset,
    batch_size=100,
    shuffle=True,
    num_workers=2,
)


# Build the network
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(1, 20)   # hidden layer: 1 -> 20
        self.predict = torch.nn.Linear(20, 1)  # output layer: 20 -> 1

    def forward(self, x):
        x = F.relu(self.hidden(x))
        x = self.predict(x)
        return x


if __name__ == '__main__':
    # Instantiate one network per optimizer
    net_SGD = Net()
    net_Momentum = Net()
    net_RMSProp = Net()
    net_Adam = Net()
    nets = [net_SGD, net_Momentum, net_RMSProp, net_Adam]

    # Create the optimizers
    opt_SGD = torch.optim.SGD(net_SGD.parameters(), lr=0.01)
    opt_Momentum = torch.optim.SGD(net_Momentum.parameters(), lr=0.01, momentum=0.8)
    opt_RMSProp = torch.optim.RMSprop(net_RMSProp.parameters(), lr=0.01, alpha=0.9)
    opt_Adam = torch.optim.Adam(net_Adam.parameters(), lr=0.01, betas=(0.9, 0.99))
    optimizers = [opt_SGD, opt_Momentum, opt_RMSProp, opt_Adam]

    # Choose the loss function
    loss_func = torch.nn.MSELoss()
    loss_net = [[], [], [], []]

    for epoch in range(100):  # run over the full dataset 100 times
        for step, (batch_x, batch_y) in enumerate(loader):  # 100 samples per batch
            for net, opt, loss_his in zip(nets, optimizers, loss_net):  # train the four networks side by side
                predict = net(batch_x)
                loss = loss_func(predict, batch_y)
                opt.zero_grad()
                loss.backward()
                opt.step()
                loss_his.append(loss.item())  # record the loss value

    labels = ['SGD', 'Momentum', 'RMSprop', 'Adam']
    for i, l_his in enumerate(loss_net):
        plt.plot(l_his, label=labels[i])
    plt.legend(loc='best')
    plt.xlabel('Steps')
    plt.ylabel('Loss')
    plt.ylim((0, 1100))
    plt.show()

    # Alternative without mini-batches (full-batch training):
    # for t in range(1000):
    #     for net, opt, loss_his in zip(nets, optimizers, loss_net):
    #         predict = net(x)
    #         loss = loss_func(predict, y)
    #         opt.zero_grad()
    #         loss.backward()
    #         opt.step()
    #         loss_his.append(loss.item())
    # labels = ['SGD', 'Momentum', 'RMSprop', 'Adam']
    # for i, l_his in enumerate(loss_net):
    #     plt.plot(l_his, label=labels[i])
    # plt.legend(loc='best')
    # plt.xlabel('Steps')
    # plt.ylabel('Loss')
    # plt.ylim((0, 1100))
    # plt.show()
```
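Once training finishes, it can be useful to check the fit visually, not just the loss curves. The following is a minimal sketch, not from the original post, assuming net_Adam, x, and y from the listing above are still in scope in the same session:

```python
# Sketch: query one trained network (here net_Adam) and plot its
# fitted curve against the noisy data. Assumes the training loop
# above has already run.
net_Adam.eval()          # evaluation mode (a no-op here, but idiomatic)
with torch.no_grad():    # no gradients needed for inference
    fitted = net_Adam(x) # forward pass over the full input range

plt.scatter(x.numpy(), y.numpy(), s=1, label='data')
plt.plot(x.numpy(), fitted.numpy(), 'r-', lw=2, label='net_Adam fit')
plt.legend(loc='best')
plt.show()
```

Because the hidden layer uses ReLU with only 20 units, the fitted curve is piecewise linear, so it approximates the quadratic target in straight segments rather than as a smooth parabola.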
Author: wq_151



