PyTorch Learning Notes: Simple Examples with nn
Posted by Joyce_song94
Method 1: update parameters by hand
import torch
from torch.autograd import Variable

N, D_in, H, D_out = 64, 1000, 100, 10
x = Variable(torch.randn(N, D_in))
y = Variable(torch.randn(N, D_out), requires_grad=False)

# define our model as a sequence of layers
model = torch.nn.Sequential(
    torch.nn.Linear(D_in, H),
    torch.nn.ReLU(),
    torch.nn.Linear(H, D_out))
# nn defines common loss functions
loss_fn = torch.nn.MSELoss(size_average=False)

learning_rate = 1e-4

for t in range(500):
    # forward pass: feed data to model, and prediction to loss function
    y_pred = model(x)
    loss = loss_fn(y_pred, y)

    # backward pass: compute all gradients
    model.zero_grad()
    loss.backward()

    # make gradient step on each model parameter
    for param in model.parameters():
        param.data -= learning_rate * param.grad.data
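The Variable wrapper and the size_average argument of MSELoss come from older PyTorch releases; Variable has since been folded into Tensor and size_average was replaced by reduction. As a rough sketch only (assuming PyTorch 1.0 or later), the same manual gradient-descent loop can be written like this:

import torch

N, D_in, H, D_out = 64, 1000, 100, 10
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)

model = torch.nn.Sequential(
    torch.nn.Linear(D_in, H),
    torch.nn.ReLU(),
    torch.nn.Linear(H, D_out))
# reduction='sum' plays the role of the deprecated size_average=False
loss_fn = torch.nn.MSELoss(reduction='sum')

learning_rate = 1e-4
for t in range(500):
    y_pred = model(x)
    loss = loss_fn(y_pred, y)

    model.zero_grad()
    loss.backward()

    # update parameters without recording the update in autograd,
    # which replaces the old "param.data" trick
    with torch.no_grad():
        for param in model.parameters():
            param -= learning_rate * param.grad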
Method 2: update parameters with an optimizer
import torch
from torch.autograd import Variable

N, D_in, H, D_out = 64, 1000, 100, 10
x = Variable(torch.randn(N, D_in))
y = Variable(torch.randn(N, D_out), requires_grad=False)

# define our model as a sequence of layers
model = torch.nn.Sequential(
    torch.nn.Linear(D_in, H),
    torch.nn.ReLU(),
    torch.nn.Linear(H, D_out))
# nn defines common loss functions
loss_fn = torch.nn.MSELoss(size_average=False)

learning_rate = 1e-4
# use an optimizer for different update rules
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

for t in range(500):
    # forward pass: feed data to model, and prediction to loss function
    y_pred = model(x)
    loss = loss_fn(y_pred, y)

    # backward pass: compute all gradients
    model.zero_grad()
    loss.backward()

    # update all parameters after computing gradients
    optimizer.step()
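Because the optimizer already holds references to model.parameters(), it is also common to clear gradients with optimizer.zero_grad(); changing the update rule then only means changing the constructor. A minimal sketch of the same loop using SGD with momentum instead of Adam (reusing the x, y, model, and loss_fn defined above):

# same data, model, and loss as above; only the optimizer changes
learning_rate = 1e-4
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9)

for t in range(500):
    y_pred = model(x)          # forward pass
    loss = loss_fn(y_pred, y)

    optimizer.zero_grad()      # clear gradients of all optimized parameters
    loss.backward()            # compute gradients
    optimizer.step()           # apply the momentum-SGD update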
Defining new Modules with PyTorch nn
import torch
from torch.autograd import Variable
import torch.nn as nn

# define our whole model as a single Module
class TwoLayerNet(nn.Module):
    # Initializer sets up two children (Modules can contain modules)
    def __init__(self, D_in, H, D_out):
        super(TwoLayerNet, self).__init__()
        self.linear1 = torch.nn.Linear(D_in, H)
        self.linear2 = torch.nn.Linear(H, D_out)

    # Define forward pass using child modules and autograd ops on Variables
    # No need to define backward - autograd will handle it
    def forward(self, x):
        h_relu = self.linear1(x).clamp(min=0)
        y_pred = self.linear2(h_relu)
        return y_pred

N, D_in, H, D_out = 64, 1000, 100, 10
x = Variable(torch.randn(N, D_in))
y = Variable(torch.randn(N, D_out), requires_grad=False)

# Construct and train an instance of our model
model = TwoLayerNet(D_in, H, D_out)

criterion = torch.nn.MSELoss(size_average=False)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)
for t in range(500):
    y_pred = model(x)
    loss = criterion(y_pred, y)

    model.zero_grad()
    loss.backward()
    optimizer.step()
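Once defined, a TwoLayerNet instance behaves like any other nn.Module: it can be called on new inputs, and the parameters registered by its child modules can be inspected. A small usage sketch (x_new is a hypothetical batch of five unseen samples, not part of the original notes):

# run the trained module on a hypothetical batch of new inputs
x_new = Variable(torch.randn(5, D_in))
y_new = model(x_new)
print(y_new.size())                  # (5, 10), i.e. (batch, D_out)

# the child Linear modules register their weights and biases automatically
for name, param in model.named_parameters():
    print(name, tuple(param.size()))
# linear1.weight (100, 1000)
# linear1.bias   (100,)
# linear2.weight (10, 100)
# linear2.bias   (10,)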