PyTorch Learning: RNN Regression
RNN Regression
import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt

torch.manual_seed(1)  # reproducible

TIME_STEP = 10   # how many time points the RNN sees per training window
INPUT_SIZE = 1   # how many features are fed to the RNN at each time point
LR = 0.02        # learning rate
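With batch_first=True (used below), nn.RNN expects input of shape (batch, time_step, input_size). A minimal standalone shape check, a sketch using a throwaway demo_rnn that is not part of the tutorial's model:

# Shape sanity check for batch_first=True (hypothetical standalone sketch):
demo_rnn = nn.RNN(input_size=INPUT_SIZE, hidden_size=32, num_layers=1, batch_first=True)
demo_x = torch.zeros(1, TIME_STEP, INPUT_SIZE)  # (batch, time_step, input_size)
demo_out, demo_h = demo_rnn(demo_x)             # hidden state defaults to zeros when omitted
print(demo_out.shape)  # torch.Size([1, 10, 32]) -> an output for every time step
print(demo_h.shape)    # torch.Size([1, 1, 32])  -> (num_layers, batch, hidden_size)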
# Optional data preview: 100 evenly spaced points over [0, 2*pi]
# steps = np.linspace(0, np.pi * 2, 100, dtype=np.float32)
# x_np = np.sin(steps)
# y_np = np.cos(steps)
# plt.plot(steps, y_np, 'r-', label='target (cos)')
# plt.plot(steps, x_np, 'b-', label='input (sin)')
# plt.legend(loc='best')
# plt.show()
class RNN(nn.Module):
    def __init__(self):
        super(RNN, self).__init__()
        self.rnn = nn.RNN(
            input_size=INPUT_SIZE,  # 1 feature per time step
            hidden_size=32,         # number of hidden units
            num_layers=1,
            batch_first=True,       # input/output shape: (batch, time_step, feature)
        )
        self.out = nn.Linear(32, 1)

    def forward(self, x, h_state):
        # r_out: (batch, time_step, hidden_size); h_state: (num_layers, batch, hidden_size)
        r_out, h_state = self.rnn(x, h_state)
        outs = []
        for time_step in range(r_out.size(1)):  # apply the output layer at every time step
            outs.append(self.out(r_out[:, time_step, :]))
        return torch.stack(outs, dim=1), h_state
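Since nn.Linear maps only the last dimension of its input, the per-time-step loop in forward can also be written without a loop. A sketch of an equivalent variant (the class name RNNVectorized is made up for illustration):

# Hypothetical variant with a vectorized output layer; behaves the same as RNN above
class RNNVectorized(nn.Module):
    def __init__(self):
        super(RNNVectorized, self).__init__()
        self.rnn = nn.RNN(input_size=INPUT_SIZE, hidden_size=32,
                          num_layers=1, batch_first=True)
        self.out = nn.Linear(32, 1)

    def forward(self, x, h_state):
        r_out, h_state = self.rnn(x, h_state)
        # nn.Linear maps hidden_size -> 1 for every (batch, time_step) slot,
        # which equals stacking the per-step outputs as in the loop version
        return self.out(r_out), h_state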
rnn=RNN()
print(rnn)
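For this model, print(rnn) should show something like the following (the exact repr format varies slightly across PyTorch versions):

RNN(
  (rnn): RNN(1, 32, batch_first=True)
  (out): Linear(in_features=32, out_features=1, bias=True)
)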
# Training setup
optimizer = torch.optim.Adam(rnn.parameters(), lr=LR)  # optimize all rnn parameters
loss_func = nn.MSELoss()                               # regression, so mean squared error

h_state = None  # initial hidden state; None tells nn.RNN to start from zeros
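As a quick reminder of what nn.MSELoss computes, it is simply the mean of squared element-wise differences:

a = torch.tensor([1.0, 2.0])
b = torch.tensor([1.5, 2.5])
print(nn.MSELoss()(a, b))  # tensor(0.2500) = (0.5**2 + 0.5**2) / 2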
for step in range(60):
    start, end = step * np.pi, (step + 1) * np.pi  # each step covers the next pi-length window
    steps = np.linspace(start, end, TIME_STEP, dtype=np.float32)
    x_np = np.sin(steps)  # float32 converts cleanly to a torch FloatTensor
    y_np = np.cos(steps)

    x = torch.from_numpy(x_np[np.newaxis, :, np.newaxis])  # shape (batch, time_step, input_size)
    y = torch.from_numpy(y_np[np.newaxis, :, np.newaxis])

    prediction, h_state = rnn(x, h_state)  # prediction for every step, plus the final h_state
    h_state = h_state.detach()  # detach the hidden state before the next iteration,
                                # otherwise backward() would try to go through old graphs

    loss = loss_func(prediction, y)  # MSE loss (this is regression, not cross entropy)
    optimizer.zero_grad()            # clear gradients for this training step
    loss.backward()                  # backpropagation, compute gradients
    optimizer.step()                 # apply gradients
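To check the fit visually, one can plot the final window after the loop finishes. A minimal sketch reusing the variables from the last iteration (so only the last pi-length segment is shown):

plt.plot(steps, y_np, 'r-', label='target (cos)')
plt.plot(steps, prediction.detach().numpy().flatten(), 'b-', label='prediction')
plt.legend(loc='best')
plt.show()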