Simple Data recall RNN in Pytorch

Posted: 2019-08-09 01:06:35

Question:

I'm learning PyTorch and trying to build a network that can recall its previous inputs. I've tried two different input/output structures (see below), but I haven't been able to get either one working the way I want.

Structure 1:

In: [4,2,7,8]

Out: [[0,0,4],[0,4,2],[4,2,7],[2,7,8]]

Code:

import numpy as np

def histroy(num_samples=4, look_back=3):
    # lab[i] holds the last `look_back` values seen up to and including data[i], zero-padded at the start
    data = np.random.randint(10, size=(num_samples)).tolist()
    lab = [[0] * look_back]
    for i in data:
        lab.append(lab[-1][1:] + [i])
    return data, lab[1:]
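
For reference, one quick call looks like this (the digits are random, so this draw is only illustrative):

data, lab = histroy(num_samples=4, look_back=3)
print(data)  # e.g. [4, 2, 7, 8]
print(lab)   # e.g. [[0, 0, 4], [0, 4, 2], [4, 2, 7], [2, 7, 8]]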

Structure 2:

In: [4,2,7,8]

Out: [0,4,2,7]

def histroy(num_samples=4):
    data = np.random.randint(10, size=(num_samples)).tolist()
    lab = [0]
    for i in data:
        lab.append(i)
    return data, lab[:-1]  # lab[i] is the input one step earlier, so drop the extra final element

I've tried many different network structures and training methods, but nothing seems to work.

The only things I'm fairly sure I have right are that net.hidden = net.init_hidden() should sit outside each epoch, and that I need loss.backward(retain_graph=True), but neither seems to make any difference.
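
For reference, my understanding of how a hidden state is normally threaded through nn.RNN is roughly the following (a minimal sketch with made-up sizes, not my actual code below; note nn.RNN uses a single hidden tensor, unlike the (h, c) pair an LSTM would use):

import torch
import torch.nn as nn

rnn = nn.RNN(input_size=1, hidden_size=25, num_layers=1)
h = torch.zeros(1, 1, 25)        # (num_layers, batch, hidden_dim)

x = torch.randn(5, 1, 1)         # (seq_len, batch, input_dim)
out, h = rnn(x, h)               # pass the previous hidden state in explicitly
h = h.detach()                   # detach if the state is carried across optimizer steps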

At the moment it learns the last number in the sequence, but it never seems to learn any of the others.

My latest attempt:

import torch
import numpy as np
import torch.nn as nn
import torch.optim as optim

def histroy(num_samples=4,look_back=3):
    data=np.random.randint(10,size=(num_samples)).tolist()
    lab=[[0]*look_back]
    for i in data:
        lab.append(lab[-1][1:]+[i])
    return data,lab[1:]

class Net(nn.Module):
    def __init__(self, input_dim, hidden_dim, batch_size, output_dim=10, num_layers=1):
        super(Net, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.batch_size = batch_size
        self.num_layers = num_layers

        self.memory = nn.RNN(self.input_dim,self.hidden_dim,self.num_layers)
        self.linear = nn.Linear(self.hidden_dim, output_dim)
        self.first=True


    def init_hidden(self):
        # This is what we'll initialise our hidden state as
        return (torch.zeros(self.num_layers, self.batch_size, self.hidden_dim),
                torch.zeros(self.num_layers, self.batch_size, self.hidden_dim))

    def forward(self, input):
        self.memory_out, self.hidden = self.memory(input.view(len(input), self.batch_size, -1))
        y_pred = self.linear(self.memory_out[-1].view(self.batch_size, -1))
        return y_pred.view(-1)


if __name__ == '__main__':
    data_amount = 10000
    batch_size = 1  # default is 32
    data_amount-=data_amount%batch_size
    number_of_times_on_the_same_data = 250
    look_back=5

    net=Net(input_dim=1,hidden_dim=25,batch_size=batch_size,output_dim=look_back)
    data,labs=histroy(data_amount,look_back)
    data = torch.Tensor(data).float()
    labs = torch.Tensor(labs).float()


    optimizer = optim.Adam(net.parameters())
    criterion = torch.nn.MSELoss(size_average=False)

    for epoch in range(number_of_times_on_the_same_data):  # loop over the dataset multiple times
        running_loss = 0.0
        data, labs = histroy(data_amount, look_back)
        data = torch.Tensor(data).float()
        labs = torch.Tensor(labs).float()
        net.hidden = net.init_hidden()
        print("epoch",epoch)
        for i in range(0, data_amount, batch_size):
            inputs = data[i:i + batch_size]
            labels = labs[i:i + batch_size]
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = net(inputs)

            loss = criterion(outputs, labels)
            loss.backward(retain_graph=True)
            optimizer.step()
            running_loss += loss.item()


            if i >= data_amount-batch_size:
                print("loss",loss)
                net.hidden = net.init_hidden()
                print("Outputs",outputs)
                print("Input", data[-1*look_back:])
                print("labels",labels)


Answer 1:

The problem with your network is that your inputs have shape 1:

for i in range(0, data_amount, batch_size):
    inputs = data[i:i + batch_size]
    labels = labs[i:i + batch_size]
    print(inputs.shape, labels.shape)

>>>torch.Size([1]) torch.Size([1, 5])
>>>torch.Size([1]) torch.Size([1, 5])...

That's why your RNN only predicts your last number: in this setup you aren't actually using your look_back attribute. You have to fix the code so that each input has size [1, 5]. Your code should look like this:

import torch
import numpy as np
import torch.nn as nn
import torch.optim as optim

def histroy(num_samples=4,look_back=3):
    data=np.random.randint(10,size=(num_samples)).tolist()
    lab=[[0]*look_back]
    for i in data:
        lab.append(lab[-1][1:]+[i])
    return lab[:-1],lab[1:]

class Net(nn.Module):
    def __init__(self, input_dim, hidden_dim, batch_size, output_dim=10, num_layers=1):
        super(Net, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.batch_size = batch_size
        self.num_layers = num_layers

        self.memory = nn.RNN(self.input_dim,self.hidden_dim,self.num_layers)
        self.linear = nn.Linear(self.hidden_dim, output_dim)
        self.first=True


    def init_hidden(self):
        # This is what we'll initialise our hidden state as
        return (torch.zeros(self.num_layers, self.batch_size, self.hidden_dim),
                torch.zeros(self.num_layers, self.batch_size, self.hidden_dim))

    def forward(self, input):
        self.memory_out, self.hidden = self.memory(input.view(len(input), self.batch_size, -1))
        y_pred = self.linear(self.memory_out[-1].view(self.batch_size, -1))
        return y_pred.view(-1)


if __name__ == '__main__':
    data_amount = 10000
    batch_size = 1  # default is 32
    data_amount-=data_amount%batch_size
    number_of_times_on_the_same_data = 250
    look_back=5

    net=Net(input_dim=1,hidden_dim=25,batch_size=batch_size,output_dim=look_back)
    data,labs=histroy(data_amount,look_back)
    data = torch.Tensor(data).float()
    labs = torch.Tensor(labs).float()


    optimizer = optim.Adam(net.parameters())
    criterion = torch.nn.MSELoss(size_average=False)

    for epoch in range(number_of_times_on_the_same_data):  # loop over the dataset multiple times
        running_loss = 0.0
        data, labs = histroy(data_amount, look_back)
        data = torch.Tensor(data).float()
        labs = torch.Tensor(labs).float()
        net.hidden = net.init_hidden()
        print("epoch",epoch)
        for i in range(0, data_amount, batch_size):
            inputs = data[i:i + batch_size].view(-1)
            labels = labs[i:i + batch_size]
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = net(inputs)

            loss = criterion(outputs, labels)
            loss.backward(retain_graph=True)
            optimizer.step()
            running_loss += loss.item()


            if i >= data_amount-batch_size:
                print("loss",loss)
                net.hidden = net.init_hidden()
                print("Outputs",outputs)
                print("Input", data[i:i + batch_size][-1])
                print("labels",labels)

Output:

>>>epoch 0
>>>loss tensor(17.7415, grad_fn=<MseLossBackward>)
>>>Outputs tensor([2.0897, 3.1410, 4.7382, 1.0532, 4.2003], grad_fn=<ViewBackward>)
>>>Input tensor([8., 2., 3., 5., 1.])
>>>labels tensor([[2., 3., 5., 1., 0.]])...
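
If you want to double-check the shapes after the change, a quick sanity check (just a sketch, reusing the corrected histroy above) is:

data, labs = histroy(10, look_back=5)
data = torch.Tensor(data).float()
labs = torch.Tensor(labs).float()
print(data[0:1].view(-1).shape)  # torch.Size([5])  - one look_back window as input
print(labs[0:1].shape)           # torch.Size([1, 5]) - the matching target window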

