【Title】: PyTorch NotImplementedError in forward
【Posted】: 2019-02-13 22:29:36
【Question】:

import torch
import torch.nn as nn

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

class Model(nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.layer = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),  # 16x16x650
            nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1),  # 32x16x650
            nn.ReLU(),
            nn.Dropout2d(0.5),
            nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),  # 64x16x650
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),  # 64x8x325
            nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
            nn.ReLU())  # 64x8x325

        self.fc = nn.Sequential(
            nn.Linear(64*8*325, 128),
            nn.ReLU(),
            nn.Linear(128, 256),
            nn.ReLU(),
            nn.Linear(256, 1),
        )

        def forward(self, x):  # NB: nested inside __init__ -- the bug the answers below point out
            out = self.layer1(x)
            out = self.layer2(out)
            out = out.reshape(out.size(0), -1)
            out = self.fc(out)
            return out
# HYPERPARAMETERS
learning_rate = 0.0001
num_epochs = 15

import data

def main():
    model = Model().to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    total_step = len(data.train_loader)
    for epoch in range(num_epochs):
        for i, (images, labels) in enumerate(data.train_loader):
            images = images.to(device)
            labels = labels.to(device)

            outputs = model(images)
            loss = criterion(outputs, labels)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if (i + 1) % 100 == 0:
                print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                      .format(epoch + 1, num_epochs, i + 1, total_step, loss.item()))

    model.eval()
    with torch.no_grad():
        correct = 0
        total = 0
        for images, labels in data.test_loader:
            images = images.to(device)
            labels = labels.to(device)
            outputs = model(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

        print('Test Accuracy of the model on the 10000 test images: {} %'.format(100 * correct / total))

if __name__ == '__main__':
    main()
Error:
File "/home/rladhkstn8/Desktop/SWID/tmp/pycharm_project_853/model.py", line 82, in <module>
main()
File "/home/rladhkstn8/Desktop/SWID/tmp/pycharm_project_853/model.py", line 56, in main
outputs = model(images)
File "/home/rladhkstn8/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py", line 477, in __call__
result = self.forward(*input, **kwargs)
File "/home/rladhkstn8/anaconda3/lib/python3.6/site-packages/torch/nn/modules/module.py", line 83, in forward
raise NotImplementedError
NotImplementedError
I don't know where the problem is. I know that NotImplementedError is raised when there is unimplemented code, but I don't see what is unimplemented here.
【Comments】:

This can also happen if you try to call forward through a ModuleList instead of a Sequential.
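A minimal sketch of that failure mode (the layer sizes here are arbitrary): nn.ModuleList is only a container and defines no forward of its own, so calling it directly raises the same NotImplementedError; iterate over it instead, or wrap the layers in nn.Sequential.

import torch
import torch.nn as nn

blocks = nn.ModuleList([nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2)])
x = torch.randn(1, 4)

out = x
for block in blocks:          # a ModuleList must be iterated manually
    out = block(out)

seq = nn.Sequential(*blocks)  # Sequential defines forward, so it is callable
out2 = seq(x)

blocks(x)                     # raises NotImplementedError: no forward defined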
【Answer 1】:

Simply unindent your forward method so that it is defined on the Model class itself. Like this:
class Model(nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.layer = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(16),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),  # 16x16x650
            nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1),  # 32x16x650
            nn.ReLU(),
            nn.Dropout2d(0.5),
            nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),  # 64x16x650
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),  # 64x8x325
            nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
            nn.ReLU())  # 64x8x325

        self.fc = nn.Sequential(
            nn.Linear(64*8*325, 128),
            nn.ReLU(),
            nn.Linear(128, 256),
            nn.ReLU(),
            nn.Linear(256, 1),
        )

    def forward(self, x):
        out = self.layer1(x)
        out = self.layer2(out)
        out = out.reshape(out.size(0), -1)
        out = self.fc(out)
        return out
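One caveat: even after the unindent, this forward still references self.layer1 and self.layer2, while __init__ only defines self.layer, so the next call will raise AttributeError. A sketch of a forward consistent with the __init__ above (assuming the single self.layer stack is what was intended), to replace the method shown:

    def forward(self, x):
        out = self.layer(x)                 # the one conv stack __init__ defines
        out = out.reshape(out.size(0), -1)  # flatten; note the last conv outputs
        out = self.fc(out)                  # 128 channels, so the first Linear's
        return out                          # in_features may also need revisiting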
【Comments】:
【Answer 2】:

Please look carefully at the indentation of your __init__ function: your forward is defined inside __init__, not as a method of your module.
【Comments】:
I don't understand what you mean. A code example would be really helpful.

@samisnotinsane If you were to hold a ruler vertically from where __init__ is defined and run it straight down your code, forward should be defined where the ruler meets its line. Instead, yours is indented one tab from the ruler, i.e. there is one tab of space between the ruler and forward: you indented def forward with two tabs instead of one like def __init__. That means you defined forward inside __init__, when it should be its own method, independent of __init__.
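A minimal sketch of the two cases described above (the class and layer names are illustrative): the nested version is just a local function that nn.Module never sees, so calling the module falls through to the base class's unimplemented forward.

import torch
import torch.nn as nn

class Broken(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(4, 2)

        def forward(self, x):    # indented under __init__: a local function,
            return self.fc(x)    # never registered as the module's forward

class Fixed(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(4, 2)

    def forward(self, x):        # at class level: overrides nn.Module.forward
        return self.fc(x)

x = torch.randn(1, 4)
print(Fixed()(x))                # works
print(Broken()(x))               # raises NotImplementedError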
【Answer 3】:
This error occurs when you have not implemented a method required by the superclass. In my case, I had a typo in the function name. I suggest you check your code's indentation.
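For instance, a sketch of the typo case (the misspelling below is hypothetical): any name other than forward leaves nn.Module's unimplemented forward in place, so calling the module raises NotImplementedError.

import torch
import torch.nn as nn

class Typo(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(4, 2)

    def forwards(self, x):    # misspelled: nn.Module.forward is never overridden
        return self.fc(x)

Typo()(torch.randn(1, 4))     # raises NotImplementedError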
【Comments】: