Pytorch CIFAR10图像分类 工具函数utils篇
Posted 风信子的猫Redamancy
tags:
篇首语:本文由小常识网(cha138.com)小编为大家整理,主要介绍了Pytorch CIFAR10图像分类 工具函数utils篇相关的知识,希望对你有一定的参考价值。
Pytorch CIFAR10图像分类 工具函数utils篇
这里贴一下汇总篇: 汇总篇
虽然我代码给到了github上,但是还是有很多人问我的utils.py文件的代码是什么,为了方便,我直接给出来吧,方便大家查阅
得到分类的概率
import torch
import torch.nn as nn
import time
import matplotlib.pyplot as plt
from torch.utils.tensorboard import SummaryWriter
def get_acc(outputs, label):
total = outputs.shape[0]
probs, pred_y = outputs.data.max(dim=1) # 得到概率
correct = (pred_y == label).sum().data
return correct / total
旧版的训练函数
旧版的训练函数没有scheduler参数,所以不会有学习率的调整变化的函数
参数介绍
- net : 所选的网络模型
- trainloader: 训练集加载器
- testloader: 测试集加载器
- epoches:训练次数
- optimizer:优化器
- criterion:损失函数
- writer:是否使用tensorboard可视化,默认为None
- verbose:是否使用记录准确率,损失值,学习率,默认为True
def old_train(net, trainloader, testloader, epoches, optimizer, criterion, writer=None, verbose=True):
    """Legacy training loop (no learning-rate scheduler, no model checkpointing).

    Args:
        net:         the model to train.
        trainloader: DataLoader for the training set.
        testloader:  DataLoader for the test set, or None to skip evaluation.
        epoches:     number of epochs to train.
        optimizer:   optimizer updating ``net``'s parameters.
        criterion:   loss function.
        writer:      optional tensorboard SummaryWriter (default None).
        verbose:     if True, record per-epoch accuracy/loss/lr histories.

    Returns:
        (Acc, Loss, Lr): dicts of train/test accuracy and loss histories,
        and the list of learning rates (all empty unless ``verbose``).
    """
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    best_acc = 0  # unused here; kept for symmetry with train()
    train_acc_list, test_acc_list = [], []
    train_loss_list, test_loss_list = [], []
    lr_list = []
    for i in range(epoches):
        start = time.time()
        train_loss = 0
        train_acc = 0
        test_loss = 0
        test_acc = 0
        if torch.cuda.is_available():
            net = net.to(device)
        net.train()
        for step, data in enumerate(trainloader, start=0):
            im, label = data
            im = im.to(device)
            label = label.to(device)
            optimizer.zero_grad()
            # Release cached GPU memory (harmless no-op on CPU-only setups).
            if hasattr(torch.cuda, 'empty_cache'):
                torch.cuda.empty_cache()
            # forward
            outputs = net(im)
            loss = criterion(outputs, label)
            # backward
            loss.backward()
            # update parameters
            optimizer.step()
            train_loss += loss.data
            train_acc += get_acc(outputs, label)
            # progress bar for this epoch
            rate = (step + 1) / len(trainloader)
            a = "*" * int(rate * 50)
            b = "." * (50 - int(rate * 50))
            print('\r train {:3d}|{:3d} {:^3.0f}% [{}->{}] '.format(
                i + 1, epoches, int(rate * 100), a, b), end='')
        train_loss = train_loss / len(trainloader)
        train_acc = train_acc * 100 / len(trainloader)
        if verbose:
            train_acc_list.append(train_acc.item())
            train_loss_list.append(train_loss.item())
        # record the current learning rate (fixed here: no scheduler)
        lr = optimizer.param_groups[0]['lr']
        if verbose:
            lr_list.append(lr)
        if testloader is not None:
            net.eval()
            with torch.no_grad():
                for step, data in enumerate(testloader, start=0):
                    im, label = data
                    im = im.to(device)
                    label = label.to(device)
                    # release cached GPU memory
                    if hasattr(torch.cuda, 'empty_cache'):
                        torch.cuda.empty_cache()
                    outputs = net(im)
                    loss = criterion(outputs, label)
                    test_loss += loss.data
                    test_acc += get_acc(outputs, label)
                    rate = (step + 1) / len(testloader)
                    a = "*" * int(rate * 50)
                    b = "." * (50 - int(rate * 50))
                    print('\r test {:3d}|{:3d} {:^3.0f}% [{}->{}] '.format(
                        i + 1, epoches, int(rate * 100), a, b), end='')
            test_loss = test_loss / len(testloader)
            test_acc = test_acc * 100 / len(testloader)
            if verbose:
                test_loss_list.append(test_loss.item())
                test_acc_list.append(test_acc.item())
            end = time.time()
            print(
                '\rEpoch [{:>3d}/{:>3d}]  Train Loss:{:>.6f}  Train Acc:{:>3.2f}%  '
                'Test Loss:{:>.6f}  Test Acc:{:>3.2f}%  Learning Rate:{:>.6f}'.format(
                    i + 1, epoches, train_loss, train_acc, test_loss, test_acc, lr), end='')
        else:
            end = time.time()
            print('\rEpoch [{:>3d}/{:>3d}]  Train Loss:{:>.6f}  Train Acc:{:>3.2f}%  '
                  'Learning Rate:{:>.6f}'.format(i + 1, epoches, train_loss, train_acc, lr), end='')
        # elapsed time, printed as MM:SS (hours are computed but not shown)
        time_ = int(end - start)
        h = time_ // 3600
        m = time_ % 3600 // 60
        s = time_ % 60
        time_str = "\tTime %02d:%02d" % (m, s)
        # ====================== tensorboard logging ==================
        if writer is not None:
            writer.add_scalars('Loss', {'train': train_loss,
                                        'valid': test_loss}, i + 1)
            writer.add_scalars('Acc', {'train': train_acc,
                                       'valid': test_acc}, i + 1)
            # lr is a plain float, so add_scalar (add_scalars expects a dict)
            writer.add_scalar('Learning Rate', lr, i + 1)
        # =============================================================
        print(time_str)
    Acc = {}
    Loss = {}
    Acc['train_acc'] = train_acc_list
    Acc['test_acc'] = test_acc_list
    Loss['train_loss'] = train_loss_list
    Loss['test_loss'] = test_loss_list
    Lr = lr_list
    return Acc, Loss, Lr
加强的训练函数
参数介绍
- net : 所选的网络模型
- trainloader: 训练集加载器
- testloader: 测试集加载器
- epoches:训练次数
- optimizer:优化器
- criterion:损失函数
- writer:是否使用tensorboard可视化,默认为None
- verbose:是否使用记录准确率,损失值,学习率,默认为True
- scheduler:学习率调整策略
- path:保存迭代次数中最优的模型的权重,默认为model.pth
def train(net, trainloader, testloader, epoches, optimizer, criterion, scheduler, path='./model.pth', writer=None, verbose=False):
    """Training loop with LR scheduling and best-model checkpointing.

    Args:
        net:         the model to train.
        trainloader: DataLoader for the training set.
        testloader:  DataLoader for the test set, or None to skip evaluation.
        epoches:     number of epochs to train.
        optimizer:   optimizer updating ``net``'s parameters.
        criterion:   loss function.
        scheduler:   LR scheduler; stepped with the epoch train loss, so it
                     is presumably ReduceLROnPlateau — confirm with callers.
        path:        where to save the best model (default './model.pth').
        writer:      optional tensorboard SummaryWriter (default None).
        verbose:     if True, record per-epoch accuracy/loss/lr histories.

    Returns:
        (Acc, Loss, Lr): dicts of train/test accuracy and loss histories,
        and the list of learning rates (all empty unless ``verbose``).
    """
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    best_acc = 0
    train_acc_list, test_acc_list = [], []
    train_loss_list, test_loss_list = [], []
    lr_list = []
    for i in range(epoches):
        start = time.time()
        train_loss = 0
        train_acc = 0
        test_loss = 0
        test_acc = 0
        if torch.cuda.is_available():
            net = net.to(device)
        net.train()
        for step, data in enumerate(trainloader, start=0):
            im, label = data
            im = im.to(device)
            label = label.to(device)
            optimizer.zero_grad()
            # Release cached GPU memory (harmless no-op on CPU-only setups).
            if hasattr(torch.cuda, 'empty_cache'):
                torch.cuda.empty_cache()
            # forward
            outputs = net(im)
            loss = criterion(outputs, label)
            # backward
            loss.backward()
            # update parameters
            optimizer.step()
            train_loss += loss.data
            train_acc += get_acc(outputs, label)
            # progress bar for this epoch
            rate = (step + 1) / len(trainloader)
            a = "*" * int(rate * 50)
            b = "." * (50 - int(rate * 50))
            print('\r train {:3d}|{:3d} {:^3.0f}% [{}->{}] '.format(
                i + 1, epoches, int(rate * 100), a, b), end='')
        train_loss = train_loss / len(trainloader)
        train_acc = train_acc * 100 / len(trainloader)
        if verbose:
            train_acc_list.append(train_acc.item())
            train_loss_list.append(train_loss.item())
        # record the learning rate BEFORE stepping the scheduler
        lr = optimizer.param_groups[0]['lr']
        if verbose:
            lr_list.append(lr)
        # adjust the learning rate from the epoch's mean train loss
        scheduler.step(train_loss)
        if testloader is not None:
            net.eval()
            with torch.no_grad():
                for step, data in enumerate(testloader, start=0):
                    im, label = data
                    im = im.to(device)
                    label = label.to(device)
                    # release cached GPU memory
                    if hasattr(torch.cuda, 'empty_cache'):
                        torch.cuda.empty_cache()
                    outputs = net(im)
                    loss = criterion(outputs, label)
                    test_loss += loss.data
                    test_acc += get_acc(outputs, label)
                    rate = (step + 1) / len(testloader)
                    a = "*" * int(rate * 50)
                    b = "." * (50 - int(rate * 50))
                    print('\r test {:3d}|{:3d} {:^3.0f}% [{}->{}] '.format(
                        i + 1, epoches, int(rate * 100), a, b), end='')
            test_loss = test_loss / len(testloader)
            test_acc = test_acc * 100 / len(testloader)
            if verbose:
                test_loss_list.append(test_loss.item())
                test_acc_list.append(test_acc.item())
            end = time.time()
            print(
                '\rEpoch [{:>3d}/{:>3d}]  Train Loss:{:>.6f}  Train Acc:{:>3.2f}%  '
                'Test Loss:{:>.6f}  Test Acc:{:>3.2f}%  Learning Rate:{:>.6f}'.format(
                    i + 1, epoches, train_loss, train_acc, test_loss, test_acc, lr), end='')
        else:
            end = time.time()
            print('\rEpoch [{:>3d}/{:>3d}]  Train Loss:{:>.6f}  Train Acc:{:>3.2f}%  '
                  'Learning Rate:{:>.6f}'.format(i + 1, epoches, train_loss, train_acc, lr), end='')
        # elapsed time, printed as MM:SS (hours are computed but not shown)
        time_ = int(end - start)
        h = time_ // 3600
        m = time_ % 3600 // 60
        s = time_ % 60
        time_str = "\tTime %02d:%02d" % (m, s)
        # ====================== tensorboard logging ==================
        if writer is not None:
            writer.add_scalars('Loss', {'train': train_loss,
                                        'valid': test_loss}, i + 1)
            writer.add_scalars('Acc', {'train': train_acc,
                                       'valid': test_acc}, i + 1)
            # lr is a plain float, so add_scalar (add_scalars expects a dict)
            writer.add_scalar('Learning Rate', lr, i + 1)
        # =============================================================
        print(time_str)
        # checkpoint the whole model whenever test accuracy improves
        if test_acc > best_acc:
            torch.save(net, path)
            best_acc = test_acc
    Acc = {}
    Loss = {}
    Acc['train_acc'] = train_acc_list
    Acc['test_acc'] = test_acc_list
    Loss['train_loss'] = train_loss_list
    Loss['test_loss'] = test_loss_list
    Lr = lr_list
    return Acc, Loss, Lr
没有TensorBoard可视化的训练函数
def train2(net, trainloader, testloader, epoches, optimizer , criterion, scheduler , path = './model.pth', verbose = False):
device = 'cuda' if torch.cuda.is_available() else 'cpu'
best_acc = 0
train_acc_list, test_acc_list = [],[]
train_loss_list, test_loss_list = [],[]
lr_list = []
for i in range(epoches):
start = time.time()
train_loss = 0
train_acc = 0
test_loss = 0
test_acc = 0
if torch.cuda.is_available():
net = net.to(device)
net.train()
for step,data in enumerate(trainloader,start=0):
im,label = data
im = im.to(device)
label = label.to(device)
optimizer.zero_grad()
# 释放内存
if hasattr(torch.cuda, 'empty_cache'):
torch.cuda.empty_cache()
# formard
outputs = net(im)
loss = criterion(outputs,label)
# backward
loss.backward()
# 更新参数
optimizer.step()
train_loss += loss.data
# probs, pred_y = outputs.data.max(dim=1) # 得到概率
# # 正确的个数
# train_acc += (pred_y==label).sum().item()
# # 总数
# total += label.size(0)
train_acc += get_acc(outputs,label)
# 打印下载进度
rate = (step + 1) / len(trainloader)
a = "*" * int(rate * 50)
b = "." * (50 - int(rate * 50))
print('\\r train :3d|:3d :^3.0f% [->] '.format(i+1,epoches,int(rate*100),a,b),end='')
train_loss = train_loss / len(trainloader)
train_acc = train_acc * 100 / 以上是关于Pytorch CIFAR10图像分类 工具函数utils篇的主要内容,如果未能解决你的问题,请参考以下文章