PyTorch Review
Basic functions:
dir(): tells us what a toolbox (a package or module) contains.
help(): tells us how to use each individual tool (its documentation).
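For example (a minimal sketch, assuming only that torch is installed):
import torch
print(dir(torch))              # everything the torch package exposes
print(dir(torch.cuda))         # drill down into a sub-package
help(torch.cuda.is_available)  # documentation for one specific function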
Ants and bees dataset
Directory operations and building a basic custom dataset
from torch.utils.data import Dataset
import cv2
import os
from PIL import Image

class MyData(Dataset):
    def __init__(self, root_dir, label_dir):
        self.root_dir = root_dir
        self.label_dir = label_dir
        self.path = os.path.join(self.root_dir, self.label_dir)
        self.img_path = os.listdir(self.path)  # list of image file names in this class folder

    def __getitem__(self, idx):
        img_name = self.img_path[idx]
        img_item_path = os.path.join(self.root_dir, self.label_dir, img_name)
        img = Image.open(img_item_path)
        label = self.label_dir  # the folder name doubles as the label
        return img, label

    def __len__(self):
        return len(self.img_path)

root_dir = "hymenoptera_data/train"
ants_label_dir = "ants"
bees_label_dir = "bees"
ants_dataset = MyData(root_dir, ants_label_dir)
bees_dataset = MyData(root_dir, bees_label_dir)
train_dataset = ants_dataset + bees_dataset  # concatenates the two into a single ConcatDataset
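A quick usage check (a sketch, assuming the hymenoptera_data folder above is in place):
print(len(ants_dataset), len(bees_dataset), len(train_dataset))  # the two lengths add up
img, label = train_dataset[0]   # a PIL image and the folder name "ants" as its label
img.show()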
Dataset
Downloading and using an official torchvision dataset
import torchvision
from torch.utils.tensorboard import SummaryWriter

# Compose expects a list of transforms
dataset_transform = torchvision.transforms.Compose([
    torchvision.transforms.ToTensor()
])

train_set = torchvision.datasets.CIFAR10(root="./dataset", transform=dataset_transform, train=True, download=True)
test_set = torchvision.datasets.CIFAR10(root="./dataset", transform=dataset_transform, train=False, download=True)

writer = SummaryWriter("p10")
for i in range(10):
    img, target = test_set[i]
    writer.add_image("test_set", img, i)
writer.close()
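The logged images can then be inspected by running tensorboard --logdir=p10 in a terminal and opening the URL it prints (port 6006 by default).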
DataLoader
import torchvision
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter("loader")
test_set = torchvision.datasets.CIFAR10(root="./dataset",
                                        transform=torchvision.transforms.ToTensor(),
                                        train=False, download=True)
test_loader = DataLoader(dataset=test_set, batch_size=64,
                         shuffle=False, num_workers=0,
                         drop_last=True)

# First image and its target in the test set
img, target = test_set[0]
print(img.shape)
print(target)

# Iterate over the loader for two epochs (no training here, just logging the batches)
for epoch in range(2):
    step = 0
    for data in test_loader:
        imgs, targets = data
        writer.add_images(f"Epoch: {epoch}", imgs, step)
        step += 1
writer.close()
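A quick check of what drop_last=True does (the CIFAR10 test split has 10000 images):
print(len(test_set))     # 10000
print(len(test_loader))  # 156 -> 10000 // 64 full batches; the last 16 images are dropped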
Neural networks
Example:
import torch
import torch.nn as nn

class Model(nn.Module):
    def __init__(self) -> None:
        super().__init__()

    def forward(self, input):
        output = input + 1
        return output

model = Model()
x = torch.tensor(1.0)
output = model(x)  # calling the model invokes forward()
print(output)      # tensor(2.)
Convolution kernels:
import torch
import torch.nn as nn
import torch.nn.functional as F

input = torch.tensor([[1, 2, 0, 3, 1],
                      [0, 1, 2, 3, 1],
                      [1, 2, 1, 0, 0],
                      [5, 2, 3, 1, 1],
                      [2, 1, 0, 1, 1]])
# Convolution kernel
kernel = torch.tensor([[1, 2, 1],
                       [0, 1, 0],
                       [2, 1, 0]])
# Reshape to (batch, channels, height, width)
input = torch.reshape(input, (1, 1, 5, 5))
kernel = torch.reshape(kernel, (1, 1, 3, 3))
print(input.shape)
print(kernel.shape)

output = F.conv2d(input, kernel, stride=1)
print(output)
output = F.conv2d(input, kernel, stride=1, padding=1)
print(output)
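The spatial output size follows the standard convolution formula H_out = (H_in + 2*padding - kernel_size) / stride + 1. For the 5x5 input above with a 3x3 kernel and stride 1, the first call gives (5 - 3)/1 + 1 = 3, i.e. a 3x3 feature map; with padding=1 it becomes (5 + 2 - 3)/1 + 1 = 5, so the output keeps the 5x5 size.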
nn.Conv2d:
import torch
import torchvision
from torch import nn
from torch.nn import Conv2d
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

test_dataset = torchvision.datasets.CIFAR10("./dataset", train=False,
                                            transform=torchvision.transforms.ToTensor(),
                                            download=True)
test_load = DataLoader(test_dataset, batch_size=64)

# Model with a single convolutional layer defined in __init__
class Model(nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.conv1 = Conv2d(in_channels=3, out_channels=6, kernel_size=3, stride=1, padding=0)

    def forward(self, x):
        x = self.conv1(x)
        return x

model = Model()
writer = SummaryWriter("logs")
step = 0
for data in test_load:
    imgs, targets = data
    output = model(imgs)
    print(imgs.shape)
    print(output.shape)
    # torch.Size([64, 3, 32, 32])
    writer.add_images("input", imgs, step)
    # torch.Size([64, 6, 30, 30]) -> [xxx, 3, 30, 30] so add_images can display it as 3-channel images
    output = torch.reshape(output, (-1, 3, 30, 30))
    print(output.shape)
    print("----------------------")
    writer.add_images("output", output, step)
    step += 1
writer.close()
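For the CIFAR10 batches this means the 32x32 images become 30x30 feature maps ((32 - 3)/1 + 1 = 30) with 6 channels instead of 3. The reshape to (-1, 3, 30, 30) is only a display trick: it folds the extra channels into the batch dimension, which grows from 64 to 128, so add_images can treat the output as ordinary 3-channel images.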
Max pooling:
import torch
import torchvision
from torch import nn
from torch.nn import MaxPool2d
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# input = torch.tensor([[1, 2, 0, 3, 1],
#                       [0, 1, 2, 3, 1],
#                       [1, 2, 1, 0, 0],
#                       [5, 2, 3, 1, 1],
#                       [2, 1, 0, 1, 1]], dtype=torch.float32)
# input = torch.reshape(input, (-1, 1, 5, 5))
# print(input.shape)

dataset = torchvision.datasets.CIFAR10("./dataset", train=False, download=True,
                                       transform=torchvision.transforms.ToTensor())
dataloader = DataLoader(dataset, batch_size=64)
writer = SummaryWriter("logs_maxpool")

class Model(nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.maxpool1 = MaxPool2d(kernel_size=3, ceil_mode=True)

    def forward(self, input):
        output = self.maxpool1(input)
        return output

model = Model()
step = 0
for data in dataloader:
    imgs, targets = data
    writer.add_images("input", imgs, step)
    output = model(imgs)
    writer.add_images("output", output, step)
    step += 1
writer.close()
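MaxPool2d uses stride = kernel_size by default, so the 32x32 images come out as ceil(32/3) = 11x11 here; with ceil_mode=False the result would be floor(32/3) = 10x10. A one-line check (a sketch run outside the loop):
print(model(torch.rand(1, 3, 32, 32)).shape)  # torch.Size([1, 3, 11, 11])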
Non-linearities:
The ReLU() and Sigmoid() activation functions
import torch
import torchvision
from torch import nn
from torch.nn import MaxPool2d, ReLU, Sigmoid
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

dataset = torchvision.datasets.CIFAR10("./dataset", train=False, download=True,
                                       transform=torchvision.transforms.ToTensor())
dataloader = DataLoader(dataset, batch_size=64)
writer = SummaryWriter("logs_relu")

class Model(nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.relu = ReLU()
        self.sigmoid = Sigmoid()

    def forward(self, input):
        output = self.sigmoid(input)  # forward only applies the sigmoid; relu is defined but unused here
        return output

model = Model()
step = 0
for data in dataloader:
    imgs, targets = data
    writer.add_images("input", imgs, step)
    output = model(imgs)
    writer.add_images("output", output, step)
    step += 1
writer.close()
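Since the forward pass above only applies Sigmoid, here is a minimal ReLU sketch on a hand-made tensor (the values are just for illustration):
relu = ReLU()
x = torch.tensor([[1.0, -0.5],
                  [-1.0, 3.0]])
print(relu(x))  # tensor([[1., 0.], [0., 3.]]) -- negative values are clamped to zero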
Linear layers and an introduction to other layers
import torch
import torchvision
from torch import nn
from torch.nn import MaxPool2d, ReLU, Sigmoid
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

dataset = torchvision.datasets.CIFAR10("./dataset",
                                       train=False,
                                       download=True,
                                       transform=torchvision.transforms.ToTensor())
dataloader = DataLoader(dataset, batch_size=64, drop_last=True)

class Model(nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.linear = nn.Linear(196608, 10)  # 196608 = 64 * 3 * 32 * 32, a whole flattened batch

    def forward(self, input):
        output = self.linear(input)
        return output

model = Model()
for data in dataloader:
    imgs, targets = data
    print(imgs.shape)             # torch.Size([64, 3, 32, 32])
    output = torch.flatten(imgs)  # flattens the entire batch into one vector
    print(output.shape)           # torch.Size([196608])
    output = model(output)
    print(output.shape)           # torch.Size([10])
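Flattening the whole batch into a single vector is why drop_last=True is needed: a smaller final batch would no longer match the 196608 input features. A variant that keeps the batch dimension is sketched below (the layer size 3072 = 3 * 32 * 32 is the per-image feature count; this is an adaptation, not part of the original):
linear = nn.Linear(3072, 10)                  # per-image features -> 10 outputs
for imgs, targets in dataloader:
    flat = torch.flatten(imgs, start_dim=1)   # torch.Size([64, 3072]); batch dimension kept
    print(linear(flat).shape)                 # torch.Size([64, 10])
    break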