PyTorch


PyTorch Basics

import torch
torch.__version__  # returns '1.13.1+cu116'

Basic usage

Matrices: torch.empty returns an uninitialized tensor, so the entries are whatever happened to be in memory

x = torch.empty(5, 3)
		tensor([[1.4586e-19, 1.1578e+27, 2.0780e-07],
		        [6.0542e+22, 7.8675e+34, 4.6894e+27],
		        [1.6217e-19, 1.4333e-19, 2.7530e+12],
		        [7.5338e+28, 8.1173e-10, 4.3861e-43],
		        [2.8912e-03, 4.3861e-43, 2.8912e-03]])
		        

Random values

x = torch.rand(5, 3)  # a 5×3 matrix of random values drawn uniformly from [0, 1)
		tensor([[0.1511, 0.6433, 0.1245],
		        [0.8949, 0.8577, 0.3564],
		        [0.7810, 0.5037, 0.7101],
		        [0.1997, 0.4917, 0.1746],
		        [0.4288, 0.9921, 0.4862]])		

Initialize an all-zeros matrix

x = torch.zeros(5, 3, dtype=torch.long)
		tensor([[0, 0, 0],
		        [0, 0, 0],
		        [0, 0, 0],
		        [0, 0, 0],
		        [0, 0, 0]])

Create a tensor directly from data

x = torch.tensor([5.5, 3])
tensor([5.5000, 3.0000])
x = x.new_ones(5, 3, dtype=torch.double)  # a tensor of ones that reuses x's properties unless overridden
		tensor([[1., 1., 1.],
		        [1., 1., 1.],
		        [1., 1., 1.],
		        [1., 1., 1.],
		        [1., 1., 1.]], dtype=torch.float64)		
x = torch.randn_like(x, dtype=torch.float)  # a tensor the same size as x, filled from the standard normal distribution (mean 0, variance 1)
		tensor([[ 0.6811, -1.2104, -1.2676],
		        [-0.3295,  0.1155, -0.5736],
		        [-1.3656, -0.4973, -0.7043],
		        [-1.3670, -0.3296,  3.1743],
		        [ 1.3443,  0.3373,  0.6182]])   
		        

Show the matrix size

x.size()
		torch.Size([5, 3])
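torch.Size is a subclass of Python's tuple, so the usual tuple operations work on it; a quick illustration:

rows, cols = x.size()  # tuple unpacking: rows = 5, cols = 3
x.size(0)              # 5 -- the size along dimension 0
x.shape                # torch.Size([5, 3]) -- equivalent attribute form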

Basic operations

y = torch.rand(5, 3)  # another random 5×3 matrix
		tensor([[0.0542, 0.9674, 0.5902],
		        [0.7749, 0.1682, 0.2871],
		        [0.1747, 0.3728, 0.2077],
		        [0.9092, 0.3087, 0.3981],
		        [0.4231, 0.8725, 0.6005]])
x		        
		tensor([[ 0.6811, -1.2104, -1.2676],
		        [-0.3295,  0.1155, -0.5736],
		        [-1.3656, -0.4973, -0.7043],
		        [-1.3670, -0.3296,  3.1743],
		        [ 1.3443,  0.3373,  0.6182]])	
x + y
		tensor([[ 0.7353, -0.2430, -0.6774],
		        [ 0.4454,  0.2837, -0.2865],
		        [-1.1908, -0.1245, -0.4967],
		        [-0.4578, -0.0209,  3.5723],
		        [ 1.7674,  1.2098,  1.2187]])	
torch.add(x, y)  # equivalent: the same element-wise addition
		tensor([[ 0.7353, -0.2430, -0.6774],
		        [ 0.4454,  0.2837, -0.2865],
		        [-1.1908, -0.1245, -0.4967],
		        [-0.4578, -0.0209,  3.5723],
		        [ 1.7674,  1.2098,  1.2187]])			        	        	        
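Operations with a trailing underscore are the in-place variants, and torch.add can also write into a preallocated output tensor; a small sketch:

y.add_(x)  # in-place: y now holds x + y
result = torch.empty(5, 3)
torch.add(x, y, out=result)  # write the sum into result instead of allocating a new tensor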

Indexing

x[:, 1]
		tensor([-1.2104,  0.1155, -0.4973, -0.3296,  0.3373])		
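NumPy-style slicing works along any dimension; a few more examples for illustration:

x[0]              # first row, shape (3,)
x[1:3, :2]        # rows 1-2, first two columns
x[-1, -1].item()  # bottom-right element as a plain Python number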

x.view() is similar to reshape(): it reshapes the tensor's dimensions

x = torch.randn(4, 4)
		tensor([[-0.3035, -2.5819,  1.2449, -0.3448],
		        [ 1.0095, -0.1734,  1.5666,  0.5170],
		        [-1.0587,  0.1241, -0.5550, -1.6905],
		        [ 0.8625, -1.3681, -0.1491,  0.2202]])
y = x.view(16)
		tensor([-0.3035, -2.5819,  1.2449, -0.3448,  1.0095, -0.1734,  1.5666,  0.5170,
		        -1.0587,  0.1241, -0.5550, -1.6905,  0.8625, -1.3681, -0.1491,  0.2202])	
z = x.view(-1, 8)  # -1 is inferred: x has 16 elements in total, each row is fixed at 8 columns, and 16/8 = 2, so there are 2 rows
		tensor([[-0.3035, -2.5819,  1.2449, -0.3448,  1.0095, -0.1734,  1.5666,  0.5170],
		        [-1.0587,  0.1241, -0.5550, -1.6905,  0.8625, -1.3681, -0.1491,  0.2202]])
print(x.size(), y.size(), z.size())
		torch.Size([4, 4]) torch.Size([16]) torch.Size([2, 8])
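One caveat worth knowing: view() requires the tensor's memory to be contiguous, while reshape() copies when it has to; a minimal sketch:

xt = torch.randn(4, 4).t()  # transposing makes the memory layout non-contiguous
# xt.view(16)               # would raise a RuntimeError here
xt.reshape(16)              # works: falls back to a copy when needed
xt.contiguous().view(16)    # or make the memory contiguous first, then view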

Interoperating with NumPy (conversion in both directions)

a = torch.ones(5)
		tensor([1., 1., 1., 1., 1.])
b = a.numpy()
		array([1., 1., 1., 1., 1.], dtype=float32)

import numpy as np
a = np.ones(5)
		array([1., 1., 1., 1., 1.])
b = torch.from_numpy(a)
		tensor([1., 1., 1., 1., 1.], dtype=torch.float64)
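Note that both conversions share the underlying memory on the CPU, so an in-place change on one side is visible on the other:

a = torch.ones(5)
b = a.numpy()  # b shares memory with a
a.add_(1)      # in-place add on the tensor
b              # array([2., 2., 2., 2., 2.], dtype=float32) -- b changed too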

The autograd mechanism

Tensors that need gradients can be flagged manually:

x = torch.randn(3,4)  # torch.randn draws from the standard normal distribution (mean 0, variance 1)
		tensor([[-1.5885,  0.6992, -0.2198,  1.2736],
		        [ 0.6211, -0.3729,  0.1261,  1.4094],
		        [ 0.7418, -0.2801, -0.0672, -0.5614]])
x = torch.randn(3,4,requires_grad=True)
		tensor([[ 0.9318, -1.0761,  0.6794,  1.2261],
		        [-1.7192, -0.6009, -0.3852,  0.2492],
		        [-0.1853,  0.2066,  0.9497, -0.3329]], requires_grad=True)
# Method 2: set the flag after creation
x = torch.randn(3,4)
x.requires_grad=True
		tensor([[-1.9635,  0.5769,  1.2705, -0.8758],
		        [ 1.2847, -1.0498, -0.3650, -0.5059],
		        [ 0.2780,  0.0816,  0.7754,  0.2048]], requires_grad=True)	   
b = torch.randn(3,4,requires_grad=True)
t = x + b
y = t.sum()
		tensor(4.4444, grad_fn=<SumBackward0>)
y.backward()
b.grad
		tensor([[1., 1., 1., 1.],
		        [1., 1., 1., 1.],
		        [1., 1., 1., 1.]])		
Although requires_grad was never set on t, t is computed from tensors that require gradients, so it is enabled automatically:
x.requires_grad, b.requires_grad, t.requires_grad  # returns (True, True, True)
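Conversely, gradient tracking can be switched off locally when it is not needed, e.g. with torch.no_grad() or detach():

with torch.no_grad():  # no graph is recorded inside this block
    t2 = x + b
t2.requires_grad  # False

t3 = t.detach()   # shares t's data but is cut off from the graph
t3.requires_grad  # False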
     		        

# Computation flow
x = torch.rand(1)
b = torch.rand(1, requires_grad = True)
w = torch.rand(1, requires_grad = True)
y = w * x 
z = y + b 
x.requires_grad, b.requires_grad, w.requires_grad, y.requires_grad  # note that y requires grad too
		(False, True, True, True)
x.is_leaf, w.is_leaf, b.is_leaf, y.is_leaf, z.is_leaf  # only tensors created directly by the user are leaf nodes
		(True, True, True, False, False)	
Backpropagation:
z.backward(retain_graph=True)  # gradients accumulate across backward passes unless cleared
w.grad  # the accumulated result
		tensor([1.6244])
b.grad
		tensor([2.])
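To see the accumulation, call backward() a second time (the graph was kept by retain_graph=True), then clear the gradients in place before the next pass; a minimal sketch:

z.backward(retain_graph=True)  # .grad is added to, not replaced
w.grad  # now twice the single-pass gradient

w.grad.zero_()  # clear in place before the next backward pass
b.grad.zero_()
z.backward(retain_graph=True)
w.grad  # back to the single-pass gradient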
	

A linear regression example

Construct the input data x and the corresponding labels y:
x_values = [i for i in range(11)]
x_train = np.array(x_values, dtype=np.float32)
x_train = x_train.reshape(-1, 1)
x_train.shape  # (11, 1)

y_values = [2*i + 1 for i in x_values]
y_train = np.array(y_values, dtype=np.float32)
y_train = y_train.reshape(-1, 1)
y_train.shape  # (11, 1)

import torch
import torch.nn as nn
The linear regression model: linear regression is just a fully connected layer with no activation function.
class LinearRegressionModel(nn.Module):
    def __init__(self, input_dim, output_dim):
        super(LinearRegressionModel, self).__init__()
        self.linear = nn.Linear(input_dim, output_dim)  

    def forward(self, x):  # forward pass
        out = self.linear(x)
        return out
input_dim = 1
output_dim = 1

model = LinearRegressionModel(input_dim, output_dim)
		LinearRegressionModel(
		  (linear): Linear(in_features=1, out_features=1, bias=True)
		)
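Printing the model lists its modules; the learnable parameters themselves can be inspected as well:

for name, param in model.named_parameters():
    print(name, param.shape)
# linear.weight torch.Size([1, 1])
# linear.bias torch.Size([1])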
Specify the hyperparameters and the loss function:
epochs = 1000  # number of training iterations
learning_rate = 0.01  # learning rate (step size for SGD)
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)  # optimizer over the model's parameters
criterion = nn.MSELoss()  # mean squared error between predictions and targets
Train the model:
for epoch in range(epochs):
    epoch += 1
    # note: convert the NumPy arrays to tensors
    inputs = torch.from_numpy(x_train)
    labels = torch.from_numpy(y_train)

    # zero the gradients at every iteration
    optimizer.zero_grad()

    # forward pass
    outputs = model(inputs)

    # compute the loss
    loss = criterion(outputs, labels)

    # backward pass
    loss.backward()

    # update the weights
    optimizer.step()
    if epoch % 50 == 0:
        print('epoch {}, loss {}'.format(epoch, loss.item()))
				epoch 50, loss 0.4448287785053253
				epoch 100, loss 0.25371354818344116
				epoch 150, loss 0.14470864832401276
				epoch 200, loss 0.08253632485866547
				epoch 250, loss 0.04707561805844307
				epoch 300, loss 0.026850251480937004
				epoch 350, loss 0.015314370393753052
				epoch 400, loss 0.008734731003642082
				epoch 450, loss 0.004981952253729105
				epoch 500, loss 0.002841521752998233
				epoch 550, loss 0.0016206930158659816
				epoch 600, loss 0.0009243797394447029
				epoch 650, loss 0.0005272324196994305
				epoch 700, loss 0.0003007081104442477
				epoch 750, loss 0.00017151293286588043
				epoch 800, loss 9.782632696442306e-05
				epoch 850, loss 5.579544449574314e-05
				epoch 900, loss 3.182474029017612e-05
				epoch 950, loss 1.8151076801586896e-05
				epoch 1000, loss 1.0352457138651516e-05        
Test the model's predictions:
predicted = model(torch.from_numpy(x_train)).detach().numpy()  # detach from the graph; no gradients are needed for inference
		array([[ 0.99918383],
		       [ 2.9993014 ],
		       [ 4.9994187 ],
		       [ 6.9995365 ],
		       [ 8.999654  ],
		       [10.999771  ],
		       [12.999889  ],
		       [15.000007  ],
		       [17.000124  ],
		       [19.000242  ],
		       [21.000359  ]], dtype=float32)
Saving and loading the model
torch.save(model.state_dict(), 'model.pkl')
model.load_state_dict(torch.load('model.pkl'))
		<All keys matched successfully>
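Since state_dict() holds only the parameter tensors, reconstruct the architecture first and then load the weights into it; for example:

new_model = LinearRegressionModel(input_dim, output_dim)  # rebuild the architecture
new_model.load_state_dict(torch.load('model.pkl'))        # then restore the weights
new_model.eval()  # switch to inference mode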

Training on the GPU: just move the model and the data onto the CUDA device.

import torch
import torch.nn as nn
import numpy as np


class LinearRegressionModel(nn.Module):
    def __init__(self, input_dim, output_dim):
        super(LinearRegressionModel, self).__init__()
        self.linear = nn.Linear(input_dim, output_dim)  

    def forward(self, x):  # forward pass
        out = self.linear(x)
        return out

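A minimal sketch of the device-aware version of the same training loop, assuming the x_train, y_train, and hyperparameters defined above:

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")  # pick the GPU if present

model = LinearRegressionModel(1, 1)
model.to(device)  # move the model's parameters onto the device

criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

for epoch in range(1, 1001):
    # the data must live on the same device as the model
    inputs = torch.from_numpy(x_train).to(device)
    labels = torch.from_numpy(y_train).to(device)

    optimizer.zero_grad()
    outputs = model(inputs)
    loss = criterion(outputs, labels)
    loss.backward()
    optimizer.step()
    if epoch % 50 == 0:
        print('epoch {}, loss {}'.format(epoch, loss.item()))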