Implementing "same" convolution in PyTorch

I am trying to implement a "same" convolution. Surprisingly, this standard layer does not seem to be available in PyTorch. Hopefully this helps make such a layer available. I need help generalizing it to use all the hyperparameters; right now it only handles the kernel size (i.e. variable stride, dilation, or anything else that could affect the output size is not accounted for). Does anyone know how to generalize it to all the hyperparameters of a convolution (as used in conv nets)?
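
For context: the spatial output size of Conv2d is out = floor((in + 2*padding - dilation*(kernel - 1) - 1)/stride) + 1. Setting out == in with stride 1 gives padding = dilation*(kernel - 1)/2, which is the formula the code below uses (with dilation 1). A minimal sketch of that as a helper (same_padding is an illustrative name, not a PyTorch API):

def same_padding(kernel_size, dilation=1):
    # solving out == in in the Conv2d size formula (stride 1) gives
    # 2*pad = dilation*(kernel_size - 1); exact for odd effective kernel sizes
    return (dilation * (kernel_size - 1)) // 2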

Code:

def get_cnn(C, out_filters=[14,13,12,3], kernels=[(9,7),(7,7),(5,3),(3,3)]):
    N = len(out_filters)
    print()
    print(f'C = {C}')
    model_layers = OrderedDict()
    in_channels = C
    x = torch.randn(1,C,10,10)
    out = x
    print(f'initial_size = {x.size()}')
    for i in range(1,N):  # NOTE: starts at 1, so out_filters[0] and kernels[0] are never used
        print(f'--- building layer = {i}')
        ## create conv layer
        kernel_size = kernels[i]
        out_channels = out_filters[i]
        # to make sure it's a "same" convolution (ignoring stride)
        print(f'in_channels = {in_channels}')
        print(f'out_channels = {out_channels}')
        padding = ((kernels[i][0]-1)//2,(kernels[i][1]-1)//2)
        conv = torch.nn.Conv2d(in_channels, out_channels, kernel_size, padding=padding)
        #conv = torch.nn.Conv2d(in_channels, out_channels, kernel_size)
        model_layers[f'Conv{i}'] = conv
        ## compute dummy data
        out = conv(out)
        print(f'x.size() = {x.size()}')
        print(f'out.size() = {out.size()}')
        ## add activation
        model_layers[f'ReLU{i}'] = torch.nn.ReLU()
        ## so that next layer works
        in_channels = out_channels
    ## make sequential model
    mdl = torch.nn.Sequential(model_layers)
    y = mdl(x)  # sanity check: run the dummy input through the assembled model
    return mdl
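
As an aside, on PyTorch 1.9 or newer, nn.Conv2d accepts padding='same' directly (only for stride 1), which handles kernel size and dilation for you:

import torch

conv = torch.nn.Conv2d(in_channels=3, out_channels=8, kernel_size=(5,3), padding='same')
x = torch.randn(1, 3, 10, 10)
print(conv(x).size())  # torch.Size([1, 8, 10, 10]) -- spatial size preserved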


Complete runnable code:

import torch
from collections import OrderedDict
import random

def do_conv():
    x = torch.randn(1,3,5,6)
    conv = torch.nn.Conv2d(in_channels=3, out_channels=5, kernel_size=(3,3))
    y = conv(x)
    print(y)  # no padding: the output shape is (1, 5, 3, 4), smaller than the 5x6 input

def get_cnn(C, out_filters=[14,13,12,3], kernels=[(9,7),(7,7),(5,3),(3,3)]):
    N = len(out_filters)
    print()
    print(f'C = {C}')
    model_layers = OrderedDict()
    in_channels = C
    x = torch.randn(1,C,10,10)
    out = x
    print(f'initial_size = {x.size()}')
    for i in range(1,N):  # NOTE: starts at 1, so out_filters[0] and kernels[0] are never used
        print(f'--- building layer = {i}')
        ## create conv layer
        kernel_size = kernels[i]
        out_channels = out_filters[i]
        # to make sure it's a "same" convolution (ignoring stride)
        print(f'in_channels = {in_channels}')
        print(f'out_channels = {out_channels}')
        padding = ((kernels[i][0]-1)//2,(kernels[i][1]-1)//2)
        conv = torch.nn.Conv2d(in_channels, out_channels, kernel_size, padding=padding)
        #conv = torch.nn.Conv2d(in_channels, out_channels, kernel_size)
        model_layers[f'Conv{i}'] = conv
        ## compute dummy data
        out = conv(out)
        print(f'x.size() = {x.size()}')
        print(f'out.size() = {out.size()}')
        ## add activation
        model_layers[f'ReLU{i}'] = torch.nn.ReLU()
        ## so that next layer works
        in_channels = out_channels
    ## make sequential model
    mdl = torch.nn.Sequential(model_layers)
    y = mdl(x)  # sanity check: run the dummy input through the assembled model
    return mdl

def get_hardcoded_variable_size_input():
    C = 3
    variable_size_input = [torch.randn(1,C,15,15), torch.randn(1,C,12,16), torch.randn(1,C,15,11)]
    return variable_size_input

def get_variable_size_input(N=3):
    variable_size_input = []
    for i in range(N):
        C,H,W = 3, random.randint(11,20), random.randint(14,19)
        x = torch.randn(1,C,H,W)  # include the batch dimension so Conv2d and the size checks work
        variable_size_input.append(x)
    return variable_size_input

def any_input_output_equals_input():
    '''
    test that no matter the input size, the output of the network has the same size as the input
    '''
    X_list = get_hardcoded_variable_size_input()
    for i in range(len(X_list)):
        print(f'data point i = {i}')
        ## get data
        x = X_list[i]
        print(x.size())
        ## create cnn
        _,C,_,_ = x.size()
        cnn = get_cnn(C)
        # pass data
        y = cnn(x)
        ## make sure input and output have same size
        assert x.size() == y.size(), f'Error: {x.size()} != {y.size()}'
    ##

if __name__ == '__main__':
    #do_conv()
    print()
    print('start main')
    any_input_output_equals_input()
    print('Done a')
Answer

Perhaps something like this could work?

import math

import torch
import torch.nn as nn

class PaddedConv2d(nn.Module):
    """
    A simple class to perform convolutions with padding so that input and
    output size is the same
    """

    def __init__(self, conv2d, pad2d_type):
        """
        Parameters
        ---

        * conv2d : torch.nn.Conv2d
            a convolutional layer used in this PaddedConv2d

        * pad2d_type : type
            a padding layer class from torch.nn. I don't want the
            instance itself since it needs the padding size to be created;
            instead, I want the callable which returns an instance of 
            a padding layer, which will be created on the fly during the 
            "forward" pass.
        """
        super().__init__()
        self.conv2d = conv2d
        self.pad2d_type = pad2d_type

    def forward(self, x):
        """
        Parameters
        ---
        * x : torch.Tensor
             the input tensor to be padded and then convolved. Shape (batch_size, channels, rows, cols)
        """
        # computing padding size so that the output spatial size equals the input:
        # out = (in + 2*pad - k)//s + 1 == in  =>  pad = ceil((k - in*(1 - s) - s) / 2)
        k_h, k_w = self.conv2d.kernel_size
        s_h, s_w = self.conv2d.stride
        pad_h = math.ceil((k_h - x.shape[2] * (1 - s_h) - s_h) / 2)
        pad_w = math.ceil((k_w - x.shape[3] * (1 - s_w) - s_w) / 2)
        padder = self.pad2d_type((pad_w, pad_w, pad_h, pad_h))

        return self.conv2d(padder(x))
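
A quick usage sketch of the class above (nn.ZeroPad2d is one padding class you could pass in; the shapes shown assume stride 1):

import torch
import torch.nn as nn

conv = nn.Conv2d(in_channels=3, out_channels=8, kernel_size=(5,5))
same_conv = PaddedConv2d(conv, nn.ZeroPad2d)
x = torch.randn(1, 3, 15, 15)
y = same_conv(x)
print(y.size())  # torch.Size([1, 8, 15, 15]) -- same spatial size as the input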
