The ResNet Process

Posted by 月疯


ResNet: in a very deep network, gradients from the later layers have trouble propagating back to the earlier ones, so a shortcut (skip) connection is added around each block; this lets a deep network degenerate gracefully into a shallower one.
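The core idea fits in two lines. A conceptual sketch (not the implementation below): the block computes a residual F(x) and adds the input back in, so if the convolutions learn nothing useful (F(x) ≈ 0) the block reduces to the identity.

# F is the learned residual branch, e.g. conv -> bn -> relu -> conv -> bn
def residual_block(x, F):
    return tf.nn.relu(F(x) + x)  # if F(x) is near zero, this is just relu(x)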

# filter_num: number of convolution filters
# stride: convolution stride
class BasicBlock(layers.Layer):
    def __init__(self,filter_num,stride=1):
        super(BasicBlock, self).__init__()
        # two convolutional layers (3x3 kernels with 'same' padding, so spatial size only changes with the stride)
        self.conv1 = layers.Conv2D(filter_num,(3,3),strides=stride,padding='same')
        # batch normalization
        self.bn1 = layers.BatchNormalization()
        # activation function
        self.relu = layers.Activation('relu')

        self.conv2 = layers.Conv2D(filter_num,(3,3),strides=1,padding='same')
        self.bn2 = layers.BatchNormalization()

        # shortcut branch: when stride != 1, downsample the input with a 1x1 conv so it matches the main branch; otherwise pass it through unchanged
        if stride != 1:
            self.downsample = Sequential()
            self.downsample.add(layers.Conv2D(filter_num, (1, 1), strides=stride))
        else:
            self.downsample = lambda x:x


    # forward pass of one basic block
    def call(self,inputs,training=None):
        # [b, h, w, c]
        out = self.conv1(inputs)
        out = self.bn1(out, training=training)  # pass training so BN uses batch statistics only during training
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out, training=training)

        identity = self.downsample(inputs)

        # add the shortcut to the residual branch, then apply the final ReLU
        output = layers.add([out, identity])
        output = tf.nn.relu(output)

        return output
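A quick shape check (a sketch, assuming the imports from the full listing below): with stride=2 the block halves the spatial size, and the 1x1 shortcut convolution keeps the two branches addable.

block = BasicBlock(64, stride=2)
x = tf.random.normal([4, 32, 32, 64])
print(block(x).shape)  # (4, 16, 16, 64): spatial size halved, 64 channels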

 

# several BasicBlocks stacked together form one ResBlock
class ResNet(keras.Model):
    # layer_dims, e.g. [2, 2, 2, 2]: four ResBlocks, each containing two BasicBlocks
    # num_classes: number of image classes (100 here), predicted by the final dense layer
    def __init__(self,layer_dims,num_classes=100):
        super(ResNet, self).__init__()
        self.stem = Sequential([layers.Conv2D(64,(3,3),strides=(1,1)),  # 64 filters, 3x3 kernel
                                layers.BatchNormalization(),
                                layers.Activation('relu'),  # activation
                                layers.MaxPool2D(pool_size=(2,2),strides=(1,1),padding='same')  # pooling
                                ])

        self.layer1 = self.build_resblock(64, layer_dims[0])
        self.layer2 = self.build_resblock(128,layer_dims[1],stride = 2)
        self.layer3 = self.build_resblock(256,layer_dims[2],stride = 2)
        self.layer4 = self.build_resblock(512,layer_dims[3],stride = 2)

        # output of layer4: [b, h, w, 512]
        # global average pooling adapts to any spatial size: [b, h, w, 512] -> [b, 512]
        self.avgpool = layers.GlobalAveragePooling2D()
        self.fc = layers.Dense(num_classes)


    # forward pass
    def call(self, inputs, training=None):
        x = self.stem(inputs)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        #[b,c]
        x = self.avgpool(x)
        #[b,100]
        x = self.fc(x)
        return x
    # filter_num: number of filters; blocks: how many BasicBlocks one ResBlock contains
    def build_resblock(self,filter_num,blocks,stride=1):

        res_blocks = Sequential()

        # only the first block may downsample (stride > 1); the rest keep stride 1
        res_blocks.add(BasicBlock(filter_num, stride))

        for _ in range(1, blocks):
            res_blocks.add(BasicBlock(filter_num, stride=1))

        return res_blocks

Passing in four ResBlocks with two BasicBlocks each yields the 18-layer network: 1 stem convolution + 4 stages x 2 blocks x 2 convolutions = 16 block convolutions, plus 1 final dense layer, for 18 weighted layers in total. The 34-layer variant stacks [3, 4, 6, 3] blocks instead. Both constructors are shown below.

def resnet18():
    # four ResBlocks, each containing two BasicBlocks
    return ResNet([2,2,2,2])

def resnet34():
    # [3, 4, 6, 3] BasicBlocks per stage, as in the original ResNet-34
    return ResNet([3,4,6,3])
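A quick smoke test of the constructors (a sketch; summary() only works after build()):

model = resnet18()
model.build(input_shape=(None, 32, 32, 3))
model.summary()  # roughly 11 million trainable parameters for 32x32 inputs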

Full code:

# CIFAR-100 in practice
# a deep network can degenerate into a shallower one via the shortcut connections
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, Sequential


# filter_num: number of convolution filters
# stride: convolution stride
class BasicBlock(layers.Layer):
    def __init__(self,filter_num,stride=1):
        super(BasicBlock, self).__init__()
        # two convolutional layers (3x3 kernels with 'same' padding, so spatial size only changes with the stride)
        self.conv1 = layers.Conv2D(filter_num,(3,3),strides=stride,padding='same')
        # batch normalization
        self.bn1 = layers.BatchNormalization()
        # activation function
        self.relu = layers.Activation('relu')

        self.conv2 = layers.Conv2D(filter_num,(3,3),strides=1,padding='same')
        self.bn2 = layers.BatchNormalization()

        # shortcut branch: when stride != 1, downsample the input with a 1x1 conv so it matches the main branch; otherwise pass it through unchanged
        if stride != 1:
            self.downsample = Sequential()
            self.downsample.add(layers.Conv2D(filter_num, (1, 1), strides=stride))
        else:
            self.downsample = lambda x:x


    # forward pass of one basic block
    def call(self,inputs,training=None):
        # [b, h, w, c]
        out = self.conv1(inputs)
        out = self.bn1(out, training=training)  # pass training so BN uses batch statistics only during training
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out, training=training)

        identity = self.downsample(inputs)

        # add the shortcut to the residual branch, then apply the final ReLU
        output = layers.add([out, identity])
        output = tf.nn.relu(output)

        return output

# several BasicBlocks stacked together form one ResBlock
class ResNet(keras.Model):
    # layer_dims, e.g. [2, 2, 2, 2]: four ResBlocks, each containing two BasicBlocks
    # num_classes: number of image classes (100 here), predicted by the final dense layer
    def __init__(self,layer_dims,num_classes=100):
        super(ResNet, self).__init__()
        self.stem = Sequential([layers.Conv2D(64,(3,3),strides=(1,1)),  # 64 filters, 3x3 kernel
                                layers.BatchNormalization(),
                                layers.Activation('relu'),  # activation
                                layers.MaxPool2D(pool_size=(2,2),strides=(1,1),padding='same')  # pooling
                                ])

        self.layer1 = self.build_resblock(64, layer_dims[0])
        self.layer2 = self.build_resblock(128,layer_dims[1],stride = 2)
        self.layer3 = self.build_resblock(256,layer_dims[2],stride = 2)
        self.layer4 = self.build_resblock(512,layer_dims[3],stride = 2)

        # output of layer4: [b, h, w, 512]
        # global average pooling adapts to any spatial size: [b, h, w, 512] -> [b, 512]
        self.avgpool = layers.GlobalAveragePooling2D()
        self.fc = layers.Dense(num_classes)


    # forward pass (must be named call, not __call__; see the error note at the end)
    def call(self, inputs, training=None):
        x = self.stem(inputs)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        #[b,c]
        x = self.avgpool(x)
        #[b,100]
        x = self.fc(x)
        return x
    # filter_num: number of filters; blocks: how many BasicBlocks one ResBlock contains
    def build_resblock(self,filter_num,blocks,stride=1):

        res_blocks = Sequential()

        # only the first block may downsample (stride > 1); the rest keep stride 1
        res_blocks.add(BasicBlock(filter_num, stride))

        for _ in range(1, blocks):
            res_blocks.add(BasicBlock(filter_num, stride=1))

        return res_blocks

def resnet18():
    # four ResBlocks, each containing two BasicBlocks
    return ResNet([2,2,2,2])

def resnet34():
    # [3, 4, 6, 3] BasicBlocks per stage, as in the original ResNet-34
    return ResNet([3,4,6,3])


Training and testing code:

import tensorflow as tf
from tensorflow.keras import layers,optimizers,datasets,Sequential
import os
from ResNet实战 import resnet18  # the full listing above, saved as ResNet实战.py
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0'  # show all messages
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'  # filter out INFO; show WARNING and ERROR
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # filter out everything below FATAL

tf.random.set_seed(2345)


# preprocessing: scale images to [0, 1] and cast labels to int
def preprocess(x, y):
    x = tf.cast(x, dtype=tf.float32) / 255.
    y = tf.cast(y, dtype=tf.int32)
    return x, y

# load the CIFAR-100 dataset
(x, y), (x_test, y_test) = datasets.cifar100.load_data()
# labels arrive with shape [b, 1]; squeeze out the extra dimension -> [b]
y = tf.squeeze(y, axis=1)
y_test = tf.squeeze(y_test, axis=1)

print(x.shape,y.shape,x_test.shape,y_test.shape)

# build the tf.data pipelines: shuffle, preprocess, batch
train_db = tf.data.Dataset.from_tensor_slices((x, y))
train_db = train_db.shuffle(1000).map(preprocess).batch(64)

test_db = tf.data.Dataset.from_tensor_slices((x_test, y_test))
test_db = test_db.map(preprocess).batch(64)  # no need to shuffle the test set
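A quick sanity check of the pipeline (a sketch):

sample_x, sample_y = next(iter(train_db))
print(sample_x.shape, sample_y.shape)  # (64, 32, 32, 3) (64,)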

def main():
    model = resnet18()
    # build the model so every layer's shape (including the dense head) is known
    model.build(input_shape=[None, 32, 32, 3])
    # print the parameter summary
    model.summary()
    # optimizer (learning_rate replaces the deprecated lr argument)
    optimizer = optimizers.Adam(learning_rate=1e-3)
    # training loop
    for epoch in range(50):
        for step,(x,y) in enumerate(train_db):
            with tf.GradientTape() as tape:

                # forward pass: [b, 32, 32, 3] => [b, 100]
                logits = model(x, training=True)
                # one-hot encode the labels: [b] => [b, 100]
                y_onehot = tf.one_hot(y, depth=100)
                # cross-entropy loss computed on the raw logits
                loss = tf.losses.categorical_crossentropy(y_onehot, logits, from_logits=True)
                # average over the batch
                loss = tf.reduce_mean(loss)

            grads = tape.gradient(loss,model.trainable_variables)
            optimizer.apply_gradients(zip(grads,model.trainable_variables))

            # print the loss every 100 steps
            if step % 100 == 0:
                print(epoch, step, 'loss:', float(loss))

        # evaluate on the test set once per epoch
        total_num = 0
        total_correct = 0
        for x, y in test_db:

            logits = model(x, training=False)
            prob = tf.nn.softmax(logits, axis=1)
            pred = tf.argmax(prob, axis=1)
            pred = tf.cast(pred, dtype=tf.int32)

            correct = tf.cast(tf.equal(pred, y), dtype=tf.int32)
            correct = tf.reduce_sum(correct)

            total_num += x.shape[0]
            total_correct += int(correct)

        acc = total_correct / total_num
        print(epoch, 'acc:', acc)




if __name__ == "__main__":
    main()

Notes:

BatchNormalization: the batch-norm procedure (a sketch of the math follows the list)
1. Compute the mean of each training batch
2. Compute the variance of each training batch
3. Normalize the data using that mean and variance
4. Train the parameters γ and β
5. The output y is a linear transformation of the normalized value with γ and β
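A minimal sketch of those five steps (illustrative only; the real Keras layer also maintains running statistics for inference):

def batch_norm(x, gamma, beta, eps=1e-5):
    # steps 1-2: per-batch mean and variance over the batch/height/width axes
    mean, var = tf.nn.moments(x, axes=[0, 1, 2], keepdims=True)
    # step 3: normalize
    x_hat = (x - mean) / tf.sqrt(var + eps)
    # steps 4-5: scale and shift with the trainable gamma and beta
    return gamma * x_hat + beta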

Error encountered:

raise NotImplementedError('When subclassing the `Model` class, you should '
NotImplementedError: When subclassing the `Model` class, you should implement a `call` method.

Fix: on inspection, the call() method had been written as __call__; renaming it to call resolves the error.
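For reference, a minimal sketch of correct Model subclassing (Keras invokes your call() through its own __call__, which is why overriding __call__ breaks the mechanism):

class TinyModel(keras.Model):
    def __init__(self):
        super(TinyModel, self).__init__()
        self.fc = layers.Dense(10)

    def call(self, inputs, training=None):  # must be named call, not __call__
        return self.fc(inputs)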

 

Run results: the model has roughly 10 million parameters; my machine is too slow, so training never finished.

 

 
