The VGG13 Convolution Process

Posted by 月疯

VGG13 is essentially an extension of AlexNet: it keeps the same overall design but makes the convolutional network deeper.

 

Today we work through a hands-on CIFAR-100 example. The convolution process:

import tensorflow as tf
from tensorflow.keras import layers,optimizers,datasets,Sequential
import os

# os.environ['TF_CPP_MIN_LOG_LEVEL']='0' # show all messages
os.environ['TF_CPP_MIN_LOG_LEVEL']='1' # hide INFO; show only WARNING and ERROR messages
# os.environ['TF_CPP_MIN_LOG_LEVEL']='3' # suppress everything except FATAL messages

tf.random.set_seed(2345)
#VGG is essentially a deeper, strengthened version of the AlexNet structure
#build the convolutional layers
conv_layers=[
    #ten convolutional layers in total
    #first block: convolution + pooling
    layers.Conv2D(64,kernel_size=[3,3],padding="same",activation=tf.nn.relu),
    layers.Conv2D(64,kernel_size=[3,3],padding="same",activation=tf.nn.relu),
    layers.MaxPool2D(pool_size=[2,2],strides=2,padding="same"),

    #second block: convolution + pooling
    layers.Conv2D(128,kernel_size=[3,3],padding="same",activation=tf.nn.relu),
    layers.Conv2D(128,kernel_size=[3,3],padding="same",activation=tf.nn.relu),
    layers.MaxPool2D(pool_size=[2,2],strides=2,padding="same"),

    #third block: convolution + pooling
    layers.Conv2D(256,kernel_size=[3,3],padding="same",activation=tf.nn.relu),
    layers.Conv2D(256,kernel_size=[3,3],padding="same",activation=tf.nn.relu),
    layers.MaxPool2D(pool_size=[2,2],strides=2,padding="same"),

    #fourth block: convolution + pooling
    layers.Conv2D(512,kernel_size=[3,3],padding="same",activation=tf.nn.relu),
    layers.Conv2D(512,kernel_size=[3,3],padding="same",activation=tf.nn.relu),
    layers.MaxPool2D(pool_size=[2,2],strides=2,padding="same"),

    #fifth block: convolution + pooling
    layers.Conv2D(512,kernel_size=[3,3],padding="same",activation=tf.nn.relu),
    layers.Conv2D(512,kernel_size=[3,3],padding="same",activation=tf.nn.relu),
    layers.MaxPool2D(pool_size=[2,2],strides=2,padding="same")
]

#data preprocessing function
def preprocess(x,y):
    #scale pixel values into [0, 1]
    x = tf.cast(x,dtype=tf.float32) /255.
    y = tf.cast(y,dtype=tf.int32)
    return x,y

#load the CIFAR-100 dataset
(x,y),(x_test,y_test) = datasets.cifar100.load_data()
#squeeze out the extra label dimension: (N, 1) => (N,)
y = tf.squeeze(y,axis=1)
y_test = tf.squeeze(y_test,axis=1)

print(x.shape,y.shape,x_test.shape,y_test.shape)

#build the tf.data pipelines for training and testing
train_db = tf.data.Dataset.from_tensor_slices((x,y))
train_db = train_db.shuffle(1000).map(preprocess).batch(64)

test_db = tf.data.Dataset.from_tensor_slices((x_test,y_test))
test_db = test_db.shuffle(1000).map(preprocess).batch(64)

def main():
    # build the convolutional network from the list of layers
    # input [b,32,32,3] => [b,1,1,512]
    conv_net=Sequential(conv_layers)
    # conv_net.build(input_shape=[None, 32, 32, 3])
    # # test with an arbitrary random input
    # x = tf.random.normal([4,32,32,3])
    # out = conv_net(x)
    # print(out.shape)

    # build the fully connected network: three Dense layers
    fc_net = Sequential([
        layers.Dense(256,activation=tf.nn.relu),
        layers.Dense(128,activation=tf.nn.relu),
        layers.Dense(100,activation=None)
    ])

    # specify the input shape of the convolutional network
    conv_net.build(input_shape=[None, 32, 32, 3])
    # specify the input shape of the fully connected network
    fc_net.build(input_shape=[None,512])
    # set up the optimizer
    optimizer = optimizers.Adam(learning_rate=1e-4)

    # list concatenation: [1,2] + [3,4] = [1,2,3,4]
    variables =conv_net.trainable_variables + fc_net.trainable_variables
    # training loop
    for epoch in range(50):
        for step,(x,y) in enumerate(train_db):
            with tf.GradientTape() as tape:
                # [b, 32, 32, 3] => [b, 1, 1, 512]
                out =conv_net(x)
                # flatten, => [b, 512]
                # flatten into a single vector per sample
                out=tf.reshape(out,[-1,512])
                # [b, 512] => [b, 100]
                logits =fc_net(out)
                # one-hot encode the labels, depth 100: [b] => [b, 100]
                y_onehot =tf.one_hot(y,depth=100)
                # compute the cross-entropy loss
                loss =tf.losses.categorical_crossentropy(y_onehot,logits,from_logits=True)
                # average the loss over the batch
                loss = tf.reduce_mean(loss)

            grads = tape.gradient(loss,variables)
            optimizer.apply_gradients(zip(grads,variables))

            # print the loss every 100 steps
            if step % 100==0:
                print(epoch,step,'loss:',float(loss))

        # evaluate accuracy on the test set after each epoch
        total_num = 0
        total_correct = 0
        for x,y in test_db:
            out = conv_net(x)
            out = tf.reshape(out,[-1,512])
            logits = fc_net(out)
            prob = tf.nn.softmax(logits,axis=1)
            pred = tf.argmax(prob,axis=1)
            pred = tf.cast(pred,dtype=tf.int32)

            # compare predictions with ground-truth labels and count the correct ones
            correct = tf.cast(tf.equal(pred,y),dtype=tf.int32)
            correct = tf.reduce_sum(correct)

            total_num += x.shape[0]
            total_correct += int(correct)

        acc = total_correct / total_num
        print(epoch,'acc:',acc)




if __name__ == "__main__":
    main()

Summary:

The convolution workflow in brief:
0. Data preprocessing (download the data, adjust its format): training set (50000, 32, 32, 3) with labels (50000,), test set (10000, 32, 32, 3) with labels (10000,)
1. conv_layers: build the convolutional layers
2. fc_net: build the fully connected layers
3. Connect the convolutional and fully connected stacks with Sequential()
4. Set the optimizer and loss function and iterate over the training data
5. Evaluate accuracy on the test set
A more compact compile/fit version of the same pipeline is sketched below.
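
For reference, here is a minimal sketch (not part of the original walkthrough) that expresses the same pipeline with Keras's compile/fit API instead of a manual GradientTape loop. It assumes the conv_layers list, train_db and test_db defined above; the Dense layer sizes match the fc_net used earlier.

model = Sequential(conv_layers + [
    layers.Flatten(),                          # [b,1,1,512] => [b,512]
    layers.Dense(256, activation=tf.nn.relu),
    layers.Dense(128, activation=tf.nn.relu),
    layers.Dense(100, activation=None)         # logits for the 100 CIFAR-100 classes
])
model.build(input_shape=[None, 32, 32, 3])

model.compile(
    optimizer=optimizers.Adam(learning_rate=1e-4),
    # labels in train_db are plain integers, so the sparse variant of the loss is used
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])

model.fit(train_db, epochs=50, validation_data=test_db)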

Before working through this, it helps to review a few basics:

import tensorflow as tf

t = [1,2,3,4,5,6,7,8,9]
n_t = tf.reshape(t,[3,3])
print(n_t)
# tf.Tensor(
# [[1 2 3]
#  [4 5 6]
#  [7 8 9]], shape=(3, 3), dtype=int32)
a1 = tf.random.normal([1,2,3,1])
print(a1)
# tf.Tensor(
# [[[[-0.61823624]
#    [ 0.39007753]
#    [-1.216511  ]]
#
#   [[ 0.7147906 ]
#    [ 0.04909178]
#    [-0.9400272 ]]]], shape=(1, 2, 3, 1), dtype=float32)
#remove the first dimension (it has size 1)
a2 = tf.squeeze(a1, axis=0)
print(a2.shape)
# (2, 3, 1)
a3 = tf.squeeze(a1)
print(a3.shape)
# (2, 3)
# add a dimension: expand_dims()
x = tf.random.normal([2,3])
y = tf.expand_dims(x, axis=0)
print(y.shape)
# (1, 2, 3)

#swap dimensions (transpose)
a1 = tf.random.normal([10, 11, 12])
a2 = tf.transpose(a1, perm = [0, 2, 1])
print(a2.shape)
# (10, 12, 11)

items = [(1, 3), (2, 1), (3, 3),(4,5)]
# by default the index starts at 0
for i, item in enumerate(items):
    print(i,item)
# 0 (1, 3)
# 1 (2, 1)
# 2 (3, 3)
# 3 (4, 5)
# with a start value of 1, the index begins at 1
for i, item in enumerate(items, 1):
    print(i, item)
# output:
# 1 (1, 3)
# 2 (2, 1)
# 3 (3, 3)
# 4 (4, 5)


names = ['1', '2', '3', '4']
jobs = ['a', 'b', 'c', 'd','f']  # the lengths may differ; zip stops at the shorter list
# iterate over several lists in parallel
for name, job in zip(names, jobs):
    print(name, job)

# 1 a
# 2 b
# 3 c
# 4 d

# Commonly used constructors for random data and consecutive sequences (TF2 names):
# tf.random.normal   normal distribution
# tf.random.uniform  uniform distribution
# tf.random.poisson  Poisson distribution
# tf.random.gamma    gamma distribution
# tf.range           consecutive sequence
# tf.zeros           all-zero tensor
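
A quick illustrative sketch of these constructors (shapes and parameters chosen arbitrarily):

import tensorflow as tf

r1 = tf.random.normal([2, 3], mean=0.0, stddev=1.0)   # normal distribution
r2 = tf.random.uniform([2, 3], minval=0, maxval=10)   # uniform distribution
r3 = tf.random.poisson([2, 3], lam=2.0)               # Poisson distribution (scalar rate)
r4 = tf.random.gamma([2, 3], alpha=2.0)               # gamma distribution (scalar alpha)
seq = tf.range(0, 10, delta=2)                        # [0 2 4 6 8]
z = tf.zeros([2, 2])                                  # all zeros
print(r1.shape, r2.shape, r3.shape, r4.shape, seq.numpy(), z.shape)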

# Special tensors can be built with the functions below:
# tf.Variable creates a variable (in TF1, tf.global_variables_initializer() initialized all variables and w.initializer a single one; TF2 eager mode needs neither)
# tf.constant creates a constant tensor
# tf.placeholder (TF1 only; removed in TF2)
# tf.SparseTensor creates a sparse tensor
# tf.cast() changes a tensor's data type
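
A small TF2 sketch of these special tensors (tf.placeholder is skipped because it no longer exists in TF2):

import tensorflow as tf

w = tf.Variable(tf.random.normal([3, 3]))        # trainable variable; no explicit initializer needed in TF2
c = tf.constant([[1, 2], [3, 4]])                # constant tensor
sp = tf.SparseTensor(indices=[[0, 0], [1, 2]],   # sparse tensor with two non-zero entries
                     values=[1, 2],
                     dense_shape=[3, 4])
f = tf.cast(c, dtype=tf.float32)                 # change the dtype from int32 to float32
print(w.shape, c.dtype, tf.sparse.to_dense(sp).shape, f.dtype)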

# from tensorflow.keras.models import Model
# model = Model(ipt,opt)    # define the model, passing the input and output layers
# model.summary()           # print the model's layer structure



# rmsprop = optimizers.RMSprop(learning_rate=0.001, rho=0.9)
# model.compile(optimizer=rmsprop, loss='categorical_crossentropy', metrics=['acc'])
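
To make the commented lines above concrete, here is a hedged, self-contained sketch; the Conv2D/Dense layer sizes are hypothetical and only serve to illustrate Model(ipt, opt), summary() and compile():

import tensorflow as tf
from tensorflow.keras import layers, optimizers, Model

# hypothetical input and output layers, just to illustrate Model(ipt, opt)
ipt = layers.Input(shape=(32, 32, 3))
x = layers.Conv2D(16, kernel_size=3, padding='same', activation='relu')(ipt)
x = layers.GlobalAveragePooling2D()(x)
opt = layers.Dense(10, activation='softmax')(x)

model = Model(ipt, opt)
model.summary()                                    # print the layer structure

rmsprop = optimizers.RMSprop(learning_rate=0.001, rho=0.9)
model.compile(optimizer=rmsprop,
              loss='categorical_crossentropy',
              metrics=['acc'])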
#
#
# Common loss functions
#
# mean_squared_error: mean squared error (MSE)
# mean_absolute_error: mean absolute error (MAE)
# binary_crossentropy: for binary classification
# categorical_crossentropy: for multi-class classification
#
#
# Common evaluation metrics
# mae
# mse
# binary_accuracy
# categorical_accuracy
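
These losses can also be called directly on tensors; a small illustrative example with arbitrarily chosen values:

import tensorflow as tf

y_true = tf.constant([[0., 1., 0.], [1., 0., 0.]])        # one-hot labels
y_pred = tf.constant([[0.1, 0.8, 0.1], [0.6, 0.3, 0.1]])  # predicted probabilities

mse = tf.keras.losses.mean_squared_error(y_true, y_pred)
mae = tf.keras.losses.mean_absolute_error(y_true, y_pred)
cce = tf.keras.losses.categorical_crossentropy(y_true, y_pred)
print(mse.numpy(), mae.numpy(), cce.numpy())

# binary_crossentropy for a two-class problem
yb_true = tf.constant([[1.], [0.]])
yb_pred = tf.constant([[0.9], [0.2]])
bce = tf.keras.losses.binary_crossentropy(yb_true, yb_pred)
print(bce.numpy())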

 
