[ML] Implementing the Same Model in Keras and TensorFlow

Posted by otwos


Opening note: this post, compiled by cha138.com, walks through implementing the same model in both Keras and TensorFlow; hopefully it serves as a useful reference.
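Both scripts below depend on a small `input_data.Data` helper that yields mini-batches of 40x40x3 images with one-hot labels over 62 classes. The original helper is not included in the post; the following is only a minimal sketch of the interface the scripts assume, with random placeholder arrays standing in for the real dataset on disk.

import numpy as np

class Data(object):
    """Minimal sketch of the batch loader both scripts assume (placeholder data)."""
    def __init__(self, data_dir):
        # The real helper would load images and labels from data_dir;
        # random arrays are used here just to illustrate the expected shapes.
        self.train_X = np.random.rand(1000, 40, 40, 3).astype("float32")
        self.train_y = np.eye(62)[np.random.randint(0, 62, 1000)]
        self.test_X = np.random.rand(200, 40, 40, 3).astype("float32")
        self.test_y = np.eye(62)[np.random.randint(0, 62, 200)]

    def next_batch(self, batch_size, split):
        # Returns (images, one-hot labels) sampled from the requested split.
        X, y = (self.train_X, self.train_y) if split == "train" else (self.test_X, self.test_y)
        idx = np.random.choice(len(X), batch_size)
        return X[idx], y[idx]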

import tensorflow as tf
from input_data import Data
import matplotlib.pyplot as plt
import os


data=Data("./data/")


X=tf.placeholder(tf.float32,[None,40,40,3])
y=tf.placeholder(tf.float32,[None,62])
keep_prob=tf.placeholder(tf.float32)

conv1_1=tf.layers.conv2d(X,filters=6,kernel_size=4,strides=1,padding='same',activation=tf.nn.relu)
conv1_2=tf.layers.conv2d(conv1_1,filters=12,kernel_size=4,strides=1,padding='same',activation=tf.nn.relu)
pool1=tf.layers.max_pooling2d(conv1_2,pool_size=4,strides=2,padding='same')

dropout1=tf.nn.dropout(pool1,keep_prob=keep_prob)

conv2_1=tf.layers.conv2d(dropout1,filters=24,kernel_size=4,strides=1,padding='same',activation=tf.nn.relu)
conv2_2=tf.layers.conv2d(conv2_1,filters=48,kernel_size=4,strides=1,padding='same',activation=tf.nn.relu)
pool2=tf.layers.max_pooling2d(conv2_2,pool_size=4,strides=2,padding='same')
# After two stride-2 'same' poolings the 40x40 input becomes 10x10 with 48 channels: 10*10*48 = 4800.
print(pool2.shape)
reshape1=tf.reshape(pool2,[-1,4800])
print(reshape1.shape)
dropout2=tf.nn.dropout(reshape1,keep_prob=keep_prob)

dense1=tf.layers.dense(dropout2,units=62)

loss = tf.losses.softmax_cross_entropy(onehot_labels=y,logits=dense1)
step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
# tf.metrics.accuracy returns (value, update_op); taking [1] keeps the update op,
# so each sess.run call updates and returns the running accuracy.
accuracy = tf.metrics.accuracy(labels=tf.argmax(y,axis=1),predictions=tf.argmax(dense1,axis=1))[1]


plt.ion()
plt.show()

x_draw=[]
y_draw=[]

weight_path="./result/tf/weights.w"
weight_path_index="./result/tf/weights.w.index"
if not os.path.exists("./result/tf/"):
    os.makedirs("./result/tf/")  # make sure the save directory exists before saver.save


saver = tf.train.Saver()

with tf.Session() as sess:

    # local variables must be initialized for the counters used by tf.metrics.accuracy
    sess.run([tf.global_variables_initializer(),tf.local_variables_initializer()])

    if os.path.exists(weight_path_index):
        saver.restore(sess, weight_path)


    train_batch_size = 100
    test_batch_size = 100

    x_axis=0

    for i in range(50*50):
        X_train, y_train = data.next_batch(train_batch_size, 'train')
        _,loss_val,accuracy_val=sess.run([step,loss,accuracy],feed_dict={X:X_train,y:y_train,keep_prob:0.7})
        print("train\tloss_val=>" + str(loss_val) + "\t\taccuracy_val=>" + str(accuracy_val))

        if i%50==49:
            X_test, y_test = data.next_batch(test_batch_size, 'test')
            loss_val,accuracy_val=sess.run([loss,accuracy],feed_dict={X:X_test,y:y_test,keep_prob:1})
            print("test\tloss_val=>"+str(loss_val)+"\t\taccuracy_val=>"+str(accuracy_val))

            x_draw.append(x_axis)
            y_draw.append(accuracy_val)
            x_axis+=1

            plt.title("tf")
            plt.plot(x_draw, y_draw, color='b')
            plt.pause(0.1)
            saver.save(sess,weight_path)

    plt.savefig("./result/tf/result.png")
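The same network can be written much more compactly with Keras. The script below builds the equivalent architecture (two conv-conv-pool blocks, dropout, and a 62-way softmax head) and trains it on the same Data loader, though it uses Adam rather than plain gradient descent.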

  

# -*- coding: utf-8 -*-

from keras.models import Sequential
from keras.layers import Dense,Flatten,Dropout,Reshape
from keras.layers.convolutional import Conv2D,MaxPooling2D
from keras import backend as K
import matplotlib.pyplot as plt
import os
from input_data import Data
from keras.optimizers import Adam

data=Data('./data/')


def build4(_name):
    model = Sequential(name=_name)

    model.add(Conv2D(input_shape=(40, 40, 3), filters=6, kernel_size=4, strides=1, padding='same', activation='relu'))
    model.add(Conv2D(filters=12, kernel_size=4, strides=1, padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=4, strides=2, padding='same'))

    model.add(Dropout(0.3))

    model.add(Conv2D(filters=24, kernel_size=4, strides=1, padding='same', activation='relu'))
    model.add(Conv2D(filters=48, kernel_size=4, strides=1, padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=4, strides=2, padding='same'))


    model.add(Flatten())
    model.add(Dropout(0.3))

    model.add(Dense(units=62, activation='softmax'))

    model.compile(optimizer=Adam(lr=0.001), loss='categorical_crossentropy', metrics=['accuracy'])
    return model



# model=build1("x1")
# model=build2("x2")
# model=build3("x3")  # converges quickly
model=build4("x4")
# model=build5("x5")
# model=build6("x6")
# model=build7("x7")
# model=build8("x8")
# model=build9("x9")
# model=build10("x10")
# model=build11("x11")

model.summary()

weight_path="./result/"+model.name+"/weights.w"
if not os.path.exists("./result/"+model.name):
    os.makedirs("./result/"+model.name)

if os.path.exists(weight_path):
    model.load_weights(weight_path)

plt.ion()
plt.show()

x_draw=[]
y_draw=[]

train_batch_size=5000
test_batch_size=100

for i in range(50):
    X, y = data.next_batch(train_batch_size, 'train')
    model.fit(X,y,batch_size=100,epochs=1,verbose=1)
    model.save_weights(weight_path)


    X_test, y_test = data.next_batch(test_batch_size, 'test')
    loss, accuracy = model.evaluate(X_test,y_test,batch_size=test_batch_size)

    print("epoch"+str(i)+",accuracy=>"+str(accuracy))

    x_draw.append(i)
    y_draw.append(accuracy)

    plt.title(model.name)
    plt.plot(x_draw,y_draw,color='b')

    plt.pause(0.1)

plt.savefig("./result/"+model.name+"/result.png")
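Once training finishes, the saved weights can be reloaded for inference. A minimal sketch, reusing the build4 definition, weight_path, and Data loader from the script above:

model = build4("x4")
model.load_weights(weight_path)

X_eval, y_eval = data.next_batch(10, 'test')
probs = model.predict(X_eval)          # shape (10, 62): softmax probabilities per sample
pred_classes = probs.argmax(axis=1)    # predicted class index per sample
print(pred_classes)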