TensorFlow from Beginner to Expert: Handwritten Digit Recognition

import tensorflow as tf
print(tf.__version__)
2.6.0

1. The Dataset

mnist = tf.keras.datasets.mnist
(train_images,train_labels),(test_images,test_labels) = mnist.load_data()
train_images.shape,train_labels.shape
((60000, 28, 28), (60000,))
# Visualize an image
import matplotlib.pyplot as plt

def plot_image(image):
    plt.imshow(image.reshape(28,28),cmap='binary')
    plt.show()
plot_image(train_images[0])

# Preprocess: flatten each 28x28 image into a length-784 vector
x_data = tf.cast(train_images.reshape(-1,28*28),dtype=tf.float32)


# Split into training and validation sets
valid_percent = 0.2
train_num = int(x_data.shape[0] * (1-valid_percent))

x_train = x_data[:train_num] # training set
y_train = train_labels[:train_num]

x_valid = x_data[train_num:] # validation set
y_valid = train_labels[train_num:]

x_test = tf.cast(test_images.reshape(-1,28*28),dtype=tf.float32) # test set
y_test = test_labels
x_data.shape[0] * 0.8 # sanity check: 80% of the 60,000 training images
48000.0
# Normalize pixel values to the [0, 1] range
x_train = x_train / 255.0
x_valid = x_valid / 255.0
x_test = x_test / 255.0
# One-hot encode the labels
y_train = tf.one_hot(y_train,depth=10)
y_valid = tf.one_hot(y_valid,depth=10)
y_test = tf.one_hot(y_test,depth=10)
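
As a quick illustration (a small example added here, not part of the original notebook), tf.one_hot turns each integer label into a length-10 indicator vector, which is the format categorical_crossentropy expects for y_true:

# Example: one-hot encoding a few labels by hand
demo_labels = tf.constant([3, 0, 7])
print(tf.one_hot(demo_labels, depth=10).numpy())
# [[0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]
#  [1. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
#  [0. 0. 0. 0. 0. 0. 0. 1. 0. 0.]]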

2. The Model

W = tf.Variable(tf.random.normal(shape=(28*28,10))) # weights: one column per digit class
B = tf.Variable(tf.zeros(10)) # one bias per class
def model(x,w,b):
    model_ = tf.matmul(x,w) + b
    return tf.nn.softmax(model_) # per-class probabilities
def loss(x,y,w,b):
    pred = model(x,w,b)
    loss_ = tf.keras.losses.categorical_crossentropy(y_true=y,y_pred=pred)
    return tf.reduce_mean(loss_) # mean cross-entropy over the batch
def grad(x,y,w,b):
    with tf.GradientTape() as tape:
        loss_ = loss(x,y,w,b)
    return tape.gradient(loss_,[w,b]) # gradients of the loss w.r.t. w and b
# Accuracy: fraction of samples whose predicted class matches the true label
def accuracy(x,y,w,b):
    pred = model(x,w,b)
    correct_prediction = tf.equal(tf.argmax(pred,1),tf.argmax(y,1))
    return tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
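
Written out, the model and loss defined above are plain softmax regression with a cross-entropy objective:

$$\hat{y} = \mathrm{softmax}(xW + b), \qquad \mathrm{softmax}(z)_j = \frac{e^{z_j}}{\sum_{k=1}^{10} e^{z_k}}$$

$$L(W,b) = -\frac{1}{N}\sum_{i=1}^{N}\sum_{j=1}^{10} y_{ij}\,\log \hat{y}_{ij}$$

where y is the one-hot label matrix and N is the number of samples in the batch; accuracy simply compares the argmax of the predicted probabilities with the argmax of the one-hot labels.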

3. Training

training_epochs = 20
learning_rate = 0.001
batch_size = 50
total_step = train_num // batch_size
optimizer = tf.keras.optimizers.Adam(learning_rate)
train_loss_list = []
valid_loss_list = []
train_acc_list = []
valid_acc_list = []
for epoch in range(training_epochs):
    for step in range(total_step):
        xs = x_train[step*batch_size:(step+1)*batch_size]
        ys = y_train[step*batch_size:(step+1)*batch_size]
#         print(xs.shape,ys.shape)
        grads = grad(xs,ys,W,B)
        optimizer.apply_gradients(zip(grads,[W,B]))
    train_loss = loss(x_train,y_train,W,B).numpy()
    valid_loss = loss(x_valid,y_valid,W,B).numpy()
    train_acc = accuracy(x_train,y_train,W,B).numpy()
    valid_acc = accuracy(x_valid,y_valid,W,B).numpy()
    train_acc_list.append(train_acc)
    valid_acc_list.append(valid_acc)
    train_loss_list.append(train_loss)
    valid_loss_list.append(valid_loss)
    print(f"{epoch+1}: train_loss={train_loss} train_acc={train_acc} valid_loss={valid_loss} valid_acc={valid_acc}")
1: train_loss=1.5718567371368408 train_acc=0.706458330154419 valid_loss=1.4870891571044922 valid_acc=0.7197499871253967
2: train_loss=0.9688711762428284 train_acc=0.799958348274231 valid_loss=0.9067797660827637 valid_acc=0.8144166469573975
3: train_loss=0.7616745829582214 train_acc=0.8380833268165588 valid_loss=0.7226295471191406 valid_acc=0.8461666703224182
4: train_loss=0.6529313921928406 train_acc=0.8577708601951599 valid_loss=0.6307686567306519 valid_acc=0.8637499809265137
5: train_loss=0.5832834839820862 train_acc=0.8693541884422302 valid_loss=0.5737946629524231 valid_acc=0.8729166388511658
6: train_loss=0.5349117517471313 train_acc=0.8781874775886536 valid_loss=0.5341132283210754 valid_acc=0.8794999718666077
7: train_loss=0.49877774715423584 train_acc=0.8850208520889282 valid_loss=0.504259467124939 valid_acc=0.8845833539962769
8: train_loss=0.4704902470111847 train_acc=0.8897500038146973 valid_loss=0.4805634021759033 valid_acc=0.8882499933242798
9: train_loss=0.4476514160633087 train_acc=0.8929374814033508 valid_loss=0.4613181948661804 valid_acc=0.89041668176651
10: train_loss=0.42855820059776306 train_acc=0.8967708349227905 valid_loss=0.44507089257240295 valid_acc=0.8933333158493042
11: train_loss=0.41240566968917847 train_acc=0.8991041779518127 valid_loss=0.4314454197883606 valid_acc=0.8954166769981384
12: train_loss=0.39855533838272095 train_acc=0.9014999866485596 valid_loss=0.4198707342147827 valid_acc=0.8973333239555359
13: train_loss=0.38649922609329224 train_acc=0.9040833115577698 valid_loss=0.40982159972190857 valid_acc=0.8990833163261414
14: train_loss=0.3758573532104492 train_acc=0.9057083129882812 valid_loss=0.40100958943367004 valid_acc=0.9013333320617676
15: train_loss=0.3664068877696991 train_acc=0.9076458215713501 valid_loss=0.3931334316730499 valid_acc=0.9024166464805603
16: train_loss=0.35793840885162354 train_acc=0.9091874957084656 valid_loss=0.3861088752746582 valid_acc=0.9037500023841858
17: train_loss=0.3502780497074127 train_acc=0.9105416536331177 valid_loss=0.37972745299339294 valid_acc=0.9049999713897705
18: train_loss=0.34334996342658997 train_acc=0.9118541479110718 valid_loss=0.3739684522151947 valid_acc=0.905916690826416
19: train_loss=0.3370097279548645 train_acc=0.9130833148956299 valid_loss=0.3686791658401489 valid_acc=0.906166672706604
20: train_loss=0.33120784163475037 train_acc=0.91427081823349 valid_loss=0.3638934791088104 valid_acc=0.9070833325386047
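
One caveat about the loss used here: applying softmax inside the model and then computing cross-entropy on the resulting probabilities can become numerically unstable when a predicted probability approaches zero. A common alternative (a sketch, not the original author's code) is to keep the model output as raw logits and let the loss apply softmax internally:

# Alternative sketch: compute the loss from raw logits for better numerical stability
def loss_from_logits(x, y, w, b):
    logits = tf.matmul(x, w) + b  # no softmax here
    loss_ = tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits)
    return tf.reduce_mean(loss_)

The rest of the training loop would stay the same, since tf.GradientTape differentiates through either version.
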
# Plot the loss curves
import matplotlib.pyplot as plt

plt.plot(train_loss_list,'r',label='train')
plt.plot(valid_loss_list,'g',label='valid')
plt.legend(loc=1)

# Plot the accuracy curves
import matplotlib.pyplot as plt

plt.plot(train_acc_list,'r',label='train acc')
plt.plot(valid_acc_list,'g',label='valid acc')
plt.legend(loc=1)

4. Prediction

# Evaluate model accuracy on the test set
acc_test = accuracy(x_test,y_test,W,B).numpy()
acc_test
0.9087
# Define a prediction function that returns the most likely digit for each input
def predict(x,w,b):
  pred = model(x,w,b)
  res = tf.argmax(pred,1).numpy()
  return res
pred_test = predict(x_test,W,B)
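
With the predicted labels in hand, it is easy to count how many test images the model gets wrong (a small NumPy sketch, not in the original post; it assumes test_labels is still the raw integer array):

import numpy as np

# Indices of the misclassified test images
wrong_idx = np.where(pred_test != test_labels)[0]
print(f"misclassified: {len(wrong_idx)} / {len(test_labels)}")
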
# Define a visualization helper
import numpy as np
import matplotlib.pyplot as plt

def plot_image_labels_prediction(images, # array of images
                 labels, # ground-truth labels
                 preds, # predicted labels
                 index=0, # start from the index-th sample
                 num=10 # show 10 images by default
                 ):
  fig = plt.gcf() # get the current figure
  fig.set_size_inches(10,4)
  if num > 10:
    num = 10 # show at most 10 subplots
  for i in range(0,num):
    ax = plt.subplot(2,5,i+1)
    ax.imshow(np.reshape(images[index],(28,28)),cmap='binary') # show the index-th image
    title = 'label='+str(labels[index]) # build the title for this subplot
    if len(preds) > 0:
      title += ',predict='+str(preds[index])

    ax.set_title(title,fontsize=10)
    ax.set_xticks([]) # hide the axis ticks
    ax.set_yticks([])
    index = index + 1
  plt.show() # show the figure once, after all subplots have been drawn
plot_image_labels_prediction(test_images,test_labels,pred_test,9000,20)