CNN training error when running TensorFlow Python to recognize images of dogs and cats

When I try to train my CNN in Python TensorFlow to recognize images of cats and dogs, the first epoch executes successfully but then an error occurs. I would appreciate any help resolving it. I followed a blog and then adapted the CNN they used to my problem, but it did not work out well.

There is also a dataset.py file that loads the cat and dog data for training. Based on what I found on Google, I suspect the error might be caused by a recent TensorFlow update, but I am not sure, since I am a beginner with CNN-type neural networks.

Command-line log:

Training Epoch 1 --- Training Accuracy:  50.0%, Validation Accuracy:  43.8%,  
Validation Loss: 0.703
2018-03-05 17:14:34.438263: W c:\l\tensorflow_1501918863922\work\tensorflow-1.2.1\tensorflow\core\framework\op_kernel.cc:1158] Not found: Failed to create a directory:
Traceback (most recent call last):
File "C:UsersDitiMiniconda3envsditilibsite-
packages	ensorflowpythonclientsession.py", line 1139, in _do_call
return fn(*args)
File "C:UsersDitiMiniconda3envsditilibsite-
packages	ensorflowpythonclientsession.py", line 1121, in _run_fn
status, run_metadata)
File "C:UsersDitiMiniconda3envsditilibcontextlib.py", line 88, in __ 
_exit__
next(self.gen)
File "C:UsersDitiMiniconda3envsditilibsite-
packages	ensorflowpythonframeworkerrors_impl.py", line 466, in 
raise_exception_on_not_ok_status
pywrap_tensorflow.TF_GetCode(status))
tensorflow.python.framework.errors_impl.NotFoundError: Failed to create a 
directory:
     [[Node: save/SaveV2 = SaveV2[dtypes=[DT_FLOAT, DT_FLOAT, DT_FLOAT, 
DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, 
DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, 
DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, 
DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, 
DT_FLOAT], _device="/job:localhost/replica:0/task:0/cpu:0"]
(_arg_save/Const_0_0, save/SaveV2/tensor_names, 
save/SaveV2/shape_and_slices, Variable, Variable/Adam, Variable/Adam_1, 
Variable_1, Variable_1/Adam, Variable_1/Adam_1, Variable_2, Variable_2/Adam, 
Variable_2/Adam_1, Variable_3, Variable_3/Adam, Variable_3/Adam_1, 
Variable_4, Variable_4/Adam, Variable_4/Adam_1, Variable_5, Variable_5/Adam, 
Variable_5/Adam_1, Variable_6, Variable_6/Adam, Variable_6/Adam_1, 
Variable_7, Variable_7/Adam, Variable_7/Adam_1, Variable_8, Variable_8/Adam, 
Variable_8/Adam_1, Variable_9, Variable_9/Adam, Variable_9/Adam_1, 
beta1_power, beta2_power)]]

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
File "C:UsersDitiMiniconda3envsditilibsite-
packages	ensorflowpython	rainingsaver.py", line 1472, in save
{self.saver_def.filename_tensor_name: checkpoint_file})
File "C:UsersDitiMiniconda3envsditilibsite-
packages	ensorflowpythonclientsession.py", line 789, in run
run_metadata_ptr)
File "C:UsersDitiMiniconda3envsditilibsite-
packages	ensorflowpythonclientsession.py", line 997, in _run
feed_dict_string, options, run_metadata)
File "C:UsersDitiMiniconda3envsditilibsite-
packages	ensorflowpythonclientsession.py", line 1132, in _do_run
target_list, options, run_metadata)
File "C:UsersDitiMiniconda3envsditilibsite-
packages	ensorflowpythonclientsession.py", line 1152, in _do_call
raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.NotFoundError: Failed to create a 
directory:
     [[Node: save/SaveV2 = SaveV2[dtypes=[DT_FLOAT, DT_FLOAT, DT_FLOAT, 
 DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, 
DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, 
DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, 
DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, 
DT_FLOAT], _device="/job:localhost/replica:0/task:0/cpu:0"]
(_arg_save/Const_0_0, save/SaveV2/tensor_names, 
save/SaveV2/shape_and_slices, Variable, Variable/Adam, Variable/Adam_1, 
Variable_1, Variable_1/Adam, Variable_1/Adam_1, Variable_2, Variable_2/Adam, 
Variable_2/Adam_1, Variable_3, Variable_3/Adam, Variable_3/Adam_1, 
Variable_4, Variable_4/Adam, Variable_4/Adam_1, Variable_5, Variable_5/Adam, 
Variable_5/Adam_1, Variable_6, Variable_6/Adam, Variable_6/Adam_1, 
Variable_7, Variable_7/Adam, Variable_7/Adam_1, Variable_8, Variable_8/Adam, 
Variable_8/Adam_1, Variable_9, Variable_9/Adam, Variable_9/Adam_1, 
beta1_power, beta2_power)]]

Caused by op 'save/SaveV2', defined at:
File "train.py", line 177, in <module>
saver = tf.train.Saver()
File "C:UsersDitiMiniconda3envsditilibsite-
packages	ensorflowpython	rainingsaver.py", line 1139, in __init__
self.build()
File "C:UsersDitiMiniconda3envsditilibsite-
packages	ensorflowpython	rainingsaver.py", line 1170, in build
restore_sequentially=self._restore_sequentially)
File "C:UsersDitiMiniconda3envsditilibsite-
packages	ensorflowpython	rainingsaver.py", line 689, in build
save_tensor = self._AddSaveOps(filename_tensor, saveables)
File "C:UsersDitiMiniconda3envsditilibsite-
packages	ensorflowpython	rainingsaver.py", line 276, in _AddSaveOps
save = self.save_op(filename_tensor, saveables)
File "C:UsersDitiMiniconda3envsditilibsite-
packages	ensorflowpython	rainingsaver.py", line 219, in save_op
tensors)
File "C:UsersDitiMiniconda3envsditilibsite-
packages	ensorflowpythonopsgen_io_ops.py", line 745, in save_v2
tensors=tensors, name=name)
File "C:UsersDitiMiniconda3envsditilibsite-
packages	ensorflowpythonframeworkop_def_library.py", line 767, in 
apply_op
op_def=op_def)
File "C:UsersDitiMiniconda3envsditilibsite-
packages	ensorflowpythonframeworkops.py", line 2506, in create_op
original_op=self._default_original_op, op_def=op_def)
File "C:UsersDitiMiniconda3envsditilibsite-
packages	ensorflowpythonframeworkops.py", line 1269, in __init__
self._traceback = _extract_stack()

NotFoundError (see above for traceback): Failed to create a directory:
     [[Node: save/SaveV2 = SaveV2[dtypes=[DT_FLOAT, DT_FLOAT, DT_FLOAT, 
DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, 
DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, 
DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, 
DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, DT_FLOAT, 
DT_FLOAT], _device="/job:localhost/replica:0/task:0/cpu:0"]
(_arg_save/Const_0_0, save/SaveV2/tensor_names, 
save/SaveV2/shape_and_slices, Variable, Variable/Adam, Variable/Adam_1, 
Variable_1, Variable_1/Adam, Variable_1/Adam_1, Variable_2, Variable_2/Adam, 
Variable_2/Adam_1, Variable_3, Variable_3/Adam, Variable_3/Adam_1, 
Variable_4, Variable_4/Adam, Variable_4/Adam_1, Variable_5, Variable_5/Adam, 
Variable_5/Adam_1, Variable_6, Variable_6/Adam, Variable_6/Adam_1, 
Variable_7, Variable_7/Adam, Variable_7/Adam_1, Variable_8, Variable_8/Adam, 
Variable_8/Adam_1, Variable_9, Variable_9/Adam, Variable_9/Adam_1, 
beta1_power, beta2_power)]]


During handling of the above exception, another exception occurred:

Traceback (most recent call last):
File "train.py", line 205, in <module>
train(num_iteration=3000)
File "train.py", line 200, in train
saver.save(session, 'dogs-cats-model')
File "C:UsersDitiMiniconda3envsditilibsite-
packages	ensorflowpython	rainingsaver.py", line 1488, in save
raise exc

ValueError: Parent directory of dogs-cats-model doesn't exist, can't save.

My code:

import dataset
import tensorflow as tf
import time
from datetime import timedelta
import math
import random
import numpy as np

#Adding Seed so that random initialization is consistent
from numpy.random import seed
seed(1)
from tensorflow import set_random_seed
set_random_seed(2)


batch_size = 32

#Prepare input data
classes = ['dogs','cats']
num_classes = len(classes)

# 20% of the data will automatically be used for validation
validation_size = 0.2
img_size = 128
num_channels = 3
train_path='training_data'

# We shall load all the training and validation images and labels into memory using OpenCV and use that during training
data = dataset.read_train_sets(train_path, img_size, classes,
                               validation_size=validation_size)


print("Complete reading input data. Will Now print a snippet of it")
print("Number of files in Training-
set:		{}".format(len(data.train.labels)))
print("Number of files in Validation-
set:	{}".format(len(data.valid.labels)))



session = tf.Session()
x = tf.placeholder(tf.float32, shape=[None, img_size,img_size,num_channels], 
name='x')

## labels
y_true = tf.placeholder(tf.float32, shape=[None, num_classes], 
name='y_true')
y_true_cls = tf.argmax(y_true, dimension=1)



##Network graph params
filter_size_conv1 = 3 
num_filters_conv1 = 32

filter_size_conv2 = 3
num_filters_conv2 = 32

filter_size_conv3 = 3
num_filters_conv3 = 64

fc_layer_size = 128

def create_weights(shape):
    return tf.Variable(tf.truncated_normal(shape, stddev=0.05))

def create_biases(size):
    return tf.Variable(tf.constant(0.05, shape=[size]))



def create_convolutional_layer(input,
                               num_input_channels,
                               conv_filter_size,
                               num_filters):

    ## We shall define the weights that will be trained using the create_weights function.
    weights = create_weights(shape=[conv_filter_size, conv_filter_size,
                                    num_input_channels, num_filters])
    ## We create biases using the create_biases function. These are also trained.
    biases = create_biases(num_filters)

    ## Creating the convolutional layer
    layer = tf.nn.conv2d(input=input,
                         filter=weights,
                         strides=[1, 1, 1, 1],
                         padding='SAME')

    layer += biases

    ## We shall be using max-pooling.
    layer = tf.nn.max_pool(value=layer,
                           ksize=[1, 2, 2, 1],
                           strides=[1, 2, 2, 1],
                           padding='SAME')
    ## Output of pooling is fed to Relu which is the activation function for us.
    layer = tf.nn.relu(layer)

    return layer



def create_flatten_layer(layer):
    # We know that the shape of the layer will be [batch_size img_size img_size num_channels]
    # But let's get it from the previous layer.
    layer_shape = layer.get_shape()

    ## Number of features will be img_height * img_width * num_channels. But we shall calculate it in place of hard-coding it.
    num_features = layer_shape[1:4].num_elements()

    ## Now, we Flatten the layer so we shall have to reshape to num_features
    layer = tf.reshape(layer, [-1, num_features])

    return layer


def create_fc_layer(input,
                    num_inputs,
                    num_outputs,
                    use_relu=True):

    # Let's define trainable weights and biases.
    weights = create_weights(shape=[num_inputs, num_outputs])
    biases = create_biases(num_outputs)

    # A fully connected layer takes input x and produces wx+b. Since these are matrices, we use the matmul function in Tensorflow
    layer = tf.matmul(input, weights) + biases
    if use_relu:
        layer = tf.nn.relu(layer)

    return layer


layer_conv1 = create_convolutional_layer(input=x,
           num_input_channels=num_channels,
           conv_filter_size=filter_size_conv1,
           num_filters=num_filters_conv1)
layer_conv2 = create_convolutional_layer(input=layer_conv1,
           num_input_channels=num_filters_conv1,
           conv_filter_size=filter_size_conv2,
           num_filters=num_filters_conv2)

layer_conv3= create_convolutional_layer(input=layer_conv2,
           num_input_channels=num_filters_conv2,
           conv_filter_size=filter_size_conv3,
           num_filters=num_filters_conv3)

layer_flat = create_flatten_layer(layer_conv3)

layer_fc1 = create_fc_layer(input=layer_flat,
                 num_inputs=layer_flat.get_shape()[1:4].num_elements(),
                 num_outputs=fc_layer_size,
                 use_relu=True)

layer_fc2 = create_fc_layer(input=layer_fc1,
                 num_inputs=fc_layer_size,
                 num_outputs=num_classes,
                 use_relu=False) 

y_pred = tf.nn.softmax(layer_fc2,name='y_pred')

y_pred_cls = tf.argmax(y_pred, dimension=1)
session.run(tf.global_variables_initializer())
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=layer_fc2,
                                                labels=y_true)
cost = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cost)
correct_prediction = tf.equal(y_pred_cls, y_true_cls)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))


session.run(tf.global_variables_initializer()) 


def show_progress(epoch, feed_dict_train, feed_dict_validate, val_loss):
    acc = session.run(accuracy, feed_dict=feed_dict_train)
    val_acc = session.run(accuracy, feed_dict=feed_dict_validate)
    msg = "Training Epoch {0} --- Training Accuracy: {1:>6.1%}, Validation Accuracy: {2:>6.1%},  Validation Loss: {3:.3f}"
    print(msg.format(epoch + 1, acc, val_acc, val_loss))

total_iterations = 0

saver = tf.train.Saver()
def train(num_iteration):
    global total_iterations

    for i in range(total_iterations,
                   total_iterations + num_iteration):

        x_batch, y_true_batch, _, cls_batch = data.train.next_batch(batch_size)
        x_valid_batch, y_valid_batch, _, valid_cls_batch = data.valid.next_batch(batch_size)

        feed_dict_tr = {x: x_batch,
                        y_true: y_true_batch}
        feed_dict_val = {x: x_valid_batch,
                         y_true: y_valid_batch}

        session.run(optimizer, feed_dict=feed_dict_tr)

        if i % int(data.train.num_examples / batch_size) == 0:
            val_loss = session.run(cost, feed_dict=feed_dict_val)
            epoch = int(i / int(data.train.num_examples / batch_size))

            show_progress(epoch, feed_dict_tr, feed_dict_val, val_loss)
            saver.save(session, 'dogs-cats-model')

    total_iterations += num_iteration

train(num_iteration=3000)
Answer

After the progress is displayed, the error occurs at the line saver.save(session, 'dogs-cats-model'). To fix it, change the path from 'dogs-cats-model' to './dogs-cats-model'. That should resolve the problem.
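A minimal sketch of the fix, assuming the same TensorFlow 1.x session and saver objects defined in the code above; the checkpoints sub-directory in the second variant is only an illustrative alternative, not something stated in the original answer:

import os

# Variant 1: make the parent directory explicit by saving into the current folder.
saver.save(session, './dogs-cats-model')

# Variant 2 (illustrative): save into a dedicated checkpoint folder, creating it first
# so the SaveV2 op never fails with "Failed to create a directory".
ckpt_dir = 'checkpoints'  # hypothetical folder name, not from the original answer
os.makedirs(ckpt_dir, exist_ok=True)
saver.save(session, os.path.join(ckpt_dir, 'dogs-cats-model'))

With the './' prefix the parent directory of the checkpoint resolves to '.', which already exists, instead of an empty string that the Saver cannot create on Windows.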
