AttributeError while using summary operation

Posted: 2017-06-16 04:18:02

【Question】:

I built a CNN using TensorFlow. The network trains fine, but I have a problem: I cannot visualize or plot graphs of the learning process.

So, following this tutorial, I implemented the commands needed to use TensorBoard.
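(TensorBoard itself is then launched from the terminal, typically with something like `tensorboard --logdir=/dir/to/log/files/`, pointed at the summaries directory used in the code below.)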

However, when I run the code I get the following error message:

AttributeError: 'module' object has no attribute 'scalar'

It refers to the following commands (specifically, the lines marked with **):

In the main function:

 W_conv1 = weight_variable([first_conv_kernel_size, first_conv_kernel_size, 1, first_conv_output_channels])
 with tf.name_scope('weights'):
     **variable_summaries(W_conv1)**

In the variable_summaries function:

def variable_summaries(var):
  with tf.name_scope('summaries'):
    mean = tf.reduce_mean(var)
    **tf.summary.scalar('mean', mean)**

What does this error message mean? I followed the tutorial step by step and I can't find the mistake.

Any help is appreciated, thanks! :)

The whole code:

import build_database_tuple
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np

# a few functions to initialize the weights of the layers properly (slightly positive biases, etc.)


def weight_variable(shape):
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial)


def bias_variable(shape):
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial)


# convolution and pooling layers definition
def conv2d(x, W):
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')


def max_pool_2x2(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')

def variable_summaries(var):
  """Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
  with tf.name_scope('summaries'):
    mean = tf.reduce_mean(var)
    tf.summary.scalar('mean', mean)
    with tf.name_scope('stddev'):
      stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
    tf.summary.scalar('stddev', stddev)
    tf.summary.scalar('max', tf.reduce_max(var))
    tf.summary.scalar('min', tf.reduce_min(var))
    tf.summary.histogram('histogram', var)

# from the previous code (mnist):
print('START')
# INITIAL PARAMETERS
# database:
data_home_dir='/home/dir/to/data/'
validation_ratio=(1.0/8)
patch_size=32
test_images_num=5000*1 # csv_batchsize * number of test batch files
train_images_num=78000+78000-test_images_num # posnum + negnum
# model parameters:
first_conv_kernel_size=5
first_conv_output_channels=32
sec_conv_kernel_size=5
sec_conv_output_channels=64
fc_vec_size=512
# train and test parameters
train_epoches_num=5
train_batch_size=100
test_batch_size=100
learning_rate=1*(10**(-4))
summaries_dir='/dir/to/log/files/'



# load data
folds = build_database_tuple.load_data(data_home_dir=data_home_dir,validation_ratio=validation_ratio,patch_size=patch_size)

# starting the session. using InteractiveSession we avoid building the entire computational graph before starting the session
sess = tf.InteractiveSession()

# start building the computational graph
# the 'None' leaves the batch size open for now
x = tf.placeholder(tf.float32, shape=[None, patch_size**2]) # input images - 32x32=1024
y_ = tf.placeholder(tf.float32, shape=[None, 2]) # output classes (using one-hot vectors)

# the variables for the linear layer
W = tf.Variable(tf.zeros([(patch_size**2),2])) # weights - 1024 input features and 2 outputs
b = tf.Variable(tf.zeros([2])) # biases - 2 classes

# initialize all the variables using the session, so that they can be used in it
sess.run(tf.initialize_all_variables())

# implementation of the regression model
y = tf.nn.softmax(tf.matmul(x,W) + b)

# Done!

# FIRST LAYER:
with tf.name_scope('layer1'):
    # build the first layer
    W_conv1 = weight_variable([first_conv_kernel_size, first_conv_kernel_size, 1, first_conv_output_channels]) # 5x5 patch, 1 input channel, 32 output channels (features)
    b_conv1 = bias_variable([first_conv_output_channels])

    x_image = tf.reshape(x, [-1,patch_size,patch_size,1]) # reshape x to a 4d tensor. dims 2,3 are the image dimensions, dim 4 is one color channel

    # apply the layers
    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
    h_pool1 = max_pool_2x2(h_conv1)

    with tf.name_scope('weights'):
        variable_summaries(W_conv1)
    with tf.name_scope('biases'):
        variable_summaries(b_conv1)

# SECOND LAYER:
with tf.name_scope('layer2'):
    # 64 features each 5x5 patch
    W_conv2 = weight_variable([sec_conv_kernel_size, sec_conv_kernel_size, first_conv_output_channels, sec_conv_output_channels]) # the 3rd dim is the input channel count, which must equal the first layer's output channels (the original patch_size only "worked" because both happen to be 32)
    b_conv2 = bias_variable([sec_conv_output_channels])

    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    h_pool2 = max_pool_2x2(h_conv2)

    with tf.name_scope('weights'):
        variable_summaries(W_conv2)
    with tf.name_scope('biases'):
        variable_summaries(b_conv2)



# FULLY CONNECTED LAYER:
with tf.name_scope('fc'):

    # 512 neurons; 8x8 is the feature map size after 2 pooling layers
    W_fc1 = weight_variable([(patch_size/4) * (patch_size/4) * sec_conv_output_channels, fc_vec_size])
    b_fc1 = bias_variable([fc_vec_size])

    h_pool2_flat = tf.reshape(h_pool2, [-1, (patch_size/4) * (patch_size/4) * sec_conv_output_channels])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

# dropout layer - meant to reduce over-fitting
with tf.name_scope('dropout'):

    keep_prob = tf.placeholder(tf.float32)
    tf.summary.scalar('dropout_keep_probability', keep_prob)
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

    with tf.name_scope('weights'):
        variable_summaries(W_fc1)
    with tf.name_scope('biases'):
        variable_summaries(b_fc1)


# READOUT LAYER:
with tf.name_scope('softmax'):
    # softmax regression
    W_fc2 = weight_variable([fc_vec_size, 2])
    b_fc2 = bias_variable([2])

    logits = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
    y_conv = tf.nn.softmax(logits)

    with tf.name_scope('weights'):
        variable_summaries(W_fc2)
    with tf.name_scope('biases'):
        variable_summaries(b_fc2)



# TRAIN AND EVALUATION:
with tf.name_scope('cross_entropy'):
    # cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_conv), reduction_indices=[1])) # can be numerically unstable. old working calculation
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, y_)) # pass the raw logits, not y_conv; feeding the softmax output would apply softmax twice
    tf.summary.scalar('cross_entropy', cross_entropy)

with tf.name_scope('train'):
    train_step = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)

with tf.name_scope('accuracy'):
    with tf.name_scope('correct_prediction'):
        correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    with tf.name_scope('accuracy'):
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('accuracy', accuracy)

# Merge all the summaries and write them out to summaries_dir
merged = tf.summary.merge_all()
train_writer = tf.train.SummaryWriter(summaries_dir + '/train', sess.graph)
test_writer = tf.train.SummaryWriter(summaries_dir + '/test')
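# note (assumption based on the API changes discussed below): from TensorFlow 0.12
# onwards tf.train.SummaryWriter is deprecated in favor of tf.summary.FileWriter,
# which takes the same constructor arguments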

#tf.global_variables_initializer().run()
sess.run(tf.initialize_all_variables())

# variables for the plotting process
p11 = []
p12 = []
p21 = []
p22 = []
f0 = plt.figure()
f1 = plt.figure()
train_accuracy=0

# starting the training process
for i in range((train_images_num*train_epoches_num)/train_batch_size):

    batch = folds.train.next_batch(train_batch_size) # fetch the next training batch ('batch' was undefined otherwise; assuming folds.train exposes next_batch() like folds.validation does)

    if i%50 == 0: # every 50 iterations
        #train_accuracy = accuracy.eval(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})


        # calculate validation accuracy
        val_batch = folds.validation.next_batch(train_batch_size)
        #val_accuracy = accuracy.eval(feed_dict={x: val_batch[0], y_: val_batch[1], keep_prob: 1.0})
        summary, val_accuracy = sess.run([merged, accuracy], feed_dict={x: val_batch[0], y_: val_batch[1], keep_prob: 1.0})
        test_writer.add_summary(summary, i)
        print('Accuracy at step %s: %s' % (i, val_accuracy))

    # The train step
    else:
        summary, _ = sess.run([merged, train_step], feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
        train_writer.add_summary(summary, i)

# Save Network
saver = tf.train.Saver()
save_path = saver.save(sess,'/dir/to/model/files/model.ckpt')
print("Model saved in file: %s" % save_path)

【Comments】:

Which TensorFlow version are you using? It seems TensorFlow 0.10 used tf.scalar_summary(), while tf.summary.scalar() should work on TensorFlow 0.12.

Thanks! Updating to 0.12 solved the problem!

【Answer 1】:

Following sunside's comment above, I updated my tensorflow version and the problem was solved.

Apparently, tf.scalar_summary() works on tensorflow version 0.10, but in newer versions (0.12 at least) it was renamed to tf.summary.scalar().
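For anyone who cannot upgrade right away, here is a minimal sketch of a compatibility fallback (the try/except and the `scalar_summary` alias are my own illustration, not part of the tutorial):

```python
import tensorflow as tf

# Prefer the new summary API (TF >= 0.12); fall back to the
# old spelling (tf.scalar_summary) on older versions.
try:
    scalar_summary = tf.summary.scalar
except AttributeError:
    scalar_summary = tf.scalar_summary

mean = tf.reduce_mean(tf.constant([1.0, 2.0, 3.0]))
scalar_summary('mean', mean)
```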

Running pip install -U tensorflow in the terminal fixed the problem immediately :)
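To double-check which version actually ended up installed after the upgrade:

```python
import tensorflow as tf
print(tf.__version__)  # tf.summary.scalar exists from 0.12 onwards
```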
