Error in Placeholder size and type in Tensorflow using CNN and LSTM

Posted: 2018-10-31 02:25:12

【Problem description】:

I combined a CNN and an LSTM using the following code:

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
import tensorflow as tf
import pyfftw
from scipy import signal
import xlrd
from tensorflow.python.tools import freeze_graph
from tensorflow.python.tools import optimize_for_inference_lib
import seaborn as sns

from sklearn.metrics import confusion_matrix

##matplotlib inline
plt.style.use('ggplot')


## define funtions
def read_data(file_path):
##    column_names = ['user-id','activity','timestamp', 'x-axis', 'y-axis', 'z-axis']
    column_names = ['activity','timestamp', 'Ax', 'Ay', 'Az', 'Lx', 'Ly', 'Lz', 'Gx', 'Gy', 'Gz', 'Mx', 'My', 'Mz'] ## 3 sensors
    data = pd.read_csv(file_path,header = None, names = column_names)
    return data

def feature_normalize(dataset):
    mu = np.mean(dataset,axis = 0)
    sigma = np.std(dataset,axis = 0)
    return (dataset - mu)/sigma

def plot_axis(ax, x, y, title):
    ax.plot(x, y)
    ax.set_title(title)
    ax.xaxis.set_visible(False)
    ax.set_ylim([min(y) - np.std(y), max(y) + np.std(y)])
    ax.set_xlim([min(x), max(x)])
    ax.grid(True)

def plot_activity(activity,data):
    fig, (ax0, ax1, ax2) = plt.subplots(nrows = 3, figsize = (15, 10), sharex = True)
    plot_axis(ax0, data['timestamp'], data['Ax'], 'x-axis')
    plot_axis(ax1, data['timestamp'], data['Ay'], 'y-axis')
    plot_axis(ax2, data['timestamp'], data['Az'], 'z-axis')
    plt.subplots_adjust(hspace=0.2)
    fig.suptitle(activity)
    plt.subplots_adjust(top=0.90)
    plt.show()

def windows(data, size):
    start = 0
    while start < data.count():
        yield start, start + size
        start += (size / 2)

def segment_signal(data, window_size = None, num_channels=None): # edited
    segments = np.empty((0,window_size,num_channels)) #change from 3 to 9 channels for AGM fusion #use variable num_channels=9
    labels = np.empty((0))
    for (n_start, n_end) in windows(data['timestamp'], window_size):
##        x = data["x-axis"][start:end]
##        y = data["y-axis"][start:end]
##        z = data["z-axis"][start:end]
        n_start = int(n_start)
        n_end = int(n_end)
        Ax = data["Ax"][n_start:n_end]
        Ay = data["Ay"][n_start:n_end]
        Az = data["Az"][n_start:n_end]
        Lx = data["Lx"][n_start:n_end]
        Ly = data["Ly"][n_start:n_end]
        Lz = data["Lz"][n_start:n_end]
        Gx = data["Gx"][n_start:n_end]
        Gy = data["Gy"][n_start:n_end]
        Gz = data["Gz"][n_start:n_end]
        Mx = data["Mx"][n_start:n_end]
        My = data["My"][n_start:n_end]
        Mz = data["Mz"][n_start:n_end]
        if(len(data['timestamp'][n_start:n_end]) == window_size): # include only windows with size of 90
            segments = np.vstack([segments,np.dstack([Ax,Ay,Az,Gx,Gy,Gz,Mx,My,Mz])])
            labels = np.append(labels,stats.mode(data["activity"][n_start:n_end])[0][0])
    return segments, labels

def weight_variable(shape, restore_name):
    initial = tf.truncated_normal(shape, stddev = 0.1)
    return tf.Variable(initial, name=restore_name)

def bias_variable(shape, restore_name):
    initial = tf.constant(0.0, shape = shape)
    return tf.Variable(initial, name=restore_name)

def depthwise_conv2d(x, W):
    return tf.nn.depthwise_conv2d(x,W, [1, 1, 1, 1], padding='VALID')

def apply_depthwise_conv(x,weights,biases):
    return tf.nn.relu(tf.add(depthwise_conv2d(x, weights),biases))

def apply_max_pool(x,kernel_size,stride_size):
    return tf.nn.max_pool(x, ksize=[1, 1, kernel_size, 1], 
                          strides=[1, 1, stride_size, 1], padding='VALID') 

#------------------------get dataset----------------------#

## run shoaib_dataset.py to generate dataset_shoaib_total.txt

## get data from dataset_shoaib_total.txt
dataset_belt = read_data('dataset_shoaibsensoractivity_participant_belt.txt')
dataset_left_pocket = read_data('dataset_shoaibsensoractivity_participant_left_pocket.txt')
dataset_right_pocket = read_data('dataset_shoaibsensoractivity_participant_right_pocket.txt')
dataset_upper_arm = read_data('dataset_shoaibsensoractivity_participant_upper_arm.txt')
dataset_wrist = read_data('dataset_shoaibsensoractivity_participant_wrist.txt')



#--------------------preprocessing------------------------#

dataset_belt['Ax'] = feature_normalize(dataset_belt['Ax'])
dataset_belt['Ay'] = feature_normalize(dataset_belt['Ay'])
dataset_belt['Az'] = feature_normalize(dataset_belt['Az'])
dataset_belt['Gx'] = feature_normalize(dataset_belt['Gx'])
dataset_belt['Gy'] = feature_normalize(dataset_belt['Gy'])
dataset_belt['Gz'] = feature_normalize(dataset_belt['Gz'])
dataset_belt['Mx'] = feature_normalize(dataset_belt['Mx'])
dataset_belt['My'] = feature_normalize(dataset_belt['My'])
dataset_belt['Mz'] = feature_normalize(dataset_belt['Mz'])

dataset_left_pocket['Ax'] = feature_normalize(dataset_left_pocket['Ax'])
dataset_left_pocket['Ay'] = feature_normalize(dataset_left_pocket['Ay'])
dataset_left_pocket['Az'] = feature_normalize(dataset_left_pocket['Az'])
dataset_left_pocket['Gx'] = feature_normalize(dataset_left_pocket['Gx'])
dataset_left_pocket['Gy'] = feature_normalize(dataset_left_pocket['Gy'])
dataset_left_pocket['Gz'] = feature_normalize(dataset_left_pocket['Gz'])
dataset_left_pocket['Mx'] = feature_normalize(dataset_left_pocket['Mx'])
dataset_left_pocket['My'] = feature_normalize(dataset_left_pocket['My'])
dataset_left_pocket['Mz'] = feature_normalize(dataset_left_pocket['Mz'])

dataset_right_pocket['Ax'] = feature_normalize(dataset_right_pocket['Ax'])
dataset_right_pocket['Ay'] = feature_normalize(dataset_right_pocket['Ay'])
dataset_right_pocket['Az'] = feature_normalize(dataset_right_pocket['Az'])
dataset_right_pocket['Gx'] = feature_normalize(dataset_right_pocket['Gx'])
dataset_right_pocket['Gy'] = feature_normalize(dataset_right_pocket['Gy'])
dataset_right_pocket['Gz'] = feature_normalize(dataset_right_pocket['Gz'])
dataset_right_pocket['Mx'] = feature_normalize(dataset_right_pocket['Mx'])
dataset_right_pocket['My'] = feature_normalize(dataset_right_pocket['My'])
dataset_right_pocket['Mz'] = feature_normalize(dataset_right_pocket['Mz'])

dataset_upper_arm['Ax'] = feature_normalize(dataset_upper_arm['Ax'])
dataset_upper_arm['Ay'] = feature_normalize(dataset_upper_arm['Ay'])
dataset_upper_arm['Az'] = feature_normalize(dataset_upper_arm['Az'])
dataset_upper_arm['Gx'] = feature_normalize(dataset_upper_arm['Gx'])
dataset_upper_arm['Gy'] = feature_normalize(dataset_upper_arm['Gy'])
dataset_upper_arm['Gz'] = feature_normalize(dataset_upper_arm['Gz'])
dataset_upper_arm['Mx'] = feature_normalize(dataset_upper_arm['Mx'])
dataset_upper_arm['My'] = feature_normalize(dataset_upper_arm['My'])
dataset_upper_arm['Mz'] = feature_normalize(dataset_upper_arm['Mz'])


dataset_wrist['Ax'] = feature_normalize(dataset_wrist['Ax'])
dataset_wrist['Ay'] = feature_normalize(dataset_wrist['Ay'])
dataset_wrist['Az'] = feature_normalize(dataset_wrist['Az'])
dataset_wrist['Gx'] = feature_normalize(dataset_wrist['Gx'])
dataset_wrist['Gy'] = feature_normalize(dataset_wrist['Gy'])
dataset_wrist['Gz'] = feature_normalize(dataset_wrist['Gz'])
dataset_wrist['Mx'] = feature_normalize(dataset_wrist['Mx'])
dataset_wrist['My'] = feature_normalize(dataset_wrist['My'])
dataset_wrist['Mz'] = feature_normalize(dataset_wrist['Mz'])


#------------------fixed hyperparameters--------------------#

window_size = 200 #from 90 #FIXED at 4 seconds


#----------------input hyperparameters------------------#

input_height = 1
input_width = window_size
num_labels = 7
num_channels = 9 #from 3 channels #9 channels for AGM


#-------------------sliding time window----------------#

segments_belt, labels_belt = segment_signal(dataset_belt, window_size=window_size, num_channels=num_channels)
labels_belt = np.asarray(pd.get_dummies(labels_belt), dtype = np.int8)
reshaped_segments_belt = segments_belt.reshape(len(segments_belt), (window_size*num_channels)) #use variable num_channels instead of constant 3 channels

segments_left_pocket, labels_left_pocket = segment_signal(dataset_left_pocket, window_size=window_size, num_channels=num_channels)
labels_left_pocket = np.asarray(pd.get_dummies(labels_left_pocket), dtype = np.int8)
reshaped_segments_left_pocket = segments_left_pocket.reshape(len(segments_left_pocket), (window_size*num_channels)) #use variable num_channels instead of constant 3 channels

segments_right_pocket, labels_right_pocket = segment_signal(dataset_right_pocket, window_size=window_size, num_channels=num_channels)
labels_right_pocket = np.asarray(pd.get_dummies(labels_right_pocket), dtype = np.int8)
reshaped_segments_right_pocket = segments_right_pocket.reshape(len(segments_right_pocket), (window_size*num_channels)) #use variable num_channels instead of constant 3 channels

segments_upper_arm, labels_upper_arm = segment_signal(dataset_upper_arm, window_size=window_size, num_channels=num_channels)
labels_upper_arm = np.asarray(pd.get_dummies(labels_upper_arm), dtype = np.int8)
reshaped_segments_upper_arm = segments_upper_arm.reshape(len(segments_upper_arm), (window_size*num_channels)) #use variable num_channels instead of constant 3 channels

segments_wrist, labels_wrist = segment_signal(dataset_wrist, window_size=window_size, num_channels=num_channels)
labels_wrist = np.asarray(pd.get_dummies(labels_wrist), dtype = np.int8)
reshaped_segments_wrist = segments_wrist.reshape(len(segments_wrist), (window_size*num_channels)) #use variable num_channels instead of constant 3 channels



##reshaped_segments = np.vstack([reshaped_segments1,reshaped_segments2,reshaped_segments3,reshaped_segments4,reshaped_segments5,reshaped_segments6,reshaped_segments7,reshaped_segments8,reshaped_segments9,reshaped_segments10])
##labels = np.vstack([labels1,labels2,labels3,labels4,labels5,labels6,labels7,labels8,labels9,labels10])



# all locations
reshaped_segments = np.vstack([reshaped_segments_belt,reshaped_segments_left_pocket,reshaped_segments_right_pocket,reshaped_segments_upper_arm,reshaped_segments_wrist])
labels = np.vstack([labels_belt,labels_left_pocket,labels_right_pocket,labels_upper_arm,labels_wrist]) 


#------------divide data into test and training set-----------#

train_test_split = np.random.rand(len(reshaped_segments)) < 0.70
train_x = reshaped_segments[train_test_split]
train_y = labels[train_test_split]
test_x = reshaped_segments[~train_test_split]
test_y = labels[~train_test_split]



#---------------training hyperparameters----------------#

batch_size = 10
kernel_size = 60 #from 60 #optimal 2
depth = 15 #from 60 #optimal 15
num_hidden = 1000 #from 1000 #optimal 80

learning_rate = 0.0001
training_epochs = 8


total_batches = train_x.shape[0] ##// batch_size # included // batch_size



#---------define placeholders for input----------#

X = tf.placeholder(tf.float32, shape=[None,input_width * num_channels], name="input")
X_reshaped = tf.reshape(X,[-1,input_height,input_width,num_channels])
Y = tf.placeholder(tf.float32, shape=[None,num_labels])


#---------------------perform convolution-----------------#

# first convolutional layer 
c_weights = weight_variable([1, kernel_size, num_channels, depth], restore_name="c_weights")
c_biases = bias_variable([depth * num_channels], restore_name="c_biases")

c = apply_depthwise_conv(X_reshaped,c_weights,c_biases)
p = apply_max_pool(c,20,2)

# second convolutional layer
c2_weights = weight_variable([1, 6,depth*num_channels,depth//10], restore_name="c2_weights")
c2_biases = bias_variable([(depth*num_channels)*(depth//10)], restore_name="c2_biases")

c2 = apply_depthwise_conv(p,c2_weights,c2_biases)


n_classes = 7
n_hidden = 128
n_inputs = 540 # 540 = 60*3 not 180 # or 7*9*10
lstm_size = 128

rnnW = {
    'hidden': tf.Variable(tf.random_normal([n_inputs, n_hidden])),
    'output': tf.Variable(tf.random_normal([n_hidden, n_classes]))
}

rnnBiases = {
    'hidden': tf.Variable(tf.random_normal([n_hidden], mean=1.0)),
    'output': tf.Variable(tf.random_normal([n_classes]))
}

c2Reshape = tf.reshape(c2, [-1, 7, 200])
shuff = tf.transpose(c2Reshape, [1, 0, 2])
shuff = tf.reshape(shuff, [-1, n_inputs])

# Linear activation, reshaping inputs to the LSTM's number of hidden:
hidden = tf.nn.relu(tf.matmul(
    shuff, rnnW['hidden']
) + rnnBiases['hidden'])

# Split the series because the rnn cell needs time_steps features, each of shape:
hidden = tf.split(axis=0, num_or_size_splits=7, value=hidden)

lstm_cell = tf.contrib.rnn.LSTMCell(lstm_size, forget_bias=1.0)
# Stack two LSTM layers, both layers has the same shape
lstm_layers = tf.contrib.rnn.MultiRNNCell([lstm_cell] * 2)

lstmOutputs, _ = tf.contrib.rnn.static_rnn(lstm_layers, hidden, dtype=tf.float32)
lstmLastOutput = lstmOutputs[-1]
y_ = tf.matmul(lstmLastOutput, rnnW['output']) + rnnBiases['output']





#-----------------loss optimization-------------#

loss = -tf.reduce_sum(Y * tf.log(y_))
optimizer = tf.train.GradientDescentOptimizer(learning_rate = learning_rate).minimize(loss)
##optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(loss)


#-----------------compute accuracy---------------#

correct_prediction = tf.equal(tf.argmax(y_,1), tf.argmax(Y,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

cost_history = np.empty(shape=[1],dtype=float)
saver = tf.train.Saver()



#-----------------run session--------------------#

session = tf.Session()
session.run(tf.global_variables_initializer())

for epoch in range(8):
    for b in range(total_batches):    
        offset = (b * batch_size) % (train_y.shape[0] - batch_size)
        batch_x = train_x[offset:(offset + batch_size), :]
        batch_y = train_y[offset:(offset + batch_size), :]
        _, c = session.run([optimizer, loss], feed_dict={X: batch_x, Y: batch_y})
        cost_history = np.append(cost_history,c)
    print("Epoch: ",epoch," Training Loss: ",c," Training Accuracy: ",\
            session.run(accuracy, feed_dict={X: train_x, Y: train_y}))

print("Testing Accuracy:", session.run(accuracy, feed_dict=X: test_x, Y: test_y))

if 1==1:
    print ("Testing Accuracy: ", session.run(accuracy, feed_dict=X: test_x, Y: test_y),'\n')
    pred_y = session.run(tf.argmax(y_ ,1),feed_dict=X: test_x)
    cm = confusion_matrix(np.argmax(test_y ,1),pred_y)
    print (cm, '\n')
    plt.imshow(cm)
    plt.title('Confusion Matrix')
    plt.rcParams['image.cmap'] = 'afmhot'
    plt.colorbar()
    tick_marks = np.arange(len(['Wal', 'Std', 'Jog', 'Sit', 'Bik', 'Wlu', 'Wld']))
    plt.xticks(tick_marks, ['Wal', 'Std', 'Jog', 'Sit', 'Bik', 'Wlu', 'Wld'])
    plt.yticks(tick_marks, ['Wal', 'Std', 'Jog', 'Sit', 'Bik', 'Wlu', 'Wld'])

    fmt = '.2f'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.figure()
    plt.show()

However, I always get this error:

Traceback (most recent call last):
  File "C:\Users\Charlene\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\client\session.py", line 1322, in _do_call
    return fn(*args)
  File "C:\Users\Charlene\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\client\session.py", line 1307, in _run_fn
    options, feed_dict, fetch_list, target_list, run_metadata)
  File "C:\Users\Charlene\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\client\session.py", line 1409, in _call_tf_sessionrun
    run_metadata)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [10,7] vs. [20,7]
  [[Node: mul = Mul[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:CPU:0"](_arg_Placeholder_0_0, Log)]]

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "", line 6, in
    _, c = session.run([optimizer, loss], feed_dict={X: batch_x, Y: batch_y})
  File "C:\Users\Charlene\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\client\session.py", line 900, in run
    run_metadata_ptr)
  File "C:\Users\Charlene\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\client\session.py", line 1135, in _run
    feed_dict_tensor, options, run_metadata)
  File "C:\Users\Charlene\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\client\session.py", line 1316, in _do_run
    run_metadata)
  File "C:\Users\Charlene\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\client\session.py", line 1335, in _do_call
    raise type(e)(node_def, op, message)
tensorflow.python.framework.errors_impl.InvalidArgumentError: Incompatible shapes: [10,7] vs. [20,7]
  [[Node: mul = Mul[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:CPU:0"](_arg_Placeholder_0_0, Log)]]

Caused by op 'mul', defined at:
  File "", line 1, in
  File "C:\Users\Charlene\AppData\Local\Programs\Python\Python35\lib\idlelib\run.py", line 130, in main
    ret = method(*args, **kwargs)
  File "C:\Users\Charlene\AppData\Local\Programs\Python\Python35\lib\idlelib\run.py", line 357, in runcode
    exec(code, self.locals)
  File "", line 2, in
  File "C:\Users\Charlene\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\ops\math_ops.py", line 979, in binary_op_wrapper
    return func(x, y, name=name)
  File "C:\Users\Charlene\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\ops\math_ops.py", line 1211, in _mul_dispatch
    return gen_math_ops.mul(x, y, name=name)
  File "C:\Users\Charlene\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\ops\gen_math_ops.py", line 5066, in mul
    "Mul", x=x, y=y, name=name)
  File "C:\Users\Charlene\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 787, in _apply_op_helper
    op_def=op_def)
  File "C:\Users\Charlene\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\framework\ops.py", line 3392, in create_op
    op_def=op_def)
  File "C:\Users\Charlene\AppData\Local\Programs\Python\Python35\lib\site-packages\tensorflow\python\framework\ops.py", line 1718, in __init__
    self._traceback = self._graph._extract_stack()  # pylint: disable=protected-access

InvalidArgumentError (see above for traceback): Incompatible shapes: [10,7] vs. [20,7]
  [[Node: mul = Mul[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:CPU:0"](_arg_Placeholder_0_0, Log)]]

The main error I see here is:

Incompatible shapes: [10,7] vs. [20,7]

where 10 is the batch size and 7 is the number of classes.

What is causing this error?

【Question comments】:

【Answer 1】:

It looks like the error occurs here:

loss = -tf.reduce_sum(Y * tf.log(y_))

Your Y is (10, 7), which is expected, but for some reason y_ is (20, 7).

Try tracing the shape of c2 just before this line:

c2Reshape = tf.reshape(c2, [-1, 7, 200])

as well as the shape of c2Reshape right after it (or temporarily replace that line with c2Reshape = tf.reshape(c2, [10, 7, 200]) and see whether it fails). I suspect that this is where the 20 comes from.
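For reference, a minimal shape-checking sketch for TF 1.x graph mode might look like the lines below; the names follow the question's code, and batch_x/batch_y stand for whatever batch is being debugged (an assumption, not part of the original answer):

# Static (graph-construction) shapes; the batch dimension usually shows as None:
print("c2:", c2.get_shape().as_list())
print("c2Reshape:", c2Reshape.get_shape().as_list())
print("y_:", y_.get_shape().as_list())

# Runtime shapes for one concrete batch (assumes `session`, `batch_x`, `batch_y`
# as defined in the question's training loop):
print(session.run([tf.shape(c2), tf.shape(c2Reshape), tf.shape(y_)],
                  feed_dict={X: batch_x, Y: batch_y}))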

【Comments】:

When I tried changing the batch size to 20, y_ should have been (20, 7), but the shape changed to (40, 7) instead.

I wasn't actually suggesting that you change the batch size. The most likely cause of the problem is that the shape in your reshape is wrong. A related way to address it would be to change the reshape to c2Reshape = tf.reshape(c2, [-1, 7, 400]), but I haven't read through your whole model, and the problem may well originate somewhere earlier.

I tried c2Reshape = tf.reshape(c2, [10, 7, 200]) and got this error: ValueError: Dimension size must be evenly divisible by 7560 but is 14000 for 'Reshape_1' (op: 'Reshape') with input shapes: [?,1,56,135], [3] and with input tensors computed as partial shapes: input[1] = [10,7,200].

So your c2 is (batch_size, 1, 56, 135) and you are reshaping it to (?, 7, 200), which looks wrong to me. Should it be (?, 7, 1080)? Where did (?, 7, 200) come from?
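Putting the comment thread together: if c2 really comes out as (batch_size, 1, 56, 135), then one possible fix, sketched here purely under that assumption and not verified against the full model, is to keep 7 time steps of 56/7 * 135 = 1080 features each and make n_inputs match:

n_inputs = 1080  # 1 * 56 * 135 = 7560 features per window, split into 7 time steps -> 7560 / 7 = 1080

c2Reshape = tf.reshape(c2, [-1, 7, n_inputs])   # [batch, 7, 1080]
shuff = tf.transpose(c2Reshape, [1, 0, 2])      # [7, batch, 1080]
shuff = tf.reshape(shuff, [-1, n_inputs])       # [7*batch, 1080]
# rnnW['hidden'] must then be built as [n_inputs, n_hidden], i.e. [1080, 128],
# so the leading dimension stays equal to the batch size all the way to y_.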
