"IndexError: list index out of range" error using TensorFlow

Posted: 2017-03-19 03:22:25

【Question description】:

I am reproducing "GoogLeNet" with TensorFlow; the dataset is Oxford Flowers 17.

Here is my code.

# This code is implementation of GoogLeNet, which is proposed in "https://www.cs.unc.edu/~wliu/papers/GoogLeNet.pdf"
# This code is referred from "https://github.com/tflearn/tflearn/blob/master/examples/images/googlenet.py"

from __future__ import division, print_function, absolute_import

# This code is extracted from "https://github.com/tflearn/tflearn/blob/master/tflearn/datasets/oxflower17.py"
import oxflower17

import tensorflow as tf

import numpy as np

X, Y = oxflower17.load_data(one_hot=True, resize_pics=(227,227))

x = tf.placeholder(tf.float32, [None, 227, 227, 3])
y = tf.placeholder(tf.float32, [None, 17])
keep_prob = tf.placeholder(tf.float32) #dropout (keep probability)

trainX, trainY, testX, testY = X[0:1224], Y[0:1224], X[1224:1360], Y[1224:1360] # Divide training sets and test sets
trainX = trainX.reshape(-1, 227, 227, 3)
testX = testX.reshape(-1, 227, 227, 3)

print (len(trainX))
print (len(testX))

# Parameters
batch_size = 64
test_size = len(testX)

# Create some wrappers
def conv2d(x, W, b, strides):  # Conv2D wrapper, with bias and relu activation
    x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
    x = tf.nn.bias_add(x, b)
    return tf.nn.relu(x)

def maxpool2d(x, k, strides):  # MaxPool2D wrapper
    return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, strides, strides, 1], padding='SAME')


def avgpool2d(x, k, strides):  # AveragePool2D wrapper
    return tf.nn.avg_pool(x, ksize=[1, k, k, 1], strides=[1, strides, strides, 1], padding='SAME')


def local_response_normalization(incoming, depth_radius=5, bias=1.0, alpha=0.0001, beta=0.75,
                                 name="LocalResponseNormalization"):
    return tf.nn.lrn(incoming, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta, name=name)


weights = {
    ...
}


biases = {
    ...
}


# Create NN

x = tf.reshape(x, shape=[-1, 227, 227, 1])

conv1_7_7 = conv2d(x, weights['w_c1_77'], biases['b_c1_77'], strides=2)
pool1_3_3 = maxpool2d(conv1_7_7, k=3, strides=2)
pool1_3_3 = local_response_normalization(pool1_3_3)

conv2_1_1 = conv2d(pool1_3_3, weights['w_c2_11'], biases['b_c2_11'], strides=1)
conv2_3_3 = conv2d(conv2_1_1, weights['w_c2_33'], biases['b_c2_33'], strides=1)
conv2_3_3_lrn = local_response_normalization(conv2_3_3)
pool2_3_3 = maxpool2d(conv2_3_3_lrn, k=3, strides=2)

# Inception module (3a)
inception_3a_1_1 = conv2d(pool2_3_3, weights['w_inception_3a_11'], biases['b_inception_3a_11'], strides=1)
inception_3a_3_3_reduce = conv2d(pool2_3_3, weights['w_inception_3a_33_reduce'], biases['b_inception_3a_33_reduce'],
                                 strides=1)
inception_3a_3_3 = conv2d(inception_3a_3_3_reduce, weights['w_inception_3a_33'], biases['b_inception_3a_33'], strides=1)
inception_3a_5_5_reduce = conv2d(pool2_3_3, weights['w_inception_3a_55_reduce'], biases['b_inception_3a_55_reduce'],
                                 strides=1)
inception_3a_5_5 = conv2d(inception_3a_5_5_reduce, weights['w_inception_3a_55'], biases['b_inception_3a_55'], strides=1)
inception_3a_maxpool = maxpool2d(pool2_3_3, k=3, strides=1)
inception_3a_maxpool_reduce = conv2d(inception_3a_maxpool, weights['w_inception_3a_mp_reduce'],
                                     biases['b_inception_3a_mp_reduce'], strides=1)

inception_3a_concat = tf.concat(3, [inception_3a_1_1, inception_3a_3_3, inception_3a_5_5, inception_3a_maxpool_reduce])

...

# Inception module (5b)
inception_5b_1_1 = conv2d(inception_5a_concat, weights['w_inception_5b_11'], biases['b_inception_5b_11'], strides=1)
inception_5b_3_3_reduce = conv2d(inception_5a_concat, weights['w_inception_5b_33_reduce'],
                                 biases['b_inception_5b_33_reduce'], strides=1)
inception_5b_3_3 = conv2d(inception_5b_3_3_reduce, weights['w_inception_5b_33'], biases['b_inception_5b_33'], strides=1)
inception_5b_5_5_reduce = conv2d(inception_5a_concat, weights['w_inception_5b_55_reduce'],
                                 biases['b_inception_5b_55_reduce'], strides=1)
inception_5b_5_5 = conv2d(inception_5b_5_5_reduce, weights['w_inception_5b_55'], biases['b_inception_5b_55'], strides=1)
inception_5b_maxpool = maxpool2d(inception_5a_concat, k=3, strides=1)
inception_5b_maxpool_reduce = conv2d(inception_5b_maxpool, weights['w_inception_5a_mp_reduce'],
                                     biases['b_inception_5a_mp_reduce'], strides=1)

inception_5b_concat = tf.concat(3, [inception_5b_1_1, inception_5b_3_3, inception_5b_5_5, inception_5b_maxpool_reduce])

pool5_7_7 = avgpool2d(inception_5b_concat, 7, 1)

pool5_7_7_dropout = tf.nn.dropout(pool5_7_7, 0.4)

fc = tf.reshape(pool5_7_7_dropout, [-1, weights['w_fc'].get_shape().as_list()[0]])
fc = tf.add(tf.matmul(fc, weights['w_fc']), biases['b_fc'])

#### Network design is finished.

cost_function = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(fc, y))

optimizer = tf.train.MomentumOptimizer(learning_rate=0.001, momentum=0.9)

predict = tf.argmax(fc, 1)

init = tf.initialize_all_variables()

# Launch the graph
# This code is extracted from "http://pythonkim.tistory.com/56"
# Some variables are changed
with tf.Session() as sess:
    sess.run(init)

    for i in range(1):

        training_batch = zip(range(0, len(trainX), batch_size), range(batch_size, len(trainX)+1, batch_size))
        tf.reset_default_graph() # added by minho, from "https://github.com/tensorflow/tensorflow/issues/1470"
        for start, end in training_batch:
            sess.run(optimizer, feed_dict={x: trainX[start:end], y: trainY[start:end], keep_prob: 1.0}) # modified by minho

        test_indices = np.arange(len(testX))  # Get A Test Batch
        np.random.shuffle(test_indices)
        test_indices = test_indices[0:test_size]

        print(len(testX[test_indices]))

        print(i, np.mean(np.argmax(testY[test_indices], axis=1) ==
                         sess.run(predict, feed_dict={x: testX[test_indices], y: testY[test_indices], keep_prob: 1.0}))) # modified by minho

Here is the error log:

  File "/home/mh0205/GoogLeNet/googlenet.py", line 443, in <module>
    sess.run(predict, feed_dict={x: testX[test_indices], y: testY[test_indices], keep_prob: 1.0}))) # modified by minho
  File "/home/mh0205/anaconda2/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 1159, in __exit__
    self._default_graph_context_manager.__exit__(exec_type, exec_value, exec_tb)
  File "/home/mh0205/anaconda2/lib/python2.7/contextlib.py", line 35, in __exit__
    self.gen.throw(type, value, traceback)
  File "/home/mh0205/anaconda2/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 3671, in get_controller
    if self.stack[-1] is not default:
IndexError: list index out of range

I cannot fix this error. Please help me.

【Question discussion】:

【Answer 1】:

Add:

tf.reset_default_graph()

before your code.

If tensorflow has not been imported yet, also add:

import tensorflow as tf
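
A minimal sketch of the placement being suggested, assuming the rest of the question's graph-building code stays as it is: reset the default graph once, before any ops are created and before the Session is opened, rather than inside the with tf.Session() block.

import tensorflow as tf

tf.reset_default_graph()  # clear any graph left over from a previous run

# ... define placeholders, weights, biases, and the network here ...

with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    # run the training loop here; note that tf.reset_default_graph()
    # is not called again inside the session block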

【Discussion】:

I ran into a similar error, and the fix also involved tf.reset_default_graph(). Could you explain how your solution works? I would like to understand what this error has to do with resetting the graph.
