How to fix AttributeError: module 'tensorflow' has no attribute 'reset_default_graph'
I am trying to build an AI-based chatbot by following a tutorial; the tutorial uses Python 3.5 and TensorFlow 1.0.0.
I have checked everything. My virtual environment is launched from the Anaconda navigator, and I have even activated and run it from the terminal. I am importing everything correctly. Although I installed tensorflow 1.0.0 from the Anaconda terminal with the virtual environment activated, Anaconda shows the version as 1.1.0. I do not know whether there is a major difference between the two, so I am not sure about that point. When I run tf.reset_default_graph() and session = tf.InteractiveSession(), Spyder does not run them successfully; instead it shows AttributeError: module 'tensorflow' has no attribute 'reset_default_graph'.
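For reference, a quick way to check which TensorFlow build the interpreter actually loads (a minimal sketch, not from the original post) is:
# Sanity check (illustrative): print the version and install path of the
# TensorFlow module that this interpreter really imports.
import tensorflow as tf
print(tf.__version__)   # the version Spyder is actually using
print(tf.__file__)      # which installation it is picked up from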
import numpy as np
import tensorflow as tf
import re
import time
### Phase 1: Data Preprocessing ###
# Importing the dataset
lines = open('movie_lines.txt', encoding = 'utf-8', errors = 'ignore').read().split('\n')
conversations = open('movie_conversations.txt', encoding = 'utf-8', errors = 'ignore').read().split('\n')
# Creating a dictionary that maps each line with its id
id_to_line = {}
for line in lines:
    _line = line.split(' +++$+++ ')
    if len(_line) == 5:
        id_to_line[_line[0]] = _line[4]
# Creating a list of conversations
conversations_ids = []
for conversation in conversations[:-1]:
    _conversation = conversation.split(' +++$+++ ')[-1][1:-1].replace("'", "").replace(" ", "")
    conversations_ids.append(_conversation.split(','))
# Getting questions and answers seperately
questions = []
answers = []
for conversation in conversations_ids:
    for i in range(len(conversation) - 1):
        questions.append(id_to_line[conversation[i]])
        answers.append(id_to_line[conversation[i + 1]])
# Cleaning Texts by removing apostrophes and putting everything in lowercase
def clean_text(text):
    text = text.lower()
    text = re.sub(r"i'm", "i am", text)
    text = re.sub(r"he's", "he is", text)
    text = re.sub(r"she's", "she is", text)
    text = re.sub(r"that's", "that is", text)
    text = re.sub(r"what's", "what is", text)
    text = re.sub(r"where's", "where is", text)
    text = re.sub(r"how's", "how is", text)
    text = re.sub(r"\'ll", " will", text)
    text = re.sub(r"\'ve", " have", text)
    text = re.sub(r"\'re", " are", text)
    text = re.sub(r"\'d", " would", text)
    text = re.sub(r"won't", "will not", text)
    text = re.sub(r"can't", "cannot", text)
    text = re.sub(r"n't", " not", text)
    text = re.sub(r"[-()\"#/@;:<>`+=~|.!?,]", "", text)
    return text
# Cleaning Questions
clean_questions = []
for question in questions:
    clean_questions.append(clean_text(question))
# Cleaning Answers
clean_answers = []
for answer in answers:
    clean_answers.append(clean_text(answer))
# Filtering out the questions and answers that are too short or too long
short_questions = []
short_answers = []
i = 0
for question in clean_questions:
    if 2 <= len(question.split()) <= 25:
        short_questions.append(question)
        short_answers.append(clean_answers[i])
    i += 1
clean_questions = []
clean_answers = []
i = 0
for answer in short_answers:
    if 2 <= len(answer.split()) <= 25:
        clean_answers.append(answer)
        clean_questions.append(short_questions[i])
    i += 1
# Creating a Dictionary to map each word to its number of occurences
word_to_count = {}
for question in clean_questions:
    for word in question.split():
        if word not in word_to_count:
            word_to_count[word] = 1
        else:
            word_to_count[word] += 1
for answer in clean_answers:
    for word in answer.split():
        if word not in word_to_count:
            word_to_count[word] = 1
        else:
            word_to_count[word] += 1
# Doing Tokenization & Filtering non-frequent words
threshold = 20
ques_words_to_int = {}
word_count = 0
for word, count in word_to_count.items():
    if count >= threshold:
        ques_words_to_int[word] = word_count
        word_count += 1
ans_words_to_int = {}
word_count = 0
for word, count in word_to_count.items():
    if count >= threshold:
        ans_words_to_int[word] = word_count
        word_count += 1
# Adding last tokens to above two Dictionaries
tokens = ['<PAD>', '<EOS>', '<OUT>', '<SOS>']
for token in tokens:
    ques_words_to_int[token] = len(ques_words_to_int) + 1
for token in tokens:
    ans_words_to_int[token] = len(ans_words_to_int) + 1
# Creating Inverse Dictionary of ans_words_to_int
ans_ints_to_word = {w_i: w for w, w_i in ans_words_to_int.items()}
# Adding <EOS> to end of every answer for SEQ2SEQ Decoding
for i in range(len(clean_answers)):
    clean_answers[i] += ' <EOS>'
# Translating all ques & ans into int & replacing all words, filtered out by <OUT>
ques_into_int = []
for question in clean_questions:
    ints = []
    for word in question.split():
        if word not in ques_words_to_int:
            ints.append(ques_words_to_int['<OUT>'])
        else:
            ints.append(ques_words_to_int[word])
    ques_into_int.append(ints)
ans_into_int = []
for answer in clean_answers:
    ints = []
    for word in answer.split():
        if word not in ans_words_to_int:
            ints.append(ans_words_to_int['<OUT>'])
        else:
            ints.append(ans_words_to_int[word])
    ans_into_int.append(ints)
# Sorting ques & ans by length of questions
sorted_clean_questions = []
sorted_clean_answers = []
for length in range(1, 25):
    for i in enumerate(ques_into_int):
        if len(i[1]) == length:
            sorted_clean_questions.append(ques_into_int[i[0]])
            sorted_clean_answers.append(ans_into_int[i[0]])
### Phase 2: Building SEQ2SEQ Model ###
# Creating placeholders for inputs and targets
def model_inputs():
    inputs = tf.placeholder(tf.int32, [None, None], name = 'input')
    targets = tf.placeholder(tf.int32, [None, None], name = 'target')
    lr = tf.placeholder(tf.float32, name = 'Learning_rate')
    keep_prob = tf.placeholder(tf.float32, name = 'Keep_prob')
    return inputs, targets, lr, keep_prob
# Preprocessing the targets
def preprocess_targets(targets, word_to_int, batch_size):
    left_side = tf.fill([batch_size, 1], word_to_int['<SOS>'])
    right_side = tf.strided_slice(targets, [0,0], [batch_size, -1], [1,1])
    preprocessed_targets = tf.concat([left_side, right_side], 1)
    return preprocessed_targets
# Creating the Encoder RNN Layer
def encoder_rnn_layer(rnn_inputs, rnn_size, num_layers, keep_prob, sequence_length):
    lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size)
    lstm_dropout = tf.contrib.rnn.DropoutWrapper(lstm, input_keep_prob = keep_prob)
    encoder_cell = tf.contrib.rnn.MultiRNNCell([lstm_dropout] * num_layers)
    _, encoder_state = tf.nn.bidirectional_dynamic_rnn(cell_fw = encoder_cell,
                                                       cell_bw = encoder_cell,
                                                       sequence_length = sequence_length,
                                                       inputs = rnn_inputs,
                                                       dtype = tf.float32)
    return encoder_state
# Decoding the Training Set
def decode_training_set(encoder_state, decoder_cell, decoder_embedded_input, sequence_length, decoding_scope, output_function, keep_prob, batch_size):
    attention_states = tf.zeros([batch_size, 1, decoder_cell.output_size])
    attention_keys, attention_values, attention_score_function, attention_construct_function = tf.contrib.seq2seq.prepare_attention(attention_states, attention_option = "bahdanau", num_units = decoder_cell.output_size)
    training_decoder_function = tf.contrib.seq2seq.attention_decoder_fn_train(encoder_state[0],
                                                                              attention_keys,
                                                                              attention_values,
                                                                              attention_score_function,
                                                                              attention_construct_function,
                                                                              name = "attn_dec_train")
    decoder_output, decoder_final_state, decoder_final_context_state = tf.contrib.seq2seq.dynamic_rnn_decoder(decoder_cell,
                                                                                                              training_decoder_function,
                                                                                                              decoder_embedded_input,
                                                                                                              sequence_length,
                                                                                                              scope = decoding_scope)
    decoder_output_dropout = tf.nn.dropout(decoder_output, keep_prob)
    return output_function(decoder_output_dropout)
# Decoding the test/validation set
def decode_test_set(encoder_state, decoder_cell, decoder_embeddings_matrix, sos_id, eos_id, maximum_length, num_words, decoding_scope, output_function, keep_prob, batch_size):
    attention_states = tf.zeros([batch_size, 1, decoder_cell.output_size])
    attention_keys, attention_values, attention_score_function, attention_construct_function = tf.contrib.seq2seq.prepare_attention(attention_states, attention_option = "bahdanau", num_units = decoder_cell.output_size)
    test_decoder_function = tf.contrib.seq2seq.attention_decoder_fn_inference(output_function,
                                                                              encoder_state[0],
                                                                              attention_keys,
                                                                              attention_values,
                                                                              attention_score_function,
                                                                              attention_construct_function,
                                                                              decoder_embeddings_matrix,
                                                                              sos_id,
                                                                              eos_id,
                                                                              maximum_length,
                                                                              num_words,
                                                                              name = "attn_dec_inf")
    test_predictions, decoder_final_state, decoder_final_context_state = tf.contrib.seq2seq.dynamic_rnn_decoder(decoder_cell,
                                                                                                                test_decoder_function,
                                                                                                                scope = decoding_scope)
    return test_predictions
# Creating the Decoder RNN
def decoder_rnn(decoder_embedded_input, decoder_embeddings_matrix, encoder_state, num_words, sequence_length, rnn_size, num_layers, word_to_int, keep_prob, batch_size):
    with tf.variable_scope("decoding") as decoding_scope:
        lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size)
        lstm_dropout = tf.contrib.rnn.DropoutWrapper(lstm, input_keep_prob = keep_prob)
        decoder_cell = tf.contrib.rnn.MultiRNNCell([lstm_dropout] * num_layers)
        weights = tf.truncated_normal_initializer(stddev = 0.1)
        biases = tf.zeros_initializer()
        output_function = lambda x: tf.contrib.layers.fully_connected(x,
                                                                      num_words,
                                                                      None,
                                                                      scope = decoding_scope,
                                                                      weights_initializer = weights,
                                                                      biases_initializer = biases)
        training_predictions = decode_training_set(encoder_state,
                                                   decoder_cell,
                                                   decoder_embedded_input,
                                                   sequence_length,
                                                   decoding_scope,
                                                   output_function,
                                                   keep_prob,
                                                   batch_size)
        decoding_scope.reuse_variables()
        test_predictions = decode_test_set(encoder_state,
                                           decoder_cell,
                                           decoder_embeddings_matrix,
                                           word_to_int['<SOS>'],
                                           word_to_int['<EOS>'],
                                           sequence_length - 1,
                                           num_words,
                                           decoding_scope,
                                           output_function,
                                           keep_prob,
                                           batch_size)
    return training_predictions, test_predictions
# Building the SEQ2SEQ Model
def seq2seq_model(inputs, targets, keep_prob, batch_size, sequence_length, answers_num_words, questions_num_words, encoder_embedding_size, decoder_embedding_size, rnn_size, num_layers, ques_words_to_int):
    encoder_embedded_input = tf.contrib.layers.embed_sequence(inputs,
                                                              answers_num_words + 1,
                                                              encoder_embedding_size,
                                                              initializer = tf.random_uniform_initializer(0, 1))
    encoder_state = encoder_rnn_layer(encoder_embedded_input, rnn_size, num_layers, keep_prob, sequence_length)
    preprocessed_targets = preprocess_targets(targets, ques_words_to_int, batch_size)
    decoder_embeddings_matrix = tf.Variable(tf.random_uniform([questions_num_words + 1, decoder_embedding_size], 0, 1))
    decoder_embedded_input = tf.nn.embedding_lookup(decoder_embeddings_matrix, preprocessed_targets)
    training_predictions, test_predictions = decoder_rnn(decoder_embedded_input,
                                                         decoder_embeddings_matrix,
                                                         encoder_state,
                                                         questions_num_words,
                                                         sequence_length,
                                                         rnn_size,
                                                         num_layers,
                                                         ques_words_to_int,
                                                         keep_prob,
                                                         batch_size)
    return training_predictions, test_predictions
### Phase 3: Training the SEQ2SEQ Model ###
# Setting the Hyperparameters
epochs = 100
batch_size = 32
rnn_size = 1024
num_layers = 3
encoding_embedding_size = 1024
decoding_embedding_size = 1024
learning_rate = 0.001
learning_rate_decay = 0.9
min_learning_rate = 0.0001
keep_probability = 0.5
# Defining a session
tf.reset_default_graph()
session = tf.InteractiveSession()
I solved the problem myself; from next time onward I will try to post the question in a cleaner way, sorry. I fixed it by deactivating and reactivating the virtual environment and then running the commands "pip install --user tensorflow==1.0.0" and "pip3 install --user tensorflow==1.0.0"; after that, the AttributeError related to tf.reset_default_graph() and session = tf.InteractiveSession() was resolved.
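If downgrading is not possible, a commonly suggested alternative (a minimal sketch assuming TensorFlow 2.x is installed, not part of the original answer) is to reach the removed 1.x graph/session API through the compat.v1 module:
# Sketch assuming TensorFlow 2.x: the 1.x graph/session API lives under
# tf.compat.v1, and eager execution has to be disabled before using it.
import tensorflow as tf
tf.compat.v1.disable_eager_execution()
tf.compat.v1.reset_default_graph()
session = tf.compat.v1.InteractiveSession()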