Prediction using InceptionV3 in TensorFlow
Posted: 2017-07-16 18:37:57

I trained an InceptionV3 model in TensorFlow on my own dataset. From training I have the checkpoint files and the graph (.meta). I am using these files to classify the labels of new images. This is what I have so far:
The inception_v3 code from TF-Slim:
def inception_v3(inputs,
dropout_keep_prob=0.8,
num_classes=1000,
is_training=True,
restore_logits=True,
scope=''):
"""Latest Inception from http://arxiv.org/abs/1512.00567.
"Rethinking the Inception Architecture for Computer Vision"
Christian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens,
Zbigniew Wojna
Args:
inputs: a tensor of size [batch_size, height, width, channels].
dropout_keep_prob: dropout keep_prob.
num_classes: number of predicted classes.
is_training: whether is training or not.
restore_logits: whether or not the logits layers should be restored.
Useful for fine-tuning a model with different num_classes.
scope: Optional scope for name_scope.
Returns:
a list containing 'logits', 'aux_logits' Tensors.
"""
# end_points will collect relevant activations for external use, for example
# summaries or losses.
end_points = {}
with tf.name_scope(scope, 'inception_v3', [inputs]):
with scopes.arg_scope([ops.conv2d, ops.fc, ops.batch_norm, ops.dropout],
is_training=is_training):
with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
stride=1, padding='VALID'):
# 299 x 299 x 3
end_points['conv0'] = ops.conv2d(inputs, 32, [3, 3], stride=2,
scope='conv0')
# 149 x 149 x 32
end_points['conv1'] = ops.conv2d(end_points['conv0'], 32, [3, 3],
scope='conv1')
# 147 x 147 x 32
end_points['conv2'] = ops.conv2d(end_points['conv1'], 64, [3, 3],
padding='SAME', scope='conv2')
# 147 x 147 x 64
end_points['pool1'] = ops.max_pool(end_points['conv2'], [3, 3],
stride=2, scope='pool1')
# 73 x 73 x 64
end_points['conv3'] = ops.conv2d(end_points['pool1'], 80, [1, 1],
scope='conv3')
# 73 x 73 x 80.
end_points['conv4'] = ops.conv2d(end_points['conv3'], 192, [3, 3],
scope='conv4')
# 71 x 71 x 192.
end_points['pool2'] = ops.max_pool(end_points['conv4'], [3, 3],
stride=2, scope='pool2')
# 35 x 35 x 192.
net = end_points['pool2']
# Inception blocks
with scopes.arg_scope([ops.conv2d, ops.max_pool, ops.avg_pool],
stride=1, padding='SAME'):
# mixed: 35 x 35 x 256.
with tf.variable_scope('mixed_35x35x256a'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 64, [1, 1])
with tf.variable_scope('branch5x5'):
branch5x5 = ops.conv2d(net, 48, [1, 1])
branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
with tf.variable_scope('branch3x3dbl'):
branch3x3dbl = ops.conv2d(net, 64, [1, 1])
branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 32, [1, 1])
net = tf.concat([branch1x1, branch5x5, branch3x3dbl, branch_pool], 3)
end_points['mixed_35x35x256a'] = net
# mixed_1: 35 x 35 x 288.
with tf.variable_scope('mixed_35x35x288a'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 64, [1, 1])
with tf.variable_scope('branch5x5'):
branch5x5 = ops.conv2d(net, 48, [1, 1])
branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
with tf.variable_scope('branch3x3dbl'):
branch3x3dbl = ops.conv2d(net, 64, [1, 1])
branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 64, [1, 1])
net = tf.concat([branch1x1, branch5x5, branch3x3dbl, branch_pool], 3)
end_points['mixed_35x35x288a'] = net
# mixed_2: 35 x 35 x 288.
with tf.variable_scope('mixed_35x35x288b'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 64, [1, 1])
with tf.variable_scope('branch5x5'):
branch5x5 = ops.conv2d(net, 48, [1, 1])
branch5x5 = ops.conv2d(branch5x5, 64, [5, 5])
with tf.variable_scope('branch3x3dbl'):
branch3x3dbl = ops.conv2d(net, 64, [1, 1])
branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 64, [1, 1])
net = tf.concat([branch1x1, branch5x5, branch3x3dbl, branch_pool], 3)
end_points['mixed_35x35x288b'] = net
# mixed_3: 17 x 17 x 768.
with tf.variable_scope('mixed_17x17x768a'):
with tf.variable_scope('branch3x3'):
branch3x3 = ops.conv2d(net, 384, [3, 3], stride=2, padding='VALID')
with tf.variable_scope('branch3x3dbl'):
branch3x3dbl = ops.conv2d(net, 64, [1, 1])
branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3])
branch3x3dbl = ops.conv2d(branch3x3dbl, 96, [3, 3],
stride=2, padding='VALID')
with tf.variable_scope('branch_pool'):
branch_pool = ops.max_pool(net, [3, 3], stride=2, padding='VALID')
net = tf.concat([branch3x3, branch3x3dbl, branch_pool], 3)
end_points['mixed_17x17x768a'] = net
# mixed4: 17 x 17 x 768.
with tf.variable_scope('mixed_17x17x768b'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 192, [1, 1])
with tf.variable_scope('branch7x7'):
branch7x7 = ops.conv2d(net, 128, [1, 1])
branch7x7 = ops.conv2d(branch7x7, 128, [1, 7])
branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
with tf.variable_scope('branch7x7dbl'):
branch7x7dbl = ops.conv2d(net, 128, [1, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [1, 7])
branch7x7dbl = ops.conv2d(branch7x7dbl, 128, [7, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
net = tf.concat([branch1x1, branch7x7, branch7x7dbl, branch_pool], 3)
end_points['mixed_17x17x768b'] = net
# mixed_5: 17 x 17 x 768.
with tf.variable_scope('mixed_17x17x768c'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 192, [1, 1])
with tf.variable_scope('branch7x7'):
branch7x7 = ops.conv2d(net, 160, [1, 1])
branch7x7 = ops.conv2d(branch7x7, 160, [1, 7])
branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
with tf.variable_scope('branch7x7dbl'):
branch7x7dbl = ops.conv2d(net, 160, [1, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7])
branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
net = tf.concat([branch1x1, branch7x7, branch7x7dbl, branch_pool], 3)
end_points['mixed_17x17x768c'] = net
# mixed_6: 17 x 17 x 768.
with tf.variable_scope('mixed_17x17x768d'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 192, [1, 1])
with tf.variable_scope('branch7x7'):
branch7x7 = ops.conv2d(net, 160, [1, 1])
branch7x7 = ops.conv2d(branch7x7, 160, [1, 7])
branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
with tf.variable_scope('branch7x7dbl'):
branch7x7dbl = ops.conv2d(net, 160, [1, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [1, 7])
branch7x7dbl = ops.conv2d(branch7x7dbl, 160, [7, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
net = tf.concat([branch1x1, branch7x7, branch7x7dbl, branch_pool], 3)
end_points['mixed_17x17x768d'] = net
# mixed_7: 17 x 17 x 768.
with tf.variable_scope('mixed_17x17x768e'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 192, [1, 1])
with tf.variable_scope('branch7x7'):
branch7x7 = ops.conv2d(net, 192, [1, 1])
branch7x7 = ops.conv2d(branch7x7, 192, [1, 7])
branch7x7 = ops.conv2d(branch7x7, 192, [7, 1])
with tf.variable_scope('branch7x7dbl'):
branch7x7dbl = ops.conv2d(net, 192, [1, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [7, 1])
branch7x7dbl = ops.conv2d(branch7x7dbl, 192, [1, 7])
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
net = tf.concat([branch1x1, branch7x7, branch7x7dbl, branch_pool], 3)
end_points['mixed_17x17x768e'] = net
# Auxiliary Head logits
aux_logits = tf.identity(end_points['mixed_17x17x768e'])
with tf.variable_scope('aux_logits'):
aux_logits = ops.avg_pool(aux_logits, [5, 5], stride=3,
padding='VALID')
aux_logits = ops.conv2d(aux_logits, 128, [1, 1], scope='proj')
# Shape of feature map before the final layer.
shape = aux_logits.get_shape()
aux_logits = ops.conv2d(aux_logits, 768, shape[1:3], stddev=0.01,
padding='VALID')
aux_logits = ops.flatten(aux_logits)
aux_logits = ops.fc(aux_logits, num_classes, activation=None,
stddev=0.001, restore=restore_logits)
end_points['aux_logits'] = aux_logits
# mixed_8: 8 x 8 x 1280.
# Note that the scope below is not changed to not void previous
# checkpoints.
# (TODO) Fix the scope when appropriate.
with tf.variable_scope('mixed_17x17x1280a'):
with tf.variable_scope('branch3x3'):
branch3x3 = ops.conv2d(net, 192, [1, 1])
branch3x3 = ops.conv2d(branch3x3, 320, [3, 3], stride=2,
padding='VALID')
with tf.variable_scope('branch7x7x3'):
branch7x7x3 = ops.conv2d(net, 192, [1, 1])
branch7x7x3 = ops.conv2d(branch7x7x3, 192, [1, 7])
branch7x7x3 = ops.conv2d(branch7x7x3, 192, [7, 1])
branch7x7x3 = ops.conv2d(branch7x7x3, 192, [3, 3],
stride=2, padding='VALID')
with tf.variable_scope('branch_pool'):
branch_pool = ops.max_pool(net, [3, 3], stride=2, padding='VALID')
net = tf.concat([branch3x3, branch7x7x3, branch_pool], 3)
end_points['mixed_17x17x1280a'] = net
# mixed_9: 8 x 8 x 2048.
with tf.variable_scope('mixed_8x8x2048a'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 320, [1, 1])
with tf.variable_scope('branch3x3'):
branch3x3 = ops.conv2d(net, 384, [1, 1])
branch3x3 = tf.concat([ops.conv2d(branch3x3, 384, [1, 3]),
ops.conv2d(branch3x3, 384, [3, 1])], 3)
with tf.variable_scope('branch3x3dbl'):
branch3x3dbl = ops.conv2d(net, 448, [1, 1])
branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3])
branch3x3dbl = tf.concat([ops.conv2d(branch3x3dbl, 384, [1, 3]),
ops.conv2d(branch3x3dbl, 384, [3, 1])], 3)
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
net = tf.concat([branch1x1, branch3x3, branch3x3dbl, branch_pool], 3)
end_points['mixed_8x8x2048a'] = net
# mixed_10: 8 x 8 x 2048.
with tf.variable_scope('mixed_8x8x2048b'):
with tf.variable_scope('branch1x1'):
branch1x1 = ops.conv2d(net, 320, [1, 1])
with tf.variable_scope('branch3x3'):
branch3x3 = ops.conv2d(net, 384, [1, 1])
branch3x3 = tf.concat([ops.conv2d(branch3x3, 384, [1, 3]),
ops.conv2d(branch3x3, 384, [3, 1])], 3)
with tf.variable_scope('branch3x3dbl'):
branch3x3dbl = ops.conv2d(net, 448, [1, 1])
branch3x3dbl = ops.conv2d(branch3x3dbl, 384, [3, 3])
branch3x3dbl = tf.concat([ops.conv2d(branch3x3dbl, 384, [1, 3]),
ops.conv2d(branch3x3dbl, 384, [3, 1])], 3)
with tf.variable_scope('branch_pool'):
branch_pool = ops.avg_pool(net, [3, 3])
branch_pool = ops.conv2d(branch_pool, 192, [1, 1])
net = tf.concat([branch1x1, branch3x3, branch3x3dbl, branch_pool], 3)
end_points['mixed_8x8x2048b'] = net
# Final pooling and prediction
with tf.variable_scope('logits'):
shape = net.get_shape()
net = ops.avg_pool(net, shape[1:3], padding='VALID', scope='pool')
# 1 x 1 x 2048
net = ops.dropout(net, dropout_keep_prob, scope='dropout')
net = ops.flatten(net, scope='flatten')
# 2048
logits = ops.fc(net, num_classes, activation=None, scope='logits',
restore=restore_logits)
# 1000
end_points['logits'] = logits
end_points['predictions'] = tf.nn.softmax(logits, name='predictions')
return logits, end_points
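(Editor's note: as written, inception_v3 returns a pair (logits, end_points), and the softmax is already available as end_points['predictions']. A minimal inference-time call might look like the sketch below; the 6-class setting, slim, and inception_arg_scope are taken from the prediction code that follows, and the placeholder name and shape are assumptions.)

# Sketch only: build the inference graph once and read the softmax from end_points.
images = tf.placeholder(tf.float32, [None, 299, 299, 3], name='input_images')  # assumed input
with slim.arg_scope(inception_arg_scope()):
    logits, end_points = inception_v3(images, num_classes=6, is_training=False)
predictions = end_points['predictions']  # softmax over the 6 classes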
Prediction code:
config = tf.ConfigProto(allow_soft_placement=True)
saver = tf.train.import_meta_graph('path/to/meta/graph')
graph = tf.get_default_graph()
with tf.Session(config=config, graph=graph) as sess:
    print graph
    saver.restore(sess, '/path/to/chpk/')
    init_op = tf.group(tf.initialize_all_variables(), tf.initialize_local_variables())
    sess.run(init_op)
    print ('Restored checkpoint file and graph')
    tens = image_preprocessing(tf.read_file('/serving/13_left.jpeg'))
    with slim.arg_scope(inception_arg_scope()):
        logits = inception_v3(tf.expand_dims(tens, 0),
                              num_classes=6,
                              is_training=False)
    prob = tf.nn.softmax(logits)
    sess.run(prob)
This produces the following error:
FailedPreconditionError (see above for traceback): Attempting to use uninitialized value mixed_17x17x768d/branch7x7dbl/Conv_3/weights_2
[[Node: mixed_17x17x768d/branch7x7dbl/Conv_3/weights_2/read = Identity[T=DT_FLOAT, _class=["loc:@mixed_17x17x768d/branch7x7dbl/Conv_3/weights_2"], _device="/job:localhost/replica:0/task:0/cpu:0"](mixed_17x17x768d/branch7x7dbl/Conv_3/weights_2)]]
I am fairly new to TensorFlow, so any help would be greatly appreciated. I am doing something wrong here but cannot figure out what. Thanks in advance :)
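(Editor's note: the weights_2 suffix in the failing variable name suggests that calling inception_v3() after import_meta_graph() built a second copy of the network in the same graph, and that second copy was never restored from the checkpoint. A sketch of the alternative, reusing the tensors the imported meta graph already contains, is below; the checkpoint paths and tensor names are assumptions and depend on how the graph was exported.)

# Sketch: restore the saved graph and run its existing prediction op,
# instead of rebuilding inception_v3 and creating fresh (uninitialized) variables.
import numpy as np
import tensorflow as tf

saver = tf.train.import_meta_graph('/path/to/model.ckpt.meta')
graph = tf.get_default_graph()
with tf.Session() as sess:
    saver.restore(sess, '/path/to/model.ckpt')  # restores every variable already in the graph
    # Hypothetical tensor names -- list graph.get_operations() to find the real ones.
    images = graph.get_tensor_by_name('batch_processing/Reshape:0')
    probs = graph.get_tensor_by_name('tower_0/logits/predictions:0')
    batch = np.zeros((64, 299, 299, 3), dtype=np.float32)  # stand-in for a real preprocessed batch
    print(sess.run(probs, feed_dict={images: batch}))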
Edit 1
I started over with a new session, reloading the graph and the weights. I froze the graph and saved it as frozen_model.pb. Here is my code:
def freeze_graph(model_folder):
    # We retrieve our checkpoint fullpath
    checkpoint = tf.train.get_checkpoint_state(model_folder)
    input_checkpoint = checkpoint.model_checkpoint_path
    # We specify the full filename of our frozen graph
    absolute_model_folder = "/".join(input_checkpoint.split('/')[:-1])
    output_graph = absolute_model_folder + "/frozen_model.pb"
    # Before exporting our graph, we need to specify our output node
    # This is how TF decides what part of the graph it has to keep and what part it can dump
    output_node_names = "tower_0/logits/predictions"
    # We clear devices to allow TensorFlow to control on which device it will load operations
    clear_devices = True
    # We import the meta graph and retrieve a Saver
    saver = tf.train.import_meta_graph(input_checkpoint + '.meta', clear_devices=clear_devices)
    # We retrieve the protobuf graph definition
    graph = tf.get_default_graph()
    input_graph_def = graph.as_graph_def()
    # We start a session and restore the graph weights
    with tf.Session() as sess:
        saver.restore(sess, input_checkpoint)
        for op in sess.graph.get_operations():
            print(op.name)
        # We use a built-in TF helper to export variables to constants
        output_graph_def = graph_util.convert_variables_to_constants(
            sess,                          # The session is used to retrieve the weights
            input_graph_def,               # The graph_def is used to retrieve the nodes
            output_node_names.split(","))  # The output node names are used to select the useful nodes
        # Finally we serialize and dump the output graph to the filesystem
        with tf.gfile.GFile(output_graph, "wb") as f:
            f.write(output_graph_def.SerializeToString())
        print("%d ops in the final graph." % len(output_graph_def.node))


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_folder", type=str, help="Model folder to export")
    args = parser.parse_args()
    freeze_graph(args.model_folder)
I loaded the frozen graph into a new session. These are my node names:
prefix/batch_processing/batch_join/fifo_queue
prefix/batch_processing/batch_join/n
prefix/batch_processing/batch_join
prefix/batch_processing/Reshape/shape
prefix/batch_processing/Reshape
.
.
.
prefix/logits/logits/weights
prefix/logits/logits/weights/read
prefix/logits/logits/biases
prefix/logits/logits/biases/read
prefix/tower_0/logits/logits/xw_plus_b/MatMul
prefix/tower_0/logits/logits/xw_plus_b
prefix/tower_0/logits/predictions
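(Editor's note: with this many ops it can help to filter the listing when hunting for input and output candidates. A small sketch, assuming graph is the object returned by load_graph below:)

# Sketch: print only the ops that look like inputs or prediction outputs.
for op in graph.get_operations():
    if op.type == 'Placeholder' or 'predictions' in op.name or 'fifo_queue' in op.name:
        print(op.name, op.type, [out.dtype for out in op.outputs])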
I use the input node and the final node (predictions) to classify a new image. Here is my code:
def load_graph(frozen_graph_filename):
    # We load the protobuf file from the disk and parse it to retrieve the
    # unserialized graph_def
    with tf.gfile.GFile(frozen_graph_filename, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    # Then, we can use again a convenient built-in function to import a graph_def into the
    # current default Graph
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(
            graph_def,
            input_map=None,
            return_elements=None,
            name="prefix",
            op_dict=None,
            producer_op_list=None
        )
    return graph

#graph = load_graph('/serving/frozen_model.pb')

if __name__ == '__main__':
    import scipy.misc
    # Let's allow the user to pass the filename as an argument
    parser = argparse.ArgumentParser()
    parser.add_argument("--frozen_model_filename", default="/serving/frozen_model.pb", type=str, help="Frozen model file to import")
    args = parser.parse_args()
    # We use our "load_graph" function
    graph = load_graph(args.frozen_model_filename)
    # We can verify that we can access the list of operations in the graph
    for op in graph.get_operations():
        print(op.name)
        # prefix/Placeholder/inputs_placeholder
        # ...
        # prefix/Accuracy/predictions
    # We access the input and output nodes
    x = graph.get_tensor_by_name('prefix/batch_processing/batch_join/fifo_queue:0')
    y = graph.get_tensor_by_name('prefix/tower_0/logits/predictions:0')
    image_data = tf.gfile.FastGFile('/serving/13_left.jpeg', 'rb').read()
    # We launch a Session
    with tf.Session(graph=graph) as sess:
        # Note: we didn't initialize/restore anything, everything is stored in the graph_def
        y_out = sess.run(y, feed_dict={
            x: image_data})  # < 45
        print(y_out)  # [[ False ]] Yay, it works!
I get the following error:
Traceback (most recent call last):
File "predict_3.py", line 183, in <module>
x: image_data) # < 45
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 767, in run
run_metadata_ptr)
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/client/session.py", line 929, in _run
subfeed_dtype = subfeed_t.dtype.as_numpy_dtype
File "/usr/local/lib/python2.7/dist-packages/tensorflow/python/framework/dtypes.py", line 138, in as_numpy_dtype
return _TF_TO_NP[self._type_enum]
KeyError: 20
I am not sure what is going wrong now, since the node names look correct to me. Any help on this would be greatly appreciated.
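(Editor's note: dtype enum 20 in TF 1.x is DT_RESOURCE, so the KeyError points at the chosen input tensor being the queue's resource handle rather than an image tensor. A quick check along these lines, using the tensor names from the listing above, would make that visible:)

# Sketch: inspect candidate input tensors before feeding them.
x_queue = graph.get_tensor_by_name('prefix/batch_processing/batch_join/fifo_queue:0')
x_reshape = graph.get_tensor_by_name('prefix/batch_processing/Reshape:0')
print(x_queue.dtype)                           # resource -- a queue handle, not feedable
print(x_reshape.dtype, x_reshape.get_shape())  # float32 image batch -- a feedable candidate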
Comments:
What version of TensorFlow are you running?
I am running TF v1.0

Answer 1:
In the end I figured out how to solve this. We have to freeze the graph to produce a .pb file, which lets us reuse the nodes for prediction. Here is my code:
def freeze_graph(model_folder):
    # We retrieve our checkpoint fullpath
    checkpoint = tf.train.get_checkpoint_state(model_folder)
    input_checkpoint = checkpoint.model_checkpoint_path
    # We specify the full filename of our frozen graph
    absolute_model_folder = "/".join(input_checkpoint.split('/')[:-1])
    output_graph = absolute_model_folder + "/frozen_model.pb"
    # Before exporting our graph, we need to specify our output node
    # This is how TF decides what part of the graph it has to keep and what part it can dump
    # NOTE: this variable is plural, because you can have multiple output nodes
    output_node_names = "tower_0/logits/predictions"
    # We clear devices to allow TensorFlow to control on which device it will load operations
    clear_devices = True
    # We import the meta graph and retrieve a Saver
    saver = tf.train.import_meta_graph(input_checkpoint + '.meta', clear_devices=clear_devices)
    # We retrieve the protobuf graph definition
    graph = tf.get_default_graph()
    input_graph_def = graph.as_graph_def()
    # We start a session and restore the graph weights
    with tf.Session() as sess:
        saver.restore(sess, input_checkpoint)
        for op in sess.graph.get_operations():
            print(op.name)
        # We use a built-in TF helper to export variables to constants
        output_graph_def = graph_util.convert_variables_to_constants(
            sess,                          # The session is used to retrieve the weights
            input_graph_def,               # The graph_def is used to retrieve the nodes
            output_node_names.split(","))  # The output node names are used to select the useful nodes
        # Finally we serialize and dump the output graph to the filesystem
        with tf.gfile.GFile(output_graph, "wb") as f:
            f.write(output_graph_def.SerializeToString())
        print("%d ops in the final graph." % len(output_graph_def.node))


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_folder", type=str, help="Model folder to export")
    args = parser.parse_args()
    freeze_graph(args.model_folder)
After this, we can load the graph. Here is my code:
def load_graph(frozen_graph_filename):
    # We load the protobuf file from the disk and parse it to retrieve the
    # unserialized graph_def
    with tf.gfile.GFile(frozen_graph_filename, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    # Then, we can use again a convenient built-in function to import a graph_def into the
    # current default Graph
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(
            graph_def,
            input_map=None,
            return_elements=None,
            name="prefix",
            op_dict=None,
            producer_op_list=None
        )
    return graph
This gives us the protocol buffer (frozen_model.pb), which we can now load and query:
if __name__ == '__main__':
    # Let's allow the user to pass the filename as an argument
    parser = argparse.ArgumentParser()
    parser.add_argument("--frozen_model_filename", default="/serving/frozen_model.pb", type=str, help="Frozen model file to import")
    parser.add_argument("--image_name", type=str, help="Image to test")
    args = parser.parse_args()
    # Create test batch
    image_data = create_test_batch(args.image_name)
    # We use our "load_graph" function
    graph = load_graph(args.frozen_model_filename)
    # We can verify that we can access the list of operations in the graph
    #for op in graph.get_operations():
    #    print(op.name)
    # prefix/Placeholder/inputs_placeholder
    # ...
    # prefix/Accuracy/predictions
    # We access the input and output nodes
    x = graph.get_tensor_by_name('prefix/batch_processing/Reshape:0')  # Input tensor
    y = graph.get_tensor_by_name('prefix/tower_0/logits/predictions:0')  # Output tensor
    # We launch a Session
    with tf.Session(graph=graph) as sess:
        # Note: we didn't initialize/restore anything, everything is stored in the graph_def
        y_out = sess.run(y, feed_dict={
            x: image_data})  # < 45
        print(y_out)
My input node (x) expects a batch of 64 images of size 299x299x3, so I hacked my way around it by replicating the test image 64 times to build the input batch. I do that as follows:
def create_test_batch(input_image):
    data = []
    img = cv2.imread(input_image)  # Read the test image
    img_yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)  # Convert RGB image to YUV
    # equalize the histogram of the Y channel
    img_yuv[:,:,0] = cv2.equalizeHist(img_yuv[:,:,0])
    # convert the YUV image back to RGB format
    img_output = cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR)
    img_resize = cv2.resize(img_output, (299,299))  # Resize the image to the size accepted by the InceptionV3 model
    for i in range(0,64):
        data.append(img_resize)  # Create a batch of 64 images
        #data.append(np.resize((ndimage.imread('/serving/'+input_image)),(299,299,3)))
    print np.shape(data)
    return data
If there is a better way of solving the input batching problem, I would appreciate it.
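(Editor's note: one possible simplification, sketched under the assumption that only the batch dimension matters here, is to build the batch with np.tile instead of a Python loop and return a float32 array directly.)

import cv2
import numpy as np

def create_test_batch_tiled(input_image, batch_size=64):
    # Sketch: same preprocessing as above, batch built with np.tile.
    img = cv2.imread(input_image)
    img_yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
    img_yuv[:, :, 0] = cv2.equalizeHist(img_yuv[:, :, 0])    # equalize the Y channel
    img = cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR)
    img = cv2.resize(img, (299, 299)).astype(np.float32)
    return np.tile(img[np.newaxis, ...], (batch_size, 1, 1, 1))  # shape (64, 299, 299, 3)

Another option would be to remap the graph input to a fresh placeholder through the input_map argument of tf.import_graph_def, so that a single image could be fed without replicating it at all.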