How to export, import, and run a custom model in tf2?
【Title】: How to export, import, and run custom model in tf2?
【Posted】: 2021-12-03 01:48:17
【Question】: I am training a custom text classification model on dummy data with tf2. The model appears to train fine. I then export it, and I run into problems when importing the saved model and running it.
Also, I am not sure where the vocabulary and the tokens are saved.
import tensorflow as tf
print("tf.__version__: ", tf.__version__)
import os, sys, random
from pprint import pprint
import numpy as np

# CONFIGURATION
START_EPOCH = 0
END_EPOCH = 3
MAX_LENGTH = 5
BATCH_SIZE = 256
WORD_EMB_DIM = 32
LSTM_DIM = 32
SAVE_MODEL_PATH = "saved_models/1"

# DUMMY DATA
text_data = [
    "i like this movie",
    "i feel happy watch movie",
    "great taste",
    "like the look of it",
    "great news",
    "hate this movie",
    "very bad news",
    "horrible movie",
    "very bad news",
    "i do not like it"
]
label_data = [1, 1, 1, 1, 1, 0, 0, 0, 0, 0]

# BUILD DATASET
class text_dataset():
    def __init__(self):
        self._build_vocab()

    def _build_vocab(self):
        words = []
        for words_list in [t.split(" ") for t in text_data]:
            words.extend(words_list)
        words = sorted(list(set(words)))
        self.item2idx = {}
        self.item2idx["<pad>"] = 0
        for w_idx, w in enumerate(words):
            self.item2idx[w] = w_idx + 1
        self.idx2item = {w_idx: w for w, w_idx in self.item2idx.items()}
        self.vocab_size = len(self.idx2item)
        print("self.vocab_size: ", self.vocab_size)

    def data_generator(self):
        batch_idx = 0
        while batch_idx < 8:
            sample_indices = [random.randint(0, len(text_data)-1) for _ in range(BATCH_SIZE)]
            x_raw = [text_data[i] for i in sample_indices]
            y = [label_data[i] for i in sample_indices]
            x_raw = [i.split(" ") for i in x_raw]
            x_raw = [[self.item2idx[j] for j in i] for i in x_raw]
            zero_array = np.zeros((BATCH_SIZE, MAX_LENGTH))
            for i in range(len(x_raw)):
                zero_array[i, :len(x_raw[i])] = x_raw[i]
            x_train = np.array(zero_array)  # (BATCH_SIZE, MAX_LENGTH)
            y_train = np.array(y)           # (BATCH_SIZE, )
            yield tuple((x_train, y_train))
            batch_idx += 1

# BUILD MODEL
class classification_model(tf.keras.Model):
    def __init__(self, vocab_size):
        super(classification_model, self).__init__()
        self.word_emb = tf.keras.layers.Embedding(vocab_size,
                                                  WORD_EMB_DIM,
                                                  mask_zero=True,
                                                  name="word_embedding_layer")
        self.lstm = tf.keras.layers.LSTM(LSTM_DIM, return_state=True, name="rnn_layer")
        self.dense = tf.keras.layers.Dense(2)

    def call(self, word_emb_inp, initial_state=None, training=True):
        word_emb = self.word_emb(word_emb_inp)                    # (bs, MAX_LEN, WORD_EMB_DIM)
        word_emb_mask = self.word_emb.compute_mask(word_emb_inp)  # (bs, MAX_LEN)
        lstm_inp = word_emb                                       # (bs, MAX_LEN, WORD_EMB_DIM)
        lstm_inp_mask = word_emb_mask                             # (bs, MAX_LEN)
        lstm, state_h, state_c = self.lstm(lstm_inp, mask=word_emb_mask, initial_state=initial_state)
        dense_out = self.dense(lstm)
        return dense_out

# INITIALIZING DATASET AND MODEL
dataset = text_dataset()
model = classification_model(dataset.vocab_size)
model.build(input_shape=(None, MAX_LENGTH))
model.summary()

optimizer = tf.keras.optimizers.Adam()
loss_func = tf.keras.losses.SparseCategoricalCrossentropy(
    from_logits=True,
    name='sparse_categorical_crossentropy'
)

# TRAINING
print("\n")
print("TRAINING")
print("\n")
for e in range(START_EPOCH, END_EPOCH):
    print(f"EPOCH: {str(e+1).zfill(len(str(END_EPOCH)))}/{END_EPOCH}")
    train_gen = dataset.data_generator
    train_gen = tf.data.Dataset.from_generator(
        train_gen,
        output_types=(tf.dtypes.int64, tf.dtypes.int64),
        output_shapes=((None, MAX_LENGTH), (None,))
    )
    for batch_idx, batch in enumerate(train_gen):
        # print(batch_idx, type(batch))
        # print(batch[0].shape, batch[1].shape)
        x, y = batch
        with tf.GradientTape() as tape:
            logits = model(x)  # the model is supposed to output the logits (BATCH_SIZE, 2)
            loss_value = loss_func(y, logits)
            print(loss_value.numpy(), end="\r")
        grads = tape.gradient(loss_value, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
    print(f"loss@epoch#{e}: {loss_value.numpy()}")

# EXPORT MODEL
print("\nEXPORTING THE MODEL\n")
tf.saved_model.save(model,
                    export_dir=SAVE_MODEL_PATH,
                    signatures=None)

# IMPORTING MODEL
imported = tf.saved_model.load(SAVE_MODEL_PATH)
print(type(imported))
pprint(imported.signatures)
inp = tf.constant([[1, 2, 3, 0, 0]])
out = imported(inp)
print("out: ", out)
I have multiple questions here:
- How do I export the vocabulary?
- How do I access the saved vocabulary?
- How do I do the text preprocessing when exporting and importing the model?
- How do I make predictions after importing the model?

This is what I get after running the code from the part that loads the model:
2021-10-14 22:52:32.754962: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library cudart64_110.dll
2021-10-14 22:52:37.487561: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library nvcuda.dll
2021-10-14 22:52:38.559793: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1733] Found device 0 with properties:
pciBusID: 0000:01:00.0 name: GeForce GTX 1660 Ti computeCapability: 7.5
coreClock: 1.59GHz coreCount: 24 deviceMemorySize: 6.00GiB deviceMemoryBandwidth: 268.26GiB/s
2021-10-14 22:52:38.560014: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library cudart64_110.dll
2021-10-14 22:52:38.567645: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library cublas64_11.dll
2021-10-14 22:52:38.567785: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library cublasLt64_11.dll
2021-10-14 22:52:38.572346: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library cufft64_10.dll
2021-10-14 22:52:38.573904: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library curand64_10.dll
2021-10-14 22:52:38.583016: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library cusolver64_11.dll
2021-10-14 22:52:38.586465: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library cusparse64_11.dll
2021-10-14 22:52:38.587604: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library cudnn64_8.dll
2021-10-14 22:52:38.587822: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1871] Adding visible gpu devices: 0
2021-10-14 22:52:38.588323: I tensorflow/core/platform/cpu_feature_guard.cc:142] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX AVX2
To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.
2021-10-14 22:52:38.589853: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1733] Found device 0 with properties:
pciBusID: 0000:01:00.0 name: GeForce GTX 1660 Ti computeCapability: 7.5
coreClock: 1.59GHz coreCount: 24 deviceMemorySize: 6.00GiB deviceMemoryBandwidth: 268.26GiB/s
2021-10-14 22:52:38.590197: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1871] Adding visible gpu devices: 0
2021-10-14 22:52:39.163026: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1258] Device interconnect StreamExecutor with strength 1 edge matrix:
2021-10-14 22:52:39.163216: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1264] 0
2021-10-14 22:52:39.163535: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1277] 0: N
2021-10-14 22:52:39.163842: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1418] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 3983 MB memory) -> physical GPU (device: 0, name: GeForce GTX 1660 Ti, pci bus id: 0000:01:00.0, compute capability: 7.5)
2021-10-14 22:52:40.735500: W tensorflow/core/common_runtime/graph_constructor.cc:809] Node 'cond/while' has 13 outputs but the _output_shapes attribute specifies shapes for 46 outputs. Output shapes may be inaccurate.
2021-10-14 22:52:40.793113: W tensorflow/core/common_runtime/graph_constructor.cc:809] Node 'cond/while' has 13 outputs but the _output_shapes attribute specifies shapes for 46 outputs. Output shapes may be inaccurate.
2021-10-14 22:52:40.975299: W tensorflow/core/common_runtime/graph_constructor.cc:809] Node 'cond' has 5 outputs but the _output_shapes attribute specifies shapes for 46 outputs. Output shapes may be inaccurate.
2021-10-14 22:52:41.209622: W tensorflow/core/common_runtime/graph_constructor.cc:809] Node 'cond/while' has 13 outputs but the _output_shapes attribute specifies shapes for 46 outputs. Output shapes may be inaccurate.
2021-10-14 22:52:42.214461: W tensorflow/core/common_runtime/graph_constructor.cc:809] Node 'cond' has 5 outputs but the _output_shapes attribute specifies shapes for 46 outputs. Output shapes may be inaccurate.
2021-10-14 22:52:42.421412: W tensorflow/core/common_runtime/graph_constructor.cc:809] Node 'cond' has 5 outputs but the _output_shapes attribute specifies shapes for 46 outputs. Output shapes may be inaccurate.
2021-10-14 22:52:42.698141: W tensorflow/core/common_runtime/graph_constructor.cc:809] Node 'cond/while' has 13 outputs but the _output_shapes attribute specifies shapes for 46 outputs. Output shapes may be inaccurate.
2021-10-14 22:52:46.216757: W tensorflow/core/common_runtime/graph_constructor.cc:809] Node 'cond/while' has 13 outputs but the _output_shapes attribute specifies shapes for 46 outputs. Output shapes may be inaccurate.
2021-10-14 22:52:46.891681: W tensorflow/core/common_runtime/graph_constructor.cc:809] Node 'cond/while' has 13 outputs but the _output_shapes attribute specifies shapes for 46 outputs. Output shapes may be inaccurate.
2021-10-14 22:52:46.922376: W tensorflow/core/common_runtime/graph_constructor.cc:809] Node 'cond' has 5 outputs but the _output_shapes attribute specifies shapes for 46 outputs. Output shapes may be inaccurate.
2021-10-14 22:52:47.696619: W tensorflow/core/common_runtime/graph_constructor.cc:809] Node 'cond/while' has 13 outputs but the _output_shapes attribute specifies shapes for 46 outputs. Output shapes may be inaccurate.
2021-10-14 22:52:47.726003: W tensorflow/core/common_runtime/graph_constructor.cc:809] Node 'cond' has 5 outputs but the _output_shapes attribute specifies shapes for 46 outputs. Output shapes may be inaccurate.
2021-10-14 22:52:47.818013: W tensorflow/core/common_runtime/graph_constructor.cc:809] Node 'cond/while' has 13 outputs but the _output_shapes attribute specifies shapes for 46 outputs. Output shapes may be inaccurate.
2021-10-14 22:52:47.847961: W tensorflow/core/common_runtime/graph_constructor.cc:809] Node 'cond' has 5 outputs but the _output_shapes attribute specifies shapes for 46 outputs. Output shapes may be inaccurate.
2021-10-14 22:52:48.188927: W tensorflow/core/common_runtime/graph_constructor.cc:809] Node 'cond/while' has 13 outputs but the _output_shapes attribute specifies shapes for 46 outputs. Output shapes may be inaccurate.
2021-10-14 22:52:48.220841: W tensorflow/core/common_runtime/graph_constructor.cc:809] Node 'cond' has 5 outputs but the _output_shapes attribute specifies shapes for 46 outputs. Output shapes may be inaccurate.
2021-10-14 22:52:48.281547: W tensorflow/core/common_runtime/graph_constructor.cc:809] Node 'cond' has 5 outputs but the _output_shapes attribute specifies shapes for 46 outputs. Output shapes may be inaccurate.
2021-10-14 22:52:48.382800: W tensorflow/core/common_runtime/graph_constructor.cc:809] Node 'cond' has 5 outputs but the _output_shapes attribute specifies shapes for 46 outputs. Output shapes may be inaccurate.
<class 'tensorflow.python.saved_model.load.Loader._recreate_base_user_object.<locals>._UserObject'>
_SignatureMap({'serving_default': <ConcreteFunction signature_wrapper(*, input_1) at 0x1AEB12589A0>})
Traceback (most recent call last):
File "D:\daftar\tensorflow_serving\load.py", line 13, in <module>
out = imported(inp)
File "C:\Users\anime\AppData\Local\Programs\Python\Python39\lib\site-packages\tensorflow\python\saved_model\load.py", line 670, in _call_attribute
return instance.__call__(*args, **kwargs)
File "C:\Users\anime\AppData\Local\Programs\Python\Python39\lib\site-packages\tensorflow\python\eager\def_function.py", line 889, in __call__
result = self._call(*args, **kwds)
File "C:\Users\anime\AppData\Local\Programs\Python\Python39\lib\site-packages\tensorflow\python\eager\def_function.py", line 933, in _call
self._initialize(args, kwds, add_initializers_to=initializers)
File "C:\Users\anime\AppData\Local\Programs\Python\Python39\lib\site-packages\tensorflow\python\eager\def_function.py", line 763, in _initialize
self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
File "C:\Users\anime\AppData\Local\Programs\Python\Python39\lib\site-packages\tensorflow\python\eager\function.py", line 3050, in _get_concrete_function_internal_garbage_collected
graph_function, _ = self._maybe_define_function(args, kwargs)
File "C:\Users\anime\AppData\Local\Programs\Python\Python39\lib\site-packages\tensorflow\python\eager\function.py", line 3444, in _maybe_define_function
graph_function = self._create_graph_function(args, kwargs)
File "C:\Users\anime\AppData\Local\Programs\Python\Python39\lib\site-packages\tensorflow\python\eager\function.py", line 3279, in _create_graph_function
func_graph_module.func_graph_from_py_func(
File "C:\Users\anime\AppData\Local\Programs\Python\Python39\lib\site-packages\tensorflow\python\framework\func_graph.py", line 999, in func_graph_from_py_func
func_outputs = python_func(*func_args, **func_kwargs)
File "C:\Users\anime\AppData\Local\Programs\Python\Python39\lib\site-packages\tensorflow\python\eager\def_function.py", line 672, in wrapped_fn
out = weak_wrapped_fn().__wrapped__(*args, **kwds)
File "C:\Users\anime\AppData\Local\Programs\Python\Python39\lib\site-packages\tensorflow\python\saved_model\function_deserialization.py", line 285, in restored_function_body
raise ValueError(
ValueError: Could not find matching function to call loaded from the SavedModel. Got:
Positional arguments (3 total):
* Tensor("song_emb_inp:0", shape=(1, 5), dtype=int32)
* None
* True
Keyword arguments:
Expected these arguments to match one of the following 4 option(s):
Option 1:
Positional arguments (3 total):
* TensorSpec(shape=(None, 5), dtype=tf.int64, name='input_1')
* None
* False
Keyword arguments:
Option 2:
Positional arguments (3 total):
* TensorSpec(shape=(None, 5), dtype=tf.int64, name='song_emb_inp')
* None
* False
Keyword arguments:
Option 3:
Positional arguments (3 total):
* TensorSpec(shape=(None, 5), dtype=tf.int64, name='song_emb_inp')
* None
* True
Keyword arguments:
Option 4:
Positional arguments (3 total):
* TensorSpec(shape=(None, 5), dtype=tf.int64, name='input_1')
* None
* True
Keyword arguments:
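From the four options above I gather that the loaded function was traced only for tf.int64 inputs of shape (None, 5), so I suspect a call along the following lines would at least match one of the traces (a minimal sketch; the values are arbitrary), but that still leaves my vocabulary and preprocessing questions open:

inp = tf.constant([[1, 2, 3, 0, 0]], dtype=tf.int64)   # int64 + shape (1, 5) matches the (None, 5) specs
out = imported(inp)                                     # with the default initial_state/training this hits the training=True trace
# or go through the serving signature, which takes the named argument input_1
out = imported.signatures["serving_default"](input_1=inp)
print("out: ", out)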
【Comments】:
【Answer 1】: I found that I have to define input_signatures on the methods that are called after importing the exported model.
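The core pattern, as a stripped-down sketch (the wrapper and method names here are only for illustration; the full working script follows below): wrap the trained model in a tf.Module, decorate every method you want to call after loading with @tf.function(input_signature=[...]), and hand those methods to tf.saved_model.save via the signatures argument.

class ExportWrapper(tf.Module):  # illustrative name
    def __init__(self, model):
        self.model = model

    # the fixed input_signature is what makes this method callable on the loaded SavedModel
    @tf.function(input_signature=[tf.TensorSpec(shape=[None], dtype=tf.string)])
    def tokenize(self, inp_string):
        return self.model.get_tokens_indices(inp_string)

wrapper = ExportWrapper(model)
tf.saved_model.save(wrapper, SAVE_MODEL_PATH, signatures={"tokenize": wrapper.tokenize})
loaded = tf.saved_model.load(SAVE_MODEL_PATH)
print(loaded.signatures["tokenize"](inp_string=tf.constant(["great movie"])))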
import tensorflow as tf
print("tf.__version__: ", tf.__version__)
import os, sys, random, pdb, json
from pprint import pprint
import numpy as np

START_EPOCH = 0
END_EPOCH = 3
MAX_LENGTH = 5
BATCH_SIZE = 512
WORD_EMB_DIM = 32
LSTM_DIM = 4
SAVE_MODEL_PATH = "saved_models/1"

def q(exit_msg=""):
    print(f"\n>{exit_msg}<")
    sys.exit()

text_data = [
    "i like this movie",
    "i feel happy watch movie",
    "great taste",
    "like the look of it",
    "great news",
    "hate this movie",
    "very bad news",
    "horrible movie",
    "very bad news",
    "i do not like it"
]
label_data = [1, 1, 1, 1, 1, 0, 0, 0, 0, 0]

# BUILD DATASET
class text_dataset():
    def __init__(self):
        self._build_vocab_list()

    def _build_vocab_list(self):
        words = []
        for words_list in [t.split(" ") for t in text_data]:
            words.extend(words_list)
        words = sorted(list(set(words)))
        self.vocab_list = words
        print(f"len of vocab_list: {len(self.vocab_list)}")

    def build_vocab_mapping_from_tokenizer(self, tokenizer_vocab_list):
        self.item2idx = {w: w_idx for w_idx, w in enumerate(tokenizer_vocab_list)}
        self.idx2item = {w_idx: w for w_idx, w in enumerate(tokenizer_vocab_list)}
        print(f"len of tokenizer_vocab_list: {len(self.idx2item)}")

    def data_generator(self):
        batch_idx = 0
        while batch_idx < 64:
            sample_indices = [random.randint(0, len(text_data)-1) for _ in range(BATCH_SIZE)]
            x_raw = [text_data[i] for i in sample_indices]
            y = [label_data[i] for i in sample_indices]
            yield tuple((x_raw, y))
            batch_idx += 1

# BUILD MODEL
class classification_model(tf.keras.Model):
    def __init__(self, vocab_list):
        super(classification_model, self).__init__()
        # self.input_layer = tf.keras.Input(shape=(1,), dtype=tf.string)
        self.vectorize_layer = tf.keras.layers.TextVectorization(
            max_tokens=None,
            standardize='lower_and_strip_punctuation',
            split='whitespace', ngrams=None,
            output_mode='int', output_sequence_length=None,
            pad_to_max_tokens=False, vocabulary=vocab_list)
        self.word_emb = tf.keras.layers.Embedding(len(self.vectorize_layer.get_vocabulary()),
                                                  WORD_EMB_DIM,
                                                  mask_zero=True,
                                                  name="word_embedding_layer")
        self.lstm = tf.keras.layers.LSTM(LSTM_DIM, return_state=True, name="rnn_layer")
        self.dense = tf.keras.layers.Dense(2)

    @tf.function(input_signature=[tf.TensorSpec(shape=[None], dtype=tf.string)])
    def get_tokens_indices(self, inp_string):
        return self.vectorize_layer(inp_string)

    # @tf.function(input_signature=[tf.TensorSpec(shape=(None), dtype=tf.string),
    #                               tf.TensorSpec(shape=(None, LSTM_DIM), dtype=tf.float32),
    #                               tf.TensorSpec(shape=(None, LSTM_DIM), dtype=tf.float32)])
    @tf.function(input_signature=[tf.TensorSpec(shape=[None], dtype=tf.string),
                                  tf.TensorSpec(shape=[None, LSTM_DIM], dtype=tf.float32),
                                  tf.TensorSpec(shape=[None, LSTM_DIM], dtype=tf.float32)])
    def call(self, inp_string, state_h, state_c):
        word_emb_inp = self.vectorize_layer(inp_string)
        word_emb = self.word_emb(word_emb_inp)                    # (bs, MAX_LEN, WORD_EMB_DIM)
        word_emb_mask = self.word_emb.compute_mask(word_emb_inp)  # (bs, MAX_LEN)
        lstm_inp = word_emb                                       # (bs, MAX_LEN, WORD_EMB_DIM)
        lstm_inp_mask = word_emb_mask                             # (bs, MAX_LEN)
        lstm, state_h, state_c = self.lstm(lstm_inp, mask=word_emb_mask, initial_state=[state_h, state_c])
        dense_out = self.dense(lstm)
        return dense_out, state_h, state_c

# INITIALIZING DATASET AND MODEL
dataset = text_dataset()
model = classification_model(dataset.vocab_list)
# print(help(model.build))
# model.build(input_shape=(None, MAX_LENGTH))
# model.summary()
tokenizer_vocab_list = model.vectorize_layer.get_vocabulary()
# print("tokenizer_vocab_list: ", tokenizer_vocab_list)
dataset.build_vocab_mapping_from_tokenizer(tokenizer_vocab_list)
# item2idx = dataset.item2idx
# idx2item = dataset.idx2item
# pprint(item2idx)
# pprint(idx2item)

optimizer = tf.keras.optimizers.Adam()
loss_func = tf.keras.losses.SparseCategoricalCrossentropy(
    from_logits=True,
    name='sparse_categorical_crossentropy'
)

# TRAINING
print("\nTRAINING\n")
for e in range(START_EPOCH, END_EPOCH):
    print(f"EPOCH: {str(e+1).zfill(len(str(END_EPOCH)))}/{END_EPOCH}")
    train_gen = dataset.data_generator
    train_gen = tf.data.Dataset.from_generator(
        train_gen,
        output_types=(tf.dtypes.string, tf.dtypes.int32),
        output_shapes=((None,), (None,))
    )
    for batch_idx, batch in enumerate(train_gen):
        x, y = batch
        with tf.GradientTape() as tape:
            zero_state_h_tensor = tf.zeros((BATCH_SIZE, LSTM_DIM), dtype=tf.dtypes.float32)
            zero_state_c_tensor = tf.zeros((BATCH_SIZE, LSTM_DIM), dtype=tf.dtypes.float32)
            logits, _, _ = model(x, zero_state_h_tensor, zero_state_c_tensor)  # the model outputs the logits (BATCH_SIZE, 2) plus the LSTM states
            loss_value = loss_func(y, logits)
            print(loss_value.numpy(), end="\r")
        grads = tape.gradient(loss_value, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
    print(f"loss@epoch#{e}: {loss_value.numpy()}")

print("\nEXPORTING THE MODEL...")
class MyModule(tf.Module):
    def __init__(self, model, item2idx):
        self.model = model
        self.this = json.loads(item2idx)  # keep a python copy of the mapping on the module

    @tf.function(input_signature=[tf.TensorSpec(shape=(None), dtype=tf.string),
                                  tf.TensorSpec(shape=(None, LSTM_DIM), dtype=tf.float32),
                                  tf.TensorSpec(shape=(None, LSTM_DIM), dtype=tf.float32)])
    def predict(self, inp_string, state_h, state_c):
        # tokenized_input = self.model.vectorize_layer(inp_string)  # doesn't work
        tokenized_input = self.model.get_tokens_indices(inp_string)
        result, state_h, state_c = self.model(inp_string, state_h, state_c)
        return {"scores": result,
                "state_h": state_h,
                "state_c": state_c,
                "tokenized_input": tokenized_input}

    @tf.function(input_signature=[])
    def metadata(self):
        return {"item2idx": item2idx}

item2idx = json.dumps(dataset.item2idx)
module = MyModule(model, item2idx)
tf.saved_model.save(module,
                    SAVE_MODEL_PATH,
                    signatures={"predict": module.predict,
                                # "preprocess": module.preprocess,
                                "metadata": module.metadata})

print("\nIMPORTING THE MODEL...")
imported = tf.saved_model.load(SAVE_MODEL_PATH)
imported_item2idx = imported.signatures["metadata"]()["item2idx"]
json.loads(imported_item2idx.numpy())

test_text = "movie was"
print("test_text: ", test_text)

def get_zero_state_tensor():
    return tf.zeros((1, LSTM_DIM), dtype=tf.dtypes.float32)

state_h = get_zero_state_tensor()
state_c = get_zero_state_tensor()
inp = tf.constant([test_text], dtype=tf.string)
out = imported.signatures["predict"](inp_string=inp, state_h=state_h, state_c=state_c)
print("\nout")
pprint(out)
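To show how the exported vocabulary can actually be used at inference time, here is a small follow-up sketch; it assumes the script above has just run, so imported_item2idx and out are still in scope:

# rebuild idx2item from the JSON string stored behind the "metadata" signature
idx2item = {int(v): k for k, v in json.loads(imported_item2idx.numpy()).items()}
# map the token ids produced by the SavedModel back to vocabulary entries
token_ids = out["tokenized_input"].numpy()[0]
print([idx2item[int(i)] for i in token_ids if int(i) != 0])
# class scores (logits) for the two labels
print(out["scores"].numpy())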
【Comments】: