Can't save/load model using keras.load_model - IndexError: list index out of range

Posted: 2019-06-18 01:04:18

【Question】:

I'm building a CycleGAN with TensorFlow and Keras, following the approaches described here and here.

The network structure is fairly complex: many models are nested inside one another.

I can't save and then reload the trained models.

After training finishes, I use

generator_AtoB.save("models/generator_AtoB.h5")

pickle.dump(generator_AtoB, saveFile)

to save the model: neither call raises an error, and a file is created at the supplied path.

Inspecting the .h5 file with h5dump | less, I can see that it contains data.

Later, reloading the model with keras:

generator_AtoB = load_model("models/generator_AtoB.h5")

or with pickle:

pickle.load(saveFile)

results in the error:

Traceback (most recent call last):
File "test_model.py", line 14, in <module>
generator_AtoB = pickle.load(saveFile)
File "/home/MYUSERNAME/.virtualenvs/tensorflow_py3/lib/python3.5/site-packages/keras/engine/network.py", line 1266, in __setstate__
model = saving.unpickle_model(state)
File "/home/MYUSERNAME/.virtualenvs/tensorflow_py3/lib/python3.5/site-packages/keras/engine/saving.py", line 435, in unpickle_model
return _deserialize_model(f)
File "/home/MYUSERNAME/.virtualenvs/tensorflow_py3/lib/python3.5/site-packages/keras/engine/saving.py", line 274, in _deserialize_model
reshape=False)
File "/home/MYUSERNAME/.virtualenvs/tensorflow_py3/lib/python3.5/site-packages/keras/engine/saving.py", line 682, in preprocess_weights_for_loading
weights = convert_nested_model(weights)
File "/home/MYUSERNAME/.virtualenvs/tensorflow_py3/lib/python3.5/site-packages/keras/engine/saving.py", line 658, in convert_nested_model
original_backend=original_backend))
File "/home/MYUSERNAME/.virtualenvs/tensorflow_py3/lib/python3.5/site-packages/keras/engine/saving.py", line 682, in preprocess_weights_for_loading
weights = convert_nested_model(weights)
File "/home/MYUSERNAME/.virtualenvs/tensorflow_py3/lib/python3.5/site-packages/keras/engine/saving.py", line 670, in convert_nested_model
original_backend=original_backend))
File "/home/MYUSERNAME/.virtualenvs/tensorflow_py3/lib/python3.5/site-packages/keras/engine/saving.py", line 682, in preprocess_weights_for_loading
weights = convert_nested_model(weights)
File "/home/MYUSERNAME/.virtualenvs/tensorflow_py3/lib/python3.5/site-packages/keras/engine/saving.py", line 658, in convert_nested_model
original_backend=original_backend))
File "/home/MYUSERNAME/.virtualenvs/tensorflow_py3/lib/python3.5/site-packages/keras/engine/saving.py", line 800, in preprocess_weights_for_loading
elif layer_weights_shape != weights[0].shape:
IndexError: list index out of range

The error is the same whether I use keras.load_model or pickle.load.

Since this happens when saving with either keras or pickle, I don't think it's a save/load problem; rather, something in my model must be getting saved incorrectly, but I can't find any reference to such an issue anywhere.

Thanks for your help.

The full code follows:

#!/usr/bin/env python
# -*- coding: UTF-8 -*-

# https://hardikbansal.github.io/CycleGANBlog/
import sys
import time

import numpy as np
import keras
from keras.models import Sequential, Model
from keras.layers import Dense, Flatten, Input, multiply, add as kadd
from keras.layers import Conv2D, BatchNormalization, Conv2DTranspose
from keras.layers import LeakyReLU, ReLU
from keras.layers import Activation

from keras.preprocessing.image import ImageDataGenerator

from PIL import Image



ngf = 32 # Number of filters in first layer of generator
ndf = 64 # Number of filters in first layer of discriminator
BATCH_SIZE = 1 # batch_size
pool_size = 50 # pool_size
IMG_WIDTH = 256 # Input image will be of width 256
IMG_HEIGHT = 256 # Input image will be of height 256
IMG_DEPTH = 3 # RGB format

DISCRIMINATOR_ITERATIONS = 1
SAVE_IMAGES_INTERVAL = 25

ITERATIONS = 5000
FAKE_POOL_SIZE=25
INPUT_SHAPE = (IMG_WIDTH, IMG_HEIGHT, IMG_DEPTH)


def resnet_block(num_features):

    block = Sequential()
    block.add(Conv2D(num_features, kernel_size=3, strides=1, padding="SAME"))
    block.add(BatchNormalization())
    block.add(ReLU())
    block.add(Conv2D(num_features, kernel_size=3, strides=1, padding="SAME"))
    block.add(BatchNormalization())
    block.add(ReLU())


    resblock_input = Input(shape=(64, 64, 256))
    conv_model = block(resblock_input)

    _sum = kadd([resblock_input, conv_model])
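    # residual connection: the block's output is its input plus the conv stack's output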

    composed =  Model(inputs=[resblock_input], outputs=_sum)
    return composed


def discriminator( f=4, name=None):
    d = Sequential()
    d.add(Conv2D(ndf, kernel_size=f, strides=2, padding="SAME", name="discr_conv2d_1"))
    d.add(BatchNormalization())
    d.add(LeakyReLU(0.2))
    d.add(Conv2D(ndf * 2, kernel_size=f, strides=2, padding="SAME", name="discr_conv2d_2"))
    d.add(BatchNormalization())
    d.add(LeakyReLU(0.2))
    d.add(Conv2D(ndf * 4, kernel_size=f, strides=2, padding="SAME", name="discr_conv2d_3"))
    d.add(BatchNormalization())
    d.add(LeakyReLU(0.2))
    d.add(Conv2D(ndf * 8, kernel_size=f, strides=2, padding="SAME", name="discr_conv2d_4"))
    d.add(BatchNormalization())
    d.add(LeakyReLU(0.2))
    d.add(Conv2D(1, kernel_size=f, strides=1, padding="SAME", name="discr_conv2d_out"))
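    # with 256x256 inputs, the four stride-2 convs above leave a 16x16x1 map of decisions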

    # d.add(Activation("sigmoid"))


    model_input = Input(shape=INPUT_SHAPE)

    decision  = d(model_input)

    composed = Model(model_input, decision)
    # print(d.output_shape)
    # d.summary()

    return composed

def generator(name=None):

    g = Sequential()
    # ENCODER
    g.add(Conv2D(ngf, kernel_size=7,
            strides=1,
            # activation='relu',
            padding='SAME',
            input_shape=INPUT_SHAPE,
            name="encoder_0" ))


    g.add(Conv2D(64*2, kernel_size=3,
            strides=2,
            padding='SAME',
            name="encoder_1" ))
    # output shape = (128, 128, 128)


    g.add(Conv2D(64*4, kernel_size=3,
            padding="SAME",
            strides=2,))
    # output shape = (64, 64, 256)

    # END ENCODER


    # TRANSFORM

    g.add(resnet_block(64*4))
    g.add(resnet_block(64*4))
    g.add(resnet_block(64*4))
    g.add(resnet_block(64*4))
    g.add(resnet_block(64*4))

    # END TRANSFORM
    # generator.shape = (64, 64, 256)

    # DECODER

    g.add(Conv2DTranspose(ngf*2,kernel_size=3, strides=2, padding="SAME"))
    g.add(Conv2DTranspose(ngf*2,kernel_size=3, strides=2, padding="SAME"))

    g.add(Conv2D(3,kernel_size=7, strides=1, padding="SAME"))

    # END DECODER

    model_input = Input(shape=INPUT_SHAPE)
    generated_image = g(model_input)

    composed = Model(model_input, generated_image, name=name)
    return composed


def fromMinusOneToOne(x):
    return x/127.5 -1

def toRGB(x):
    return (1+x) * 127.5
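# These two helpers appear unused below; presumably they are meant to be
# passed as createImageGenerator's pp (preprocessing_function) argument,
# e.g. createImageGenerator("train", "A", pp=fromMinusOneToOne).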


def createImageGenerator( subset="train", data_type="A", batch_size=1, pp=None):

    # we create two instances with the same arguments
    data_gen_args = dict(
                         preprocessing_function= pp,
                         zoom_range=0.1)

    image_datagen = ImageDataGenerator(**data_gen_args)

    # Provide the same seed and keyword arguments to the fit and flow methods
    seed = 1

    image_directory=subset+data_type
    print('data/vangogh2photo/'+image_directory)
    image_generator = image_datagen.flow_from_directory(
        'data/vangogh2photo/'+image_directory,
        class_mode=None,
        batch_size=batch_size,
        seed=seed)

    return image_generator


if __name__ == '__main__':

    generator_AtoB = generator(name="gen_A")
    generator_BtoA = generator(name="gen_B")

    discriminator_A = discriminator(name="disc_A")
    discriminator_B = discriminator(name="disc_B")


    # input_A = Input(batch_shape=(batch_size, IMG_WIDTH, IMG_HEIGHT, IMG_DEPTH), name="input_A")
    input_A = Input(batch_shape=(None, IMG_WIDTH, IMG_HEIGHT, IMG_DEPTH), name="input_A")
    generated_B = generator_AtoB(input_A)
    discriminator_generated_B = discriminator_B(generated_B)
    cyc_A = generator_BtoA(generated_B)


    input_B = Input(batch_shape=(None, IMG_WIDTH, IMG_HEIGHT, IMG_DEPTH), name="input_B")
    generated_A = generator_BtoA(input_B)
    discriminator_generated_A = discriminator_A(generated_A )
    cyc_B = generator_AtoB(generated_A)


    ### GENERATOR TRAINING
    optim = keras.optimizers.Adam(lr=0.0002, beta_1=0.5, beta_2=0.999, epsilon=1e-08)


    # cyclic error is increased, because it's more important
    cyclic_weight_multipier = 10

    generator_trainer =  Model([input_A, input_B],
                 [discriminator_generated_B,   discriminator_generated_A,
                 cyc_A,      cyc_B,])

    losses =         [ "MSE", "MSE", "MAE",                   "MAE"]
    losses_weights = [ 1,     1,     cyclic_weight_multipier, cyclic_weight_multipier]

    generator_trainer.compile(optimizer=optim, loss = losses, loss_weights=losses_weights)
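    # four outputs: [D_B(G_AtoB(A)), D_A(G_BtoA(B)), cyc_A, cyc_B];
    # MSE on the adversarial outputs, MAE on the cyclic reconstructions (weighted 10x)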



    ### DISCRIMINATOR TRAINING

    disc_optim = keras.optimizers.Adam(lr=0.0002, beta_1=0.5, beta_2=0.999, epsilon=1e-08)

    real_A = Input(batch_shape=(None, IMG_WIDTH, IMG_HEIGHT, IMG_DEPTH), name="in_real_A")
    real_B = Input(batch_shape=(None, IMG_WIDTH, IMG_HEIGHT, IMG_DEPTH), name="in_real_B")

    generated_A = Input(batch_shape=(None, IMG_WIDTH, IMG_HEIGHT, IMG_DEPTH), name="in_gen_A")
    generated_B = Input(batch_shape=(None, IMG_WIDTH, IMG_HEIGHT, IMG_DEPTH), name="in_gen_B")

    discriminator_real_A = discriminator_A(real_A)
    discriminator_generated_A = discriminator_A(generated_A)
    discriminator_real_B =  discriminator_B(real_B)
    discriminator_generated_B = discriminator_B(generated_B)

    disc_trainer = Model([real_A, generated_A, real_B, generated_B],
                         [  discriminator_real_A,
                            discriminator_generated_A,
                            discriminator_real_B,
                            discriminator_generated_B] )


    disc_trainer.compile(optimizer=disc_optim, loss = 'MSE')


    #########
    ##
    ## TRAINING
    ##
    #########


    fake_A_pool = []
    fake_B_pool = []


    ones = np.ones((BATCH_SIZE,)+ generator_trainer.output_shape[0][1:])
    zeros = np.zeros((BATCH_SIZE,)+ generator_trainer.output_shape[0][1:])


    train_A_image_generator = createImageGenerator("train", "A")
    train_B_image_generator = createImageGenerator("train", "B")


    it = 1
    while it  < ITERATIONS:
        start = time.time()
        print("\nIteration %d " % it)
        sys.stdout.flush()

        # THIS ONLY WORKS IF BATCH SIZE == 1
        real_A = train_A_image_generator.next()

        real_B = train_B_image_generator.next()

        fake_A_pool.extend(generator_BtoA.predict(real_B))
        fake_B_pool.extend(generator_AtoB.predict(real_A))

        #resize pool
        fake_A_pool = fake_A_pool[-FAKE_POOL_SIZE:]
        fake_B_pool = fake_B_pool[-FAKE_POOL_SIZE:]

        fake_A = [ fake_A_pool[ind] for ind in np.random.choice(len(fake_A_pool), size=(BATCH_SIZE,), replace=False) ]
        fake_B = [ fake_B_pool[ind] for ind in np.random.choice(len(fake_B_pool), size=(BATCH_SIZE,), replace=False) ]

        fake_A = np.array(fake_A)
        fake_B = np.array(fake_B)


        for x in range(0, DISCRIMINATOR_ITERATIONS):
            _, D_loss_real_A, D_loss_fake_A, D_loss_real_B, D_loss_fake_B = \
            disc_trainer.train_on_batch(
                [real_A, fake_A, real_B, fake_B],
                [zeros, ones * 0.9, zeros, ones * 0.9] )


        print("=====")
        print("Discriminator loss:")
        print("Real A: %s, Fake A: %s || Real B: %s, Fake B: %s " % ( D_loss_real_A, D_loss_fake_A, D_loss_real_B, D_loss_fake_B))

        _, G_loss_fake_B, G_loss_fake_A, G_loss_rec_A, G_loss_rec_B = \
            generator_trainer.train_on_batch(
                [real_A, real_B],
                [zeros, zeros, real_A, real_B])


        print("=====")
        print("Generator loss:")
        print("Fake B: %s, Cyclic A: %s || Fake A: %s, Cyclic B: %s " % (G_loss_fake_B, G_loss_rec_A, G_loss_fake_A, G_loss_rec_B))

        end = time.time()
        print("Iteration time: %s s" % (end-start))
        sys.stdout.flush()

        if not (it % SAVE_IMAGES_INTERVAL ):
            imgA = real_A
            # print(imgA.shape)
            imga2b = generator_AtoB.predict(imgA)
            # print(imga2b.shape)
            imga2b2a = generator_BtoA.predict(imga2b)
            # print(imga2b2a.shape)
            imgB = real_B
            imgb2a = generator_BtoA.predict(imgB)
            imgb2a2b = generator_AtoB.predict(imgb2a)

            c = np.concatenate([imgA, imga2b, imga2b2a, imgB, imgb2a, imgb2a2b], axis=2).astype(np.uint8)
            # print(c.shape)
            x = Image.fromarray(c[0])
            x.save("data/generated/iteration_%s.jpg" % str(it).zfill(4))

        it+=1


        generator_AtoB.save("models/generator_AtoB.h5")
        generator_BtoA.save("models/generator_BtoA.h5")

【Comments】:

Please post the full code you are using. Try this with the tf.keras API. — Thanks, I've added the full code; I'm not familiar with the tf.keras API, what are you referring to?

【Answer 1】:

I ran into the same problem. Ankish's suggestion to try the tf.keras API solved it. I don't know why, but...

tf.keras.models.load_model("./saved_models/our_model.h5", compile=False)

works perfectly, while

keras.models.load_model("./saved_models/our_model.h5")

fails with the error listed here. The compile flag is set to False only to suppress a warning.
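For reference, a minimal sketch of that round trip, assuming the model was saved as in the question; the optimizer and loss in the final step are placeholder assumptions, only needed if you want to resume training:

import tensorflow as tf

# compile=False skips restoring the training configuration (and silences
# the related warning); inference works right away
generator_AtoB = tf.keras.models.load_model("models/generator_AtoB.h5", compile=False)
# fake_B = generator_AtoB.predict(real_A)

# only if training should continue: re-compile manually (placeholder settings)
generator_AtoB.compile(optimizer=tf.keras.optimizers.Adam(lr=0.0002), loss="MAE")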

【Discussion】:

I still run into the same error with compile=False.

【Answer 2】:

I would generalize Ankish's and Josh's answers and import everything from the TensorFlow Keras API. First install TensorFlow 2 (pip install tensorflow, or pip install tensorflow-gpu, if using pip; detailed instructions here). Then import tensorflow and replace your import statements, switching every keras import to tensorflow.keras:

# ...
import numpy as np

import tensorflow as tf

from tensorflow import keras
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Flatten, Input, multiply, add as kadd
from tensorflow.keras.layers import Conv2D, BatchNormalization, Conv2DTranspose
from tensorflow.keras.layers import LeakyReLU, ReLU
from tensorflow.keras.layers import Activation

from tensorflow.keras.preprocessing.image import ImageDataGenerator
#...

With these changes, the rest of the code can stay the same.

【Discussion】:

My problem persists. I also only have tf.python.keras. ...

【Answer 3】:

Instead of pickle, use joblib. See a comparison here. Joblib has an interface similar to pickle's, i.e.:

import joblib
joblib.dump(model, '<path>')  # Save
model = joblib.load('<path>') # Load

I personally prefer this approach, since I can use it for both scikit-learn and Keras models. Note, however, that it does not work if you use Keras through TensorFlow (tensorflow.keras), because in that case you have to rely on the native serializer and initialize the graph engine when loading the model from disk.
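For illustration, the same dump/load round trip with a scikit-learn model; the estimator and data here are made-up examples:

import joblib
import numpy as np
from sklearn.linear_model import LogisticRegression

X = np.random.rand(100, 4)           # hypothetical training data
y = (X[:, 0] > 0.5).astype(int)      # hypothetical labels

clf = LogisticRegression().fit(X, y)
joblib.dump(clf, "classifier.joblib")        # save; the same call takes a model object
restored = joblib.load("classifier.joblib")  # load
assert (restored.predict(X) == clf.predict(X)).all()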

【Discussion】:

【Answer 4】:

tf.keras is the TensorFlow-specific implementation of the Keras API specification, whereas keras is an API specification describing a deep learning framework.

tf.keras adds support for TensorFlow-specific features on top of that framework.

Change your code: replace import keras with import tensorflow.keras.

【Discussion】:

【Answer 5】:

Suppose you're building a website. PHP is your programming language and SQL Server is your backend. You could just as well use PostgreSQL or MySQL as your database, but the PHP code you write to interact with the database would not change.

Think of the backend as your database and of Keras as the programming language you use to access that database. Originally, Keras's default backend was Theano; starting with the Keras v1.1.0 release, TensorFlow became the default. When Google announced TensorFlow 2.0 in June 2019, they declared that Keras is now TensorFlow's official high-level API for quick and easy model design and training.

This is why you have to use tf.keras, i.e. tensorflow.keras:

import sys
import time

import numpy as np
import tensorflow
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Flatten, Input, multiply, add as kadd
from tensorflow.keras.layers import Conv2D, BatchNormalization, Conv2DTranspose
from tensorflow.keras.layers import LeakyReLU, ReLU
from tensorflow.keras.layers import Activation

from tensorflow.keras.preprocessing.image import ImageDataGenerator

from PIL import Image



(The rest of the script is identical to the code in the question; the only other change is that the two optimizer definitions use tensorflow.keras.optimizers.Adam instead of keras.optimizers.Adam.)
【Discussion】:
