How to use grid search with fit_generator in Keras
Posted: 2018-04-27 00:59:21

【Question】: I want to do a grid search over my model's parameters in Keras, using fit_generator as the input.
I found the code below on Stack Overflow and modified it.
1 - But I don't understand how to pass fit_generator or flow_from_directory to the fit function (the last line of the code).
2 - How do I add early stopping?
Thanks
from __future__ import print_function
import copy
import os                    # dealing with directories
import types
import numpy as np           # dealing with arrays
from random import shuffle   # mixing up our currently ordered data that might lead our network astray in training
from scipy.sparse import issparse
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import to_categorical
from keras.wrappers.scikit_learn import KerasClassifier
from keras import backend as K
from sklearn.model_selection import GridSearchCV  # sklearn.grid_search was removed in newer scikit-learn versions
from tqdm import tqdm        # a nice pretty percentage bar for tasks

num_classes = 10
# input image dimensions
img_rows, img_cols = 28, 28
input_shape = (img_rows, img_cols, 1)
def make_model(dense_layer_sizes, filters, kernel_size, pool_size):
    '''Creates model comprised of 2 convolutional layers followed by dense layers

    dense_layer_sizes: List of layer sizes.
        This list has one number for each layer
    filters: Number of convolutional filters in each convolutional layer
    kernel_size: Convolutional kernel size
    pool_size: Size of pooling area for max pooling
    '''
    model = Sequential()
    model.add(Conv2D(filters, kernel_size,
                     padding='valid',
                     input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(Conv2D(filters, kernel_size))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=pool_size))
    model.add(Dropout(0.25))
    model.add(Flatten())

    for layer_size in dense_layer_sizes:
        model.add(Dense(layer_size))
        model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer='adadelta',
                  metrics=['accuracy'])
    return model
class KerasClassifier(KerasClassifier):
    """ adds sparse matrix handling using batch generator
    """

    def fit(self, x, y, **kwargs):
        """ adds sparse matrix handling """
        if not issparse(x):
            return super().fit(x, y, **kwargs)

        ############ adapted from KerasClassifier.fit ######################
        if self.build_fn is None:
            self.model = self.__call__(**self.filter_sk_params(self.__call__))
        elif not isinstance(self.build_fn, types.FunctionType):
            self.model = self.build_fn(
                **self.filter_sk_params(self.build_fn.__call__))
        else:
            self.model = self.build_fn(**self.filter_sk_params(self.build_fn))

        loss_name = self.model.loss
        if hasattr(loss_name, '__name__'):
            loss_name = loss_name.__name__
        if loss_name == 'categorical_crossentropy' and len(y.shape) != 2:
            y = to_categorical(y)

        ### fit => fit_generator
        fit_args = copy.deepcopy(self.filter_sk_params(Sequential.fit_generator))
        fit_args.update(kwargs)
        ############################################################
        self.model.fit_generator(
            self.get_batch(x, y, self.sk_params["batch_size"]),
            samples_per_epoch=x.shape[0],
            **fit_args)
        return self

    def get_batch(self, x, y=None, batch_size=32):
        """ batch generator to enable sparse input """
        index = np.arange(x.shape[0])
        start = 0
        while True:
            if start == 0 and y is not None:
                np.random.shuffle(index)
            batch = index[start:start + batch_size]
            if y is not None:
                yield x[batch].toarray(), y[batch]
            else:
                yield x[batch].toarray()
            start += batch_size
            if start >= x.shape[0]:
                start = 0

    def predict_proba(self, x):
        """ adds sparse matrix handling """
        if not issparse(x):
            return super().predict_proba(x)

        preds = self.model.predict_generator(
            self.get_batch(x, None, self.sk_params["batch_size"]),
            val_samples=x.shape[0])
        return preds
dense_size_candidates = [[32], [64], [32, 32], [64, 64]]
my_classifier = KerasClassifier(make_model, batch_size=32)
validator = GridSearchCV(my_classifier,
                         param_grid={'dense_layer_sizes': dense_size_candidates,
                                     # epochs is avail for tuning even when not
                                     # an argument to model building function
                                     'epochs': [3, 6],
                                     'filters': [8],
                                     'kernel_size': [3],
                                     'pool_size': [2]},
                         scoring='neg_log_loss',
                         n_jobs=1)
batch_size = 20
# NOTE: width and height are not defined anywhere in this code and must be set before this point
validation_datagen = ImageDataGenerator(rescale=1./255)
train_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(
    'd:/train',                    # this is the target directory
    target_size=(width, height),   # all images will be resized to (width, height)
    batch_size=batch_size,
    color_mode="grayscale",
    class_mode='binary',
    shuffle=True
    # ,save_to_dir='preview', save_prefix='cat', save_format='png'
)  # since we use binary_crossentropy loss, we need binary labels

# this is a similar generator, for validation data
validation_generator = validation_datagen.flow_from_directory(
    'd:/validation',
    target_size=(width, height),
    batch_size=batch_size,
    color_mode="grayscale",
    class_mode='binary')

test_generator = test_datagen.flow_from_directory(
    'd:/test',
    target_size=(width, height),
    batch_size=batch_size,
    color_mode="grayscale",
    class_mode='binary')

validator.fit(??????
【Comments】:
【Answer 1】:

from keras.applications.vgg16 import VGG16
from keras.models import Model
from keras.layers import Dense, Dropout, GlobalMaxPooling2D
from keras import layers, optimizers

def create_model(learn_rate=0.01, momentum=0):
    image_size = 224
    input_shape = (image_size, image_size, 3)
    pre_trained_model = VGG16(input_shape=input_shape, include_top=False, weights="imagenet")
    last_layer = pre_trained_model.get_layer('block5_pool')
    last_output = last_layer.output
    # Flatten the output layer to 1 dimension
    x = GlobalMaxPooling2D()(last_output)
    # Add a fully connected layer with 512 hidden units and ReLU activation
    x = Dense(512, activation='relu')(x)
    # Add a dropout rate of 0.5
    x = Dropout(0.5)(x)
    # Add a final sigmoid layer for classification
    x = layers.Dense(1, activation='sigmoid')(x)
    model = Model(pre_trained_model.input, x)
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizers.SGD(lr=learn_rate, momentum=momentum),
                  metrics=['accuracy'])
    return model
learn_rate = [1e-9, 1e-3]
momentum = [0.6, 0.9]

def try_fit(learn_rate, momentum):
    history_page = []
    for lr in learn_rate:
        for moment in momentum:
            model = create_model(lr, moment)
            history = model.fit_generator(
                train_generator,
                epochs=epochs,
                validation_data=validation_generator,
                validation_steps=total_validate // batch_size,
                steps_per_epoch=total_train // batch_size)
            history_page.append(history)
    return history_page

history_page = try_fit(learn_rate, momentum)
history_page[0].history['accuracy']
I think you can try this approach.
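[Editor's note] For the OP's second question (early stopping), a callback could be slotted into this manual loop. A minimal sketch, assuming the same generators and count variables as in try_fit above; the fit_generator call is the one inside the loop, with callbacks added:

from keras.callbacks import EarlyStopping

early_stop = EarlyStopping(monitor='val_loss', patience=3, verbose=1)

history = model.fit_generator(
    train_generator,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=total_validate // batch_size,
    steps_per_epoch=total_train // batch_size,
    callbacks=[early_stop])   # training stops once val_loss has not improved for 3 epochs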
【Comments】:
You don't seem to be using GridSearchCV at all. Your code may be correct, but I don't think it is what the OP had in mind.

【Answer 2】:
There is a class called ParameterGrid that generates all the parameter combinations used for the grid search inside GridSearchCV(). You can store them in a list. For example:
from sklearn.model_selection import ParameterGrid

parameters = {'epochs': [32, 64, 128],
              'batch_size': [24, 32, 48, 64]}
list(ParameterGrid(parameters))
which prints out
[{'batch_size': 24, 'epochs': 32},
 {'batch_size': 24, 'epochs': 64},
 {'batch_size': 24, 'epochs': 128},
 {'batch_size': 32, 'epochs': 32},
 {'batch_size': 32, 'epochs': 64},
 {'batch_size': 32, 'epochs': 128},
 {'batch_size': 48, 'epochs': 32},
 {'batch_size': 48, 'epochs': 64},
 {'batch_size': 48, 'epochs': 128},
 {'batch_size': 64, 'epochs': 32},
 {'batch_size': 64, 'epochs': 64},
 {'batch_size': 64, 'epochs': 128}]
In a loop over this list, you can train your model with each specific combination. At the end of each iteration you can check val_acc and val_loss with other functions.
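[Editor's note] A minimal sketch of such a loop, assuming the train_generator and validation_generator from the question are already defined and that a hypothetical create_model(learn_rate=...) factory builds a compiled model:

from sklearn.model_selection import ParameterGrid

param_grid = {'epochs': [3, 6], 'learn_rate': [1e-3, 1e-4]}
best_params, best_val_loss = None, float('inf')

for params in ParameterGrid(param_grid):
    model = create_model(learn_rate=params['learn_rate'])   # hypothetical model factory
    history = model.fit_generator(
        train_generator,
        steps_per_epoch=train_generator.samples // train_generator.batch_size,
        epochs=params['epochs'],
        validation_data=validation_generator,
        validation_steps=validation_generator.samples // validation_generator.batch_size)
    # keep the combination with the lowest final validation loss
    val_loss = history.history['val_loss'][-1]
    if val_loss < best_val_loss:
        best_val_loss, best_params = val_loss, params

print(best_params, best_val_loss)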
【Comments】:
【Answer 3】: I am using this implementation; I hope it helps.
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import EarlyStopping, ModelCheckpoint, CSVLogger
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import to_categorical
import types
class KerasBatchClassifier(KerasClassifier):

    def fit(self, X, y, **kwargs):
        # taken from keras.wrappers.scikit_learn.KerasClassifier.fit ##################################################
        if self.build_fn is None:
            self.model = self.__call__(**self.filter_sk_params(self.__call__))
        elif not isinstance(self.build_fn, types.FunctionType) and not isinstance(self.build_fn, types.MethodType):
            self.model = self.build_fn(**self.filter_sk_params(self.build_fn.__call__))
        else:
            self.model = self.build_fn(**self.filter_sk_params(self.build_fn))

        loss_name = self.model.loss
        if hasattr(loss_name, '__name__'):
            loss_name = loss_name.__name__
        if loss_name == 'categorical_crossentropy' and len(y.shape) != 2:
            y = to_categorical(y)
        ################################################################################################################

        datagen = ImageDataGenerator(
            rotation_range=45,
            width_shift_range=0.2,
            height_shift_range=0.2,
            shear_range=0.2,
            zoom_range=0.2,
            horizontal_flip=True,
            fill_mode='nearest'
        )

        if 'X_val' in kwargs and 'y_val' in kwargs:
            X_val = kwargs['X_val']
            y_val = kwargs['y_val']
            val_gen = ImageDataGenerator(
                horizontal_flip=True
            )
            val_flow = val_gen.flow(X_val, y_val, batch_size=32)
            val_steps = len(X_val) / 32
            early_stopping = EarlyStopping(patience=5, verbose=5, mode="auto")
            model_checkpoint = ModelCheckpoint("results/best_weights.{epoch:02d}-{loss:.5f}.hdf5", verbose=5, save_best_only=True, mode="auto")
        else:
            val_flow = None
            val_steps = None
            early_stopping = EarlyStopping(monitor="acc", patience=3, verbose=5, mode="auto")
            model_checkpoint = ModelCheckpoint("results/best_weights.{epoch:02d}-{loss:.5f}.hdf5", monitor="acc", verbose=5, save_best_only=True, mode="auto")

        callbacks = [early_stopping, model_checkpoint]

        epochs = self.sk_params['epochs'] if 'epochs' in self.sk_params else 100

        self.__history = self.model.fit_generator(
            datagen.flow(X, y, batch_size=32),
            steps_per_epoch=len(X) / 32,
            validation_data=val_flow,
            validation_steps=val_steps,
            epochs=epochs,
            callbacks=callbacks
        )

        return self.__history
    def score(self, X, y, **kwargs):
        kwargs = self.filter_sk_params(Sequential.evaluate, kwargs)

        loss_name = self.model.loss
        if hasattr(loss_name, '__name__'):
            loss_name = loss_name.__name__
        if loss_name == 'categorical_crossentropy' and len(y.shape) != 2:
            y = to_categorical(y)

        outputs = self.model.evaluate(X, y, **kwargs)
        if type(outputs) is not list:
            outputs = [outputs]
        for name, output in zip(self.model.metrics_names, outputs):
            if name == 'acc':
                return output
        raise Exception('The model is not configured to compute accuracy. '
                        'You should pass `metrics=["accuracy"]` to '
                        'the `model.compile()` method.')

    @property
    def history(self):
        return self.__history
As you can see, it is specific to images, but you can adapt it to your particular needs.
I use it like this:
from sklearn.model_selection import GridSearchCV
model = KerasBatchClassifier(build_fn=create_model, epochs=epochs)
learn_rate = [0.001, 0.01, 0.1]
epsilon = [None, 1e-2, 1e-3]
dropout_rate = [0.25, 0.5]
param_grid = dict(learn_rate=learn_rate, epsilon=epsilon, dropout_rate=dropout_rate)
grid = GridSearchCV(estimator=model, param_grid=param_grid)
grid_result = grid.fit(X_train, Y_train, X_val = X_test, y_val = Y_test)
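[Editor's note] Once the search finishes, the standard scikit-learn attributes can be used to inspect the outcome; a small sketch, assuming the fit above has run:

print(grid_result.best_score_, grid_result.best_params_)
for mean, params in zip(grid_result.cv_results_['mean_test_score'],
                        grid_result.cv_results_['params']):
    print(mean, params)   # mean cross-validated score for each parameter combination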
【Comments】:
I was just wondering how to use this implementation with flow_from_directory? Could you help clarify that?
Hi @ThanhNguyen, did you figure it out?
@Anshuman Kumar I have a question: do I need to use the X and y from the args, or can I define the generator inside?