I want to save the best checkpoint while my model is training, but the ModelCheckpoint callback does not work as I expect. According to Saving best model in Keras, this code should work.
model = Sequential()
model.add(Conv1D(filters=32, kernel_size=8, input_shape=(X_train.shape[1], 4)))
model.add(MaxPooling1D(pool_size=4))
model.add(Flatten())
model.add(Dense(16, activation='relu'))
model.add(Dense(2, activation='softmax'))
model.compile(loss='binary_crossentropy', optimizer='adam',
              metrics=['accuracy'])
model.summary()
stop = EarlyStopping(monitor='val_loss', patience=15, verbose=1, mode='min')
save = ModelCheckpoint('./my_model.hdf5', save_best_only=True, monitor='val_loss', mode='min')
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10, verbose=1, epsilon=1e-4, mode='min')
history = model.fit(X_train, y_train, epochs=25, verbose=0, callbacks=[stop, save, reduce_lr], validation_split=0.25)
However, it keeps giving me the following error:
AttributeError Traceback (most recent call last)
<ipython-input-28-f86f439eae5a> in <module>()
17 reduce_lr_loss = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=7, verbose=1, epsilon=1e-4, mode='min')
18
---> 19 history = model.fit(X_train, y_train, batch_size=batch_size, epochs=50, verbose=0, callbacks=[earlyStopping, mcp_save, reduce_lr_loss], validation_split=0.25)
20
21
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, max_queue_size, workers, use_multiprocessing, **kwargs)
878 initial_epoch=initial_epoch,
879 steps_per_epoch=steps_per_epoch,
--> 880 validation_steps=validation_steps)
881
882 def evaluate(self,
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/engine/training_arrays.py in model_iteration(model, inputs, targets, sample_weights, batch_size, epochs, verbose, callbacks, val_inputs, val_targets, val_sample_weights, shuffle, initial_epoch, steps_per_epoch, validation_steps, mode, validation_in_fit, **kwargs)
323 # Callbacks batch_begin.
324 batch_logs = {'batch': batch_index, 'size': len(batch_ids)}
--> 325 callbacks._call_batch_hook(mode, 'begin', batch_index, batch_logs)
326 progbar.on_batch_begin(batch_index, batch_logs)
327
/usr/local/lib/python3.6/dist-packages/tensorflow/python/keras/callbacks.py in _call_batch_hook(self, mode, hook, batch, logs)
194 t_before_callbacks = time.time()
195 for callback in self.callbacks:
--> 196 batch_hook = getattr(callback, hook_name)
197 batch_hook(batch, logs)
198 self._delta_ts[hook_name].append(time.time() - t_before_callbacks)
AttributeError: 'EarlyStopping' object has no attribute 'on_train_batch_begin'
I have successfully used this code with my functional model, but I am not sure what the problem is here with the Sequential model.
From the stack trace, I can see that you're building the model with tensorflow.keras but importing EarlyStopping from the standalone keras package (based on the other answer you referenced). That mismatch is the cause of the error.
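Your imports probably look something like this (an assumption on my part, since they are not shown in the question):

from keras.callbacks import EarlyStopping            # standalone Keras package
from tensorflow.keras.models import Sequential        # tf.keras
from tensorflow.keras.layers import Conv1D, MaxPooling1D, Flatten, Dense

The standalone Keras callbacks do not implement the on_train_batch_begin hook that tf.keras calls during fit, which is exactly the AttributeError in your traceback.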
This should work (import EarlyStopping from tensorflow.keras instead):
from tensorflow.keras.callbacks import EarlyStopping
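If ModelCheckpoint and ReduceLROnPlateau were also imported from the standalone keras package, they need the same treatment. A minimal consistent setup might look like this (a sketch, assuming everything comes from tf.keras; note that tf.keras spells the ReduceLROnPlateau threshold min_delta rather than epsilon):

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv1D, MaxPooling1D, Flatten, Dense
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau

# All three callbacks now come from tensorflow.keras.callbacks
stop = EarlyStopping(monitor='val_loss', patience=15, verbose=1, mode='min')
save = ModelCheckpoint('./my_model.hdf5', save_best_only=True, monitor='val_loss', mode='min')
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10,
                              verbose=1, min_delta=1e-4, mode='min')

With the callbacks and the model coming from the same package, the model.fit(...) call from your question should run without the AttributeError.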