The following code runs a straightforward Sequential Keras model on the MNIST dataset that is packaged with Keras.
Running it raises an exception.
The code is readily reproducible.
import tensorflow as tf

class myCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs={}):
        if(logs.get('acc')>0.99):
            print("\nReached 99% accuracy so cancelling training!")
            self.model.stop_training = True

mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

callbacks = myCallback()

model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(512, activation=tf.nn.relu),
    tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train, y_train, epochs=10, callbacks=[callbacks])
The exception is:
Epoch 1/10
59296/60000 [============================>.] - ETA: 0s - loss: 0.2005 - accuracy: 0.9400
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-26-f5e673b24d24> in <module>()
23 metrics=['accuracy'])
24
---> 25 model.fit(x_train, y_train, epochs=10, callbacks=[callbacks])
C:\Program Files (x86)\Microsoft Visual Studio\Shared\Anaconda3_64\lib\site-packages\tensorflow\python\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_freq, max_queue_size, workers, use_multiprocessing, **kwargs)
871 validation_steps=validation_steps,
872 validation_freq=validation_freq,
--> 873 steps_name='steps_per_epoch')
874
875 def evaluate(self,
C:\Program Files (x86)\Microsoft Visual Studio\Shared\Anaconda3_64\lib\site-packages\tensorflow\python\keras\engine\training_arrays.py in model_iteration(model, inputs, targets, sample_weights, batch_size, epochs, verbose, callbacks, val_inputs, val_targets, val_sample_weights, shuffle, initial_epoch, steps_per_epoch, validation_steps, validation_freq, mode, validation_in_fit, prepared_feed_values_from_dataset, steps_name, **kwargs)
406 if mode == ModeKeys.TRAIN:
407 # Epochs only apply to `fit`.
--> 408 callbacks.on_epoch_end(epoch, epoch_logs)
409 progbar.on_epoch_end(epoch, epoch_logs)
410
C:\Program Files (x86)\Microsoft Visual Studio\Shared\Anaconda3_64\lib\site-packages\tensorflow\python\keras\callbacks.py in on_epoch_end(self, epoch, logs)
288 logs = logs or {}
289 for callback in self.callbacks:
--> 290 callback.on_epoch_end(epoch, logs)
291
292 def on_train_batch_begin(self, batch, logs=None):
<ipython-input-26-f5e673b24d24> in on_epoch_end(self, epoch, logs)
3 class myCallback(tf.keras.callbacks.Callback):
4 def on_epoch_end(self, epoch, logs={}):
----> 5 if(logs.get('acc')>0.99):
6 print("\nReached 99% accuracy so cancelling training!")
7 self.model.stop_training = True
TypeError: '>' not supported between instances of 'NoneType' and 'float'
In the model.compile call you defined metrics=['accuracy'], so Keras records the metric in the logs dictionary under the key 'accuracy', not 'acc'. logs.get('acc') therefore returns None, which is what triggers the TypeError when compared with 0.99. Use logs.get('accuracy') instead.
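A corrected version of the callback from the question, as a minimal sketch; the guard against a missing key is an extra precaution, not required for the fix itself:

import tensorflow as tf

class myCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        # The key must match the metric name given in metrics=['accuracy']
        acc = logs.get('accuracy')
        if acc is not None and acc > 0.99:
            print("\nReached 99% accuracy so cancelling training!")
            self.model.stop_training = True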
In a Jupyter notebook I had to use "acc", but in Google Colab "accuracy". I guess it depends on the TensorFlow version installed: older versions abbreviate the metric name to 'acc', while newer ones report it as 'accuracy'.
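If your environments run different TensorFlow versions, a callback that checks both possible key names sidesteps the difference. This is a sketch, not a built-in Keras callback; the class name StopAtAccuracy and the threshold parameter are just illustrative choices:

import tensorflow as tf

class StopAtAccuracy(tf.keras.callbacks.Callback):
    """Stop training once training accuracy exceeds a threshold.

    Looks up both 'accuracy' and 'acc', since the log key name
    differs between TensorFlow versions.
    """
    def __init__(self, threshold=0.99):
        super().__init__()
        self.threshold = threshold

    def on_epoch_end(self, epoch, logs=None):
        logs = logs or {}
        acc = logs.get('accuracy', logs.get('acc'))
        if acc is not None and acc > self.threshold:
            print("\nReached {:.0%} accuracy so cancelling training!".format(self.threshold))
            self.model.stop_training = True

Usage is the same as in the question: pass an instance via callbacks=[StopAtAccuracy()] to model.fit.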