After training, I saved both the whole Keras model and the weights only, using
model.save_weights(MODEL_WEIGHTS) and model.save(MODEL_NAME)
Both the model and the weights were saved successfully with no errors. I can load the weights without any problem using model.load_weights and they work as expected, but when I try to load the saved model via load_model, I get the following error:
File "C:/Users/Rizwan/model_testing/model_performance.py", line 46, in <module> Model2 = load_model('nasnet_RS2.h5',custom_objects={'euc_dist_keras': euc_dist_keras}) File "C:\Users\Rizwan\AppData\Roaming\Python\Python36\site-packages\keras\engine\saving.py", line 419, in load_model model = _deserialize_model(f, custom_objects, compile) File "C:\Users\Rizwan\AppData\Roaming\Python\Python36\site-packages\keras\engine\saving.py", line 321, in _deserialize_model optimizer_weights_group['weight_names']] File "C:\Users\Rizwan\AppData\Roaming\Python\Python36\site-packages\keras\engine\saving.py", line 320, in <listcomp> n.decode('utf8') for n in AttributeError: 'str' object has no attribute 'decode'
I have never received this error before, and I used to load models this way without issue. I am using Keras 2.2.4 with the TensorFlow backend on Python 3.6. My training code is:
from keras_preprocessing.image import ImageDataGenerator
from keras import backend as K
from keras.applications.nasnet import NASNetMobile
from keras.models import Model, load_model
from keras.layers import Dense
from keras.callbacks import ReduceLROnPlateau, TensorBoard, ModelCheckpoint, EarlyStopping
import pandas as pd

MODEL_NAME = "nasnet_RS2.h5"
MODEL_WEIGHTS = "nasnet_RS2_weights.h5"


def euc_dist_keras(y_true, y_pred):
    return K.sqrt(K.sum(K.square(y_true - y_pred), axis=-1, keepdims=True))


def main():
    # Here, we initialize the "NASNetMobile" model type and customize the final
    # feature regressor layer.
    # NASNet is a neural network architecture developed by Google.
    # This architecture is specialized for transfer learning, and was discovered
    # via Neural Architecture Search. NASNetMobile is a smaller version of NASNet.
    model = NASNetMobile()
    model = Model(model.input,
                  Dense(1, activation='linear',
                        kernel_initializer='normal')(model.layers[-2].output))
    # model = load_model('current_best.hdf5', custom_objects={'euc_dist_keras': euc_dist_keras})

    # This model will use the "Adam" optimizer.
    model.compile("adam", euc_dist_keras)

    lr_callback = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=5, min_lr=0.003)
    # This callback will log model stats to Tensorboard.
    tb_callback = TensorBoard()
    # This callback will checkpoint the best model at every epoch.
    mc_callback = ModelCheckpoint(filepath='current_best_mem3.h5', verbose=1, save_best_only=True)
    es_callback = EarlyStopping(monitor='val_loss', min_delta=0, patience=4, verbose=0,
                                mode='auto', baseline=None, restore_best_weights=True)

    # This is the train DataSequence.
    # These are the callbacks.
    # callbacks = [lr_callback, tb_callback, mc_callback]
    callbacks = [lr_callback, tb_callback, es_callback]

    train_pd = pd.read_csv("./train3.txt", delimiter=" ", names=["id", "label"], index_col=None)
    test_pd = pd.read_csv("./val3.txt", delimiter=" ", names=["id", "label"], index_col=None)
    # train_pd = pd.read_csv("./train2.txt", delimiter=" ", header=None, index_col=None)
    # test_pd = pd.read_csv("./val2.txt", delimiter=" ", header=None, index_col=None)

    # model.summary()
    batch_size = 32
    datagen = ImageDataGenerator(rescale=1. / 255)
    train_generator = datagen.flow_from_dataframe(dataframe=train_pd, directory="./images",
                                                  x_col="id", y_col="label", has_ext=True,
                                                  class_mode="other", target_size=(224, 224),
                                                  batch_size=batch_size)
    valid_generator = datagen.flow_from_dataframe(dataframe=test_pd, directory="./images",
                                                  x_col="id", y_col="label", has_ext=True,
                                                  class_mode="other", target_size=(224, 224),
                                                  batch_size=batch_size)

    STEP_SIZE_TRAIN = train_generator.n // train_generator.batch_size
    STEP_SIZE_VALID = valid_generator.n // valid_generator.batch_size

    model.fit_generator(generator=train_generator,
                        steps_per_epoch=STEP_SIZE_TRAIN,
                        validation_data=valid_generator,
                        validation_steps=STEP_SIZE_VALID,
                        callbacks=callbacks,
                        epochs=20)

    # We save the weights and the whole model.
    model.save_weights(MODEL_WEIGHTS)
    model.save(MODEL_NAME)


if __name__ == '__main__':
    # freeze_support() here if program needs to be frozen
    main()
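For reference, this is roughly how I load the two saved files back afterwards (a minimal sketch; euc_dist_keras and the architecture rebuild are the same as in the training code above):

from keras import backend as K
from keras.applications.nasnet import NASNetMobile
from keras.models import Model, load_model
from keras.layers import Dense


def euc_dist_keras(y_true, y_pred):
    return K.sqrt(K.sum(K.square(y_true - y_pred), axis=-1, keepdims=True))


# Path 1: rebuild the architecture, then load only the weights -- this works fine.
model = NASNetMobile()
model = Model(model.input,
              Dense(1, activation='linear',
                    kernel_initializer='normal')(model.layers[-2].output))
model.compile("adam", euc_dist_keras)
model.load_weights("nasnet_RS2_weights.h5")

# Path 2: load the whole saved model -- this is the call that raises the error above.
Model2 = load_model("nasnet_RS2.h5",
                    custom_objects={'euc_dist_keras': euc_dist_keras})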
For me the solution was downgrading the h5py package (in my case to 2.10.0); restoring only Keras and TensorFlow to the correct versions was not enough. The traceback shows Keras' saving code calling n.decode('utf8') on names read from the HDF5 file, and with h5py 3.x those attributes already come back as str, so the decode fails.
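A minimal way to apply that fix in a pip-based environment (the exact command may differ for conda or other setups) is:

pip install h5py==2.10.0

After reinstalling h5py and restarting the Python process, the load_model('nasnet_RS2.h5', custom_objects={'euc_dist_keras': euc_dist_keras}) call should deserialize the saved model, including the optimizer weights, without the decode error.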