Keras won't load JSON file

I ran my code and saved my model using the following code:
model_json = model.to_json()
with open(inFilePath + ".json", "w") as json_file:
    json_file.write(model_json)
modWeightsFilepath = inFilePath + ".weights.hdf5"
checkpoint = ModelCheckpoint(modWeightsFilepath, monitor='val_accuracy', verbose=1,
                             save_best_only=True, save_weights_only=True, mode='auto')
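The checkpoint callback only writes weights when it is actually passed to training; presumably the (not shown) fit call looked something like this minimal sketch, where the data variable names are assumptions:
# Assumption: the checkpoint was passed to model.fit() during training;
# without the callbacks argument no .weights.hdf5 file is ever written.
model.fit(X_train, y_train,
          validation_data=(X_val, y_val),
          epochs=10,
          callbacks=[checkpoint])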
And then I wanted to load my model again to make predictions:
from keras.models import model_from_json
json_file = open('/home/models/final_model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
model = model_from_json(loaded_model_json)
#load weights into new model
model.load_weights('/home/models/final_model.weights.hdf5')
print("Loaded model from disk")
But this gives me the following error:
TypeError: __init__() got an unexpected keyword argument 'ragged'
I don't quite know what's wrong. My Keras (GPU) version is 2.1.6-tf.
Edit:
In order to create the model I used:
import json
import numpy as np
from generator import DataGenerator
import tensorflow
KERAS_BACKEND=tensorflow
import keras
from keras.preprocessing import sequence
from keras.models import Sequential, Model
from keras import optimizers
from keras.layers import Dense, Dropout, Activation, Flatten, Input
from keras.layers import Conv1D, AveragePooling1D, MaxPooling1D
from keras.layers.merge import concatenate
from keras.optimizers import SGD
import os
import sys
from itertools import chain
#import matplotlib.pyplot as plt
from functools import reduce
from keras.callbacks import EarlyStopping,ModelCheckpoint
from sklearn.utils import class_weight
And in order to load the model, I imported:
from keras.models import model_from_json
after which I got the error described above. I then changed it to:
from tensorflow.keras.models import model_from_json
And the error persisted.
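For context, this 'ragged' error usually signals a version/namespace mismatch: a JSON file written by a newer tf.keras serializes InputLayer with a ragged argument that an older Keras (and 2.1.6-tf is old) does not recognize. A minimal sketch of the usual remedy, assuming the model is re-saved and re-loaded with the same, sufficiently recent TensorFlow on both sides:
# Use tensorflow.keras everywhere (saving *and* loading), with matching
# TensorFlow versions in the training and prediction environments.
from tensorflow.keras.models import model_from_json

with open('/home/models/final_model.json', 'r') as json_file:
    loaded_model_json = json_file.read()

model = model_from_json(loaded_model_json)
model.load_weights('/home/models/final_model.weights.hdf5')
print("Loaded model from disk")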

Related

How to save trained model (simpletransformers.ner)

This is my first time using simpletransformers.ner, and now I want to save my model. This is my code:
!pip install simpletransformers
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from simpletransformers.ner import NERModel,NERArgs
from transformers import AutoConfig, AutoTokenizer, AutoModel, TFAutoModel
model = NERModel('bert', 'bert-base-cased', labels=label, args=args)
model.train_model(train_data,eval_data = test_data,acc=accuracy_score)
result, model_outputs, preds_list = model.eval_model(test_data)
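A note that may help: simpletransformers writes the trained model to args.output_dir (default outputs/) during train_model, so saving is largely a matter of reloading from that directory. A minimal sketch, assuming the default output directory and the question's label list:
from simpletransformers.ner import NERModel, NERArgs

args = NERArgs()
args.output_dir = "outputs/"  # train_model() writes checkpoints here

# ... train as in the question ...

# Reload later by passing the output directory as the model name:
loaded_model = NERModel('bert', 'outputs/', labels=label, args=args)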

Module object is not callable (kerastuner)

I am trying to optimize my Keras NN using kerastuner, but for some reason it's giving me a 'module' object is not callable error on the tuner=randomsearch line.
import pandas as pd
import numpy as np
from sklearn.preprocessing import scale
from keras.models import Sequential
from keras.layers import Dense
from sklearn.metrics import mean_squared_error
from matplotlib import pyplot as plt
from keras_tuner.tuners import randomsearch
from keras_tuner.engine import hyperparameters
import time
LOG_DIR = f"{int(time.time())}"
def build_model(hp):
    model = Sequential()
    model.add(Dense(hp.Int("input_units", min_value=1, max_value=105, step=1), input_dim=X_train.shape[1], activation='relu'))
    for i in range(hp.Int("n_layers", 1, 5)):
        model.add(Dense(hp.Int(f"conv_{i}_units", min_value=1, max_value=105, step=1), activation='relu'))
    model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
    return model
tuner = randomsearch(build_model, objective = "val_accuracy", max_trials = 1, executions_per_trial = 1, directory = LOG_DIR)
tuner.search(x=X_train, y=y_train, epochs=1, batch_size=146, validation_data=(X_test, y_test))
I figured it out: I had imported the wrong randomsearch and hyperparameters.
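For reference, keras_tuner exposes lowercase modules and CamelCase classes; importing the module and then calling it is what raises 'module' object is not callable. A sketch of the corrected imports, assuming a current keras_tuner release:
# Import the classes, not the modules that contain them.
from keras_tuner.tuners import RandomSearch
from keras_tuner import HyperParameters

tuner = RandomSearch(build_model,
                     objective="val_accuracy",
                     max_trials=1,
                     executions_per_trial=1,
                     directory=LOG_DIR)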

ValueError: could not convert string to float: '8/20/2014' when fitting the training dataset to the model

Can someone please help me fix these errors? The model will not fit on the training data:
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
df=pd.read_csv('./DATA/kc_house_data.csv')
X=df.drop('price',axis=1).values
y=df['price'].values
from sklearn.model_selection import train_test_split
X_test, X_train, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=101)
from sklearn.preprocessing import MinMaxScaler
scaler=MinMaxScaler()
X_train=scaler.fit_transform(X_train)
X_test=scaler.transform(X_test)
print(X_train.shape())
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
model=Sequential()
model.add(Dense(19,activation='relu'))
model.add(Dense(19,activation='relu'))
model.add(Dense(19,activation='relu'))
model.add(Dense(19,activation='relu'))
model.add(Dense(1))
model.compile(optimizer='adam',loss='mse')
print(model.fit(x=X_train,y=y_train,validation_data=(X_test,y_test),batch_size=128,epochs=400))
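The ValueError almost certainly comes from the date column of kc_house_data.csv, whose strings (e.g. '8/20/2014') MinMaxScaler cannot convert to float; note also that the train_test_split unpacking above swaps X_test and X_train. A minimal sketch of the usual fix, assuming the column is named 'date' and is not needed as a feature:
# Drop the non-numeric date column (or engineer numeric features from it).
X = df.drop(['price', 'date'], axis=1).values
y = df['price'].values

# train_test_split returns (X_train, X_test, y_train, y_test) in this order.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=101)

print(X_train.shape)  # shape is an attribute, not a method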

TypeError: load_model() missing 1 required positional argument: 'filepath'

I am using Keras 2.2.5 in the training phase. I have saved the model with ModelCheckpoint function that is imported like this: from keras.callbacks import ModelCheckpoint
Then, in the test phase, when I want to load the model using the load_model function (from keras.models import load_model), I get the title error.
The test script is as follows:
import numpy as np
import argparse
import keras.layers as KL
from keras.models import load_model
from keras.preprocessing import image
from keras.applications.resnet50 import preprocess_input as preprocess_input_resnet
if __name__ == "__main__":
    # Define variables
    parser = argparse.ArgumentParser()
    parser.add_argument("--image_size", type=tuple, default=(500, 500))
    parser.add_argument("--mask_size", type=tuple, default=(32, 32))
    parser.add_argument("--image_path", type=str, default="../DATA/resized_imgs/13056.png")
    parser.add_argument("--mask_path", type=str, default="../DATA/resized_masks/13056.png")
    parser.add_argument("--path_of_the_checkpoint", type=str, default="./RESULTS/2020_02_25_12_34_54/bestmodel/MultiLabel_PETA_weights.best.hdf5")
    parser.add_argument("--Categories", type=list, default=["personalLess30", "personalLess45", "personalLess60", "personalLarger60"])
    args = parser.parse_args()
    # Load trained model
    PAR_model = load_model(filepath=args.path_of_the_checkpoint, custom_objects={'BatchNorm': KL.BatchNormalization})
    PAR_model.summary()
You should not pass positional arguments as keyword arguments:
PAR_model = load_model(args.path_of_the_checkpoint, custom_objects={'BatchNorm': KL.BatchNormalization})

ValueError: You are trying to load a weight file containing 58 layers into a model with 55 layers

I trained my model and saved it in .h5 format, training by freezing the last layers of the MobileNet ImageNet model.
Loading the model and trying a prediction raises the error ValueError: You are trying to load a weight file containing 58 layers into a model with 55 layers.
Training code:
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import os
import keras
import matplotlib.pyplot as plt
from keras.layers import Dense,GlobalAveragePooling2D
from keras.applications import MobileNet
from keras.preprocessing import image
from keras.applications.mobilenet import preprocess_input
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model
from keras.optimizers import Adam
# In[2]:
base_model=MobileNet(weights='imagenet',include_top=False) #imports the mobilenet model and discards the last 1000 neuron layer.
x=base_model.output
x=GlobalAveragePooling2D()(x)
x=Dense(1024,activation='relu')(x) #we add dense layers so that the model can learn more complex functions and classify for better results.
x=Dense(1024,activation='relu')(x) #dense layer 2
x=Dense(512,activation='relu')(x) #dense layer 3
preds=Dense(2,activation='softmax')(x) #final layer with softmax activation
# In[3]:
model=Model(inputs=base_model.input,outputs=preds)
#specify the inputs
#specify the outputs
#now a model has been created based on our architecture
# In[4]:
for layer in model.layers[:20]:
    layer.trainable = False
for layer in model.layers[20:]:
    layer.trainable = True
# In[5]:
train_datagen=ImageDataGenerator(preprocessing_function=preprocess_input) #included in our dependencies
train_generator = train_datagen.flow_from_directory('./train/',  # this is where you specify the path to the main data folder
                                                    target_size=(224, 224),
                                                    color_mode='rgb',
                                                    batch_size=64,
                                                    class_mode='categorical',
                                                    shuffle=True)
# In[33]:
model.compile(optimizer='Adam',loss='categorical_crossentropy',metrics=['accuracy'])
# Adam optimizer
# loss function will be categorical cross entropy
# evaluation metric will be accuracy
step_size_train=train_generator.n//train_generator.batch_size
model.fit_generator(generator=train_generator,
                    steps_per_epoch=step_size_train,
                    epochs=10)
# serialize model to JSON
model_json = model.to_json()
with open("mobilenet_2.json", "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
model.save_weights("mobilenet_2.h5")
print("Saved model to disk")
Prediction code:
import keras
from keras import backend as K
from keras.layers.core import Dense, Activation
from keras.optimizers import Adam
from keras.metrics import categorical_crossentropy
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing import image
from keras.models import Model
from keras.applications import imagenet_utils
from keras.layers import Dense,GlobalAveragePooling2D
from keras.applications import MobileNet
from keras.applications.mobilenet import preprocess_input
import numpy as np
from keras.optimizers import Adam
from keras.models import load_model
model = load_model("mobilenet_1.h5")
#mobile = keras.applications.mobilenet.MobileNet(weights="imagenet")
def prepare_image(file):
    img_path = ''
    img = image.load_img("/home/christie/mobilenet/transfer-learning/" + file, target_size=(224, 224))
    img_array = image.img_to_array(img)
    img_array_expanded_dims = np.expand_dims(img_array, axis=0)
    return keras.applications.mobilenet.preprocess_input(img_array_expanded_dims)
'''
lookup_list = ["banana","banana_palenkodan","banana_red","banana_nendran","banana_karpooravalli"]
#print(lookup_list)
if ans not in lookup_list:
    print("Not found")
    return "[None]"
'''
preprocessed_image = prepare_image('test.jpg')
predictions = model.predict(preprocessed_image)
results = imagenet_utils.decode_predictions(predictions)
print(results)
Error log:
ValueError: You are trying to load a weight file containing 58 layers
into a model with 55 layers.
The model is converted to JSON format and written to mobilenet_2.json in the local directory. The network weights are written to mobilenet_2.h5 in the local directory.
Similarly, you have to load the JSON and its corresponding weights.
Try editing as below:
# serialize model to JSON
model_json = model.to_json()
with open("mobilenet_2.json", "w") as json_file:
    json_file.write(model_json)
# serialize weights to HDF5
model.save_weights("mobilenet_2.h5")
print("Saved model to disk")
# later...
# load json and create model
from keras.models import model_from_json
json_file = open('mobilenet_2.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("mobilenet_2.h5")
print("Loaded model from disk")
You are saving just the weights but trying to load both the model architecture and the weights. If you would like to save the weights and the architecture together and load them later, then try the code below:
# save model and architecture to single file
model.save("model.h5")
# later...
# load model
model = load_model('model.h5')
