Unable to load the saved model in the browser using Tensorflowjs

I am trying to build a rice classifier using transfer learning on an edge device, with the help of the tutorial at https://github.com/ADLsourceCode/TensorflowJS
My sample data is at https://www.dropbox.com/s/esirpr6q1lsdsms/ricetransfer1.zip?dl=0
I saved the rice classification model locally using the code below and kept it in the folder TensorflowJS/Mobilenet_VGG16_Keras_To_TensorflowJS/static/ along with the vgg and mobilenet models, but I am not able to load the rice model with tensorflowjs in the browser.
If I save the vgg model on my local system and load it with tensorflowjs in the browser, it works well.
# Base variables
import os

base_dir = 'ricetransfer1/'
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
test_dir = os.path.join(base_dir, 'test')
train_cats_dir = os.path.join(train_dir, 'KN')
train_dogs_dir = os.path.join(train_dir, 'DM')

train_size, validation_size, test_size = 90, 28, 26
#train_size, validation_size, test_size = 20, 23, 14

img_width, img_height = 224, 224  # Default input size for VGG16

# Instantiate convolutional base
from keras.applications import VGG16
import tensorflowjs as tfjs
import tensorflow as tf

tf.compat.v1.disable_eager_execution()

conv_base = VGG16(weights='imagenet',
                  include_top=False,
                  input_shape=(img_width, img_height, 3))
# 3 = number of channels in RGB pictures

# Saving the vgg model to run it locally
tfjs.converters.save_keras_model(conv_base, '/TensorflowJS/Mobilenet_VGG16_Keras_To_TensorflowJS/static/vgg')
# Check architecture
conv_base.summary()

# Extract features
import os, shutil
from keras.preprocessing.image import ImageDataGenerator
import numpy as np

train_size, validation_size, test_size = 90, 28, 25
datagen = ImageDataGenerator(rescale=1./255)
batch_size = 1
#train_dir = "ricetransfer1/train"
#validation_dir = "ricetransfer1/validation"
#test_dir = "ricetransfer1/test"
#indices = np.random.choice(range(len(X_train)))
def extract_features(directory, sample_count):
    #sample_count = X_train.ravel()
    features = np.zeros(shape=(sample_count, 7, 7, 512))  # Must be equal to the output of the convolutional base
    labels = np.zeros(shape=(sample_count))
    # Preprocess data
    generator = datagen.flow_from_directory(directory,
                                            target_size=(img_width, img_height),
                                            batch_size=batch_size,
                                            class_mode='binary')
    # Pass data through convolutional base
    i = 0
    for inputs_batch, labels_batch in generator:
        features_batch = conv_base.predict(inputs_batch)
        features[i * batch_size: (i + 1) * batch_size] = features_batch
        labels[i * batch_size: (i + 1) * batch_size] = labels_batch
        i += 1
        if i * batch_size >= sample_count:
            break
    return features, labels
train_features, train_labels = extract_features(train_dir, train_size) # Agree with our small dataset size
validation_features, validation_labels = extract_features(validation_dir, validation_size)
test_features, test_labels = extract_features(test_dir, test_size)
# Define model
from keras import models
from keras import layers
from keras import optimizers

epochs = 2

ricemodel = models.Sequential()
ricemodel.add(layers.Flatten(input_shape=(7, 7, 512)))
ricemodel.add(layers.Dense(256, activation='relu', input_dim=(7*7*512)))
ricemodel.add(layers.Dropout(0.5))
ricemodel.add(layers.Dense(1, activation='sigmoid'))
ricemodel.summary()

# Compile model
ricemodel.compile(optimizer=optimizers.Adam(),
                  loss='binary_crossentropy',
                  metrics=['acc'])

# Train model
history = ricemodel.fit(train_features, train_labels,
                        epochs=epochs,
                        batch_size=batch_size,
                        validation_data=(validation_features, validation_labels))

## Saving the rice classification model to run it locally
tfjs.converters.save_keras_model(ricemodel, '/TensorflowJS/Mobilenet_VGG16_Keras_To_TensorflowJS/static/rice/')
I think there is some mistake in the rice model; how can I solve the issue?
The expected output is to run the rice classification in the browser using tensorflowjs.

I think you are getting an error due to an older version of the tfjs script. Update to a newer version, e.g.
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs@0.13.5"></script>
in your html page, but a new error may then arise due to a different image size.
I suggest opening developer mode in the browser to see the exact error; in my case this is how it worked.
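One more thing worth checking: the browser can only fetch model.json and its weight shards over HTTP, not from the local file system. Here is a minimal sketch of serving the tutorial's static folder with Python's built-in server (Python 3.7+; the directory path below is taken from the question and may differ on your machine):
import functools
import http.server

# Serve the folder holding the converted models so the browser can request
# model.json and the weight shard files over HTTP.
handler = functools.partial(
    http.server.SimpleHTTPRequestHandler,
    directory='TensorflowJS/Mobilenet_VGG16_Keras_To_TensorflowJS/static')

# The rice model would then be reachable at http://localhost:8000/rice/model.json
http.server.HTTPServer(('localhost', 8000), handler).serve_forever()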

Related

How do I know what the output of model.predict() corresponds to?

I am trying to make a CNN that classifies cats and dogs and I am using flow_from_directory() to prepare my data for the model.
from keras import Sequential
from keras_preprocessing.image import ImageDataGenerator
from keras.layers import *
from keras.callbacks import ModelCheckpoint
from keras.optimizers import *
import keras
import numpy as np
import os

img_size = 250  # number of pixels for width and height

# Random Seed
np.random.seed(123456789)

training_path = os.getcwd() + "/cats and dogs images/train"
testing_path = os.getcwd() + "/cats and dogs images/test"

# Defines the Model
model = Sequential([
    Conv2D(filters=128, kernel_size=(3,3), activation="relu", padding="same", input_shape=(img_size,img_size,3)),
    MaxPool2D(pool_size=(2,2), strides=2),
    Conv2D(filters=64, kernel_size=(3,3), activation="relu", padding="same"),
    Flatten(),
    Dense(32, activation="relu"),
    Dense(2, activation="softmax")
])

# Scales the pixel values to between 0 to 1
datagen = ImageDataGenerator(rescale=1.0/255.0)

Batch_size = 10

# Prepares Training Data
training_dataset = datagen.flow_from_directory(directory=training_path,
                                               target_size=(img_size,img_size),
                                               classes=["cat","dog"],
                                               class_mode="categorical",
                                               batch_size=Batch_size)

# Prepares Testing Data
testing_dataset = datagen.flow_from_directory(directory=testing_path,
                                              target_size=(img_size,img_size),
                                              classes=["cat","dog"],
                                              class_mode="categorical",
                                              batch_size=Batch_size)

# Compiles the model
#model.compile(loss="categorical_crossentropy", optimizer="sgd", metrics=['accuracy'])
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=['accuracy'])
#model.compile(loss="mse", optimizer="sgd", metrics=[keras.metrics.MeanSquaredError()])

# Checkpoint
filepath = os.getcwd() + "/trained_model.h5"
checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, mode='min', save_freq=1)

# Fitting the model to the dataset (Training the Model)
model.fit(x=training_dataset, steps_per_epoch=400,
          validation_data=testing_dataset, validation_steps=100,
          epochs=10, callbacks=[checkpoint], verbose=1)

# evaluate model on training dataset
_, acc = model.evaluate_generator(training_dataset, steps=len(training_dataset), verbose=0)
print("Accuracy on training dataset:")
print('> %.3f' % (acc * 100.0))

# evaluate model on testing dataset
_, acc = model.evaluate_generator(testing_dataset, steps=len(testing_dataset), verbose=0)
print("Accuracy on testing dataset:")
print('> %.3f' % (acc * 100.0))
I want to know how the output of model.predict() corresponds to the labels cats and dogs: which of the two numbers in the output is a cat and which is a dog?
Here's my code for loading the model and giving a prediction:
from keras.models import Sequential
from keras_preprocessing.image import *
from keras.layers import *
import tensorflow as tf
import numpy as np
from keras.layers.experimental.preprocessing import Rescaling
import os
import cv2
from keras.models import *

img_size = 250

# Load weights into new model
filepath = os.getcwd() + "/trained_model.h5"
model = load_model(filepath)
print("Loaded model from disk")

# Scales the pixel values to between 0 to 1
#datagen = ImageDataGenerator(rescale=1.0/255.0)

# Prepares Testing Data
testing_dataset = cv2.imread(os.getcwd() + "/cats and dogs images/single test sample/507.png")
#img = datagen.flow_from_directory(testing_dataset, target_size=(img_size,img_size))
img = cv2.resize(testing_dataset, (img_size,img_size))

newimg = np.asarray(img)
pixels = newimg.astype('float32')
pixels /= 255.0
print(pixels.shape)

pixels = np.expand_dims(pixels, axis=0)
print(pixels.shape)

prediction = model.predict(pixels)
print(prediction)
And here is the output from the prediction code above:
Loaded model from disk
(250, 250, 3)
(1, 250, 250, 3)
[[5.4904184e-27 1.0000000e+00]]
As you can see, the prediction gave an array of two numbers, but which one corresponds to the dog label and which to the cat label? By the way, the model isn't fully trained so I am just testing out the code to see if it works.
The model output depends on how you loaded the data and how you specified the ordering/labelling of the classes in the code you provided:
training_dataset = datagen.flow_from_directory(directory=training_path,
                                               target_size=(img_size,img_size),
                                               classes=["cat","dog"],
                                               class_mode="categorical",
                                               batch_size=Batch_size)

# Prepares Testing Data
testing_dataset = datagen.flow_from_directory(directory=testing_path,
                                              target_size=(img_size,img_size),
                                              classes=["cat","dog"],
                                              class_mode="categorical",
                                              batch_size=Batch_size)
You specified, in the classes argument, that the classes are ordered cat then dog during the loading of the data.
Therefore the output is ordered as two probabilities (summing to 1): the first probability is how likely the input image is a cat, and the second is how likely it is a dog.
To get the predicted class, use this line:
output_class = np.argmax(prediction, axis=1)
It compares the elements of the list (in our case, the two probabilities) and outputs the index of the greatest element. An output of [1] (or [0, 1], depending on the shape of the output) means the image is a dog, since the 2nd element is the greatest; [0] (or [1, 0]) means the predicted class is cat.
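As a concrete illustration, here is a small sketch; it assumes the training_dataset generator from the question is still in scope, so its class_indices mapping can be inverted to recover the label name:
import numpy as np

# Example output copied from the question
prediction = np.array([[5.4904184e-27, 1.0000000e+00]])
output_class = np.argmax(prediction, axis=1)  # -> array([1])

# Invert the generator's class -> index mapping to recover the label name
idx_to_label = {idx: cls for cls, idx in training_dataset.class_indices.items()}
print(idx_to_label[output_class[0]])  # -> 'dog'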

Load model from Keras and investigate the details

I'm using Keras to fit a function, and I'm new to Keras.
With a very simple network, Keras can fit my function very well. I just want to know what the fitted function is and to understand why it works so well, but the predict function hides the details.
Here is the code where I create the network:
import numpy as np
import tensorflow as tf
from tensorflow import keras

LABEL_COLUMN = "shat"
BATCH_SIZE = 16
EPOCHS = 20

trainfilePath = "F:\\PyworkingFolder\\WWSHat\\_Data\\alpha0train.csv"
testfilePath = "F:\\PyworkingFolder\\WWSHat\\_Data\\alpha0test.csv"

with open(trainfilePath, encoding='utf-8') as txtContent:
    trainArray = np.loadtxt(txtContent, delimiter=",")
with open(testfilePath, encoding='utf-8') as txtContent:
    testArray = np.loadtxt(txtContent, delimiter=",")

trainSample = trainArray[:, 0:14]
trainLable = trainArray[:, 14]
testSample = testArray[:, 0:14]
testLable = testArray[:, 14]

model = keras.Sequential([
    keras.layers.Dense(14, activation='relu', input_shape=[14]),
    keras.layers.Dense(15, activation='relu'),
    keras.layers.Dense(1)
])

optimizer = tf.keras.optimizers.RMSprop(0.001)
# optimizer = keras.optimizers.Adadelta(lr=1.0, rho=0.95, epsilon=None, decay=0.0)

model.compile(loss='mse',
              optimizer=optimizer,
              metrics=['mae', 'mse'])
model.summary()

history = model.fit(trainSample, trainLable, epochs=EPOCHS, batch_size=BATCH_SIZE)
model.evaluate(testSample, testLable, verbose=1)
model.save("F:\\PyworkingFolder\\WWSHat\\_Data\\alpha0.h5")
What I understand is: the layers are weight matrices and bias vectors, and each layer works as
out = max(0, weight * input + bias)
After some searching, I found I can read the .h5 file using
import h5py
import numpy as np

FILENAME = "F:\\PyworkingFolder\\WWSHat\\_Data\\alpha0.h5"

with h5py.File(FILENAME, 'r') as f:
    dense_1 = f['/model_weights/dense_1/dense_1']
    dense_1_bias = dense_1['bias:0'][:]
    dense_1_kernel = dense_1['kernel:0'][:]
    dense_2 = f['/model_weights/dense_2/dense_2']
    dense_2_bias = dense_2['bias:0'][:]
    dense_2_kernel = dense_2['kernel:0'][:]

# print("Weight matrix 1:\n")
# print(dense_1_kernel)
# print("Bias matrix 1:\n")
# print(dense_1_bias)
# print("Weight matrix 2:\n")
# print(dense_2_kernel)
# print("Bias matrix 2:\n")
# print(dense_2_bias)

def layer_output(v, kernel, bias):
    return np.dot(v, kernel) + bias

reluFunction = np.vectorize(lambda x: x if x >= 0.0 else 0.0)

testV = np.array([[-0.004090321213057993,
                   0.009615388501909157,
                   -0.24223693596921558,
                   0.015504079563927319,
                   -0.02659541428995062,
                   0.018512968977547152,
                   0.00836788544720289,
                   -0.10874776132746002,
                   -0.045863474556415526,
                   -0.010195799916571194,
                   0.09474219315939948,
                   0.03606698737846194,
                   -0.004560110004741025,
                   0.028042417959738858]])

output_1 = layer_output(testV, dense_1_kernel, dense_1_bias)
output_2 = reluFunction(output_1)
output_3 = layer_output(output_2, dense_2_kernel, dense_2_bias)
output_4 = reluFunction(output_3)
However, the result of output_4 is very different from what I get using
loaded_model = keras.models.load_model("F:\\PyworkingFolder\\WWSHat\\_Data\\alpha0.h5")
predicted = loaded_model(testV)
The predicted value is very close to the ground truth, while output_4 is far from it.
I'm stuck here: I don't know why, and I failed to find information about how to extract the function I want from the Keras model. I need your help!
Thanks!
model = keras.Sequential([
    keras.layers.Dense(14, activation='relu', input_shape=[14]),
    keras.layers.Dense(15, activation='relu'),
    keras.layers.Dense(1)
])
In your model there are 3 layers. The last Dense layer has weights and biases too, and you didn't consider them in your calculation.
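Here is a sketch of the corrected manual computation, reusing FILENAME, layer_output, reluFunction, and testV from the question. The group name dense_3 is an assumption; list f['/model_weights'].keys() to confirm how the layers are actually named in your file:
import h5py

# Read the third layer's weights as well (group name 'dense_3' assumed)
with h5py.File(FILENAME, 'r') as f:
    dense_3 = f['/model_weights/dense_3/dense_3']
    dense_3_bias = dense_3['bias:0'][:]
    dense_3_kernel = dense_3['kernel:0'][:]

hidden_1 = reluFunction(layer_output(testV, dense_1_kernel, dense_1_bias))
hidden_2 = reluFunction(layer_output(hidden_1, dense_2_kernel, dense_2_bias))
# The final Dense(1) layer has no activation, so no ReLU on the last step
prediction = layer_output(hidden_2, dense_3_kernel, dense_3_bias)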

How to extract features from an image for training a CNN model

I am working on a project to classify waste as plastic or non-plastic, using only images to train the model. However, I still don't know what features the model takes into account while classifying them. I am using a CNN, but the prediction accuracy is still not up to the mark.
The reason I went with a CNN is that there is no specific feature that distinguishes plastics from other waste. Is there any other way to approach this problem?
For example, if I train on images of cats, my neural network learns what a cat is without my explicitly giving it features. Is the same valid here too?
Suppose you want to extract features using a pre-trained convolutional neural network such as VGGNet (VGG16).
Code to reuse the convolutional base is:
from keras.applications import VGG16

conv_base = VGG16(weights='imagenet',
                  include_top=False,
                  input_shape=(150, 150, 3))  # This is the size of your image
The final feature map has shape (4, 4, 512). That's the feature on top of which you'll stick a densely connected classifier.
There are 2 ways to extract features:
FAST FEATURE EXTRACTION WITHOUT DATA AUGMENTATION: Running the convolutional base over your dataset, recording its output to a Numpy array on disk, and then using this data as input to a standalone, densely connected classifier similar to those you saw in part 1 of this book. This solution is fast and cheap to run, because it only requires running the convolutional base once for every input image, and the convolutional base is by far the most expensive part of the pipeline. But for the same reason, this technique won't allow you to use data augmentation.
Code for extracting Features using this method is shown below:
import os
import numpy as np
from keras.preprocessing.image import ImageDataGenerator

base_dir = '/Users/fchollet/Downloads/cats_and_dogs_small'
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
test_dir = os.path.join(base_dir, 'test')

datagen = ImageDataGenerator(rescale=1./255)
batch_size = 20

def extract_features(directory, sample_count):
    features = np.zeros(shape=(sample_count, 4, 4, 512))
    labels = np.zeros(shape=(sample_count))
    generator = datagen.flow_from_directory(directory, target_size=(150, 150),
                                            batch_size=batch_size, class_mode='binary')
    i = 0
    for inputs_batch, labels_batch in generator:
        features_batch = conv_base.predict(inputs_batch)
        features[i * batch_size : (i + 1) * batch_size] = features_batch
        labels[i * batch_size : (i + 1) * batch_size] = labels_batch
        i += 1
        if i * batch_size >= sample_count:
            break
    return features, labels

train_features, train_labels = extract_features(train_dir, 2000)
validation_features, validation_labels = extract_features(validation_dir, 1000)
test_features, test_labels = extract_features(test_dir, 1000)

train_features = np.reshape(train_features, (2000, 4 * 4 * 512))
validation_features = np.reshape(validation_features, (1000, 4 * 4 * 512))
test_features = np.reshape(test_features, (1000, 4 * 4 * 512))

from keras import models
from keras import layers
from keras import optimizers

model = models.Sequential()
model.add(layers.Dense(256, activation='relu', input_dim=4 * 4 * 512))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(1, activation='sigmoid'))

model.compile(optimizer=optimizers.RMSprop(lr=2e-5),
              loss='binary_crossentropy', metrics=['acc'])

history = model.fit(train_features, train_labels, epochs=30,
                    batch_size=20, validation_data=(validation_features, validation_labels))
Training is very fast, because you only have to deal with two Dense layers; an epoch takes less than one second even on CPU.
FEATURE EXTRACTION WITH DATA AUGMENTATION: Extending the model you have (conv_base) by adding Dense layers on top, and running the whole thing end to end on the input data. This will allow you to use data augmentation, because every input image goes through the convolutional base every time it's seen by the model. But for the same reason, this technique is far more expensive than the first.
Code for the same is shown below:
from keras import models
from keras import layers

model = models.Sequential()
model.add(conv_base)
model.add(layers.Flatten())
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))

from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers

train_datagen = ImageDataGenerator(rescale=1./255, rotation_range=40,
                                   width_shift_range=0.2, height_shift_range=0.2, shear_range=0.2,
                                   zoom_range=0.2, horizontal_flip=True, fill_mode='nearest')
test_datagen = ImageDataGenerator(rescale=1./255)

train_generator = train_datagen.flow_from_directory(train_dir, target_size=(150, 150), batch_size=20, class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
    validation_dir,
    target_size=(150, 150),
    batch_size=20,
    class_mode='binary')

model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(lr=2e-5),
              metrics=['acc'])

history = model.fit_generator(train_generator, steps_per_epoch=100, epochs=30, validation_data=validation_generator, validation_steps=50)
For more details, please refer to Section 5.3.1 of the book "Deep Learning with Python", authored by the father of Keras, Francois Chollet.

MNIST and transfer learning with VGG16 in Keras - low validation accuracy

I recently started taking advantage of Keras's flow_from_dataframe() feature for a project, and decided to test it with the MNIST dataset. I have a directory full of the MNIST samples in png format, and a dataframe with the absolute directory for each in one column and the label in the other.
I'm also using transfer learning, importing VGG16 as a base, and adding my own 512 node relu dense layer and 0.5 drop-out before a softmax layer of 10. (For digits 0-9). I'm using rmsprop (lr=1e-4) as the optimizer.
When I launch my environment, it calls the latest version of keras_preprocessing from Git, which has support for absolute directories and capitalized file extensions.
My problem is that I have a very high training accuracy, and a terribly low validation accuracy. By my final epoch (10), I had a training accuracy of 0.94 and a validation accuracy of 0.01.
I'm wondering if there's something fundamentally wrong with my script? With another dataset, I'm even getting NaNs for both my training and validation loss values after epoch 4. (I checked the relevant columns, there aren't any null values!)
Here's my code. I'd be deeply appreciative if someone could glance through it and see if anything jumps out at them.
import pandas as pd
import numpy as np
import keras
from keras_preprocessing.image import ImageDataGenerator
from keras import applications
from keras import optimizers
from keras.models import Model
from keras.layers import Dropout, Flatten, Dense, GlobalAveragePooling2D
from keras import backend as k
from keras.callbacks import ModelCheckpoint, CSVLogger
from keras.applications.vgg16 import VGG16, preprocess_input

# INITIALIZE MODEL
img_width, img_height = 32, 32
model = VGG16(weights='imagenet', include_top=False, input_shape=(img_width, img_height, 3))

# freeze all layers
for layer in model.layers:
    layer.trainable = False

# Adding custom Layers
x = model.output
x = Flatten()(x)
x = Dense(512, activation='relu')(x)
x = Dropout(0.5)(x)
predictions = Dense(10, activation="softmax")(x)

# creating the final model
model_final = Model(input=model.input, output=predictions)

# compile the model
rms = optimizers.RMSprop(lr=1e-4)
#adadelta = optimizers.Adadelta(lr=0.001, rho=0.5, epsilon=None, decay=0.0)
model_final.compile(loss="categorical_crossentropy", optimizer=rms, metrics=["accuracy"])

# LOAD AND DEFINE SOURCE DATA
train = pd.read_csv('MNIST_train.csv', index_col=0)
val = pd.read_csv('MNIST_test.csv', index_col=0)

nb_train_samples = 60000
nb_validation_samples = 10000
batch_size = 60
epochs = 10

# Initiate the train and test generators
train_datagen = ImageDataGenerator()
test_datagen = ImageDataGenerator()

train_generator = train_datagen.flow_from_dataframe(dataframe=train,
                                                    directory=None,
                                                    x_col='train_samples',
                                                    y_col='train_labels',
                                                    has_ext=True,
                                                    target_size=(img_height, img_width),
                                                    batch_size=batch_size,
                                                    class_mode='categorical',
                                                    color_mode='rgb')

validation_generator = test_datagen.flow_from_dataframe(dataframe=val,
                                                        directory=None,
                                                        x_col='test_samples',
                                                        y_col='test_labels',
                                                        has_ext=True,
                                                        target_size=(img_height, img_width),
                                                        batch_size=batch_size,
                                                        class_mode='categorical',
                                                        color_mode='rgb')

# GET CLASS INDICES
print('****************')
for cls, idx in train_generator.class_indices.items():
    print('Class #{} = {}'.format(idx, cls))
print('****************')

# DEFINE CALLBACKS
path = './chk/epoch_{epoch:02d}-valLoss_{val_loss:.2f}-valAcc_{val_acc:.2f}.hdf5'
chk = ModelCheckpoint(path, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
logger = CSVLogger('./chk/training_log.csv', separator=',', append=False)

nPlus = 1
samples_per_epoch = nb_train_samples * nPlus

# Train the model
model_final.fit_generator(train_generator,
                          steps_per_epoch=int(samples_per_epoch / batch_size),
                          epochs=epochs,
                          validation_data=validation_generator,
                          validation_steps=int(nb_validation_samples / batch_size),
                          callbacks=[chk, logger])
Have you tried explicitly defining the classes of the images? Like so:
train_generator = image.ImageDataGenerator().flow_from_dataframe(classes=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
in both the train and validation generators.
I have found that sometimes the train and validation generators create different correspondence dictionaries.
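A sketch of how that could look with the generators from the question (one caveat: flow_from_dataframe matches the classes list against the values in y_col, so if the dataframe labels are strings, the list should contain strings too):
# Hypothetical adaptation of the question's generators with a fixed class list
classes = [str(d) for d in range(10)]  # '0'..'9'; use ints if y_col holds ints

train_generator = train_datagen.flow_from_dataframe(
    dataframe=train, x_col='train_samples', y_col='train_labels',
    classes=classes, target_size=(img_height, img_width),
    batch_size=batch_size, class_mode='categorical', color_mode='rgb')

validation_generator = test_datagen.flow_from_dataframe(
    dataframe=val, x_col='test_samples', y_col='test_labels',
    classes=classes, target_size=(img_height, img_width),
    batch_size=batch_size, class_mode='categorical', color_mode='rgb')
Passing the same classes list to both generators forces an identical label-to-index correspondence in train and validation.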

Using Model file of convolutional neural network

I have trained my model and saved it using model.save.
How can I use the model file to predict images?
I used the article How to predict input image using trained model in Keras? and this code:
# Modify 'test1.jpg' and 'test2.jpg' to the images you want to predict on
from keras.models import load_model
from keras.preprocessing import image
import numpy as np

# dimensions of our images
img_width, img_height = 320, 240

# load the model we saved
model = load_model('model1.h5')
model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])

# predicting images
img = image.load_img('yes.jpeg', target_size=(img_width, img_height))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
images = np.vstack([x])
classes = model.predict_classes(images, batch_size=10)
print(classes)

# predicting multiple images at once
##img = image.load_img('yes.jpeg', target_size=(img_width, img_height))
##y = image.img_to_array(img)
##y = np.expand_dims(y, axis=0)

# pass the list of multiple images np.vstack()
##images = np.vstack([x, y])
##classes = model.predict_classes(images, batch_size=10)

# print the classes, the images belong to
print(classes)
print(classes[0])
print(classes[0][0])
but the result is
[[1]]
[[1]]
[1]
1
How can I convert it into class indices?
Do not recompile your model unless you want to train it again; simply load your model, then predict.
Compiling will reset the weights.
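To turn the [[1]] output into a readable label, one option is a sketch like the following; the class_names list here is hypothetical and must be ordered to match the class_indices of the generator the model was trained with (e.g. {'no': 0, 'yes': 1}):
# Hypothetical label list; order must match the training generator's class_indices
class_names = ['no', 'yes']

classes = model.predict_classes(images, batch_size=10)  # e.g. [[1]]
for idx in classes.ravel():
    print(class_names[int(idx)])  # e.g. 'yes'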
