Prediction and class activation map work only on some pictures - python-3.x

I have retrained a VGG16 classifier and want to show the class activation map. Unfortunately this only works with some pictures, even though all images are preprocessed. It is only a binary classifier.
I have noticed that some pictures do not have the desired width and height, despite setting target_size while loading the image. Manual resizing did not help either. z has the desired shape.
from keras.applications.vgg16 import VGG16
from keras.applications.vgg16 import preprocess_input, decode_predictions
from keras.engine.input_layer import Input
from keras.layers import Dropout, Flatten, Dense
from keras.preprocessing import image
from keras.models import load_model, Model
from keras import backend as K
import numpy as np
import matplotlib.pyplot as plt
import cv2
# Load weights from retrained classifier
top_model_weights_path = 'retrained_weights.h5'
# Create model with VGG16 base
input_tensor = Input(shape=(224, 224, 3))
base_model = VGG16(weights='imagenet', include_top=False, input_tensor=input_tensor)
x = Flatten()(base_model.output)
x = Dense(4096, activation='relu')(x)
x = Dense(4096, activation='relu')(x)
predictions = Dense(1, activation='sigmoid')(x)
model = Model(input=base_model.input, output=predictions)
model.load_weights(top_model_weights_path, by_name=True)
# load and preprocess image
img_path = './picture.jpg'
img = image.load_img(img_path, target_size=(224, 224))
z = image.img_to_array(img)
z = np.expand_dims(z, axis=0)
z = preprocess_input(z)
# make Prediction
preds = model.predict(z)
print(preds)
maximum_model_output = model.output[:, 0]
last_conv_layer = model.layers[17]
# pooled grads of last convolutional layer and iterate over image
grads = K.gradients(model.output[:, 0], last_conv_layer.output)[0]
pooled_grads = K.mean(grads, axis=(0, 1, 2))
iterate = K.function([model.input],
                     [pooled_grads, last_conv_layer.output[0]])
pooled_grads_value, conv_layer_output_value = iterate([z])
for i in range(512):
    conv_layer_output_value[:, :, i] *= pooled_grads_value[i]
# create heatmap
heatmap = np.mean(conv_layer_output_value, axis=-1)
heatmap = np.maximum(heatmap, 0)
heatmap /= np.max(heatmap)
plt.matshow(heatmap)
img = cv2.imread(img_path)
heatmap = cv2.resize(heatmap, (img.shape[1], img.shape[0]))
heatmap = np.uint8(255 * heatmap)
heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
superimposed_img = heatmap * 0.4 + img
cv2.imwrite('./Images/picture_cam.jpg', superimposed_img)
As stated, with some pictures I get a valid prediction and a class activation map, but most pictures do not work and I get this output (last_conv_layer.output is all zeros, pooled_grads are all zeros and the value of model.predict is 1.0):
Using TensorFlow backend.
Backend TkAgg is interactive backend. Turning interactive mode on.
2019-08-11 21:13:16.868637: I tensorflow/core/common_runtime/process_util.cc:69] Creating new thread pool with default inter op setting: 2. Tune using inter_op_parallelism_threads for best performance.
<input>:20: UserWarning: Update your `Model` call to the Keras 2 API: `Model(inputs=Tensor("in..., outputs=Tensor("de...)`
[[1.]]
0
<input>:63: RuntimeWarning: invalid value encountered in true_divide
/home/zuse/Projekte/deep-learning-convolutional-network/venv/lib/python3.6/site-packages/matplotlib/image.py:395: UserWarning: Warning: converting a masked element to nan.
dv = (np.float64(self.norm.vmax) -
/home/zuse/Projekte/deep-learning-convolutional-network/venv/lib/python3.6/site-packages/matplotlib/image.py:396: UserWarning: Warning: converting a masked element to nan.
np.float64(self.norm.vmin))
/home/zuse/Projekte/deep-learning-convolutional-network/venv/lib/python3.6/site-packages/matplotlib/image.py:403: UserWarning: Warning: converting a masked element to nan.
a_min = np.float64(newmin)
/home/zuse/Projekte/deep-learning-convolutional-network/venv/lib/python3.6/site-packages/matplotlib/image.py:408: UserWarning: Warning: converting a masked element to nan.
a_max = np.float64(newmax)
/home/zuse/Projekte/deep-learning-convolutional-network/venv/lib/python3.6/site-packages/matplotlib/colors.py:918: UserWarning: Warning: converting a masked element to nan.
dtype = np.min_scalar_type(value)
I am running out of ideas about what the problem could be.

The issue was connected to preprocess_input from applications.vgg16. Setting
z = preprocess_input(z, mode='tf')
solved it. Perhaps this helps someone.
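For reference, a minimal sketch of the fixed preprocessing step (assuming the retrained classifier was trained on inputs scaled to [-1, 1], which is what mode='tf' produces; the default mode is 'caffe'):
from keras.applications.vgg16 import preprocess_input
from keras.preprocessing import image
import numpy as np

img = image.load_img('./picture.jpg', target_size=(224, 224))
z = image.img_to_array(img)
z = np.expand_dims(z, axis=0)
# Default is mode='caffe' (BGR reordering plus ImageNet mean subtraction);
# mode='tf' scales pixels to [-1, 1] and must match whatever preprocessing
# was used while retraining the classifier.
z = preprocess_input(z, mode='tf')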

Related

Get matrix dimensions from pytorch layers

Here is an autoencoder I created from PyTorch tutorials:
epochs = 1000
from pylab import plt
plt.style.use('seaborn')
import torch.utils.data as data_utils
import torch
import torchvision
import torch.nn as nn
from torch.autograd import Variable
cuda = torch.cuda.is_available()
FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
import numpy as np
import pandas as pd
import datetime as dt
features = torch.tensor(np.array([ [1,2,3],[1,2,3],[100,200,500] ]))
print(features)
batch = 10
data_loader = torch.utils.data.DataLoader(features, batch_size=2, shuffle=False)
encoder = nn.Sequential(nn.Linear(3,batch), nn.Sigmoid())
decoder = nn.Sequential(nn.Linear(batch,3), nn.Sigmoid())
autoencoder = nn.Sequential(encoder, decoder)
optimizer = torch.optim.Adam(params=autoencoder.parameters(), lr=0.001)
encoded_images = []
for i in range(epochs):
    for j, images in enumerate(data_loader):
        # images = images.view(images.size(0), -1)
        images = Variable(images).type(FloatTensor)
        optimizer.zero_grad()
        reconstructions = autoencoder(images)
        loss = torch.dist(images, reconstructions)
        loss.backward()
        optimizer.step()
        # encoded_images.append(encoder(images))
# print(decoder(torch.tensor(np.array([1,2,3])).type(FloatTensor)))
encoded_images = []
for j, images in enumerate(data_loader):
    images = images.view(images.size(0), -1)
    images = Variable(images).type(FloatTensor)
    encoded_images.append(encoder(images))
I can see that the encoded images have the newly created dimension of 10. In order to understand the matrix operations going on under the hood, I'm attempting to print the matrix dimensions of encoder and decoder, but shape is not available on nn.Sequential.
How do I print the matrix dimensions of an nn.Sequential?
An nn.Sequential is not a "layer", but rather a "container". It can store several layers and manage their execution (and some other functionality).
In your case, each nn.Sequential holds both the linear layer and the non-linear nn.Sigmoid activation. To get the shape of the weights of the first layer in an nn.Sequential you can simply do:
encoder[0].weight.shape
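More generally, here is a small sketch (using the same layer sizes as in the question) that walks the container and prints the weight shape of every layer that has one:
import torch.nn as nn

encoder = nn.Sequential(nn.Linear(3, 10), nn.Sigmoid())
decoder = nn.Sequential(nn.Linear(10, 3), nn.Sigmoid())
autoencoder = nn.Sequential(encoder, decoder)

# named_modules() recurses into nested containers, so this prints the
# dimensions of both linear layers; activations have no weights and are skipped.
for name, module in autoencoder.named_modules():
    if hasattr(module, 'weight') and module.weight is not None:
        print(name, tuple(module.weight.shape))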

can we train a model in tensorflow2.0 without using keras?

I am trying to write simple ML code to classify the MNIST dataset in TensorFlow 2.0. I am not using Keras for now since I want to use the lower-level API to help me understand how TensorFlow works. However, after I defined the cross entropy, it seems impossible to continue. All the TF 2.0 optimizers have been moved to Keras and I don't know how to train a model without Keras in TF 2.0. Is there a way to bypass Keras in TF 2.0?
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
(train_images, train_labels), (test_images, test_labels) = datasets.mnist.load_data()
print(train_images.shape)
print(len(train_labels))
print(train_images[1,:,:].shape)
# plt.figure()
# plt.imshow(train_images[0])
# plt.colorbar()
# plt.grid(False)
# plt.show()
# Normalize pixel values to be between 0 and 1
train_images, test_images = train_images / 255.0, test_images / 255.0
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
for i in range(1):
    x = tf.constant(train_images[1, :, :].reshape(784), dtype=tf.float32)
    x = tf.reshape(x, [1, 784])
    print(tf.shape(x), tf.shape(W))
    # define the model
    y = tf.nn.softmax(tf.matmul(x, W) + b)
    print(y)
    # correct labels
    y_ = np.zeros(10)
    y_[train_labels[i]] = 1.0
    y_ = tf.constant(y_, dtype=tf.float32)
    y_ = tf.reshape(y_, [1, 10])
    cross_entropy = -tf.reduce_sum(y_ * tf.math.log(y))
    print(cross_entropy)
I don't know how to continue from here.
Backpropagation-based training of models is entirely possible in TensorFlow 2.x without using the Keras API. The usage centers on the tf.GradientTape API and the optimizer objects under the tf.optimizers namespace.
Your example can be modified as follows. Note that this is simplistic code meant to illustrate basic usage in a short snippet, not machine learning best practices in TF2.
(train_images, train_labels), (test_images, test_labels) = datasets.mnist.load_data()
train_images, test_images = train_images / 255.0, test_images / 255.0
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

@tf.function
def my_model(x):
    # This is a hand-rolled logistic regressor.
    y = tf.matmul(x, W) + b
    return tf.nn.softmax(y)

@tf.function
def loss(x, y):
    # This is a hand-rolled categorical cross-entropy loss.
    diff = -(y * tf.math.log(my_model(x)))
    return tf.reduce_mean(diff)

optimizer = tf.optimizers.Adam(learning_rate=1e-3)

for i in range(num_steps):
    # A single training step.
    with tf.GradientTape() as tape:
        # This is atypical, in that you would normally want to do this in
        # mini-batches, instead of using all examples in x_train and y_train
        # at once. But again, this is just a simple example.
        loss_value = loss(x_train, y_train)
    gradients = tape.gradient(loss_value, [W, b])
    optimizer.apply_gradients(zip(gradients, [W, b]))
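One thing the snippet above leaves open is where x_train, y_train and num_steps come from. As an assumption on my part (not part of the original answer), they could be built from the question's data like this:
import numpy as np
import tensorflow as tf

# Flatten the normalized images to (N, 784) and one-hot encode the labels
# to (N, 10) so they match the shapes of W and b defined above.
x_train = tf.constant(train_images.reshape(-1, 784), dtype=tf.float32)
y_train = tf.constant(np.eye(10)[train_labels], dtype=tf.float32)
num_steps = 100  # hypothetical number of training steps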

How do I prepare the test set shape for the model.evaluate with keras and tensorflow?

I'm trying to run a simple example with a NN using the MNIST dataset provided by TensorFlow itself, running on Google Colab. I want to get the raw data and build the data structure myself. I'm able to train the NN, but when I try to predict one example from the test set, I get the error
ValueError: Error when checking input: expected dense_input to have shape (784,) but got array with shape (1,).
Could somebody help me with this issue? I'm pretty new to Python and Keras/TensorFlow.
When I run
print(inp.shape)
I get (784,) and not the (1,) as the error says.
I have also tried to evaluate the test set using
test_loss, test_accuracy = model.evaluate(test_input.T)
but I also get the error
ValueError: Arguments and signature arguments do not match: 25 27.
The source code is the following:
# Importing stuff
import tensorflow as tf
import tensorflow_datasets as tfds
import matplotlib.pyplot as plt
import numpy as np
import math
import time
import keras
tf.enable_eager_execution()
# Functions
def normalize(images, labels):
    images = tf.cast(images, tf.float32)
    images /= 255
    return images, labels
# Getting dataset
ds, meta = tfds.load('fashion_mnist', as_supervised=True, with_info=True)
test_ds, train_ds = ds['test'], ds['train']
# Preprocess the data
train_ds = train_ds.map(normalize)
test_ds = test_ds.map(normalize)
num_train_examples = meta.splits['train'].num_examples
num_test_examples = meta.splits['test'].num_examples
# Making the train set
train_input = np.empty(shape=(784, num_train_examples))
train_label = np.empty(shape=(1, num_train_examples))
i = 0
for image, label in train_ds:
    image = image.numpy().reshape((784, 1))
    train_input[:, i] = image.ravel()
    label = label.numpy().reshape(1)
    train_label[:, i] = label
    i = i + 1
# Making the test set
test_input = np.empty(shape=(784, num_test_examples))
test_label = np.empty(shape=(1, num_test_examples))
i = 0
for image, label in test_ds:
    image = image.numpy().reshape((784, 1))
    test_input[:, i] = image.ravel()
    label = label.numpy().reshape(1)
    test_label[:, i] = label
    i = i + 1
# Network
input_layer = tf.keras.layers.Dense(units=784, input_shape=[784])
h1 = tf.keras.layers.Dense(128, activation=tf.nn.relu)
output_layer = tf.keras.layers.Dense(10, activation=tf.nn.softmax)
model = tf.keras.Sequential([input_layer, h1, output_layer])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(train_input.T, train_label.T, epochs=3, steps_per_epoch=100, batch_size=1)
test_loss, test_accuracy = model.evaluate(test_input.T)
inp = test_input[:, 0].T
res = model.predict(inp)
All the API functions expect an input shape containing the batch size first. In your case, you're trying to feed only 1 example and thus no batch size is given. You just need to specify your batch size as 1 by reshaping the data.
Using numpy:
res = model.predict(np.reshape(inp, (1, len(inp))))
The predict method now receives an array with shape (1, 784) in your case, specifying the batch size as 1.
When you give the function more examples to evaluate by stacking them on top of one another, the batch size is implicitly given by the shape of the array, so no further transformation is necessary.
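As an illustration, here is a sketch reusing the test_input and test_label arrays built in the question (the evaluate call with labels is my assumption, not part of the original answer):
import numpy as np

# Single example: add the batch dimension so the input has shape (1, 784).
inp = test_input[:, 0]
res = model.predict(inp.reshape(1, -1))
print(res.shape)  # (1, 10): one row of class probabilities

# For evaluate, the labels go alongside the inputs; the full test set
# already carries its batch size in the first dimension.
test_loss, test_accuracy = model.evaluate(test_input.T, test_label.T)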

Keras single Image prediction not working with correct dims

I want to predict single images with the functional API (Keras version 2.2.2, TensorFlow backend v1.7). I load my model:
# Loading Model
base_model = VGG16(include_top=False, weights=None,
                   input_shape=(224, 224, 3))
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dropout(0.7)(x)
x = Dense(1020, activation='relu')(x)
predictions = Dense(1, activation='sigmoid')(x)
model = Model(inputs=base_model.input, outputs=predictions)
model.load_weights("model.h5")
Then I load an Image and transform it into the input format and try to predict:
from keras.preprocessing.image import img_to_array, load_img
img = load_img("data/my_image.png") # this is a PIL image
array = img_to_array(img) # this is a Numpy array with shape
arrayresized = cv2.resize(array, (244,244))*1./255
inputarray = np.expand_dims(arrayresized, axis=0)
# Predicting
prediction = model.predict(inputarray, batch_size = 1)
Then I get this error back:
ValueError: Error when checking input: expected input_4 to have shape
(224, 224, 3) but got array with shape (244, 244, 3)
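The error message itself points at the mismatch: the model's input layer expects (224, 224, 3), but the resize call produces (244, 244, 3). A minimal sketch of the corrected preprocessing (my suggestion, not an accepted answer):
import cv2
import numpy as np
from keras.preprocessing.image import img_to_array, load_img

img = load_img("data/my_image.png")
array = img_to_array(img)

# Resize to the input size the model was built with: (224, 224), not (244, 244).
arrayresized = cv2.resize(array, (224, 224)) * 1. / 255
inputarray = np.expand_dims(arrayresized, axis=0)

prediction = model.predict(inputarray, batch_size=1)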

model.fit_generator: Error when checking target: expected lambda_2 to have 4 dimensions, but got array with shape (200, 1)

I implemented a generator to feed the training process, but fit_generator throws this error:
Error when checking target: expected lambda_2 to have 4 dimensions,
but got array with shape (200, 1)
It seems that the function is switching X and y at some point, because (200,1) is "y" shape, not "X" shape.
If I test the generator with the code below it works fine:
for i in range(32):
    train = next(train_generator)
    print(train[0].shape)
But with the fit_generator an error is thrown.
This is my code:
import os
import csv
samples = []
with open('data/driving_log.csv') as csvfile:
    reader = csv.reader(csvfile)
    for line in reader:
        samples.append(line)
from sklearn.model_selection import train_test_split
train_samples, validation_samples = train_test_split(samples, test_size=0.2)
import cv2
import numpy as np
import sklearn
def generator(samples, batch_size=32):
    num_samples = len(samples)
    while 1:  # Loop forever so the generator never terminates
        sklearn.utils.shuffle(samples)
        for offset in range(0, num_samples, batch_size):
            batch_samples = samples[offset:offset+batch_size]
            images = []
            angles = []
            for batch_sample in batch_samples:
                name = 'data\\' + batch_sample[0].split('\\')[-1]
                center_image = cv2.imread(name)
                center_angle = float(batch_sample[3])
                if not center_image is None:
                    images.append(center_image)
                    angles.append(center_angle)
            # trim image to only see section with road
            X_train = np.array(images)
            y_train = np.array(angles)
            yield sklearn.utils.shuffle(X_train, y_train)
# compile and train the model using the generator function
train_generator = generator(train_samples, batch_size=int(len(train_samples)/32))
validation_generator = generator(validation_samples, batch_size=int(len(validation_samples)/32))
ch, row, col = 3, 160, 320 # Trimmed image format
from keras.models import Sequential
from keras.layers import Lambda
model = Sequential()
# Preprocess incoming data, centered around zero with small standard deviation
model.add(Lambda(lambda x: x / 127.5 - 1.,
                 input_shape=(row, col, ch),
                 output_shape=(row, col, ch)))
#model.add(... finish defining the rest of your model architecture here ...)
model.compile(loss='mse', optimizer='adam')
model.fit_generator(train_generator,
                    steps_per_epoch=len(train_samples) / 32,
                    validation_data=validation_generator,
                    validation_steps=len(validation_samples) / 32,
                    epochs=3)
Any ideas how I can solve this?
It was an error loading the images: the file name was not defined correctly. Since cv2.imread(name) does not raise an error when it cannot find the image and simply returns a None object, the generator was yielding empty arrays, which caused the error in the network.
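For reference, a small sketch of a loader that surfaces missing files instead of silently skipping them (the helper name load_center_image is hypothetical, not from the original code):
import os
import cv2

def load_center_image(batch_sample, data_dir='data'):
    # Build the path with os.path so it works regardless of the separator
    # stored in driving_log.csv.
    name = os.path.join(data_dir, os.path.basename(batch_sample[0].replace('\\', '/')))
    image = cv2.imread(name)
    # cv2.imread returns None instead of raising when the file is missing,
    # so report the problem instead of silently yielding empty batches.
    if image is None:
        print('Could not read image:', name)
    return image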
