Custom ImageDataGenerator in Keras

I've been trying to implement a custom Keras ImageDataGenerator so that I can do hair and microscopy image augmentation.
This is the Datagenerator class:
class DataGenerator(Sequence):
    def __init__(self, image_paths, labels, augmentations, batch_size=32,
                 image_dimension=(224, 224, 3), shuffle=False):
        self.image_paths = image_paths
        self.labels = labels
        self.batch_size = batch_size
        self.image_dimension = image_dimension
        self.shuffle = shuffle
        self.augment = augmentations

    def __len__(self):
        return int(np.ceil(len(self.image_paths) / self.batch_size))

    def _getitem__(self, index):
        indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]
        batch_y = np.array([self.labels[k] for k in indexes])
        batch_x = [cv2.cvtColor(cv2.imread(self.image_paths[k]), cv2.COLOR_RGB2BGR) for k in indexes]
        return np.stack([
            self.augment(image=x)["image"] for x in batch_x
        ], axis=0), np.array(batch_y)
The code below is the albumentations augmentation (just trying albumentations to test whether the data generator works or not):
AUGMENTATIONS_TRAIN = Compose([
HorizontalFlip(p=0.5),
RandomContrast(limit=0.2, p=0.5),
RandomGamma(gamma_limit=(80, 120), p=0.5),
RandomBrightness(limit=0.2, p=0.5),
HueSaturationValue(hue_shift_limit=5, sat_shift_limit=20,
val_shift_limit=10, p=.9),
# CLAHE(p=1.0, clip_limit=2.0),
ShiftScaleRotate(
shift_limit=0.0625, scale_limit=0.1,
rotate_limit=15, border_mode=cv2.BORDER_REFLECT_101, p=0.8),
ToFloat(max_value=255)
])
AUGMENTATIONS_TEST = Compose([
# CLAHE(p=1.0, clip_limit=2.0),
ToFloat(max_value=255)
])
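For reference, an albumentations Compose is a callable applied to one image at a time via keyword arguments, which is how the generator above uses it. A quick sanity check, assuming img is an HWC NumPy image loaded elsewhere:

augmented = AUGMENTATIONS_TRAIN(image=img)
aug_img = augmented["image"]  # float32 in [0, 1] because of ToFloat(max_value=255)
print(aug_img.shape, aug_img.dtype)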
Now I create the DataGenerator objects:
train_datagen = DataGenerator(train['images'],
                              train['target'],
                              augmentations=AUGMENTATIONS_TRAIN,
                              batch_size=32,
                              image_dimension=(224, 224, 3))
val_datagen = DataGenerator(validation['images'],
                            validation['target'],
                            augmentations=AUGMENTATIONS_TEST,
                            batch_size=16,
                            image_dimension=(224, 224, 3))
A NotImplementedError is raised when I run:
model.fit_generator(generator=train_datagen, steps_per_epoch=30, epochs=30, validation_data=val_datagen, validation_steps=15)
I have shared my kernel here, and
I was taking help from here.
I have also looked at other ways to augment, which were all much the same.
I would be thankful if someone could tell me why and where the problem is, and whether there is any other good way to do custom image augmentation in Keras.

You can have a look at the imgaug library; albumentations and imgaug are almost the same. Write the sequence of operations and then just pass it to ImageDataGenerator's preprocessing_function. I tried using the albumentations library but faced some errors.
from imgaug import augmenters as iaa

# Helper used below (the usual imgaug idiom): apply an augmenter only some of the time.
sometimes = lambda aug: iaa.Sometimes(0.5, aug)

seq = iaa.Sequential([
    iaa.Fliplr(0.5),  # horizontally flip
    # sometimes(iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05), per_channel=0.5)),
    iaa.OneOf([
        iaa.Sharpen(alpha=(0, 1.0), lightness=(0.75, 1.5)),
        iaa.Emboss(alpha=(0, 1.0), strength=(0, 2.0)),
        # iaa.Noop(),
        iaa.GaussianBlur(sigma=(0.0, 1.0)),
        # iaa.Noop(),
        iaa.Affine(rotate=(-10, 10), translate_percent={"x": (-0.25, 0.25)}, mode='symmetric', cval=(0)),
        # iaa.Noop(),
        # iaa.PerspectiveTransform(scale=(0.04, 0.08)),
        # iaa.Noop(),
        # iaa.PiecewiseAffine(scale=(0.05, 0.1), mode='edge', cval=(0)),
    ]),
    sometimes(iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25)),
    # More as you want ...
], random_order=True)

datagen = ImageDataGenerator(preprocessing_function=seq.augment_image)
There are also some advanced data augmentation practices such as cutout, random erasing and mixup; they are easy to implement in Keras. For mixup, an example is below (MixupGenerator is not part of Keras; it is a separate helper class):
import matplotlib.pyplot as plt

training_generator = MixupGenerator(trainX, trainY, batch_size=8, alpha=0.2, datagen=datagen)()
x, y = next(training_generator)

# To visualize the batch images
for i in range(9):
    plt.subplot(330 + 1 + i)
    # batch = it.next()
    img = x[i]
    plt.imshow(img.reshape(224, 224, 3))
plt.savefig("mixup_batch.png")
H = model.fit_generator(
    # datagen.flow(trainX, trainY, batch_size=args.batch_size),
    training_generator,
    steps_per_epoch=len(trainX) // args.batch_size,
    validation_data=(valX, valY),
    validation_steps=len(valX) // args.batch_size,
    epochs=args.epochs,
    # workers=4,
    callbacks=[model_checkpoint, lr_reducer, stopping, lr_schedule],
)
The problem I faced with this, though, is that random erasing also needs to go into ImageDataGenerator's preprocessing_function, and we have already put the imgaug augmentation there. A possible alternative is to use two data generators, or to chain both operations inside a single function, as sketched below.
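A minimal sketch of that chaining idea, assuming a hypothetical random_eraser callable (for instance one built from the get_random_eraser helper that often accompanies MixupGenerator) and the imgaug seq from above:

def combined_preprocessing(image):
    # Apply the imgaug pipeline first, then random erasing on the result.
    image = seq.augment_image(image)
    image = random_eraser(image)  # hypothetical callable, defined elsewhere
    return image

datagen = ImageDataGenerator(preprocessing_function=combined_preprocessing)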


fit keras.model from generator_function

TF 2.x - just for the experience, I tried a simple experimental dataset to show the problem:
import numpy as np
import tensorflow as tf
import keras
from tensorflow.keras.callbacks import LambdaCallback
import tensorflow_datasets as tfds

data, info = tfds.load('iris', split='train[:80%]',
                       as_supervised=True, with_info=True)
print(info)
features, labels = tuple(zip(*data))

# NB: the generator should yield a dictionary for the inputs, and the output as is.
def gen(x_train, y_train):
    print('generator initiated')
    (x_train, y_train) = tfds.load('iris', shuffle_files=True, as_supervised=True, with_info=True)
    idx = 0
    while True:
        yield tf.transpose([x_train[:32], tf.one_hot(y_train[:32])])
        print('generator yielded a batch %d' % idx)
        idx += 1

train_ds = tf.data.Dataset.from_generator(gen, args=(features, labels),
                                          output_types=(tf.float32, tf.int32),
                                          output_shapes=(tf.TensorShape([32, 4]), tf.TensorShape([32, 4])),
                                          )
# OR
# output_signature=(
#     tf.TensorSpec(shape=(4,), dtype=tf.float32),
#     tf.TensorSpec(shape=(), dtype=tf.int32)),
# )

# datasetGen = iter(train_ds)

model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Dense(512, activation='relu', input_shape=(32, 4,)))  # 4 fields
model.add(tf.keras.layers.Dense(4, activation='softmax'))
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

train_ds = train_ds.batch(32).prefetch(32)
# callbacks=[LambdaCallback(on_epoch_end=generator.on_epoch_end)],
history = model.fit(train_ds, epochs=7, verbose=1)
print(history.history['accuracy'])
and I am getting:
In line: yield tf.transpose([x_train[:32], tf.one_hot(y_train[:32])])
TypeError: unhashable type: 'slice'
The problem seems to be here: x_train[:32]?
Question: how do I correct the code (the generator function? the output_signature? the input_shape? or somewhere else) so that I can use the Dataset in the model.fit() method?
(Sorry for the dummy example, but I'd like to test generator-function use in model.fit().)
Well, it was really a dummy example of generator use; moreover, tf.data always wins in speed compared with a generator. Nevertheless, something like this works (the code still needs refactoring, e.g. into pipelines for big data):
import tensorflow as tf
import numpy as np
import pandas as pd

# LOAD DATA
df = pd.read_csv('https://gist.githubusercontent.com/netj/8836201/raw/6f9306ad21398ea43cba4f7d537619d0e07d5ae3/iris.csv',
                 dtype='float32', converters={'variety': str},
                 nrows=64, decimal='.')
# df.head()
_features = df.iloc[:, :4].copy()
_labels = df.iloc[:, -1:].copy()
_labels['variety1'] = pd.factorize(_labels['variety'])[0]
_target = _labels['variety1'].astype(np.int64).copy()
_targets = _target.values[:, np.newaxis]
# print(_features)
print(type(_targets))

# SPLIT for Train & Test
# https://www.kdnuggets.com/2020/07/getting-started-tensorflow2.html
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(_features, _targets, test_size=0.3)

# Typically, we normalize the data when we have a high amount of variance in it.
print(X_train.var())
print(X_test.var())
# Here we can see that both X_train and X_test have very low variance, so no need to normalize the data.

# PREPROCESSING
#
# to_categorical
y_train = tf.keras.utils.to_categorical(y_train)
y_test = tf.keras.utils.to_categorical(y_test)
print(y_train[:5, :])

# convert our data to numpy arrays
X_train = X_train.values
X_test = X_test.values

#################################################
#################################################
def gen(_features, _labels):
    x_train = _features
    y_train = _labels
    # print('gen:\n', list(x_train))
    # print('gen:\n', list(y_train))
    idx = 0
    while idx < 64:
        yield x_train[:32], y_train[:32]
        print('generator yielded a batch %d' % idx)
        idx += 1

#################################################
# train_ds <<<<<<<<<<<<<<<<<<<<<<<
train_ds = tf.data.Dataset.from_generator(gen, args=(X_train, y_train),
                                          output_types=(tf.float32, tf.int64),
                                          output_shapes=(tf.TensorShape([32, 4]), tf.TensorShape([32, 2])),
                                          )
# OR
# output_signature=(
#     tf.TensorSpec(shape=(4,), dtype=tf.float32),
#     tf.TensorSpec(shape=(), dtype=tf.int32)),
# )

# datasetGen = iter(train_ds)
# print('train_ds:\n', list(train_ds.as_numpy_iterator()))

#################################################
# Model
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Dense(512, activation='relu', input_shape=(32, 4)))  # 4 fields
model.add(tf.keras.layers.Dense(2, activation='softmax'))
# INSTEAD OF ONE-HOT, sparse_categorical_crossentropy COULD BE USED HERE
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

train_ds = train_ds.batch(32).prefetch(32)
# callbacks=[LambdaCallback(on_epoch_end=generator.on_epoch_end)],
history = model.fit(train_ds, epochs=7, verbose=1)
A validation_ds built from X_test, y_test with tf.data.Dataset.from_tensor_slices() has problems with shape (4,) instead of the model's input shape (32, 4), but that stems from the inappropriate generator task from the very beginning, I think. Still, evaluate() and predict() work with train_ds (though that is not the point of ML):
##############################################
score = model.evaluate(train_ds, batch_size=32, verbose=1)  # test_ds needed
print("Test Accuracy:", score[1])
y_pred = model.predict(train_ds)
print('PREDICTIONS:\n', y_pred)

##############################################
# https://medium.com/@nutanbhogendrasharma/tensorflow-deep-learning-model-with-iris-dataset-8ec344c49f91
# Print actual and predicted values
features, labels = tuple(zip(*train_ds))  # If you need the numpy array version, convert with np.array(): https://stackoverflow.com/a/65499385/15893581
actual = np.argmax(labels, axis=-1)
predicted = np.argmax(y_pred, axis=-1)
print(f"Actual: {actual}")
print(f"Predicted: {predicted}")
So an incoming test_ds still needs to be adapted (though it would be better to adapt the generator function here, I think), but the overall idea of using a generator in TF 2.x is clear now (useful only for huge data); a sketch of the more conventional layout is below.
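For reference, a conventional layout would use per-sample elements and a model declared with input_shape=(4,), so that the train and test datasets can be built the same way; a sketch, assuming the X_test, y_test arrays from above:

# Per-sample elements; .batch() alone supplies the leading batch dimension.
test_ds = tf.data.Dataset.from_tensor_slices((X_test, y_test)).batch(32)
# This pairs with a model declared as input_shape=(4,) rather than (32, 4).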
P.S.
I would also appreciate advice on improving the model here.
I apologize for this dummy question, as I'm still a novice in ML, but I needed to connect a generator and training somehow for the experience.
Finally I generated an iris-like dataset from a function (really not a quick operation)... some attention is still needed for the repeat-fn, but the code design in general works (for really random data):
# Importing the tensorflow library
import tensorflow as tf
import numpy as np
import keras

# FeaturesDict({
#     'features': Tensor(shape=(4,), dtype=tf.float32),
#     'label': ClassLabel(shape=(), dtype=tf.int64, num_classes=3),
# })
BATCH_SIZE = 12
EPOCHS = 7
QTY_BATCHES = 10  # to be generated

# The Dataset.from_generator constructor converts the python generator to a fully functional tf.data.Dataset.
def gen():
    for i in range(BATCH_SIZE):
        # should yield a pair Features - Label
        data = np.expand_dims(np.random.sample(4), axis=0)
        label = [np.random.randint(3)]
        yield data, label

train_ds = tf.data.Dataset.from_generator(gen,
                                          (tf.float32, tf.int32),
                                          (tf.TensorShape([None, 4]),
                                           tf.TensorShape([1])))
# Applying the Dataset.repeat() transformation with no arguments will repeat the input indefinitely.
# The Dataset.repeat transformation concatenates its arguments without signaling the end of one epoch and the beginning of the next epoch. Because of this a Dataset.batch applied after Dataset.repeat will yield batches that straddle epoch boundaries:
train_ds= train_ds.repeat(count= EPOCHS*BATCH_SIZE*QTY_BATCHES).batch(BATCH_SIZE, drop_remainder=True).prefetch(BATCH_SIZE)
NUM_CLASSES = 3
train_ds = train_ds.map(lambda x, y: (x, tf.one_hot(y, depth=NUM_CLASSES)))

for x, y in train_ds:
    print(x)
    print(y)
# Build a simple linear model
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Dense(64, activation='relu', input_shape=(None, 4)))  # unknown (variable) batch_size, 4 fields
model.add(tf.keras.layers.Dense(3, activation='softmax'))
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

# steps_per_epoch = int(np.ceil(x_train.shape[0] / batch_size))
# Steps per epoch denotes the number of batches selected for one epoch. If 500 steps are selected, the network will train on 500 batches to complete one epoch.
history = model.fit(train_ds, batch_size=BATCH_SIZE, epochs=EPOCHS,
                    steps_per_epoch=(QTY_BATCHES * BATCH_SIZE) // BATCH_SIZE,
                    verbose=1)

print(history.history['accuracy'])
print(history.history['loss'])
# Keras - Plot training, validation and test set accuracy
# https://stackoverflow.com/questions/41908379/keras-plot-training-validation-and-test-set-accuracy
from matplotlib import pyplot as plt

plt.plot(history.history['accuracy'])
# plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
# plt.legend(['train', 'val'], loc='upper left')
plt.legend(['train'], loc='upper left')
plt.show()

plt.plot(history.history['loss'])
# plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
# plt.legend(['train', 'val'], loc='upper left')
plt.legend(['train'], loc='upper left')
plt.show()
OK, I've got a working case for the initial Dataset:
import numpy as np
import tensorflow as tf
import keras
from tensorflow.keras.callbacks import LambdaCallback
import tensorflow_datasets as tfds

data, info = tfds.load('iris', split='train[:100%]', batch_size=10, as_supervised=True, with_info=True)
print(info)
NUM_CLASSES = info.features["label"].num_classes
data = data.map(lambda x, y: (x, tf.one_hot(y, depth=NUM_CLASSES)))
features, labels = tuple(zip(*data))
print(features)
print(labels)

# NB: the generator should yield a dictionary for the inputs, and the output as is.
def gen(x_train, y_train):
    print('generator initiated')
    print(x_train.shape)
    print(y_train.shape)
    idx = 0
    while True:
        yield x_train, y_train
        print('generator yielded a batch %d' % idx)
        idx += 1

train_ds = tf.data.Dataset.from_generator(gen, args=(features, labels),
                                          output_types=(tf.float32, tf.int32),
                                          output_shapes=(tf.TensorShape([None, 10, 4]), tf.TensorShape([None, 10, 3])),
                                          )
# OR (better, because the previous form is deprecated)
# output_signature=(
#     tf.TensorSpec(shape=(4,), dtype=tf.float32),
#     tf.TensorSpec(shape=(), dtype=tf.int32)),
# )

# it = iter(train_ds)
# print(it.get_next())
for feature, label in train_ds:
    print("shape of ds_generated: ", feature.shape, label.shape)
    break

# num_val = len(train_ds)  # TypeError: The dataset length is unknown, BECAUSE it is a FLOW
# print(num_val)

model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Dense(512, activation='relu', input_shape=(None, 10, 4)))  # 4 fields
model.add(tf.keras.layers.Dense(3, activation='softmax'))
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

train_ds = train_ds.batch(32).prefetch(32)
# callbacks=[LambdaCallback(on_epoch_end=generator.on_epoch_end)],
history = model.fit(train_ds, epochs=2, steps_per_epoch=120 // 10, verbose=1)
print(history.history['accuracy'])
What I changed:
- moved one-hot encoding out of the generator function's scope;
- divided the dataset into features & labels;
- gave the correct input_shape to the model (with matching shape changes in the generator function), i.e. [variable_rows_count_in_batch, batch_size, columns_features];
- set verbose=1 for readable debugging in a multithreaded environment.
The advice from here to define a variable batch size with None and set steps_per_epoch still does not help when taking split='train[:50%]' with steps_per_epoch=60 // 10, because of the partially filled LAST batch; the source of the problem in my code IS the generator's output_shapes. That is clear here, because the generator really was a dummy for testing purposes.
For real cases, use logical output and appropriate shapes!
P.S.
Though for 5 epochs I am getting:
Graph execution error: >> ZMQError: Too many open files
AttributeError: '_thread._local' object has no attribute 'event_pipe'
-- probably NOT enough memory to finish training! Decreasing the output size in Dense(512, ...) HELPS (as does decreasing the number of epochs).

How to train a PyTorch CNN with two or more inputs

I have a big image, and multiple events in the image can impact the classification. I am thinking of splitting the big image into small chunks, getting features from each chunk, and concatenating the outputs together for prediction.
My code is like:
train_load_1 = DataLoader(dataset=train_dataset_1, batch_size=100, shuffle=False)
train_load_2 = DataLoader(dataset=train_dataset_2, batch_size=100, shuffle=False)
train_load_3 = DataLoader(dataset=train_dataset_3, batch_size=100, shuffle=False)
test_load_1 = DataLoader(dataset=test_dataset_1, batch_size=100, shuffle=True)
test_load_2 = DataLoader(dataset=test_dataset_2, batch_size=100, shuffle=True)
test_load_3 = DataLoader(dataset=test_dataset_3, batch_size=100, shuffle=True)
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv = nn.Conv2d( ... )  # set up your layer here
        self.fc1 = nn.Linear( ... )  # set up first FC layer
        self.fc2 = nn.Linear( ... )  # set up the other FC layer

    def forward(self, x1, x2, x3):
        o1 = self.conv(x1)
        o2 = self.conv(x2)
        o3 = self.conv(x3)
        combined = torch.cat((o1.view(o1.size(0), -1),
                              o2.view(o2.size(0), -1),
                              o3.view(o3.size(0), -1)), dim=1)
        out = self.fc1(combined)
        out = self.fc2(out)
        return F.softmax(out, dim=1)

model = Net().to(device)
optimizer = optim.SGD(model.parameters(), lr=0.01)

for epoch in epochs:
    model.train()
    for batch_idx, (inputs, labels) in enumerate(train_loader_1):
### I am stuck here: how do I enumerate all three train loaders to pass input_1, input_2, input_3 into the model with a shared label? Please note that in the DataLoaders I have set shuffle=False; this is to make sure train_load_1, train_load_2 and train_load_3 yield the same labels. ###
Thank you for your help!
Instead of using 3 separate DataLoaders, you can use a single DataLoader where each datapoint contains the 3 separate parts of the image, like this:
dataLoader = [[[img1_part1],[img1_part2],[img1_part3], label1], [[img2_part1],[img2_part2],[img2_part3], label2]....]
This way you can use it in the training loop as:
for img in dataLoader:
    part1, part2, part3, label = img
    optimizer.zero_grad()  # clear gradients from the previous step
    out = model.forward(part1, part2, part3)
    loss = loss_fn(out, label)
    loss.backward()
    optimizer.step()
To get the image parts into that format, you can loop over the images and append them to a list or a numpy array:
def make_parts(full_image):
    # some code
    # returns a list of image parts after converting them into torch tensors
    return [TorchTensor_of_part1, TorchTensor_of_part2, TorchTensor_of_part3]

list_of_parts_and_labels = []
for image, label in zip(full_img_data, labels):
    parts = make_parts(image)
    list_of_parts_and_labels.append([parts, torch.tensor(label)])
If you want to load your images into a DataLoader, assuming that you already have your image parts and labels in the above-mentioned format:
train_loader = torch.utils.data.DataLoader(list_of_parts_and_labels,
                                           shuffle=True, batch_size=BATCH_SIZE)
then use it as:
for data in train_loader:
    parts, label = data
    out = model.forward(*parts)
    loss = loss_fn(out, label)
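A slightly more idiomatic variant of the same idea is a small custom Dataset that returns the three parts plus the label; a sketch, assuming the make_parts, full_img_data and labels names from above and the Net model from the question:

import torch
from torch.utils.data import Dataset, DataLoader

class MultiPartDataset(Dataset):
    """Wraps whole images; returns (part1, part2, part3, label) per item."""
    def __init__(self, images, targets):
        self.images = images
        self.targets = targets

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        p1, p2, p3 = make_parts(self.images[idx])  # assumed helper from above
        return p1, p2, p3, torch.tensor(self.targets[idx])

loader = DataLoader(MultiPartDataset(full_img_data, labels),
                    batch_size=100, shuffle=True)
for p1, p2, p3, y in loader:
    out = model(p1, p2, p3)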

PyTorch: Add input normalization to model (division layer)

I want to add image normalization to an existing PyTorch model, so that I don't have to normalize the input image anymore.
Say I have an existing model
model = torch.hub.load('pytorch/vision:v0.6.0', 'mobilenet_v2', pretrained=True)
model.eval()
Now I can add new layers (for example a relu) using torch.nn.Sequential:
new_model = nn.Sequential(
    model,
    nn.ReLU()
)
However, I couldn't find a layer that performs just a division or subtraction, as needed for the input normalization shown here in numpy:
import cv2
import numpy as np
img = cv2.imread("my_img.jpg")
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = img.astype(np.float32)
mean = np.array([0.485, 0.456, 0.406], dtype=np.float32)
std = np.array([0.229, 0.224, 0.225], dtype=np.float32)
img = img / 255.0
img = img - mean
img = img / std
img = np.transpose(img, (2, 0, 1))
img = np.expand_dims(img, axis=0)
The goal is that normalization is eventually done on the GPU to save time during inference. Also, I cannot use torchvision transforms, as those operations are not stored inside the model itself. For example, if I want to save the model to disk (in order to convert it to tflite using onnx), the torchvision transform operations will not be saved along with the model. Is there an elegant way of doing this?
(Preferably without using a linear layer, which would fix my model input size; it should be flexible, as my real model is fully convolutional.)
Untested code, which hopefully you can vet yourself:
import torch
import torch.nn as nn

cuda0 = torch.device('cuda:0')

class Normalize(nn.Module):
    def __init__(self, mean, std):
        super(Normalize, self).__init__()
        # reshape to (C, 1, 1) so the values broadcast over H and W of an NCHW input
        self.mean = torch.tensor(mean, device=cuda0).view(-1, 1, 1)
        self.std = torch.tensor(std, device=cuda0).view(-1, 1, 1)

    def forward(self, input):
        x = input / 255.0
        x = x - self.mean
        x = x / self.std
        return x
In your model you can do
new_model = nn.Sequential(
    Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    model,
    nn.ReLU()
)
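Since the stated goal is exporting the model with normalization baked in, a hedged sketch of the ONNX export step (the file name and input size are placeholders; the real model is fully convolutional, so other spatial sizes work too):

import torch

dummy_input = torch.randn(1, 3, 224, 224, device=cuda0)  # placeholder size
torch.onnx.export(new_model, dummy_input, "model_with_norm.onnx")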
The right way of doing this in PyTorch is using dataset transformations. In your specific case, you need torchvision transforms. You can see an example here or here. Copying part of the code here, for completeness:
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                        download=True, transform=transform)

Can we train a model in TensorFlow 2.0 without using Keras?

I am trying to write simple ML code to classify the MNIST dataset in TensorFlow 2.0. I didn't use Keras for now, since I just want to use the lower-level API to help me understand how TensorFlow works. However, after I defined the cross entropy, it seems impossible to continue. All the TF 2.0 optimizers were moved to Keras, and I don't know how to train a model without Keras in TF 2.0. Is there a way to bypass Keras in TF 2.0?
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
from tensorflow.keras import datasets, layers, models

# Helper libraries
import numpy as np
import matplotlib.pyplot as plt

(train_images, train_labels), (test_images, test_labels) = datasets.mnist.load_data()
print(train_images.shape)
print(len(train_labels))
print(train_images[1, :, :].shape)

# plt.figure()
# plt.imshow(train_images[0])
# plt.colorbar()
# plt.grid(False)
# plt.show()

# Normalize pixel values to be between 0 and 1
train_images, test_images = train_images / 255.0, test_images / 255.0

W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

for i in range(1):
    x = tf.constant(train_images[1, :, :].reshape(784), dtype=tf.float32)
    x = tf.reshape(x, [1, 784])
    print(tf.shape(x), tf.shape(W))

    # define the model
    y = tf.nn.softmax(tf.matmul(x, W) + b)
    print(y)

    # correct labels
    y_ = np.zeros(10)
    y_[train_labels[i]] = 1.0
    y_ = tf.constant(y_, dtype=tf.float32)
    y_ = tf.reshape(y_, [1, 10])

    cross_entropy = -tf.reduce_sum(y_ * tf.math.log(y))
    print(cross_entropy)
I don't know how to continue from here.
Backpropagation-based training of models is entirely possible in TensorFlow 2.x without using the Keras API. The usage centers around the tf.GradientTape API and the optimizer objects under the tf.optimizers namespace.
Your example can be modified as follows. Note that it's simplistic code meant to illustrate the basic usage in a short snippet; it's not meant to illustrate machine learning best practices in TF2.
(train_images, train_labels), (test_images, test_labels) = datasets.mnist.load_data()
train_images, test_images = train_images / 255.0, test_images / 255.0

W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

@tf.function
def my_model(x):
    # This is a hand-rolled logistic regressor.
    y = tf.matmul(x, W) + b
    return tf.nn.softmax(y)

@tf.function
def loss(x, y):
    # This is a hand-rolled categorical cross-entropy loss.
    logits = my_model(x)
    diff = -(y * tf.math.log(logits))
    return tf.reduce_mean(diff)

optimizer = tf.optimizers.Adam(learning_rate=1e-3)

for i in range(num_steps):
    # A single training step.
    with tf.GradientTape() as tape:
        # This is atypical, in that you would normally want to do this in
        # mini-batches, instead of using all examples in x_train and y_train
        # at once. But again, this is just a simple example.
        loss_value = loss(x_train, y_train)
    gradients = tape.gradient(loss_value, [W, b])
    optimizer.apply_gradients(zip(gradients, [W, b]))
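The x_train, y_train and num_steps names above are assumed to come from a preparation step like the following (a sketch based on the question's own arrays; the flattening matches the [784, 10] weight shape):

# Flatten the 28x28 images into 784-dim float32 vectors and one-hot the labels.
x_train = tf.constant(train_images.reshape(-1, 784), dtype=tf.float32)
y_train = tf.one_hot(train_labels, depth=10)
num_steps = 100  # illustrative value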

How to implement Grad-CAM on your own network?

I want to implement Grad-CAM on my own network. Should I save my model and load it, then treat the saved model like VGG-16 and do similar operations?
I tried searching the internet, and I found that all the methods are based on famous models, not people's own ones.
So I wonder: maybe I just need to treat my own model as if it were VGG-16, then do similar things?
Hi, I have one solution in PyTorch:
import torch
import torch.nn as nn
from torch.utils import data
from torchvision import transforms
from torchvision import datasets
import matplotlib.pyplot as plt
import numpy as np

# use the ImageNet transformation
transform = transforms.Compose([transforms.Resize((224, 224)),
                                transforms.ToTensor(),
                                transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])

# define a 1-image dataset
dataset = datasets.ImageFolder(root='./data/Elephant/', transform=transform)

# define the dataloader to load that single image
dataloader = data.DataLoader(dataset=dataset, shuffle=False, batch_size=1)

vgg19 = Mymodel()  # create an object of your model
vgg19.load_state_dict(torch.load("your_vgg19_weights"))

class VGG(nn.Module):
    def __init__(self):
        super(VGG, self).__init__()
        # get the pretrained VGG19 network
        self.vgg = vgg19
        # dissect the network to access its last convolutional layer
        self.features_conv = self.vgg.features[:36]  # the 36th layer was my last conv layer
        # get the max pool of the features stem
        self.max_pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
        # get the classifier of the vgg19
        self.classifier = self.vgg.classifier
        # placeholder for the gradients
        self.gradients = None

    # hook for the gradients of the activations
    def activations_hook(self, grad):
        self.gradients = grad

    def forward(self, x):
        x = self.features_conv(x)
        # register the hook
        h = x.register_hook(self.activations_hook)
        # apply the remaining pooling
        x = self.max_pool(x)
        x = x.view((1, -1))
        x = self.classifier(x)
        return x

    # method for the gradient extraction
    def get_activations_gradient(self):
        return self.gradients

    # method for the activation extraction
    def get_activations(self, x):
        return self.features_conv(x)

vgg = VGG()

# set the evaluation mode
vgg.eval()

# get the image from the dataloader
img, _ = next(iter(dataloader))

# get the most likely prediction of the model (one forward pass, reused for backward)
pred = vgg(img)
pred_class = pred.argmax(dim=1).numpy()[0]
pred[:, pred_class].backward()

# pull the gradients out of the model
gradients = vgg.get_activations_gradient()

# pool the gradients across the channels
pooled_gradients = torch.mean(gradients, dim=[0, 2, 3])

# get the activations of the last convolutional layer
activations = vgg.get_activations(img).detach()

# weight the channels by the corresponding gradients
for i in range(512):
    activations[:, i, :, :] *= pooled_gradients[i]

# average the channels of the activations
heatmap = torch.mean(activations, dim=1).squeeze()

# relu on top of the heatmap
# expression (2) in https://arxiv.org/pdf/1610.02391.pdf
heatmap = np.maximum(heatmap, 0)

# normalize the heatmap
heatmap /= torch.max(heatmap)
heatmap = heatmap.numpy()

import cv2
img = cv2.imread('./data/Elephant/data/05fig34.jpg')
heatmap = cv2.resize(heatmap, (img.shape[1], img.shape[0]))
heatmap = np.uint8(255 * heatmap)
heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
superimposed_img = heatmap * 0.4 + img
cv2.imwrite('./map.jpg', superimposed_img)  # saves the Grad-CAM visualization image
