Incompatible shape problem with Keras ( segmentation model) for batch_size>1 - keras

I am trying to do semantic segmentation using Unet from segmentation model for multi channel (>3) image.
The code works if the batch_size =1. But if I change the batch_size to other values (e.g. 2) then error occurs (InvalidArgumentError: Incompatible shapes):
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
<ipython-input-19-15dc3666afa8> in <module>
22 validation_steps = 1,
23 callbacks=build_callbacks(),
---> 24 verbose = 1)
25
~/.virtualenvs/sm/lib/python3.6/site-packages/keras/legacy/interfaces.py in wrapper(*args, **kwargs)
89 warnings.warn('Update your `' + object_name +
90 '` call to the Keras 2 API: ' + signature, stacklevel=2)
---> 91 return func(*args, **kwargs)
92 wrapper._original_function = func
93 return wrapper
~/.virtualenvs/sm/lib/python3.6/site-packages/keras/engine/training.py in fit_generator(self, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
1424 use_multiprocessing=use_multiprocessing,
1425 shuffle=shuffle,
-> 1426 initial_epoch=initial_epoch)
1427
1428 #interfaces.legacy_generator_methods_support
~/.virtualenvs/sm/lib/python3.6/site-packages/keras/engine/training_generator.py in fit_generator(model, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
189 outs = model.train_on_batch(x, y,
190 sample_weight=sample_weight,
--> 191 class_weight=class_weight)
192
193 if not isinstance(outs, list):
~/.virtualenvs/sm/lib/python3.6/site-packages/keras/engine/training.py in train_on_batch(self, x, y, sample_weight, class_weight)
1218 ins = x + y + sample_weights
1219 self._make_train_function()
-> 1220 outputs = self.train_function(ins)
1221 if len(outputs) == 1:
1222 return outputs[0]
~/.virtualenvs/sm/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py in __call__(self, inputs)
2659 return self._legacy_call(inputs)
2660
-> 2661 return self._call(inputs)
2662 else:
2663 if py_any(is_tensor(x) for x in inputs):
~/.virtualenvs/sm/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py in _call(self, inputs)
2629 symbol_vals,
2630 session)
-> 2631 fetched = self._callable_fn(*array_vals)
2632 return fetched[:len(self.outputs)]
2633
~/.virtualenvs/sm/lib/python3.6/site-packages/tensorflow_core/python/client/session.py in __call__(self, *args, **kwargs)
1470 ret = tf_session.TF_SessionRunCallable(self._session._session,
1471 self._handle, args,
-> 1472 run_metadata_ptr)
1473 if run_metadata:
1474 proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
InvalidArgumentError: Incompatible shapes: [2,256,256,1] vs. [2,256,256]
[[{{node loss_1/model_4_loss/mul}}]]
I tried to play around by following different posts in the forum but could not solve it. Here, is a portion of code which runs for batch_size=1.
batch_size = 1 # CHANGING ‘batch_size ‘ value other than 1 gives error
train_image_files = glob(patch_img + "/**/*.tif")
# simple_image_generator() is used to work with multi channel (>3) images (the function is
at the end)
train_image_generator = simple_image_generator(train_image_files,
batch_size=batch_size,
rotation_range=45,
horizontal_flip=True,
vertical_flip=True)
train_mask_files = glob(patch_ann + "/**/*.tif")
train_mask_generator = simple_image_generator(train_mask_files,
batch_size=batch_size)
test_image_files = glob(test_img + "/**/*.tif")
test_image_generator = simple_image_generator(test_image_files,
batch_size=batch_size,
rotation_range=45,
horizontal_flip=True,
vertical_flip=True)
test_mask_files = glob(test_ann + "/**/*.tif")
test_mask_generator = simple_image_generator(test_mask_files,
batch_size=batch_size)
train_generator = (pair for pair in zip(train_image_generator, train_mask_generator))
test_generator = (pair for pair in zip(test_image_generator, test_mask_generator))
.
.
num_channels = 8 # no. of channel
base_model = sm.Unet(backbone_name='resnet34', encoder_weights='imagenet')
inp = Input(shape=( None, None, num_channels))
layer_1 = Conv2D( 3, (1, 1))(inp) # map N channels data to 3 channels
out = base_model(layer_1)
model = Model(inp, out, name=base_model.name)
model.summary()
model.compile(
optimizer = keras.optimizers.Adam(lr=learning_rate),
loss = sm.losses.bce_jaccard_loss,
metrics = ['accuracy',sm.metrics.iou_score]
)
model_history = model.fit_generator(train_generator,
epochs = 1,
steps_per_epoch = 1,
validation_data = test_generator,
validation_steps = 1,
callbacks = build_callbacks(),
verbose = 1)
Additional Information:
I am not using the default imageGenerator provided by keras. I am using ‘simple_image_generator’ (slightly modified)
def simple_image_generator(files, batch_size=32,
rotation_range=0, horizontal_flip=False,
vertical_flip=False):
while True:
# select batch_size number of samples without replacement
batch_files = sample(files, batch_size)
# array for images
batch_X = []
# loop over images of the current batch
for idx, input_path in enumerate(batch_files):
image = np.array(imread(input_path), dtype=float)
# process image
if horizontal_flip:
# randomly flip image up/down
if choice([True, False]):
image = np.flipud(image)
if vertical_flip:
# randomly flip image left/right
if choice([True, False]):
image = np.fliplr(image)
# rotate image by random angle between
# -rotation_range <= angle < rotation_range
if rotation_range is not 0:
angle = np.random.uniform(low=-abs(rotation_range),
high=abs(rotation_range))
image = rotate(image, angle, mode='reflect',
order=1, preserve_range=True)
# put all together
batch_X += [image]
# convert lists to np.array
X = np.array(batch_X)
yield(X)

This error was solved by redefining a new image generator instead of simple_image_generator(). The simple_image_generator() worked well with the shape of the images (8 Bands) but did not cope well with the shape of the mask (1 band ).
During the execution, image_generator had 4 dimensions with [2,256,256,1] ( i.e. batch_size, (image size), bands) BUT mask_generator had 3 dimensions only vs. [2,256,256] (i.e. batch_size,(image size))
So reshaping the mask of [2,256,256] to [2,256,256, 1] solved the issue.

Related

Input to reshape is a tensor with 'batch_size' values, but the requested shape requires a multiple of 'n_features'

I'm trying to make own attention model and I found example code in here:
https://www.kaggle.com/takuok/bidirectional-lstm-and-attention-lb-0-043
and it works just fine when I run it without modification.
But my own data contain only numeric values, I had to change example code.
so I erase embedding part in example code and plus, this is what I fixed.
xtr = np.reshape(xtr, (xtr.shape[0], 1, xtr.shape[1]))
# xtr.shape() = (n_sample_train, 1, 150), y.shape() = (n_sample_train, 6)
xte = np.reshape(xte, (xte.shape[0], 1, xte.shape[1]))
# xtr.shape() = (n_sample_test, 1, 150)
model = BidLstm(maxlen, max_features)
model.compile(loss='binary_crossentropy', optimizer='adam',
metrics=['accuracy'])
and my BidLstm func looks like,
def BidLstm(maxlen, max_features):
inp = Input(shape=(1,150))
#x = Embedding(max_features, embed_size, weights=[embedding_matrix], trainable=False)(inp) -> I don't need embedding since my own data is numeric.
x = Bidirectional(LSTM(300, return_sequences=True, dropout=0.25,
recurrent_dropout=0.25))(inp)
x = Attention(maxlen)(x)
x = Dense(256, activation="relu")(x)
x = Dropout(0.25)(x)
x = Dense(6, activation="sigmoid")(x)
model = Model(inputs=inp, outputs=x)
return model
and it said,
InvalidArgumentErrorTraceback (most recent call last)
<ipython-input-62-929955370368> in <module>
29
30 early = EarlyStopping(monitor="val_loss", mode="min", patience=1)
---> 31 model.fit(xtr, y, batch_size=128, epochs=15, validation_split=0.1, callbacks=[early])
32 #model.fit(xtr, y, batch_size=256, epochs=1, validation_split=0.1)
33
/usr/local/lib/python3.5/dist-packages/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, **kwargs)
1037 initial_epoch=initial_epoch,
1038 steps_per_epoch=steps_per_epoch,
-> 1039 validation_steps=validation_steps)
1040
1041 def evaluate(self, x=None, y=None,
/usr/local/lib/python3.5/dist-packages/keras/engine/training_arrays.py in fit_loop(model, f, ins, out_labels, batch_size, epochs, verbose, callbacks, val_f, val_ins, shuffle, callback_metrics, initial_epoch, steps_per_epoch, validation_steps)
197 ins_batch[i] = ins_batch[i].toarray()
198
--> 199 outs = f(ins_batch)
200 outs = to_list(outs)
201 for l, o in zip(out_labels, outs):
/usr/local/lib/python3.5/dist-packages/keras/backend/tensorflow_backend.py in __call__(self, inputs)
2713 return self._legacy_call(inputs)
2714
-> 2715 return self._call(inputs)
2716 else:
2717 if py_any(is_tensor(x) for x in inputs):
/usr/local/lib/python3.5/dist-packages/keras/backend/tensorflow_backend.py in _call(self, inputs)
2673 fetched = self._callable_fn(*array_vals, run_metadata=self.run_metadata)
2674 else:
-> 2675 fetched = self._callable_fn(*array_vals)
2676 return fetched[:len(self.outputs)]
2677
/usr/local/lib/python3.5/dist-packages/tensorflow/python/client/session.py in __call__(self, *args, **kwargs)
1437 ret = tf_session.TF_SessionRunCallable(
1438 self._session._session, self._handle, args, status,
-> 1439 run_metadata_ptr)
1440 if run_metadata:
1441 proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
/usr/local/lib/python3.5/dist-packages/tensorflow/python/framework/errors_impl.py in __exit__(self, type_arg, value_arg, traceback_arg)
526 None, None,
527 compat.as_text(c_api.TF_Message(self.status.status)),
--> 528 c_api.TF_GetCode(self.status.status))
529 # Delete the underlying status object from memory otherwise it stays alive
530 # as there is a reference to status from this from the traceback due to
InvalidArgumentError: Input to reshape is a tensor with 128 values, but the requested shape requires a multiple of 150
[[{{node attention_16/Reshape_2}}]]
[[{{node loss_5/mul}}]]
I think something wrong in loss function saids in here:
Input to reshape is a tensor with 2 * "batch_size" values, but the requested shape has "batch_size"
but I don't know which part to fix it.
my keras and tensorflow versions are 2.2.4 and 1.13.0-rc0
please help. thanks.
Edit 1
I've change my batch size, like keras saids, multiple of 150(batch_size = 150). than it reports
Train on 143613 samples, validate on 15958 samples
Epoch 1/15
143400/143613 [============================>.] - ETA: 0s - loss: 0.1505 - acc: 0.9619
InvalidArgumentError: Input to reshape is a tensor with 63 values, but the requested shape requires a multiple of 150
[[{{node attention_18/Reshape_2}}]]
[[{{node metrics_6/acc/Mean_1}}]]
and details is same as before. what should I do?
Your input shape must be (150,1).
LSTM shapes are (batch, steps, features). It's pointless to use LSTMs with 1 step only. (Unless you are using custom training loops with stateful=True, which is not your case).

InvalidArgumentError when I use model.fit()

I am trying to build a custom layer in keras/tensorflow. The purpose would be to extend it as a RNN. The problem appears during training, where, apparently, there is an incompatibility with the output shape and the training dataset.
X_train.shape
(100, 5)
X_target.shape
(100, 5)
This is the custom layer:
class MyLayer(Layer):
def __init__(self, output_dim, **kwargs):
self.output_dim = output_dim
super(MyLayer, self).__init__(**kwargs)
def build(self, input_shape):
print(self.output_dim)
print(input_shape)
# Create a trainable weight variable for this layer.
self.kernel = self.add_weight(name='kernel',
shape=(self.output_dim, input_shape[1]),
initializer='uniform',
trainable=True)
super(MyLayer, self).build(input_shape) # Be sure to call this at the end
def call(self, x):
print('shape x: ')
print(x.shape)
print('shape kernel: ')
print(self.kernel.shape)
matrix = tf.transpose(self.kernel)
print('matrix')
print(matrix.shape)
prod = K.dot(x, matrix)
print('after product')
print(prod.shape)
return prod
def compute_output_shape(self, input_shape):
print('Compute output shape')
print(input_shape)
print(self.output_dim)
return (input_shape[0], self.output_dim)
model = Sequential()
model.add(MyLayer(5, batch_input_shape=(100, 5)))
model.compile(optimizer='adam', loss='mse')
# fit model
model.fit(X_train, X_target)
tf.keras.__version__ = '2.1.6-tf'
Since I am passing a x_training with a shape of (100,5), I would expect that, by multiplying it by a matrix of (5,5), I would obtain a matrix of (100, 5), which is the same shape as the target and thus, I would be able to train, in this case, a 5x5 weight matrix. Instead, I get this:
InvalidArgumentError Traceback (most recent call last)
<ipython-input-77-4dee23ead957> in <module>()
6 model.compile(optimizer='adam', loss='mse')
7 # fit model
----> 8 model.fit(X_train, X_target)#, epochs=300, verbose=0)
~/anaconda3/envs/ldsa/lib/python3.5/site-packages/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, **kwargs)
1037 initial_epoch=initial_epoch,
1038 steps_per_epoch=steps_per_epoch,
-> 1039 validation_steps=validation_steps)
1040
1041 def evaluate(self, x=None, y=None,
~/anaconda3/envs/ldsa/lib/python3.5/site-packages/keras/engine/training_arrays.py in fit_loop(model, f, ins, out_labels, batch_size, epochs, verbose, callbacks, val_f, val_ins, shuffle, callback_metrics, initial_epoch, steps_per_epoch, validation_steps)
197 ins_batch[i] = ins_batch[i].toarray()
198
--> 199 outs = f(ins_batch)
200 outs = to_list(outs)
201 for l, o in zip(out_labels, outs):
~/anaconda3/envs/ldsa/lib/python3.5/site-packages/keras/backend/tensorflow_backend.py in __call__(self, inputs)
2713 return self._legacy_call(inputs)
2714
-> 2715 return self._call(inputs)
2716 else:
2717 if py_any(is_tensor(x) for x in inputs):
~/anaconda3/envs/ldsa/lib/python3.5/site-packages/keras/backend/tensorflow_backend.py in _call(self, inputs)
2673 fetched = self._callable_fn(*array_vals, run_metadata=self.run_metadata)
2674 else:
-> 2675 fetched = self._callable_fn(*array_vals)
2676 return fetched[:len(self.outputs)]
2677
~/anaconda3/envs/ldsa/lib/python3.5/site-packages/tensorflow/python/client/session.py in __call__(self, *args, **kwargs)
1380 ret = tf_session.TF_SessionRunCallable(
1381 self._session._session, self._handle, args, status,
-> 1382 run_metadata_ptr)
1383 if run_metadata:
1384 proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
~/anaconda3/envs/ldsa/lib/python3.5/site-packages/tensorflow/python/framework/errors_impl.py in __exit__(self, type_arg, value_arg, traceback_arg)
517 None, None,
518 compat.as_text(c_api.TF_Message(self.status.status)),
--> 519 c_api.TF_GetCode(self.status.status))
520 # Delete the underlying status object from memory otherwise it stays alive
521 # as there is a reference to status from this from the traceback due to
InvalidArgumentError: Incompatible shapes: [100,5] vs. [32,5]
[[Node: training_12/Adam/gradients/loss_17/my_layer_19_loss/sub_grad/BroadcastGradientArgs = BroadcastGradientArgs[T=DT_INT32, _class=["loc:#training_12/Adam/gradients/loss_17/my_layer_19_loss/sub_grad/Reshape"], _device="/job:localhost/replica:0/task:0/device:CPU:0"](training_12/Adam/gradients/loss_17/my_layer_19_loss/sub_grad/Shape, training_12/Adam/gradients/loss_17/my_layer_19_loss/sub_grad/Shape_1)]]
My surprise comes from this: Incompatible shapes: [100,5] vs. [32,5], where does this 32 come from?

How can creat a path to my data for my CNN in jupyter notebook

Intro and setup
So I have been for some time now trying to make a simple Convolution Neural Network. I followed a simple tutorial, which can be found Here's a link!
It is a simple cat vs dog test (2 categories)
I have set my jupyter/tensorflow/keras up in
C:\Users\labadmin
What i have understood is that i just have to put the path from labadmin in order to implement my data for testing and training.
Since i am not sure what is causing the error i have pasted the whole code and error, i think it is about the system not getting the data.
The folder with the Data set-up as following:
labadmin has a folder called data withing that there are two folders
training
test
Both cat images and dog images are shuffled in both folders. There are 10000+ pictures in each folder, so there should be enough,
This is my code:
from keras.models import Sequential
from keras.layers import Convolution2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
classifier = Sequential()
classifier.add(Convolution2D(32, 3, 3, input_shape = (64, 64, 3), activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2,2)))
classifier.add(Flatten())
classifier.add(Dense(output_dim = 128, activation = 'relu'))
classifier.add(Dense(output_dim = 1, activation = 'sigmoid'))
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics=['accuracy'])
import pandas as pd
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
training_set = train_datagen.flow_from_directory(
'data\\training',
target_size=(64, 64),
batch_size=32,
class_mode='categorical',
shuffle=False)
test_set = test_datagen.flow_from_directory(
'data\\test',
target_size=(64, 64),
batch_size=32,
class_mode='categorical',
shuffle=False)
from IPython.display import display
from PIL import Image
classifier.fit_generator(
training_set,
steps_per_epoch=8000,
epochs=10,
validation_data = test_set,
validation_steps = 800)
import numpy as np
from keras_preprocessing import image
test_image = image.load_img('data\\random.jpg', target_size=(64, 64))
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis = 0)
result = classifier.predict(test_image)
training_set.class_indices
if result[0][0]>= 0.5:
prediction = 'dog'
else:
prediction = 'cat'
print(prediction)
I get the following error:
C:\Users\labadmin\Miniconda3\envs\tensorflow\lib\site-packages\ipykernel_launcher.py:26: UserWarning: Update your `Conv2D` call to the Keras 2 API: `Conv2D(32, (3, 3), input_shape=(64, 64, 3..., activation="relu")`
C:\Users\labadmin\Miniconda3\envs\tensorflow\lib\site-packages\ipykernel_launcher.py:35: UserWarning: Update your `Dense` call to the Keras 2 API: `Dense(activation="relu", units=128)`
C:\Users\labadmin\Miniconda3\envs\tensorflow\lib\site-packages\ipykernel_launcher.py:36: UserWarning: Update your `Dense` call to the Keras 2 API: `Dense(activation="sigmoid", units=1)`
Found 0 images belonging to 0 classes.
Found 0 images belonging to 0 classes.
Epoch 1/10
---------------------------------------------------------------------------
ZeroDivisionError Traceback (most recent call last)
<ipython-input-5-393aaba195e9> in <module>
82 epochs=10,
83 validation_data = test_set,
---> 84 validation_steps = 800)
85
86 # Our image we now send through to test
~\Miniconda3\envs\tensorflow\lib\site-packages\keras\legacy\interfaces.py in wrapper(*args, **kwargs)
89 warnings.warn('Update your `' + object_name + '` call to the ' +
90 'Keras 2 API: ' + signature, stacklevel=2)
---> 91 return func(*args, **kwargs)
92 wrapper._original_function = func
93 return wrapper
~\Miniconda3\envs\tensorflow\lib\site-packages\keras\engine\training.py in fit_generator(self, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
1416 use_multiprocessing=use_multiprocessing,
1417 shuffle=shuffle,
-> 1418 initial_epoch=initial_epoch)
1419
1420 #interfaces.legacy_generator_methods_support
~\Miniconda3\envs\tensorflow\lib\site-packages\keras\engine\training_generator.py in fit_generator(model, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
179 batch_index = 0
180 while steps_done < steps_per_epoch:
--> 181 generator_output = next(output_generator)
182
183 if not hasattr(generator_output, '__len__'):
~\Miniconda3\envs\tensorflow\lib\site-packages\keras\utils\data_utils.py in get(self)
707 "`use_multiprocessing=False, workers > 1`."
708 "For more information see issue #1638.")
--> 709 six.reraise(*sys.exc_info())
~\Miniconda3\envs\tensorflow\lib\site-packages\six.py in reraise(tp, value, tb)
691 if value.__traceback__ is not tb:
692 raise value.with_traceback(tb)
--> 693 raise value
694 finally:
695 value = None
~\Miniconda3\envs\tensorflow\lib\site-packages\keras\utils\data_utils.py in get(self)
683 try:
684 while self.is_running():
--> 685 inputs = self.queue.get(block=True).get()
686 self.queue.task_done()
687 if inputs is not None:
~\Miniconda3\envs\tensorflow\lib\multiprocessing\pool.py in get(self, timeout)
642 return self._value
643 else:
--> 644 raise self._value
645
646 def _set(self, i, obj):
~\Miniconda3\envs\tensorflow\lib\multiprocessing\pool.py in worker(inqueue, outqueue, initializer, initargs, maxtasks, wrap_exception)
117 job, i, func, args, kwds = task
118 try:
--> 119 result = (True, func(*args, **kwds))
120 except Exception as e:
121 if wrap_exception and func is not _helper_reraises_exception:
~\Miniconda3\envs\tensorflow\lib\site-packages\keras\utils\data_utils.py in next_sample(uid)
624 The next value of generator `uid`.
625 """
--> 626 return six.next(_SHARED_SEQUENCES[uid])
627
628
~\Miniconda3\envs\tensorflow\lib\site-packages\keras_preprocessing\image\iterator.py in __next__(self, *args, **kwargs)
98
99 def __next__(self, *args, **kwargs):
--> 100 return self.next(*args, **kwargs)
101
102 def next(self):
~\Miniconda3\envs\tensorflow\lib\site-packages\keras_preprocessing\image\iterator.py in next(self)
107 """
108 with self.lock:
--> 109 index_array = next(self.index_generator)
110 # The transformation of images is not under thread lock
111 # so it can be done in parallel
~\Miniconda3\envs\tensorflow\lib\site-packages\keras_preprocessing\image\iterator.py in _flow_index(self)
83 self._set_index_array()
84
---> 85 current_index = (self.batch_index * self.batch_size) % self.n
86 if self.n > current_index + self.batch_size:
87 self.batch_index += 1
ZeroDivisionError: integer division or modulo by zero
Thank you for your time.
Did you populate your data\\training and data\\test directories? From the output:
Found 0 images belonging to 0 classes.
Found 0 images belonging to 0 classes.
Epoch 1/10
it appears that your data augmentation generator did not find any images and the resulting dataset is empty; consequently, when Keras tries to run the fit_generator, you get the division by 0 error as it tries to iterate through your null image set.

fitting a simple image generator in a keras model

I have a keras model that takes an input image and a label value.
I have a data generator that reads the image, processes it and feeds it into the net
from PIL import Image
def my_iterator():
i = 0
while True:
img_name = train_df.loc[i,'Image']
img_label = train_df.loc[i,'Id']
img = Image.open('master_train/'+str(img_name)).convert('L')
print(img.mode)
longer_side = max(img.size)
horizontal_padding = (longer_side - img.size[0]) / 2
vertical_padding = (longer_side - img.size[1]) / 2
img = img.crop((-horizontal_padding,-vertical_padding,img.size[0] + horizontal_padding,img.size[1] + vertical_padding))
img.thumbnail((128,128),Image.ANTIALIAS)
img_array = np.asarray(img,dtype='uint8')
img_array = img_array[:,:,np.newaxis]
print(img_array.ndim)
yield img_array,img_label
i = (i+1) % len(train_df)
from keras.models import Model
from keras.layers import Input,Dense
input_layer = Input(shape=(128,128,1))
x = Dense(100,activation='relu')(input_layer)
output_layer = Dense(1,activation='sigmoid')(x)
model = Model(inputs=input_layer,outputs=output_layer)
model.compile(loss='binary_crossentropy',optimizer='nadam',metrics['accuracy'])
model.summary()
training_generator = my_iterator()
model.fit(training_generator,steps_per_epoch=1)
I get the following error
AttributeError Traceback (most recent call last)
<ipython-input-189-7efa0828e76d> in <module>()
----> 1 model.fit(train_gen,steps_per_epoch=1)
~/work/venvs/keras3/lib/python3.6/site-packages/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, **kwargs)
1628 sample_weight=sample_weight,
1629 class_weight=class_weight,
-> 1630 batch_size=batch_size)
1631 # Prepare validation data.
1632 do_validation = False
~/work/venvs/keras3/lib/python3.6/site-packages/keras/engine/training.py in _standardize_user_data(self, x, y, sample_weight, class_weight, check_array_lengths, batch_size)
1474 self._feed_input_shapes,
1475 check_batch_axis=False,
-> 1476 exception_prefix='input')
1477 y = _standardize_input_data(y, self._feed_output_names,
1478 output_shapes,
~/work/venvs/keras3/lib/python3.6/site-packages/keras/engine/training.py in _standardize_input_data(data, names, shapes, check_batch_axis, exception_prefix)
74 data = data.values if data.__class__.__name__ == 'DataFrame' else data
75 data = [data]
---> 76 data = [np.expand_dims(x, 1) if x is not None and x.ndim == 1 else x for x in data]
77
78 if len(data) != len(names):
~/work/venvs/keras3/lib/python3.6/site-packages/keras/engine/training.py in <listcomp>(.0)
74 data = data.values if data.__class__.__name__ == 'DataFrame' else data
75 data = [data]
---> 76 data = [np.expand_dims(x, 1) if x is not None and x.ndim == 1 else x for x in data]
77
78 if len(data) != len(names):
AttributeError: 'generator' object has no attribute 'ndim'
​
You should be using fit_generator to train a model using a generator, not the plain fit function.

ValueError: setting an array element with a sequence in keras using nltk ngram

from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense
from keras.layers.recurrent import SimpleRNN
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import LabelEncoder
import numpy as np
text = open('eng.train').read().split()
words = []
tags_1 = []
tags_2 = []
for i in range(len(text)):
if i % 4 == 0:
words.append(text[i])
if i % 4 == 1:
tags_1.append(text[i])
if i % 4 == 3:
tags_2.append(text[i])
hashing_vectorizer = HashingVectorizer(decode_error = 'ignore', n_features = 2 **15)
X_v = hashing_vectorizer.fit_transform(words)
label_encoder = LabelEncoder()
y1 = label_encoder.fit_transform(tags_1)
y2 = label_encoder.fit_transform(tags_2)
y1 = np_utils.to_categorical(y1)
y2 = np_utils.to_categorical(y2)
import nltk
trigram_X = list(nltk.trigrams(X_v))
#trigram_X = list(trigram_X)
print(len(trigram_X))
X = numpy.array(trigram_X)
print(X.shape)
y = numpy.reshape(y1, (204567, 1, 46))
trigram_tags = list(nltk.trigrams(y))
#trigram_y = list(trigram_tags)
print (len(trigram_y))
target = numpy.array(trigram_y)
y = numpy.reshape(target, (204565, 3, 46))
X = numpy.reshape(X, (204565, 3, 1))
X_final = numpy.dstack((X, y))
print(X_final.shape)
X_input = X_final[: -1, :, :]
print(X_input.shape)
y_final = label_encoder.fit_transform(tags_1)
y_target = np_utils.to_categorical(y_final[3:])
print(y_target.shape)
from keras.layers import Dense
from keras.models import Sequential
from keras.layers.recurrent import SimpleRNN
Feature hashig is used here. Now the problem requires to give the generated hashed vector with a corresponding one hot encoded vector as the input. But the keras program is throwing the following error:
model = Sequential()
model.add(SimpleRNN(100,input_shape = (X_input.shape[1], X_input.shape[2])))
model.add(Dense(y_target.shape[1], activation = 'softmax'))
model.compile(loss = 'categorical_crossentropy', optimizer = 'adam', metrics = ['accuracy'])
model.fit(X_input, y_target, epochs = 20, batch_size = 200)
ValueError: setting an array element with a sequence.
Please explain the reason for the error and a possible solution
edit 1
I have attached the full error stack as below
ValueError Traceback (most recent call last)
<ipython-input-3-4d9a4c1d9885> in <module>()
62 model.add(Dense(y_target.shape[1], activation = 'softmax'))
63 model.compile(loss = 'categorical_crossentropy', optimizer = 'adam', metrics = ['accuracy'])
---> 64 model.fit(X_input, y_target, epochs = 20, batch_size = 200)
65
/home/aditya/anaconda3/lib/python3.6/site-packages/keras/models.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, **kwargs)
843 class_weight=class_weight,
844 sample_weight=sample_weight,
--> 845 initial_epoch=initial_epoch)
846
847 def evaluate(self, x, y, batch_size=32, verbose=1,
/home/aditya/anaconda3/lib/python3.6/site-packages/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, **kwargs)
1483 val_f=val_f, val_ins=val_ins, shuffle=shuffle,
1484 callback_metrics=callback_metrics,
-> 1485 initial_epoch=initial_epoch)
1486
1487 def evaluate(self, x, y, batch_size=32, verbose=1, sample_weight=None):
/home/aditya/anaconda3/lib/python3.6/site-packages/keras/engine/training.py in _fit_loop(self, f, ins, out_labels, batch_size, epochs, verbose, callbacks, val_f, val_ins, shuffle, callback_metrics, initial_epoch)
1138 batch_logs['size'] = len(batch_ids)
1139 callbacks.on_batch_begin(batch_index, batch_logs)
-> 1140 outs = f(ins_batch)
1141 if not isinstance(outs, list):
1142 outs = [outs]
/home/aditya/anaconda3/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py in __call__(self, inputs)
2071 session = get_session()
2072 updated = session.run(self.outputs + [self.updates_op],
-> 2073 feed_dict=feed_dict)
2074 return updated[:len(self.outputs)]
2075
/home/aditya/anaconda3/lib/python3.6/site-packages/tensorflow/python/client/session.py in run(self, fetches, feed_dict, options, run_metadata)
787 try:
788 result = self._run(None, fetches, feed_dict, options_ptr,
--> 789 run_metadata_ptr)
790 if run_metadata:
791 proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
/home/aditya/anaconda3/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
966 feed_handles[subfeed_name] = subfeed_val
967 else:
--> 968 np_val = np.asarray(subfeed_val, dtype=subfeed_dtype)
969
970 if (not is_tensor_handle_feed and
/home/aditya/anaconda3/lib/python3.6/site-packages/numpy/core/numeric.py in asarray(a, dtype, order)
529
530 """
--> 531 return array(a, dtype, copy=False, order=order)
532
533
ValueError: setting an array element with a sequence

Resources