fitting a simple image generator in a keras model

I have a Keras model that takes an input image and a label value.
I have a data generator that reads the image, processes it, and feeds it into the net:
import numpy as np
from PIL import Image

def my_iterator():
    i = 0
    while True:
        img_name = train_df.loc[i, 'Image']
        img_label = train_df.loc[i, 'Id']
        img = Image.open('master_train/' + str(img_name)).convert('L')
        print(img.mode)
        # pad the shorter side so the image becomes square
        longer_side = max(img.size)
        horizontal_padding = (longer_side - img.size[0]) / 2
        vertical_padding = (longer_side - img.size[1]) / 2
        img = img.crop((-horizontal_padding,
                        -vertical_padding,
                        img.size[0] + horizontal_padding,
                        img.size[1] + vertical_padding))
        img.thumbnail((128, 128), Image.ANTIALIAS)
        img_array = np.asarray(img, dtype='uint8')
        img_array = img_array[:, :, np.newaxis]  # add a channel axis
        print(img_array.ndim)
        yield img_array, img_label
        i = (i + 1) % len(train_df)
from keras.models import Model
from keras.layers import Input, Dense

input_layer = Input(shape=(128, 128, 1))
x = Dense(100, activation='relu')(input_layer)
output_layer = Dense(1, activation='sigmoid')(x)
model = Model(inputs=input_layer, outputs=output_layer)
model.compile(loss='binary_crossentropy', optimizer='nadam', metrics=['accuracy'])
model.summary()

training_generator = my_iterator()
model.fit(training_generator, steps_per_epoch=1)
I get the following error:
AttributeError Traceback (most recent call last)
<ipython-input-189-7efa0828e76d> in <module>()
----> 1 model.fit(train_gen,steps_per_epoch=1)
~/work/venvs/keras3/lib/python3.6/site-packages/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, **kwargs)
1628 sample_weight=sample_weight,
1629 class_weight=class_weight,
-> 1630 batch_size=batch_size)
1631 # Prepare validation data.
1632 do_validation = False
~/work/venvs/keras3/lib/python3.6/site-packages/keras/engine/training.py in _standardize_user_data(self, x, y, sample_weight, class_weight, check_array_lengths, batch_size)
1474 self._feed_input_shapes,
1475 check_batch_axis=False,
-> 1476 exception_prefix='input')
1477 y = _standardize_input_data(y, self._feed_output_names,
1478 output_shapes,
~/work/venvs/keras3/lib/python3.6/site-packages/keras/engine/training.py in _standardize_input_data(data, names, shapes, check_batch_axis, exception_prefix)
74 data = data.values if data.__class__.__name__ == 'DataFrame' else data
75 data = [data]
---> 76 data = [np.expand_dims(x, 1) if x is not None and x.ndim == 1 else x for x in data]
77
78 if len(data) != len(names):
~/work/venvs/keras3/lib/python3.6/site-packages/keras/engine/training.py in <listcomp>(.0)
74 data = data.values if data.__class__.__name__ == 'DataFrame' else data
75 data = [data]
---> 76 data = [np.expand_dims(x, 1) if x is not None and x.ndim == 1 else x for x in data]
77
78 if len(data) != len(names):
AttributeError: 'generator' object has no attribute 'ndim'

You should be using fit_generator to train a model using a generator, not the plain fit function.
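In this version of Keras, fit only accepts in-memory arrays, which is why the generator object reaches the ndim check and fails. A minimal sketch of the change, assuming the Keras 2 fit_generator API, that my_iterator yields one sample at a time (fit_generator expects each yield to be a batch, so a batch axis is added here), and that the labels are numeric:

import numpy as np

def my_batch_iterator():
    # wrap the per-sample iterator so each yield is a batch of one
    for img_array, img_label in my_iterator():
        x = np.expand_dims(img_array, axis=0)              # (1, 128, 128, 1)
        y = np.expand_dims(np.asarray(img_label), axis=0)  # (1,)
        yield x, y

model.fit_generator(my_batch_iterator(), steps_per_epoch=1, epochs=1)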

Related

Incompatible shape problem with Keras ( segmentation model) for batch_size>1

I am trying to do semantic segmentation on multi-channel (>3) images using a Unet from the segmentation models library.
The code works if batch_size = 1, but changing batch_size to any other value (e.g. 2) produces an error (InvalidArgumentError: Incompatible shapes):
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
<ipython-input-19-15dc3666afa8> in <module>
22 validation_steps = 1,
23 callbacks=build_callbacks(),
---> 24 verbose = 1)
25
~/.virtualenvs/sm/lib/python3.6/site-packages/keras/legacy/interfaces.py in wrapper(*args, **kwargs)
89 warnings.warn('Update your `' + object_name +
90 '` call to the Keras 2 API: ' + signature, stacklevel=2)
---> 91 return func(*args, **kwargs)
92 wrapper._original_function = func
93 return wrapper
~/.virtualenvs/sm/lib/python3.6/site-packages/keras/engine/training.py in fit_generator(self, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
1424 use_multiprocessing=use_multiprocessing,
1425 shuffle=shuffle,
-> 1426 initial_epoch=initial_epoch)
1427
1428 @interfaces.legacy_generator_methods_support
~/.virtualenvs/sm/lib/python3.6/site-packages/keras/engine/training_generator.py in fit_generator(model, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
189 outs = model.train_on_batch(x, y,
190 sample_weight=sample_weight,
--> 191 class_weight=class_weight)
192
193 if not isinstance(outs, list):
~/.virtualenvs/sm/lib/python3.6/site-packages/keras/engine/training.py in train_on_batch(self, x, y, sample_weight, class_weight)
1218 ins = x + y + sample_weights
1219 self._make_train_function()
-> 1220 outputs = self.train_function(ins)
1221 if len(outputs) == 1:
1222 return outputs[0]
~/.virtualenvs/sm/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py in __call__(self, inputs)
2659 return self._legacy_call(inputs)
2660
-> 2661 return self._call(inputs)
2662 else:
2663 if py_any(is_tensor(x) for x in inputs):
~/.virtualenvs/sm/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py in _call(self, inputs)
2629 symbol_vals,
2630 session)
-> 2631 fetched = self._callable_fn(*array_vals)
2632 return fetched[:len(self.outputs)]
2633
~/.virtualenvs/sm/lib/python3.6/site-packages/tensorflow_core/python/client/session.py in __call__(self, *args, **kwargs)
1470 ret = tf_session.TF_SessionRunCallable(self._session._session,
1471 self._handle, args,
-> 1472 run_metadata_ptr)
1473 if run_metadata:
1474 proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
InvalidArgumentError: Incompatible shapes: [2,256,256,1] vs. [2,256,256]
[[{{node loss_1/model_4_loss/mul}}]]
I tried to play around by following different posts in the forum but could not solve it. Here is a portion of the code, which runs for batch_size = 1.
batch_size = 1  # changing batch_size to any value other than 1 gives the error
train_image_files = glob(patch_img + "/**/*.tif")

# simple_image_generator() is used to work with multi-channel (>3) images
# (the function is at the end)
train_image_generator = simple_image_generator(train_image_files,
                                               batch_size=batch_size,
                                               rotation_range=45,
                                               horizontal_flip=True,
                                               vertical_flip=True)

train_mask_files = glob(patch_ann + "/**/*.tif")
train_mask_generator = simple_image_generator(train_mask_files,
                                              batch_size=batch_size)

test_image_files = glob(test_img + "/**/*.tif")
test_image_generator = simple_image_generator(test_image_files,
                                              batch_size=batch_size,
                                              rotation_range=45,
                                              horizontal_flip=True,
                                              vertical_flip=True)

test_mask_files = glob(test_ann + "/**/*.tif")
test_mask_generator = simple_image_generator(test_mask_files,
                                             batch_size=batch_size)

train_generator = (pair for pair in zip(train_image_generator, train_mask_generator))
test_generator = (pair for pair in zip(test_image_generator, test_mask_generator))
.
.
num_channels = 8  # no. of channels
base_model = sm.Unet(backbone_name='resnet34', encoder_weights='imagenet')
inp = Input(shape=(None, None, num_channels))
layer_1 = Conv2D(3, (1, 1))(inp)  # map N-channel data to 3 channels
out = base_model(layer_1)
model = Model(inp, out, name=base_model.name)
model.summary()
model.compile(optimizer=keras.optimizers.Adam(lr=learning_rate),
              loss=sm.losses.bce_jaccard_loss,
              metrics=['accuracy', sm.metrics.iou_score])
model_history = model.fit_generator(train_generator,
                                    epochs=1,
                                    steps_per_epoch=1,
                                    validation_data=test_generator,
                                    validation_steps=1,
                                    callbacks=build_callbacks(),
                                    verbose=1)
Additional information:
I am not using the default ImageDataGenerator provided by Keras. I am using simple_image_generator (slightly modified):
from random import sample, choice        # assumed imports, inferred from usage below
from skimage.io import imread            # assumed: imread/rotate match skimage's API
from skimage.transform import rotate
import numpy as np

def simple_image_generator(files, batch_size=32,
                           rotation_range=0, horizontal_flip=False,
                           vertical_flip=False):
    while True:
        # select batch_size number of samples without replacement
        batch_files = sample(files, batch_size)
        # array for images
        batch_X = []
        # loop over images of the current batch
        for idx, input_path in enumerate(batch_files):
            image = np.array(imread(input_path), dtype=float)
            # process image
            if horizontal_flip:
                # randomly flip image up/down
                if choice([True, False]):
                    image = np.flipud(image)
            if vertical_flip:
                # randomly flip image left/right
                if choice([True, False]):
                    image = np.fliplr(image)
            # rotate image by random angle between
            # -rotation_range <= angle < rotation_range
            if rotation_range != 0:
                angle = np.random.uniform(low=-abs(rotation_range),
                                          high=abs(rotation_range))
                image = rotate(image, angle, mode='reflect',
                               order=1, preserve_range=True)
            # put all together
            batch_X += [image]
        # convert lists to np.array
        X = np.array(batch_X)
        yield X
This error was solved by redefining a new image generator instead of simple_image_generator(). The simple_image_generator() worked well with the shape of the images (8 bands) but did not cope well with the shape of the masks (1 band).
During execution, image_generator had 4 dimensions, [2, 256, 256, 1] (i.e. batch_size, image size, bands), but mask_generator had only 3 dimensions, [2, 256, 256] (i.e. batch_size, image size).
So reshaping the mask from [2, 256, 256] to [2, 256, 256, 1] solved the issue.
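One way to apply that fix without rewriting the whole generator is to wrap the mask generator; a sketch assuming each yield from mask_generator is a [batch, height, width] array (the wrapper name is illustrative):

import numpy as np

def with_channel_axis(mask_gen):
    # append a trailing channel axis: [2, 256, 256] -> [2, 256, 256, 1]
    for mask_batch in mask_gen:
        yield mask_batch[..., np.newaxis]

train_generator = (pair for pair in
                   zip(train_image_generator, with_channel_axis(train_mask_generator)))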

AttributeError: 'generator' object has no attribute 'ndim'

I'm working on a triplet network in Keras to find image similarity, but I'm getting an error when feeding triplets to the model. I would appreciate help with this.
Basically I'm trying to feed the model with 3 inputs (anchor, positive, negative).
I'm working in Python 3 and fitting the model with fit using Keras. This is my function to train the model:
def train(trainDB, testDB, n_iter, batch_size, evaluate_every, test_size,
          loss_every):
    print("Starting training process!")
    print("-------------------------------------")
    best = -1
    t_start = time.time()
    inputs = trainDB.getTripletTrainData(batch_size)
    targets = np.ones([batch_size])
    for i in range(0, n_iter):
        loss = tripletNet.fit(inputs, targets)
        #print("Loss: {0}".format(loss))
        if i % evaluate_every == 0:
            print("Time for {0} iterations: {1}".format(i, time.time() - t_start))
            val_acc = self.test_oneshot(testDB, test_size)
            if val_acc > best:
                print("Current best: {0}, previous best: {1}".format(val_acc, best))
                print("Saving weights to: {0} \n".format(weights_path))
                self.tripletNet.save_weights(weights_path)
                best = val_acc
        if i % loss_every == 0:
            print("iteration {}, training loss: {:.2f},".format(i, loss))
Error message:
Starting training process!
-------------------------------------
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-163-b5442e61de2d> in <module>()
----> 1 train(trainDatabase, testDatabase, n_iter, batch_size, evaluate_every, test_size, loss_every)
5 frames
<ipython-input-161-f417f0ebcfc7> in train(trainDB, testDB, n_iter, batch_size, evaluate_every, test_size, loss_every)
10
11 for i in range(0, n_iter):
---> 12 loss=tripletNet.fit(inputs, targets)
13
14 #print("Loss: {0}".format(loss))
/usr/local/lib/python3.6/dist-packages/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, **kwargs)
950 sample_weight=sample_weight,
951 class_weight=class_weight,
--> 952 batch_size=batch_size)
953 # Prepare validation data.
954 do_validation = False
/usr/local/lib/python3.6/dist-packages/keras/engine/training.py in _standardize_user_data(self, x, y, sample_weight, class_weight, check_array_lengths, batch_size)
749 feed_input_shapes,
750 check_batch_axis=False, # Don't enforce the batch size.
--> 751 exception_prefix='input')
752
753 if y is not None:
/usr/local/lib/python3.6/dist-packages/keras/engine/training_utils.py in standardize_input_data(data, names, shapes, check_batch_axis, exception_prefix)
90 data = data.values if data.__class__.__name__ == 'DataFrame' else data
91 data = [data]
---> 92 data = [standardize_single_array(x) for x in data]
93
94 if len(data) != len(names):
/usr/local/lib/python3.6/dist-packages/keras/engine/training_utils.py in <listcomp>(.0)
90 data = data.values if data.__class__.__name__ == 'DataFrame' else data
91 data = [data]
---> 92 data = [standardize_single_array(x) for x in data]
93
94 if len(data) != len(names):
/usr/local/lib/python3.6/dist-packages/keras/engine/training_utils.py in standardize_single_array(x)
25 'Got tensor with shape: %s' % str(shape))
26 return x
---> 27 elif x.ndim == 1:
28 x = np.expand_dims(x, 1)
29 return x
AttributeError: 'generator' object has no attribute 'ndim'
If I use fit_generator, I get the error below:
Starting training process!
-------------------------------------
Epoch 1/1
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-194-b5442e61de2d> in <module>()
----> 1 train(trainDatabase, testDatabase, n_iter, batch_size, evaluate_every, test_size, loss_every)
3 frames
<ipython-input-193-8ad964a9916f> in train(trainDB, testDB, n_iter, batch_size, evaluate_every, test_size, loss_every)
10
11 for i in range(0, n_iter):
---> 12 loss=tripletNet.fit_generator(inputs, targets)
13
14 #print("Loss: {0}".format(loss))
/usr/local/lib/python3.6/dist-packages/keras/legacy/interfaces.py in wrapper(*args, **kwargs)
89 warnings.warn('Update your `' + object_name + '` call to the ' +
90 'Keras 2 API: ' + signature, stacklevel=2)
---> 91 return func(*args, **kwargs)
92 wrapper._original_function = func
93 return wrapper
/usr/local/lib/python3.6/dist-packages/keras/engine/training.py in fit_generator(self, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
1416 use_multiprocessing=use_multiprocessing,
1417 shuffle=shuffle,
-> 1418 initial_epoch=initial_epoch)
1419
1420 #interfaces.legacy_generator_methods_support
/usr/local/lib/python3.6/dist-packages/keras/engine/training_generator.py in fit_generator(model, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
178 steps_done = 0
179 batch_index = 0
--> 180 while steps_done < steps_per_epoch:
181 generator_output = next(output_generator)
182
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
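Both tracebacks point at the calling convention rather than the network. fit expects in-memory arrays, so handing it a generator fails the ndim check; and in fit_generator the second positional argument is steps_per_epoch, so passing the targets array there is exactly what makes the internal while steps_done < steps_per_epoch comparison ambiguous. A sketch of the expected pattern, assuming getTripletTrainData(batch_size) returns the batch inputs (e.g. the list of three arrays): the generator itself yields (inputs, targets) pairs and steps_per_epoch is an integer.

import numpy as np

def triplet_batches(trainDB, batch_size):
    # fit_generator expects the generator to yield (inputs, targets) tuples
    while True:
        inputs = trainDB.getTripletTrainData(batch_size)
        targets = np.ones([batch_size])
        yield inputs, targets

# steps_per_epoch must be an integer, not the targets array
tripletNet.fit_generator(triplet_batches(trainDB, batch_size), steps_per_epoch=n_iter)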

FastAI v1 PyTorch Custom Model

I have been trying to use fastai with a custom torch model. My code is as follows:
X_train = np.load(dirpath + 'X_train.npy')
X_valid = np.load(dirpath + 'X_valid.npy')
Y_train = np.load(dirpath + 'Y_train.npy')
Y_valid = np.load(dirpath + 'Y_valid.npy')
X_train's shape is (240, 122, 96) and Y_train's shape is (240, 1).
Then I convert these to torch tensors:
# Converting data to torch tensors
def to_torch_data(x, np_type, tch_type):
    return torch.from_numpy(x.astype(np_type)).to(tch_type)

X_train = to_torch_data(X_train, float, torch.float32)
X_valid = to_torch_data(X_valid, float, torch.float32)
Y_train = to_torch_data(Y_train, float, torch.float32)
Y_valid = to_torch_data(Y_valid, float, torch.float32)
Creating TensorDatasets for the fastai DataBunch wrapper:
# Creating torch tensor datasets so that data can be used
# on ImageDataBunch function for fastai
train_ds = tdatautils.TensorDataset(X_train,Y_train)
valid_ds = tdatautils.TensorDataset(X_valid,Y_valid)
# Creating DataBunch object to be used as data in fastai methods.
batch_size = 24
my_data_bunch = DataBunch.create(train_ds,valid_ds,bs=batch_size)
And this is my custom torch model:
# Creating corresponding torch model
import torch.nn.functional as F

class Net(nn.Module):
    def __init__(self, droprate=0, activationF=None):
        super(Net, self).__init__()
        self.lstm_0 = nn.LSTM(96, 720)
        self.activation_0 = nn.ELU()
        self.dropout_0 = nn.Dropout(p=droprate)
        self.lstm_1 = nn.LSTM(720, 480)
        self.activation_1 = nn.ELU()
        self.batch_norm_1 = nn.BatchNorm1d(122)
        self.fc_2 = nn.Linear(480, 128)
        self.dropout_2 = nn.Dropout(p=droprate)
        self.last = nn.Linear(128, 1)
        self.last_act = nn.ReLU()

    def forward(self, x):
        out, hid1 = self.lstm_0(x)
        out = self.dropout_0(self.activation_0(out))
        out, hid2 = self.lstm_1(out)
        out = out[:, -1, :]
        out = self.batch_norm_1(self.activation_1(out))
        out = self.dropout_2(self.fc_2(out))
        out = self.last_act(self.last(out))
        return out

# create instance of model
net = Net(droprate=train_droprate, activationF=train_activation)  # .cuda()
print(net)
After all this, I run the learner's lr_find method and get this error:
Empty Traceback (most recent call last)
C:\Anaconda3\envs\fastai\lib\site-packages\torch\utils\data\dataloader.py in _try_get_batch(self, timeout)
510 try:
--> 511 data = self.data_queue.get(timeout=timeout)
512 return (True, data)
C:\Anaconda3\envs\fastai\lib\queue.py in get(self, block, timeout)
171 if remaining <= 0.0:
--> 172 raise Empty
173 self.not_empty.wait(remaining)
Empty:
During handling of the above exception, another exception occurred:
RuntimeError Traceback (most recent call last)
<ipython-input-35-e4b7603c0a82> in <module>
----> 1 my_learner.lr_find()
~\Desktop\fastai\fastai\fastai\train.py in lr_find(learn, start_lr, end_lr, num_it, stop_div, wd)
30 cb = LRFinder(learn, start_lr, end_lr, num_it, stop_div)
31 epochs = int(np.ceil(num_it/len(learn.data.train_dl)))
---> 32 learn.fit(epochs, start_lr, callbacks=[cb], wd=wd)
33
34 def to_fp16(learn:Learner, loss_scale:float=None, max_noskip:int=1000, dynamic:bool=True, clip:float=None,
~\Desktop\fastai\fastai\fastai\basic_train.py in fit(self, epochs, lr, wd, callbacks)
197 callbacks = [cb(self) for cb in self.callback_fns + listify(defaults.extra_callback_fns)] + listify(callbacks)
198 if defaults.extra_callbacks is not None: callbacks += defaults.extra_callbacks
--> 199 fit(epochs, self, metrics=self.metrics, callbacks=self.callbacks+callbacks)
200
201 def create_opt(self, lr:Floats, wd:Floats=0.)->None:
~\Desktop\fastai\fastai\fastai\basic_train.py in fit(epochs, learn, callbacks, metrics)
97 cb_handler.set_dl(learn.data.train_dl)
98 cb_handler.on_epoch_begin()
---> 99 for xb,yb in progress_bar(learn.data.train_dl, parent=pbar):
100 xb, yb = cb_handler.on_batch_begin(xb, yb)
101 loss = loss_batch(learn.model, xb, yb, learn.loss_func, learn.opt, cb_handler)
C:\Anaconda3\envs\fastai\lib\site-packages\fastprogress\fastprogress.py in __iter__(self)
70 self.update(0)
71 try:
---> 72 for i,o in enumerate(self._gen):
73 if i >= self.total: break
74 yield o
~\Desktop\fastai\fastai\fastai\basic_data.py in __iter__(self)
73 def __iter__(self):
74 "Process and returns items from `DataLoader`."
---> 75 for b in self.dl: yield self.proc_batch(b)
76
77 @classmethod
C:\Anaconda3\envs\fastai\lib\site-packages\torch\utils\data\dataloader.py in __next__(self)
574 while True:
575 assert (not self.shutdown and self.batches_outstanding > 0)
--> 576 idx, batch = self._get_batch()
577 self.batches_outstanding -= 1
578 if idx != self.rcvd_idx:
C:\Anaconda3\envs\fastai\lib\site-packages\torch\utils\data\dataloader.py in _get_batch(self)
541 elif self.pin_memory:
542 while self.pin_memory_thread.is_alive():
--> 543 success, data = self._try_get_batch()
544 if success:
545 return data
C:\Anaconda3\envs\fastai\lib\site-packages\torch\utils\data\dataloader.py in _try_get_batch(self, timeout)
517 if not all(w.is_alive() for w in self.workers):
518 pids_str = ', '.join(str(w.pid) for w in self.workers if not w.is_alive())
--> 519 raise RuntimeError('DataLoader worker (pid(s) {}) exited unexpectedly'.format(pids_str))
520 if isinstance(e, queue.Empty):
521 return (False, None)
RuntimeError: DataLoader worker (pid(s) 9584, 7236, 5108, 932, 13228, 13992, 4576, 13204) exited unexpectedly
I have researched DataLoader but couldn't find anything useful.

Although I didn't understand the error message you posted, I see one problem in your code:
out = out[:,-1,:] # batch_size x 480
out = self.batch_norm_1(self.activation_1(out))
But you declared batch_norm_1 as:
self.batch_norm_1 = nn.BatchNorm1d(122)
Which should be:
self.batch_norm_1 = nn.BatchNorm1d(480)
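A quick shape check makes the mismatch visible; a sketch using the dimensions from the question, with batch_first=True assumed so the out[:, -1, :] slicing reads as (batch, seq, features):

import torch
import torch.nn as nn

x = torch.randn(24, 122, 96)             # (batch, seq_len=122, features=96)
lstm = nn.LSTM(96, 480, batch_first=True)
out, _ = lstm(x)                         # out: (24, 122, 480)
out = out[:, -1, :]                      # out: (24, 480) -- 480 features per sample
print(nn.BatchNorm1d(480)(out).shape)    # torch.Size([24, 480])
# nn.BatchNorm1d(122)(out)               # RuntimeError: expects 122 features, input has 480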

sentiment classification using keras

I am very new to deep learning classification. I have review data with labels (pos, neg), and I'm trying to classify the data using Keras. Here is my code:
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer

sentences_train, sentences_test, y_train, y_test = train_test_split(
    review_list2, label_list, test_size=0.25, random_state=1000)

vectorizer = CountVectorizer()
vectorizer.fit(sentences_train)
X_train = vectorizer.transform(sentences_train)
X_test = vectorizer.transform(sentences_test)
# build the model
from keras.models import Sequential
from keras import layers

input_dim = X_train.shape[1]
model = Sequential()
model.add(layers.Dense(8, input_dim=input_dim, activation='relu'))
model.add(layers.Dense(2, activation='softmax'))
model.compile(loss='binary_crossentropy',
              optimizer='adam', metrics=['accuracy'])
model.summary()
history = model.fit(X_train, y_train, epochs=100, verbose=False,
                    validation_data=(X_test, y_test), batch_size=10)
I got an error:
AttributeError Traceback (most recent call last)
<ipython-input-52-34c39f53e335> in <module>
----> 1 history = model.fit(X_train, y_train, epochs=100, verbose=False, validation_data=(X_test, y_test), batch_size=10)
d:\py-ver35\lib\site-packages\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, **kwargs)
950 sample_weight=sample_weight,
951 class_weight=class_weight,
--> 952 batch_size=batch_size)
953 # Prepare validation data.
954 do_validation = False
d:\py-ver35\lib\site-packages\keras\engine\training.py in _standardize_user_data(self, x, y, sample_weight, class_weight, check_array_lengths, batch_size)
787 feed_output_shapes,
788 check_batch_axis=False, # Don't enforce the batch size.
--> 789 exception_prefix='target')
790
791 # Generate sample-wise weight values given the `sample_weight` and
d:\py-ver35\lib\site-packages\keras\engine\training_utils.py in standardize_input_data(data, names, shapes, check_batch_axis, exception_prefix)
90 data = data.values if data.__class__.__name__ == 'DataFrame' else data
91 data = [data]
---> 92 data = [standardize_single_array(x) for x in data]
93
94 if len(data) != len(names):
d:\py-ver35\lib\site-packages\keras\engine\training_utils.py in <listcomp>(.0)
90 data = data.values if data.__class__.__name__ == 'DataFrame' else data
91 data = [data]
---> 92 data = [standardize_single_array(x) for x in data]
93
94 if len(data) != len(names):
d:\py-ver35\lib\site-packages\keras\engine\training_utils.py in standardize_single_array(x)
25 'Got tensor with shape: %s' % str(shape))
26 return x
---> 27 elif x.ndim == 1:
28 x = np.expand_dims(x, 1)
29 return x
AttributeError: 'str' object has no attribute 'ndim'
I have tried every solution mentioned in relation to this error but still cannot fix it. Any help? Thanks in advance.
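The traceback fails while standardizing the target (exception_prefix='target'), and 'str' object has no attribute 'ndim' indicates y_train still holds raw string labels ('pos'/'neg') rather than a numeric array. A sketch of one common fix, encoding the labels before fitting (variable names taken from the question):

from sklearn.preprocessing import LabelEncoder
from keras.utils import to_categorical

encoder = LabelEncoder()
y_train_enc = to_categorical(encoder.fit_transform(y_train))  # shape (n, 2), one-hot
y_test_enc = to_categorical(encoder.transform(y_test))
history = model.fit(X_train, y_train_enc, epochs=100, verbose=False,
                    validation_data=(X_test, y_test_enc), batch_size=10)

With a 2-unit softmax output, categorical_crossentropy is the usual pairing for one-hot targets.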

ValueError: setting an array element with a sequence in keras using nltk ngram

from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense
from keras.layers.recurrent import SimpleRNN
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import LabelEncoder
import numpy as np

text = open('eng.train').read().split()
words = []
tags_1 = []
tags_2 = []
for i in range(len(text)):
    if i % 4 == 0:
        words.append(text[i])
    if i % 4 == 1:
        tags_1.append(text[i])
    if i % 4 == 3:
        tags_2.append(text[i])

hashing_vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 15)
X_v = hashing_vectorizer.fit_transform(words)

label_encoder = LabelEncoder()
y1 = label_encoder.fit_transform(tags_1)
y2 = label_encoder.fit_transform(tags_2)
y1 = np_utils.to_categorical(y1)
y2 = np_utils.to_categorical(y2)

import nltk
trigram_X = list(nltk.trigrams(X_v))
print(len(trigram_X))
X = np.array(trigram_X)
print(X.shape)

y = np.reshape(y1, (204567, 1, 46))
trigram_tags = list(nltk.trigrams(y))
trigram_y = list(trigram_tags)
print(len(trigram_y))
target = np.array(trigram_y)
y = np.reshape(target, (204565, 3, 46))
X = np.reshape(X, (204565, 3, 1))
X_final = np.dstack((X, y))
print(X_final.shape)

X_input = X_final[:-1, :, :]
print(X_input.shape)
y_final = label_encoder.fit_transform(tags_1)
y_target = np_utils.to_categorical(y_final[3:])
print(y_target.shape)

from keras.layers import Dense
from keras.models import Sequential
from keras.layers.recurrent import SimpleRNN
Feature hashing is used here. The problem requires feeding the generated hashed vector together with a corresponding one-hot encoded vector as the input. But the Keras program throws the following error:
model = Sequential()
model.add(SimpleRNN(100, input_shape=(X_input.shape[1], X_input.shape[2])))
model.add(Dense(y_target.shape[1], activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X_input, y_target, epochs=20, batch_size=200)
ValueError: setting an array element with a sequence.
Please explain the reason for the error and a possible solution.
Edit 1: I have attached the full error stack below:
ValueError Traceback (most recent call last)
<ipython-input-3-4d9a4c1d9885> in <module>()
62 model.add(Dense(y_target.shape[1], activation = 'softmax'))
63 model.compile(loss = 'categorical_crossentropy', optimizer = 'adam', metrics = ['accuracy'])
---> 64 model.fit(X_input, y_target, epochs = 20, batch_size = 200)
65
/home/aditya/anaconda3/lib/python3.6/site-packages/keras/models.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, **kwargs)
843 class_weight=class_weight,
844 sample_weight=sample_weight,
--> 845 initial_epoch=initial_epoch)
846
847 def evaluate(self, x, y, batch_size=32, verbose=1,
/home/aditya/anaconda3/lib/python3.6/site-packages/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, **kwargs)
1483 val_f=val_f, val_ins=val_ins, shuffle=shuffle,
1484 callback_metrics=callback_metrics,
-> 1485 initial_epoch=initial_epoch)
1486
1487 def evaluate(self, x, y, batch_size=32, verbose=1, sample_weight=None):
/home/aditya/anaconda3/lib/python3.6/site-packages/keras/engine/training.py in _fit_loop(self, f, ins, out_labels, batch_size, epochs, verbose, callbacks, val_f, val_ins, shuffle, callback_metrics, initial_epoch)
1138 batch_logs['size'] = len(batch_ids)
1139 callbacks.on_batch_begin(batch_index, batch_logs)
-> 1140 outs = f(ins_batch)
1141 if not isinstance(outs, list):
1142 outs = [outs]
/home/aditya/anaconda3/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py in __call__(self, inputs)
2071 session = get_session()
2072 updated = session.run(self.outputs + [self.updates_op],
-> 2073 feed_dict=feed_dict)
2074 return updated[:len(self.outputs)]
2075
/home/aditya/anaconda3/lib/python3.6/site-packages/tensorflow/python/client/session.py in run(self, fetches, feed_dict, options, run_metadata)
787 try:
788 result = self._run(None, fetches, feed_dict, options_ptr,
--> 789 run_metadata_ptr)
790 if run_metadata:
791 proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
/home/aditya/anaconda3/lib/python3.6/site-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
966 feed_handles[subfeed_name] = subfeed_val
967 else:
--> 968 np_val = np.asarray(subfeed_val, dtype=subfeed_dtype)
969
970 if (not is_tensor_handle_feed and
/home/aditya/anaconda3/lib/python3.6/site-packages/numpy/core/numeric.py in asarray(a, dtype, order)
529
530 """
--> 531 return array(a, dtype, copy=False, order=order)
532
533
ValueError: setting an array element with a sequence
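The traceback ends in np.asarray(subfeed_val, dtype=subfeed_dtype), which is the usual symptom of feeding TensorFlow an object array. HashingVectorizer returns a scipy sparse matrix, so nltk.trigrams(X_v) yields tuples of sparse rows, and np.array(trigram_X) packs those objects instead of numbers; the feed then fails with "setting an array element with a sequence". A sketch of one way around it, densifying the hashed vectors before building the trigram windows (memory permitting, given n_features = 2 ** 15; variable names follow the question):

X_dense = X_v.toarray()                   # sparse -> dense, shape (n_words, 2 ** 15)
trigram_X = list(nltk.trigrams(X_dense))  # tuples of dense rows
X = np.array(trigram_X)                   # numeric array, shape (n_words - 2, 3, 2 ** 15)
print(X.shape)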
