sentiment classification using keras - python-3.x

I am very new to deep learning classification. I have review data labelled (pos, neg) and I'm trying to classify it using Keras. Here is my code:
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
sentences_train, sentences_test, y_train, y_test = train_test_split(
    review_list2, label_list, test_size=0.25, random_state=1000)
vectorizer = CountVectorizer()
vectorizer.fit(sentences_train)
X_train = vectorizer.transform(sentences_train)
X_test = vectorizer.transform(sentences_test)
#build the model
from keras.models import Sequential
from keras import layers
input_dim = X_train.shape[1]
model = Sequential()
model.add(layers.Dense(8, input_dim=input_dim, activation='relu'))
model.add(layers.Dense(2, activation='softmax'))
model.compile(loss='binary_crossentropy',
optimizer='adam', metrics=['accuracy'])
model.summary()
history = model.fit(X_train, y_train, epochs=100, verbose=False, validation_data=(X_test, y_test), batch_size=10)
I got an error:
AttributeError Traceback (most recent call last)
<ipython-input-52-34c39f53e335> in <module>
----> 1 history = model.fit(X_train, y_train, epochs=100, verbose=False, validation_data=(X_test, y_test), batch_size=10)
d:\py-ver35\lib\site-packages\keras\engine\training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, **kwargs)
950 sample_weight=sample_weight,
951 class_weight=class_weight,
--> 952 batch_size=batch_size)
953 # Prepare validation data.
954 do_validation = False
d:\py-ver35\lib\site-packages\keras\engine\training.py in _standardize_user_data(self, x, y, sample_weight, class_weight, check_array_lengths, batch_size)
787 feed_output_shapes,
788 check_batch_axis=False, # Don't enforce the batch size.
--> 789 exception_prefix='target')
790
791 # Generate sample-wise weight values given the `sample_weight` and
d:\py-ver35\lib\site-packages\keras\engine\training_utils.py in standardize_input_data(data, names, shapes, check_batch_axis, exception_prefix)
90 data = data.values if data.__class__.__name__ == 'DataFrame' else data
91 data = [data]
---> 92 data = [standardize_single_array(x) for x in data]
93
94 if len(data) != len(names):
d:\py-ver35\lib\site-packages\keras\engine\training_utils.py in <listcomp>(.0)
90 data = data.values if data.__class__.__name__ == 'DataFrame' else data
91 data = [data]
---> 92 data = [standardize_single_array(x) for x in data]
93
94 if len(data) != len(names):
d:\py-ver35\lib\site-packages\keras\engine\training_utils.py in standardize_single_array(x)
25 'Got tensor with shape: %s' % str(shape))
26 return x
---> 27 elif x.ndim == 1:
28 x = np.expand_dims(x, 1)
29 return x
AttributeError: 'str' object has no attribute 'ndim'
I have tried every solution mentioned for this error but still cannot fix it. Any help? Thanks in advance.
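The traceback points at the targets rather than the features: standardize_single_array is handed a plain Python string, which suggests y_train and y_test still contain the raw 'pos'/'neg' labels, and Keras needs numeric arrays for the targets. A minimal sketch of one way to encode them, assuming label_list holds the strings 'pos' and 'neg' (the encoding step below is added here for illustration and is not part of the original code):

from sklearn.preprocessing import LabelEncoder
from keras.utils import to_categorical

# Map 'pos'/'neg' to integers 0/1, then one-hot encode so the target shape
# matches the Dense(2, activation='softmax') output layer.
encoder = LabelEncoder()
y_train_enc = to_categorical(encoder.fit_transform(y_train))
y_test_enc = to_categorical(encoder.transform(y_test))

history = model.fit(X_train, y_train_enc, epochs=100, verbose=False,
                    validation_data=(X_test, y_test_enc), batch_size=10)

With a two-unit softmax output, 'categorical_crossentropy' is the more conventional loss; alternatively, keep the integer labels, switch the last layer to Dense(1, activation='sigmoid'), and stay with 'binary_crossentropy'.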

Related

Keras: Using model output as the input for another: When feeding symbolic tensors to a model, we expect thetensors to have a static batch size

I have the following two models, where model_A is trained first and its output is then used to train model_C:
import keras
from keras.layers import Input, Dense
from keras.models import Model
inputs = Input(shape=(12,))
# ---------------------------------------
# model_A
x = Dense(64, activation='relu')(inputs)
x = Dense(64, activation='relu')(x)
predictions_A = Dense(3, activation='softmax')(x)
model_A = Model(inputs=inputs, outputs=predictions_A)
model_A.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
model_A.fit(my_data_x, pd.get_dummies(my_data['target_cate'], prefix=['cate_']))
#----------------------------------------
input_C_out_A = Input(shape=(3,))
# Concatenating the two input layers
concat = keras.layers.concatenate([inputs, input_C_out_A])
x1 = Dense(64, activation='relu')(concat)
x1 = Dense(64, activation='relu')(x1)
predictions_C= Dense(1, activation='sigmoid')(x1)
model_C = Model(inputs=[inputs, input_C_out_A], outputs=predictions_C)
model_C.compile(loss='mean_squared_error', optimizer='adam')
model_C.fit([my_data_x,predictions_A], my_data['target_numeric'])
Training model_A seems fine, but then I get the following error when training model_C:
Epoch 1/1
374667/374667 [==============================] - 11s 30us/step - loss: 0.3157 - acc: 0.9119
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-78-8df7b1dec93f> in <module>
28 model_C = Model(inputs=[inputs, input_C_out_A], outputs=predictions_C)
29 model_C.compile(loss='mean_squared_error', optimizer='adam')
---> 30 model_C.fit([my_data_x,predictions_A], my_data['target_numeric'])
~/workspace/git/tensorplay/venv/lib/python3.7/site-packages/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, **kwargs)
950 sample_weight=sample_weight,
951 class_weight=class_weight,
--> 952 batch_size=batch_size)
953 # Prepare validation data.
954 do_validation = False
~/workspace/git/tensorplay/venv/lib/python3.7/site-packages/keras/engine/training.py in _standardize_user_data(self, x, y, sample_weight, class_weight, check_array_lengths, batch_size)
749 feed_input_shapes,
750 check_batch_axis=False, # Don't enforce the batch size.
--> 751 exception_prefix='input')
752
753 if y is not None:
~/workspace/git/tensorplay/venv/lib/python3.7/site-packages/keras/engine/training_utils.py in standardize_input_data(data, names, shapes, check_batch_axis, exception_prefix)
90 data = data.values if data.__class__.__name__ == 'DataFrame' else data
91 data = [data]
---> 92 data = [standardize_single_array(x) for x in data]
93
94 if len(data) != len(names):
~/workspace/git/tensorplay/venv/lib/python3.7/site-packages/keras/engine/training_utils.py in <listcomp>(.0)
90 data = data.values if data.__class__.__name__ == 'DataFrame' else data
91 data = [data]
---> 92 data = [standardize_single_array(x) for x in data]
93
94 if len(data) != len(names):
~/workspace/git/tensorplay/venv/lib/python3.7/site-packages/keras/engine/training_utils.py in standardize_single_array(x)
23 'When feeding symbolic tensors to a model, we expect the'
24 'tensors to have a static batch size. '
---> 25 'Got tensor with shape: %s' % str(shape))
26 return x
27 elif x.ndim == 1:
ValueError: When feeding symbolic tensors to a model, we expect thetensors to have a static batch size. Got tensor with shape: (None, 3)
Any idea what I missed? Thanks!
The model_C works by:
Giving the input data to model_A,
Getting output of model_A, and
Feeding that, along with the original inputs, to the first Dense layer.
So implement just what you said (and try to keep models separate, i.e. each with its own input/output layers):
input_C = Input(shape=(12,))
out_A = model_A(input_C) # get the output of model_A
concat = keras.layers.concatenate([input_C, out_A])
x1 = Dense(64, activation='relu')(concat)
x1 = Dense(64, activation='relu')(x1)
predictions_C= Dense(1, activation='sigmoid')(x1)
model_C = Model(inputs=input_C, outputs=predictions_C)
model_C.compile(loss='mean_squared_error', optimizer='adam')
model_C.fit(my_data_x, my_data['target_numeric'])
If you don't want the model_A to be trained when training model_C (i.e. if you have already trained model_A and don't want its weights to be changed), just set model_A.trainable = False before compiling model_C.
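For example (a two-line sketch of that freezing step, added here for illustration):

model_A.trainable = False   # keep model_A's weights fixed while model_C trains
model_C.compile(loss='mean_squared_error', optimizer='adam')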
It makes no sense to put the output of a model (a symbolic tensor) into model.fit, since there is no input data there. You should first obtain predictions from model A and then use them to fit model C:
pred_a = model_A.predict(my_data_x)
model_C.fit([my_data_x, pred_a], my_data['target_numeric'])

UnknownError when trying to use a pre-trained model to predict classes

Following the tutorial below, I've saved a trained model into a .h5 file using model.save().
https://www.tensorflow.org/tutorials/keras/save_and_restore_models#save_the_entire_model
I've loaded this model in a new document using load_model() and when I'm trying to use it to predict on new data, I'm getting an UnknownError. I am using tensorflow-gpu.
The code I've used for training the model is as below:
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense, Dropout, Embedding, CuDNNLSTM, Bidirectional
from sklearn.model_selection import train_test_split
"""
My dataset is a two-column DataFrame of which the first column (X) contains
a pre-processed and encoded tweet which has been padded to a length of 47 words. The reason as to why I did this is because I'm passing in these values to an Embedding layer
The second column (Y) is a label associated with that tweet
X_train.shape = (89552, 47)
Y.shape = (89552,)
vocab_size = 66167
max_sent_len = 47
emb_dim = 75
"""
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=420)
x_train, x_val, Y_train, y_val = train_test_split(X_train, y_train, test_size=0.1, random_state=420)
model = Sequential([
Embedding(input_dim=vocab_size, output_dim=emb_dim, input_length=max_sent_len, trainable=True),
Bidirectional(CuDNNLSTM(64, return_sequences=False)),
Dropout(0.5),
Dense(2, activation='softmax')
])
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(x_train, Y_train, epochs=50, batch_size=300, shuffle=True, validation_data=(x_val, y_val))
model.summary()
model.save('trained/model-2-f.h5')
I do not get any errors when using the model for predictions there. In another file, I'm testing whether the model was saved properly, using another dataset with the same properties.
from tensorflow.keras.models import load_model
trained_model = load_model('trained/model-2-f.h5')
"""
w_x is the data I'm using to predict values for
w_x.shape = (201, 47)
"""
w_pred = trained_model.predict_classes(w_x)
The error I'm getting is:
UnknownError Traceback (most recent call last)
<ipython-input-18-cd338bbebc52> in <module>
----> 1 w_pred = trained_model.predict_classes(w_x)
2 w_pred
~\Anaconda3\envs\nlp\lib\site-packages\tensorflow\python\keras\engine\sequential.py in predict_classes(self, x, batch_size, verbose)
316 A numpy array of class predictions.
317 """
--> 318 proba = self.predict(x, batch_size=batch_size, verbose=verbose)
319 if proba.shape[-1] > 1:
320 return proba.argmax(axis=-1)
~\Anaconda3\envs\nlp\lib\site-packages\tensorflow\python\keras\engine\training.py in predict(self, x, batch_size, verbose, steps, callbacks, max_queue_size, workers, use_multiprocessing)
1076 verbose=verbose,
1077 steps=steps,
-> 1078 callbacks=callbacks)
1079
1080 def reset_metrics(self):
~\Anaconda3\envs\nlp\lib\site-packages\tensorflow\python\keras\engine\training_arrays.py in model_iteration(model, inputs, targets, sample_weights, batch_size, epochs, verbose, callbacks, val_inputs, val_targets, val_sample_weights, shuffle, initial_epoch, steps_per_epoch, validation_steps, validation_freq, mode, validation_in_fit, prepared_feed_values_from_dataset, steps_name, **kwargs)
361
362 # Get outputs.
--> 363 batch_outs = f(ins_batch)
364 if not isinstance(batch_outs, list):
365 batch_outs = [batch_outs]
~\Anaconda3\envs\nlp\lib\site-packages\tensorflow\python\keras\backend.py in __call__(self, inputs)
3290
3291 fetched = self._callable_fn(*array_vals,
-> 3292 run_metadata=self.run_metadata)
3293 self._call_fetch_callbacks(fetched[-len(self._fetches):])
3294 output_structure = nest.pack_sequence_as(
~\Anaconda3\envs\nlp\lib\site-packages\tensorflow\python\client\session.py in __call__(self, *args, **kwargs)
1456 ret = tf_session.TF_SessionRunCallable(self._session._session,
1457 self._handle, args,
-> 1458 run_metadata_ptr)
1459 if run_metadata:
1460 proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
UnknownError: 2 root error(s) found.
(0) Unknown: Fail to find the dnn implementation.
[[{{node bidirectional_2/CudnnRNN_1}}]]
[[dense_2/Softmax/_243]]
(1) Unknown: Fail to find the dnn implementation.
[[{{node bidirectional_2/CudnnRNN_1}}]]
0 successful operations.
0 derived errors ignored.
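The 'Fail to find the dnn implementation' error on a CudnnRNN node usually means cuDNN could not be initialised, most often because the GPU's memory is already fully allocated when the second session starts. A commonly suggested workaround, sketched here under the assumption of TensorFlow 1.x on a GPU (this is not from the original post), is to enable memory growth before loading the model:

import tensorflow as tf
from tensorflow.keras.backend import set_session
from tensorflow.keras.models import load_model

config = tf.ConfigProto()
config.gpu_options.allow_growth = True     # allocate GPU memory on demand instead of all at once
set_session(tf.Session(config=config))

trained_model = load_model('trained/model-2-f.h5')
w_pred = trained_model.predict_classes(w_x)

Note also that a model built with CuDNNLSTM layers needs a GPU at prediction time as well; the saved model cannot be loaded and run on a CPU-only machine.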

How can I create a path to my data for my CNN in a Jupyter notebook

Intro and setup
So I have been trying for some time now to make a simple Convolutional Neural Network, following a simple tutorial. It is a simple cat vs dog test (2 categories).
I have set my jupyter/tensorflow/keras up in
C:\Users\labadmin
As I understand it, I just have to give the path from labadmin so that my training and test data are picked up.
Since I am not sure what is causing the error, I have pasted the whole code and error; I think the problem is that the system is not finding the data.
The folders with the data are set up as follows: labadmin has a folder called data, and within that there are two folders:
training
test
Both cat images and dog images are shuffled together in both folders. There are 10000+ pictures in each folder, so there should be enough.
This is my code:
from keras.models import Sequential
from keras.layers import Convolution2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
classifier = Sequential()
classifier.add(Convolution2D(32, 3, 3, input_shape = (64, 64, 3), activation = 'relu'))
classifier.add(MaxPooling2D(pool_size = (2,2)))
classifier.add(Flatten())
classifier.add(Dense(output_dim = 128, activation = 'relu'))
classifier.add(Dense(output_dim = 1, activation = 'sigmoid'))
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics=['accuracy'])
import pandas as pd
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
training_set = train_datagen.flow_from_directory(
'data\\training',
target_size=(64, 64),
batch_size=32,
class_mode='categorical',
shuffle=False)
test_set = test_datagen.flow_from_directory(
'data\\test',
target_size=(64, 64),
batch_size=32,
class_mode='categorical',
shuffle=False)
from IPython.display import display
from PIL import Image
classifier.fit_generator(
training_set,
steps_per_epoch=8000,
epochs=10,
validation_data = test_set,
validation_steps = 800)
import numpy as np
from keras_preprocessing import image
test_image = image.load_img('data\\random.jpg', target_size=(64, 64))
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis = 0)
result = classifier.predict(test_image)
training_set.class_indices
if result[0][0]>= 0.5:
prediction = 'dog'
else:
prediction = 'cat'
print(prediction)
I get the following error:
C:\Users\labadmin\Miniconda3\envs\tensorflow\lib\site-packages\ipykernel_launcher.py:26: UserWarning: Update your `Conv2D` call to the Keras 2 API: `Conv2D(32, (3, 3), input_shape=(64, 64, 3..., activation="relu")`
C:\Users\labadmin\Miniconda3\envs\tensorflow\lib\site-packages\ipykernel_launcher.py:35: UserWarning: Update your `Dense` call to the Keras 2 API: `Dense(activation="relu", units=128)`
C:\Users\labadmin\Miniconda3\envs\tensorflow\lib\site-packages\ipykernel_launcher.py:36: UserWarning: Update your `Dense` call to the Keras 2 API: `Dense(activation="sigmoid", units=1)`
Found 0 images belonging to 0 classes.
Found 0 images belonging to 0 classes.
Epoch 1/10
---------------------------------------------------------------------------
ZeroDivisionError Traceback (most recent call last)
<ipython-input-5-393aaba195e9> in <module>
82 epochs=10,
83 validation_data = test_set,
---> 84 validation_steps = 800)
85
86 # Our image we now send through to test
~\Miniconda3\envs\tensorflow\lib\site-packages\keras\legacy\interfaces.py in wrapper(*args, **kwargs)
89 warnings.warn('Update your `' + object_name + '` call to the ' +
90 'Keras 2 API: ' + signature, stacklevel=2)
---> 91 return func(*args, **kwargs)
92 wrapper._original_function = func
93 return wrapper
~\Miniconda3\envs\tensorflow\lib\site-packages\keras\engine\training.py in fit_generator(self, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
1416 use_multiprocessing=use_multiprocessing,
1417 shuffle=shuffle,
-> 1418 initial_epoch=initial_epoch)
1419
1420 #interfaces.legacy_generator_methods_support
~\Miniconda3\envs\tensorflow\lib\site-packages\keras\engine\training_generator.py in fit_generator(model, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
179 batch_index = 0
180 while steps_done < steps_per_epoch:
--> 181 generator_output = next(output_generator)
182
183 if not hasattr(generator_output, '__len__'):
~\Miniconda3\envs\tensorflow\lib\site-packages\keras\utils\data_utils.py in get(self)
707 "`use_multiprocessing=False, workers > 1`."
708 "For more information see issue #1638.")
--> 709 six.reraise(*sys.exc_info())
~\Miniconda3\envs\tensorflow\lib\site-packages\six.py in reraise(tp, value, tb)
691 if value.__traceback__ is not tb:
692 raise value.with_traceback(tb)
--> 693 raise value
694 finally:
695 value = None
~\Miniconda3\envs\tensorflow\lib\site-packages\keras\utils\data_utils.py in get(self)
683 try:
684 while self.is_running():
--> 685 inputs = self.queue.get(block=True).get()
686 self.queue.task_done()
687 if inputs is not None:
~\Miniconda3\envs\tensorflow\lib\multiprocessing\pool.py in get(self, timeout)
642 return self._value
643 else:
--> 644 raise self._value
645
646 def _set(self, i, obj):
~\Miniconda3\envs\tensorflow\lib\multiprocessing\pool.py in worker(inqueue, outqueue, initializer, initargs, maxtasks, wrap_exception)
117 job, i, func, args, kwds = task
118 try:
--> 119 result = (True, func(*args, **kwds))
120 except Exception as e:
121 if wrap_exception and func is not _helper_reraises_exception:
~\Miniconda3\envs\tensorflow\lib\site-packages\keras\utils\data_utils.py in next_sample(uid)
624 The next value of generator `uid`.
625 """
--> 626 return six.next(_SHARED_SEQUENCES[uid])
627
628
~\Miniconda3\envs\tensorflow\lib\site-packages\keras_preprocessing\image\iterator.py in __next__(self, *args, **kwargs)
98
99 def __next__(self, *args, **kwargs):
--> 100 return self.next(*args, **kwargs)
101
102 def next(self):
~\Miniconda3\envs\tensorflow\lib\site-packages\keras_preprocessing\image\iterator.py in next(self)
107 """
108 with self.lock:
--> 109 index_array = next(self.index_generator)
110 # The transformation of images is not under thread lock
111 # so it can be done in parallel
~\Miniconda3\envs\tensorflow\lib\site-packages\keras_preprocessing\image\iterator.py in _flow_index(self)
83 self._set_index_array()
84
---> 85 current_index = (self.batch_index * self.batch_size) % self.n
86 if self.n > current_index + self.batch_size:
87 self.batch_index += 1
ZeroDivisionError: integer division or modulo by zero
Thank you for your time.
Did you populate your data\\training and data\\test directories? From the output:
Found 0 images belonging to 0 classes.
Found 0 images belonging to 0 classes.
Epoch 1/10
it appears that your data augmentation generator did not find any images and the resulting dataset is empty; consequently, when Keras tries to run the fit_generator, you get the division by 0 error as it tries to iterate through your null image set.
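Note that flow_from_directory infers the classes from sub-directories, so with cat and dog images mixed directly inside data\training and data\test it finds 0 images in 0 classes. A sketch of the layout and call it expects (the cats/dogs folder names are illustrative):

data/
    training/
        cats/    <- all training cat images
        dogs/    <- all training dog images
    test/
        cats/
        dogs/

training_set = train_datagen.flow_from_directory(
    'C:\\Users\\labadmin\\data\\training',   # an absolute path avoids depending on the notebook's working directory
    target_size=(64, 64),
    batch_size=32,
    class_mode='binary')                     # 'binary' matches the single sigmoid output unit

The test generator needs the same treatment.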

Explaining an LSTM Keras model with the Eli5 library

I'm trying to use Eli5 for explaining an LSTM keras model for time series prediction. The keras model receives as input an array with shape (nsamples, timesteps, nfeatures).
This is my code:
from keras.models import Sequential
from keras.layers import LSTM, Dropout, Dense

def baseline_model():
    model = Sequential()
    model.add(LSTM(32, input_shape=(X_train.shape[1], X_train.shape[2])))
    model.add(Dropout(0.2))
    model.add(Dense(1))
    model.compile(loss='logcosh', optimizer='adam')
    return model

from keras.wrappers.scikit_learn import KerasClassifier, KerasRegressor
import eli5
from eli5.sklearn import PermutationImportance

my_model = KerasRegressor(build_fn=baseline_model, nb_epoch=30, batch_size=32, verbose=False)
history = my_model.fit(X_train, y_train)
So far, everything is OK. The problem is that the following line raises an error:
# X_train has a shape equal to (nsamples, timesteps, nfeatures) and y_train has a shape (nsamples)
perm = PermutationImportance(my_model, random_state=1).fit(X_train, y_train)
Error:
ValueError Traceback (most recent call last)
in ()
2 d2_train_dataset = X_train.reshape((nsamples, timesteps * features))
3
----> 4 perm = PermutationImportance(my_model, random_state=1).fit(X_train, y_train)
5 #eli5.show_weights(perm, feature_names = X.columns.tolist())
~/anaconda3/lib/python3.6/site-packages/eli5/sklearn/permutation_importance.py in fit(self, X, y, groups, **fit_params)
183 self.estimator_.fit(X, y, **fit_params)
184
--> 185 X = check_array(X)
186
187 if self.cv not in (None, "prefit"):
~/anaconda3/lib/python3.6/site-packages/sklearn/utils/validation.py in check_array(array, accept_sparse, accept_large_sparse, dtype, order, copy, force_all_finite, ensure_2d, allow_nd, ensure_min_samples, ensure_min_features, warn_on_dtype, estimator)
568 if not allow_nd and array.ndim >= 3:
569 raise ValueError("Found array with dim %d. %s expected <= 2."
--> 570 % (array.ndim, estimator_name))
571 if force_all_finite:
572 _assert_all_finite(array,
ValueError: Found array with dim 3. Estimator expected <= 2.
What can I do to fix this error? How can I use eli5 with my LSTM Keras Model?
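PermutationImportance runs its input through scikit-learn's check_array, which only accepts 2-D arrays, so a 3-D LSTM input is rejected no matter how the model is built. One workaround is to hand eli5 a flattened (nsamples, timesteps * nfeatures) array and reshape it back to 3-D inside a thin wrapper around the already-trained Keras model. A minimal sketch under that assumption (the ReshapedKerasRegressor class below is illustrative, not part of eli5 or Keras):

import numpy as np
from sklearn.base import BaseEstimator, RegressorMixin
from eli5.sklearn import PermutationImportance

class ReshapedKerasRegressor(BaseEstimator, RegressorMixin):
    """Presents a 2-D interface to eli5 while feeding 3-D data to Keras."""
    def __init__(self, model, timesteps, nfeatures):
        self.model = model            # an already-trained Keras model
        self.timesteps = timesteps
        self.nfeatures = nfeatures

    def fit(self, X, y=None):
        return self                   # training happened elsewhere; nothing to refit

    def predict(self, X):
        X3d = X.reshape(X.shape[0], self.timesteps, self.nfeatures)
        return self.model.predict(X3d).ravel()

    def score(self, X, y):
        return -np.mean((self.predict(X) - y) ** 2)   # negative MSE: higher is better

X2d = X_train.reshape(X_train.shape[0], -1)           # (nsamples, timesteps * nfeatures)
wrapped = ReshapedKerasRegressor(my_model.model, X_train.shape[1], X_train.shape[2])
perm = PermutationImportance(wrapped, random_state=1).fit(X2d, y_train)

Each permuted column then corresponds to a single (timestep, feature) pair rather than to one original feature, so the weights from eli5.show_weights need to be read accordingly.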

Dask DataFrame for fitting Keras model

I have a Dask DataFrame that I want to use for fitting a Keras autoencoder model:
DataFrame:
import dask.dataframe as dd
input_df = dd.read_csv(file_path)
input_df.dtypes
_2 float64
_3 float64
_4 float64
_5 float64 ...
Keras model:
from keras.models import Sequential
from keras.layers import Dense

autoencoder = Sequential()
autoencoder.add(Dense(dense[0], input_shape=(dense[0],), activation='relu'))
autoencoder.add(Dense(dense[1], activation='relu'))
autoencoder.add(Dense(dense[2], activation='relu'))
autoencoder.add(Dense(dense[3], activation='relu'))
autoencoder.add(Dense(dense[0], activation='relu'))
autoencoder.compile(loss='mse',
                    optimizer='adam',
                    metrics=['mse'])
When I pass the DataFrame for fitting:
autoencoder.fit(input_df, input_df,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_split = val_split)
I get the error:
TypeError Traceback (most recent call last)
<ipython-input-23-d0480d8a460d> in <module>()
3 epochs=epochs,
4 verbose=1,
----> 5 validation_split = val_split)
~/anaconda3/envs/py36/lib/python3.6/site-packages/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, **kwargs)
950 sample_weight=sample_weight,
951 class_weight=class_weight,
--> 952 batch_size=batch_size)
953 # Prepare validation data.
954 do_validation = False
~/anaconda3/envs/py36/lib/python3.6/site-packages/keras/engine/training.py in _standardize_user_data(self, x, y, sample_weight, class_weight, check_array_lengths, batch_size)
799 for (ref, sw, cw, mode) in
800 zip(y, sample_weights, class_weights,
--> 801 feed_sample_weight_modes)
802 ]
803 # Check that all arrays have the same length.
~/anaconda3/envs/py36/lib/python3.6/site-packages/keras/engine/training.py in <listcomp>(.0)
797 sample_weights = [
798 standardize_weights(ref, sw, cw, mode)
--> 799 for (ref, sw, cw, mode) in
800 zip(y, sample_weights, class_weights,
801 feed_sample_weight_modes)
~/anaconda3/envs/py36/lib/python3.6/site-packages/keras/engine/training_utils.py in standardize_weights(y, sample_weight, class_weight, sample_weight_mode)
522 else:
523 if sample_weight_mode is None:
--> 524 return np.ones((y.shape[0],), dtype=K.floatx())
525 else:
526 return np.ones((y.shape[0], y.shape[1]), dtype=K.floatx())
~/anaconda3/envs/py36/lib/python3.6/site-packages/numpy/core/numeric.py in ones(shape, dtype, order)
201
202 """
--> 203 a = empty(shape, dtype, order)
204 multiarray.copyto(a, 1, casting='unsafe')
205 return a
TypeError: 'float' object cannot be interpreted as an integer
Would appreciate some help! Thanks!
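Keras does not understand Dask objects: while preparing sample weights it calls np.ones((y.shape[0],)), and the row count of the underlying Dask array is unknown (NaN) rather than a concrete integer, which is where the TypeError comes from. If the data fits in memory, one simple workaround (a sketch, not from the original post) is to materialise the DataFrame into a NumPy array before fitting:

X = input_df.compute().values   # pull the Dask DataFrame into memory as a NumPy array

autoencoder.fit(X, X,
                batch_size=batch_size,
                epochs=epochs,
                verbose=1,
                validation_split=val_split)

For data that does not fit in memory, the usual alternative is to stream batches to Keras with a generator (fit_generator) instead of handing it the Dask DataFrame directly.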
