I have the following code:
from sklearn.datasets import fetch_openml
import numpy as np
import keras
mnist = fetch_openml('mnist_784', version=1)
X, y = mnist["data"], mnist["target"]
y = y.astype(np.uint8)
X_digits = [np.array(X.iloc[i]) for i in range(len(X))]
X = np.array([some_digit.reshape(28, 28) for some_digit in X_digits])
X_train, X_test, y_train, y_test = X[:60000], X[60000:], y[:60000], y[60000:]
model = keras.models.Sequential([
    keras.layers.Conv2D(64, 7, activation="relu", padding="same",
                        input_shape=[28, 28, 1]),
    keras.layers.MaxPooling2D(2),
    keras.layers.Conv2D(128, 3, activation="relu", padding="same"),
    keras.layers.Conv2D(128, 3, activation="relu", padding="same"),
    keras.layers.MaxPooling2D(2),
    keras.layers.Conv2D(256, 3, activation="relu", padding="same"),
    keras.layers.Conv2D(256, 3, activation="relu", padding="same"),
    keras.layers.MaxPooling2D(2),
    keras.layers.Flatten(),
    keras.layers.Dense(128, activation="relu"),
    keras.layers.Dropout(0.5),
    keras.layers.Dense(64, activation="relu"),
    keras.layers.Dropout(0.5),
    keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="categorical_crossentropy")
That all seems to work fine. But then on this line:
model.fit(X_train, y_train)
I get this error:
ValueError Traceback (most recent call last)
<ipython-input-19-d768f88d541e> in <module>()
----> 1 model.fit(X_train, y_train)
1 frames
/usr/local/lib/python3.7/dist-packages/tensorflow/python/framework/func_graph.py in autograph_handler(*args, **kwargs)
1127 except Exception as e: # pylint:disable=broad-except
1128 if hasattr(e, "ag_error_metadata"):
-> 1129 raise e.ag_error_metadata.to_exception(e)
1130 else:
1131 raise
ValueError: in user code:
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 878, in train_function *
return step_function(self, iterator)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 867, in step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 860, in run_step **
outputs = model.train_step(data)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 810, in train_step
y, y_pred, sample_weight, regularization_losses=self.losses)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/compile_utils.py", line 201, in __call__
loss_value = loss_obj(y_t, y_p, sample_weight=sw)
File "/usr/local/lib/python3.7/dist-packages/keras/losses.py", line 141, in __call__
losses = call_fn(y_true, y_pred)
File "/usr/local/lib/python3.7/dist-packages/keras/losses.py", line 245, in call **
return ag_fn(y_true, y_pred, **self._fn_kwargs)
File "/usr/local/lib/python3.7/dist-packages/keras/losses.py", line 1665, in categorical_crossentropy
y_true, y_pred, from_logits=from_logits, axis=axis)
File "/usr/local/lib/python3.7/dist-packages/keras/backend.py", line 4994, in categorical_crossentropy
target.shape.assert_is_compatible_with(output.shape)
ValueError: Shapes (32, 1) and (32, 10) are incompatible
What is going wrong here?
As @Dr. Snoopy already said, the shape of your labels is not correct.
After you split your data into train and test sets, you should make sure that your labels are properly one-hot encoded with the number of classes you want (in this case 10).
Simply put this after your split and it should work:
from tensorflow.keras.utils import to_categorical
y_train = to_categorical(y_train, 10)
y_test = to_categorical(y_test, 10)
y_train.shape
Output should be:
(60000, 10)
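Alternatively, if you prefer to keep the integer labels, switch to the sparse variant of the loss, which accepts integer class indices directly (a minimal sketch; everything else stays as in your code):
# No to_categorical needed: the sparse loss takes integer labels.
model.compile(loss="sparse_categorical_crossentropy")
model.fit(X_train, y_train)  # y_train keeps its shape (60000,)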
I am trying to evaluate a model using model.evaluate(X_test, Y_test), where X_test and Y_test are the test sets obtained from the CIFAR-10 dataset. While evaluating the model, I get this error, and I'm not sure why. Any help will be appreciated.
Node: 'mean_squared_error/SquaredDifference'
required broadcastable shapes
[[{{node mean_squared_error/SquaredDifference}}]] [Op:__inference_test_function_1521711]
The way that I'm obtaining X_test and Y_test is given below:
# import all required libraries
from keras.datasets import cifar10
from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, BatchNormalization
from keras.models import Model, Sequential
from keras.callbacks import EarlyStopping
from keras.callbacks import ModelCheckpoint
from sklearn.metrics import accuracy_score
from keras.models import load_model
from keras.losses import msle
import numpy as np # linear algebra
import matplotlib.pyplot as plt #visualization library
#load training and test dataset from cifar10 dataset
(X_train, Y_train), (X_test, Y_test) = cifar10.load_data()
# Unit normalizing
X_train = X_train.astype('float32')/255
X_test = X_test.astype('float32')/255
# Reshaping training and test datasets
X_train = X_train.reshape(len(X_train),X_train.shape[1],X_train.shape[2],3)
print(X_train.shape) #(50000, 32, 32, 3)
print(X_test.shape) #(10000, 32, 32, 3)
print(Y_test.shape) #(10000, 1)
def build_autoencoder(img_shape):
    # The encoder network
    encoder = Sequential()
    encoder.add(Conv2D(32, kernel_size=3, strides=1, padding='same', activation='relu', input_shape=img_shape)) # 32x32x32
    encoder.add(BatchNormalization()) # 32x32x32
    encoder.add(MaxPooling2D(2, padding='same')) # 16x16x32
    encoder.add(Conv2D(16, kernel_size=3, strides=1, padding='same', activation='relu')) # 16x16x16
    encoder.add(BatchNormalization()) # 16x16x16
    encoder.add(Conv2D(8, kernel_size=3, strides=1, padding='same', activation='relu')) # 16x16x8
    encoder.add(BatchNormalization()) # 16x16x8
    encoder.add(Conv2D(8, kernel_size=3, strides=1, padding='same', activation='relu')) # 16x16x8
    # The decoder network
    decoder = Sequential()
    decoder.add(Conv2D(32, kernel_size=3, strides=1, padding='same', activation='relu')) # 16x16x32
    decoder.add(BatchNormalization()) # 16x16x32
    decoder.add(UpSampling2D()) # 32x32x32
    decoder.add(Conv2D(16, kernel_size=3, strides=2, padding='same', activation='relu')) # 16x16x16
    decoder.add(BatchNormalization()) # 16x16x16
    decoder.add(UpSampling2D()) # 32x32x16
    decoder.add(Conv2D(16, kernel_size=3, strides=1, padding='same', activation='relu')) # 32x32x16
    decoder.add(BatchNormalization()) # 32x32x16
    decoder.add(Conv2D(3, kernel_size=1, strides=1, padding='same', activation='sigmoid')) # 32x32x3
    return encoder, decoder
IMG_SHAPE = X_train.shape[1:] #(32, 32, 3)
input_img = Input(shape=IMG_SHAPE) #create image input
encoder, decoder = build_autoencoder(IMG_SHAPE)
code = encoder(input_img) #encode image
reconstruction = decoder(code) #decoder image
autoencoder = Model(input_img,reconstruction) #create autoencoder model
autoencoder.compile(optimizer='adam', metrics=['accuracy'], loss='mean_squared_error') # compile model using adam optimizer and measure loss using mean_squared_error
print(autoencoder.summary())
Here's the summary of the model.
Model: "model"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) [(None, 32, 32, 3)] 0
sequential (Sequential) (None, 16, 16, 8) 7488
sequential_1 (Sequential) (None, 32, 32, 3) 9587
=================================================================
Total params: 17,075
Trainable params: 16,835
Non-trainable params: 240
_________________________________________________________________
autoencoder.fit(x=X_train, y=X_train,
                validation_data=[X_test, X_test], batch_size=32, epochs=200)
#evaluate the model
autoencoder.evaluate(X_test, Y_test)
The error that it throws:
---------------------------------------------------------------------------
InvalidArgumentError Traceback (most recent call last)
<ipython-input-29-976ff2f8b7a2> in <module>()
----> 1 autoencoder.evaluate(X_test, Y_test)
1 frames
/usr/local/lib/python3.7/dist-packages/tensorflow/python/eager/execute.py in quick_execute(op_name, num_outputs, inputs, attrs, ctx, name)
53 ctx.ensure_initialized()
54 tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
---> 55 inputs, attrs, num_outputs)
56 except core._NotOkStatusException as e:
57 if name is not None:
InvalidArgumentError: Graph execution error:
Detected at node 'mean_squared_error/SquaredDifference' defined at (most recent call last):
File "/usr/lib/python3.7/runpy.py", line 193, in _run_module_as_main
"__main__", mod_spec)
File "/usr/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py", line 16, in <module>
app.launch_new_instance()
File "/usr/local/lib/python3.7/dist-packages/traitlets/config/application.py", line 846, in launch_instance
app.start()
File "/usr/local/lib/python3.7/dist-packages/ipykernel/kernelapp.py", line 499, in start
self.io_loop.start()
File "/usr/local/lib/python3.7/dist-packages/tornado/platform/asyncio.py", line 132, in start
self.asyncio_loop.run_forever()
File "/usr/lib/python3.7/asyncio/base_events.py", line 541, in run_forever
self._run_once()
File "/usr/lib/python3.7/asyncio/base_events.py", line 1786, in _run_once
handle._run()
File "/usr/lib/python3.7/asyncio/events.py", line 88, in _run
self._context.run(self._callback, *self._args)
File "/usr/local/lib/python3.7/dist-packages/tornado/platform/asyncio.py", line 122, in _handle_events
handler_func(fileobj, events)
File "/usr/local/lib/python3.7/dist-packages/tornado/stack_context.py", line 300, in null_wrapper
return fn(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/zmq/eventloop/zmqstream.py", line 577, in _handle_events
self._handle_recv()
File "/usr/local/lib/python3.7/dist-packages/zmq/eventloop/zmqstream.py", line 606, in _handle_recv
self._run_callback(callback, msg)
File "/usr/local/lib/python3.7/dist-packages/zmq/eventloop/zmqstream.py", line 556, in _run_callback
callback(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/tornado/stack_context.py", line 300, in null_wrapper
return fn(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/ipykernel/kernelbase.py", line 283, in dispatcher
return self.dispatch_shell(stream, msg)
File "/usr/local/lib/python3.7/dist-packages/ipykernel/kernelbase.py", line 233, in dispatch_shell
handler(stream, idents, msg)
File "/usr/local/lib/python3.7/dist-packages/ipykernel/kernelbase.py", line 399, in execute_request
user_expressions, allow_stdin)
File "/usr/local/lib/python3.7/dist-packages/ipykernel/ipkernel.py", line 208, in do_execute
res = shell.run_cell(code, store_history=store_history, silent=silent)
File "/usr/local/lib/python3.7/dist-packages/ipykernel/zmqshell.py", line 537, in run_cell
return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/IPython/core/interactiveshell.py", line 2718, in run_cell
interactivity=interactivity, compiler=compiler, result=result)
File "/usr/local/lib/python3.7/dist-packages/IPython/core/interactiveshell.py", line 2828, in run_ast_nodes
if self.run_code(code, result):
File "/usr/local/lib/python3.7/dist-packages/IPython/core/interactiveshell.py", line 2882, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-29-976ff2f8b7a2>", line 1, in <module>
autoencoder.evaluate(X_test, Y_test)
File "/usr/local/lib/python3.7/dist-packages/keras/utils/traceback_utils.py", line 64, in error_handler
return fn(*args, **kwargs)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1716, in evaluate
tmp_logs = self.test_function(iterator)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1525, in test_function
return step_function(self, iterator)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1514, in step_function
outputs = model.distribute_strategy.run(run_step, args=(data,))
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1507, in run_step
outputs = model.test_step(data)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 1473, in test_step
self.compute_loss(x, y, y_pred, sample_weight)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/training.py", line 919, in compute_loss
y, y_pred, sample_weight, regularization_losses=self.losses)
File "/usr/local/lib/python3.7/dist-packages/keras/engine/compile_utils.py", line 201, in __call__
loss_value = loss_obj(y_t, y_p, sample_weight=sw)
File "/usr/local/lib/python3.7/dist-packages/keras/losses.py", line 141, in __call__
losses = call_fn(y_true, y_pred)
File "/usr/local/lib/python3.7/dist-packages/keras/losses.py", line 245, in call
return ag_fn(y_true, y_pred, **self._fn_kwargs)
File "/usr/local/lib/python3.7/dist-packages/keras/losses.py", line 1329, in mean_squared_error
return backend.mean(tf.math.squared_difference(y_pred, y_true), axis=-1)
Node: 'mean_squared_error/SquaredDifference'
required broadcastable shapes
[[{{node mean_squared_error/SquaredDifference}}]] [Op:__inference_test_function_1521711]
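A likely cause, judging from the code above: the autoencoder is trained to reconstruct images (fit is called with y=X_train), but evaluate is called with the class labels Y_test of shape (10000, 1), while the model outputs reconstructions of shape (10000, 32, 32, 3). mean_squared_error cannot broadcast these two shapes, hence "required broadcastable shapes". A minimal sketch of the fix, assuming the goal is to measure reconstruction error:
# The targets for an autoencoder are the images themselves,
# not the class labels.
autoencoder.evaluate(X_test, X_test)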
I was working on a speech emotion recognition project. It worked a week ago, but after I upgraded Anaconda the code I used to run no longer works, and I couldn't find the problem.
It gives the error in the title. My code is:
# New model
model = Sequential()
model.add(Conv1D(256, 8, padding='same',input_shape=(X_train.shape[1],1))) # X_train.shape[1] = No. of Columns
model.add(Activation('relu'))
model.add(Conv1D(256, 8, padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.25))
model.add(MaxPooling1D(pool_size=(8)))
model.add(Conv1D(128, 8, padding='same'))
model.add(Activation('relu'))
model.add(Conv1D(128, 8, padding='same'))
model.add(Activation('relu'))
model.add(Conv1D(128, 8, padding='same'))
model.add(Activation('relu'))
model.add(Conv1D(128, 8, padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.25))
model.add(MaxPooling1D(pool_size=(8)))
model.add(Conv1D(64, 8, padding='same'))
model.add(Activation('relu'))
model.add(Conv1D(64, 8, padding='same'))
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dense(14)) # Target class number
model.add(Activation('softmax'))
# opt = keras.optimizers.SGD(lr=0.0001, momentum=0.0, decay=0.0, nesterov=False)
# opt = keras.optimizers.Adam(lr=0.0001)
opt = tf.keras.optimizers.RMSprop(lr=0.00001, decay=1e-6)
model.summary()
# %%
model.compile(loss='categorical_crossentropy', optimizer=opt,metrics=['accuracy'])
model_history=model.fit(X_train, y_train, batch_size=16, epochs=100, validation_data=(X_test, y_test))
and I get this error:
ValueError: in user code:
C:\Users\oguz_\anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py:806 train_function *
return step_function(self, iterator)
C:\Users\oguz_\anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py:796 step_function **
outputs = model.distribute_strategy.run(run_step, args=(data,))
C:\Users\oguz_\anaconda3\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:1211 run
return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
C:\Users\oguz_\anaconda3\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:2585 call_for_each_replica
return self._call_for_each_replica(fn, args, kwargs)
C:\Users\oguz_\anaconda3\lib\site-packages\tensorflow\python\distribute\distribute_lib.py:2945 _call_for_each_replica
return fn(*args, **kwargs)
C:\Users\oguz_\anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py:789 run_step **
outputs = model.train_step(data)
C:\Users\oguz_\anaconda3\lib\site-packages\tensorflow\python\keras\engine\training.py:748 train_step
loss = self.compiled_loss(
C:\Users\oguz_\anaconda3\lib\site-packages\tensorflow\python\keras\engine\compile_utils.py:204 __call__
loss_value = loss_obj(y_t, y_p, sample_weight=sw)
C:\Users\oguz_\anaconda3\lib\site-packages\tensorflow\python\keras\losses.py:149 __call__
losses = ag_call(y_true, y_pred)
C:\Users\oguz_\anaconda3\lib\site-packages\tensorflow\python\keras\losses.py:253 call **
return ag_fn(y_true, y_pred, **self._fn_kwargs)
C:\Users\oguz_\anaconda3\lib\site-packages\tensorflow\python\util\dispatch.py:201 wrapper
return target(*args, **kwargs)
C:\Users\oguz_\anaconda3\lib\site-packages\tensorflow\python\keras\losses.py:1535 categorical_crossentropy
return K.categorical_crossentropy(y_true, y_pred, from_logits=from_logits)
C:\Users\oguz_\anaconda3\lib\site-packages\tensorflow\python\util\dispatch.py:201 wrapper
return target(*args, **kwargs)
C:\Users\oguz_\anaconda3\lib\site-packages\tensorflow\python\keras\backend.py:4687 categorical_crossentropy
target.shape.assert_is_compatible_with(output.shape)
C:\Users\oguz_\anaconda3\lib\site-packages\tensorflow\python\framework\tensor_shape.py:1134 assert_is_compatible_with
raise ValueError("Shapes %s and %s are incompatible" % (self, other))
ValueError: Shapes (None, 15) and (None, 14) are incompatible
The rest of my related code is:
def print_confusion_matrix(confusion_matrix, class_names, figsize=(10, 7), fontsize=15):
    df_cm = pd.DataFrame(
        confusion_matrix, index=class_names, columns=class_names,
    )
    fig = plt.figure(figsize=figsize)
    try:
        heatmap = sns.heatmap(df_cm, annot=True, fmt="d")
    except ValueError:
        raise ValueError("Confusion matrix values must be integers.")
    heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(), rotation=0, ha='right', fontsize=fontsize)
    heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(), rotation=45, ha='right', fontsize=fontsize)
    plt.ylabel('Doğru')
    plt.xlabel('Tahmin Edilen')

# Gender recode function
def gender(row):
    if row == 'kadin_igrenme' or 'kadin_korku' or 'kadin_mutlu' or 'kadin_uzgun' or 'kadin_saskin' or 'kadin_sakin':
        return 'kadin'
    elif row == 'erkek_kizgin' or 'erkek_korku' or 'erkek_mutlu' or 'erkek_uzgun' or 'erkek_saskin' or 'erkek_sakin' or 'erkek_igrenme':
        return 'erkek'
Can anyone help me?
Edit: I added the X_train and y_train shapes:
print(X_train.shape)
print(y_train.shape)
print(y_test.shape)
print(X_test.shape)
print(lb.classes_)
#print(y_train[0:10])
#print(y_test[0:10])
(9031, 216, 1)
(9031, 15)
(3011, 15)
(3011, 216, 1)
Screenshot: https://i.stack.imgur.com/LKZDB.png
The issue is with the network's output shape. Since the labels have shape
(b, 15), where b = 9031 for train and 3011 for test,
the final Dense layer in the network should also have 15 neurons. Update the final layer to
model.add(Dense(15))
and it should work fine.
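To keep the label encoding and the output layer from drifting apart again, you can derive the number of output units directly from the encoded labels (a small sketch, assuming y_train is the one-hot array whose shape you printed above):
num_classes = y_train.shape[1]   # 15 here
model.add(Dense(num_classes))    # target class count taken from the labels
model.add(Activation('softmax'))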
I'm trying to combine CausalConv1d with Conv2d as the encoder of my VAE, but I got this error, which is produced in the encoder part. The CausalConv1d is implemented with an nn.Conv1d network, so it should only have a 3-dimensional weight; why does the error say a 4-dimensional weight was expected? And I have another question: why can't I use a single int, only a tuple, in PyCharm when I set the kernel_size, stride, etc. parameters of a conv layer, although the official documentation says both int and tuple are valid? Here is the traceback:
Traceback (most recent call last):
File "training.py", line 94, in <module>
training(args)
File "training.py", line 21, in training
summary(model, torch.zeros(64, 1, 784), show_input=True, show_hierarchical=False)
File "C:\Anaconda\envs\vae_reservior_computing\lib\site-packages\pytorch_model_summary\model_summary.py", line 118, in summary
model(*inputs)
File "C:\Anaconda\envs\vae_reservior_computing\lib\site-packages\torch\nn\modules\module.py", line 889, in _call_impl
result = self.forward(*input, **kwargs)
File "F:\VAE_reservoir_computing\VAE.py", line 34, in forward
mu, log_var = self.encoder.forward(image)
File "F:\VAE_reservoir_computing\CausalCnn_Reservoir.py", line 30, in forward
x = self.conv1d(x.view(x.shape[0], 1, 784))
File "C:\Anaconda\envs\vae_reservior_computing\lib\site-packages\torch\nn\modules\module.py", line 889, in _call_impl
result = self.forward(*input, **kwargs)
File "C:\Anaconda\envs\vae_reservior_computing\lib\site-packages\torch\nn\modules\container.py", line 119, in forward
input = module(input)
File "C:\Anaconda\envs\vae_reservior_computing\lib\site-packages\torch\nn\modules\module.py", line 889, in _call_impl
result = self.forward(*input, **kwargs)
File "F:\VAE_reservoir_computing\CausalConv1d.py", line 21, in forward
conv1d_out = self.conv1d(x)
File "C:\Anaconda\envs\vae_reservior_computing\lib\site-packages\torch\nn\modules\module.py", line 889, in _call_impl
result = self.forward(*input, **kwargs)
File "C:\Anaconda\envs\vae_reservior_computing\lib\site-packages\torch\nn\modules\conv.py", line 263, in forward
return self._conv_forward(input, self.weight, self.bias)
File "C:\Anaconda\envs\vae_reservior_computing\lib\site-packages\torch\nn\modules\conv.py", line 260, in _conv_forward
self.padding, self.dilation, self.groups)
RuntimeError: Expected 4-dimensional input for 4-dimensional weight [256, 1, 3, 3], but got 3-dimensional input of size [64, 1, 786] instead
Here is the code of my Encoder-Decoder part:
import torch
import torch.nn as nn
from CausalConv1d import CausalConv1d
class CausalReservoirEncoder(nn.Module):
    def __init__(self, in_channels, out_channels, num_filters, z_dim, *args):
        super(CausalReservoirEncoder, self).__init__()
        self.num_filters = num_filters
        self.z_dim = z_dim
        hidden_filters = num_filters
        self.Conv1d = nn.Sequential(
            CausalConv1d(in_channels, out_channels, kernel_size=(3,3), dilation=1, A=False),
            nn.LeakyReLU()
        )
        for p in self.parameters():
            p.requires_grad = False
        self.encoder = nn.Sequential(
            nn.Conv2d(out_channels, self.num_filters, kernel_size=(4, 4), padding=(1, 1), stride=(2, 2)), # 28x28 -> 14x14
            nn.LeakyReLU(),
            nn.Conv2d(hidden_filters, 2 * hidden_filters, kernel_size=(4, 4), padding=(1, 1), stride=(2, 2)), # 14x14 -> 7x7
            nn.LeakyReLU(),
            nn.Flatten(),
            nn.Linear(2*hidden_filters*7*7, self.z_dim)
        )

    def forward(self, x):
        x = self.Conv1d(x.view(x.shape[0], 1, 784))
        h_e = self.encoder(x.view(x.shape[0], -1, 28, 28))
        mu, log_var = torch.chunk(h_e, 2, dim=1)
        return mu, log_var

class CausalReservoirDecoder(nn.Module):
    def __init__(self, z_dim, out_channels, num_filters, **kwargs):
        super(CausalReservoirDecoder, self).__init__()
        self.z_dim = z_dim
        self.num_filters = num_filters
        hidden_filters = num_filters
        self.linear = nn.Sequential(
            nn.Linear(self.z_dim, 2*hidden_filters*7*7),
            nn.LeakyReLU()
        )
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(2*hidden_filters, hidden_filters, kernel_size=(4,4), padding=(1,1), stride=(2,2)), # 7x7 -> 14x14
            nn.LeakyReLU(),
            nn.ConvTranspose2d(hidden_filters, out_channels, kernel_size=(4,4), padding=(1,1), stride=(2,2)), # 14x14 -> 28x28
            nn.Sigmoid()
        )

    def forward(self, z):
        x = self.linear(z)
        x = x.reshape(x.shape[0], -1, 7, 7)
        x_recon = self.decoder(x)
        return x_recon
Here is the implementation of the CausalConv1d:
import torch.nn as nn
import torch.nn.functional as F
class CausalConv1d(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, dilation, A=False, *args, **kwargs):
        super(CausalConv1d, self).__init__()
        self.kernel_size = kernel_size
        self.dilation = dilation
        self.A = A
        self.padding = (kernel_size[0] - 1) * dilation + A * 1
        self.conv1d = nn.Conv1d(in_channels, out_channels, self.kernel_size, stride=(1,1), padding=(0,0), dilation=dilation, **kwargs)

    def forward(self, x):
        x = F.pad(x, (self.padding, 0))
        conv1d_out = self.conv1d(x)
        if self.A:
            return conv1d_out[:, :, :-1]
        else:
            return conv1d_out
Can anyone give me some suggestions?
I know this may not be intuitive, but when you use a 2-dimensional kernel_size (e.g. (3, 3)), your Conv1d gets 4-dimensional weights. Therefore, to solve your issue, you must change from:
CausalConv1d(in_channels,out_channels,kernel_size=(3,3),dilation=1,A=False),
to:
CausalConv1d(in_channels, out_channels, kernel_size=3, dilation=1, A=False),
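You can see the effect in isolation with a plain nn.Conv1d (a minimal sketch, independent of the rest of your model):
import torch.nn as nn

# An int kernel_size builds the expected 3-D weight for Conv1d.
conv_ok = nn.Conv1d(1, 256, kernel_size=3)
print(conv_ok.weight.shape)   # torch.Size([256, 1, 3])

# A 2-tuple is passed through unchanged, so the weight silently
# becomes 4-D -- exactly the [256, 1, 3, 3] in the error message.
conv_bad = nn.Conv1d(1, 256, kernel_size=(3, 3))
print(conv_bad.weight.shape)  # torch.Size([256, 1, 3, 3])
Two follow-up adjustments are likely needed inside CausalConv1d as well: with an int kernel_size, the kernel_size[0] in the padding computation should become plain kernel_size, and for the same reason the stride=(1,1) and padding=(0,0) tuples passed to nn.Conv1d are safer as single ints (stride=1, padding=0).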
I am training an MNIST CNN. When I run my code, this problem comes up. I tried other answers, but they did not work. I am new to TensorFlow, so can someone explain this error to me? I am using PyCharm 2020.2 and Python 3.6 in Anaconda, and I could not find any help. Here is my code:
import tensorflow as tf
from tensorflow.keras.models import Sequential
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = tf.keras.utils.normalize(x_train, axis=1)
x_test = tf.keras.utils.normalize(x_train, axis=1)
model = Sequential()
model.add(tf.keras.layers.Dense(256))
model.add(tf.keras.layers.Conv1D(kernel_size=4, strides=1, filters=4, activation="relu"))
model.add(tf.keras.layers.Conv1D(kernel_size=3, strides=1, activation="relu", filters=3))
model.add(tf.keras.layers.Dense(128, activation="relu"))
model.add(tf.keras.layers.Conv1D(kernel_size=2, filters=2, strides=1, activation="relu"))
model.add(tf.keras.layers.Conv1D(kernel_size=1, filters=1, strides=1, activation="relu"))
model.add(tf.keras.layers.Dense(64, activation="relu"))
model.add(tf.keras.layers.MaxPool1D(pool_size=2, strides=1))
model.add(tf.keras.layers.Dense(256, activation="relu"))
model.add(tf.keras.layers.Conv1D(kernel_size=4, filters=4, strides=1, activation="relu"))
model.add(tf.keras.layers.Conv1D(kernel_size=3, filters=3, strides=1, activation="relu"))
model.add(tf.keras.layers.MaxPool1D(pool_size=2, strides=1))
model.add(tf.keras.layers.Dropout(0.2))
model.add(tf.keras.layers.Dense(128, activation="relu"))
model.add(tf.keras.layers.Conv1D(kernel_size=2, filters=2, strides=1, activation="relu"))
model.add(tf.keras.layers.Conv1D(kernel_size=1, filters=1, strides=1, activation="relu"))
model.add(tf.keras.layers.Dense(64, activation="relu"))
model.add(tf.keras.layers.Dense(16, activation="softmax"))
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x=x_train, y=y_train, batch_size=64, epochs=5, shuffle=True, validation_split=0.1)
model.summary()
It gives this error:
Train on 54000 samples, validate on 6000 samples
Epoch 1/5
2020-09-09 15:16:16.953428: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library cublas64_10.dll
2020-09-09 15:16:17.146701: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library cudnn64_7.dll
2020-09-09 15:16:17.741916: W tensorflow/stream_executor/gpu/redzone_allocator.cc:312] Internal: Invoking GPU asm compilation is supported on Cuda non-Windows platforms only
Relying on driver to perform ptx compilation. This message will be only logged once.
2020-09-09 15:16:18.085250: W tensorflow/core/common_runtime/base_collective_executor.cc:217] BaseCollectiveExecutor::StartAbort Invalid argument: assertion failed: [Condition x == y did not hold element-wise:] [x (loss/output_1_loss/SparseSoftmaxCrossEntropyWithLogits/Shape_1:0) = ] [64 1] [y (loss/output_1_loss/SparseSoftmaxCrossEntropyWithLogits/strided_slice:0) = ] [64 14]
[[{{node loss/output_1_loss/SparseSoftmaxCrossEntropyWithLogits/assert_equal_1/Assert/Assert}}]]
64/54000 [..............................] - ETA: 39:34Traceback (most recent call last):
File "F:\anaconda\envs\tensorflow1\lib\site-packages\IPython\core\interactiveshell.py", line 3331, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-2-d2317d03e1c1>", line 1, in <module>
runfile('F:/Pycharm_projects/my_fun_project/Fake or real news/fake-or-real-news/bitcoin.py', wdir='F:/Pycharm_projects/my_fun_project/Fake or real news/fake-or-real-news')
File "C:\Program Files\JetBrains\PyCharm Community Edition 2019.3.3\plugins\python-ce\helpers\pydev\_pydev_bundle\pydev_umd.py", line 197, in runfile
pydev_imports.execfile(filename, global_vars, local_vars) # execute the script
File "C:\Program Files\JetBrains\PyCharm Community Edition 2019.3.3\plugins\python-ce\helpers\pydev\_pydev_imps\_pydev_execfile.py", line 18, in execfile
exec(compile(contents+"\n", file, 'exec'), glob, loc)
File "F:/Pycharm_projects/my_fun_project/Fake or real news/fake-or-real-news/bitcoin.py", line 41, in <module>
model.fit(x=x_train, y=y_train, batch_size=64, epochs=5, shuffle=True, validation_split=0.1)
File "F:\anaconda\envs\tensorflow1\lib\site-packages\tensorflow_core\python\keras\engine\training.py", line 819, in fit
use_multiprocessing=use_multiprocessing)
File "F:\anaconda\envs\tensorflow1\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py", line 342, in fit
total_epochs=epochs)
File "F:\anaconda\envs\tensorflow1\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py", line 128, in run_one_epoch
batch_outs = execution_function(iterator)
File "F:\anaconda\envs\tensorflow1\lib\site-packages\tensorflow_core\python\keras\engine\training_v2_utils.py", line 98, in execution_function
distributed_function(input_fn))
File "F:\anaconda\envs\tensorflow1\lib\site-packages\tensorflow_core\python\eager\def_function.py", line 568, in __call__
result = self._call(*args, **kwds)
File "F:\anaconda\envs\tensorflow1\lib\site-packages\tensorflow_core\python\eager\def_function.py", line 632, in _call
return self._stateless_fn(*args, **kwds)
File "F:\anaconda\envs\tensorflow1\lib\site-packages\tensorflow_core\python\eager\function.py", line 2363, in __call__
return graph_function._filtered_call(args, kwargs) # pylint: disable=protected-access
File "F:\anaconda\envs\tensorflow1\lib\site-packages\tensorflow_core\python\eager\function.py", line 1611, in _filtered_call
self.captured_inputs)
File "F:\anaconda\envs\tensorflow1\lib\site-packages\tensorflow_core\python\eager\function.py", line 1692, in _call_flat
ctx, args, cancellation_manager=cancellation_manager))
File "F:\anaconda\envs\tensorflow1\lib\site-packages\tensorflow_core\python\eager\function.py", line 545, in call
ctx=ctx)
File "F:\anaconda\envs\tensorflow1\lib\site-packages\tensorflow_core\python\eager\execute.py", line 67, in quick_execute
six.raise_from(core._status_to_exception(e.code, message), None)
File "<string>", line 3, in raise_from
tensorflow.python.framework.errors_impl.InvalidArgumentError: assertion failed: [Condition x == y did not hold element-wise:] [x (loss/output_1_loss/SparseSoftmaxCrossEntropyWithLogits/Shape_1:0) = ] [64 1] [y (loss/output_1_loss/SparseSoftmaxCrossEntropyWithLogits/strided_slice:0) = ] [64 14]
[[node loss/output_1_loss/SparseSoftmaxCrossEntropyWithLogits/assert_equal_1/Assert/Assert (defined at F:/Pycharm_projects/my_fun_project/Fake or real news/fake-or-real-news/bitcoin.py:41) ]] [Op:__inference_distributed_function_2970]
Function call stack:
distributed_function
The error occurs because your output shape and your label shape don't match.
Your model outputs (batch_size, 14, 16): the Conv1D/MaxPool1D stack keeps a length-14 time dimension, so the network produces one 16-way prediction per time step, while with sparse_categorical_crossentropy the labels are single integers per sample, shape (batch_size, 1). That is exactly the [64 1] vs [64 14] mismatch in the assertion.
To fix this, add a Flatten layer before your final Dense layers.
Code:
model = Sequential()
model.add(tf.keras.layers.Dense(256, input_shape = (28,28)))
model.add(tf.keras.layers.Conv1D(kernel_size=4, strides=1, filters=4, activation="relu"))
model.add(tf.keras.layers.Conv1D(kernel_size=3, strides=1, activation="relu", filters=3))
model.add(tf.keras.layers.Dense(128, activation="relu"))
model.add(tf.keras.layers.Conv1D(kernel_size=2, filters=2, strides=1, activation="relu"))
model.add(tf.keras.layers.Conv1D(kernel_size=1, filters=1, strides=1, activation="relu"))
model.add(tf.keras.layers.Dense(64, activation="relu"))
model.add(tf.keras.layers.MaxPool1D(pool_size=2, strides=1))
model.add(tf.keras.layers.Dense(256, activation="relu"))
model.add(tf.keras.layers.Conv1D(kernel_size=4, filters=4, strides=1, activation="relu"))
model.add(tf.keras.layers.Conv1D(kernel_size=3, filters=3, strides=1, activation="relu"))
model.add(tf.keras.layers.MaxPool1D(pool_size=2, strides=1))
model.add(tf.keras.layers.Dropout(0.2))
model.add(tf.keras.layers.Dense(128, activation="relu"))
model.add(tf.keras.layers.Conv1D(kernel_size=2, filters=2, strides=1, activation="relu"))
model.add(tf.keras.layers.Conv1D(kernel_size=1, filters=1, strides=1, activation="relu"))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(64, activation="relu"))
model.add(tf.keras.layers.Dense(16, activation="softmax"))
With the Flatten layer in place, the Conv1D output is collapsed into a single vector per sample before the final Dense layers, so the output shape matches the labels and the model will train without any issues.
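As a quick sanity check before fitting (a small sketch, assuming the corrected model above), confirm that the model ends in a single vector per sample and that its size covers every label value:
print(model.output_shape)            # (None, 16)
print(y_train.min(), y_train.max())  # 0 9 -- so 16 output units cover all classes (10 would also do)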
I am getting an IndexError when running the following code:
import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
import tflearn.datasets.mnist as mnist
X, Y, test_x, test_y = mnist.load_data(one_hot=True)
X = X.reshape([-1, 28, 28, 1])
test_x = X.reshape([-1, 28, 28, 1])
convnet = input_data(shape=[None, 28, 28, 1], name='input')
convnet = conv_2d(convnet, 32, 2, activation='relu')
convnet = max_pool_2d(convnet,2)
convnet = conv_2d(convnet, 64, 2, activation='relu')
convnet = max_pool_2d(convnet,2)
convnet = fully_connected(convnet, 1024, activation='relu')
convnet = dropout(convnet, 0.8)
convnet = fully_connected(convnet, 10, activation='softmax')
convnet = regression(convnet, optimizer='adam', learning_rate=0.01,
                     loss='categorical_crossentropy', name='targets')
model = tflearn.DNN(convnet)
model.fit({'input': X}, {'targets': Y}, n_epoch=10,
          validation_set=({'input': test_x}, {'targets': test_y}),
          snapshot_step=500, show_metric=True, run_id='mnist')
model.save('tflearncnn.model')
I cannot figure out why the index goes beyond the 0-9999 range (size 10000), as I am not sure where the error is occurring.
here is the error in my Terminal:
---------------------------------
Run id: mnist
Log directory: /tmp/tflearn_logs/
---------------------------------
Training samples: 55000
Validation samples: 55000
--
Exception in thread Thread-5:oss: 0.13790 | time: 29.813s
Traceback (most recent call last):0 - acc: 0.9592 -- iter: 31936/55000
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/usr/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/home/usr/.local/lib/python3.6/site-packages/tflearn/data_flow.py", line 187, in fill_feed_dict_queue
data = self.retrieve_data(batch_ids)
File "/home/usr/.local/lib/python3.6/site-packages/tflearn/data_flow.py", line 222, in retrieve_data
utils.slice_array(self.feed_dict[key], batch_ids)
File "/home/usr/.local/lib/python3.6/site-packages/tflearn/utils.py", line 187, in slice_array
return X[start]
IndexError: index 10000 is out of bounds for axis 0 with size 10000
This happens when a new epoch is supposed to start, as shown when step 499 is reached:
---------------------------------
Run id: mnist
Log directory: /tmp/tflearn_logs/
---------------------------------
Training samples: 55000
Validation samples: 55000
--
Training Step: 499 | total loss: 0.12698 | time: 27.880s
| Adam | epoch: 001 | loss: 0.12698 - acc: 0.9616 -- iter: 31936/55000
I have tried the following:
- changing the size of snapshot_step
- changing the size of n_units in fully_connected()
- changing the nb_filter in conv_2d
This is just a typo on your side. You wrote
test_x = X.reshape([-1, 28, 28, 1])
but it should be
test_x = test_x.reshape([-1, 28, 28, 1])
Because of the typo, test_x ends up holding the 55000 reshaped training images while test_y still has only 10000 labels, which is why validation fails with index 10000 out of bounds.
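This kind of mismatch is easy to catch up front (a small sketch, assuming the corrected reshape above): features and labels must pair up one-to-one.
assert X.shape[0] == Y.shape[0]            # 55000 == 55000
assert test_x.shape[0] == test_y.shape[0]  # 10000 == 10000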