I am learning about neural networks through Kaggle tutorials. I have built a neural net to predict concrete strength, and I want to display the MSE metric (for starters) after fitting the model. I have failed both with print(metrics) and with plotting the metrics (it displays an empty graph).
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from matplotlib import pyplot

df = concrete.copy()
df_train = df.sample(frac=0.7, random_state=0)
df_valid = df.drop(df_train.index)
X_train = df_train.drop('CompressiveStrength', axis=1)
X_valid = df_valid.drop('CompressiveStrength', axis=1)
y_train = df_train['CompressiveStrength']
y_valid = df_valid['CompressiveStrength']

# assumed: input_shape and early_stopping are defined elsewhere in the notebook, e.g.
input_shape = [X_train.shape[1]]
early_stopping = keras.callbacks.EarlyStopping(patience=20, restore_best_weights=True)
model = keras.Sequential([
    layers.BatchNormalization(),
    layers.Dense(512, activation='relu', input_shape=input_shape),
    layers.BatchNormalization(),
    layers.Dense(512, activation='relu'),
    layers.Dropout(rate=0.3),  # apply 30% dropout to the next layer
    layers.Dense(512, activation='relu'),
    layers.BatchNormalization(),
    layers.Dense(512, activation='relu'),
    layers.BatchNormalization(),
    layers.Dense(1),
])
model.compile(
    optimizer='sgd',  # SGD is more sensitive to differences of scale
    loss='mse',
    metrics=[tf.keras.metrics.MeanSquaredError()],
)
history = model.fit(
    X_train, y_train,
    validation_data=(X_valid, y_valid),
    batch_size=64,
    epochs=100,
    verbose=0,
    callbacks=[early_stopping],
)
print(history)
pyplot.plot(history.history['mean_squared_error'])
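What fit returns is a History object; the per-epoch numbers live in its .history dict, and with verbose=0 nothing is printed during training. A minimal sketch of retrieving and plotting the logged MSE, assuming the fit call above has run:

print(history.history.keys())
# e.g. dict_keys(['loss', 'mean_squared_error', 'val_loss', 'val_mean_squared_error'])

pyplot.plot(history.history['mean_squared_error'], label='training MSE')
pyplot.plot(history.history['val_mean_squared_error'], label='validation MSE')
pyplot.xlabel('epoch')
pyplot.ylabel('MSE')
pyplot.legend()
pyplot.show()  # without an explicit show(), some environments display an empty figure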
I wanted to use a CNN for classification of my dataset, which is numerical and of size 3200x36. Whenever I ran the following code on my data, I did not get any result: for the accuracy, it just runs but does not output anything. What did I do wrong? Please explain.
x_train, x_test, y_train, y_test = train_test_split(feature, target, train_size=0.75)
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
model = Sequential()
model.add(Conv2D(filters=64, kernel_size=3, activation='relu'))
model.add(MaxPooling2D(pool_size=4))
model.add(Dense(3, activation='softmax'))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=100, batch_size=8149, verbose=1)
accuracy = model.evaluate(x_test, y_test)
print( model.evaluate(x_test, y_test))
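A sketch of one way to get this running, assuming feature is the 3200x36 array and target holds 3 classes. Conv2D expects 4-D image batches, so for tabular rows it is simpler to reshape each row to (36, 1) and use Conv1D; a Flatten is needed before the softmax head so the output shape matches the one-hot labels; and the batch size should not exceed the 2400 training rows:

import numpy as np
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv1D, MaxPooling1D, Flatten, Dense
from tensorflow.keras.utils import to_categorical

x = np.asarray(feature).reshape(-1, 36, 1)  # Conv1D wants (samples, steps, channels)
x_train, x_test, y_train, y_test = train_test_split(x, target, train_size=0.75)
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)

model = Sequential([
    Conv1D(filters=64, kernel_size=3, activation='relu', input_shape=(36, 1)),
    MaxPooling1D(pool_size=4),
    Flatten(),  # collapse to a vector before the classifier head
    Dense(3, activation='softmax'),
])
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=100, batch_size=32, verbose=1)  # batch_size=8149 exceeded the dataset

loss, accuracy = model.evaluate(x_test, y_test)  # evaluate returns [loss, accuracy]
print('test accuracy:', accuracy)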
MODEL CNN
# imports assumed by this snippet
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Embedding, Conv1D, MaxPooling1D, Dropout, Flatten, Dense
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping, ModelCheckpoint

# create a list of the target columns
target_cols = [y_toxic, y_severe_toxic, y_obscene, y_threat, y_insult, y_identity_hate]
preds = []
for col in target_cols:
    print('\n')
    # set the value of y
    y = col
    # create a stratified split
    X_train, X_eval, y_train, y_eval = train_test_split(X, y, test_size=0.25, shuffle=True,
                                                        random_state=5, stratify=y)
    # cnn model
    model = Sequential()
    e = Embedding(189722, 100, weights=[embedding_matrix],
                  input_length=500, trainable=False)
    model.add(e)
    model.add(Conv1D(128, 3, activation='relu'))
    model.add(MaxPooling1D(3))
    model.add(Dropout(0.2))
    model.add(Conv1D(64, 3, activation='relu'))
    model.add(MaxPooling1D(3))
    model.add(Dropout(0.2))
    model.add(Conv1D(64, 3, activation='relu'))
    model.add(Dropout(0.2))
    model.add(Flatten())
    model.add(Dense(1, activation='sigmoid'))
    # compile the model
    Adam_opt = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    model.compile(optimizer=Adam_opt, loss='binary_crossentropy', metrics=['acc'])
    early_stopping = EarlyStopping(monitor='val_loss', patience=5, mode='min')
    save_best = ModelCheckpoint('toxic.hdf', save_best_only=True,
                                monitor='val_loss', mode='min')
    history = model.fit(X_train, y_train, validation_data=(X_eval, y_eval),
                        epochs=100, verbose=1, callbacks=[early_stopping, save_best])
    # make a prediction on y (target column)
    model.load_weights(filepath='toxic.hdf')
    predictions = model.predict(X_test)
    y_preds = predictions[:, 0]
    # append the prediction to a python list
    preds.append(y_preds)
Can someone explain why this is happening? I tried to reshape the input, but I still get the same error.
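As an aside, the six per-label loops above can be folded into one multilabel model with one sigmoid per label; a sketch, assuming X, the y_* vectors, and embedding_matrix are as in the loop above:

import numpy as np
from keras.models import Sequential
from keras.layers import Embedding, Conv1D, MaxPooling1D, Flatten, Dense

# stack the six 0/1 label vectors into one (samples, 6) matrix
Y = np.stack([y_toxic, y_severe_toxic, y_obscene,
              y_threat, y_insult, y_identity_hate], axis=1)

model = Sequential()
model.add(Embedding(189722, 100, weights=[embedding_matrix],
                    input_length=500, trainable=False))
model.add(Conv1D(128, 3, activation='relu'))
model.add(MaxPooling1D(3))
model.add(Flatten())
model.add(Dense(6, activation='sigmoid'))  # one independent sigmoid per label
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
model.fit(X, Y, validation_split=0.25, epochs=100, verbose=1)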
I'm using Keras to perform multilabel classification, with 'binary_crossentropy' as the loss function, metrics=['accuracy'], and 'sigmoid' as the activation.
During training I see accuracy above 90%, and evaluate on the test set reports something similar.
But if I manually compute the accuracy from the output of predict, the accuracy drops dramatically to about 45%, even on the training set.
This is the model:
import keras

model = keras.models.Sequential()
model.add(keras.layers.Conv2D(8, kernel_size=3, strides=2, activation='relu', input_shape=(N_qubits, N_qubits,2)))
model.add(keras.layers.Conv2D(16, kernel_size=2, activation='relu'))
model.add(keras.layers.Dense(64, activation='relu'))
model.add(keras.layers.Dense(128, activation='relu'))
model.add(keras.layers.Conv2D(32, kernel_size=1, activation='relu'))
model.add(keras.layers.GlobalMaxPooling2D())
model.add(keras.layers.Dense(units=y_train.shape[1], activation='sigmoid'))
adam_optimizer = keras.optimizers.Adam()
model.compile(loss='binary_crossentropy', optimizer=adam_optimizer, metrics=['accuracy'])
history = model.fit(X_train, y_train, batch_size=150, epochs=1000, verbose=1, validation_split=0.05)
Here is where I use evaluate():
results = model.evaluate(X_test, y_test, batch_size=128)
And here is where I use predict():
y_pred_train = model.predict(X_train)
y_pred_test = model.predict(X_test)
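This gap usually comes from how the accuracy is computed by hand. With binary_crossentropy, Keras's 'accuracy' is per-label binary accuracy: each sigmoid output is thresholded at 0.5 and compared to its own label, averaged over every label of every sample. Computing exact-match accuracy (all labels correct at once) or taking an argmax will read much lower. A sketch of reproducing both from predict output, assuming y_train holds 0/1 label rows:

import numpy as np

y_pred_train = model.predict(X_train)

# per-label binary accuracy: what fit()/evaluate() report with binary_crossentropy
per_label_acc = np.mean((y_pred_train > 0.5) == (y_train > 0.5))
print('per-label accuracy:', per_label_acc)

# exact-match ("subset") accuracy: every label must be right for a sample to count
subset_acc = np.mean(np.all((y_pred_train > 0.5) == (y_train > 0.5), axis=1))
print('exact-match accuracy:', subset_acc)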
I am new to ML. I receive an error when I try to fit my model. I am trying to train a cat classifier.
Defining the new model
# imports assumed by this snippet
import tensorflow as tf
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.preprocessing.image import ImageDataGenerator

model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(512, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')
])

model.compile(loss='binary_crossentropy',
              optimizer=RMSprop(lr=1e-4),
              metrics=['acc'])
# All images will be rescaled by 1./255
train_datagen = ImageDataGenerator(rescale=1./255)
test_datagen = ImageDataGenerator(rescale=1./255)
# Flow training images in batches of 20 using train_datagen generator
train_generator = train_datagen.flow_from_directory(
    train_dir,  # This is the source directory for training images
    target_size=(250, 250),  # All images will be resized to 150x150
    batch_size=20,
    # Since we use binary_crossentropy loss, we need binary labels
    class_mode='binary')
# Flow validation images in batches of 20 using test_datagen generator
validation_generator = test_datagen.flow_from_directory(
    validation_dir,
    target_size=(250, 250),
    batch_size=20,
    class_mode='binary')
# Fit the model on data
history = model.fit_generator(
    train_generator,
    steps_per_epoch=100,  # 2000 images = batch_size * steps
    epochs=100,
    validation_data=validation_generator,
    validation_steps=50,  # 1000 images = batch_size * steps
    verbose=2)
Here I am trying to fit the model, but I end up with an input error. Please check where I went wrong.
This error can be removed if you change one thing in your code: use target_size=(150, 150), as your own comment already says. I don't understand why you are trying to use 250.
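A sketch of the corrected generator calls, matching the model's input_shape=(150, 150, 3):

train_generator = train_datagen.flow_from_directory(
    train_dir,
    target_size=(150, 150),  # must match the Conv2D input_shape
    batch_size=20,
    class_mode='binary')

validation_generator = test_datagen.flow_from_directory(
    validation_dir,
    target_size=(150, 150),
    batch_size=20,
    class_mode='binary')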
Recently I started learning to implement neural networks using Keras, and I tried to implement the LeNet-5 network for the MNIST problem, based on the structure below.
Code:
# imports assumed by this snippet
import numpy as np
import tensorflow as tf
import keras
from keras import layers
from keras.datasets import mnist

# Load the data
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Parameter set up
input_shape = (32,32,1)
batch_size = 128
# Format the image info
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
x_train = np.pad(x_train, ((0,0),(2,2),(2,2),(0,0)), 'constant')
x_test = np.pad(x_test, ((0,0),(2,2),(2,2),(0,0)), 'constant')
# Encode label
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# Construct the model
model = keras.Sequential()
conv_stride = 1
pooling_stride = 2
model.add(layers.Conv2D(filters=6, kernel_size=[5,5], input_shape=input_shape, padding="valid", strides=[conv_stride,conv_stride], activation='tanh'))
model.add(layers.AveragePooling2D(pool_size=[2,2], padding="valid", strides=pooling_stride))
model.add(layers.Conv2D(filters=16, kernel_size=[5,5], padding="valid", strides=[conv_stride,conv_stride],activation='tanh'))
model.add(layers.AveragePooling2D(pool_size=[2,2], padding="valid", strides=pooling_stride))
model.add(layers.Conv2D(filters=120, kernel_size=[5,5], padding="valid", strides=[conv_stride,conv_stride], activation='tanh'))
model.add(layers.Flatten())
model.add(layers.Dense(84, activation='tanh'))
model.add(layers.Dense(10, activation=tf.nn.softmax))
model.summary()
model.compile(optimizer="sgd", loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=batch_size, verbose=1, epochs=12)
score = model.evaluate(x_test, y_test, verbose=1)
print("Test Accuracy: ", score[1])
However, there's an error when I run the program; the error message is:
Matrix size-incompatible: In[0]: [128,1536], In[1]: [1176,200]
[[{{node dense/MatMul}} = MatMul[T=DT_FLOAT, _class=["loc:@training/Adam/gradients/dense/MatMul_grad/MatMul"], transpose_a=false, transpose_b=false, _device="/job:localhost/replica:0/task:0/device:CPU:0"](flatten/Reshape, dense/MatMul/ReadVariableOp)]]
I have checked the structure using model.summary() and it seems like the neural network's structure is correct. Can anybody tell me which part leads to the error?
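A hint from the message itself: the shapes [128,1536] and [1176,200] match neither Dense(84) nor Dense(10) in the code above, and the node name mentions training/Adam even though the model is compiled with "sgd". That pattern usually means an older model built earlier in the same session is still sitting in the default graph. A sketch of one common remedy, assuming the code runs in a long-lived notebook session:

from keras import backend as K

K.clear_session()  # discard graphs and layers left over from earlier runs
# then re-run the model construction, compile, and fit cells above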