Keras-tuner search function throws Failed to create a NewWriteableFile error - keras

The relatively new keras-tuner module for TensorFlow 2 is throwing the error 'Failed to create a NewWriteableFile'. The tuner.search function itself runs; the error is thrown only after the trial completes. The code follows a tutorial from the sentdex YouTube channel.
Here is the code:
from tensorflow import keras
from tensorflow.keras.datasets import fashion_mnist
from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Activation, Flatten
from kerastuner.tuners import RandomSearch
from kerastuner.engine.hyperparameters import HyperParameters
import matplotlib.pyplot as plt
import time

(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()

x_train = x_train[:1000].reshape(-1, 28, 28, 1)
x_test = x_test[:100].reshape(-1, 28, 28, 1)
y_train = y_train[:1000]
y_test = y_test[:100]

# x_train = x_train.reshape(-1, 28, 28, 1)
# x_test = x_test.reshape(-1, 28, 28, 1)

LOG_DIR = f"{int(time.time())}"

def build_model(hp):
    model = keras.models.Sequential()
    model.add(Conv2D(hp.Int("layer1_channels", min_value=32,
                            max_value=256, step=32), (3, 3),
                     input_shape=x_train.shape[1:]))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    for i in range(hp.Int("n_layers", 1, 4)):
        model.add(Conv2D(hp.Int(f"conv_{i}_channels", min_value=32,
                                max_value=256, step=32), (3, 3)))
    model.add(Flatten())
    model.add(Dense(10))
    model.add(Activation('softmax'))
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model

tuner = RandomSearch(build_model,
                     objective="val_accuracy",
                     max_trials=1,
                     executions_per_trial=1,
                     directory=LOG_DIR,
                     project_name='junk')

tuner.search(x_train,
             y_train,
             epochs=1,
             batch_size=64,
             validation_data=(x_test, y_test))
This is the traceback printout:
(tf_2.0) C:\Users\redex\OneDrive\Documents\Education\Sentdex Tutorials\Keras-Tuner>C:/Users/redex/Anaconda3/envs/tf_2.0/python.exe "c:/Users/redex/OneDrive/Documents/Education/Sentdex Tutorials/Keras-Tuner/keras-tuner.py"
2019-12-21 10:07:47.556531: I tensorflow/core/platform/cpu_feature_guard.cc:145] This TensorFlow binary is optimized with Intel(R) MKL-DNN to use the following CPU instructions in performance critical operations: AVX AVX2
To enable them in non-MKL-DNN operations, rebuild TensorFlow with the appropriate compiler flags.
2019-12-21 10:07:47.574699: I tensorflow/core/common_runtime/process_util.cc:115] Creating new thread pool with default inter op setting: 8. Tune using inter_op_parallelism_threads for best performance.
Train on 1000 samples, validate on 100 samples
960/1000 [===========================>..] - ETA: 0s - loss: 64.0616 - accuracy: 0.2844
2019-12-21 10:07:55.080024: W tensorflow/core/framework/op_kernel.cc:1622] OP_REQUIRES failed at save_restore_v2_ops.cc:109 : Not found: Failed to create a NewWriteableFile: 1576951667\junk\trial_c5a5436b1d28a85446ce55c8d13f9657\checkpoints\epoch_0\checkpoint_temp_8a230a5ae2d046098456d1fdfc696690/part-00000-of-00001.data-00000-of-00001.tempstate15377864750281844169 : The system cannot find the path specified.
; No such process
Traceback (most recent call last):
File "c:/Users/redex/OneDrive/Documents/Education/Sentdex Tutorials/Keras-Tuner/keras-tuner.py", line 65, in <module>
validation_data=(x_test, y_test))
File "C:\Users\redex\Anaconda3\envs\tf_2.0\lib\site-packages\kerastuner\engine\base_tuner.py", line 122, in search
self.run_trial(trial, *fit_args, **fit_kwargs)
File "C:\Users\redex\Anaconda3\envs\tf_2.0\lib\site-packages\kerastuner\engine\multi_execution_tuner.py", line 95, in run_trial
history = model.fit(*fit_args, **fit_kwargs, callbacks=callbacks)
File "C:\Users\redex\Anaconda3\envs\tf_2.0\lib\site-packages\tensorflow_core\python\keras\engine\training.py", line 728, in fit
use_multiprocessing=use_multiprocessing)
File "C:\Users\redex\Anaconda3\envs\tf_2.0\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py", line 372, in fit
prefix='val_')
File "C:\Users\redex\Anaconda3\envs\tf_2.0\lib\contextlib.py", line 119, in __exit__
next(self.gen)
File "C:\Users\redex\Anaconda3\envs\tf_2.0\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py", line 685, in on_epoch
self.callbacks.on_epoch_end(epoch, epoch_logs)
File "C:\Users\redex\Anaconda3\envs\tf_2.0\lib\site-packages\tensorflow_core\python\keras\callbacks.py", line 298, in on_epoch_end
callback.on_epoch_end(epoch, logs)
File "C:\Users\redex\Anaconda3\envs\tf_2.0\lib\site-packages\tensorflow_core\python\keras\callbacks.py", line 965, in on_epoch_end
self._save_model(epoch=epoch, logs=logs)
File "C:\Users\redex\Anaconda3\envs\tf_2.0\lib\site-packages\tensorflow_core\python\keras\callbacks.py", line 999, in _save_model
self.model.save_weights(filepath, overwrite=True)
File "C:\Users\redex\Anaconda3\envs\tf_2.0\lib\site-packages\tensorflow_core\python\keras\engine\network.py", line 1090, in save_weights
self._trackable_saver.save(filepath, session=session)
File "C:\Users\redex\Anaconda3\envs\tf_2.0\lib\site-packages\tensorflow_core\python\training\tracking\util.py", line 1155, in save
file_prefix=file_prefix_tensor, object_graph_tensor=object_graph_tensor)
File "C:\Users\redex\Anaconda3\envs\tf_2.0\lib\site-packages\tensorflow_core\python\training\tracking\util.py", line 1103, in _save_cached_when_graph_building
save_op = saver.save(file_prefix)
File "C:\Users\redex\Anaconda3\envs\tf_2.0\lib\site-packages\tensorflow_core\python\training\saving\functional_saver.py", line 230, in save
sharded_saves.append(saver.save(shard_prefix))
File "C:\Users\redex\Anaconda3\envs\tf_2.0\lib\site-packages\tensorflow_core\python\training\saving\functional_saver.py", line 72, in save
return io_ops.save_v2(file_prefix, tensor_names, tensor_slices, tensors)
File "C:\Users\redex\Anaconda3\envs\tf_2.0\lib\site-packages\tensorflow_core\python\ops\gen_io_ops.py", line 1932, in save_v2
ctx=_ctx)
File "C:\Users\redex\Anaconda3\envs\tf_2.0\lib\site-packages\tensorflow_core\python\ops\gen_io_ops.py", line 1969, in save_v2_eager_fallback
ctx=_ctx, name=name)
File "C:\Users\redex\Anaconda3\envs\tf_2.0\lib\site-packages\tensorflow_core\python\eager\execute.py", line 67, in quick_execute
six.raise_from(core._status_to_exception(e.code, message), None)
File "<string>", line 3, in raise_from
tensorflow.python.framework.errors_impl.NotFoundError: Failed to create a NewWriteableFile: 1576951667\junk\trial_c5a5436b1d28a85446ce55c8d13f9657\checkpoints\epoch_0\checkpoint_temp_8a230a5ae2d046098456d1fdfc696690/part-00000-of-00001.data-00000-of-00001.tempstate15377864750281844169 : The system cannot find the path specified.
; No such process [Op:SaveV2]
My machine is Windows 10.
The keras-tuner documentation specifies TensorFlow 2.0 and Python 3.6, but I'm using 3.7.4; I presume a more recent version is OK. I'm no software expert, so this is about all I know. Any help is appreciated.

I had a similar problem while using kerastuner on Windows, and I solved it:
The first issue is that the path to the log directory may be too long; I had to shorten it.
The second problem is that Python (or TF) doesn't work on Windows with mixed slashes, and kerastuner builds the rest of the path with backslashes, so the directory you pass in should use backslashes too. I did this with the os.path.normpath() method:
tuner = RandomSearch(build_model,
                     objective='val_accuracy',
                     max_trials=10,
                     directory=os.path.normpath('C:/'))
tuner.search(x_train, y_train, batch_size=256, epochs=30, validation_split=0.2, verbose=1)
Now I don't receive this error.
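Applied to the original script, a minimal sketch of both fixes together (the C:/tuner base directory is only an example of a short, writable location):

import os
import time

# Short base path, normalized so Windows gets consistent backslashes.
LOG_DIR = os.path.normpath(f"C:/tuner/{int(time.time())}")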

In my case, the path exceeded the maximum path length on Windows, because the part of the path generated by Keras Tuner alone is about 170 characters. After I made my base folder shorter, it worked normally.
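A quick sanity check before launching a search, using the numbers above (260 is the classic Windows MAX_PATH limit, ~170 characters is the suffix Keras Tuner generates, and my_tuner_logs is a hypothetical base directory):

import os

base_dir = os.path.abspath("my_tuner_logs")  # hypothetical base directory
# Keras Tuner appends roughly 170 characters of trial/checkpoint path,
# and classic Windows paths are capped at 260 characters.
if len(base_dir) + 170 > 260:
    print(f"Base path is {len(base_dir)} chars; choose a shorter directory.")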

The problem, it would appear, is a Windows issue: running the same code in a Linux environment raised no such error.

Related

TimeDistributed of a KerasLayer in Tensorflow 2.0

I'm trying to build a CNN + RNN using a pre-trained model from tensorflow-hub:
import tensorflow_hub as hub
from tensorflow.keras import Sequential
from tensorflow.keras.layers import TimeDistributed, LSTM, Dense
from tensorflow.keras.optimizers import Adam

base_model = hub.KerasLayer('https://tfhub.dev/google/imagenet/resnet_v2_50/feature_vector/4',
                            input_shape=(244, 244, 3))
base_model.trainable = False

model = Sequential()
model.add(TimeDistributed(base_model, input_shape=(15, 244, 244, 3)))
model.add(LSTM(512))
model.add(Dense(256, activation='relu'))
model.add(Dense(3, activation='softmax'))

adam = Adam(learning_rate=learning_rate)
model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
model.summary()

and this is what I get:
2020-01-29 16:16:37.585888: I tensorflow/core/platform/profile_utils/cpu_utils.cc:94] CPU Frequency: 2494000000 Hz
2020-01-29 16:16:37.586205: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x3b553f0 executing computations on platform Host. Devices:
2020-01-29 16:16:37.586231: I tensorflow/compiler/xla/service/service.cc:175] StreamExecutor device (0): Host, Default Version
Traceback (most recent call last):
File "./RNN.py", line 45, in <module>
model.add(TimeDistributed(base_model, input_shape=(None, 244, 244, 3)))
File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/training/tracking/base.py", line 457, in _method_wrapper
result = method(self, *args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/engine/sequential.py", line 178, in add
layer(x)
File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/engine/base_layer.py", line 842, in __call__
outputs = call_fn(cast_inputs, *args, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/layers/wrappers.py", line 256, in call
output_shape = self.compute_output_shape(input_shape).as_list()
File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/layers/wrappers.py", line 210, in compute_output_shape
child_output_shape = self.layer.compute_output_shape(child_input_shape)
File "/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/keras/engine/base_layer.py", line 639, in compute_output_shape
raise NotImplementedError
NotImplementedError
Any suggestions?
Is it possible to convert a KerasLayer into Conv2D, ... layers?
It doesn't seem like you can use the TimeDistributed layer for this problem. However, as you don't want Resnet to train and just need the output, you can do the following to avoid the TimeDistributed layer.
Instead of model.add(TimeDistributed(base_model, input_shape=(15, 244, 244, 3))), do
Option 1
# 2048 is the output size
model.add(
    Lambda(
        lambda x: tf.reshape(base_model(tf.reshape(x, [-1, 244, 244, 3])), [-1, 15, 2048]),
        input_shape=(15, 244, 244, 3)
    )
)
Option 2
If you don't want to depend too much on the output shape (this sacrifices performance though).
model.add(
    Lambda(
        lambda x: tf.stack([base_model(xx) for xx in tf.unstack(x, axis=1)], axis=1),
        input_shape=(15, 244, 244, 3)
    )
)
See my answer here. The error is thrown because TimeDistributed can't compute the output shape of the wrapped layer; you might be able to solve your problem by implementing compute_output_shape manually.
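If you do want to keep TimeDistributed, here is a minimal sketch (my own, not from the thread) of a subclassed layer that forwards to the hub layer and supplies the missing compute_output_shape, assuming ResNet's 2048-dim feature vector:

import tensorflow as tf

class HubFeatureExtractor(tf.keras.layers.Layer):
    # Wraps a hub.KerasLayer and adds the compute_output_shape that
    # TimeDistributed needs.
    def __init__(self, hub_layer, output_dim, **kwargs):
        super(HubFeatureExtractor, self).__init__(**kwargs)
        self.hub_layer = hub_layer
        self.output_dim = output_dim

    def call(self, inputs):
        return self.hub_layer(inputs)

    def compute_output_shape(self, input_shape):
        # The batch dimension passes through; features become output_dim.
        return tf.TensorShape([input_shape[0], self.output_dim])

# Hypothetical usage:
# model.add(TimeDistributed(HubFeatureExtractor(base_model, 2048),
#                           input_shape=(15, 244, 244, 3)))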
If someone faces this issue: as @josef mentioned, the problem is that compute_output_shape is not implemented. You can overcome the problem by specifying the output shape of the layer:
extractor = tfhub.KerasLayer("https://tfhub.dev/google/imagenet/mobilenet_v2_100_224/feature_vector/4",
                             input_shape=(IMG_SIZE, IMG_SIZE, CHANNELS),
                             output_shape=(EXTRACTOR_SIZE,),
                             trainable=False)
model.add(keras.layers.Lambda(lambda x: extractor(x)))
As you can see, I also had to wrap the layer inside a Lambda function, since it looks like you can't wrap a KerasLayer directly inside a TimeDistributed layer.
In this code, EXTRACTOR_SIZE is 1280, but that is specific to MobileNet.
This workaround worked for me.

Model.fit Value Error (Text Classification Model)

I need your help, please...
I am trying to get the following text classification model working:
# Train and validate model.
history = model.fit(x_train,
                    train_labels,
                    epochs=epochs,
                    callbacks=callbacks,
                    validation_data=(x_val, val_labels),
                    verbose=2,
                    batch_size=batch_size)  # Logs once per epoch.
The source file can be found here: Google - GitHub Text Classification Code
However I am getting the following error on execution:
Traceback (most recent call last):
File "train_ngram_model.py", line 113, in <module>
train_ngram_model(data)
File "train_ngram_model.py", line 93, in train_ngram_model
batch_size=batch_size) # Logs once per epoch.
File "C:\Users\joebloggs\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\keras\engine\training.py", line 819, in fit
use_multiprocessing=use_multiprocessing)
File "C:\Users\joebloggs\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\keras\engine\training_v2.py", line 235, in fit
use_multiprocessing=use_multiprocessing)
File "C:\Users\joebloggs\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\keras\engine\training_v2.py", line 593, in _process_training_inputs
use_multiprocessing=use_multiprocessing)
File "C:\Users\joebloggs\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\keras\engine\training_v2.py", line 646, in _process_inputs
x, y, sample_weight=sample_weights)
File "C:\Users\joebloggs\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\keras\engine\training.py", line 2383, in _standardize_user_data
batch_size=batch_size)
File "C:\Users\joebloggs\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\keras\engine\training.py", line 2428, in _standardize_tensors
converted_x.append(_convert_scipy_sparse_tensor(a, b))
File "C:\Users\joebloggs\AppData\Roaming\Python\Python37\site-packages\tensorflow_core\python\keras\engine\training.py", line 3198, in _convert_scipy_sparse_tensor
raise ValueError('A SciPy sparse matrix was passed to a model '
ValueError: A SciPy sparse matrix was passed to a model that expects dense inputs. Please densify your inputs first, such as by calling `x.toarray()`.
I have spent several hours now trying to find a solution and haven't gotten anywhere.
Thank you in advance for your reply.
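The traceback itself suggests the likely fix: the n-gram vectorizer produces SciPy sparse matrices, while the model expects dense inputs. A minimal sketch (assuming x_train and x_val are the sparse vectorizer outputs):

# Densify the sparse matrices before calling fit, as the error message
# suggests; watch memory use for large vocabularies.
x_train = x_train.toarray()
x_val = x_val.toarray()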

ValueError:Layer conv1d was called with an input that isn't a symbolic tensor.All inputs to the layer should be tensors

I built this model and it was working fine.
### Building the model
input_layer = Embedding(num_words, 300, input_length=35,
                        weights=[embedding_matrix], trainable=True)

conv_blocks = []
filter_sizes = (2, 3, 4)
for fx in filter_sizes:
    conv_layer = Conv1D(100, kernel_size=fx, activation='relu',
                        data_format='channels_first')(input_layer)
    maxpool_layer = MaxPooling1D(pool_size=4)(conv_layer)
    flat_layer = Flatten()(maxpool_layer)
    conv_blocks.append(flat_layer)

# conc_layer = concatenate(conv_blocks, axis=1)
conc_layer = Concatenate(axis=-1)([conv_blocks])
graph = Model(inputs=input_layer, outputs=conc_layer)

model = Sequential()
model.add(graph)
model.add(Dropout(0.2))
model.add(Dense(3, activation='sigmoid'))
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
I recently reran it and now I'm getting an error:
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "/am/embassy/vol/x6/jetbrains/apps/PyCharm-P/ch-0/191.6183.50/helpers/pydev/_pydev_bundle/pydev_umd.py", line 197, in runfile
pydev_imports.execfile(filename, global_vars, local_vars) # execute the script
File "/am/embassy/vol/x6/jetbrains/apps/PyCharm-P/ch-0/191.6183.50/helpers/pydev/_pydev_imps/_pydev_execfile.py", line 18, in execfile
exec(compile(contents+"\n", file, 'exec'), glob, loc)
File "/home/kosimadukwe/PycharmProjects/untitled/WordEmb.py", line 128, in <module>
conv_layer= Conv1D(100, kernel_size=fx, activation='relu', data_format='channels_first')(input_layer) #filters=100, kernel_size=3
File "/home/kosimadukwe/PycharmProjects/untitled/venv/lib/python3.7/site-packages/keras/engine/base_layer.py", line 414, in __call__
self.assert_input_compatibility(inputs)
File "/home/kosimadukwe/PycharmProjects/untitled/venv/lib/python3.7/site-packages/keras/engine/base_layer.py", line 285, in assert_input_compatibility
str(inputs) + '. All inputs to the layer '
ValueError: Layer conv1d_1 was called with an input that isn't a symbolic tensor. Received type: <class 'keras.layers.embeddings.Embedding'>. Full input: [<keras.layers.embeddings.Embedding object at 0x7fae61513c18>]. All inputs to the layer should be tensors.
I have checked similar posts here, but none is very similar to mine. I have tried their suggestions, like adding the axis to Concatenate() or using concatenate instead, but nothing changed.
[embedding_matrix] is a 2D array.
The error is thrown because input_layer is a Layer, not a Tensor. You are passing the Embedding layer itself as input to Conv1D; you have not provided any input to the embedding layer.
Change this:
input_layer = Embedding(num_words, 300, input_length=35, weights=[embedding_matrix], trainable=True)
so that an input tensor is fed to the layer:
input_layer = Embedding(num_words, 300, input_length=35, weights=[embedding_matrix], trainable=True)(input_tensor)
Also, I think you are trying to concatenate the outputs from the three separate filters; if that is the case, then this part:
conc_layer = Concatenate(axis=-1)([conv_blocks])
graph = Model(inputs=input_layer, outputs=conc_layer)
should come outside the loop.
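Putting those pieces together, a minimal sketch of the corrected wiring (num_words, embedding_matrix, and the input length of 35 come from the question; I also drop data_format='channels_first', since the Embedding output is channels-last, and pass Concatenate the list itself rather than a list wrapped in another list):

from keras.layers import Input, Embedding, Conv1D, MaxPooling1D, Flatten, Concatenate
from keras.models import Model

input_tensor = Input(shape=(35,))
embedded = Embedding(num_words, 300, input_length=35,
                     weights=[embedding_matrix], trainable=True)(input_tensor)

conv_blocks = []
for fx in (2, 3, 4):
    x = Conv1D(100, kernel_size=fx, activation='relu')(embedded)
    x = MaxPooling1D(pool_size=4)(x)
    conv_blocks.append(Flatten()(x))

conc_layer = Concatenate(axis=-1)(conv_blocks)
graph = Model(inputs=input_tensor, outputs=conc_layer)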

How to fix 'IndexError: tuple index out of range' in model.fit with tensorflow?

I am trying to create a CNN with TensorFlow and Keras using the Sequential method.
The inputs have shape (size, 50, 50, 1) and the labels have shape (size,), where size is the number of samples in the dataset.
The problem is that after compilation, when I call the fit method on my model, I get an index error. See the code below:
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Conv2D(32, 4, input_shape=(50, 50, 1), activation='relu', name="conv1"))
model.add(tf.keras.layers.Conv2D(64, 3, activation='relu', name="conv2"))
model.add(tf.keras.layers.Conv2D(128, 3, activation='relu', name="conv3"))
model.add(tf.keras.layers.Flatten(name='Flatten'))
model.add(tf.keras.layers.Dense(128, activation='relu', name="d1"))
model.add(tf.keras.layers.Dense(4, activation='softmax', name="output"))

# Compile the model
model.compile(
    loss="sparse_categorical_crossentropy",
    optimizer="Adam",
    metrics=["accuracy"]
)

model.fit(x_trains, y_labels, epochs=5, verbose=2, validation_data=0.33, shuffle=True)
And the error :
2019-09-24 13:59:40.902561: I tensorflow/core/platform/cpu_feature_guard.cc:142] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
Traceback (most recent call last):
File "C:/Users/eloim/Documents/Programmation/Python/CNN_tf/face_train_seq.py", line 49, in <module>
model.fit(x_trains, y_labels, epochs=5, verbose=2, validation_data=0.33, shuffle=True)
File "C:\Users\{}\Anaconda3\envs\CNN_tf\lib\site-packages\tensorflow_core\python\keras\engine\training.py", line 728, in fit
use_multiprocessing=use_multiprocessing)
File "C:\Users\{}\Anaconda3\envs\CNN_tf\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py", line 224, in fit
distribution_strategy=strategy)
File "C:\Users\{}\Anaconda3\envs\CNN_tf\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py", line 564, in _process_training_inputs
distribution_strategy=distribution_strategy)
File "C:\Users\{}\Anaconda3\envs\CNN_tf\lib\site-packages\tensorflow_core\python\keras\engine\training_v2.py", line 606, in _process_inputs
use_multiprocessing=use_multiprocessing)
File "C:\Users\{}\Anaconda3\envs\CNN_tf\lib\site-packages\tensorflow_core\python\keras\engine\data_adapter.py", line 479, in __init__
batch_size=batch_size, shuffle=shuffle, **kwargs)
File "C:\Users\{}\Anaconda3\envs\CNN_tf\lib\site-packages\tensorflow_core\python\keras\engine\data_adapter.py", line 238, in __init__
num_samples = set(int(i.shape[0]) for i in nest.flatten(inputs))
File "C:\Users\{}\Anaconda3\envs\CNN_tf\lib\site-packages\tensorflow_core\python\keras\engine\data_adapter.py", line 238, in <genexpr>
num_samples = set(int(i.shape[0]) for i in nest.flatten(inputs))
IndexError: tuple index out of range
At the line :
model.fit(x_trains, y_labels, epochs=5, verbose=2, validation_data=0.33, shuffle=True)
I do not understand the nature of this error. How can I get rid of it?
Thanks for your help.
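One detail that stands out (my reading, not a confirmed diagnosis): validation_data expects a tuple of arrays such as (x_val, y_val), whereas a fraction of the training data belongs in validation_split. A minimal sketch of the corrected call:

# validation_split takes a fraction; validation_data would take (x_val, y_val).
model.fit(x_trains, y_labels, epochs=5, verbose=2,
          validation_split=0.33, shuffle=True)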

TensorFlow AlphaDropout: rank undefined

I am trying to set up a neural network using TensorFlow's tf.contrib.nn.alpha_dropout (as implemented in TensorFlow version 1.12.0). Please consider the following example:
import tensorflow as tf
from tensorflow.contrib.layers import fully_connected
from tensorflow.contrib.nn import alpha_dropout
import numpy as np

N_data = 100

x_in = tf.placeholder(tf.float32, shape=[None, N_data], name="x_in")
keep_prob = tf.placeholder(tf.float32)

fc = fully_connected(inputs=x_in, num_outputs=N_data)
drop = alpha_dropout(fc, keep_prob=keep_prob)
x_out = fully_connected(inputs=drop, num_outputs=N_data)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    fd = {
        x_in: np.random.rand(2, N_data),
        keep_prob: 0.5,
    }
    output = x_out.eval(feed_dict=fd)
When evaluating the output of the dropout layer, everything seems normal, but when the output from the dropout layer is linked to a second dense layer, I get the following error message:
Traceback (most recent call last):
File "/***/problem_alpha_dropout.py", line 14, in <module>
x_out = fully_connected(inputs=drop, num_outputs=N_data)
File "/***/anaconda3/envs/TensorFlow/lib/python3.6/site-packages/tensorflow/contrib/framework/python/ops/arg_scope.py", line 182, in func_with_args
return func(*args, **current_args)
File "/***/anaconda3/envs/TensorFlow/lib/python3.6/site-packages/tensorflow/contrib/layers/python/layers/layers.py", line 1854, in fully_connected
outputs = layer.apply(inputs)
File "/***/anaconda3/envs/TensorFlow/lib/python3.6/site-packages/tensorflow/python/keras/engine/base_layer.py", line 817, in apply
return self.__call__(inputs, *args, **kwargs)
File "/***/anaconda3/envs/TensorFlow/lib/python3.6/site-packages/tensorflow/python/layers/base.py", line 374, in __call__
outputs = super(Layer, self).__call__(inputs, *args, **kwargs)
File "/***/anaconda3/envs/TensorFlow/lib/python3.6/site-packages/tensorflow/python/keras/engine/base_layer.py", line 730, in __call__
self._assert_input_compatibility(inputs)
File "/***/anaconda3/envs/TensorFlow/lib/python3.6/site-packages/tensorflow/python/keras/engine/base_layer.py", line 1465, in _assert_input_compatibility
self.name + ' is incompatible with the layer: '
ValueError: Input 0 of layer fully_connected_1 is incompatible with the layer: its rank is undefined, but the layer requires a defined rank.
This behaviour does not emerge when tf.contrib.nn.alpha_dropout is replaced by tf.nn.dropout (same usage).
Additional information:
TensorFlow version: 1.12.0 (GPU)
Python version: 3.6 (through Anaconda)
OS: Linux Mint
Just specify the shape of the keep_prob placeholder:
keep_prob = tf.placeholder(tf.float32, shape=())
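A plausible explanation (my own, not from the original answer): a placeholder created without a shape has an undefined rank, and alpha_dropout's arithmetic with keep_prob propagates that undefined rank to its output, which the next fully_connected layer then rejects. With the scalar shape, the static rank survives:

# With shape=() the placeholder is a known scalar, so the dropout output
# keeps a defined rank such as (?, 100) instead of <unknown>.
keep_prob = tf.placeholder(tf.float32, shape=())
drop = alpha_dropout(fc, keep_prob=keep_prob)
print(drop.shape)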
