Face Generation TensorFlow increase size from 28px conv2d_transpose - python-3.x

I am following this tutorial and can successfully generate 28px-sized faces using my GPU. However, I do not know how to increase the size of the generated faces (currently 28px) given the logic of the generator function below:
def generator(z, out_channel_dim, is_train=True, alpha=0.2, keep_prob=0.5):
    with tf.variable_scope('generator', reuse=(not is_train)):
        # First fully connected layer, 4x4x1024
        fc = tf.layers.dense(z, 4 * 4 * 1024, use_bias=False)
        fc = tf.reshape(fc, (-1, 4, 4, 1024))
        bn0 = tf.layers.batch_normalization(fc, training=is_train)
        lrelu0 = tf.maximum(alpha * bn0, bn0)
        drop0 = tf.layers.dropout(lrelu0, keep_prob, training=is_train)
        # Deconvolution, 7x7x512
        conv1 = tf.layers.conv2d_transpose(drop0, 512, 4, 1, 'valid', use_bias=False)
        bn1 = tf.layers.batch_normalization(conv1, training=is_train)
        lrelu1 = tf.maximum(alpha * bn1, bn1)
        drop1 = tf.layers.dropout(lrelu1, keep_prob, training=is_train)
        # Deconvolution, 14x14x256
        conv2 = tf.layers.conv2d_transpose(drop1, 256, 5, 2, 'same', use_bias=False)
        bn2 = tf.layers.batch_normalization(conv2, training=is_train)
        lrelu2 = tf.maximum(alpha * bn2, bn2)
        drop2 = tf.layers.dropout(lrelu2, keep_prob, training=is_train)
        # Output layer, 28x28xn
        logits = tf.layers.conv2d_transpose(drop2, out_channel_dim, 5, 2, 'same')
        out = tf.tanh(logits)
        return out
I assume I will need to change something in this line:
conv2 = tf.layers.conv2d_transpose(drop1, 256, 5, 2, 'same', use_bias=False)

The change should be made on the last layer (logits), since that is the layer that determines the size of the output: its kernel size and stride (together with the 14x14 feature map coming out of drop2) set the spatial resolution, while out_channel_dim only sets the number of output channels. Adjusting those values, or adding another upsampling stage, to reach the size you want is the solution. This may cause errors (due to the kernel size and stride values), or the results will not be the same (since your network was not trained for this size).
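Purely as an illustration (this sketch is not from the original answer; the layer names conv3/drop3 and the 128-filter count are assumptions), one way to reach a 56x56 output is to keep everything up to drop2 and add one more stride-2 conv2d_transpose stage before the output layer:
        # ... layers up to drop2 (14x14x256) unchanged ...
        # Extra deconvolution, 28x28x128 (hypothetical new stage)
        conv3 = tf.layers.conv2d_transpose(drop2, 128, 5, 2, 'same', use_bias=False)
        bn3 = tf.layers.batch_normalization(conv3, training=is_train)
        lrelu3 = tf.maximum(alpha * bn3, bn3)
        drop3 = tf.layers.dropout(lrelu3, keep_prob, training=is_train)
        # Output layer, 56x56xn
        logits = tf.layers.conv2d_transpose(drop3, out_channel_dim, 5, 2, 'same')
        out = tf.tanh(logits)
Keep in mind that the training images (and the discriminator's input shape) would then also have to be 56x56, and the network would need to be retrained at that size.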

Related

RuntimeError: mat1 and mat2 shapes cannot be multiplied (4x32 and 400x120)

class Net(nn.Module):
    def __init__(self):
        super().__init__()
        # (input channels, output channels, kernel size)
        # a channel is a dimension of a tensor, which is a container that can house data in N dimensions (matrices)
        self.conv1 = nn.Conv2d(3, 6, 5)
        # shrink the image stack by pooling (kernel size, stride (shift)) and take the max value per window
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        #TODO: add conv3
        self.conv3 = nn.Conv2d(16, 32, 5)
        # drop layer deletes 20% of the features to help prevent overfitting
        self.drop = nn.Dropout2d(p=0.2)
        # linear predicts the output as a linear function of inputs
        # (output channels, height, width, batch size)
        #TODO:
        self.fc1 = nn.Linear(16 * 16 * 5, 120)
        #TODO:
        self.fc1_5 = nn.Linear()
        # layer(size of input, size of output)
        # Linear layer = fully connected layer
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        # F.relu changes negative values to 0. Apply to all stacks of images.
        # These are activation functions. We apply one after each linear layer.
        # Only used in hidden layers.
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        # Select some features to drop after the 3rd conv to prevent overfitting
        x = self.drop(F.relu(self.conv3(x)))
        x = torch.flatten(x, 1)  # flatten all dimensions except batch into 1-D
        x = F.relu(self.fc1(x))
        #TODO: add fc1_5
        x = F.relu(self.fc1_5(x))
        x = F.relu(self.fc2(x))
        # Feed to fully connected layer to predict class
        x = self.fc3(x)  # no relu b/c it's the last layer
        return x
I am using images from CIFAR10, which are of size 3x32x32.
When I ran the code before, it stopped because the self.fc1 linear layer size did not work with the self.conv3 I added.
I'm also not sure what to write for self.fc1_5.
Can someone explain to me how this actually works, and what the solution is?
Thank you!
I have added an extra convolutional layer, which you can see as
self.conv3 = nn.Conv2d(16, 32, 5).
The lines under the TODOs are where I'm stuck.
I updated the line to:
self.fc1 = nn.Linear(16 * 16 * 5, 120)
Before, it was:
self.fc1 = nn.Linear(16 * 5 * 5, 120).
When you create a CNN for classification with a fixed input size, it's easy to figure out the size of your image by the time it has progressed through your CNN layers. Since we start with images of size [32,32] (channels are unimportant for now):
def __init__(self):
    super().__init__()
    # (input channels, output channels, kernel size)
    # a channel is a dimension of a tensor, which is a container that can house data in N dimensions (matrices)
    self.conv1 = nn.Conv2d(3, 6, 5)    # size 28x28 - lose 2 px from each side with a kernel of size 5
    # shrink the image stack by pooling (kernel size, stride (shift)) and take the max value per window
    self.pool = nn.MaxPool2d(2, 2)     # size 14x14 - max pooling with K=2 halves the image size
    self.conv2 = nn.Conv2d(6, 16, 5)   # size 10x10 -> 5x5 after pooling
    #TODO: add conv3
    self.conv3 = nn.Conv2d(16, 32, 5)  # size 1x1
    # drop layer deletes 20% of the features to help prevent overfitting
    self.drop = nn.Dropout2d(p=0.2)
    # linear predicts the output as a linear function of inputs
    # (size of input, size of output)
    self.fc1 = nn.Linear(1 * 1 * 32, 120)
    self.fc1_5 = nn.Linear(120, 120)  # matches the output size of fc1 and the input size of fc2
The CNN size losses can be negated by using padding of (K-1)//2, where K=kernel_size.
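As a quick sanity check (a minimal sketch, not part of the original answer), you can push a dummy CIFAR10-sized batch through just the convolutional part and print the flattened size; with the layers above it should come out to 32 (i.e. 1 * 1 * 32), which is what fc1 must accept:
import torch
import torch.nn as nn
import torch.nn.functional as F

conv1, conv2, conv3 = nn.Conv2d(3, 6, 5), nn.Conv2d(6, 16, 5), nn.Conv2d(16, 32, 5)
pool = nn.MaxPool2d(2, 2)

x = torch.zeros(4, 3, 32, 32)     # dummy batch of 4 CIFAR10 images
x = pool(F.relu(conv1(x)))        # -> (4, 6, 14, 14)
x = pool(F.relu(conv2(x)))        # -> (4, 16, 5, 5)
x = F.relu(conv3(x))              # -> (4, 32, 1, 1)
print(torch.flatten(x, 1).shape)  # torch.Size([4, 32]) -> fc1 input size must be 32
This also explains the error in the title: the flattened features are 4x32, while the original fc1 expected 400 (= 16 * 5 * 5) inputs.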

Keras OOM when allocating tensor with shape

I have been scratching my head over this OOM error for days, and I am new to Keras. I have tried sampling down my data, lowering the batch size, and removing layers from the 3D-Unet, but nothing is working for me. I am using the LIDC-IDRI dataset of CT scans of 1010 patients. After pre-processing I save volumes of shape 64x64x64 to disk, which I extracted from resampled 256x256x256 whole CT scans (at first I was trying to train on whole CT scans, but after getting OOM I decided to go with 64-cubed volumes). Each patient has 64 volumes of 64x64x64, and in total that makes 64,640 samples on which I have to train my 3D-Unet.
Here’s my Keras code for the model:
im_width = 64
im_height = 64
im_depth = 64
path_train = 'D:/LIDC-IDRI-Dataset/'

def npz_volume_generator(inputPath, bs, mode="train", aug=None):
    batch_start_index = 0
    patients = os.listdir(inputPath + "images")
    # loop indefinitely
    while True:
        # initialize our batches of scans and masks
        scan_pixels = []
        mask_pixels = []
        # keep looping until we reach our batch size
        for id_ in range(batch_start_index, batch_start_index + bs):
            # attempt to read the next sample from path
            scan_pixel = np.zeros((im_depth, im_width, im_height))
            scan_pixel = np.load(inputPath + 'images/' + patients[id_])['arr_0']
            mask_pixel = np.zeros((im_depth, im_width, im_height))
            mask_pixel = np.load(inputPath + 'masks/' + patients[id_])['arr_0']
            # check to see if we have reached the end of our samples
            if(batch_start_index >= len(patients)):
                # reset the batch start index to the beginning of our samples
                batch_start_index -= len(patients)
                # if we are evaluating we should now break from our
                # loop to ensure we don't continue to fill up the
                # batch from samples from the beginning
                if mode == "eval":
                    break
            # update our corresponding batch lists
            scan_pixels.append(scan_pixel)
            mask_pixels.append(mask_pixel)
        batch_start_index += bs
        if(batch_start_index >= len(patients)):
            batch_start_index -= len(patients)
        # if the data augmentation object is not None, apply it
        if aug is not None:
            (scan_pixels, mask_pixels) = next(aug.flow(np.array(scan_pixels), np.array(mask_pixels), batch_size=bs))
        # Re-shaping and adding a channel dimension (5D tensor)
        # batch_size, length, breadth, height, channel [None, im_width, im_height, im_depth, 1]
        # yield the batch to the calling function
        yield (np.array(expand_dims(scan_pixels, axis=4)), np.array(expand_dims(mask_pixels, axis=4)))
def conv3d_block(input_tensor, n_filters, kernel_size=3, batchnorm=True):
    # first layer
    x = Conv3D(filters=n_filters, kernel_size=(kernel_size, kernel_size, kernel_size),
               kernel_initializer="he_normal", padding="same")(input_tensor)
    if batchnorm:
        x = BatchNormalization()(x)
    x = Activation("relu")(x)
    # second layer
    x = Conv3D(filters=n_filters, kernel_size=(kernel_size, kernel_size, kernel_size),
               kernel_initializer="he_normal", padding="same")(x)
    if batchnorm:
        x = BatchNormalization()(x)
    x = Activation("relu")(x)
    return x
def get_unet(input_img, n_filters=16, dropout=0.5, batchnorm=True):
    # contracting path
    c1 = conv3d_block(input_img, n_filters=n_filters*1, kernel_size=3, batchnorm=batchnorm)
    p1 = MaxPooling3D((2, 2, 2))(c1)
    p1 = Dropout(dropout*0.5)(p1)
    c2 = conv3d_block(p1, n_filters=n_filters*2, kernel_size=3, batchnorm=batchnorm)
    p2 = MaxPooling3D((2, 2, 2))(c2)
    p2 = Dropout(dropout)(p2)
    c3 = conv3d_block(p2, n_filters=n_filters*4, kernel_size=3, batchnorm=batchnorm)
    p3 = MaxPooling3D((2, 2, 2))(c3)
    p3 = Dropout(dropout)(p3)
    c4 = conv3d_block(p3, n_filters=n_filters*16, kernel_size=3, batchnorm=batchnorm)
    # expansive path
    u5 = Conv3DTranspose(n_filters*8, (3, 3, 3), strides=(2, 2, 2), padding='same')(c4)
    u5 = concatenate([u5, c3])
    u5 = Dropout(dropout)(u5)
    c5 = conv3d_block(u5, n_filters=n_filters*8, kernel_size=3, batchnorm=batchnorm)
    u6 = Conv3DTranspose(n_filters*4, (3, 3, 3), strides=(2, 2, 2), padding='same')(c5)
    u6 = concatenate([u6, c2])
    u6 = Dropout(dropout)(u6)
    c6 = conv3d_block(u6, n_filters=n_filters*4, kernel_size=3, batchnorm=batchnorm)
    u7 = Conv3DTranspose(n_filters*2, (3, 3, 3), strides=(2, 2, 2), padding='same')(c6)
    u7 = concatenate([u7, c1])
    u7 = Dropout(dropout)(u7)
    c7 = conv3d_block(u7, n_filters=n_filters*2, kernel_size=3, batchnorm=batchnorm)
    outputs = Conv3D(1, (1, 1, 1), activation='sigmoid')(c7)
    model = Model(inputs=[input_img], outputs=[outputs])
    return model
# initialize the number of epochs to train for and batch size
NUM_EPOCHS = 50
BS = 8
# initialize the total number of training and testing images
NUM_TRAIN_IMAGES = len(os.listdir(path_train + 'images/'))
NUM_TEST_IMAGES = len(os.listdir(path_train + 'test/'))
# construct the training image generator for data augmentation
aug = ImageDataGenerator(rotation_range=20, zoom_range=0.15,
                         width_shift_range=0.2, height_shift_range=0.2, shear_range=0.15,
                         horizontal_flip=True, fill_mode="nearest")
# initialize both the training and testing image generators
trainGen = npz_volume_generator(path_train, BS, mode="train", aug=aug)
testGen = npz_volume_generator(path_train, BS, mode="train", aug=None)
# initialize our Keras model and compile it
model = get_unet(Input((im_depth, im_width, im_height, 1)), n_filters=16, dropout=0.05, batchnorm=True)
print(model.summary())
model.compile(optimizer=Adam(), loss="binary_crossentropy", metrics=["accuracy"])
# train the network
print("[INFO] training w/ generator...")
H = model.fit_generator(trainGen, steps_per_epoch=NUM_TRAIN_IMAGES // BS,
                        validation_data=testGen, validation_steps=NUM_TEST_IMAGES // BS,
                        epochs=NUM_EPOCHS)
There are two issues with the output I get. The first warning I get is this:
\Anaconda3\lib\site-packages\keras_preprocessing\image\numpy_array_iterator.py:127: UserWarning: NumpyArrayIterator is set to use the data format convention "channels_last" (channels on axis 3), i.e. expected either 1, 3, or 4 channels on axis 3. However, it was passed an array with shape (8, 64, 64, 64) (64 channels). str(self.x.shape[channels_axis]) + ' channels).')
It states that the shape passed to the Keras library was (8, 64, 64, 64) (64 channels). However, the input shape I declared in Keras's Input() function is (64, 64, 64, 1), with 1 being the channel on the last axis (you don't declare the batch size here, which is 8 in my case). Yet Keras states that the shape passed to it has 64 channels, ignoring the last dimension I gave it.
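For reference, a minimal sketch (assuming NumPy arrays of the sizes stated above, not part of the original question) of the shapes moving through the generator; note that in npz_volume_generator above, aug.flow is applied to the stacked volumes before expand_dims adds the channel axis, so the ImageDataGenerator receives an (8, 64, 64, 64) array and treats the last axis as channels, which is presumably where the "64 channels" warning comes from:
import numpy as np

scan_pixels = [np.zeros((64, 64, 64)) for _ in range(8)]  # one batch of 8 volumes
batch = np.array(scan_pixels)
print(batch.shape)                       # (8, 64, 64, 64)    <- what aug.flow receives
batch_5d = np.expand_dims(batch, axis=4)
print(batch_5d.shape)                    # (8, 64, 64, 64, 1) <- what the model's Input() expects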
The second error that I get is the following:
ResourceExhaustedError: OOM when allocating tensor with shape[8,32,64,64,64] and type float on /job:localhost/replica:0/task:0/device:GPU:0 by allocator GPU_0_bfc
[[{{node conv3d_transpose_3/conv3d_transpose}}]]
Hint: If you want to see a list of allocated tensors when OOM happens, add report_tensor_allocations_upon_oom to RunOptions for current allocation info.
[[{{node loss/mul}}]]
Hint: If you want to see a list of allocated tensors when OOM happens, add report_tensor_allocations_upon_oom to RunOptions for current allocation info.
Again I have a problem with a shape here, the tensor shape. My shape should be (8, 64, 64, 64, 1), but what it reports is (8, 32, 64, 64, 64); not only is the number of channels huge here, I also have no idea where that 32 came from. Is there a different interpretation of the tensor shape? I think there is something wrong with my input shapes (which are unknowingly being set to something very large), and that is causing the OOM error.

Keras layer asks for different shape than in the summary

I'm writing a U-Net CNN in Keras and trying to use fit_generator for training. In order for this to work, I use a generator script that feeds the images and labels to my network (the simple fit function works, but I want to train on a big dataset which cannot fit into memory).
My problem is that the model summary correctly says that the output layer has shape (None, 288, 512, 4):
https://i.imgur.com/69xG8pO.jpg
but when I try actual training I get this error:
https://i.imgur.com/j7H6sHX.jpg
I don't get why Keras wants (288, 512, 1) when the summary says it expects (288, 512, 4).
I tried it with my own U-Net code, and I also copied a working implementation from GitHub, but both have the exact same problem, which leads me to believe that my generator script is the weak link. Below is the code I used (the image and label array functions used here already worked when I used them with fit in a previous CNN):
def generator(img_path, label_path, batch_size, height, width, num_classes):
    input_pairs = get_pairs(img_path, label_path)  # rewrite if param name changes
    random.shuffle(input_pairs)
    iterate_pairs = itertools.cycle(input_pairs)
    while True:
        X = []
        Y = []
        for _ in range(batch_size):
            im, lab = next(iterate_pairs)
            appended_im = next(iter(im))
            appended_lab = next(iter(lab))
            X.append(input_image_array(appended_im, width, height))
            Y.append(input_label_array(appended_lab, width, height, num_classes, palette))
        yield (np.array(X), np.array(Y))
I tried the generator out, and the provided batches have the following shapes (for a batch size of 15):
(15, 288, 512, 3)
(15, 288, 512, 4)
So I really do not know what could be the problem here.
EDIT: Here is the model code I used:
def conv_block(input_tensor, n_filter, kernel=(3, 3), padding='same', initializer="he_normal"):
    x = Conv2D(n_filter, kernel, padding=padding, kernel_initializer=initializer)(input_tensor)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    x = Conv2D(n_filter, kernel, padding=padding, kernel_initializer=initializer)(x)
    x = BatchNormalization()(x)
    x = Activation("relu")(x)
    return x

def deconv_block(input_tensor, residual, n_filter, kernel=(3, 3), strides=(2, 2), padding='same'):
    y = Conv2DTranspose(n_filter, kernel, strides, padding)(input_tensor)
    y = concatenate([y, residual], axis=3)
    y = conv_block(y, n_filter)
    return y

# NETWORK - n_classes is the desired number of classes, filters are fixed
def Unet(input_height, input_width, n_classes=4, filters=64):
    # Downsampling
    input_layer = Input(shape=(input_height, input_width, 3), name='input')
    conv_1 = conv_block(input_layer, filters)
    conv_1_out = MaxPooling2D(pool_size=(2, 2))(conv_1)
    conv_2 = conv_block(conv_1_out, filters*2)
    conv_2_out = MaxPooling2D(pool_size=(2, 2))(conv_2)
    conv_3 = conv_block(conv_2_out, filters*4)
    conv_3_out = MaxPooling2D(pool_size=(2, 2))(conv_3)
    conv_4 = conv_block(conv_3_out, filters*8)
    conv_4_out = MaxPooling2D(pool_size=(2, 2))(conv_4)
    conv_4_drop = Dropout(0.5)(conv_4_out)
    conv_5 = conv_block(conv_4_drop, filters*16)
    conv_5_drop = Dropout(0.5)(conv_5)
    # Upsampling
    deconv_1 = deconv_block(conv_5_drop, conv_4, filters*8)
    deconv_1_drop = Dropout(0.5)(deconv_1)
    deconv_2 = deconv_block(deconv_1_drop, conv_3, filters*4)
    deconv_2_drop = Dropout(0.5)(deconv_2)
    deconv_3 = deconv_block(deconv_2_drop, conv_2, filters*2)
    deconv_3 = deconv_block(deconv_3, conv_1, filters)
    # Output - mapping each 64-component feature vector to the number of classes
    output = Conv2D(n_classes, (1, 1))(deconv_3)
    output = BatchNormalization()(output)
    output = Activation("softmax")(output)
    # embed into functional API
    model = Model(inputs=input_layer, outputs=output, name="Unet")
    return model
Change your loss to categorical_crossentropy.
When using the sparse_categorical_crossentropy loss, your targets
should be integer targets.
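For illustration, a minimal sketch (not from the original answer; the "adam" optimizer is an assumption) of the two compatible combinations, given the Unet model above:
# Option 1: one-hot targets of shape (batch, 288, 512, 4) - matches the generator above
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])

# Option 2: integer targets of shape (batch, 288, 512, 1) with class ids 0..3
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
The error in the question comes from mixing these: the generator yields one-hot labels with 4 channels while the model was compiled with the sparse loss, which expects a single integer channel.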

Batch Normalization doesn't have gradient in tensorflow 2.0?

I am trying to make a simple GAN to generate digits from the MNIST dataset. However, when I get to training (which is custom), I get an annoying warning that I suspect is the cause of the model not training like I'm used to.
Keep in mind this is all in TensorFlow 2.0, using its default eager execution.
GET THE DATA (not that important)
(train_images,train_labels),(test_images,test_labels) = tf.keras.datasets.mnist.load_data()
train_images = train_images.reshape(train_images.shape[0], 28, 28, 1).astype('float32')
train_images = (train_images - 127.5) / 127.5 # Normalize the images to [-1, 1]
BUFFER_SIZE = 60000
BATCH_SIZE = 256
train_dataset = tf.data.Dataset.from_tensor_slices((train_images,train_labels)).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
GENERATOR MODEL (this is where the Batch Normalization is at)
def make_generator_model():
    model = tf.keras.Sequential()
    model.add(tf.keras.layers.Dense(7*7*256, use_bias=False, input_shape=(100,)))
    model.add(tf.keras.layers.BatchNormalization())
    model.add(tf.keras.layers.LeakyReLU())
    model.add(tf.keras.layers.Reshape((7, 7, 256)))
    assert model.output_shape == (None, 7, 7, 256)  # Note: None is the batch size
    model.add(tf.keras.layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False))
    assert model.output_shape == (None, 7, 7, 128)
    model.add(tf.keras.layers.BatchNormalization())
    model.add(tf.keras.layers.LeakyReLU())
    model.add(tf.keras.layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False))
    assert model.output_shape == (None, 14, 14, 64)
    model.add(tf.keras.layers.BatchNormalization())
    model.add(tf.keras.layers.LeakyReLU())
    model.add(tf.keras.layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh'))
    assert model.output_shape == (None, 28, 28, 1)
    return model
DISCRIMINATOR MODEL (likely not that important)
def make_discriminator_model():
    model = tf.keras.Sequential()
    model.add(tf.keras.layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same'))
    model.add(tf.keras.layers.LeakyReLU())
    model.add(tf.keras.layers.Dropout(0.3))
    model.add(tf.keras.layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'))
    model.add(tf.keras.layers.LeakyReLU())
    model.add(tf.keras.layers.Dropout(0.3))
    model.add(tf.keras.layers.Flatten())
    model.add(tf.keras.layers.Dense(1))
    return model
INSTANTIATE THE MODELS (likely not that important)
generator = make_generator_model()
discriminator = make_discriminator_model()
DEFINE THE LOSSES (maybe the generator loss is important since that is where the gradient comes from)
def generator_loss(generated_output):
    return tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(generated_output), logits=generated_output)

def discriminator_loss(real_output, generated_output):
    # [1,1,...,1] with real output since it is true and we want our generated examples to look like it
    real_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(real_output), logits=real_output)
    # [0,0,...,0] with generated images since they are fake
    generated_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(generated_output), logits=generated_output)
    total_loss = real_loss + generated_loss
    return total_loss
MAKE THE OPTIMIZERS (likely not important)
generator_optimizer = tf.optimizers.Adam(1e-4)
discriminator_optimizer = tf.optimizers.Adam(1e-4)
RANDOM NOISE FOR THE GENERATOR (likely not important)
EPOCHS = 50
noise_dim = 100
num_examples_to_generate = 16
# We'll re-use this random vector used to seed the generator so
# it will be easier to see the improvement over time.
random_vector_for_generation = tf.random.normal([num_examples_to_generate, noise_dim])
A SINGLE TRAIN STEP (this is where I get the error)
def train_step(images):
    # generating noise from a normal distribution
    noise = tf.random.normal([BATCH_SIZE, noise_dim])
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        generated_images = generator(noise, training=True)
        real_output = discriminator(images[0], training=True)
        generated_output = discriminator(generated_images, training=True)
        gen_loss = generator_loss(generated_output)
        disc_loss = discriminator_loss(real_output, generated_output)
    # This line >>>>>
    gradients_of_generator = gen_tape.gradient(gen_loss, generator.variables)
    # <<<<< This line
    gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.variables)
    generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.variables))
    discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.variables))
THE FULL TRAIN (not important except that it calls train_step)
def train(dataset, epochs):
    for epoch in range(epochs):
        start = time.time()
        for images in dataset:
            train_step(images)
        display.clear_output(wait=True)
        generate_and_save_images(generator, epoch + 1, random_vector_for_generation)
        # saving (checkpoint) the model every 15 epochs
        if (epoch + 1) % 15 == 0:
            checkpoint.save(file_prefix=checkpoint_prefix)
        print('Time taken for epoch {} is {} sec'.format(epoch + 1, time.time() - start))
    # generating after the final epoch
    display.clear_output(wait=True)
    generate_and_save_images(generator, epochs, random_vector_for_generation)
BEGIN TRAINING
train(train_dataset, EPOCHS)
The error I get is as follows,
W0330 19:42:57.366302 4738405824 optimizer_v2.py:928] Gradients does
not exist for variables ['batch_normalization_v2_54/moving_mean:0',
'batch_normalization_v2_54/moving_variance:0',
'batch_normalization_v2_55/moving_mean:0',
'batch_normalization_v2_55/moving_variance:0',
'batch_normalization_v2_56/moving_mean:0',
'batch_normalization_v2_56/moving_variance:0'] when minimizing the
loss.
And the image I get from the generator (not shown here) is roughly what I would expect without the normalization: everything clumps into one corner because there are extreme values.
The problem is here:
gradients_of_generator = gen_tape.gradient(gen_loss, generator.variables)
You should only be getting gradients for the trainable variables. So you should change it to
gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
The same goes for the three lines following. The variables field includes stuff like the running averages batch norm uses during inference. Because they are not used during training, there are no sensible gradients defined and trying to compute them will lead to a crash.
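For illustration, a minimal sketch (following the answer above, applied to the train_step shown earlier) of the corrected four lines, using trainable_variables throughout:
    gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
    gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)
    generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))
    discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))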

CoreML: creating a custom layer for ONNX RandomNormal

I've trained a VAE in PyTorch that I need to convert to CoreML. From the thread PyTorch VAE fails conversion to onnx I was able to get the ONNX model to export; however, this just pushed the problem one step further, to the ONNX-CoreML stage.
The original function that contains the torch.randn() call is the reparametrize func:
def reparametrize(self, mu, logvar):
    std = logvar.mul(0.5).exp_()
    if self.have_cuda:
        eps = torch.randn(self.bs, self.nz, device='cuda')
    else:
        eps = torch.randn(self.bs, self.nz)
    return eps.mul(std).add_(mu)
The solution is, of course, to create a custom layer, but I'm having problems creating a layer with no inputs (i.e., it's just a randn() call).
I can get the CoreML conversion to complete with this def:
def convert_randn(node):
    params = NeuralNetwork_pb2.CustomLayerParams()
    params.className = "RandomNormal"
    params.description = "Random normal distribution generator"
    params.parameters["dtype"].intValue = node.attrs.get('dtype', 1)
    params.parameters["bs"].intValue = node.attrs.get("shape")[0]
    params.parameters["nz"].intValue = node.attrs.get("shape")[1]
    return params
I do the conversion with:
coreml_model = convert(onnx_model, add_custom_layers=True,
                       image_input_names=['input'],
                       custom_conversion_functions={"RandomNormal": convert_randn})
I should also note that, at the completion of the mlmodel export, the following is printed:
Custom layers have been added to the CoreML model corresponding to the
following ops in the onnx model:
1/1: op type: RandomNormal, op input names and shapes: [], op output
names and shapes: [('62', 'Shape not available')]
Bringing the .mlmodel into Xcode complains that Layer '62' of type 500 has 0 inputs but expects at least 1. So I'm wondering how to specify a kind of "dummy" input to the layer, since it doesn't actually have an input -- it's just a wrapper around torch.randn() (or, more specifically, the ONNX RandomNormal op). I should clarify that I do need the whole VAE, not just the decoder, as I'm actually using the entire process to "error correct" my inputs (i.e., the encoder estimates my z vector based on an input, then the decoder generates the closest generalizable prediction of the input).
Any help greatly appreciated.
UPDATE: Okay, I finally got a version to load in Xcode (thanks to #MattijsHollemans and his book!). The originalConversion.mlmodel is the initial output of converting my model from ONNX to CoreML. To this, I had to manually insert the input for the RandomNormal layer. I made it (64, 28, 28) for no great reason — I know my batch size is 64, and my inputs are 28 x 28 (but presumably it could also be (1, 1, 1), since it's a "dummy"):
spec = coremltools.utils.load_spec('originalConversion.mlmodel')
nn = spec.neuralNetwork
layers = {l.name:i for i,l in enumerate(nn.layers)}
layer_idx = layers["62"] # '62' is the name of the layer -- see above
layer = nn.layers[layer_idx]
layer.input.extend(["dummy_input"])
inp = spec.description.input.add()
inp.name = "dummy_input"
inp.type.multiArrayType.SetInParent()
spec.description.input[1].type.multiArrayType.shape.append(64)
spec.description.input[1].type.multiArrayType.shape.append(28)
spec.description.input[1].type.multiArrayType.shape.append(28)
spec.description.input[1].type.multiArrayType.dataType = ft.ArrayFeatureType.DOUBLE
coremltools.utils.save_spec(spec, "modelWithInsertedInput.mlmodel")
This loads in Xcode, but I have yet to test the functioning of the model in my app. Since the additional layer is simple, and the input is literally a bogus, non-functional input (just to keep Xcode happy), I don't imagine it will be a problem, but I'll post again if it doesn't run properly.
UPDATE 2: Unfortunately, the model doesn't load at runtime. It fails with [espresso] [Espresso::handle_ex_plan] exception=Failed in 2nd reshape after missing custom layer info. What I find very strange and confusing is that, inspecting model.espresso.shape, I see that almost every node has a shape like:
"62" : {
"k" : 0,
"w" : 0,
"n" : 0,
"seq" : 0,
"h" : 0
}
I have two question/concerns: 1) Most obviously, why are all the values zero (this is the case with all but the input nodes), and 2) Why does it appear to be a sequential model, when it's just a fairly conventional VAE? Opening model.espresso.shape for a fully-functioning GAN in the same app, I see that the nodes are of the format:
"54" : {
"k" : 256,
"w" : 16,
"n" : 1,
"h" : 16
}
That is, they contain reasonable shape info, and they don't have seq fields.
Very, very confused...
UPDATE 3: I've also just noticed in the compiler report the error: IMPORTANT: new sequence length computation failed, falling back to old path. Your compilation was sucessful, but please file a radar on Core ML | Neural Networks and attach the model that generated this message.
Here's the original PyTorch model:
class VAE(nn.Module):
    def __init__(self, bs, nz):
        super(VAE, self).__init__()
        self.nz = nz
        self.bs = bs
        self.encoder = nn.Sequential(
            # input is (nc) x 28 x 28
            nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # size = (ndf) x 14 x 14
            nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # size = (ndf*2) x 7 x 7
            nn.Conv2d(ndf * 2, ndf * 4, 3, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # size = (ndf*4) x 4 x 4
            nn.Conv2d(ndf * 4, 1024, 4, 1, 0, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
        )
        self.decoder = nn.Sequential(
            # input is Z, going into a convolution
            nn.ConvTranspose2d(1024, ngf * 8, 4, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 8),
            nn.ReLU(True),
            # size = (ngf*8) x 4 x 4
            nn.ConvTranspose2d(ngf * 8, ngf * 4, 3, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True),
            # size = (ngf*4) x 8 x 8
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True),
            # size = (ngf*2) x 16 x 16
            nn.ConvTranspose2d(ngf * 2, nc, 4, 2, 1, bias=False),
            nn.Sigmoid()
        )
        self.fc1 = nn.Linear(1024, 512)
        self.fc21 = nn.Linear(512, nz)
        self.fc22 = nn.Linear(512, nz)
        self.fc3 = nn.Linear(nz, 512)
        self.fc4 = nn.Linear(512, 1024)
        self.lrelu = nn.LeakyReLU()
        self.relu = nn.ReLU()

    def encode(self, x):
        conv = self.encoder(x)
        h1 = self.fc1(conv.view(-1, 1024))
        return self.fc21(h1), self.fc22(h1)

    def decode(self, z):
        h3 = self.relu(self.fc3(z))
        deconv_input = self.fc4(h3)
        deconv_input = deconv_input.view(-1, 1024, 1, 1)
        return self.decoder(deconv_input)

    def reparametrize(self, mu, logvar):
        std = logvar.mul(0.5).exp_()
        eps = torch.randn(self.bs, self.nz, device='cuda')  # needs custom layer!
        return eps.mul(std).add_(mu)

    def forward(self, x):
        # print("x", x.size())
        mu, logvar = self.encode(x)
        z = self.reparametrize(mu, logvar)
        decoded = self.decode(z)
        return decoded, mu, logvar
To add an input to your Core ML model, you can do the following from Python:
import coremltools
spec = coremltools.utils.load_spec("YourModel.mlmodel")
nn = spec.neuralNetworkClassifier # or just spec.neuralNetwork
layers = {l.name:i for i,l in enumerate(nn.layers)}
layer_idx = layers["your_custom_layer"]
layer = nn.layers[layer_idx]
layer.input.extend(["dummy_input"])
inp = spec.description.input.add()
inp.name = "dummy_input"
inp.type.doubleType.SetInParent()
coremltools.utils.save_spec(spec, "NewModel.mlmodel")
Here, "your_custom_layer" is the name of the layer you want to add the dummy input to. In your model it looks like it's called 62. You can look at the layers dictionary to see the names of all the layers in the model.
Notes:
If your model is not a classifier, use nn = spec.neuralNetwork instead of neuralNetworkClassifier.
I made the new dummy input have the type "double". That means your custom layer gets a double value as input.
You need to specify a value for this dummy input when using the model.
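As an illustration of that last point (a minimal sketch, not from the original answer; the file name, input names, and placeholder image are assumptions), supplying a value for the dummy input when calling the model from Python with coremltools might look like this:
import coremltools
from PIL import Image

model = coremltools.models.MLModel("NewModel.mlmodel")

# Placeholder 28x28 grayscale image standing in for the model's real input;
# replace "input" and the image with whatever your model actually expects.
img = Image.new("L", (28, 28))

prediction = model.predict({
    "input": img,          # the model's real input
    "dummy_input": 0.0,    # any value works; the custom layer ignores it
})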
