pool_2d in Theano throws dimension mismatch error

While trying out the 'pool_2d' method of Theano, I got the error "Wrong number of dimensions: expected 4, got 1 with shape (16, )". The code is given below.
from theano import tensor, shared, function
from theano.tensor.signal.pool import pool_2d
import numpy

class Test:
    def __init__(self):
        i = tensor.dtensor4()
        o = pool_2d(input=i, ws=(2, 2), ignore_border=True, stride=None, pad=(0, 0), mode='max')
        self.pool = function([i], o)

    def pool(self, iput):
        iput = numpy.array(iput)
        iput.shape = (1, 1, 4, 4)
        print(self.pool(iput))

x = Test()
x.pool([1., 2., 3., 4., 5., 6., 7., 8., 9., 10., 11., 12., 13., 14., 15., 16.])
What is the reason for the error here?

It was a silly mistake on my part. I assigned the compiled Theano function to self.pool in __init__ and then defined a method with the same name, pool, causing a name clash: the instance attribute shadows the method, so x.pool(...) calls the Theano function directly with the flat 16-element list instead of the reshaped 4-D array.
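A minimal sketch of the fix (the attribute name _pool_fn is just an illustrative choice, not from the original post): keep the compiled function and the method under different names so neither shadows the other.

class Test:
    def __init__(self):
        i = tensor.dtensor4()
        o = pool_2d(input=i, ws=(2, 2), ignore_border=True, stride=None, pad=(0, 0), mode='max')
        self._pool_fn = function([i], o)  # renamed attribute holding the compiled function

    def pool(self, iput):
        iput = numpy.array(iput)
        iput.shape = (1, 1, 4, 4)  # reshape to the 4-D input pool_2d expects
        print(self._pool_fn(iput))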

How to fix this SettingWithCopyWarning with pd.DataFrame.apply()?

I have a dataframe with two columns: ImageData and Label. The ImageData column holds 2-D arrays of various dimensions, and the Label column is boolean True/False.
I'm trying to convert the data in the "ImageData" column to 128x128 shape (along with minor other transformations). So I'm doing the following:
def convert_image_to_binary_image(img: np.ndarray, threshold: int = 1, max_value: int = 1) -> np.ndarray:
    ret, bin_img = cv.threshold(img, thresh=threshold, maxval=max_value, type=cv.THRESH_BINARY)
    bin_img = bin_img.astype('float32')
    return bin_img

def transform_img_dimension(img: np.ndarray, target_width: int = 128, target_height: int = 128) -> np.ndarray:
    img = img.astype('uint8')
    bin_image = convert_image_to_binary_image(img)
    bin_3dimg = tf.expand_dims(input=bin_image, axis=2)
    bin_img_reshaped = tf.image.resize_with_pad(image=bin_3dimg, target_width=target_width, target_height=target_height, method="bilinear")
    xformed_img = np.squeeze(bin_img_reshaped, axis=2)
    # return xformed_img.copy()
    return xformed_img
I'm calling apply as follows:
testDF["ImageData"] = testDF.apply(lambda row: transform_img_dimension(row["ImageData"]), axis=1)
But that's causing SettingWithCopyWarning.
I tried defining a wrapper function (instead of a lambda) as follows:
def transform_dimension(row: pd.Series, target_width: int = 128, target_height: int = 128) -> np.ndarray:
    copy_row = row.copy(deep=True)
    xformed_data = transform_img_dimension(copy_row["ImageData"], target_width=target_width, target_height=target_height)
    del copy_row
    return xformed_data
And updated the call to apply as follows:
testDF["ImageData"] = testDF.apply(transform_dimension, axis=1)
However, this does not resolve the problem. What is the fix for this warning in my case?
Update 1:
If I rewrite it as follows, I don't get the warning:
testDF2 = testDF.copy(deep=True)
testDF2["ImageData"] = testDF.apply(lambda row: transform_img_dimension(row["ImageData"]), axis=1)
Isn't it a memory overhead now to hold two dataframes? Should I delete the original dataframe, testDF, afterwards?
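A possible sketch (my own illustration, assuming the warning comes from testDF itself being a slice/view of another frame): make the deep copy once and rebind the original name, so only one DataFrame stays referenced and the old object can be garbage-collected.

testDF = testDF.copy(deep=True)  # standalone copy replaces the original binding
testDF["ImageData"] = testDF.apply(lambda row: transform_img_dimension(row["ImageData"]), axis=1)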

Problem with adding smiles to photos with a convolutional autoencoder

I have a dataset with images and another dataset describing their attributes:
There are a lot of pictures: people with and without sunglasses, smiles and other attributes. What I want to do is be able to add smiles to photos where people are not smiling.
I've started like this:
smile_ids = attrs['Smiling'].sort_values(ascending=False).iloc[100:125].index.values
smile_data = data[smile_ids]
no_smile_ids = attrs['Smiling'].sort_values(ascending=True).head(5).index.values
no_smile_data = data[no_smile_ids]
eyeglasses_ids = attrs['Eyeglasses'].sort_values(ascending=False).head(25).index.values
eyeglasses_data = data[eyeglasses_ids]
sunglasses_ids = attrs['Sunglasses'].sort_values(ascending=False).head(5).index.values
sunglasses_data = data[sunglasses_ids]
When I plot them, they look fine:
plot_gallery(smile_data, IMAGE_H, IMAGE_W, n_row=5, n_col=5, with_title=True, titles=smile_ids)
plot_gallery looks like this:
def plot_gallery(images, h, w, n_row=3, n_col=6, with_title=False, titles=[]):
    plt.figure(figsize=(1.5 * n_col, 1.7 * n_row))
    plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
    for i in range(n_row * n_col):
        plt.subplot(n_row, n_col, i + 1)
        try:
            plt.imshow(images[i].reshape((h, w, 3)), cmap=plt.cm.gray, vmin=-1, vmax=1, interpolation='nearest')
            if with_title:
                plt.title(titles[i])
            plt.xticks(())
            plt.yticks(())
        except:
            pass
Then I do:
def to_latent(pic):
    with torch.no_grad():
        inputs = torch.FloatTensor(pic.reshape(-1, 45*45*3))
        inputs = inputs.to('cpu')
        autoencoder.eval()
        output = autoencoder.encode(inputs)
        return output

def from_latent(vec):
    with torch.no_grad():
        inputs = vec.to('cpu')
        autoencoder.eval()
        output = autoencoder.decode(inputs)
        return output
After that:
smile_latent = to_latent(smile_data).mean(axis=0)
no_smile_latent = to_latent(no_smile_data).mean(axis=0)
sunglasses_latent = to_latent(sunglasses_data).mean(axis=0)
smile_vec = smile_latent-no_smile_latent
sunglasses_vec = sunglasses_latent - smile_latent
And finally:
def add_smile(ids):
    for id in ids:
        pic = data[id:id+1]
        latent_vec = to_latent(pic)
        latent_vec[0] += smile_vec
        pic_output = from_latent(latent_vec)
        pic_output = pic_output.view(-1, 45, 45, 3).cpu()
        plot_gallery([pic, pic_output], IMAGE_H, IMAGE_W, n_row=1, n_col=2)

def add_sunglasses(ids):
    for id in ids:
        pic = data[id:id+1]
        latent_vec = to_latent(pic)
        latent_vec[0] += sunglasses_vec
        pic_output = from_latent(latent_vec)
        pic_output = pic_output.view(-1, 45, 45, 3).cpu()
        plot_gallery([pic, pic_output], IMAGE_H, IMAGE_W, n_row=1, n_col=2)
But when I execute this line I don't get any faces:
add_smile(no_smile_ids)
The output:
Could someone please explain where is my mistake or why it can happen? Thanks for any help.
Added: checking the shape of pic_output:
Wild guess, but it seems you are reshaping your images with view instead of permuting the axes. view reinterprets the underlying memory rather than reordering dimensions, which has the undesired effect of mixing information across the batch/channel axes.
pic_output = pic_output.view(-1, 45, 45, 3).cpu()
should be replaced with
pic_output = pic_output.permute(0, 2, 3, 1).cpu()
Assuming tensor pic_output is already shaped like (-1, 3, 45, 45).
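A small sketch (my own illustration, not from the original answer) of why view and permute differ on a channels-first tensor:

import torch

# Toy batch in channels-first layout: (batch, channels, height, width)
t = torch.arange(2 * 3 * 4 * 5).reshape(2, 3, 4, 5)

# view reinterprets the same flat memory with a new shape,
# so values end up associated with the wrong channel/pixel.
wrong = t.view(2, 4, 5, 3)

# permute reorders the axes, keeping each value at its original
# (batch, height, width, channel) coordinate.
right = t.permute(0, 2, 3, 1)

print(wrong.shape, right.shape)   # both torch.Size([2, 4, 5, 3])
print(torch.equal(wrong, right))  # False: the contents differ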

Pyomo: Param and Var not constructed when placed in a list

While modeling an optimisation problem using Pyomo, I noticed a weird behaviour when using a list of Var or Param: I always get the following error: ValueError: Evaluating the numeric value of parameter 'SimpleParam' before the Param has been constructed (there is currently no value to return).
The following code (minimise 4*x + 1 such that x >= 0) runs exactly as expected:
import pyomo.environ as pyo
from pyomo.opt import SolverFactory

def _obj(model):
    return model.c*model.x + 1

model = pyo.ConcreteModel()
model.x = pyo.Var(domain=pyo.NonNegativeReals)
model.c = pyo.Param(initialize=lambda model: 4, domain=pyo.NonNegativeReals)
model.obj = pyo.Objective(rule=_obj, sense=pyo.minimize)
opt = SolverFactory('glpk')
opt.solve(model)
but as soon as I put model.x and model.c in lists, the program crashes when creating the objective function:
import pyomo.environ as pyo
from pyomo.opt import SolverFactory

def _obj(model):
    return model.c[0]*model.x[0] + 1

model = pyo.ConcreteModel()
model.x = [pyo.Var(domain=pyo.NonNegativeReals)]
model.c = [pyo.Param(initialize=lambda model: 4, domain=pyo.NonNegativeReals)]
model.obj = pyo.Objective(rule=_obj, sense=pyo.minimize)
opt = SolverFactory('glpk')
opt.solve(model)
What is causing this error? Is this desired behaviour for a reason that I don't understand, or is it a bug? Anyway, how can I use lists of Params and Vars in a problem? I know that I could theoretically flatten all of my parameters and variables into a single IndexedVar or IndexedParam and handle the new indices myself, but that would be tedious since the ranges of the 3rd and 4th indices of my x and c depend on the 1st and 2nd indices, so my code would be a lot clearer if I could use lists.
More precisely, I have code looking like this (though I am still interested in knowing why the MWE above does not work):
# I, J are lists of indices and N is a list of integer values
model.Vs = [pyo.RangeSet(N[i]) for i in range(len(N))]
model.xs = [[pyo.Var(model.Vs[i], model.Vs[j]) for j in J] for i in I]
model.cs = [[pyo.Param(model.Vs[i], model.Vs[j]) for j in J] for i in I]

def _obj(model):
    return sum(model.xs[i][j][k, ell] * model.xs[i][j][k, ell]
               for i in I for j in J
               for k in model.Vs[i] for ell in model.Vs[j])

model.obj = pyo.Objective(rule=_obj, sense=pyo.minimize)
model.constraints = [
    [pyo.Constraint(model.Vs[i], model.Vs[j], rule=...) for j in J]
    for i in I
]
opt = SolverFactory('glpk')
opt.solve(model)
Your minimal example
import pyomo.environ as pyo
from pyomo.opt import SolverFactory

def _obj(model):
    return model.c[0]*model.x[0] + 1

model = pyo.ConcreteModel()
model.x = [pyo.Var(domain=pyo.NonNegativeReals)]
model.c = [pyo.Param(initialize=lambda model: 4, domain=pyo.NonNegativeReals)]
model.obj = pyo.Objective(rule=_obj, sense=pyo.minimize)
opt = SolverFactory('glpk')
opt.solve(model)
generates the following error:
ValueError: Evaluating the numeric value of parameter 'SimpleParam' before
the Param has been constructed (there is currently no value to return).
The reason is that you are not directly attaching the Var and Param you are creating to the model. A lot happens when you attach a Pyomo modeling component to a Block (ConcreteModel objects are instances of constructed Blocks):
- The component is assigned a name (that matches the Block attribute name).
- The component is inserted into the model hierarchy (basically, pointers are set so that methods can walk up and down the hierarchy).
- The component is categorized so that writers, solvers, and transformations can find it later.
- (If the Block has already been constructed) the component is automatically constructed.
By placing the component in a list, you are effectively "hiding" its existence from Pyomo. The first error you get has to do with the last bullet (the Param hasn't been constructed). However, simply constructing the Param and Var as you build the list would be insufficient, as the other actions won't take place and you will just hit a different error later (the next error would be an obscure one when the LP writer comes across a Var in the Objective that it had not found when it first walked the model hierarchy).
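A minimal sketch of how the MWE could be fixed under that explanation (my own illustration, not part of the original answer, assuming a Pyomo version that accepts a plain range as an index set): attach indexed components to the model instead of wrapping scalar ones in a Python list.

import pyomo.environ as pyo
from pyomo.opt import SolverFactory

model = pyo.ConcreteModel()
# A one-element index set stands in for the single-item list from the question.
model.x = pyo.Var(range(1), domain=pyo.NonNegativeReals)
model.c = pyo.Param(range(1), initialize=4, domain=pyo.NonNegativeReals)

def _obj(model):
    return model.c[0] * model.x[0] + 1

model.obj = pyo.Objective(rule=_obj, sense=pyo.minimize)
opt = SolverFactory('glpk')
opt.solve(model)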
Perhaps this will help. I'm not sure I can answer why your example fails other than to say that pyomo is a modeling language that passes a structured math problem to a solver and the sets need to be discretely defined, not in lists of objects. Maybe somebody else can pitch in and explain it more clearly.
In your modeling, it appears you want to construct some kind of ragged set for x[i,j] where the range of j can vary based on i. You typically want to make sets for both I and J in order to support various constraint constructs. Then you can make a subset of "valid" (i, j) tuples for whatever model component needs to be indexed by this ragged set. You can either use this subset as the basis of iteration or use it to check membership if you are constructing things on-the-fly.
Here is an example using your list N:
import pyomo.environ as pyo

N = [1, 4, 3]

m = pyo.ConcreteModel()
m.I = pyo.Set(initialize=range(len(N)))
m.J = pyo.Set(initialize=range(max(N)))
m.IJ = pyo.Set(within=m.I * m.J,
               initialize=[(i, j) for i in range(len(N)) for j in range(N[i])])
m.x = pyo.Var(m.IJ, domain=pyo.NonNegativeReals)

def _obj(model):
    return sum(m.x[t] for t in m.IJ)

m.obj = pyo.Objective(rule=_obj)

def constrain_x2(model):
    return sum(m.x[2, j] for j in m.J if (2, j) in m.IJ) >= 1

m.c1 = pyo.Constraint(rule=constrain_x2)

m.pprint()
Yields:
4 Set Declarations
    I : Dim=0, Dimen=1, Size=3, Domain=None, Ordered=False, Bounds=(0, 2)
        [0, 1, 2]
    IJ : Dim=0, Dimen=2, Size=8, Domain=IJ_domain, Ordered=False, Bounds=None
        [(0, 0), (1, 0), (1, 1), (1, 2), (1, 3), (2, 0), (2, 1), (2, 2)]
    IJ_domain : Dim=0, Dimen=2, Size=12, Domain=None, Ordered=False, Bounds=None
        Virtual
    J : Dim=0, Dimen=1, Size=4, Domain=None, Ordered=False, Bounds=(0, 3)
        [0, 1, 2, 3]

1 Var Declarations
    x : Size=8, Index=IJ
        Key : Lower : Value : Upper : Fixed : Stale : Domain
        (0, 0) : 0 : None : None : False : True : NonNegativeReals
        (1, 0) : 0 : None : None : False : True : NonNegativeReals
        (1, 1) : 0 : None : None : False : True : NonNegativeReals
        (1, 2) : 0 : None : None : False : True : NonNegativeReals
        (1, 3) : 0 : None : None : False : True : NonNegativeReals
        (2, 0) : 0 : None : None : False : True : NonNegativeReals
        (2, 1) : 0 : None : None : False : True : NonNegativeReals
        (2, 2) : 0 : None : None : False : True : NonNegativeReals

1 Objective Declarations
    obj : Size=1, Index=None, Active=True
        Key : Active : Sense : Expression
        None : True : minimize : x[0,0] + x[1,0] + x[1,1] + x[1,2] + x[1,3] + x[2,0] + x[2,1] + x[2,2]

1 Constraint Declarations
    c1 : Size=1, Index=None, Active=True
        Key : Lower : Body : Upper : Active
        None : 1.0 : x[2,0] + x[2,1] + x[2,2] : +Inf : True

7 Declarations: I J IJ_domain IJ x obj c1

Finding the minimum using fmin()

I am trying to minimize function() with respect to two parameters. I have done so by creating mesh arrays and using them in function() to return similarly meshed array values. However, upon using fmin() to find the minimum, the output says that the operands could not be broadcast together.
The code is pasted below:
import numpy as np
from scipy.optimize import fmin
import matplotlib.pyplot as plt

i = 0
x_values = np.arange(-10, 10, 2)
y_values = np.arange(-10, 10, 2)
x_mesh = np.empty((0, len(x_values)))
y_mesh = np.empty((0, len(y_values)))

for i in range(len(x_values)):
    y_mesh = np.vstack((y_mesh, y_values))

i = 0
for i in range(len(y_values)):
    x_mesh = np.vstack((x_mesh, x_values))

y_mesh = np.transpose(y_mesh)

def function(x_mesh, y_mesh):
    return (2*x_mesh**2 + y_mesh**2)

''' Want to minimize function '''
x_start = np.zeros((len(x_values), len(y_values)))
y_start = x_start

y = fmin(lambda x_mesh: function(x_mesh, y_mesh), (x_start, y_start), full_output=True, disp=0)
The output shown was:
File "C:/Users/User/Documents/Year2/Programming/elrter.py", line 42, in function
return (2*x_mesh**2 + y_mesh**2)
ValueError: operands could not be broadcast together with shapes (200,) (10,10)
But why does this happen? What is the solution?
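For context, a minimal sketch (my own illustration, not an answer attached to the original post) of how fmin expects its arguments: the objective receives a single flat 1-D parameter vector, so the two unknowns are packed into one array rather than passing whole mesh grids as the starting point.

import numpy as np
from scipy.optimize import fmin

# fmin minimizes over a single flat parameter vector p, not over mesh grids,
# so the two unknowns are packed as p[0] and p[1].
def objective(p):
    x, y = p
    return 2*x**2 + y**2

result = fmin(objective, x0=np.array([1.0, 1.0]), full_output=True, disp=0)
print(result[0])  # approximately [0, 0], the minimizer of 2x^2 + y^2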

How to provide different values for the same object?

I am new to Python and trying to make a simple fishing game. I would like the object 'salmon' to be added to the inventory more than once, with different values for 'weight' and 'resist'. With the inventory.add_item function below, it appears that either only one item is added to the inventory or the same item is added twice with the same values.
How can I add the same type of object multiple times with different values?
import random

class Inventory(object):
    def __init__(self):
        self.fishes = {}

    def add_item(self, fish):
        self.fishes[fish.species] = fish

    def print_items(self):
        print('\t'.join(['Weight', 'Resist', 'Species']))
        for fish in self.fishes.values():
            print('\t'.join([str(x) for x in [fish.weight, fish.resist, fish.species]]))

inventory = Inventory()

class Fish(object):
    def __init__(self, weight, resist, species):
        self.weight = weight
        self.resist = resist
        self.species = species

salmon = Fish(random.randint(2, 10), random.randint(5, 7), 'Atlantic Salmon')
print('Going fishing...\nCaught a Salmon!')
inventory.add_item(Fish(salmon.weight, salmon.resist, salmon.species))
inventory.add_item(Fish(salmon.weight, salmon.resist, salmon.species))
inventory.print_items()
Try this:
salmon = Fish(random.randint(2, 10), random.randint(5, 7), 'Atlantic Salmon')
salmon2 = Fish(random.randint(2, 10), random.randint(5, 7), 'Atlantic Salmon2')
print('Going fishing...\nCaught a Salmon!')
inventory.add_item(salmon)
inventory.add_item(salmon2)
inventory.print_items()
First, the key for the dictionary must be unique: fish.species is used as the key, so adding a second fish with the same species overwrites the first entry. The same applies to the objects you add: both Fish instances were built from the same salmon's weight and resist, so they had identical values anyway.
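One possible alternative (my own sketch, not part of the original answer): store the catches in a list instead of a species-keyed dict, and roll the random values per catch, so two salmon can coexist with different weights.

import random

class Inventory(object):
    def __init__(self):
        self.fishes = []  # a list allows duplicate species

    def add_item(self, fish):
        self.fishes.append(fish)

    def print_items(self):
        print('\t'.join(['Weight', 'Resist', 'Species']))
        for fish in self.fishes:
            print('\t'.join(str(x) for x in [fish.weight, fish.resist, fish.species]))

class Fish(object):
    def __init__(self, weight, resist, species):
        self.weight = weight
        self.resist = resist
        self.species = species

inventory = Inventory()
# Each catch gets its own random weight and resist.
inventory.add_item(Fish(random.randint(2, 10), random.randint(5, 7), 'Atlantic Salmon'))
inventory.add_item(Fish(random.randint(2, 10), random.randint(5, 7), 'Atlantic Salmon'))
inventory.print_items()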
