When I run the following script, I run into a couple of errors:
import tensorflow as tf
import numpy as np
import seaborn as sns
import random
#set random seed:
random.seed(42)
def potential(N):
points = np.random.rand(N,2)*10
values = np.array([np.exp((points[i][0]-5.0)**2 + (points[i][1]-5.0)**2) for i in range(N)])
return points, values
def init_weights(shape,var_name):
"""
Xavier initialisation of neural networks
"""
init = tf.contrib.layers.xavier_initializer()
return tf.get_variable(initializer=init,name = var_name,shape=shape)
def neural_net(X):
with tf.variable_scope("model",reuse=tf.AUTO_REUSE):
w_h = init_weights([2,10],"w_h")
w_h2 = init_weights([10,10],"w_h2")
w_o = init_weights([10,1],"w_o")
### bias terms:
bias_1 = init_weights([10],"bias_1")
bias_2 = init_weights([10],"bias_2")
bias_3 = init_weights([1],"bias_3")
h = tf.nn.relu(tf.add(tf.matmul(X, w_h),bias_1))
h2 = tf.nn.relu(tf.add(tf.matmul(h, w_h2),bias_2))
return tf.nn.relu(tf.add(tf.matmul(h2, w_o),bias_3))
X = tf.placeholder(tf.float32, [None, 2])
with tf.Session() as sess:
model = neural_net(X)
## define optimizer:
opt = tf.train.AdagradOptimizer(0.0001)
values =tf.placeholder(tf.float32, [None, 1])
squared_loss = tf.reduce_mean(tf.square(model-values))
## define model variables:
model_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,"model")
train_model = opt.minimize(squared_loss,var_list=model_vars)
sess.run(tf.global_variables_initializer())
for i in range(10):
points, val = potential(100)
train_feed = {X : points,values: val.reshape((100,1))}
sess.run(train_model,feed_dict = train_feed)
print(sess.run(model,feed_dict = {X:points}))
### plot the approximating model:
res = 0.1
xy = np.mgrid[0:10:res, 0:10:res].reshape(2,-1).T
values = sess.run(model, feed_dict={X: xy})
sns.heatmap(values.reshape((int(10/res),int(10/res))),xticklabels=False,yticklabels=False)
On the first run I get:
[nan] [nan] [nan] [nan] [nan] [nan] [nan]]
Traceback (most recent call last):
...
File "/Users/aidanrockea/anaconda/lib/python3.6/site-packages/seaborn/matrix.py", line 485, in heatmap
    yticklabels, mask)
File "/Users/aidanrockea/anaconda/lib/python3.6/site-packages/seaborn/matrix.py", line 167, in __init__
    cmap, center, robust)
File "/Users/aidanrockea/anaconda/lib/python3.6/site-packages/seaborn/matrix.py", line 206, in _determine_cmap_params
    vmin = np.percentile(calc_data, 2) if robust else calc_data.min()
File "/Users/aidanrockea/anaconda/lib/python3.6/site-packages/numpy/core/_methods.py", line 29, in _amin
    return umr_minimum(a, axis, None, out, keepdims)
ValueError: zero-size array to reduction operation minimum which has no identity
On the second run I have:
ValueError: Variable model/w_h/Adagrad/ already exists, disallowed.
Did you mean to set reuse=True or reuse=tf.AUTO_REUSE in VarScope?
It's not clear to me why I get either of these errors. Furthermore, when I use:
for i in range(10):
points, val = potential(10)
train_feed = {X : points,values: val.reshape((10,1))}
sess.run(train_model,feed_dict = train_feed)
print(sess.run(model,feed_dict = {X:points}))
I find that on the first run, I sometimes get a network that has collapsed to the constant function with output 0. Right now my hunch is that this might simply be a numerics problem but I might be wrong.
If so, it's a serious problem as the model I have used here is very simple.
Right now my hunch is that this might simply be a numerics problem

Indeed: when running potential(100) you sometimes get values as large as 1E21. The largest values will dominate your loss function and will drive the network parameters.
Even when normalizing your target values e.g. to unit variance, the problem of the largest values dominating the loss would still remain (look e.g. at plt.hist(np.log(potential(100)[1]), bins = 100)).
If you can, try learning the log of val instead of val itself. Note however that then you are changing the assumption of the loss function from 'predictions follow a normal distribution around the target values' to 'log predictions follow a normal distribution around log of the target values'.
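As a minimal sketch of that suggestion, reusing the placeholders and training loop from the question (only the targets change; the network then predicts log-values, which have to be exponentiated before comparing with val):
for i in range(10):
    points, val = potential(100)
    log_val = np.log(val).reshape((100, 1))   # for this potential, log(val) is just (x-5)**2 + (y-5)**2
    sess.run(train_model, feed_dict={X: points, values: log_val})
    # predictions are now in log space; exponentiate to get back to the original scale
    print(np.exp(sess.run(model, feed_dict={X: points})))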
Related
I am generating some (noisy) data-points (y) with some known parameters (m,c) that represent the equation of a straight line. Using sampling-based Bayesian methods, I now want to know the true values of parameters (m,c) from the data. Therefore, I am using DE Metropolis (PyMC3) to estimate the true parameters.
I am getting theano error theano.gof.fg.MissingInputError: Input 0 of the graph (indices start from 0), used to compute sigmoid(c_interval__), was not provided and not given a value.
Theano version: 1.0.4
PyMC3 version: 3.9.1
import matplotlib.pyplot as plt
import numpy as np
import arviz as az
import pymc3
import theano.tensor as tt
from theano.compile.ops import as_op
plt.style.use("ggplot")
# define a theano Op for our likelihood function
class LogLike(tt.Op):
itypes = [tt.dvector] # expects a vector of parameter values when called
otypes = [tt.dscalar] # outputs a single scalar value (the log likelihood)
def __init__(self, loglike, data, x, sigma):
# add inputs as class attributes
self.likelihood = loglike
self.data = data
self.x = x
self.sigma = sigma
def perform(self, node, inputs, outputs):
# the method that is used when calling the Op
theta, = inputs # this will contain my variables
# call the log-likelihood function
logl = self.likelihood(theta, self.x, self.data, self.sigma)
outputs[0][0] = np.array(logl) # output the log-likelihood
def my_model(theta, x):
y = theta[0]*x + theta[1]
return y
def my_loglike(theta, x, data, sigma):
model = my_model(theta, x)
ll = -(0.5/sigma**2)*np.sum((data - model)**2)
return ll
# set up our data
N = 10 # number of data points
sigma = 1. # standard deviation of noise
x = np.linspace(0., 9., N)
mtrue = 0.4 # true gradient
ctrue = 3. # true y-intercept
truemodel = my_model([mtrue, ctrue], x)
# make data
np.random.seed(716742) # set random seed, so the data is reproducible each time
data = sigma*np.random.randn(N) + truemodel
print(data)
ndraws = 3000 # number of draws from the distribution
# create our Op
logl = LogLike(my_loglike, data, x, sigma)
# use PyMC3 to sample from the log-likelihood
with pymc3.Model():
# uniform priors on m and c
m = pymc3.Uniform('m', lower=-10., upper=10.)
c = pymc3.Uniform('c', lower=-10., upper=10.)
# convert m and c to a tensor vector
theta = tt.as_tensor_variable([m, c])
# use a DensityDist (use a lambda function to "call" the Op)
pymc3.DensityDist('likelihood', lambda v: logl(v), observed={'v': theta})
step = pymc3.DEMetropolis()
trace = pymc3.sample(ndraws, step)
# plot the traces
axes = az.plot_trace(trace)
fig = axes.ravel()[0].figure
fig.savefig('./trace_plots.png')
Find the full trace here:
Population sampling (4 chains)
DEMetropolis: [c, m]
Attempting to parallelize chains to all cores. You can turn this off with `pm.sample(cores=1)`.
Population parallelization failed. Falling back to sequential stepping of chains.
Sampling 4 chains for 0 tune and 4_000 draw iterations (0 + 16_000 draws total) took 5 seconds.
Traceback (most recent call last):
File "test.py", line 75, in <module>
trace = pymc3.sample(ndraws, step)
File "/home/csl_user/.local/lib/python3.7/site-packages/pymc3/sampling.py", line 599, in sample
idata = arviz.from_pymc3(trace, **ikwargs)
File "/home/csl_user/.local/lib/python3.7/site-packages/arviz/data/io_pymc3.py", line 531, in from_pymc3
save_warmup=save_warmup,
File "/home/csl_user/.local/lib/python3.7/site-packages/arviz/data/io_pymc3.py", line 159, in __init__
self.observations, self.multi_observations = self.find_observations()
File "/home/csl_user/.local/lib/python3.7/site-packages/arviz/data/io_pymc3.py", line 172, in find_observations
multi_observations[key] = val.eval() if hasattr(val, "eval") else val
File "/home/csl_user/.local/lib/python3.7/site-packages/theano/gof/graph.py", line 522, in eval
self._fn_cache[inputs] = theano.function(inputs, self)
File "/home/csl_user/.local/lib/python3.7/site-packages/theano/compile/function.py", line 317, in function
output_keys=output_keys)
File "/home/csl_user/.local/lib/python3.7/site-packages/theano/compile/pfunc.py", line 486, in pfunc
output_keys=output_keys)
File "/home/csl_user/.local/lib/python3.7/site-packages/theano/compile/function_module.py", line 1839, in orig_function
name=name)
File "/home/csl_user/.local/lib/python3.7/site-packages/theano/compile/function_module.py", line 1487, in __init__
accept_inplace)
File "/home/csl_user/.local/lib/python3.7/site-packages/theano/compile/function_module.py", line 181, in std_fgraph
update_mapping=update_mapping)
File "/home/csl_user/.local/lib/python3.7/site-packages/theano/gof/fg.py", line 175, in __init__
self.__import_r__(output, reason="init")
File "/home/csl_user/.local/lib/python3.7/site-packages/theano/gof/fg.py", line 346, in __import_r__
self.__import__(variable.owner, reason=reason)
File "/home/csl_user/.local/lib/python3.7/site-packages/theano/gof/fg.py", line 391, in __import__
raise MissingInputError(error_msg, variable=r)
theano.gof.fg.MissingInputError: Input 0 of the graph (indices start from 0), used to compute sigmoid(c_interval__), was not provided and not given a value. Use the Theano flag exception_verbosity='high', for more information on this error.
I've run into the same problem when following the example how to sample from a black box likelihood found here:
https://docs.pymc.io/notebooks/blackbox_external_likelihood.html
This seems to be a version problem. I'm on Manjaro Linux and also ran Theano 1.0.4 and PyMC3 3.9 using Python 3.8. I could solve the issue and make the code work by downgrading to Python 3.7 and PyMC3 3.8. It appears to be an issue with Python 3.8, as simply downgrading PyMC3 did not solve it for me. I am far from an expert in PyMC3, so I don't have a solution for fixing this on the newest versions, but for now downgrading makes my simulations run.
Hope this helps.
Edit: The devs seem to be aware of this; there is an open issue on their GitHub page:
https://github.com/pymc-devs/pymc3/issues/4002
I am trying to define func(x) in order to use the genetic algs library here:
https://github.com/bobirdmi/genetic-algorithms/tree/master/examples
However, when I try to use sga.init_random_population(population_size, params, interval), the code complains about me using tf.Tensors as Python bools.
I am only referencing one bool in the entire code (elitism), so I have no idea why this error is even showing up. I asked others who have used sga.init_... and my inputs/setup are fine. Any suggestions would be greatly appreciated.
Full traceback:
Traceback (most recent call last):
File "C:\Users\Eric\eclipse-workspace\hw1\ga2.py", line 74, in <module>
sga.init_random_population(population_size, params, interval)
File "C:\Program Files\Python36\lib\site-packages\geneticalgs\real_ga.py", line 346, in init_random_population
self._sort_population()
File "C:\Program Files\Python36\lib\site-packages\geneticalgs\standard_ga.py", line 386, in _sort_population
self.population.sort(key=lambda x: x.fitness_val, reverse=True)
File "C:\Program Files\Python36\lib\site-packages\tensorflow\python\framework\ops.py", line 671, in __bool__
raise TypeError("Using a `tf.Tensor` as a Python `bool` is not allowed. "
TypeError: Using a `tf.Tensor` as a Python `bool` is not allowed. Use `if t is not None:` instead of `if t:` to test if a tensor is defined, and use TensorFlow ops such as tf.cond to execute subgraphs conditioned on the value of a tensor.
Code:
import hw1
#import matplotlib
from geneticalgs import BinaryGA, RealGA, DiffusionGA, MigrationGA
#import numpy as np
#import csv
#import time
#import pickle
#import math
#import matplotlib.pyplot as plt
from keras.optimizers import Adam
from hw1 import x_train, y_train, x_test, y_test
from keras.losses import mean_squared_error
#import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense, Dropout
# GA standard settings
generation_num = 50
population_size = 16
elitism = True
selection = 'rank'
tournament_size = None # in case of tournament selection
mut_type = 1
mut_prob = 0.05
cross_type = 1
cross_prob = 0.95
optim = 'min' # minimize or maximize a fitness value? May be 'min' or 'max'.
interval = (-1, 1)
# Migration GA settings
period = 5
migrant_num = 3
cloning = True
def func(x):
#dimensions of weights and biases
#layer0weights = [10][23]
#layer0biases = [10]
#layer1weights = [10][20]
#layer1biases = [20]
#layer2weights = [1][20]
#layer2biases = [1]
#split up x for weights and biases
lay0 = x[0:230]
bias0 = x[230:240]
lay1 = x[240:440]
bias1 = x[440:460]
lay2 = x[460:480]
bias2 = x[480:481]
#fit to the shape of the actual model
lay0 = lay0.reshape(23,10)
bias0 = bias0.reshape(10,)
lay1 = lay1.reshape(10,20)
bias1 = bias1.reshape(20,)
lay2 = lay2.reshape(20,1)
bias2 = bias2.reshape(1,)
#set the newly shaped object to layers
hw1.model.layers[0].set_weights([lay0, bias0])
hw1.model.layers[1].set_weights([lay1, bias1])
hw1.model.layers[2].set_weights([lay2, bias2])
res = hw1.model.predict(x_train)
error = mean_squared_error(res,y_train)
return error
ga_model = Sequential()
ga_model.add(Dense(10, input_dim=23, activation='relu'))
ga_model.add(Dense(20, activation='relu'))
ga_model.add(Dense(1, activation='sigmoid'))
sga = RealGA(func, optim=optim, elitism=elitism, selection=selection,
mut_type=mut_type, mut_prob=mut_prob,
cross_type=cross_type, cross_prob=cross_prob)
params = 481
sga.init_random_population(population_size, params, interval)
optimal = sga.best_solution[0]
predict = func(optimal)
print(predict)
TensorFlow builds a computational graph of operations that are executed later in a TensorFlow session.
geneticalgs.RealGA.init_random_population is an operation that uses numpy.random.uniform to generate a numpy array.
The generated population being a Tensor object could mean that either:
numpy.random.uniform invoked in geneticalgs.RealGA.init_random_population was decorated to return Tensors, or
numpy.random.uniform was added to the computation graph, to be executed in a session.
I would try executing the program eagerly by enabling eager execution:
tf.enable_eager_execution()
You can also, in a way, execute just the parts that you care about eagerly:
size = tf.placeholder(tf.int64)
dim = tf.placeholder(tf.int64)
interval_ph = tf.placeholder(tf.int64, shape=(2,))
init_random_population = tf.py_func(
    sga.init_random_population, [size, dim, interval_ph], [])
with tf.Session() as session:
    session.run(
        init_random_population,
        {size: population_size, dim: params, interval_ph: interval})
Copying and pasting this code into the python3 REPL works, but when I run it as a script, I get a type error.
"""Softmax."""
scores = [3.0, 1.0, 0.2]
import numpy as np
from math import e
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
results = []
x = np.transpose(x)
for j in range(len(x)):
exps = [np.exp(s) for s in x[j]]
_sum = np.sum(np.exp(x[j]))
softmax = [i / _sum for i in exps]
results.append(softmax)
final = np.vstack(results)
return np.transpose(final)
# pass # TODO: Compute and return softmax(x)
print(softmax(scores))
# Plot softmax curves
import matplotlib.pyplot as plt
x = np.arange(-2.0, 6.0, 0.1)
scores = np.vstack([x, np.ones_like(x), 0.2 * np.ones_like(x)])
plt.plot(x, softmax(scores).T, linewidth=2)
plt.show()
The error I get running the script via CLI is the following:
bash$ python3 softmax.py
Traceback (most recent call last):
File "softmax.py", line 22, in <module>
print(softmax(scores))
File "softmax.py", line 13, in softmax
exps = [np.exp(s) for s in x[j]]
TypeError: 'numpy.float64' object is not iterable
This kind of crap makes me so nervous about running interpreted code in production with libraries like these; seriously, unreliable and undefined behaviour is totally unacceptable IMO.
At the top of your script, you define
scores = [3.0, 1.0, 0.2]
This is the argument in your first call of softmax(scores). When converted to a numpy array, scores is a 1-d array with shape (3,).
You pass scores into the function, and then it is converted to a numpy array by the call
x = np.transpose(x)
However, it is still 1-d, with shape (3,). The transpose function swaps dimensions, but it does not add a dimension to a 1-d array. In effect, transpose is a "no-op" when applied to a 1-d array.
Then, in the loop that follows, x[j] is a scalar of type numpy.float64, so [np.exp(s) for s in x[j]] does not make sense: x[j] is not a sequence, and you can't iterate over it.
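Both facts are easy to check directly (a quick demonstration, nothing more):
import numpy as np

scores = np.array([3.0, 1.0, 0.2])
print(scores.shape)                    # (3,)
print(np.transpose(scores).shape)      # (3,) -- still 1-d, transpose is a no-op
print(type(np.transpose(scores)[0]))   # <class 'numpy.float64'> -- a scalar, not a row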
In the bottom part of your script, you redefine scores as
x = np.arange(-2.0, 6.0, 0.1)
scores = np.vstack([x, np.ones_like(x), 0.2 * np.ones_like(x)])
Now scores is a 2-d array (scores.shape is (3, 80)), so you don't get an error when you call softmax(scores).
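If you want a single softmax that handles both cases, here is a vectorized sketch; it assumes, as the plotting code does, that each column of a 2-d input is one set of scores, which is an interpretation on my part rather than anything required by the exercise:
import numpy as np

def softmax(x):
    """Column-wise softmax; a 1-d input is treated as a single set of scores."""
    x = np.asarray(x, dtype=float)
    if x.ndim == 1:
        x = x[:, np.newaxis]
    e = np.exp(x - x.max(axis=0))      # subtract the column max for numerical stability
    return np.squeeze(e / e.sum(axis=0))
With scores = [3.0, 1.0, 0.2] this returns a length-3 vector, and with the stacked 2-d scores it returns the same (3, 80) array the plotting code expects.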
Right at the start of the session created with tf.Session(), I run both tf.global_variables_initializer and tf.local_variables_initializer, and unfortunately I keep receiving error messages about an "Uninitialized value Variable_1". Why?
I did some searching around and found this StackExchange question, but the answer doesn't help my situation. So I looked through the TensorFlow API and found an operation that should return any uninitialized variables, tf.report_uninitialized_variables(). I printed the result and received an empty pair of square brackets, which doesn't make any sense considering my error messages. So what's going on? I've been clawing my eyes out for a day now. Any help is appreciated.
import tensorflow as tf
import os
from tqdm import tqdm
#hyperparam
training_iterations = 100
PATH = "C:\\Users\\ratno\\Desktop\\honest chaos\\skin cam\\drive-download-20180205T055458Z-001"
#==================================import training_data=================================
def import_data(image_path):
image_contents = tf.read_file(filename=image_path)
modified_image = tf.image.decode_jpeg(contents=image_contents, channels=1)
image_tensor = tf.cast(tf.reshape(modified_image, [1, 10000]), dtype=tf.float32)
return image_tensor
#========================neural network================================
def neural_network(input_layer):
Weight_net_1 = {'weights': tf.Variable(tf.random_normal(shape=(10000, 16))),
'bias': tf.Variable(tf.random_normal(shape=(1, 1)))}
Weight_net_2 = {'weights': tf.Variable(tf.random_normal(shape=(16, 16))),
'bias': tf.Variable(tf.random_normal(shape=(1, 1)))}
Weight_net_3 = {'weights': tf.Variable(tf.random_normal(shape=(16, 16))),
'bias': tf.Variable(tf.random_normal(shape=(1, 1)))}
Weight_net_4 = {'weights': tf.Variable(tf.random_normal(shape=(16, 1))),
'bias': tf.Variable(tf.random_normal(shape=(1, 1)))}
#Input Layer
hypothesis = input_layer; x = hypothesis
#Hidden Layer 1
hypothesis = tf.nn.relu(tf.matmul(x, Weight_net_1['weights']) + Weight_net_1['bias']); x = hypothesis
#Hidden Layer 2
hypothesis = tf.nn.relu(tf.matmul(x, Weight_net_2['weights']) + Weight_net_2['bias']); x = hypothesis
#Hidden Layer 3
hypothesis = tf.nn.relu(tf.matmul(x, Weight_net_3['weights']) + Weight_net_3['bias']); x = hypothesis
# output cell
hypothesis = tf.nn.relu(tf.matmul(x, Weight_net_4['weights']) + Weight_net_4['bias'])
return hypothesis
#============================training the network=========================
def train(hypothesis):
LOSS = tf.reduce_sum(1 - hypothesis)
tf.train.AdamOptimizer(0.01).minimize(LOSS)
#Session==================================================================
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
image_list = [os.path.join(PATH, file_name) for file_name in os.listdir(PATH)]
for iteration in tqdm(range(training_iterations), desc="COMPLETION", ncols=80):
for i in image_list:
modified_image_tensor = sess.run(import_data(image_path=i))
hypo = sess.run(neural_network(input_layer=modified_image_tensor))
sess.run(train(hypothesis=hypo))
print("\n\nTraining completed.\nRunning test prediction.\n")
DIRECTORY = input("Directory: ")
test_input = sess.run(import_data(DIRECTORY))
prediction = sess.run(neural_network(input_layer=test_input))
print(prediction)
if prediction >= 0.5:
print ("Acne")
else:
print ("What")
And as for the error message:
Caused by op 'Variable/read', defined at:
File "C:/Users/ratno/Desktop/honest chaos/Hotdog/HDogntoHDog.py", line 75, in <module>
hypo = sess.run(neural_network(input_layer=modified_image_tensor))
File "C:/Users/ratno/Desktop/honest chaos/Hotdog/HDogntoHDog.py", line 23, in neural_network
Weight_net_1 = {'weights': tf.Variable(tf.random_normal(shape=(10000, 16))),
File "C:\Users\ratno\AppData\Roaming\Python\Python36\site-packages\tensorflow\python\ops\variables.py", line 199, in __init__
expected_shape=expected_shape)
File "C:\Users\ratno\AppData\Roaming\Python\Python36\site-packages\tensorflow\python\ops\variables.py", line 330, in _init_from_args
self._snapshot = array_ops.identity(self._variable, name="read")
File "C:\Users\ratno\AppData\Roaming\Python\Python36\site-packages\tensorflow\python\ops\gen_array_ops.py", line 1400, in identity
result = _op_def_lib.apply_op("Identity", input=input, name=name)
File "C:\Users\ratno\AppData\Roaming\Python\Python36\site-packages\tensorflow\python\framework\op_def_library.py", line 767, in apply_op
op_def=op_def)
File "C:\Users\ratno\AppData\Roaming\Python\Python36\site-packages\tensorflow\python\framework\ops.py", line 2630, in create_op
original_op=self._default_original_op, op_def=op_def)
File "C:\Users\ratno\AppData\Roaming\Python\Python36\site-packages\tensorflow\python\framework\ops.py", line 1204, in __init__
self._traceback = self._graph._extract_stack() # pylint: disable=protected-access
FailedPreconditionError (see above for traceback): Attempting to use uninitialized value Variable
[[Node: Variable/read = Identity[T=DT_FLOAT, _class=["loc:#Variable"], _device="/job:localhost/replica:0/task:0/cpu:0"](Variable)]]
Let's take a look at your main code, starting from with tf.Session() as sess:. This is the first line executed when you run your program (after the imports and function definitions). The next thing that happens is that you call the variable initializers -- but you have not yet created any variables, because you have not called any of the functions you defined. So when you later call, e.g., neural_network inside a sess.run call, the (uninitialized) variables are created as neural_network is called, and sess.run then attempts to use them. Obviously this will not work, since you have not initialized these newly created variables.
You have to create your network and all necessary variables in the computational graph before calling the initializers. You could try something along these lines:
data = import_data(image_path)
out = neural_network(data)
tr = train(hypothesis=out)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
By the way, your function train also has no return value, so it is unlikely to work as you are expecting it to. Please re-read the TensorFlow tutorials to understand how to operate an optimizer.
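For illustration only, here is a minimal sketch of how the pieces could fit together so that the graph is built once and the optimizer op is actually returned and run; the image_path placeholder is something I am introducing for the example, it is not in the original code:
def train(hypothesis):
    # return the op so the caller can sess.run() it
    loss = tf.reduce_sum(1 - hypothesis)
    return tf.train.AdamOptimizer(0.01).minimize(loss)

image_path = tf.placeholder(tf.string)        # hypothetical placeholder for the file name
data = import_data(image_path)
out = neural_network(input_layer=data)
train_op = train(hypothesis=out)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in image_list:                      # image_list as built in the question
        sess.run(train_op, feed_dict={image_path: i})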
from numpy import mean,cov,cumsum,dot,linalg,size,flipud
import numpy as np
from pylab import imread,subplot,imshow,title,gray,figure,show,NullLocator
def princomp(A,numpc=0):
#computing eigenvalues and eigenvectors of covariance matrix
M = (A-mean(A.T,axis=1)).T # subtract the mean (along columns)
[latent,coeff] = linalg.eig(cov(M))
p = size(coeff,axis=1)
idx = np.argsort(latent) # sorting the eigenvalues
idx = idx[::-1] # in ascending order
#sorting eigenvectors according to the sorted eigenvalues
coeff = coeff[:,idx]
latent = latent[idx] # sorting eigenvalues
if numpc < p and numpc >= 0:
coeff = coeff[:,range(numpc)] # cutting some PCs if needed
score = dot(coeff.T,M) # projection of the data in the new space
return coeff,score,latent
A = imread('beatles.jpg') # load an image
A = mean(A,2) # to get a 2-D array
full_pc = size(A,axis=1) # numbers of all the principal components
i = 1
dist = []
for numpc in range(0,full_pc+10,10): # 0 10 20 ... full_pc
coeff, score, latent = princomp(A,numpc)
Ar = dot(coeff,score).T+mean(A,axis=0) # image reconstruction
# difference in Frobenius norm
dist.append(linalg.norm(A-Ar,'fro'))
# showing the pics reconstructed with less than 50 PCs
if numpc <= 50:
ax = subplot(2,3,i,frame_on=False)
ax.xaxis.set_major_locator(NullLocator()) # remove ticks
ax.yaxis.set_major_locator(NullLocator())
i += 1
imshow(flipud(Ar))
title('PCs # '+str(numpc))
gray()
figure()
imshow(flipud(A))
title('numpc FULL')
gray()
show()
I am getting the error shown below. This is sample code for PCA image compression that I found on the net. Suggestions would be helpful. The entire error is:
Traceback (most recent call last):
File "try.py", line 36, in <module>
imshow(flipud(Ar))
File "/home/user/.virtualenvs/cv/local/lib/python3.5/site-packages/matplotlib/pyplot.py", line 3157, in imshow
**kwargs)
File "/home/user/.virtualenvs/cv/local/lib/python3.5/site-packages/matplotlib/__init__.py", line 1898, in inner
return func(ax, *args, **kwargs)
File "/home/user/.virtualenvs/cv/local/lib/python3.5/site-packages/matplotlib/axes/_axes.py", line 5124, in imshow
im.set_data(X)
File "/home/user/.virtualenvs/cv/local/lib/python3.5/site-packages/matplotlib/image.py", line 596, in set_data
raise TypeError("Image data can not convert to float")
TypeError: Image data can not convert to float
You cannot plot a complex array with matplotlib imshow. So you need to take the real or imaginary part alone, e.g.
Ar = dot(coeff,score).T+mean(A,axis=0) # image reconstruction
Ar = Ar.real
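An alternative worth considering (my suggestion, not part of the original answer): the complex values come from linalg.eig, and since the covariance matrix is symmetric, numpy.linalg.eigh can be used instead; it always returns real eigenvalues and eigenvectors, so Ar never becomes complex in the first place:
# inside princomp, instead of [latent,coeff] = linalg.eig(cov(M)):
latent, coeff = np.linalg.eigh(np.cov(M))   # real output, eigenvalues in ascending order
idx = np.argsort(latent)[::-1]              # reorder so the largest variance comes first
coeff = coeff[:, idx]
latent = latent[idx]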