I want to skip records that have specific labels (for example, skip whenever label >= 7). My code is here:
label = tf.cast(
    tf.slice(record_bytes, [0], [label_bytes]), tf.int32)
true = tf.constant(True)
less_op = tf.less(label, tf.constant(delimiter))
tf.cond(less_op, lambda: true, lambda: true)
and on the tf.cond line I get this error: ValueError: Shapes (1,) and () are not compatible. My assumption is that it's caused by less_op (if I substitute it with true, the code works). I also found that there is some problem with label: less_op = tf.less(tf.constant(1), tf.constant(delimiter)) works perfectly.
TensorFlow expects the predicate to be of shape None or [], not (1,). In my opinion this is weird behavior that should be fixed, because tf.less on a shape-(1,) label returns a tensor of shape (1,), not of shape ().
Change this:
tf.cond(less_op, lambda: true, lambda: true)
to this:
tf.cond(tf.reshape(less_op,[]), lambda: true, lambda: true)
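For context, a minimal sketch of the issue (with constants standing in for the sliced record): tf.cond requires a scalar predicate, but comparing a shape-(1,) label yields a shape-(1,) boolean.
import tensorflow as tf

label = tf.constant([3])                   # shape (1,), like the sliced label
less_op = tf.less(label, tf.constant(7))   # also shape (1,), not a scalar
result = tf.cond(tf.reshape(less_op, []),  # reshape the predicate to shape ()
                 lambda: tf.constant(True),
                 lambda: tf.constant(False))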
If I call this function on my dataset:
def reconstruct_flight(data, sequence_lenght, flight_len, param_len):
    stack_factor = int(flight_len / sequence_lenght)
    data_reconstructed = []
    for i in range(0, len(data_clean), stack_factor):
        if i < len(data_clean):
            data_reconstructed.append(
                data[i:i + stack_factor].reshape(
                    [flight_len, param_len])
            )
    return np.array(data_reconstructed)
I get the following error:
ValueError: cannot reshape array of size 0 into shape (1500,77)
But if I run the same for loop in the console, without wrapping it in a function:
data_reconstructed = []
for i in range(0, len(data_clean), stack_factor):
    if i < len(data_clean):
        data_reconstructed.append(
            data[i:i + stack_factor].reshape(
                [flight_len, param_len])
        )
It works as expected. Why is that?
The most likely reason the loop fails inside the function but works in the console is that the function iterates over the global data_clean while slicing the parameter data: if the array you pass in is shorter than data_clean, the slice data[i:i+stack_factor] eventually comes up empty, and reshaping an empty array raises "cannot reshape array of size 0 into shape (1500,77)".
In any case, the loop is unnecessary. When reshaping, if you are keeping the same data contiguity and just changing the box, you can reshape your data in a single call:
data_reconstructed = data_clean.reshape((10, 1500, 77))
If you are moving data from one axis to another, you will need to permute the axes beforehand with numpy.transpose: https://numpy.org/doc/stable/reference/generated/numpy.transpose.html
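A minimal runnable sketch with hypothetical dimensions matching the error message (10 flights of 1500 timesteps with 77 parameters each):
import numpy as np

# Hypothetical flat data: rows are timesteps, grouped by flight.
data_clean = np.random.rand(10 * 1500, 77)

# Same contiguity, new box: one reshape call, no loop.
flights = data_clean.reshape((10, 1500, 77))
print(flights.shape)  # (10, 1500, 77)

# If the target layout moves data across axes, transpose afterwards, e.g.:
params_first = flights.transpose(0, 2, 1)
print(params_first.shape)  # (10, 77, 1500)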
I tried to concatenate 2 tensors of different batch sizes within a function decorated with @tf.function. I tried 2 methods; the first one is listed below:
import tensorflow as tf

@tf.function  # indispensable
def fun1(tensors, indices):
    results = []
    for i in tf.range(2):  # batch size = 2
        pos = tf.where(indices == i)
        emb = tf.gather_nd(tensors, pos)
        # do something to emb, but do nothing here for simplicity.
        results += [emb]
    results = tf.concat(results, axis=0)
    return results
tensors = tf.random.uniform((5, 2))
fun1(tensors, indices=[0, 0, 1, 1, 1])
But it raises the following error:
TypeError: 'results' does not have the same nested structure after one iteration.
The two structures don't have the same nested structure.
First structure: type=list str=[]
Second structure: type=list str=[<tf.Tensor 'while/GatherNd:0' shape=(None, 2) dtype=float32>]
More specifically: The two structures don't have the same number of elements. First structure: type=list str=[]. Second structure: type=list str=[<tf.Tensor 'while/GatherNd:0' shape=(None, 2) dtype=float32>]
Entire first structure:
[]
Entire second structure:
[.]
So I tried the second method:
import tensorflow as tf

@tf.function  # indispensable
def fun2(tensors, indices):
    results = tf.reshape(tf.constant([], dtype=tf.float32), (0, 2))  # make an empty tensor
    for i in tf.range(2):  # batch size = 2
        pos = tf.where(indices == i)
        emb = tf.gather_nd(tensors, pos)
        # do something to emb, but do nothing here for simplicity
        results = tf.concat([results, emb], axis=0)
    return results
tensors = tf.random.uniform((5, 2))
fun2(tensors, indices=[0, 0, 1, 1, 1])
But it raises this error:
ValueError: 'results' has shape (0, 2) before the loop, but shape (None, 2) after one iteration. Use tf.autograph.experimental.set_loop_options to set shape invariants.
How should I resolve the problems? Thanks
I found I could fix it by adding one line of code to the second method, as follows:
@tf.function
def fun2(tensors, indices):
    results = tf.reshape(tf.constant([], dtype=tf.float32), (0, 2))  # make an empty tensor
    for i in tf.range(2):  # batch size = 2
        # Declare that results may grow along axis 0 across iterations.
        tf.autograph.experimental.set_loop_options(
            shape_invariants=[(results, tf.TensorShape([None, 2]))])
        pos = tf.where(indices == i)
        emb = tf.gather_nd(tensors, pos)
        # do something to emb, but do nothing here for simplicity
        results = tf.concat([results, emb], axis=0)
    return results
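As a side note (not part of the original solution), an alternative sketch that avoids shape invariants is to accumulate the per-index slices in a tf.TensorArray, which AutoGraph handles natively:
import tensorflow as tf

@tf.function
def fun3(tensors, indices):
    # Collect one (None, 2) slice per index, then concatenate them.
    ta = tf.TensorArray(tf.float32, size=2, infer_shape=False,
                        element_shape=tf.TensorShape([None, 2]))
    for i in tf.range(2):  # batch size = 2
        pos = tf.where(indices == i)
        emb = tf.gather_nd(tensors, pos)
        ta = ta.write(i, emb)
    return ta.concat()

tensors = tf.random.uniform((5, 2))
fun3(tensors, indices=[0, 0, 1, 1, 1])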
Below is the function that I am passing to a keras Lambda layer.
I am having a problem with the output of tf.cond(): it returns a shape of <unknown>. The input tensor (t) and the constant weight tensors have shapes of (None,6) and (6,), respectively. When I add these two outside of tf.cond(), I get a tensor of shape (None,6), which is what I need it to be. However, when the same add operation is returned from within tf.cond(), I get a tensor of shape <unknown>.
What changes when this operation goes via tf.cond()?
def class_segmentation(t):
    class_segments = tf.constant([0, 0, 1, 1, 2, 2])
    a = tf.math.segment_mean(t, class_segments, name=None)
    b = tf.math.argmax(a)
    left_weights = tf.constant([1.0, 1.0, 0.0, 0.0, 0.0, 0.0])
    middle_weights = tf.constant([0.0, 0.0, 1.0, 1.0, 0.0, 0.0])
    right_weights = tf.constant([0.0, 0.0, 0.0, 0.0, 1.0, 1.0])
    zero_weights = tf.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
    c = tf.cond(tf.math.equal(b, 0), lambda: tf.math.add(t, left_weights), lambda: zero_weights)
    d = tf.cond(tf.math.equal(b, 1), lambda: tf.math.add(t, middle_weights), lambda: zero_weights)
    e = tf.cond(tf.math.equal(b, 2), lambda: tf.math.add(t, right_weights), lambda: zero_weights)
    f = tf.math.add_n([c, d, e])
    print("Tensor shape: ", f.shape)  # prints "<unknown>"
    return f
You have a few problems in your code:
1. tf.math.segment_mean() expects class_segments to have the same length as the first dimension of your input t, so None must equal 6 for your code to run. This is the most likely cause of the unknown shape: the shape of your tensors depends on None, which is only determined at runtime. You could transpose the input so that the code runs (not sure if that is what you are trying to achieve), e.g.
a = tf.math.segment_mean(tf.transpose(t), class_segments)
2. In tf.cond(), true_fn and false_fn must return tensors of the same shape. In your case, true_fn returns shape (None, 6) because of broadcasting, while false_fn returns a tensor of shape (6,).
3. The predicate in tf.cond() must be a scalar (rank 0). For example, if you were to apply
b = tf.math.argmax(tf.math.segment_mean(tf.transpose(t), class_segments), 0)
then b would have shape (None,), and the predicate pred in tf.cond() would be broadcast to the same shape, which raises an error.
Without knowing what you are trying to achieve, further help is impossible.
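To illustrate point 2, a minimal sketch (an assumption about the intended shapes, not the asker's full logic): if both branches broadcast against t, the result keeps its (None, 6) static shape through tf.cond().
import tensorflow as tf

@tf.function(input_signature=[tf.TensorSpec([None, 6], tf.float32)])
def demo(t):
    w = tf.constant([1.0, 1.0, 0.0, 0.0, 0.0, 0.0])
    pred = tf.constant(True)  # stand-in for a scalar (rank-0) predicate
    # Both branches return shape (None, 6), so tf.cond preserves it.
    c = tf.cond(pred, lambda: t + w, lambda: t + tf.zeros_like(w))
    print("Static shape:", c.shape)  # (None, 6) rather than <unknown>
    return c

demo(tf.random.uniform((4, 6)))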
While using PyTorch in Python, I ran into confusing behavior: the two checks below both print the type of the tensor x, but they return different results. Why does this happen? My code:
x = x.type(torch.int64)
print(x.type())  # 'torch.LongTensor'
print(type(x))   # <class 'torch.Tensor'>
This is a known behavior of PyTorch since version 0.4, though the maintainers don't offer a justification. The 0.4 migration notes state: "Note also that the type() of a Tensor no longer reflects the data type. Use isinstance() or x.type() instead:"
import torch

x = torch.DoubleTensor([1, 1, 1])
print(type(x))  # was torch.DoubleTensor before 0.4
# "<class 'torch.Tensor'>"
print(x.type())  # OK: 'torch.DoubleTensor'
# 'torch.DoubleTensor'
print(isinstance(x, torch.DoubleTensor))  # OK: True
# True
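As a small follow-up (not part of the original answer), on modern PyTorch the idiomatic way to inspect the element type is x.dtype:
import torch

x = torch.tensor([1, 1, 1]).type(torch.int64)
print(type(x))   # <class 'torch.Tensor'>  (Python class only)
print(x.type())  # 'torch.LongTensor'      (legacy type string)
print(x.dtype)   # torch.int64             (the actual data type)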
The code below:
import theano
import numpy as np
from theano import tensor as T

h1 = T.as_tensor_variable(np.zeros((1, 20), dtype=theano.config.floatX))
s1 = T.as_tensor_variable(np.zeros((1, 20), dtype=theano.config.floatX))

def forward(input, h, s):
    return h, s

result, update = theano.scan(fn=forward, sequences=[T.arange(10)],
                             outputs_info=[h1, s1], go_backwards=False)
print(result[0].shape.eval())
raises an error:
TypeError: Cannot convert Type TensorType(float32, 3D) (of Variable IncSubtensor{Set;:int64:}.0) into Type TensorType(float32, (False, True, False)). You can try to manually convert IncSubtensor{Set;:int64:}.0 into a TensorType(float32, (False, True, False)).
But when I change 1 to any other number, for example:
h1=T.as_tensor_variable(np.zeros((2, 20), dtype=theano.config.floatX))
s1=T.as_tensor_variable(np.zeros((2, 20), dtype=theano.config.floatX))
It works fine.
I don't know what happened here. Could someone help me?
Please follow this post: https://github.com/Theano/Theano/issues/2985
Passing a tensor whose shape includes 1 as part of outputs_info to theano.scan fails unless those size-1 axes are manually unbroadcast using tensor.unbroadcast. This is due to the mismatch between the broadcasting pattern of what the inner function of scan actually returns and that of the corresponding variable passed via outputs_info.
Try:
h1=T.unbroadcast(T.as_tensor_variable(np.zeros((1, 20), dtype=theano.config.floatX)), 0)
s1=T.unbroadcast(T.as_tensor_variable(np.zeros((1, 20), dtype=theano.config.floatX)), 0)
to mark the first dimension as non-broadcastable.
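Putting it together, a complete sketch of the fixed code (assuming a working Theano install); the scan output shape should now evaluate cleanly:
import theano
import numpy as np
from theano import tensor as T

# Mark axis 0 as non-broadcastable so the loop outputs match the
# broadcasting pattern of the inner function's return values.
h1 = T.unbroadcast(T.as_tensor_variable(
    np.zeros((1, 20), dtype=theano.config.floatX)), 0)
s1 = T.unbroadcast(T.as_tensor_variable(
    np.zeros((1, 20), dtype=theano.config.floatX)), 0)

def forward(input, h, s):
    return h, s

result, update = theano.scan(fn=forward, sequences=[T.arange(10)],
                             outputs_info=[h1, s1], go_backwards=False)
print(result[0].shape.eval())  # expected: (10, 1, 20)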