Where does this timestamp come from? (LED priority signal) - python-3.x

I have this code and it runs fine twice, but on the third call I get an error as if I had passed a Timestamp. I only ever pass a tuple or None.
from pprint import pprint
from gpiozero import RGBLED
def signal(pri, newcolor):
    signal.priority[pri] = newcolor
    pprint(signal.priority)
    try: signal.led.color = next(color for color in signal.priority if color is not None)
    except StopIteration: signal.led.color = (0, 0, 0)

signal.led = RGBLED(red=11, green=9, blue=10)
signal.priority = 2*[None]
The output is the following:
[None, (0, 1, 0)]
[None, (0, 0, 1)]
[None, (1, 1, 0)]
Traceback (most recent call last):
File "/home/pi/zrcrasp.py", line 137, in <module>
signal(1, (1,1,0))
File "/home/pi/zrcrasp.py", line 10, in signal
try: signal.led.color = next(color for color in signal.priority if color is not None)
TypeError: 'Timestamp' object is not callable
Background
The function is used by various parts of the program to signal any kind of error, but when a high-priority short red blink ends, the lower-priority green should continue. If there is no lower priority (all remaining entries are None), the LED is turned off (that is what the except clause is for).
Lowest index is the highest priority.
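For reference, the priority lookup itself can be exercised without the LED hardware. Below is a minimal sketch (plain Python, no gpiozero; the pick_color helper is made up for illustration) of the behaviour described above: the lowest non-None index wins, and all-None turns the LED off.

def pick_color(priority):
    """Return the colour of the highest-priority (lowest-index) active slot,
    or (0, 0, 0) when every slot is None."""
    try:
        return next(color for color in priority if color is not None)
    except StopIteration:
        return (0, 0, 0)

priority = [None, (0, 1, 0)]   # low-priority green is active
print(pick_color(priority))    # (0, 1, 0)
priority[0] = (1, 0, 0)        # high-priority red overrides it
print(pick_color(priority))    # (1, 0, 0)
priority[0] = None             # red blink ends, green continues
print(pick_color(priority))    # (0, 1, 0)
priority[1] = None             # nothing left, LED goes off
print(pick_color(priority))    # (0, 0, 0)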

Related

How to pad a numpy.array so it can be np.vsplit()?

I am trying to build my own bubble-sheet OMR engine with Python 3.8 and OpenCV.
Despite several days of debugging, I can't get past this error, which occurs when I crop the bubbles individually:
Traceback (most recent call last):
File "C:\Users\hsolatges\AppData\Local\Programs\Python\Python38\lib\site-packages\numpy\lib\shape_base.py", line 867, in split
len(indices_or_sections)
TypeError: object of type 'int' has no len()
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "c:/autoQuiz/omr.py", line 80, in <module>
anwsers_array = tb.parse_anwsers(anwsers_field, MCQ.get('QUESTIONS'), MCQ.get('CHOICES'))
bubbles = get_bubbles(padded_img, questions, choices)
File "c:\autoQuiz\toolbox.py", line 81, in get_bubbles
rows = np.vsplit(img, questions)
File "<__array_function__ internals>", line 5, in vsplit
File "C:\Users\hsolatges\AppData\Local\Programs\Python\Python38\lib\site-packages\numpy\lib\shape_base.py", line 991, in vsplit
return split(ary, indices_or_sections, 0)
File "<__array_function__ internals>", line 5, in split
File "C:\Users\hsolatges\AppData\Local\Programs\Python\Python38\lib\site-packages\numpy\lib\shape_base.py", line 872, in split
raise ValueError(
ValueError: array split does not result in an equal division
As the size of the bubbles region is arbitrary, I tried to edge-pad it so that its width and height are both multiples of the number of questions / number of choices (A B C D E). Unfortunately, it doesn't work properly: except for tests/omr-1.jpg, the other images fail.
Here is an excerpt of the code:
def to_next_multiple(n, b):
    return int(ceil(n/b) * b)

def pad_image(img, questions, choices):
    w, h = img.shape[:2]
    w_final, h_final = to_next_multiple(w, choices), to_next_multiple(h, questions)
    w_padding, h_padding = max(0, w-w_final), max(0, h-h_final)
    padded_img = np.pad(img, ((0, h_padding), (0, w_padding)), 'edge')
    return padded_img

def get_bubbles(img, questions, choices):
    bubbles = []
    rows = np.vsplit(img, questions)
    for row in rows:
        cells = np.hsplit(row, choices)
        bubbles.append(cells)
    return bubbles

def parse_anwsers(img, questions, choices):
    # Otsu's thresholding after Gaussian filtering
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    blurred = cv.GaussianBlur(gray, (5, 5), 0)
    retValue, thresh = cv.threshold(blurred, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)
    padded_img = pad_image(thresh, questions, choices)
    w, h = padded_img.shape[:2]
    # Debugging
    print(f'width: {w}, w_division: {w/choices}')
    print(f'height: {h}, h_division: {h/questions}')
    bubbles = get_bubbles(padded_img, questions, choices)
    answers_array = bubbles
    return answers_array
The repo can be found here: https://github.com/hsolatges/autoQuiz
How can I consistently get a ready to be np.vsplit/np.hsplit image ?
So the issue came from the following lines:
w_padding, h_padding = max(0, w-w_final), max(0, h-h_final)
padded_img = np.pad(img, ((0, h_padding), (0, w_padding)), 'edge')
which become:
w_padding, h_padding = w_final-w, h_final-h
padded_img = np.pad(img, ((0, w_padding), (0, h_padding)), 'edge')
I was doing sloppy math and misread the numpy axis system. I thought padding on axis #0 added more rows and padding on axis #1 added more columns, though it was the other way around.
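As a quick sanity check of the end result (a minimal sketch with made-up sizes, not taken from the repo), np.vsplit and np.hsplit only require that the padded shape be divisible by the split counts:

import numpy as np

img = np.zeros((10, 7))                          # 10 rows, 7 columns
padded = np.pad(img, ((0, 2), (0, 5)), 'edge')   # pad 2 along the first axis, 5 along the second
print(padded.shape)                              # (12, 12)
rows = np.vsplit(padded, 4)                      # 12 is divisible by 4 -> 4 blocks of 3 rows
cells = np.hsplit(rows[0], 3)                    # 12 is divisible by 3 -> 3 blocks of 4 columns
print(len(rows), len(cells))                     # 4 3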

H2O Target Mean Encoder "frames are being sent in the same order" ERROR

I am following the H2O example to run target mean encoding in Sparkling Water (Sparkling Water 2.4.2 and H2O 3.22.04). Everything in the following paragraph runs fine:
from h2o.targetencoder import TargetEncoder
# change label to factor
input_df_h2o['label'] = input_df_h2o['label'].asfactor()
# add fold column for Target Encoding
input_df_h2o["cv_fold_te"] = input_df_h2o.kfold_column(n_folds = 5, seed = 54321)
# find all categorical features
cat_features = [k for (k,v) in input_df_h2o.types.items() if v in ('string')]
# convert string to factor
for i in cat_features:
    input_df_h2o[i] = input_df_h2o[i].asfactor()
# target mean encode
targetEncoder = TargetEncoder(x= cat_features, y = y, fold_column = "cv_fold_te", blending_avg=True)
targetEncoder.fit(input_df_h2o)
But when I use the same data set that was used to fit the target encoder to run the transform code (see below):
ext_input_df_h2o = targetEncoder.transform(frame=input_df_h2o,
                                           holdout_type="kfold",  # mean is calculated on out-of-fold data only; loo means leave one out
                                           is_train_or_valid=True,
                                           noise=0,  # determines if random noise should be added to the target average
                                           seed=54321)
I get an error like:
Traceback (most recent call last):
File "/tmp/zeppelin_pyspark-6773422589366407956.py", line 331, in <module>
exec(code)
File "<stdin>", line 5, in <module>
File "/usr/lib/envs/env-1101-ver-1619-a-4.2.9-py-3.5.3/lib/python3.5/site-packages/h2o/targetencoder.py", line 97, in transform
assert self._encodingMap.map_keys['string'] == self._teColumns
AssertionError
I found the relevant code in the source at http://docs.h2o.ai/h2o/latest-stable/h2o-py/docs/_modules/h2o/targetencoder.html,
but how do I fix this issue? It is the same table that was used to run the fit.
The issue is that you are trying to encode multiple categorical features. I think that is a bug in H2O, but you can work around it by putting the transformer in a for loop that iterates over all the categorical column names:
import numpy as np
import pandas as pd
import h2o
from h2o.targetencoder import TargetEncoder

h2o.init()

df = pd.DataFrame({
    'x_0': ['a'] * 5 + ['b'] * 5,
    'x_1': ['c'] * 9 + ['d'] * 1,
    'x_2': ['a'] * 3 + ['b'] * 7,
    'y_0': [1, 1, 1, 1, 0, 1, 0, 0, 0, 0]
})

hf = h2o.H2OFrame(df)
hf['cv_fold_te'] = hf.kfold_column(n_folds=2, seed=54321)
hf['y_0'] = hf['y_0'].asfactor()

cat_features = ['x_0', 'x_1', 'x_2']

for item in cat_features:
    target_encoder = TargetEncoder(x=[item], y='y_0', fold_column='cv_fold_te')
    target_encoder.fit(hf)
    hf = target_encoder.transform(frame=hf, holdout_type='kfold',
                                  seed=54321, noise=0.0)

hf
Thanks everyone for letting us know. The assertion was a precaution, as I was not sure whether the order could ever change. The rest of the code was written with that possibility in mind and is therefore safe to use with a changed order anyway, but the assertion was left in and forgotten. We added a test and removed the assertion. The issue is now fixed and merged, and should be available in the upcoming fix release: 0xdata.atlassian.net/browse/PUBDEV-6474

AttributeError in python: object has no attribute

I started learning machine learning and came across neural networks. While implementing a program I got this error. I have tried checking for every solution but no luck. Here's my code:
from numpy import exp, array, random, dot

class neural_network:
    def _init_(self):
        random.seed(1)
        self.weights = 2 * random.random((2, 1)) - 1

    def train(self, inputs, outputs, num):
        for iteration in range(num):
            output = self.think(inputs)
            error = outputs - output
            adjustment = 0.01*dot(inputs.T, error)
            self.weights += adjustment

    def think(self, inputs):
        return (dot(inputs, self.weights))
neural = neural_network()
# The training set
inputs = array([[2, 3], [1, 1], [5, 2], [12, 3]])
outputs = array([[10, 4, 14, 30]]).T
# Training the neural network using the training set.
neural.train(inputs, outputs, 10000)
# Ask the neural network the output
print(neural.think(array([15, 2])))
This is the error I'm getting when running neural.train:
Traceback (most recent call last):
File "neural.py", line 27, in <module>
neural.train(inputs, outputs, 10000)
File "neural.py", line 10, in train
output = self.think(inputs)
File "neural.py", line 16, in think
return (dot(inputs, self.weights))
AttributeError: 'neural_network' object has no attribute 'weights'
Even though it has an attribute self.weights, it still says there is no such attribute.
Well, it turns out that your initialization method should be named __init__ (two underscores), not _init_...
So, changing the method to
def __init__(self):
    random.seed(1)
    self.weights = 2 * random.random((2, 1)) - 1
your code works OK:
neural.train(inputs, outputs, 10000)
print(neural.think(array([15, 2])))
# [ 34.]
Your initializing method is written wrong: it's two underscores, __init__(self):, not one underscore, _init_(self):.
Otherwise, nice code!
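To see why the single-underscore spelling fails, here is a minimal sketch (class names are made up): _init_ is just an ordinary method that Python never calls on construction, so the attribute is never created.

class Broken:
    def _init_(self):           # ordinary method, NOT the constructor hook
        self.weights = [1, 2]

class Fixed:
    def __init__(self):         # dunder name, called automatically by Fixed()
        self.weights = [1, 2]

b = Broken()
print(hasattr(b, 'weights'))    # False -> AttributeError as soon as weights is used
f = Fixed()
print(hasattr(f, 'weights'))    # True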

pass a list as argument of the func1d in numpy.apply_along_axis(func1d, axis, arr, *args, **kwargs)

I can't manage to pass a list as an argument to func1d in numpy.apply_along_axis(...).
def test(a, value):
    print(value)
    return a

a = np.zeros((49), dtype=list)
kwargs = {"value": [1,1,1]}
zep = np.vectorize(test)
np.apply_along_axis(zep, 0, a, **kwargs)
Out:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/ibpc/osx/lbt/numpy/1.9.2/lib/python3.4/site-packages/nump/lib/shape_base.py", line 91, in apply_along_axis
res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
File "/ibpc/osx/lbt/numpy/1.9.2/lib/python3.4/site-packages/numpy/lib/function_base.py", line 1700, in __call__
return self._vectorize_call(func=func, args=vargs)
File "/ibpc/osx/lbt/numpy/1.9.2/lib/python3.4/site-packages/numpy/lib/function_base.py", line 1769, in _vectorize_call
outputs = ufunc(*inputs)
ValueError: operands could not be broadcast together with shapes (49,) (3,)
So it wants len(kwargs["value"]) == 49, but that is not what I want.
I need to be able to change value as I go (during numpy.apply_along_axis(func1d) I need to update my list).
How can I pass a list as an argument? Or maybe there is another way to solve this problem.
In reality, I have a numpy.array of lists of positions in 3D space for particles.
Like this:
dim = [49, 49, 49]
dx = 3
origin = [3, 3, 3]
nb_iter = 5
ntoto = np.load("ntoto.npy")
ntoto = ntoto.flatten()
liste_particles = np.zeros((5), dtype=list)
for i in range(len(liste_particles)):
    liste_particles[i] = [[r.uniform(0,150), r.uniform(0,150), r.uniform(0,150)]]*nb_iter  # nb_iter is just the number of iterations I want to do in calcTrajs.
vtraj = np.vectorize(calcTrajs, otypes=[list])
np.apply_along_axis(vtraj, 0, liste_particles)
Here I have five particles placed randomly. Moreover, I have another numpy.array (shape == (49, 49, 49)) which contains a vector field.
Here is the func1d that I need to run:
def calcTrajs(a):
    global ntoto, dim, dx, origin  # ntoto is my vector_field
    for b in range(1, len(a)):
        ijk = s2g(a[b-1], dx, origin, dim)  # function to find which vector my particle sits on (space to grid, because my vector_field is laid out as a grid).
        value = np.asarray(ntoto[flatten3Dto1D(ijk, dim[1], dim[2])])  # so value contains the vector that influences my particle.
        try:
            a[b] = list(a[b-1] + value*1000)
        except:
            print("error")
            break
    return a
This function lets me launch a particle into my vector field and compute its trajectory.
As you can see, I use global variables, but I want to pass these variables as arguments rather than globals. ntoto is a numpy.array, dim is a list (the dimensions of my vector field), dx is the cell spacing (my vector field is a grid of many cells, each containing a vector), and origin is the first point of my grid.
Best regards,
Adam
As I commented, neither vectorize nor apply_along_axis is a speed tool. vectorize can be useful for broadcasting several arrays against each other. apply_along_axis can be useful for iterating over more than 2 dimensions; with only one or two it is overkill. Both are tools that beginners often misuse.
It looks like the apply_along_axis part is OK, though I haven't tested it. The error lies in the broadcasting done by vectorize.
Especially since you are defining a with object dtype, you should specify a matching return dtype for vectorize via otypes. Otherwise it performs a trial calculation to determine it.
In [223]: def test(a, value):
     ...:     print(value)
     ...:     return a
In [224]: zep = np.vectorize(test, otypes=['O'])
In [225]: a = np.array([[1,2,3],[4,5]])
In [226]: a
Out[226]: array([list([1, 2, 3]), list([4, 5])], dtype=object)
zep works with a and a scalar
In [227]: zep(a,1)
1
1
Out[227]: array([list([1, 2, 3]), list([4, 5])], dtype=object)
But when a has 2 items and value has 3 items, I get the same sort of error as you did:
In [228]: zep(a,[1,2,3])
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-228-382aaa7a2dc6> in <module>()
----> 1 zep(a,[1,2,3])
/usr/local/lib/python3.6/dist-packages/numpy/lib/function_base.py in __call__(self, *args, **kwargs)
2753 vargs.extend([kwargs[_n] for _n in names])
2754
-> 2755 return self._vectorize_call(func=func, args=vargs)
2756
2757 def _get_ufunc_and_otypes(self, func, args):
/usr/local/lib/python3.6/dist-packages/numpy/lib/function_base.py in _vectorize_call(self, func, args)
2829 for a in args]
2830
-> 2831 outputs = ufunc(*inputs)
2832
2833 if ufunc.nout == 1:
ValueError: operands could not be broadcast together with shapes (2,) (3,)
(2,) and (2,) is fine:
In [229]: zep(a,['a','b'])
a
b
Out[229]: array([list([1, 2, 3]), list([4, 5])], dtype=object)
So is (2,) with (2,1), producing a (2,2) output. This is an example of the kind of broadcasting where vectorize can help.
In [230]: zep(a,[['a'],['b']])
a
a
b
b
Out[230]:
array([[list([1, 2, 3]), list([4, 5])],
[list([1, 2, 3]), list([4, 5])]], dtype=object)
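If the goal is simply to hand the whole list to every call rather than broadcast it, one option (not part of the answer above, shown here as a sketch using the same toy test function) is the excluded argument of np.vectorize, which keeps a named argument out of broadcasting:

import numpy as np

def test(a, value):
    print(value)
    return a

# 'value' is excluded from broadcasting, so the full list reaches each call unchanged
zep = np.vectorize(test, otypes=['O'], excluded=['value'])

a = np.empty(2, dtype=object)
a[0] = [1, 2, 3]
a[1] = [4, 5]

out = zep(a, value=[1, 1, 1])   # prints [1, 1, 1] once per element of a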

I'm a newbie in image processing getting an error "Don't know how to convert parameter 1"

I'm using OpenCV on Windows. This error occurs only after the green colour is detected (which is, of course, the task of the code).
CODE:
import cv2
import numpy as np
from pynput.mouse import Button, Controller
import wx

mouse = Controller()
app = wx.App(False)
(sx, sy) = wx.GetDisplaySize()
(camx, camy) = (640, 480)

lowerBound = np.array([33, 80, 40])
upperBound = np.array([102, 255, 255])

cam = cv2.VideoCapture(0)
kernelOpen = np.ones((5, 5))
kernelClose = np.ones((20, 20))
pinchFlag = 0

while True:
    ret, img = cam.read()
    img = cv2.resize(img, (640, 480))
    # convert BGR to HSV
    imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    # create the mask
    mask = cv2.inRange(imgHSV, lowerBound, upperBound)
    # morphology
    maskOpen = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernelOpen)
    maskClose = cv2.morphologyEx(maskOpen, cv2.MORPH_CLOSE, kernelClose)
    maskFinal = maskClose
    # conts (the contours found in maskFinal) is assumed here; the line that
    # computes it is missing from the pasted snippet
    if(len(conts) == 2):
        if(pinchFlag == 1):
            pinchFlag = 0
            mouse.release(Button.left)
        x1, y1, w1, h1 = cv2.boundingRect(conts[0])
        x2, y2, w2, h2 = cv2.boundingRect(conts[1])
        cv2.rectangle(img, (x1, y1), (x1+w1, y1+h1), (255, 0, 0), 2)
        cv2.rectangle(img, (x2, y2), (x2+w2, y2+h2), (255, 0, 0), 2)
        cx1 = int(x1+w1/2)
        cy1 = int(y1+h1/2)
        cx2 = int(x2+w2/2)
        cy2 = int(y2+h2/2)
        cx = int((cx1+cx2)/2)
        cy = int((cy1+cy2)/2)
        cv2.line(img, (cx1, cy1), (cx2, cy2), (255, 0, 0), 2)
        cv2.circle(img, (cx, cy), 2, (0, 0, 255), 2)
        mouseLoc = (sx-(cx*sx/camx), cy*sy/camy)
        mouse.position = mouseLoc
        while mouse.position != mouseLoc:
            pass
    elif(len(conts) == 1):
        x, y, w, h = cv2.boundingRect(conts[0])
        if(pinchFlag == 0):
            pinchFlag = 1
            mouse.press(Button.left)
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
        cx = int(x+w/2)
        cy = int(y+h/2)
        cv2.circle(img, (cx, cy), int((w+h)/4), (0, 0, 255), 2)
        mouseLoc = (sx-(cx*sx/camx), cy*sy/camy)
        mouse.position = mouseLoc
        while mouse.position != mouseLoc:
            pass
    cv2.imshow("cam", img)
    cv2.waitKey(5)
Error:
Traceback (most recent call last):
File "virtual_mouse.py", line 65, in <module>
mouse.position = mouseLoc
File "C:\Users\dell\Anaconda3\envs\kj\lib\site-packages\pynput\mouse\_base.py", line 65, in position
self._position_set(pos)
File "C:\Users\dell\Anaconda3\envs\kj\lib\site-packages\pynput\mouse\_win32.py", line 66, in _position_set
self.__SetCursorPos(*pos)
ctypes.ArgumentError: argument 1: <class 'TypeError'>: Don't know how to convert parameter 1
If you're using OpenCV 3, the formula mouseLoc = (sx-(cx*sx/camx), cy*sy/camy) returns float values, which is not obvious from the iPython console. So you have to convert those into integer values and this will work. Hence make the following change:
mouseLoc = ( int(sx-(cx*sx/camx)) , int(cy*sy/camy) )
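Put together, a minimal self-contained sketch of the corrected assignment (the screen, camera, and centroid values below are made up for illustration):

from pynput.mouse import Controller

mouse = Controller()

sx, sy = 1920, 1080      # display size
camx, camy = 640, 480    # camera frame size
cx, cy = 320, 240        # example centroid from the detected contour

# the division produces floats under Python 3, so cast to int before handing
# the coordinates to pynput / SetCursorPos
mouseLoc = (int(sx - (cx * sx / camx)), int(cy * sy / camy))
mouse.position = mouseLoc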
