Why do I get "generator object at xxxxxx"? - sensors

Hey, I wrote this code using the CT sensor library written for MicroPython; this is the link to the library:
https://github.com/alisonsalmeida/emonlib-micropython
from machine import Pin, ADC
import utime
from emonlib import Emonlib
from time import sleep

adc = ADC(0)
ct = Emonlib
ct.current(object, adc, 10)
cd = ct.calc_current_rms(10, 2)

while True:
    print(cd)
    sleep(0.5)
I can't figure out how to get the value of the RMS current; I am quite new to Python.
Could you help me learn how to work with that library?
I haven't found anything online yet, and I have been trying for weeks :(

I am lucky to have a friend who helped me. The methods of that library are coroutines, so calling them without await just gives you a generator/coroutine object, and Emonlib also has to be instantiated rather than used as a class.
This should be enough as a starting point:
from Emonlib import Emonlib
import uasyncio
from machine import Pin

async def main():
    emon = Emonlib()                          # create an instance, not the class itself
    p1 = Pin(1)
    await emon.current(p1, 30)                # configure the CT input (pin, calibration)
    amps = await emon.calc_current_rms(1480)  # await the coroutine to get a number back
    watt = amps * 220                         # rough power, assuming 220 V mains
    print(watt)

uasyncio.run(main())
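If you want continuous readings like in your original while loop, here is a minimal sketch reusing only the calls from the snippet above; the pin, calibration value, sample count and 220 V mains are assumptions carried over from it:

import uasyncio
from machine import Pin
from Emonlib import Emonlib

async def main():
    emon = Emonlib()
    await emon.current(Pin(1), 30)                # same pin/calibration as above
    while True:
        amps = await emon.calc_current_rms(1480)  # RMS current over 1480 samples
        print(amps * 220)                         # rough watts, assuming 220 V mains
        await uasyncio.sleep_ms(500)              # poll twice per second

uasyncio.run(main())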


Pymongo, Motor memory leak

Background: I use Tornado + Motor and noticed the memory usage increasing, so I wrote the test.py below.
The db.tasks collection "size" is 12192854 (10+ MB). After one minute, MEM USAGE / LIMIT is 1.219GiB / 8GiB.
env:
python 3.7.5
motor 2.5.0 (2.1.0 before upgrade)
multidict 4.7.5
pymongo 3.12.0
Here is my code:
import os
import gc
import time
import logging
import asyncio
import uvloop
import pdb
import pymongo
import base64
from tornado.platform.asyncio import AsyncIOMainLoop
from guppy import hpy
from motor import motor_asyncio

mongo_auth = 'xxxxx='
runtime_mongos = arch_mongos = {
    "host": f"mongodb://{base64.b64decode(mongo_auth).decode()}#" + ','.join(
        [
            "1xxx:27024",
            "2xxx:27024",
            "3xxx:27024",
        ]),
    "readPreference": "secondaryPreferred"
}
table = motor_asyncio.AsyncIOMotorClient(**runtime_mongos)["db"]["tasks"]

async def get_data():
    return await table.find().sort([
        ("priority", pymongo.ASCENDING),
        ("start_uts", pymongo.ASCENDING),
    ]).to_list(None)

async def test():
    while True:
        a = await get_data()
        print(len(a))
        await asyncio.sleep(1)
        gc.collect()  # no use!

if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    loop.run_until_complete(test())
Finally, I found that the Python process had a lot of threads, which gave me a clue about Motor's ThreadPoolExecutor.
Code in Motor 2.1:
if 'MOTOR_MAX_WORKERS' in os.environ:
    max_workers = int(os.environ['MOTOR_MAX_WORKERS'])
else:
    max_workers = tornado.process.cpu_count() * 5

_EXECUTOR = ThreadPoolExecutor(max_workers=max_workers)
I set MOTOR_MAX_WORKERS=1 and the memory usage stays low.
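For reference, a minimal sketch of how that environment variable can be applied from the script itself; it assumes the executor is created at import time, as in the Motor snippet above:

import os

# Must be set before motor is imported, because the thread pool is
# sized when the motor module is first loaded.
os.environ['MOTOR_MAX_WORKERS'] = '1'

from motor import motor_asyncio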
I deploy my project in Docker, but the container's CPU is not exclusive to it. I guess that is why max_workers ends up unreasonably large.
My fault...

Using Azure Face API in Python, how to return a single faceId or a group of faceIds if the same person is detected in a video stream?

I am using the Azure Face API to detect faces in a video stream, but for each detected face Azure returns a unique faceId (which is exactly what the documentation says).
The problem is: let's say Mr. ABC appears in 20 video frames, then 20 unique faceIds get generated. I want Azure Face to return a single faceId, or a group of faceIds generated specifically for Mr. ABC, so that I can know it is the same person staying in front of the camera for x amount of time.
I have read the documentation on Azure Face grouping and Azure Find Similar, but I didn't understand how to make them work with a live video stream.
The code I am using to detect faces with Azure Face is given below:
from azure.cognitiveservices.vision.face import FaceClient
from msrest.authentication import CognitiveServicesCredentials
from azure.cognitiveservices.vision.face.models import TrainingStatusType, Person, SnapshotObjectType, OperationStatusType
import cv2
import os
import requests
import sys, glob, uuid, re
from PIL import Image, ImageDraw
from urllib.parse import urlparse
from io import BytesIO
from azure.storage.blob import BlobServiceClient, BlobClient, ContainerClient, __version__

face_key = 'XABC'  # API key
face_endpoint = 'https://XENDPOINT.cognitiveservices.azure.com'  # endpoint, e.g. 'https://westus.api.cognitive.microsoft.com'

credentials = CognitiveServicesCredentials(face_key)
face_client = FaceClient(face_endpoint, credentials)

camera = cv2.VideoCapture(0)
samplenum = 1
im = ""
work_dir = os.getcwd()
person_group_id = 'test02-group'
target_person_group_id = str(uuid.uuid4())
face_ids = []

# cv2 font
font = cv2.FONT_HERSHEY_SIMPLEX
# empty tuples
width = ()
height = ()
left = 0
bottom = 0

def getRectangle(faceDictionary):
    rect = faceDictionary.face_rectangle
    left = rect.left
    top = rect.top
    right = left + rect.width
    bottom = top + rect.height
    return ((left, top), (right, bottom))

while True:
    check, campic = camera.read()
    samplenum = samplenum + 1
    cv2.imwrite("live_pics/" + str(samplenum) + ".jpg", campic)
    path = work_dir + "/live_pics/" + str(samplenum) + ".jpg"
    # im = cv2.imread("pics/"+str(samplenum)+".jpg")
    stream = open(path, "r+b")
    detected_faces = face_client.face.detect_with_stream(
        stream,
        return_face_id=True,
        return_face_attributes=['age', 'gender', 'emotion'],
        recognition_model="recognition_03")
    for face in detected_faces:
        width, height = getRectangle(face)
        cv2.rectangle(campic, width, height, (0, 0, 170), 2)
        face_ids.append(face.face_id)
    # cv2.waitKey(100)
    if samplenum > 10:
        break
    cv2.imshow("campic", campic)
    if cv2.waitKey(1) == ord("q"):
        break

camera.release()
cv2.destroyAllWindows()
There is no magic in the Face API: you have to process each face you find in two steps.
What I suggest is to use "Find Similar":
At the beginning, create a "FaceList".
Then process your video:
Face detect on each frame.
For each face found, run the Find Similar operation against the face list you created. If there is no match (with sufficient confidence), add the face to the face list (see the sketch below).
At the end, your face list will contain all the different people found in the video.
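A minimal sketch of that loop with the same Python SDK as in the question; the face-list name, the 0.6 confidence threshold and the frame path are illustrative assumptions, not values from the original post:

import uuid
from azure.cognitiveservices.vision.face import FaceClient
from msrest.authentication import CognitiveServicesCredentials

face_client = FaceClient('<endpoint>', CognitiveServicesCredentials('<key>'))

# Create the face list once, before processing the video
face_list_id = str(uuid.uuid4())
face_client.face_list.create(face_list_id, name='video-faces',
                             recognition_model='recognition_03')

def process_frame(path):
    # Detect faces on this frame
    with open(path, 'rb') as stream:
        detected = face_client.face.detect_with_stream(
            stream, recognition_model='recognition_03')

    for face in detected:
        # Look for a similar face among those already stored
        matches = face_client.face.find_similar(
            face_id=face.face_id, face_list_id=face_list_id)
        good = [m for m in matches if m.confidence and m.confidence > 0.6]
        if good:
            print('same person as', good[0].persisted_face_id)
        else:
            # Unseen person: persist this face into the face list
            rect = face.face_rectangle
            with open(path, 'rb') as stream:
                added = face_client.face_list.add_face_from_stream(
                    face_list_id, stream,
                    target_face=[rect.left, rect.top, rect.width, rect.height])
            print('new person stored as', added.persisted_face_id)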
For your real-time use case, don't use the "Identify" operation with PersonGroup / LargePersonGroup (the choice between the two depends on the size of the group), because you will be blocked by the need to train the group. For example, you would be doing the following:
Step 1, once: generate the PersonGroup / LargePersonGroup for this execution.
Step 2, N times (for each image where you want to identify the face):
Step 2a: face detect
Step 2b: face "identify" on each detected face against the PersonGroup / LargePersonGroup
Step 2c: for each unidentified face, add it to the PersonGroup / LargePersonGroup
The issue here is that after 2c you have to train your group again. Even if training does not take very long, it cannot be used in real time because it would be too slow.
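For reference, the retraining step that makes this loop awkward looks roughly like this with the same SDK; the group id is a placeholder and the polling loop follows the usual quickstart pattern rather than anything from the answer above:

import time
from azure.cognitiveservices.vision.face.models import TrainingStatusType

# After adding faces (step 2c), the group must be retrained before
# Identify can see the new faces.
face_client.person_group.train('<person group id>')
while True:
    status = face_client.person_group.get_training_status('<person group id>')
    if status.status is TrainingStatusType.succeeded:
        break
    if status.status is TrainingStatusType.failed:
        raise RuntimeError('person group training failed')
    time.sleep(1)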
Per my understanding, you want to show a person's name/identity instead of the face ID returned by the Face API.
If so, after you get face IDs via the Face Detect API, you should use the Face Identify API. You get a person ID if a face is recognized by the Azure Face service, and with this ID you can use the PersonGroup Person API to get that person's information.
I also wrote a simple demo for you. In this demo there is only one image; we can just imagine it as a video frame. I created a person group with one Superman person and added some faces to him.
This is the code:
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from PIL import Image
import numpy as np
import asyncio
import io
import glob
import os
import sys
import time
import uuid
import requests
from urllib.parse import urlparse
from io import BytesIO
from azure.cognitiveservices.vision.face import FaceClient
from msrest.authentication import CognitiveServicesCredentials

imPath = "<image path>"
ENDPOINT = '<endpoint>'
KEY = '<key>'
PERSON_GROUP_ID = '<person group name>'

face_client = FaceClient(ENDPOINT, CognitiveServicesCredentials(KEY))
im = np.array(Image.open(imPath), dtype=np.uint8)

faces = face_client.face.detect_with_stream(open(imPath, 'r+b'), recognition_model='recognition_03')

# Create figure and axes
fig, ax = plt.subplots()
# Display the image
ax.imshow(im)

for i in range(len(faces)):
    face = faces[i]
    rect = patches.Rectangle((face.face_rectangle.left, face.face_rectangle.top),
                             face.face_rectangle.width, face.face_rectangle.height,
                             linewidth=1, edgecolor='r', facecolor='none')
    detected_person = face_client.face.identify([face.face_id], PERSON_GROUP_ID)[0]
    if len(detected_person.candidates) > 0:
        person_id = detected_person.candidates[0].person_id
        person = face_client.person_group_person.get(PERSON_GROUP_ID, person_id)
        plt.text(face.face_rectangle.left, face.face_rectangle.top, person.name, color='r')
    else:
        plt.text(face.face_rectangle.left, face.face_rectangle.top, 'unknown', color='r')
    ax.add_patch(rect)

plt.show()
Result:

Python auto-click bot not working and I can't figure out why

I made a Python 'aimbot'-type program that looks for targets in the aim trainer and clicks on them. The problem is that it clicks, but not on the target, and I'm pretty sure I did everything right. Here's the program:
from pyautogui import *
import pyautogui
import time
import keyboard
import random
import win32api, win32con

time.sleep(2)

def click(x, y):
    win32api.SetCursorPos((x, y))
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0)
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0)

while keyboard.is_pressed('q') == False:
    pic = pyautogui.screenshot(region=(0, 0, 1080, 1920))
    width, height = pic.size
    for x in range(0, width, 5):
        for y in range(0, height, 5):
            r, g, b = pic.getpixel((x, y))
            if g == 154:
                click(x, y)
                time.sleep(1)
                break

Python: import pygame does not play audio

The code just prints the name of the song and doesn't stop after it finishes.
My code:
import glob
import os
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "hide"
import pygame

songs = glob.glob("C:\\Users\zivsi\Music\\*.mp3")
import random
song = random.choice(songs)
song_name = song.replace("C:\\Users\zivsi\Music\\", "").replace(".mp3", "")
print("song: ", song_name)

pygame.init()
pygame.mixer.music.load(song)
pygame.mixer.music.play()

while pygame.mixer.music.get_busy():
    pygame.time.Clock().tick(10)
I did not use from pygame import * because that cannot be done inside a def.
Use pygame.mixer.music.stop() when you want to stop the music. pygame.time.Clock().tick(10) computes the time since it was last called and stalls the program until 1/framerate seconds (in your case framerate=10) have passed, so your code runs until the song is done playing. If instead you want to pause the program for a set amount of time and then stop the music, use time.sleep(), which takes seconds as an argument. Possible example:
import glob
import os
import time
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "hide"
import pygame
songs = glob.glob("C:\\Users\zivsi\Music\\*.mp3")
import random
song = random.choice(songs)
song_name = song.replace("C:\\Users\zivsi\Music\\", "").replace(".mp3", "")
print("song: ", song_name)
pygame.init()
pygame.mixer.music.load(song)
pygame.mixer.music.play()
time.sleep(10) #sleep for 10 seconds before moving on
pygame.mixer.music.stop()
Thank you, I understand now. My mistake was pygame.init() - I needed pygame.mixer.init().

Faster FFT in Python using pyqtgraph

As you can see below, I have a program that uses pyqtgraph to display the spectrum of two signals plus some noise generated with numpy. It may well be that I am pushing the limits here: I am using a sample rate of 300000 to put some pressure on things. Nonetheless, I saw no improvement from splitting this app up with multiprocessing or threading, i.e. moving the code lines into the signal() function instead of having it all under the update() function. I also tried pyfftw, which showed no improvement when substituted for np.fft. One core is always at 100%, so I suspect multiprocessing (or threading) may not really be working the way I expect either. Depending on your computer, at a rate of 300000 it updates and pauses, updates and pauses. I would like to hit something like 2400000 smoothly, without the pauses between updates. Does anyone know how I can speed this up, please?
# -*- coding: utf-8 -*-
from pyqtgraph.Qt import QtGui, QtCore
import numpy as np
from threading import Thread
from queue import Queue
from numpy import arange, sin, cos, pi
from scipy.fftpack import fft, rfft
import pyqtgraph as pg
import sys
import multiprocessing

class Plot2D():
    def __init__(self):
        self.traces = dict()
        # QtGui.QApplication.setGraphicsSystem('raster')
        self.app = QtGui.QApplication([])
        # mw = QtGui.QMainWindow()
        # mw.resize(800,800)
        self.win = pg.GraphicsWindow(title="Basic plotting examples")
        self.win.resize(1000, 600)
        self.win.setWindowTitle('pyqtgraph example: Plotting')
        # Enable antialiasing for prettier plots
        pg.setConfigOptions(antialias=True)
        self.canvas = self.win.addPlot(title="Pytelemetry")
        self.canvas.setYRange(-10, 100, padding=0)

    def start(self):
        if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
            QtGui.QApplication.instance().exec_()

    def trace(self, name, dataset_x, dataset_y):
        if name in self.traces:
            self.traces[name].setData(dataset_x, dataset_y)
        else:
            self.traces[name] = self.canvas.plot(pen='y')

## Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__':
    p = Plot2D()
    i = 0

    def signal():
        rate = 300000  # sampling rate
        t = np.arange(0, 10, 1/rate)
        sig = np.sin(2000*np.pi*4*t) + np.sin(2000*np.pi*7*t) + np.random.randn(len(t))*0.02  # 4k + 7k tone + noise
        return sig

    def update():
        rate = 300000  # sampling rate
        z = 20*np.log10(np.abs(np.fft.rfft(signal())))  # rfft trims imag and leaves real values
        f = np.linspace(0, rate/2, len(z))
        p.trace("Amplitude", f, z)

    timer = QtCore.QTimer()
    timer.timeout.connect(lambda: update())
    timer.start(10)
    p.start()

    t1 = multiprocessing.Process(target=signal)
    t1.start
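For what it's worth, a minimal sketch of the kind of split described in the question, generating the signal and computing the FFT in a worker process and handing the results to the GUI through a queue; the function name and the queue-based handoff are illustrative assumptions, not code from the post above:

import multiprocessing as mp
import numpy as np

def fft_worker(out_q, rate=300000):
    # Worker process: build the test signal and compute its spectrum
    # outside the GUI process, pushing (f, z) pairs into the queue.
    t = np.arange(0, 10, 1/rate)
    while True:
        sig = (np.sin(2000*np.pi*4*t) + np.sin(2000*np.pi*7*t)
               + np.random.randn(len(t))*0.02)
        z = 20*np.log10(np.abs(np.fft.rfft(sig)))
        f = np.linspace(0, rate/2, len(z))
        out_q.put((f, z))

if __name__ == '__main__':
    q = mp.Queue(maxsize=2)  # small queue so the worker cannot run far ahead
    mp.Process(target=fft_worker, args=(q,), daemon=True).start()

    # In the QTimer callback, replace the FFT with a non-blocking queue read:
    # def update():
    #     try:
    #         f, z = q.get_nowait()
    #     except Exception:
    #         return
    #     p.trace("Amplitude", f, z)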
