Multithreading issue for two separate sources - python-3.x

I am trying to run two functions at the same time. One function is supposed to detect scenes from a local video and the other should detect scenes from my webcam; I then do some processing on those scenes. Here is the code snippet:
scenes_1 = []
scenes_2 = []

scene_manager1 = SceneManager()
scene_manager1.add_detector(ContentDetector(threshold=30.0))
scene_manager2 = SceneManager()
scene_manager2.add_detector(ContentDetector(threshold=30.0))

def callback_1(image, frame_num):
    global scenes_1
    print("callback_1: Found a scene on video 1.")
    scenes_1 += [(image, frame_num)]

def callback_2(image, frame_num):
    global scenes_2
    print("callback_2: Found a scene on video 2.")
    scenes_2 += [(image, frame_num)]

def function_1():
    video_capture_0 = BrowserVideoCapture(src=0)
    while True:
        ret0, frame0 = video_capture_0.read()
        if ret0:
            scene_manager1.detect_scenes(frame0, callback=callback_1)
        else:
            break
    video_capture_0.release()

def function_2():
    video_capture_1 = cv2.VideoCapture("Our Story.mp4")
    while True:
        ret1, frame1 = video_capture_1.read()
        if ret1:
            scene_manager2.detect_scenes(frame1, callback=callback_2)
            if cv2.waitKey(34) & 0xFF == ord('q'):
                break
        else:
            break
    video_capture_1.release()
    cv2.destroyAllWindows()

t1 = threading.Thread(target=function_1)
t2 = threading.Thread(target=function_2)
t1.start()
t2.start()
t1.join()
t2.join()

while True:
    for im, frame_num in scenes_2:
        tar_image_example = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
        tar_image_example = tar_image_example.astype(np.uint8)
        print("Found scene at frame %d in video 2." % frame_num)
        tar_image_example += 1
    for im, frame_num in scenes_1:
        src_image_example = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
        src_image_example = src_image_example.astype(np.uint8)
        print("Found scene at frame %d in video 1." % frame_num)
But I get the following error:
Exception in thread Thread-29:
Traceback (most recent call last):
File "/usr/lib/python3.7/threading.py", line 926, in _bootstrap_inner
self.run()
File "/usr/lib/python3.7/threading.py", line 870, in run
self._target(*self._args, **self._kwargs)
File "<ipython-input-25-1d65acb7c14c>", line 71, in function_2
scene_manager2.detect_scenes(frame1, callback=callback_2)
File "/usr/local/lib/python3.7/dist-packages/scenedetect/scene_manager.py", line 626, in detect_scenes
timecode=0, fps=frame_source.get(cv2.CAP_PROP_FPS))
AttributeError: 'numpy.ndarray' object has no attribute 'get'
Exception in thread Thread-28:
Traceback (most recent call last):
File "/usr/lib/python3.7/threading.py", line 926, in _bootstrap_inner
self.run()
File "/usr/lib/python3.7/threading.py", line 870, in run
self._target(*self._args, **self._kwargs)
File "<ipython-input-25-1d65acb7c14c>", line 54, in function_1
scene_manager1.detect_scenes(frame0, callback=callback_1)
File "/usr/local/lib/python3.7/dist-packages/scenedetect/scene_manager.py", line 626, in detect_scenes
timecode=0, fps=frame_source.get(cv2.CAP_PROP_FPS))
AttributeError: 'numpy.ndarray' object has no attribute 'get'
---------------------------------------------------------------------------
KeyboardInterrupt Traceback (most recent call last)
<ipython-input-25-1d65acb7c14c> in <module>()
112 tar_image_example = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
113 tar_image_example = tar_image_example.astype(np.uint8)
--> 114 print("Found scene at frame %d in video 2." % frame_num)
115 for im, frame_num in scenes_1:
KeyboardInterrupt:
The code just keeps cycling between the for loops without producing any output...
What am I doing wrong, please?
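From the traceback, detect_scenes() is being handed a single numpy frame, but it expects an open frame source that supports .get(cv2.CAP_PROP_FPS) (for example a cv2.VideoCapture) and drives the read loop itself. Below is a minimal sketch of that idea for the file-based source only, assuming the scenedetect 0.5.x-style API implied by the traceback; whether BrowserVideoCapture exposes the same interface would need checking separately.

import threading

import cv2
from scenedetect.scene_manager import SceneManager
from scenedetect.detectors import ContentDetector

scenes_2 = []

def on_new_scene(image, frame_num):
    print("Found a scene at frame %d in video 2." % frame_num)
    scenes_2.append((image, frame_num))

def detect_file_scenes():
    # Each thread gets its own SceneManager so no state is shared between sources.
    manager = SceneManager()
    manager.add_detector(ContentDetector(threshold=30.0))
    cap = cv2.VideoCapture("Our Story.mp4")   # a capture object supports .get(cv2.CAP_PROP_FPS)
    try:
        # Pass the open capture, not a single frame; detect_scenes() reads
        # frames from it until the video ends.
        manager.detect_scenes(cap, callback=on_new_scene)
    finally:
        cap.release()

t2 = threading.Thread(target=detect_file_scenes)
t2.start()
t2.join()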

Related

Why shouldn't Event instance be put into Queue in python multiple process?

When I try to put an Event instance into a Queue, the Python interpreter raises the RuntimeError shown below:
RuntimeError: Condition objects should only be shared between processes through inheritance
My Example Code:
import time
from multiprocessing import Process, Queue, Event

def slaver(q: Queue, e: Event):
    while True:
        print("do1", e)
        _, _ = q.get(block=True)
        time.sleep(3)
        e.set()
        print("do2")

def start():
    q = Queue()
    e = Event()
    p = Process(target=slaver, args=(q, e))
    p.start()
    while True:
        print("1")
        q.put((1, e))
        print("2", e)
        wait = e.wait(timeout=1)
        print("3", wait)
        e.clear()
        print("4")
        time.sleep(5)

if __name__ == '__main__':
    start()
Output
1
2 <multiprocessing.synchronize.Event object at 0x1028d8df0>
Traceback (most recent call last):
File "/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.8/lib/python3.8/multiprocessing/queues.py", line 239, in _feed
obj = _ForkingPickler.dumps(obj)
File "/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.8/lib/python3.8/multiprocessing/reduction.py", line 51, in dumps
cls(buf, protocol).dump(obj)
File "/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.8/lib/python3.8/multiprocessing/synchronize.py", line 220, in __getstate__
context.assert_spawning(self)
File "/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.8/lib/python3.8/multiprocessing/context.py", line 359, in assert_spawning
raise RuntimeError(
RuntimeError: Condition objects should only be shared between processes through inheritance
do1 <multiprocessing.synchronize.Event object at 0x1075b6eb0>
3 False
4
1
2 <multiprocessing.synchronize.Event object at 0x1028d8df0>
Traceback (most recent call last):
File "/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.8/lib/python3.8/multiprocessing/queues.py", line 239, in _feed
obj = _ForkingPickler.dumps(obj)
File "/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.8/lib/python3.8/multiprocessing/reduction.py", line 51, in dumps
cls(buf, protocol).dump(obj)
File "/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.8/lib/python3.8/multiprocessing/synchronize.py", line 220, in __getstate__
context.assert_spawning(self)
File "/Library/Developer/CommandLineTools/Library/Frameworks/Python3.framework/Versions/3.8/lib/python3.8/multiprocessing/context.py", line 359, in assert_spawning
raise RuntimeError(
RuntimeError: Condition objects should only be shared between processes through inheritance
And if I replace q.put((1, e)) with q.put((1, 2)), the exception disappears.
But there is an example of using Event across multiple threads, and the only difference is that my code uses processes. The Event in multiprocessing is cloned from threading, so what is the difference?
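For what it's worth, the usual workaround is to share the Event only by passing it to the Process at creation time (the "inheritance" the error message refers to) and to put only plain, picklable data on the Queue. A minimal sketch of that, assuming the Event itself never needs to travel through the queue:

import time
from multiprocessing import Process, Queue, Event

def slaver(q, e):
    while True:
        item = q.get(block=True)   # only plain data comes through the queue
        print("do1", item)
        time.sleep(3)
        e.set()                    # the Event was handed over via Process args
        print("do2")

def start():
    q = Queue()
    e = Event()
    p = Process(target=slaver, args=(q, e))   # shared through inheritance
    p.start()
    while True:
        q.put(1)                   # no synchronization objects in the queue
        if e.wait(timeout=5):
            e.clear()
        time.sleep(5)

if __name__ == '__main__':
    start()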

'str' object is not callable error while trying to create threads and set up a pipe between them

I am trying to create two threads and set up a pipe to communicate between them, but it gives the error TypeError: 'str' object is not callable.
Online, I saw that the most common cause is the arguments not being passed as a tuple, but I fixed that and it is still throwing the same error.
# Create two threads and communicate between them using pipes
import os
#import random
#import string
import time
import threading

def task_for_thread1(r, w):
    os.close(r)
    write_obj = os.fdopen(w, 'w')
    for _ in range(1000):
        char = 'a'
        os.write(write_obj, 'Thread 1 writes: ' + char)
        time.sleep(0.5)
    write_obj.close()

def task_for_thread2(r, w):
    os.close(w)
    read_obj = os.fdopen(r)
    while(True):
        char_read = read_obj.read()
        print('Received character', char_read, 'from the pipe')

if __name__=='__main__':
    print('Starting operation in main')
    r, w = os.pipe()
    thread1 = threading.Thread(target='task_for_thread1', name='Thread1', args=(r, w,))
    thread2 = threading.Thread(target='task_for_thread2', name='Thread2', args=(r, w,))
    thread1.start()
    thread2.start()
    thread1.join()
    thread2.join()
And this is the output I get in the console.
Starting operation in main
18
19
Exception in thread Thread1:
Traceback (most recent call last):
File "C:\Users\vivek\Anaconda3\lib\threading.py", line 916, in _bootstrap_inner
self.run()
File "C:\Users\vivek\Anaconda3\lib\threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
TypeError: 'str' object is not callable
Exception in thread Thread2:
Traceback (most recent call last):
File "C:\Users\vivek\Anaconda3\lib\threading.py", line 916, in _bootstrap_inner
self.run()
File "C:\Users\vivek\Anaconda3\lib\threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
TypeError: 'str' object is not callable
I edited the code as per the comment.
# Create two threads and communicate between them using pipes
import os
#import random
#import string
import time
import threading

def task_for_thread1(r, w):
    os.close(r)
    for _ in range(1000):
        char2 = 'a'
        os.write(w, char2)
        time.sleep(0.5)
    os.close(w)

def task_for_thread2(r, w):
    os.close(w)
    read_obj = os.fdopen(r)
    while(True):
        char_read = read_obj.read()
        print('Received character', char_read, 'from the pipe')

if __name__=='__main__':
    print('Starting operation in main')
    r, w = os.pipe()
    print(r)
    print(w)
    thread1 = threading.Thread(target=task_for_thread1, name='Thread1', args=(r, w,))
    thread2 = threading.Thread(target=task_for_thread2, name='Thread2', args=(r, w,))
    thread1.start()
    thread2.start()
    thread1.join()
    thread2.join()
But now I get the following error:
Starting operation in main
24
25
Exception in thread Thread1:
Traceback (most recent call last):
File "C:\Users\vivek\Anaconda3\lib\threading.py", line 916, in _bootstrap_inner
self.run()
File "C:\Users\vivek\Anaconda3\lib\threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "E:/PythonScripts/multi_threading_pipe.py", line 11, in task_for_thread1
os.write(w,char2)
TypeError: a bytes-like object is required, not 'str'
Exception in thread Thread2:
Traceback (most recent call last):
File "C:\Users\vivek\Anaconda3\lib\threading.py", line 916, in _bootstrap_inner
self.run()
File "C:\Users\vivek\Anaconda3\lib\threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "E:/PythonScripts/multi_threading_pipe.py", line 16, in task_for_thread2
read_obj=os.fdopen(r)
File "C:\Users\vivek\Anaconda3\lib\os.py", line 1015, in fdopen
return io.open(fd, *args, **kwargs)
OSError: [WinError 6] The handle is invalid
Any pointers on what I may be doing wrong?
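For what it's worth, two things seem to be involved here: os.write() wants bytes rather than str, and because both threads live in the same process they share the same file descriptors, so os.close(r) in one thread also closes it for the other (hence the invalid-handle error in fdopen). Below is a minimal sketch that avoids both, with neither thread closing the end the other one still needs:

import os
import threading
import time

def writer(w):
    for _ in range(5):
        os.write(w, b'a')              # bytes, not str
        time.sleep(0.5)
    os.close(w)                        # closing the write end signals EOF to the reader

def reader(r):
    with os.fdopen(r) as read_obj:     # text-mode wrapper around the read end
        for char_read in iter(lambda: read_obj.read(1), ''):
            print('Received character', char_read, 'from the pipe')

r, w = os.pipe()
t1 = threading.Thread(target=writer, name='Thread1', args=(w,))
t2 = threading.Thread(target=reader, name='Thread2', args=(r,))
t1.start()
t2.start()
t1.join()
t2.join()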

Python threading causing issues with google api

I'm running through a list of locations and trying to find places along my route. This is my first attempt at threading, so any tips would be appreciated! When I run this, it works fine for the first few iterations, but then I start getting a KeyError and the API response says the route is not found (even though it should be). If I search along a shorter route, everything runs fine; once I extend the route past a couple of hours of drive time, the errors start. Is it possible that I'm overloading the API, or does my code look off?
import pandas as pd
from threading import Thread
import threading
import requests
import datetime   # needed for datetime.timedelta below
import time       # needed for time.time() below

start_input = input("start: ")
end_input = input("end: ")
out_way = input("out of the way: ")
out_way_secs = int(out_way) * 60

thread_local = threading.local()

def get_session():
    if not getattr(thread_local, "session", None):
        thread_local.session = requests.Session()
    return thread_local.session

def get_routes(url, start, end, waypoint, idx):
    session = get_session()
    with session.get(url, params={'origins': f'{start}|{waypoint}', 'destinations': f'{start}|{end}',
                                  'key': '# key'}) as response:
        route = response.json()
        if route['rows'][1]['elements'][0]['status'] != 'OK':
            results[idx] = {'# info'}
        else:
            nonstop_route = route['rows'][0]['elements'][1]['duration']['value']
            leg1 = route['rows'][1]['elements'][0]['duration']['value']
            leg2 = route['rows'][1]['elements'][1]['duration']['value']
            time_added = (leg1 + leg2) - nonstop_route
            time_added_mins = str(datetime.timedelta(seconds=(leg1 + leg2) - nonstop_route))
            more_time = time_added_mins.split(':')
            added_time_str = str(f'{more_time[0]}:{more_time[1]}:{more_time[2]} away!')
            if time_added < allowable_time:
                results[idx] = {# info to return}
        return results[idx]

if __name__ == "__main__":
    start_time = time.time()
    output_df = pd.DataFrame(columns=['Location', 'Added Time', 'Notes'])
    threads = [None] * coords[0]
    results = [None] * coords[0]
    for i in range(len(threads)):
        threads[i] = Thread(target=get_routes, args=('https://maps.googleapis.com/maps/api/distancematrix/json',
                                                     start_input, end_input, stops[i], i))
        threads[i].start()
    for i in range(len(threads)):
        threads[i].join()
    for x in range(len(results)):
        output_df = output_df.append(results[x], ignore_index=True)
    output_df = output_df.sort_values(['Added Time'], ascending=True)
    output_df.to_csv('output.csv', index=False)
There are 3 errors that it will get; the first one pops up by itself and the last 2 come together. The code is the same each time I run it, so I'm not sure why I'm getting different errors.
This is the most common error, and it comes by itself (the routing duration works fine when run individually):
Exception in thread Thread-171:
Traceback (most recent call last):
File "C:\Python37-32\lib\threading.py", line 917, in _bootstrap_inner
self.run()
File "C:\Python37-32\lib\threading.py", line 865, in run
self._target(*self._args, **self._kwargs)
File "C:program.py", line 46, in get_routes
nonstop_route = route['rows'][0]['elements'][1]['duration']['value']
KeyError: 'duration'
The two below I get together, and they are less common:
Exception in thread Thread-436:
Traceback (most recent call last):
File "C:\Python37-32\lib\threading.py", line 917, in _bootstrap_inner
self.run()
File "C:\Python37-32\lib\threading.py", line 865, in run
self._target(*self._args, **self._kwargs)
File "C:/program.py", line 40, in get_routes
route = response.json()
File "C:\requests\models.py", line 897, in json
return complexjson.loads(self.text, **kwargs)
File "C:\Python37-32\lib\json\__init__.py", line 348, in loads
return _default_decoder.decode(s)
File "C:\Python37-32\lib\json\decoder.py", line 337, in decode
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
File "C:\Python37-32\lib\json\decoder.py", line 355, in raw_decode
raise JSONDecodeError("Expecting value", s, err.value) from None
json.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)
second error:
Exception in thread Thread-196:
Traceback (most recent call last):
File "C:\site-packages\urllib3\response.py", line 360, in _error_catcher
yield
File "C:\urllib3\response.py", line 442, in read
data = self._fp.read(amt)
File "C:\Python37-32\lib\http\client.py", line 447, in read
n = self.readinto(b)
File "C:\Python37-32\lib\http\client.py", line 491, in readinto
n = self.fp.readinto(b)
File "C:\Python37-32\lib\socket.py", line 589, in readinto
return self._sock.recv_into(b)
File "C:\Python37-32\lib\ssl.py", line 1052, in recv_into
return self.read(nbytes, buffer)
File "C:\Python37-32\lib\ssl.py", line 911, in read
return self._sslobj.read(len, buffer)
ConnectionAbortedError: [WinError 10053] An established connection was aborted by the software in your host machine
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\site-packages\requests\models.py", line 750, in generate
for chunk in self.raw.stream(chunk_size, decode_content=True):
File "C:\site-packages\urllib3\response.py", line 494, in stream
data = self.read(amt=amt, decode_content=decode_content)
File "C:\site-packages\urllib3\response.py", line 459, in read
raise IncompleteRead(self._fp_bytes_read, self.length_remaining)
File "C:\Python37-32\lib\contextlib.py", line 130, in __exit__
self.gen.throw(type, value, traceback)
File "C:\site-packages\urllib3\response.py", line 378, in _error_catcher
raise ProtocolError('Connection broken: %r' % e, e)
urllib3.exceptions.ProtocolError: ("Connection broken: ConnectionAbortedError(10053, 'An established connection was aborted by the software in your host machine', None, 10053, None)", ConnectionAbortedError(10053, 'An established connection was aborted by the software in your host machine', None, 10053, None))
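One way to test the overload theory is to cap the number of in-flight requests (for example with a ThreadPoolExecutor) and to check every element's 'status' before reading 'duration', since an element that is not 'OK' simply has no 'duration' key. Below is a rough sketch of that idea only; the helper names are hypothetical, and the response structure is assumed to match the code above.

from concurrent.futures import ThreadPoolExecutor
import requests

URL = 'https://maps.googleapis.com/maps/api/distancematrix/json'

def fetch_route(start, end, waypoint, idx):
    resp = requests.get(URL, params={'origins': f'{start}|{waypoint}',
                                     'destinations': f'{start}|{end}',
                                     'key': '# key'})
    route = resp.json()
    rows = route.get('rows', [])
    if len(rows) < 2:
        return idx, None                        # whole request failed
    elements = rows[0]['elements'] + rows[1]['elements']
    if any(el.get('status') != 'OK' for el in elements):
        return idx, None                        # skip instead of raising KeyError
    return idx, route

def fetch_all(start, end, stops, max_workers=8):
    results = [None] * len(stops)
    # At most max_workers requests are in flight at once.
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        futures = [pool.submit(fetch_route, start, end, wp, i)
                   for i, wp in enumerate(stops)]
        for f in futures:
            idx, route = f.result()
            results[idx] = route
    return results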

Using threading.timer to delay sub-procedure

def emailCheck(self):
    n = 0
    (retcode, messages) = mail.search(None, '(UNSEEN)')
    if retcode == 'OK':
        for num in messages[0].split():
            n = n + 1
            typ, data = mail.fetch(num, '(RFC822)')
            for response_part in data:
                if isinstance(response_part, tuple):
                    original = email.message_from_bytes(response_part[1])
                    print(original['From'])
                    print(original['Subject'])
            typ, data = mail.store(num, '+FLAGS', '\\Seen')
        print(n)
    t = threading.Timer(10.0, emailCheck)
    t.start()
I am trying to delay the sub-procedure using threading.Timer(), but I think the error has to do with the inclusion of self in the parentheses. I am using PyQt, so all of this is contained within the class MainWindow.
The error:
Exception in thread Thread-1:
Traceback (most recent call last):
File "C:\Python33\lib\threading.py", line 637, in _bootstrap_inner
self.run()
File "C:\Python33\lib\threading.py", line 823, in run
self.function(*self.args, **self.kwargs)
TypeError: emailCheck() missing 1 required positional argument: 'self'
The Timer is being given the plain function emailCheck, so it gets called with no instance; passing the bound method fixes it:
t = threading.Timer(10.0, self.emailCheck)
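Here is a minimal, self-contained sketch of that self-rescheduling pattern, using a simplified stand-in class rather than the actual PyQt MainWindow:

import threading

class MainWindow:
    def emailCheck(self):
        # ... check the mailbox here ...
        print("checked mail")
        t = threading.Timer(10.0, self.emailCheck)   # bound method, so no missing 'self'
        t.daemon = True                              # let the program exit cleanly
        t.start()

w = MainWindow()
w.emailCheck()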

How to handle a threading exception in Tornado?

I am working on a whiteboard application. When I load the page the first time it gives perfect output, and the second time it also gives perfect output. But when I refresh the page a third time, it gives the error shown below:
Exception in thread Thread-3:
Traceback (most recent call last):
File "/usr/lib/python3.2/threading.py", line 740, in _bootstrap_inner
self.run()
File "/usr/lib/python3.2/threading.py", line 693, in run
self._target(*self._args, **self._kwargs)
File "/home/nyros/Desktop/python3/whiteboard/websockethandler.py", line 45, in redis_listener
listener.send_message(message['data'])
File "/home/nyros/Desktop/python3/whiteboard/websockethandler.py", line 150, in send_message
self.write_message(message)
File "/home/nyros/Desktop/python3/venv3/lib/python3.2/site-packages/tornado/websocket.py", line 165, in write_message
self.ws_connection.write_message(message, binary=binary)
AttributeError: 'NoneType' object has no attribute 'write_message'
The code is below:
def redis_listener(self, room_name, page_no):
    self.logger.info("Starting listener thread for room %s" % room_name)
    rr = redis.Redis(host=config.REDIS_IP_ADDRESS, port=config.REDIS_PORT, db=1)
    r = rr.pubsub()
    r.subscribe(self.construct_key(room_name, page_no))
    for message in r.listen():
        print(message, "ok ok ok .......")
        for listener in self.application.LISTENERS.get(room_name, {}).get(page_no, []):
            self.logger.debug("Sending message to room %s" % room_name)
            listener.send_message(message['data'])

def send_message(self, message):
    if type(message) == type(b''):
        self.logger.info("Decoding binary string")
        message = message.decode('utf-8')
    elif type(message) != type(''):
        self.logger.info("Converting message from %s to %s" % (type(message),
                                                               type('')))
        message = str(message)
    message = b64encode(compress(bytes(quote(message), 'utf-8'), 9))
    self.write_message(message)
Looks like you have different listeners in the array. It's hard to tell the exact reason for the problem, but try logging them like this:
if hasattr(self, 'write_message'):
    self.write_message(message)
else:
    self.logger.debug(self)
It will prevent the error.
