Here is a sample of the code I'm running:

import multiprocessing as mp
import networkx as nx

def function(elem):
    var1 = elem[0]
    var2 = elem[1]
    length = nx.shortest_path_length(G, source=var1, target=var2)
    return length

p = mp.Pool(processes=4)
results = p.map_async(function, iterable=elements)
track_job(results)
p.close()
p.join()
The error I'm facing is:
Exception in thread Thread-1:
Traceback (most recent call last):
File "/usr/local/lib/python3.8/threading.py", line 932, in _bootstrap_inner
self.run()
File "/usr/local/lib/python3.8/threading.py", line 870, in run
self._target(*self._args, **self._kwargs)
File "/usr/local/lib/python3.8/multiprocessing/managers.py", line 192, in accepter
t.start()
File "/usr/local/lib/python3.8/threading.py", line 852, in start
_start_new_thread(self._bootstrap, ())
RuntimeError: can't start new thread
After this error, the entire process halts.
Two questions:
Is this issue at the hardware level, or can it be avoided?
How can I handle this error so that the other processes keep running, and how can I retry the faulty task?
TIA
I typically do stuff like this, maybe it will work for you:
import multiprocessing as mp
import psutil

# Size the pool from the physical core count instead of hard-coding it
p = mp.Pool(psutil.cpu_count(logical=False))

# ... or leave one CPU free and let a context manager handle cleanup:
with mp.Pool((psutil.cpu_count() - 1) or 1) as p:
    try:
        # map_async returns an AsyncResult; call .get() to collect the results
        results = p.map_async(function, iterable=elements).get()
    except Exception as e1:
        print(f'{e1}')
    finally:
        p.close()
        p.join()
For the track_job(results), you may want to multiprocess that as well depending on what it's doing.
The try / except / finally error handling also helps your program continue when it hits any exception, using Exception as the catch-all base class.
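For your second question about retries: a single failed task won't stop the others if you catch the exception inside the worker itself. A minimal sketch, reusing function, elements, and the pool p from above (safe_function and the retry count are my own names):

def safe_function(elem, retries=3):
    # Catch per-task failures so one bad element never kills the pool,
    # retrying a few times before giving up.
    last_err = None
    for _ in range(retries):
        try:
            return function(elem)
        except Exception as e:
            last_err = e
    return ('failed', elem, str(last_err))

results = p.map_async(safe_function, iterable=elements).get()

Any element that still fails after the retries comes back tagged as 'failed', so you can collect and re-queue those separately.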
I am coding a userbot in Pyrogram, and I want to run multiple Clients with different Telegram accounts, but I am stuck. I want to run the userbot on multiple accounts from one script; if I run each account separately, I have to host the script many times, and I want to host it once and run it for every account I have. I think this will help explain what I mean:
from pyrogram import Client, filters, handlers, idle
import threading
from pyrogram.handlers import MessageHandler

app1 = Client(session, api_hash, api_id)
app2 = Client(session, api_hash, api_id)

accounts = [app1, app2]

async def handlngmessage(client, message):
    print(message)
    print("\nstarted ")
    await client.send_message("me", "received")

def runner(c):
    c.run()

for ac in accounts:
    ac.add_handler(handlers.MessageHandler(handlngmessage))
    t = threading.Thread(target=runner, args=(ac,))
    t.start()
    t.join()
When I run this, I just get this error.
Output:
Exception in thread Thread-1:
Traceback (most recent call last):
File "/usr/lib/python3.9/threading.py", line 954, in _bootstrap_inner
self.run()
File "/usr/lib/python3.9/threading.py", line 892, in run
self._target(*self._args, **self._kwargs)
File "/home/ak/Desktop/development/bots/pyrogramplugins/userbot/main.py", line 30, in runner
c.run()
File "/home/ak/.local/lib/python3.9/site-packages/pyrogram/methods/utilities/run.py", line 50, in run
loop = asyncio.get_event_loop()
File "/usr/lib/python3.9/asyncio/events.py", line 642, in get_event_loop
raise RuntimeError('There is no current event loop in thread %r.'
RuntimeError: There is no current event loop in thread 'Thread-1'.
Exception in thread Thread-2:
Traceback (most recent call last):
File "/usr/lib/python3.9/threading.py", line 954, in _bootstrap_inner
self.run()
File "/usr/lib/python3.9/threading.py", line 892, in run
self._target(*self._args, **self._kwargs)
File "/home/ak/Desktop/development/bots/pyrogramplugins/userbot/main.py", line 30, in runner
c.run()
File "/home/ak/.local/lib/python3.9/site-packages/pyrogram/methods/utilities/run.py", line 50, in run
loop = asyncio.get_event_loop()
File "/usr/lib/python3.9/asyncio/events.py", line 642, in get_event_loop
raise RuntimeError('There is no current event loop in thread %r.'
RuntimeError: There is no current event loop in thread 'Thread-2'.
With Pyrogram you don't need to use Threading. The internal code is already entirely asynchronous and you can just start the clients one after another, then call Client.idle() to keep them all "alive".
from pyrogram import Client
from pyrogram.handlers import MessageHandler

app1 = Client("first account")
app2 = Client("second account")

# You can either stack decorators ...
# @app1.on_message()
# @app2.on_message()
async def m_func(_, message):
    pass

# ... or use multiple add_handler calls
app1.add_handler(MessageHandler(m_func))
app2.add_handler(MessageHandler(m_func))

# Then start all Clients and call idle() to keep them running
app1.start()
app2.start()

Client.idle()

app1.stop()
app2.stop()
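If you prefer the stacked-decorator form from the comments above, the registration part would look like this instead:

# The same handler attached to both clients via stacked decorators
@app1.on_message()
@app2.on_message()
async def m_func(_, message):
    pass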
Alternatively, here's a Gist with some more explanation.
https://gist.github.com/pokurt/96afa69e86725850b2101099461609ed
from asyncio import FIRST_EXCEPTION
from concurrent.futures.thread import ThreadPoolExecutor
from queue import Queue
from concurrent.futures import wait
import os

def worker(i: int, in_queue: Queue) -> None:
    while 1:
        data = in_queue.get()
        if data is None:
            in_queue.put(data)
            print(f'worker {i} exit')
            return
        print(os.path.exists(data))

def main():
    with ThreadPoolExecutor(max_workers=2) as executor:
        queue = Queue(maxsize=2)
        workers = [executor.submit(worker, i, queue) for i in range(2)]
        for obj in [{'fn': '/path/to/sth'}, {}]:
            fn = obj['fn']  # here is the exception
            queue.put((fn,))
        queue.put(None)
        done, error = wait(workers, return_when=FIRST_EXCEPTION)
        print(done, error)

main()
This program gets stuck when the exception happens.
From the log:
Traceback (most recent call last):
File "test.py", line 34, in <module>
main()
File "test.py", line 31, in main
print(done, error)
File "/.pyenv/versions/3.7.4/lib/python3.7/concurrent/futures/_base.py", line 623, in __exit__
self.shutdown(wait=True)
File "/.pyenv/versions/3.7.4/lib/python3.7/concurrent/futures/thread.py", line 216, in shutdown
t.join()
File "/.pyenv/versions/3.7.4/lib/python3.7/threading.py", line 1044, in join
self._wait_for_tstate_lock()
File "/.pyenv/versions/3.7.4/lib/python3.7/threading.py", line 1060, in _wait_for_tstate_lock
elif lock.acquire(block, timeout):
KeyboardInterrupt
It happens because the wait function stays locked, which is weird because the exception happens before the wait call. It should return when the exception happens!
Why doesn't it return immediately when the exception happens?
Yes, I found the reason. When the exception happens, the executor exits, and the with statement calls self.shutdown(wait=True), so the main thread waits for the worker threads to exit; however, the worker threads keep running.
So the solution is to shut down the executor manually with wait=False:
import traceback

try:
    ## code here
except Exception as e:
    traceback.print_exc()
    executor.shutdown(False)
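Applied to the example above, a sketch of main() with that fix might look like this, reusing worker from the question (the extra queue.put(None) before the shutdown is mine, so the blocked workers also see the sentinel and exit):

import traceback
from queue import Queue
from concurrent.futures import wait, FIRST_EXCEPTION
from concurrent.futures.thread import ThreadPoolExecutor

def main():
    executor = ThreadPoolExecutor(max_workers=2)
    queue = Queue(maxsize=2)
    workers = [executor.submit(worker, i, queue) for i in range(2)]
    try:
        for obj in [{'fn': '/path/to/sth'}, {}]:
            fn = obj['fn']  # raises KeyError on the second dict
            queue.put((fn,))
        queue.put(None)
        print(wait(workers, return_when=FIRST_EXCEPTION))
    except Exception:
        traceback.print_exc()
        queue.put(None)           # unblock the workers waiting on get()
        executor.shutdown(False)  # return without waiting for them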
This snippet of code (a minimal server running in a thread, code taken from here) works fine with Python 3.8.3 but raises an error with Python 3.9.0:
import asyncio
import threading
from aiohttp import web

def aiohttp_server():
    def say_hello(request):
        return web.Response(text='Hello, world')

    app = web.Application()
    app.add_routes([web.get('/', say_hello)])
    runner = web.AppRunner(app)
    return runner

def run_server(runner):
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    loop.run_until_complete(runner.setup())
    site = web.TCPSite(runner, 'localhost', 8080)
    loop.run_until_complete(site.start())
    loop.run_forever()

t = threading.Thread(target=run_server, args=(aiohttp_server(),))
t.start()
The error message:
Exception in thread Thread-1:
Traceback (most recent call last):
File "/usr/lib/python3.9/threading.py", line 954, in _bootstrap_inner
self.run()
File "/usr/lib/python3.9/threading.py", line 892, in run
self._target(*self._args, **self._kwargs)
File "/home/alkhinoos/nikw/nikw/z2.py", line 21, in run_server
loop.run_until_complete(site.start())
File "/usr/lib/python3.9/asyncio/base_events.py", line 642, in run_until_complete
return future.result()
File "/usr/lib/python3.9/site-packages/aiohttp/web_runner.py", line 121, in start
self._server = await loop.create_server(
File "/usr/lib/python3.9/asyncio/base_events.py", line 1460, in create_server
infos = await tasks.gather(*fs, loop=self)
File "/usr/lib/python3.9/asyncio/base_events.py", line 1400, in _create_server_getaddrinfo
infos = await self._ensure_resolved((host, port), family=family,
File "/usr/lib/python3.9/asyncio/base_events.py", line 1396, in _ensure_resolved
return await loop.getaddrinfo(host, port, family=family, type=type,
File "/usr/lib/python3.9/asyncio/base_events.py", line 856, in getaddrinfo
return await self.run_in_executor(
File "/usr/lib/python3.9/asyncio/base_events.py", line 809, in run_in_executor
executor = concurrent.futures.ThreadPoolExecutor(
File "/usr/lib/python3.9/concurrent/futures/__init__.py", line 49, in __getattr__
from .thread import ThreadPoolExecutor as te
File "/usr/lib/python3.9/concurrent/futures/thread.py", line 37, in <module>
threading._register_atexit(_python_exit)
File "/usr/lib/python3.9/threading.py", line 1374, in _register_atexit
raise RuntimeError("can't register atexit after shutdown")
RuntimeError: can't register atexit after shutdown
What's going on? The same problem appears with Python 3.9.1. Is this problem solved in Python 3.9.2? Maybe a related issue here.
As mentioned in the Python manual under Thread Objects:
Other threads can call a thread’s join() method. This blocks the calling thread until the thread whose join() method is called is terminated.
After calling t.start(), the main thread has nothing more to do, so it exits and the process begins shutting down.
If you want the child thread to run forever, or until it exits on its own, you should call t.join() in the main thread after t.start().
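Applied to the snippet above:

t = threading.Thread(target=run_server, args=(aiohttp_server(),))
t.start()
t.join()  # block here so interpreter shutdown doesn't begin while the server runs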
Not sure what you did, but I used 127.0.0.1 instead of localhost and the error was resolved!
I managed to get this sorted by importing the ThreadPoolExecutor module before calling any of my main application code, i.e. at the top of my main.py start-up script:

import concurrent.futures

thread_pool_ref = concurrent.futures.ThreadPoolExecutor
Just the act of importing the module early (before any threads are initialised) was enough to fix the error. There is an ongoing issue around how this module must be imported in the main thread before any child threads import or use any related threading library code.
The inspiration for this fix came from this post on the Python bug tracker. My issue was specifically around the boto3 library, but the fix is applicable across the board.
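In practice that just means something like this at the very top of main.py, before any application imports that might spawn threads (a sketch):

# main.py
import concurrent.futures
from concurrent.futures.thread import ThreadPoolExecutor  # force the submodule import in the main thread

thread_pool_ref = concurrent.futures.ThreadPoolExecutor

# ... application imports and start-up code follow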
Good day,
I am running some long-running async jobs using PubSub to trigger a function. Occasionally, the task may fail. In such cases, I simply want to log the exception, acknowledge the message, and restart the subscription to ensure that the subscriber is still pulling new messages after the failure has occurred.
I have placed some simplified code to demonstrate my current setup below:
try:
    while True:
        streaming_pull_future = workers.subscriber.subscribe(
            subscription_path, callback=worker_task  # includes logic to ack() the message if it has failed before
        )
        print(f'Listening for messages on {subscription_path}')
        try:
            streaming_pull_future.result()
        except Exception as e:
            print(streaming_pull_future.cancelled())  # <-- this evaluates to False
            streaming_pull_future.cancel()  # <-- this raises RuntimeError: set_result can only be called once.
            print(e)
except KeyboardInterrupt:  # seems to be an issue as per GitHub PubSub issue #17. No keyboard interrupt
    streaming_pull_future.cancel()
I keep seeing a RuntimeError: set_result can only be called once when I execute streaming_pull_future.cancel() in the exception handler. I checked whether the subscriber had already been cancelled, but when I logged the status it evaluated to False. Yet when I then call the cancel() method I get the error. I want to ensure that any threads are cleaned up before making a new subscription, in case I hit several errors in a row. Does anyone know why this is happening and a way around it?
I am running Python 3.7.4 with PubSub 1.2.0 and grpcio 1.27.1.
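For clarity, the behaviour I want inside the callback is roughly this (a sketch; do_long_running_job and has_failed_before are placeholders for my actual logic):

import logging

def worker_task(message):
    try:
        do_long_running_job(message)      # placeholder for the real work
        message.ack()
    except Exception:
        logging.exception('Job failed')
        if has_failed_before(message.message_id):  # placeholder bookkeeping
            message.ack()                 # repeat failure: give up on this message
        else:
            message.nack()                # first failure: let Pub/Sub redeliver it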
Update:
As per comments, please see a reproducible example. The stack trace raised is included:
Listening for messages on projects/trigger-web-app/subscriptions/load-job-sub
968432700946405
Top-level exception occurred in callback while processing a message
Traceback (most recent call last):
File "C:\..\lib\site-packages\google\cloud\pubsub_v1\subscriber\_protocol\streaming_pull_manager.py", line
71, in _wrap_callback_errors
callback(message)
File "test.py", line 19, in worker_task
a = 1/0 # cause an exception to be raised
ZeroDivisionError: division by zero
968424309156485
Top-level exception occurred in callback while processing a message
Traceback (most recent call last):
File "C:\...\lib\site-packages\google\cloud\pubsub_v1\subscriber\_protocol\streaming_pull_manager.py", line
71, in _wrap_callback_errors
callback(message)
File "test.py", line 19, in worker_task
a = 1/0 # cause an exception to be raised
ZeroDivisionError: division by zero
Traceback (most recent call last):
File "test.py", line 29, in main
streaming_pull_future.result()
File "C:...\lib\site-packages\google\cloud\pubsub_v1\futures.py", line 105, in result
raise err
File "C:\...\lib\site-packages\google\cloud\pubsub_v1\subscriber\_protocol\streaming_pull_manager.py", line
71, in _wrap_callback_errors
callback(message)
File "test.py", line 19, in worker_task
a = 1/0 # cause an exception to be raised
ZeroDivisionError: division by zero
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "test.py", line 35, in <module>
main()
File "test.py", line 31, in main
streaming_pull_future.cancel()
File "C:\...\lib\site-packages\google\cloud\pubsub_v1\subscriber\futures.py", line 46, in cancel
return self._manager.close()
File "C:\...\lib\site-packages\google\cloud\pubsub_v1\subscriber\_protocol\streaming_pull_manager.py", line
496, in close
callback(self, reason)
File "C:\...\lib\site-packages\google\cloud\pubsub_v1\subscriber\futures.py", line 37, in _on_close_callback
self.set_result(True)
File "C:\...\lib\site-packages\google\cloud\pubsub_v1\futures.py", line 155, in set_result
raise RuntimeError("set_result can only be called once.")
RuntimeError: set_result can only be called once.
import os
from google.cloud import pubsub_v1

subscriber = pubsub_v1.SubscriberClient()

project_id = os.environ['GOOGLE_CLOUD_PROJECT']
subscription_name = os.environ['GOOGLE_CLOUD_PUBSUB_SUBSCRIPTION_NAME']
subscription_path = f'projects/{project_id}/subscriptions/{subscription_name}'

def worker_task(message):
    job_id = message.message_id
    print(job_id)
    a = 1 / 0  # cause an exception to be raised
    message.ack()

def main():
    streaming_pull_future = subscriber.subscribe(
        subscription_path, callback=worker_task
    )
    print(f'Listening for messages on {subscription_path}')
    try:
        streaming_pull_future.result()
    except Exception as e:
        streaming_pull_future.cancel()  # if an exception was raised in the callback, this raises a RuntimeError
        print(e)

if __name__ == '__main__':
    main()
Thank you.
I am trying to write code that asks for user input and, if that input is not given within a set amount of time, assumes a default value and continues through the rest of the code without requiring the user to hit Enter. I am running Python 3.5.1 on Windows 10.
I have looked through Keyboard input with timeout in Python, How to set time limit on raw_input, Timeout on a function call, and Python 3 Timed Input, black-boxing the answers, but none of them are suitable: they are either not usable on Windows (principally those using signal.SIGALRM, which is only available on Linux) or require the user to hit Enter in order to exit the input.
Based upon the above answers, however, I have attempted to scrape together a solution using multiprocessing which (as I think it should work) creates one process to ask for the input and another process to terminate the first after the timeout period.
import multiprocessing
from time import time,sleep

def wait(secs):
    if secs == 0:
        return
    end = time()+secs
    current = time()
    while end>current:
        current = time()
        sleep(.1)
    return

def delay_terminate_process(process,delay):
    wait(delay)
    process.terminate()
    process.join()

def ask_input(prompt,term_queue,out_queue):
    command = input(prompt)
    process = term_queue.get()
    process.terminate()
    process.join()
    out_queue.put(command)

##### this doesn't even remotly work.....
def input_with_timeout(prompt,timeout=15.0):
    print(prompt)
    astring = 'no input'
    out_queue = multiprocessing.Queue()
    term_queue = multiprocessing.Queue()
    worker1 = multiprocessing.Process(target=ask_input,args=(prompt,term_queue,out_queue))
    worker2 = multiprocessing.Process(target=delay_terminate_process,args=(worker1,timeout))
    worker1.daemon = True
    worker2.daemon = True
    term_queue.put(worker2)
    print('Through overhead')
    if __name__ == '__main__':
        print('I am in if statement')
        worker2.start()
        worker1.start()
        astring = out_queue.get()
    else:
        print('I have no clue what happened that would cause this to print....')
        return
    print('returning')
    return astring

please = input_with_timeout('Does this work?',timeout=10)
But this fails miserably and yields:
Does this work?
Through overhead
I am in if statement
Traceback (most recent call last):
File "C:\Anaconda3\lib\multiprocessing\queues.py", line 241, in _feed
obj = ForkingPickler.dumps(obj)
File "C:\Anaconda3\lib\multiprocessing\reduction.py", line 50, in dumps
cls(buf, protocol).dump(obj)
File "C:\Anaconda3\lib\multiprocessing\queues.py", line 58, in __getstate__
context.assert_spawning(self)
File "C:\Anaconda3\lib\multiprocessing\context.py", line 347, in assert_spawning
' through inheritance' % type(obj).__name__
RuntimeError: Queue objects should only be shared between processes through inheritance
Does this work?
Through overhead
I have no clue what happened that would cause this to print....
Does this work?Process Process-1:
Traceback (most recent call last):
File "C:\Anaconda3\lib\multiprocessing\queues.py", line 241, in _feed
obj = ForkingPickler.dumps(obj)
File "C:\Anaconda3\lib\multiprocessing\reduction.py", line 50, in dumps
cls(buf, protocol).dump(obj)
File "C:\Anaconda3\lib\multiprocessing\process.py", line 287, in __reduce__
'Pickling an AuthenticationString object is '
TypeError: Pickling an AuthenticationString object is disallowed for security reasons
Traceback (most recent call last):
File "C:\Anaconda3\lib\multiprocessing\process.py", line 254, in _bootstrap
self.run()
File "C:\Anaconda3\lib\multiprocessing\process.py", line 93, in run
self._target(*self._args, **self._kwargs)
File "C:\Anaconda3\saved_programs\a_open_file4.py", line 20, in ask_input
command = input(prompt)
EOFError: EOF when reading a line
Does this work?
Through overhead
I have no clue what happened that would cause this to print....
Traceback (most recent call last):
File "C:\Anaconda3\lib\multiprocessing\queues.py", line 241, in _feed
obj = ForkingPickler.dumps(obj)
File "C:\Anaconda3\lib\multiprocessing\reduction.py", line 50, in dumps
cls(buf, protocol).dump(obj)
File "C:\Anaconda3\lib\multiprocessing\queues.py", line 58, in __getstate__
context.assert_spawning(self)
File "C:\Anaconda3\lib\multiprocessing\context.py", line 347, in assert_spawning
' through inheritance' % type(obj).__name__
RuntimeError: Queue objects should only be shared between processes through inheritance
Process Process-2:
Traceback (most recent call last):
File "C:\Anaconda3\lib\multiprocessing\process.py", line 254, in _bootstrap
self.run()
File "C:\Anaconda3\lib\multiprocessing\process.py", line 93, in run
self._target(*self._args, **self._kwargs)
File "C:\Anaconda3\saved_programs\a_open_file4.py", line 16, in delay_terminate_process
process.terminate()
File "C:\Anaconda3\lib\multiprocessing\process.py", line 113, in terminate
self._popen.terminate()
AttributeError: 'NoneType' object has no attribute 'terminate'
I really don't understand the multiprocessing module well, and although I have read the official docs, I am unsure why this error occurred or why the function call appears to have run three times. Any help on how to resolve the error, or on achieving an optional user input in a cleaner manner, would be much appreciated by a noob programmer. Thanks!