async def start(channel):
    while True:
        m = await client.send_message(channel, "Generating... ")
        generator.makeFile()
        with open('tmp.png', 'rb') as f:
            await client.send_file(channel, f)
        await client.delete_message(m)
        await asyncio.sleep(2)
I have a discord bot that runs a task every 2 seconds. I tried using an infinite loop for this, but the script crashes with Task was destroyed but it is still pending! I have read about asyncio's coroutines, but none of the examples I found use await in them. Is it possible to avoid this error, for example by running the coroutine with await?
Task was destroyed but it is still pending! is a warning that you receive when you call loop.close() while some of the tasks in your script aren't finished yet. You should usually avoid this situation, because an unfinished task may not release its resources. You need either to await the task's completion or cancel it before the event loop is closed.
Since you have an infinite loop, you will probably need to cancel the task, for example:
import asyncio
from contextlib import suppress

async def start():
    # your infinite loop here, for example:
    while True:
        print('echo')
        await asyncio.sleep(1)

async def main():
    task = asyncio.Task(start())

    # let the script work for some time:
    await asyncio.sleep(3)

    # cancel the task to avoid the warning:
    task.cancel()
    with suppress(asyncio.CancelledError):
        await task  # wait for the task's cancellation

loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
    loop.run_until_complete(main())
finally:
    loop.run_until_complete(loop.shutdown_asyncgens())
    loop.close()
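For reference, here is a minimal sketch of the same cancel-before-exit pattern written against the Python 3.7+ API (asyncio.run and asyncio.create_task). This variant is not part of the original answer, but it avoids the manual loop management:

import asyncio
from contextlib import suppress

async def start():
    while True:
        print('echo')
        await asyncio.sleep(1)

async def main():
    task = asyncio.create_task(start())
    await asyncio.sleep(3)              # let the loop run for a while
    task.cancel()                       # stop the infinite loop
    with suppress(asyncio.CancelledError):
        await task                      # wait for the cancellation to finish

asyncio.run(main())  # closes the loop and shuts down async generators for you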
See also this answer for more information about tasks.
async def check():
    print("Check...")
    while 1:
        if ...:
            await bot.send_message(...)

def startup():
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    loop.run_until_complete(check())
    loop.close()

async def on_startup(dp):
    threading.Thread(target=startup).start()
I'm running an async function in a new thread. As a result, when it needs to send a message, an error occurs:
timeout context manager should be used inside a task
What should I do?
I'd like to establish an SSH SOCKS tunnel (using asyncssh) during the execution of a synchronous function. When the function is done, I want to tear down the tunnel and exit.
Apparently some async function has to be awaited to keep the tunnel working, so the important thing is that conn.wait_closed() and the synchronous function are executed concurrently. So I am quite sure that I actually need a second thread.
I first tried some saner approaches using a ThreadPoolExecutor with run_in_executor, but then ended up with the abysmal multithreaded variant below.
#! /usr/bin/env python3

import traceback
from threading import Thread
from concurrent.futures import ThreadPoolExecutor
import asyncio, asyncssh, sys

_server = "127.0.0.1"
_port = 22
_proxy_port = 8080

async def run_client():
    conn = await asyncio.wait_for(
        asyncssh.connect(
            _server,
            port=_port,
            options=asyncssh.SSHClientConnectionOptions(client_host_keysign=True),
        ),
        10,
    )
    listener = await conn.forward_socks('127.0.0.1', _proxy_port)
    return conn

async def do_stuff(func):
    try:
        conn = await run_client()
        print("SSH tunnel active")

        def start_loop(loop):
            asyncio.set_event_loop(loop)
            try:
                loop.run_forever()
            except Exception as e:
                print(f"worker loop: {e}")

        async def thread_func():
            ret = await func()
            print("Func done - tearing down worker thread and SSH connection")
            conn.close()
            # asyncio.get_event_loop().stop()
            return ret

        func_loop = asyncio.new_event_loop()
        func_thread = Thread(target=start_loop, args=(func_loop,))
        func_thread.start()
        print("thread started")
        fut = asyncio.run_coroutine_threadsafe(thread_func(), func_loop)
        print(f"fut scheduled: {fut}")
        done = await asyncio.gather(asyncio.wrap_future(fut), conn.wait_closed())
        print("wait done")
        for ret in done:
            print(f"ret={ret}")
        # Canceling pending tasks and stopping the loop
        # asyncio.gather(*asyncio.Task.all_tasks()).cancel()
        print("stopping func_loop")
        func_loop.call_soon_threadsafe(func_loop.stop())
        print("joining func_thread")
        func_thread.join()
        print("joined func_thread")
    except (OSError, asyncssh.Error) as exc:
        sys.exit('SSH connection failed: ' + str(exc))
    except (Exception) as exc:
        sys.exit('Unhandled exception: ' + str(exc))
        traceback.print_exc()

async def just_wait():
    print("starting just_wait")
    input()
    print("ending just_wait")
    return 42

asyncio.get_event_loop().run_until_complete(do_stuff(just_wait))
It actually "works" "correctly" till the end where I get an exception while joining the worker thread. I presume because something I do is not threadsafe.
Exception in callback None()
handle: <Handle>
Traceback (most recent call last):
File "/usr/lib/python3.7/asyncio/events.py", line 88, in _run
self._context.run(self._callback, *self._args)
TypeError: 'NoneType' object is not callable
To test the code you must have a local SSH server running, with key files set up for your user. You may want to change the _port variable.
I am looking for the reason for the exception and/or a version of the program that requires less manual intervention in the threading and possibly uses just a single event loop. I don't know how to achieve the latter when I want to await two things at once (as in the asyncio.gather call).
The immediate cause of your error is this line:
# incorrect
func_loop.call_soon_threadsafe(func_loop.stop())
The intention is to call func_loop.stop() in the thread that runs the func_loop event loop. But as written, it invokes func_loop.stop() in the current thread and passes its return value (None) to call_soon_threadsafe as the function to invoke. This causes call_soon_threadsafe to complain that None is not callable. To fix the immediate problem, you should drop the extra parentheses and invoke the method as:
# correct
func_loop.call_soon_threadsafe(func_loop.stop)
However, the code is definitely over-complicated as written:
it doesn't make sense to create a new event loop when you are already inside an event loop
just_wait shouldn't be async def since it doesn't await anything; it's clearly not an async function
passing a string to sys.exit just prints it to stderr and exits with status 1; also, the traceback.print_exc() after the call to sys.exit is unreachable, since sys.exit raises immediately
To run a non-async function from asyncio, just use run_in_executor and pass it the non-async function as-is. You don't need an extra thread or an extra event loop; run_in_executor will take care of the thread and connect it with your current event loop, effectively making the sync function awaitable. For example (untested):
async def do_stuff(func):
    conn = await run_client()
    print("SSH tunnel active")
    loop = asyncio.get_event_loop()
    ret = await loop.run_in_executor(None, func)
    print(f"ret={ret}")
    conn.close()
    await conn.wait_closed()
    print("wait done")

def just_wait():
    # just_wait is a regular function; it can call blocking code,
    # but it cannot await
    print("starting just_wait")
    input()
    print("ending just_wait")
    return 42

asyncio.get_event_loop().run_until_complete(do_stuff(just_wait))
If you need to await things in just_wait, you can make it async and use run_in_executor for the actual blocking code inside it:
async def do_stuff():
    conn = await run_client()
    print("SSH tunnel active")
    loop = asyncio.get_event_loop()
    ret = await just_wait()
    print(f"ret={ret}")
    conn.close()
    await conn.wait_closed()
    print("wait done")

async def just_wait():
    # just_wait is an async function, it can await, but it
    # must invoke blocking code through run_in_executor
    print("starting just_wait")
    loop = asyncio.get_event_loop()
    await loop.run_in_executor(None, input)
    print("ending just_wait")
    return 42

asyncio.run(do_stuff())
I have a problem at work where, when an InstrInstallSucceeded event comes in, I have to wait for 10 seconds without blocking the main thread, while also waiting for InstrInstallFailed to appear. In other words, 'ToolOn', 'ToolOn', 'ToolOn' should appear without any wait.
import asyncio
from threading import Thread
import time

FLAG = True

async def sleep_loop(t, event):
    global FLAG
    print(event)
    if event == 'InstrInstallSucceeded':
        # spawn a separate thread here such that
        # toolon events are not blocked by the sleep
        await asyncio.sleep(t)
        FLAG = True
    if event == 'InstrInstallFailed':
        # and I want to update the FLAG whenever I see event == 'InstrInstallFailed'
        FLAG = False

async def keep_print():
    print(f'Beginning FLAG:: {FLAG}')
    while FLAG:
        pass
    print(f'End FLAG:: {FLAG}')

def start_loop(loop, t):
    print("in start loop")
    asyncio.set_event_loop(loop)
    for i in ['InstrInstallSucceeded', 'ToolOn', 'ToolOn', 'ToolOn', 'InstrInstallFailed']:
        loop.run_until_complete(asyncio.sleep(1))
        loop.run_until_complete(sleep_loop(t, i))

loop = asyncio.get_event_loop()
new_loop = asyncio.new_event_loop()
t = Thread(target=start_loop, args=(new_loop, 10))
t.start()

coro = keep_print()
loop.run_until_complete(coro)
Output:
in start loop
Beginning FLAG:: True
Executing <Task pending coro=<sleep() running at /Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/asyncio/tasks.py:482> wait_for=<Future pending cb=[<TaskWakeupMethWrapper object at 0x1043f2be8>()] created at /Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/asyncio/base_events.py:284> cb=[_run_until_complete_cb() at /Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/asyncio/base_events.py:185] created at /Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/asyncio/base_events.py:452> took 0.118 seconds
InstrInstallSucceeded
ToolOn
ToolOn
ToolOn
InstrInstallFailed
End FLAG:: False
Executing <Task finished coro=<keep_print() done, defined at fut.py:21> result=None created at /Library/Frameworks/Python.framework/Versions/3.6/lib/python3.6/asyncio/base_events.py:452> took 15.756 seconds
EDIT: using Python 3.6.7
import asyncio

async def dispatch_event(event, alert):
    print(event)
    if event == 'InstrInstallSucceeded':
        # spawn a coroutine if you need something done in parallel
        #asyncio.create_task(xxx())
        await asyncio.sleep(10)
    if event == 'InstrInstallFailed':
        await asyncio.sleep(.5)
    # alert the watcher(s) of the event that was dispatched
    alert.last_event = event
    alert.set()

async def keep_print(alert):
    while True:
        print(f'Beginning FLAG:: {alert.last_event}')
        await alert.wait()
        alert.clear()
        print(f'End FLAG:: {alert.last_event}')

async def main():
    alert = asyncio.Event()
    alert.last_event = None
    # spawn keep_print in the "background"
    loop = asyncio.get_event_loop()
    t = loop.create_task(keep_print(alert))
    for i in ['InstrInstallSucceeded', 'ToolOn', 'ToolOn', 'ToolOn', 'InstrInstallFailed']:
        await asyncio.sleep(1)
        await dispatch_event(i, alert)
    await asyncio.sleep(1)
    t.cancel()

loop = asyncio.get_event_loop()
loop.run_until_complete(main())
EDIT, as suggested by @user418.....:
async def dispatch_event(event, alert):
    alert.last_event = event
    alert.set()
    print(event)
    if event == 'InstrInstallSucceeded':
        # spawn a coroutine if you need something done in parallel
        #asyncio.create_task(xxx())
        await asyncio.sleep(10)
    if event == 'InstrInstallFailed':
        await asyncio.sleep(.5)
    # alert the watcher(s) of the event that was dispatched
Threads and asyncio don't go together, except in specific circumstances (e.g. the implementation of run_in_executor). Instead of spawning new threads, spawn new coroutines.
For example:
import asyncio

async def dispatch_event(event, alert):
    print(event)
    if event == 'InstrInstallSucceeded':
        # spawn a coroutine if you need something done in parallel
        #asyncio.create_task(xxx())
        await asyncio.sleep(1)
    if event == 'InstrInstallFailed':
        await asyncio.sleep(.5)
    # alert the watcher(s) of the event that was dispatched
    alert.last_event = event
    alert.set()

async def keep_print(alert):
    while True:
        print(f'Beginning FLAG:: {alert.last_event}')
        await alert.wait()
        alert.clear()
        print(f'End FLAG:: {alert.last_event}')

async def main():
    alert = asyncio.Event()
    alert.last_event = None
    # spawn keep_print in the "background"
    t = asyncio.create_task(keep_print(alert))
    for i in ['InstrInstallSucceeded', 'ToolOn', 'ToolOn', 'ToolOn', 'InstrInstallFailed']:
        await asyncio.sleep(1)
        await dispatch_event(i, alert)
    await asyncio.sleep(1)
    t.cancel()

asyncio.run(main())
In my code I have a class with properties that occasionally need to run asynchronous code. Sometimes I need to access a property from an asynchronous function, sometimes from a synchronous one, which is why I don't want my properties to be asynchronous. Besides, I have the impression that asynchronous properties are a code smell in general. Correct me if I'm wrong.
My problem is executing an asynchronous method from a synchronous property and blocking further execution until the asynchronous method finishes.
Here is a sample code:
import asyncio

async def main():
    print('entering main')
    synchronous_property()
    print('exiting main')

def synchronous_property():
    print('entering synchronous_property')
    loop = asyncio.get_event_loop()
    try:
        # this will raise an exception, so I catch it and ignore it
        loop.run_until_complete(asynchronous())
    except RuntimeError:
        pass
    print('exiting synchronous_property')

async def asynchronous():
    print('entering asynchronous')
    print('exiting asynchronous')

asyncio.run(main())
Its output:
entering main
entering synchronous_property
exiting synchronous_property
exiting main
entering asynchronous
exiting asynchronous
First, catching the RuntimeError seems wrong, but if I don't do that, I get a RuntimeError: This event loop is already running exception.
Second, the asynchronous() function is executed last, after the synchronous one finishes. I want to do some processing on the data set by the asynchronous method, so I need to wait for it to finish.
If I add await asyncio.sleep(0) after calling synchronous_property(), asynchronous() is called before main() finishes, but that doesn't help me. I need asynchronous() to run before synchronous_property() finishes.
What am I missing? I'm running Python 3.7.
Asyncio is really insistent on not allowing nested loops, by design. However, you can always run another event loop in a different thread. Here is a variant that uses a thread pool to avoid having to create a new thread each time around:
import asyncio, concurrent.futures

async def main():
    print('entering main')
    synchronous_property()
    print('exiting main')

pool = concurrent.futures.ThreadPoolExecutor()

def synchronous_property():
    print('entering synchronous_property')
    result = pool.submit(asyncio.run, asynchronous()).result()
    print('exiting synchronous_property', result)

async def asynchronous():
    print('entering asynchronous')
    await asyncio.sleep(1)
    print('exiting asynchronous')
    return 42

asyncio.run(main())
This code creates a new event loop on each sync->async boundary, so don't expect high performance if you're doing that a lot. It could be improved by creating only one event loop per thread using asyncio.new_event_loop, and caching it in a thread-local variable.
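A minimal sketch of that improvement (the helper _run_in_cached_loop and the thread-local cache are my additions, not part of the answer); each worker thread lazily creates one event loop and reuses it for subsequent calls:

import asyncio, concurrent.futures, threading

pool = concurrent.futures.ThreadPoolExecutor()
_thread_local = threading.local()

def _run_in_cached_loop(coro):
    # reuse one event loop per worker thread instead of creating a
    # fresh one with asyncio.run on every sync->async transition
    loop = getattr(_thread_local, 'loop', None)
    if loop is None:
        loop = asyncio.new_event_loop()
        _thread_local.loop = loop
    return loop.run_until_complete(coro)

def synchronous_property():
    print('entering synchronous_property')
    result = pool.submit(_run_in_cached_loop, asynchronous()).result()
    print('exiting synchronous_property', result)

Note that the cached loops are never closed here, which is acceptable for a long-lived process but worth handling explicitly in real code.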
The easiest way is to use an existing "wheel", like asgiref.async_to_sync:
from asgiref.sync import async_to_sync
then:
async_to_sync(main)()
in general:
async_to_sync(<your_async_func>)(<.. arguments for async function ..>)
This is a caller class which turns an awaitable that only works on the thread with
the event loop into a synchronous callable that works in a subthread.
If the call stack contains an async loop, the code runs there.
Otherwise, the code runs in a new loop in a new thread.
Either way, this thread then pauses and waits to run any thread_sensitive
code called from further down the call stack using SyncToAsync, before
finally exiting once the async task returns.
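For completeness, here is a small self-contained sketch of that usage from plain synchronous code (assuming asgiref is installed; fetch_value and sync_caller are illustrative names, and async_to_sync must not be called from a thread that is already running an event loop):

import asyncio
from asgiref.sync import async_to_sync

async def fetch_value():
    await asyncio.sleep(0.1)
    return 42

def sync_caller():
    # blocks the calling (sync) thread until fetch_value() completes
    return async_to_sync(fetch_value)()

print(sync_caller())  # -> 42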
There appears to be a problem with the question as stated. Restating the question:
How do you communicate between a thread (containing no async processes and hence considered sync) and an async process (running in some event loop)? One approach is to use two sync queues. The sync process puts its request/parameters into QtoAsync and waits on QtoSync. The async process reads QtoAsync WITHOUT waiting; if it finds a request/parameters, it executes the request and places the result in QtoSync.
import asyncio
import queue

QtoAsync = queue.Queue()
QtoSync = queue.Queue()
...

async def asyncProc():
    while True:
        try:
            data = QtoAsync.get_nowait()
            result = await <the async that you wish to execute>
            QtoSync.put(result)  # this can block if the queue is full; you can use put_nowait and handle the exception
        except queue.Empty:
            await asyncio.sleep(0.001)  # a nominal delay forcing this to wait in the event loop
....

# start the sync process in a different thread here..
asyncio.run(main())  # main invokes the async tasks, including asyncProc
The sync thread puts its request to the async side using:
req = <the async that you wish to execute>
QtoAsync.put(req)
result = QtoSync.get()
This should work.
Problems with the question as stated:
1. When the async processes are started with asyncio.run (or similar), execution blocks until the async processes are completed. A separate sync thread has to be started explicitly before calling asyncio.run.
2. In general, asyncio processes depend on other asyncio processes in that loop, so calling an async process from another thread is not permitted directly. The interaction should go through the event loop, and using two queues is one approach (a runnable sketch follows below).
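For completeness, a runnable sketch of the two-queue pattern described above (the names sync_worker and handle are illustrative, not from the answer); the sync thread is started explicitly before asyncio.run, as noted in point 1:

import asyncio
import queue
import threading

QtoAsync = queue.Queue()
QtoSync = queue.Queue()

def sync_worker():
    # plain thread: submit one request and block until the result arrives
    QtoAsync.put("ping")
    print("sync side got:", QtoSync.get())

async def handle(data):
    await asyncio.sleep(0.1)  # stand-in for the real async work
    return f"pong for {data!r}"

async def asyncProc():
    while True:
        try:
            data = QtoAsync.get_nowait()
            QtoSync.put(await handle(data))
            return  # one request is enough for this demo
        except queue.Empty:
            await asyncio.sleep(0.001)  # yield back to the event loop

worker = threading.Thread(target=sync_worker)
worker.start()            # start the sync thread before the event loop
asyncio.run(asyncProc())
worker.join()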
I want to make the async call execute from sync code and block its execution
Just make the sync function async and await the asynchronous function. Async functions are just like normal functions and you can put whatever code you want in them. If you still have a problem, modify your question to include the actual code you are trying to run.
import asyncio

async def main():
    print('entering main')
    await synchronous_property()
    print('exiting main')

async def synchronous_property():
    print('entering synchronous_property')
    await asynchronous()
    # Do whatever sync stuff you want who cares
    print('exiting synchronous_property')

async def asynchronous():
    print('entering asynchronous')
    print('exiting asynchronous')

asyncio.run(main())
If I have the following code sample
async def coro():
    # CancelledError could be raised here
    await asyncio.sleep(1)
    # Or here
    await asyncio.shield(
        another_coro()
    )
    # Or here

async def wait_on_it(loop):
    f = loop.create_task(coro())
    # Pretend f may or may not happen, I just sleep in this example
    await asyncio.sleep(1)
    if not f.done():
        f.cancel()  # Will raise CancelledError when some await finishes in coro()
How can I determine whether or not the shielded task actually ran? I have important logic that must be run iff the shielded task did run. Maybe shielding that function is not the correct method?
coro() can transfer the information to the caller by modifying a mutable object it receives from the caller:
class Ref:
    def __init__(self, **kwargs):
        self.__dict__.update(**kwargs)

async def coro(run_ref):
    await asyncio.sleep(1)
    run_ref.ran_another_coro = True
    await asyncio.shield(another_coro())

async def wait_on_it(loop):
    run_ref = Ref(ran_another_coro=False)
    f = loop.create_task(coro(run_ref))
    await asyncio.sleep(1)
    if not f.done():
        f.cancel()
    if run_ref.ran_another_coro:
        ...  # another_coro() was started
Since there is no await point between setting run_ref.ran_another_coro and the asyncio.shield call (which creates the task for another_coro() before coro() can be suspended or cancelled), if wait_on_it observes a true value of run_ref.ran_another_coro, then another_coro() is guaranteed to have been started.