Asyncio with threading: no exception thrown - python-3.x

I have the following code, which uses asyncio and threading together. The code works fine, but the problem is that it does not raise any exception when an error happens.
import os
import time
from threading import Thread
import queue
import asyncio

def start_loop(loop):
    asyncio.set_event_loop(loop)
    loop.run_forever()

async def startProcess(name):
    try:
        #dfdf
        print(' Started-----------> ', name)
        while True:
            print("Sleeping...", name)
            time.sleep(5)
    except Exception as e:
        print('Caught exception in worker thread')
        raise e

async def main_app():
    loop1 = asyncio.new_event_loop()
    process1_Tread = Thread(target=start_loop, args=(loop1,))
    process1_Tread.start()
    asyncio.run_coroutine_threadsafe(startProcess("Thread1"), loop1)
    while True:
        print("Sleeping main thread...")
        time.sleep(5)

if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    try:
        loop.run_until_complete(main_app())
    finally:
        print("Exit...")
For example, if I add a line that raises an exception:
try:
    dfdf  # to create an exception
    print(' Started-----------> ', name)
the thread stops working, but no error message is shown. How can I see the error when something goes wrong?

I resolved the problem by referring to the answer here:
Asyncio exception handler: not getting called until event loop thread stopped
Here is the full code:
import os
import time
from threading import Thread
import queue
from pprint import pprint
import asyncio

def start_loop(loop):
    asyncio.set_event_loop(loop)
    loop.run_forever()

def exception_handler(loop, context):
    print('Exception handler called')
    pprint(context)

async def startProcess(name):
    try:
        dfdf  # raises NameError to create an exception
        print(' Started-----------> ', name)
        while True:
            print("Sleeping...", name)
            time.sleep(5)
        return "Thread finish"
    except Exception as e:
        raise e

async def main_app():
    loop1 = asyncio.new_event_loop()
    loop1.set_exception_handler(exception_handler)
    process1_Tread = Thread(target=start_loop, args=(loop1,))
    process1_Tread.start()
    fut = asyncio.run_coroutine_threadsafe(startProcess("Thread1"), loop1)
    try:
        print("success:", fut.result())
    except:
        print("exception:", fut.exception())
    #consume signaling
    while True:
        print("Sleeping main thread...")
        time.sleep(5)

if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    try:
        loop.run_until_complete(main_app())
    finally:
        print("Exit...")

Related

Mocking REST APIs with Flask_restful using threading

I'm looking to mock a set of REST APIs for some tests. The following main() function works fine (i.e. it returns {"some-data": 1234} as JSON to the browser when I GET localhost:8099). The issue is that it blocks the main thread:
from gevent import monkey, sleep, pywsgi
monkey.patch_all()
import flask
from flask_restful import reqparse, abort, Api, Resource
import queue
import sys
import threading

STUFFS = {"some-data": 1234}

class Stuff(Resource):
    def get(self):
        return flask.jsonify(STUFFS)

class ControlThread(threading.Thread):
    def __init__(self, http_server, stop_event):
        threading.Thread.__init__(self)
        self.stop_event = stop_event
        self.http_server = http_server
        self.running = False

    def run(self):
        try:
            while not self.stop_event.is_set():
                if not self.running:
                    self.http_server.start()
                    self.running = True
                sleep(0.001)
        except (KeyboardInterrupt, SystemExit):
            pass
        self.http_server.stop()

class StuffMock:
    def __init__(self, port, name=None):
        if name is None:
            name = __name__
        self.app = flask.Flask(name)
        self.api = Api(self.app)
        self.api.add_resource(Stuff, "/stuff/")
        self.stop_event = threading.Event()
        self.http_server = pywsgi.WSGIServer(('', port), self.app)
        self.serving_thread = ControlThread(self.http_server,
                                            self.stop_event)
        self.serving_thread.daemon = True

    def start(self):
        self.serving_thread.start()

    def stop(self):
        self.stop_event.set()
        self.serving_thread.join()

def main():
    mocker = StuffMock(8099)
    mocker.start()
    try:
        while True:
            sleep(0.01)
    except (KeyboardInterrupt, SystemExit):
        mocker.stop()
        sys.exit()

if __name__ == "__main__":
    main()
Without the sleep() call in the while loop above, nothing resolves. Here is a more succinct usage to demonstrate:
import time
from stuff_mock import StuffMock

mocker = StuffMock(8099)
mocker.start()
while True:
    user_text = input("let's do some work on the main thread: ")
    # will only resolve the GET request after user input
    # (i.e. when the main thread executes this sleep call)
    time.sleep(0.1)
    if user_text == "q":
        break
mocker.stop()
The gevent threading module seems to work differently from the core one. Does anyone have any tips or ideas about what's going on under the hood?
Found that if I switch out threading for multiprocessing (and threading.Thread for multiprocessing.Process), everything works as expected, and I can spin up arbitrary numbers of mockers without blocking.
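As a rough illustration of that switch (a sketch only, with hypothetical names; it assumes the served app can be rebuilt inside the child process), the gevent-driven ControlThread is replaced by a multiprocessing.Process that serves until it is terminated:
import multiprocessing
import flask
from gevent import pywsgi

def _serve(port):
    # Child process: build the app and serve it forever; gevent stays
    # confined to this process, so the parent's main thread is never blocked.
    app = flask.Flask(__name__)

    @app.route("/stuff/")
    def stuff():
        return flask.jsonify({"some-data": 1234})

    pywsgi.WSGIServer(('', port), app).serve_forever()

class ProcessStuffMock:
    """Hypothetical multiprocessing-based variant of StuffMock."""
    def __init__(self, port):
        self.proc = multiprocessing.Process(target=_serve, args=(port,), daemon=True)

    def start(self):
        self.proc.start()

    def stop(self):
        self.proc.terminate()
        self.proc.join()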

Thread and asyncio: Task was destroyed but it is pending

I have a thread that runs an asyncio loop. I start a future task that does things which are irrelevant here. When I stop the thread, I stop the asyncio loop as well. However, I cannot seem to cancel the poll task, and I get Task was destroyed but it is pending!
Here is a toy example:
from contextlib import suppress
from threading import Thread
from time import sleep
import asyncio

class Hardware(Thread):
    def __init__(self, *args, **kwargs):
        super(Hardware, self).__init__(*args, **kwargs)
        self.loop = None
        self._poll_task = None

    def run(self):
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)
        self.loop.create_task(self._poll())
        self.loop.run_forever()

    async def _poll(self):
        print('ook')
        await asyncio.sleep(1.0)
        self._poll_task = asyncio.ensure_future(self._poll())
        return self._poll_task

    def stop(self):
        if self._poll_task is not None:
            self.loop.call_soon_threadsafe(self._poll_task.cancel)
        with suppress(asyncio.CancelledError):
            self.loop.call_soon_threadsafe(self.loop.stop)

hw = Hardware()
try:
    hw.start()
    while True:
        sleep(.1)
except KeyboardInterrupt:
    hw.stop()
    hw.join()
Running it outputs:
; python ook.py
ook
ook
^CTask was destroyed but it is pending!
task: <Task pending coro=<Hardware._poll() running at ook.py:22> wait_for=<Future cancelled>>
What am I doing wrong?
You should not only call cancel() on the task, but also await its cancellation, instead of just stopping the loop as you do.
from contextlib import suppress
from threading import Thread
from time import sleep
import asyncio

class Hardware(Thread):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.loop = None
        self._poll_task = None

    def run(self):
        self.loop = asyncio.new_event_loop()
        loop = self.loop
        asyncio.set_event_loop(loop)
        try:
            # create task:
            self._poll_task = asyncio.ensure_future(self._poll())
            # run loop:
            loop.run_forever()
            loop.run_until_complete(loop.shutdown_asyncgens())
            # cancel task:
            self._poll_task.cancel()
            with suppress(asyncio.CancelledError):
                loop.run_until_complete(self._poll_task)
        finally:
            loop.close()

    def stop(self):
        self.loop.call_soon_threadsafe(self.loop.stop)

    async def _poll(self):
        while True:  # you don't need to create a new task each time
            print('ook')
            await asyncio.sleep(1.0)

hw = Hardware()
try:
    hw.start()
    while True:
        sleep(.1)
except KeyboardInterrupt:
    hw.stop()
    hw.join()
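If run() ever creates more than one task, the same cancel-then-await step generalizes; here is a minimal sketch of mine (assuming the tasks were collected into a self._tasks list, which is not in the original code) using asyncio.gather:
# Inside run(), after loop.run_forever() returns and before loop.close():
for task in self._tasks:          # hypothetical list of tasks created in run()
    task.cancel()
loop.run_until_complete(
    asyncio.gather(*self._tasks, return_exceptions=True))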

asyncio get result from coroutine

My task is to make coroutines communicate with the help of asyncio and Python 3.
Please tell me how to do it: one coroutine, in a while True loop, returns a value at varying intervals, and the other coroutine receives this data.
import asyncio

@asyncio.coroutine
def write(future):
    i = 0
    while True:
        yield from asyncio.sleep(1)
        future.set_result('data: {}'.format(i))
        i += 1

def got_result(future):
    print(future.result())

loop = asyncio.get_event_loop()
future = asyncio.Future()
asyncio.ensure_future(write(future))
future.add_done_callback(got_result)
try:
    loop.run_forever()
finally:
    loop.close()
The solution was found with the help of asyncio.Queue():
import asyncio

@asyncio.coroutine
def get_work(task, work_queue):
    while not work_queue.empty():
        print(task)
        queue_item = yield from work_queue.get()
        print('{0} grabbed item: {1}'.format(task, queue_item))
        yield from asyncio.sleep(0.5)
    asyncio.async(get_work(task, work_queue))

# @asyncio.coroutine
i = 0

async def do_work(task, work_queue):
    global i
    print(task)
    while work_queue.empty():
        work_queue.put_nowait(i)
        i += 1
        await asyncio.sleep(2)
        break
    # asyncio.async(do_work())
    print("Dfgdfg")
    asyncio.ensure_future(do_work(task, work_queue))

if __name__ == "__main__":
    queue_obj = asyncio.Queue()
    loop = asyncio.get_event_loop()
    tasks = [
        asyncio.async(do_work('Run do_work', queue_obj)),
        asyncio.async(get_work('Run get_work', queue_obj))]
    loop.run_until_complete(asyncio.wait(tasks))
    loop.run_forever()
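For reference, here is a cleaner minimal producer/consumer sketch with asyncio.Queue (my own version, using async/await rather than the deprecated asyncio.async): one coroutine puts values on the queue at varying intervals and the other receives them as they arrive.
import asyncio
import random

async def producer(queue):
    # Puts a value on the queue at irregular intervals.
    for i in range(5):
        await asyncio.sleep(random.uniform(0.5, 2.0))
        await queue.put('data: {}'.format(i))
    await queue.put(None)  # sentinel: tell the consumer to stop

async def consumer(queue):
    # Receives each value as soon as it is produced.
    while True:
        item = await queue.get()
        if item is None:
            break
        print('got', item)

async def main():
    queue = asyncio.Queue()
    await asyncio.gather(producer(queue), consumer(queue))

loop = asyncio.get_event_loop()
loop.run_until_complete(main())
loop.close()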

tornado server is incompatible with threading module

I'm using tornado with threads.
In short, each time the websocket handler receives a request, it starts to execute a task, which might take a few minutes.
However, once a client is connected, no other client can connect until the first one disconnects.
Any ideas?
I've attached a minimal example that uses time.sleep to simulate long running tasks.
import tornado.web
import tornado.websocket
import tornado.httpserver
import tornado.ioloop
import time
import json
import threading

class TaskHandler(tornado.websocket.WebSocketHandler):
    def open(self):
        pass

    def check_origin(self, origin):
        return True

    def on_message(self, message):
        try:
            print 'received: ', message
            self.write_message(json.dumps({'status': 'running'}))

            def worker_A(kwargs):
                time.sleep(100)
                pass

            def worker_B(kwargs):
                time.sleep(100)
                pass

            threads = []
            for target in [worker_A, worker_B]:
                t = threading.Thread(target=target, args=({'xxx': 'yyy'},))
                t.daemon = True
                t.start()
                threads.append(t)

            for t in threads:
                t.join()
        except Exception, e:
            print 'TaskHandler: exception: ', e
            pass

        self.write_message(json.dumps({'status': 'done'}))

    def on_close(self):
        pass

class Server(tornado.web.Application):
    def __init__(self):
        handlers = [
            ('/task', TaskHandler),
        ]
        tornado.web.Application.__init__(self, handlers)

if __name__ == '__main__':
    server = tornado.httpserver.HTTPServer(Server())
    server.listen(8765, address='127.0.0.1')
    tornado.ioloop.IOLoop.instance().start()
You block the whole Tornado event loop for 100 seconds in t.join. Unless you yield or schedule a callback and return, your function is not asynchronous. Notice how your on_message function starts two threads and then calls t.join on each -- how can Tornado's event loop accomplish any other work while your function is waiting for t.join?
Instead, use a ThreadPoolExecutor, something like this:
from concurrent.futures import ThreadPoolExecutor
from tornado import gen
from tornado.ioloop import IOLoop

thread_pool = ThreadPoolExecutor(4)

class TaskHandler(tornado.websocket.WebSocketHandler):
    # Make this an asynchronous coroutine
    @gen.coroutine
    def on_message_coroutine(self, message):
        print 'received: ', message
        self.write_message(json.dumps({'status': 'running'}))

        def worker_A(kwargs):
            time.sleep(100)
            pass

        def worker_B(kwargs):
            time.sleep(100)
            pass

        futures = []
        for target in [worker_A, worker_B]:
            f = thread_pool.submit(target, {'xxx': 'yyy'})
            futures.append(f)

        # Now the event loop can do other things
        yield futures

    def on_message(self, message):
        IOLoop.current().spawn_callback(self.on_message_coroutine,
                                        message)
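On Tornado 5+ (which runs on the asyncio event loop), roughly the same idea can be written with a native coroutine and run_in_executor. This is my adaptation, not part of the original answer, and it assumes worker_A and worker_B are defined as module-level blocking functions:
import asyncio
import json
from concurrent.futures import ThreadPoolExecutor

import tornado.websocket

thread_pool = ThreadPoolExecutor(4)

class TaskHandler(tornado.websocket.WebSocketHandler):
    async def on_message(self, message):
        # Tornado awaits the coroutine returned by on_message (Tornado 4.5+),
        # so the handler itself can be a native coroutine.
        self.write_message(json.dumps({'status': 'running'}))
        loop = asyncio.get_event_loop()
        # Run both blocking workers in the pool without blocking the IOLoop.
        await asyncio.gather(
            loop.run_in_executor(thread_pool, worker_A, {'xxx': 'yyy'}),
            loop.run_in_executor(thread_pool, worker_B, {'xxx': 'yyy'}),
        )
        self.write_message(json.dumps({'status': 'done'}))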

Python asyncio - Loop exits with Task was destroyed but it is pending

This is the relevant code of my python program:
import discord
import asyncio

class Bot(discord.Client):
    def __init__(self):
        super().__init__()

    @asyncio.coroutine
    def my_background_task(self):
        yield from self.wait_until_ready()
        while not self.is_closed:
            yield from asyncio.sleep(3600*24)  # <- This is line 76 where it fails
            doSomething()

bot = Bot()
loop = asyncio.get_event_loop()
try:
    loop.create_task(bot.my_background_task())
    loop.run_until_complete(bot.login('username', 'password'))
    loop.run_until_complete(bot.connect())
except Exception:
    loop.run_until_complete(bot.close())
finally:
    loop.close()
The program occasionally quits (on its own, while it should not) with no errors or warnings other than:
Task was destroyed but it is pending!
task: <Task pending coro=<my_background_task() running at bin/discordBot.py:76> wait_for=<Future pending cb=[Task._wakeup()]>>
How can I ensure the program won't randomly quit? I'm on Python 3.4.3+ on Xubuntu 15.10.
This is because the discord client module needs control once every minute or so.
This means that any function that steals control for more than a certain time causes discord's client to enter an invalid state (which will manifest itself as an exception some point later, perhaps upon next method call of client).
To ensure that the discord module client can ping the discord server, you should use a true multi-threading solution.
One solution is to offload all heavy processing onto a separate process (a separate thread will not do, because Python has a global interpreter lock) and use the discord bot as a thin layer whose responsibility is to populate work queues.
Related reading:
https://discordpy.readthedocs.io/en/latest/faq.html#what-does-blocking-mean
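A much smaller sketch of the same idea (mine, not part of the original answer): keep the bot coroutine thin and push each heavy job into a ProcessPoolExecutor via run_in_executor, so the event loop keeps servicing discord's heartbeat. heavy_work here is a hypothetical stand-in for the real processing.
import asyncio
from concurrent.futures import ProcessPoolExecutor

process_pool = ProcessPoolExecutor()

def heavy_work(n):
    # CPU-bound or otherwise blocking work runs in a separate process,
    # so it cannot starve the event loop that keeps the bot alive.
    return sum(i * i for i in range(n))

async def handle_request(n):
    loop = asyncio.get_event_loop()
    result = await loop.run_in_executor(process_pool, heavy_work, n)
    return result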
Example solution... this is WAY beyond the scope of the problem, but I already had the code mostly written. If I had more time, I would write a shorter solution :)
2 parts, discord interaction and processing server:
This is the discord listener.
import discord
import re
import asyncio
import traceback
import websockets
import json

# Call a function on the other server
async def call(methodName, *args, **kwargs):
    async with websockets.connect('ws://localhost:9001/meow') as websocket:
        payload = json.dumps({"method": methodName, "args": args, "kwargs": kwargs})
        await websocket.send(payload)
        #...
        resp = await websocket.recv()
        #...
        return resp

client = discord.Client()
tok = open("token.dat").read()

@client.event
async def on_ready():
    print('Logged in as')
    print(client.user.name)
    print(client.user.id)
    print('------')

@client.event
async def on_error(event, *args, **kwargs):
    print("Error?")

@client.event
async def on_message(message):
    try:
        if message.author.id == client.user.id:
            return
        m = re.match("(\w+) for (\d+).*?", message.content)
        if m:
            g = m.groups(1)
            methodName = g[0]
            someNumber = int(g[1])
            response = await call(methodName, someNumber)
            if response:
                await client.send_message(message.channel, response[0:2000])
    except Exception as e:
        print(e)
        print(traceback.format_exc())

client.run(tok)
This is the worker server for processing heavy requests. You can make this part sync or async.
I chose to use some magic called a websocket to send data from one python process to another one. But you can use anything you want. You could make one script write files into a dir, and the other script could read the files out and process them, for example.
import tornado
import tornado.web
import tornado.websocket
import tornado.httpserver
import json
import asyncio
import inspect
import time

class Handler:
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def consume(self, text):
        return "You said {0} and I say hiya".format(text)

    async def sweeps(self, len):
        await asyncio.sleep(len)
        return "Slept for {0} seconds asynchronously!".format(len)

    def sleeps(self, len):
        time.sleep(len)
        return "Slept for {0} seconds synchronously!".format(len)

class MyService(Handler, tornado.websocket.WebSocketHandler):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def stop(self):
        Handler.server.stop()

    def open(self):
        print("WebSocket opened")

    def on_message(self, message):
        print(message)
        j = json.loads(message)
        methodName = j["method"]
        args = j.get("args", ())
        method = getattr(self, methodName)
        if inspect.iscoroutinefunction(method):
            loop = asyncio.get_event_loop()
            task = loop.create_task(method(*args))
            task.add_done_callback(lambda res: self.write_message(res.result()))
            future = asyncio.ensure_future(task)
        elif method:
            resp = method(*args)
            self.write_message(resp)

    def on_close(self):
        print("WebSocket closed")

application = tornado.web.Application([
    (r'/meow', MyService),
])

if __name__ == "__main__":
    from tornado.platform.asyncio import AsyncIOMainLoop
    AsyncIOMainLoop().install()
    http_server = tornado.httpserver.HTTPServer(application)
    Handler.server = http_server
    http_server.listen(9001)
    asyncio.get_event_loop().run_forever()
Now, if you run both processes in separate python scripts, and tell your bot "sleep for 100", it will sleep for 100 seconds happily!
The asyncio stuff functions as a make-shift work queue, and you can properly separate the listener from the backend processing by running them as separate python scripts.
Now, no matter how long your functions run in the 'server' part, the client part will never be prevented from pinging the discord server.
Image failed to upload, but... anyway, this is how to tell the bot to sleep and reply... note that the sleep is synchronous.
http://i.imgur.com/N4ZPPbB.png
I don't think the problem happens during asyncio.sleep. In any case, you shouldn't suppress the exception you got:
bot = Bot()
loop = asyncio.get_event_loop()
try:
    # ...
except Exception as e:
    loop.run_until_complete(bot.close())
    raise e  # <--- re-raise the exception raised during execution so you can see it (or log it here)
finally:
    # ...
You have to manually stop your task on exit:
import discord
import asyncio

class Bot(discord.Client):
    def __init__(self):
        super().__init__()

    @asyncio.coroutine
    def my_background_task(self):
        yield from self.wait_until_ready()
        while not self.is_closed:
            yield from asyncio.sleep(3600*24)  # <- This is line 76 where it fails
            doSomething()

bot = Bot()
loop = asyncio.get_event_loop()
try:
    task = loop.create_task(bot.my_background_task())
    loop.run_until_complete(bot.login('username', 'password'))
    loop.run_until_complete(bot.connect())
except Exception:
    loop.run_until_complete(bot.close())
finally:
    task.cancel()
    try:
        loop.run_until_complete(task)
    except Exception:
        pass
    loop.close()
