Thread and asyncio: Task was destroyed but it is pending - python-3.x

I have a thread that runs an asyncio loop. I start a future task that does things which are irrelevant here. When I stop the thread, I stop the asyncio loop as well. However, I cannot seem to cancel the poll task, and I get Task was destroyed but it is pending!
Here is a toy example:
from contextlib import suppress
from threading import Thread
from time import sleep
import asyncio

class Hardware(Thread):
    def __init__(self, *args, **kwargs):
        super(Hardware, self).__init__(*args, **kwargs)
        self.loop = None
        self._poll_task = None

    def run(self):
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)
        self.loop.create_task(self._poll())
        self.loop.run_forever()

    async def _poll(self):
        print('ook')
        await asyncio.sleep(1.0)
        self._poll_task = asyncio.ensure_future(self._poll())
        return self._poll_task

    def stop(self):
        if self._poll_task is not None:
            self.loop.call_soon_threadsafe(self._poll_task.cancel)
        with suppress(asyncio.CancelledError):
            self.loop.call_soon_threadsafe(self.loop.stop)

hw = Hardware()
try:
    hw.start()
    while True:
        sleep(.1)
except KeyboardInterrupt:
    hw.stop()
    hw.join()
Running it outputs:
; python ook.py
ook
ook
^CTask was destroyed but it is pending!
task: <Task pending coro=<Hardware._poll() running at ook.py:22> wait_for=<Future cancelled>>
What am I doing wrong?

You should not only call cancel() on the task, but also await its cancellation instead of just stopping the loop as you do:
from contextlib import suppress
from threading import Thread
from time import sleep
import asyncio

class Hardware(Thread):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.loop = None
        self._poll_task = None

    def run(self):
        self.loop = asyncio.new_event_loop()
        loop = self.loop
        asyncio.set_event_loop(loop)
        try:
            # create task:
            self._poll_task = asyncio.ensure_future(self._poll())
            # run loop:
            loop.run_forever()
            loop.run_until_complete(loop.shutdown_asyncgens())
            # cancel task:
            self._poll_task.cancel()
            with suppress(asyncio.CancelledError):
                loop.run_until_complete(self._poll_task)
        finally:
            loop.close()

    def stop(self):
        self.loop.call_soon_threadsafe(self.loop.stop)

    async def _poll(self):
        while True:  # you don't need to create new task each time
            print('ook')
            await asyncio.sleep(1.0)

hw = Hardware()
try:
    hw.start()
    while True:
        sleep(.1)
except KeyboardInterrupt:
    hw.stop()
    hw.join()
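For completeness, a minimal sketch (my assumption, not part of the answer above) of how the non-loop thread could await the cancellation itself via asyncio.run_coroutine_threadsafe instead of doing the cleanup inside run():

import asyncio
from contextlib import suppress

async def _cancel_and_wait(task):
    # Cancel the task and wait until the cancellation has actually completed.
    task.cancel()
    with suppress(asyncio.CancelledError):
        await task

# Hypothetical usage from the calling (non-loop) thread:
# fut = asyncio.run_coroutine_threadsafe(_cancel_and_wait(hw._poll_task), hw.loop)
# fut.result()  # blocks until the task is really cancelled
# hw.loop.call_soon_threadsafe(hw.loop.stop)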

Related

Python3: Subclass of Process, call methods in run()

I've tried to extend the multiprocessing.Process class to use it in a Command pattern way... There is a scheduler instance on which the client enqueues commands and triggers their execution. But the command code never terminates after self.execute() is called. Here is the command class:
import sys
from multiprocessing import Process, Queue

class Command(Process):
    def __init__(self):
        super().__init__()
        self.result = None
        self.command_name = type(self).__name__
        self.shell = False

    # from Process
    def run(self):
        super().run()
        print("running " + self.command_name)
        sys.stdout.flush()
        self.execute()
        print("finished " + self.command_name)
        sys.stdout.flush()
        sys.exit(0)

    def execute(self):
        pass
The idea is simply that each subclass of Command provides its own code in the execute() method. For instance:
class LoadCommand(Command):
    def __init__(self, parameterA, ...):
        super().__init__()
        ...
    def execute(self):
        print("executing LoadCommand")
        ...
        return
This is my scheduler:
class Scheduler:
    _instance = None
    _history_queue = []
    _command_queue = []
    _logger = None
    # IPC, negative maxsize means infinite size
    _pipe = Queue(maxsize=-1)

    def __init__(self):
        raise RuntimeError('Call getInstance() instead')

    @classmethod
    def getInstance(cls):
        if cls._instance is None:
            cls._instance = cls.__new__(cls)
        return cls._instance

    def getPipe(self):
        print(self._pipe)
        return self._pipe

    def enqueueCommand(self, command):
        # if isinstance(command, Command):
        self._command_queue.append(command)

    def executeQueue(self, synchronicMode):
        while len(self._command_queue) > 0:
            command = self._command_queue.pop(0)
            command.start()
            if synchronicMode:
                # wait until this process is done
                print("Waiting\n")
                command.join(10)
                if command.is_alive():
                    print("process isn't finished")
                else:
                    print("process finished")
            self._history_queue.append(command)
I've tried calling sys.exit(0) immediately after run() begins, and that succeeds (the process terminates). So maybe there is an error in the inheritance hierarchy, but I can't see it.
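One documented pitfall that matches this symptom (an assumption on my part, since the execute() bodies aren't shown): a child process that has put items on a multiprocessing.Queue will not exit until the queue's feeder thread has flushed them, so if execute() puts results on _pipe and the parent joins before draining it, join() can hang. A minimal self-contained sketch of the same pattern that does terminate, with the result drained before joining:

import sys
from multiprocessing import Process, Queue

class Command(Process):
    def __init__(self, result_queue):
        super().__init__()
        self.result_queue = result_queue

    def run(self):
        print("running", type(self).__name__)
        sys.stdout.flush()
        self.execute()
        print("finished", type(self).__name__)
        sys.stdout.flush()

    def execute(self):
        pass

class LoadCommand(Command):
    def execute(self):
        self.result_queue.put("LoadCommand result")

if __name__ == "__main__":
    pipe = Queue()
    cmd = LoadCommand(pipe)
    cmd.start()
    print(pipe.get())  # drain the queue *before* joining
    cmd.join(10)
    print("finished" if not cmd.is_alive() else "still alive")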

Mocking REST APIs with Flask_restful using threading

I'm looking to mock a set of REST APIs for some tests. The following main() function works fine (i.e. it returns {"some-data": 1234} as JSON to the browser when I GET localhost:8099). The issue is that it blocks the main thread:
from gevent import monkey, sleep, pywsgi
monkey.patch_all()
import flask
from flask_restful import reqparse, abort, Api, Resource
import queue
import sys
import threading

STUFFS = {"some-data": 1234}

class Stuff(Resource):
    def get(self):
        return flask.jsonify(STUFFS)

class ControlThread(threading.Thread):
    def __init__(self, http_server, stop_event):
        threading.Thread.__init__(self)
        self.stop_event = stop_event
        self.http_server = http_server
        self.running = False

    def run(self):
        try:
            while not self.stop_event.is_set():
                if not self.running:
                    self.http_server.start()
                    self.running = True
                sleep(0.001)
        except (KeyboardInterrupt, SystemExit):
            pass
        self.http_server.stop()

class StuffMock:
    def __init__(self, port, name=None):
        if name is None:
            name = __name__
        self.app = flask.Flask(name)
        self.api = Api(self.app)
        self.api.add_resource(Stuff, "/stuff/")
        self.stop_event = threading.Event()
        self.http_server = pywsgi.WSGIServer(('', port), self.app)
        self.serving_thread = ControlThread(self.http_server,
                                            self.stop_event)
        self.serving_thread.daemon = True

    def start(self):
        self.serving_thread.start()

    def stop(self):
        self.stop_event.set()
        self.serving_thread.join()

def main():
    mocker = StuffMock(8099)
    mocker.start()
    try:
        while True:
            sleep(0.01)
    except (KeyboardInterrupt, SystemExit):
        mocker.stop()
        sys.exit()

if __name__ == "__main__":
    main()
Without the sleep() call in the while loop above, nothing resolves. Here is a more succinct usage to demonstrate:
import time
from stuff_mock import StuffMock

mocker = StuffMock(8099)
mocker.start()
while True:
    user_text = input("let's do some work on the main thread: ")
    # will only resolve the GET request after user input
    # (i.e. when the main thread executes this sleep call)
    time.sleep(0.1)
    if user_text == "q":
        break
mocker.stop()
The gevent threading module seems to work differently from the core one. Does anyone have any tips or ideas about what's going on under the hood?
I found that if I switch out threading for multiprocessing (and threading.Thread for multiprocessing.Process), everything works as expected, and I can spin up arbitrary numbers of mockers without blocking; a sketch of that swap follows.
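A plausible explanation (my reading, not confirmed in the thread): monkey.patch_all() turns threading.Thread into greenlets that share one gevent hub with the main thread, so a blocking call such as input() starves the server greenlet; a separate process gets its own hub and is unaffected. A minimal sketch of the multiprocessing variant described above (hypothetical adaptation; StuffMock internals unchanged):

import time
from multiprocessing import Process

def serve(port):
    # import here so gevent's monkey patching only affects the child process
    from stuff_mock import StuffMock
    mocker = StuffMock(port)
    mocker.start()
    while True:
        time.sleep(0.1)

if __name__ == "__main__":
    server = Process(target=serve, args=(8099,), daemon=True)
    server.start()
    input("main process is free for blocking work: ")
    server.terminate()
    server.join()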

How to gracefully terminate a multithreaded Python application that uses queue.Queue

I have been trying to get my application to terminate gracefully for quite some time now, but so far none of the answers I have found worked.
The sample code below illustrates the structure of my application. It basically is a chain of threads that passes data to one another using Queues.
from abc import abstractmethod
from time import sleep
from threading import Thread, Event
from queue import Queue
import signal
import sys

class StoppableThread(Thread):
    def __init__(self):
        super().__init__()
        self.stopper = Event()
        self.queue = Queue()

    @abstractmethod
    def actual_job(self):
        pass

    def stop_running(self):
        self.stopper.set()

    def run(self):
        while not self.stopper.is_set():
            print(self.stopper.is_set())
            self.actual_job()
        self.queue.join()

class SomeObjectOne(StoppableThread):
    def __init__(self, name, some_object_two):
        super().__init__()
        self.name = name
        self.obj_two = some_object_two

    def actual_job(self):
        # print('{} is currently running'.format(self.name))
        input_string = 'some string'
        print('{} outputs {}'.format(self.name, input_string))
        self.obj_two.queue.put(input_string)
        sleep(2)

class SomeObjectTwo(StoppableThread):
    def __init__(self, name, some_object_three):
        super().__init__()
        self.name = name
        self.some_object_three = some_object_three

    def actual_job(self):
        # print('{} is currently running'.format(self.name))
        some_string = self.queue.get()
        inverted = some_string[::-1]
        print('{} outputs {}'.format(self.name, inverted))
        self.some_object_three.queue.put(inverted)
        sleep(2)

class SomeObjectThree(StoppableThread):
    def __init__(self, name):
        super().__init__()
        self.name = name

    def actual_job(self):
        print('{} is currently running'.format(self.name))
        some_string = self.queue.get()
        print('{} outputs {}'.format(self.name, some_string[::-1]))
        sleep(2)

class ServiceExit(Exception):
    """
    Custom exception which is used to trigger the clean exit
    of all running threads and the main program.
    """
    pass

def service_shutdown(signum, frame):
    print('Caught signal %d' % signum)
    raise ServiceExit

signal.signal(signal.SIGTERM, service_shutdown)
signal.signal(signal.SIGINT, service_shutdown)

if __name__ == '__main__':
    thread_three = SomeObjectThree('SomeObjectThree')
    thread_two = SomeObjectTwo('SomeObjectTwo', thread_three)
    thread_one = SomeObjectOne('SomeObjectOne', thread_two)
    try:
        thread_three.start()
        thread_two.start()
        thread_one.start()
        # Keep the main thread running, otherwise signals are ignored.
        while True:
            sleep(0.5)
    except ServiceExit:
        print('Running service exit')
        thread_three.stop_running()
        thread_two.stop_running()
        thread_one.stop_running()
        thread_one.join()
        thread_two.join()
        thread_three.join()
        sys.exit(0)
Now, if I run this code and ctrl-C to terminate, thread_one seems to join as expected, but the code gets stuck at thread_two.join().
Because thread_one is the only thread whose queue stays empty throughout, I suspect it has something to do with the queue.
Any ideas?
In the run() method of StoppableThread you have this:
self.queue.join()
join() is a blocking method:
Blocks until all items in the queue have been gotten and processed.
The count of unfinished tasks goes up whenever an item is added to the
queue. The count goes down whenever a consumer thread calls
task_done() to indicate that the item was retrieved and all work on it
is complete. When the count of unfinished tasks drops to zero, join()
unblocks.
So in order for join() to return, it's not enough to get() an item in the other thread, you must also indicate that it's been processed with task_done():
from abc import abstractmethod
from time import sleep
from threading import Thread, Event
from queue import Queue
import signal
import sys

class StoppableThread(Thread):
    def __init__(self):
        super().__init__()
        self.stopper = Event()
        self.queue = Queue()

    @abstractmethod
    def actual_job(self):
        pass

    def stop_running(self):
        self.stopper.set()

    def run(self):
        while not self.stopper.is_set():
            print(self.stopper.is_set())
            self.actual_job()
        self.queue.join()

class SomeObjectOne(StoppableThread):
    def __init__(self, name, some_object_two):
        super().__init__()
        self.name = name
        self.obj_two = some_object_two

    def actual_job(self):
        # print('{} is currently running'.format(self.name))
        input_string = 'some string'
        print('{} outputs {}'.format(self.name, input_string))
        self.obj_two.queue.put(input_string)
        sleep(2)

class SomeObjectTwo(StoppableThread):
    def __init__(self, name, some_object_three):
        super().__init__()
        self.name = name
        self.some_object_three = some_object_three

    def actual_job(self):
        # print('{} is currently running'.format(self.name))
        some_string = self.queue.get()
        inverted = some_string[::-1]
        print('{} outputs {}'.format(self.name, inverted))
        self.queue.task_done()
        self.some_object_three.queue.put(inverted)
        sleep(2)

class SomeObjectThree(StoppableThread):
    def __init__(self, name):
        super().__init__()
        self.name = name

    def actual_job(self):
        print('{} is currently running'.format(self.name))
        some_string = self.queue.get()
        print('{} outputs {}'.format(self.name, some_string[::-1]))
        self.queue.task_done()
        sleep(2)

class ServiceExit(Exception):
    """
    Custom exception which is used to trigger the clean exit
    of all running threads and the main program.
    """
    pass

def service_shutdown(signum, frame):
    print('Caught signal %d' % signum)
    raise ServiceExit

signal.signal(signal.SIGTERM, service_shutdown)
signal.signal(signal.SIGINT, service_shutdown)

if __name__ == '__main__':
    thread_three = SomeObjectThree('SomeObjectThree')
    thread_two = SomeObjectTwo('SomeObjectTwo', thread_three)
    thread_one = SomeObjectOne('SomeObjectOne', thread_two)
    try:
        thread_three.start()
        thread_two.start()
        thread_one.start()
        # Keep the main thread running, otherwise signals are ignored.
        while True:
            sleep(0.5)
    except ServiceExit:
        print('Running service exit')
        thread_three.stop_running()
        thread_two.stop_running()
        thread_one.stop_running()
        thread_one.join()
        thread_two.join()
        thread_three.join()
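Note that this can still hang if a consumer is blocked in queue.get() with nothing left to consume when the stop flag is set. A further hardening sketch (my addition, not part of the answer above): poll with a timeout so the stop flag is re-checked periodically, shown here for SomeObjectThree:

from queue import Empty

class SomeObjectThree(StoppableThread):
    def __init__(self, name):
        super().__init__()
        self.name = name

    def actual_job(self):
        try:
            some_string = self.queue.get(timeout=0.5)
        except Empty:
            return  # nothing arrived; loop around and re-check self.stopper
        print('{} outputs {}'.format(self.name, some_string[::-1]))
        self.queue.task_done()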

asyncio get result from coroutine

I have a task: make coroutines communicate with each other using asyncio and Python 3.
Please tell me how to do it if one coroutine, in a while True loop, returns values at different intervals, and another coroutine receives this data:
import asyncio

@asyncio.coroutine
def write(future):
    i = 0
    while True:
        yield from asyncio.sleep(1)
        future.set_result('data: '.format(i))
        i += 1

def got_result(future):
    print(future.result())

loop = asyncio.get_event_loop()
future = asyncio.Future()
asyncio.ensure_future(write(future))
future.add_done_callback(got_result)
try:
    loop.run_forever()
finally:
    loop.close()
The solution was found with the help of asyncio.Queue() (a Future's result can only be set once, so repeated set_result() calls on the same future raise InvalidStateError):
import asyncio

@asyncio.coroutine
def get_work(task, work_queue):
    while not work_queue.empty():
        print(task)
        queue_item = yield from work_queue.get()
        print('{0} grabbed item: {1}'.format(task, queue_item))
        yield from asyncio.sleep(0.5)
    asyncio.async(get_work(task, work_queue))

# @asyncio.coroutine
i = 0

async def do_work(task, work_queue):
    global i
    print(task)
    while work_queue.empty():
        work_queue.put_nowait(i)
        i += 1
        await asyncio.sleep(2)
        break
    # asyncio.async(do_work())
    print("Dfgdfg")
    asyncio.ensure_future(do_work(task, work_queue))

if __name__ == "__main__":
    queue_obj = asyncio.Queue()
    loop = asyncio.get_event_loop()
    tasks = [
        asyncio.async(do_work('Run do_work', queue_obj)),
        asyncio.async(get_work('Run get_work', queue_obj))]
    loop.run_until_complete(asyncio.wait(tasks))
    loop.run_forever()
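For reference, a cleaner sketch of the same producer/consumer idea using the modern API (my version, assuming Python 3.7+; asyncio.async was deprecated and later removed):

import asyncio

async def producer(queue):
    for i in range(5):
        await queue.put(i)        # hand a value to the consumer
        await asyncio.sleep(0.5)  # produce at varying intervals
    await queue.put(None)         # sentinel: signal that we're done

async def consumer(queue):
    while True:
        item = await queue.get()
        if item is None:
            break
        print('grabbed item:', item)

async def main():
    queue = asyncio.Queue()
    await asyncio.gather(producer(queue), consumer(queue))

asyncio.run(main())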

Python asyncio - Loop exits with Task was destroyed but it is pending

This is the relevant code of my python program:
import discord
import asyncio

class Bot(discord.Client):
    def __init__(self):
        super().__init__()

    @asyncio.coroutine
    def my_background_task(self):
        yield from self.wait_until_ready()
        while not self.is_closed:
            yield from asyncio.sleep(3600*24)  # <- This is line 76 where it fails
            doSomething()

bot = Bot()
loop = asyncio.get_event_loop()
try:
    loop.create_task(bot.my_background_task())
    loop.run_until_complete(bot.login('username', 'password'))
    loop.run_until_complete(bot.connect())
except Exception:
    loop.run_until_complete(bot.close())
finally:
    loop.close()
The program occasionally quits (on its own, while it should not) with no other errors or warning other than
Task was destroyed but it is pending!
task: <Task pending coro=<my_background_task() running at bin/discordBot.py:76> wait_for=<Future pending cb=[Task._wakeup()]>>
How to ensure the program won't randomly quit? I have Python 3.4.3+ on Xubuntu 15.10.
This is because the discord client module needs control once every minute or so.
This means that any function that steals control for more than a certain time causes discord's client to enter an invalid state (which will manifest itself as an exception at some point later, perhaps upon the next method call of client).
To ensure that the discord module client can ping the discord server, you should use a true multi-threading solution.
One solution is to offload all heavy processing onto a separate process (a separate thread will not do, because Python has a global interpreter lock) and use the discord bot as a thin layer whose responsibility is to populate work queues.
Related reading:
https://discordpy.readthedocs.io/en/latest/faq.html#what-does-blocking-mean
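A simpler sketch of the same idea (my illustration, not the answerer's code): hand the blocking work to a process pool with run_in_executor, so the event loop stays free to service the discord heartbeat:

import asyncio
import time
from concurrent.futures import ProcessPoolExecutor

def heavy_work(seconds):
    time.sleep(seconds)  # stands in for CPU-bound or blocking processing
    return "done after {0}s".format(seconds)

async def handler(loop, executor):
    # The loop keeps running other tasks while the work runs in the pool.
    result = await loop.run_in_executor(executor, heavy_work, 5)
    print(result)

if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    with ProcessPoolExecutor(max_workers=2) as executor:
        loop.run_until_complete(handler(loop, executor))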
Example solution... this is WAY beyond the scope of the problem, but I already had the code mostly written. If I had more time, I would write a shorter solution :)
2 parts, discord interaction and processing server:
This is the discord listener.
import discord
import re
import asyncio
import traceback
import websockets
import json

# Call a function on other server
async def call(methodName, *args, **kwargs):
    async with websockets.connect('ws://localhost:9001/meow') as websocket:
        payload = json.dumps({"method": methodName, "args": args, "kwargs": kwargs})
        await websocket.send(payload)
        #...
        resp = await websocket.recv()
        #...
        return resp

client = discord.Client()
tok = open("token.dat").read()

@client.event
async def on_ready():
    print('Logged in as')
    print(client.user.name)
    print(client.user.id)
    print('------')

@client.event
async def on_error(event, *args, **kwargs):
    print("Error?")

@client.event
async def on_message(message):
    try:
        if message.author.id == client.user.id:
            return
        m = re.match(r"(\w+) for (\d+).*?", message.content)
        if m:
            g = m.groups(1)
            methodName = g[0]
            someNumber = int(g[1])
            response = await call(methodName, someNumber)
            if response:
                await client.send_message(message.channel, response[0:2000])
    except Exception as e:
        print(e)
        print(traceback.format_exc())

client.run(tok)
This is the worker server for processing heavy requests. You can make this part sync or async.
I chose to use some magic called a websocket to send data from one python process to another one. But you can use anything you want. You could make one script write files into a dir, and the other script could read the files out and process them, for example.
import tornado
import tornado.web
import tornado.websocket
import tornado.httpserver
import json
import asyncio
import inspect
import time

class Handler:
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def consume(self, text):
        return "You said {0} and I say hiya".format(text)

    async def sweeps(self, len):
        await asyncio.sleep(len)
        return "Slept for {0} seconds asynchronously!".format(len)

    def sleeps(self, len):
        time.sleep(len)
        return "Slept for {0} seconds synchronously!".format(len)

class MyService(Handler, tornado.websocket.WebSocketHandler):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def stop(self):
        Handler.server.stop()

    def open(self):
        print("WebSocket opened")

    def on_message(self, message):
        print(message)
        j = json.loads(message)
        methodName = j["method"]
        args = j.get("args", ())
        method = getattr(self, methodName)
        if inspect.iscoroutinefunction(method):
            loop = asyncio.get_event_loop()
            task = loop.create_task(method(*args))
            task.add_done_callback(lambda res: self.write_message(res.result()))
            future = asyncio.ensure_future(task)
        elif method:
            resp = method(*args)
            self.write_message(resp)

    def on_close(self):
        print("WebSocket closed")

application = tornado.web.Application([
    (r'/meow', MyService),
])

if __name__ == "__main__":
    from tornado.platform.asyncio import AsyncIOMainLoop
    AsyncIOMainLoop().install()
    http_server = tornado.httpserver.HTTPServer(application)
    Handler.server = http_server
    http_server.listen(9001)
    asyncio.get_event_loop().run_forever()
Now, if you run both processes in separate python scripts, and tell your bot "sleep for 100", it will sleep for 100 seconds happily!
The asyncio stuff functions as a make-shift work queue, and you can properly separate the listener from the backend processing by running them as separate python scripts.
Now, no matter how long your functions run in the 'server' part, the client part will never be prevented from pinging the discord server.
Image failed to upload, but... anyway, this is how to tell the bot to sleep and reply... note that the sleep is synchronous.
http://i.imgur.com/N4ZPPbB.png
I don't think the problem happens during asyncio.sleep. In any case, you shouldn't suppress the exception you got:
bot = Bot()
loop = asyncio.get_event_loop()
try:
    # ...
except Exception as e:
    loop.run_until_complete(bot.close())
    raise e  # <--- re-raise the exception you got during execution to see it (or log it here)
finally:
    # ...
You have to manually stop your task on exit:
import discord
import asyncio

class Bot(discord.Client):
    def __init__(self):
        super().__init__()

    @asyncio.coroutine
    def my_background_task(self):
        yield from self.wait_until_ready()
        while not self.is_closed:
            yield from asyncio.sleep(3600*24)  # <- This is line 76 where it fails
            doSomething()

bot = Bot()
loop = asyncio.get_event_loop()
try:
    task = loop.create_task(bot.my_background_task())
    loop.run_until_complete(bot.login('username', 'password'))
    loop.run_until_complete(bot.connect())
except Exception:
    loop.run_until_complete(bot.close())
finally:
    task.cancel()
    try:
        loop.run_until_complete(task)
    except Exception:
        pass
    loop.close()
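A narrower variant of that cleanup (my tweak, in the spirit of the first answer's advice not to swallow exceptions): suppress only the CancelledError, so real failures inside the task still surface.

from contextlib import suppress

# in the finally block:
task.cancel()
with suppress(asyncio.CancelledError):
    loop.run_until_complete(task)
loop.close()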
