asyncio.Task.all_tasks() gives the set of all tasks for an event loop, but is there anything similar for sockets, in particular the datagram sockets associated with a loop?
The absence of sockets & tasks could then signal "end of life" for the loop.
The question is: in the following example, what goes in loop_not_empty() to make it return False when the task set is empty and there are no associated sockets (i.e. after two seconds)?
Example:
import asyncio
import socket
import threading
class Handler(asyncio.Protocol):
def connection_made(self, transport):
self.transport = transport
print("connection made")
def datagram_received(self, data, addr):
if data == b'die':
print("shutting down")
self.transport.abort()
@asyncio.coroutine
def sometask():
yield from asyncio.sleep(1)
print("task done")
def loop_not_empty(l):
# if asyncio.Task.all_tasks() == set() and WHAT_GOES_HERE
# return False
return True
def main():
a,b = socket.socketpair(socket.AF_UNIX, socket.SOCK_DGRAM)
l = asyncio.get_event_loop()
asyncio.ensure_future(sometask(), loop=l)
asyncio.ensure_future(l.create_datagram_endpoint(Handler, sock=a), loop=l)
threading.Timer(2, lambda: b.send(b'die')).start()
while loop_not_empty(l):
l.run_until_complete(asyncio.sleep(1, loop=l))
main()
Here is a solution that uses a simple class and asyncio.Event() to count the number of active jobs and signals the loop to stop when all jobs are done:
import asyncio
import random
class UseCounter:
def __init__(self, loop=None):
self.loop = loop
self.event = asyncio.Event(loop=loop)
self.n = 0 # The number of active jobs
def __enter__(self):
self.enter()
def __exit__(self, exc_type, exc_val, exc_tb):
self.exit()
def enter(self):
self.n += 1
def exit(self):
self.n -= 1
if self.n == 0:
self.event.set()
async def wait(self):
return await self.event.wait()
async def my_coroutine(counter, term):
with counter:
print("start", term)
n = random.uniform(0.2, 1.5)
await asyncio.sleep(n)
print("end", term)
loop = asyncio.get_event_loop()
counter = UseCounter(loop)
terms = ["apple", "banana", "melon"]
for term in terms:
asyncio.ensure_future(my_coroutine(counter, term))
loop.run_until_complete(counter.wait())
loop.close()
For your example above, add .enter() to connection_made() and .exit() to connection_lost().
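A minimal sketch of that wiring (my assumption, not the original poster's code: the counter is handed to the protocol through the endpoint's factory, and Handler subclasses asyncio.DatagramProtocol, the documented base class for datagram endpoints):

import asyncio

class Handler(asyncio.DatagramProtocol):
    def __init__(self, counter):
        self.counter = counter
    def connection_made(self, transport):
        self.transport = transport
        self.counter.enter()        # one more live transport
    def connection_lost(self, exc):
        self.counter.exit()         # may set the event, releasing wait()
    def datagram_received(self, data, addr):
        if data == b'die':
            self.transport.abort()  # triggers connection_lost()

The endpoint is then created with a factory that closes over the counter, and the loop runs until the counter reports no active jobs:

counter = UseCounter(l)
l.run_until_complete(l.create_datagram_endpoint(lambda: Handler(counter), sock=a))
l.run_until_complete(counter.wait())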
I am trying to get websockets, asyncio and multiprocessing to work together. I have been stuck on this for two days and would appreciate some help.
I have searched for websockets, asyncio and multiprocessing on Stack Overflow as well as the wider internet. I have found threading examples, which I can make work.
import asyncio
import websockets
import threading
class Connection():
def __init__(self):
self.loop = asyncio.new_event_loop()
sock_thread = threading.Thread(target=self.new_loop)
sock_thread.start()
self.x = 0
async def connect_to_socket(self):
self.websocket = await websockets.connect('ws://demos.kaazing.com/echo')
await self.websocket.send("hello")
response = await self.websocket.recv()
print(response)
async def listen_to_socket(self):
while True:
await asyncio.sleep(0)
print('Listening for a message...')
while self.x < 5:
message = await self.websocket.recv()
print("< {}".format(message))
print('\n\n')
print(self.x)
self.x += 1
self.task.cancel()
self.loop.close()
def stop(self):
print('canceling task\n\n')
self.x = 0
self.task.cancel()
def new_loop(self):
self.task = self.loop.create_task(self.connect_to_socket())
self.loop.run_forever()
def make_task(self):
self.task = self.loop.create_task(self.listen_to_socket())
if __name__ == '__main__':
conn=Connection()
This works with no issues. I have seen examples where multiprocessing opens a process inside an event loop, but that is not what I want. I want to open a new process and run an event loop inside that new process. Inside the event loop, I want to run my sockets. I want to free my main process from listening to sockets and use a child process to listen to them while I do computationally expensive work on my main process.
When I try the following code, I get nothing.
import asyncio
import websockets
import multiprocessing
class Connection(multiprocessing.Process):
def __init__(self, tasks, results):
super().__init__()
self.tasks = tasks
self.results = results
self.loop = asyncio.new_event_loop()
print('create event loop')
self.x = 0
self.task = self.loop.create_task(self.test())
print('done with connecting')
#connect to socket and get response
async def test(self):
self.ws = await websockets.connect('ws://demos.kaazing.com/echo')
await self.websocket.send("hello")
response = await self.websocket.recv()
print(response)
#listen to socket long term after connection
async def listen_to_socket(self):
while True:
await asyncio.sleep(0)
print('Listening for a message...')
while self.x < 5:
await self.websocket.send("hello")
message = await self.websocket.recv()
print("< {}".format(message))
print('\n\n')
print(self.x)
self.x += 1
self.results.put(message)
self.task.cancel()
self.loop.close()
#stop task
def stop(self):
print('canceling task\n\n')
self.x = 0
self.task.cancel()
# listen to socket long term
#I have not called this as I can't even get a response from test()
def make_task(self):
self.task = self.loop.create_task(self.listen_to_socket())
if __name__ == '__main__':
tasks = multiprocessing.JoinableQueue()
results = multiprocessing.Queue()
process = Connection(tasks, results)
if tasks.empty():
print('empty')
else:
print(tasks.get())
I expect to connect with the socket and receive a response. However, I get nothing: no error messages, no printout from the connection, just an empty queue, and that's all. How do I get the return values from my websocket?
I am still new enough to this that I am not sure what I am doing wrong. Any advice would help me out.
Thank you
For anyone interested, I got this to work. It is very much a work in progress and I am still adding to it; since this is for me and relatively simple, I didn't comment it.
I started with the code from this answer and built on it.
Python3 Websockets / Multithreading issue
import asyncio
import websockets
import sys
import time
import multiprocessing
class connect():
def __init__(self, results, tasks):
self.x = 0
self.results = results
self.tasks = tasks
self.loop = asyncio.new_event_loop()
async def commander_start(self):
while not self.tasks.empty():
self.uri = self.tasks.get()
self.tasks.task_done()
self.ws = await websockets.connect(self.uri)
while True:
await asyncio.sleep(0.1)
print('Listening for a message...')
while self.x < 5:
await self.ws.send("hello")
message = await self.ws.recv()
message = message+str(self.x)
print("< {}".format(message))
print('\n\n')
print(self.x)
self.x += 1
self.results.put(message)
                await self.ws.close()
                self.x = 0
                print('ws closed')
self.task.cancel()
await asyncio.sleep(1)
self.loop.close()
def run_commander(self):
self.task = self.loop.create_task(self.commander_start())
self.loop.run_forever()
def main(self):
self.commander = multiprocessing.Process(target=self.run_commander)
self.commander.start()
time.sleep(3)
self.commander.kill()
print('is alive:', self.commander, self.commander.is_alive())
if __name__ == "__main__":
size_q = 10
tasks = multiprocessing.JoinableQueue(maxsize=size_q)
results = multiprocessing.Queue(maxsize=size_q)
conn = connect(results,tasks)
tasks.put('ws://demos.kaazing.com/echo')
conn.main()
print('tasks2 put')
tasks.put('wss://echo.websocket.org')
conn.main()
if not results.empty():
for x in range(size_q):
print(results.get())
There is a bunch I am going to change and improve; I just wanted the base system to work so I could build from there, so anyone who uses this will need to modify it to suit their needs. For instance, I spawn a new process and kill it instead of running a continuous process and giving it work to do, and I am still trying to figure out the specifics of the joinable queue and how to use it to add jobs after the process and event loop have been created.
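As a rough sketch of that direction (hypothetical code, not the finished version): a long-lived worker process owns its own event loop and keeps pulling URIs from the joinable queue until it sees a None sentinel. The blocking tasks.get() stalls the loop between jobs, which is acceptable here because the worker handles one job at a time.

import asyncio
import multiprocessing
import websockets

def worker(tasks, results):
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    async def handle_jobs():
        while True:
            uri = tasks.get()        # blocks until a job (or sentinel) arrives
            if uri is None:          # sentinel: shut the worker down
                tasks.task_done()
                break
            async with websockets.connect(uri) as ws:
                await ws.send("hello")
                results.put(await ws.recv())
            tasks.task_done()        # lets tasks.join() unblock
    loop.run_until_complete(handle_jobs())
    loop.close()

if __name__ == "__main__":
    tasks = multiprocessing.JoinableQueue()
    results = multiprocessing.Queue()
    proc = multiprocessing.Process(target=worker, args=(tasks, results))
    proc.start()
    tasks.put('ws://demos.kaazing.com/echo')
    tasks.put(None)
    tasks.join()                     # wait until every job is marked done
    proc.join()
    while not results.empty():
        print(results.get())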
I have been trying to get my application to terminate gracefully for quite some time now, but so far none of the answers I have found worked.
The sample code below illustrates the structure of my application. It basically is a chain of threads that passes data to one another using Queues.
from abc import abstractmethod
from time import sleep
from threading import Thread, Event
from queue import Queue
import signal
import sys
class StoppableThread(Thread):
def __init__(self):
super().__init__()
self.stopper = Event()
self.queue = Queue()
    @abstractmethod
def actual_job(self):
pass
def stop_running(self):
self.stopper.set()
def run(self):
while not self.stopper.is_set():
print(self.stopper.is_set())
self.actual_job()
self.queue.join()
class SomeObjectOne(StoppableThread):
def __init__(self, name, some_object_two):
super().__init__()
self.name = name
self.obj_two = some_object_two
def actual_job(self):
# print('{} is currently running'.format(self.name))
input_string = 'some string'
print('{} outputs {}'.format(self.name, input_string))
self.obj_two.queue.put(input_string)
sleep(2)
class SomeObjectTwo(StoppableThread):
def __init__(self, name, some_object_three):
super().__init__()
self.name = name
self.some_object_three = some_object_three
def actual_job(self):
# print('{} is currently running'.format(self.name))
some_string = self.queue.get()
inverted = some_string[::-1]
print('{} outputs {}'.format(self.name , inverted))
self.some_object_three.queue.put(inverted)
sleep(2)
class SomeObjectThree(StoppableThread):
def __init__(self, name):
super().__init__()
self.name = name
def actual_job(self):
print('{} is currently running'.format(self.name))
some_string = self.queue.get()
print('{} outputs {}'.format(self.name ,some_string[::-1]))
sleep(2)
class ServiceExit(Exception):
"""
Custom exception which is used to trigger the clean exit
of all running threads and the main program.
"""
pass
def service_shutdown(signum, frame):
print('Caught signal %d' % signum)
raise ServiceExit
signal.signal(signal.SIGTERM, service_shutdown)
signal.signal(signal.SIGINT, service_shutdown)
if __name__ == '__main__':
thread_three = SomeObjectThree('SomeObjectThree')
thread_two = SomeObjectTwo('SomeObjectTwo', thread_three)
thread_one = SomeObjectOne('SomeObjectOne', thread_two)
try:
thread_three.start()
thread_two.start()
thread_one.start()
# Keep the main thread running, otherwise signals are ignored.
while True:
sleep(0.5)
except ServiceExit:
print('Running service exit')
thread_three.stop_running()
thread_two.stop_running()
thread_one.stop_running()
thread_one.join()
thread_two.join()
thread_three.join()
sys.exit(0)
Now, if I run this code and press Ctrl-C to terminate, thread_one seems to join as expected, but the code gets stuck at thread_two.join().
Because thread_one is the only thread whose queue stays empty, I suspect it has something to do with the queue.
Any ideas?
In the run() method of StoppableThread you have this:
self.queue.join()
join() is a blocking method:
Blocks until all items in the queue have been gotten and processed. The count of unfinished tasks goes up whenever an item is added to the queue. The count goes down whenever a consumer thread calls task_done() to indicate that the item was retrieved and all work on it is complete. When the count of unfinished tasks drops to zero, join() unblocks.
So in order for join() to return, it's not enough to get() an item in the other thread; you must also indicate that it's been processed with task_done():
from abc import abstractmethod
from time import sleep
from threading import Thread, Event
from queue import Queue
import signal
import sys
class StoppableThread(Thread):
def __init__(self):
super().__init__()
self.stopper = Event()
self.queue = Queue()
    @abstractmethod
def actual_job(self):
pass
def stop_running(self):
self.stopper.set()
def run(self):
while not self.stopper.is_set():
print(self.stopper.is_set())
self.actual_job()
self.queue.join()
class SomeObjectOne(StoppableThread):
def __init__(self, name, some_object_two):
super().__init__()
self.name = name
self.obj_two = some_object_two
def actual_job(self):
# print('{} is currently running'.format(self.name))
input_string = 'some string'
print('{} outputs {}'.format(self.name, input_string))
self.obj_two.queue.put(input_string)
sleep(2)
class SomeObjectTwo(StoppableThread):
def __init__(self, name, some_object_three):
super().__init__()
self.name = name
self.some_object_three = some_object_three
def actual_job(self):
# print('{} is currently running'.format(self.name))
some_string = self.queue.get()
inverted = some_string[::-1]
print('{} outputs {}'.format(self.name , inverted))
self.queue.task_done()
self.some_object_three.queue.put(inverted)
sleep(2)
class SomeObjectThree(StoppableThread):
def __init__(self, name):
super().__init__()
self.name = name
def actual_job(self):
print('{} is currently running'.format(self.name))
some_string = self.queue.get()
print('{} outputs {}'.format(self.name ,some_string[::-1]))
self.queue.task_done()
sleep(2)
class ServiceExit(Exception):
"""
Custom exception which is used to trigger the clean exit
of all running threads and the main program.
"""
pass
def service_shutdown(signum, frame):
print('Caught signal %d' % signum)
raise ServiceExit
signal.signal(signal.SIGTERM, service_shutdown)
signal.signal(signal.SIGINT, service_shutdown)
if __name__ == '__main__':
thread_three = SomeObjectThree('SomeObjectThree')
thread_two = SomeObjectTwo('SomeObjectTwo', thread_three)
thread_one = SomeObjectOne('SomeObjectOne', thread_two)
try:
thread_three.start()
thread_two.start()
thread_one.start()
# Keep the main thread running, otherwise signals are ignored.
while True:
sleep(0.5)
except ServiceExit:
print('Running service exit')
thread_three.stop_running()
thread_two.stop_running()
thread_one.stop_running()
thread_one.join()
thread_two.join()
thread_three.join()
I have a task: make coroutines communicate with each other using asyncio and Python 3.
Please tell me how to do it if one coroutine returns values at different intervals in a while True loop and another coroutine receives this data.
import asyncio
@asyncio.coroutine
def write(future):
i=0
while True:
yield from asyncio.sleep(1)
        future.set_result('data: {}'.format(i))
i+=1
def got_result(future):
print(future.result())
loop = asyncio.get_event_loop()
future = asyncio.Future()
asyncio.ensure_future(write(future))
future.add_done_callback(got_result)
try:
loop.run_forever()
finally:
loop.close()
The solution was found with the help of asyncio.Queue():
import asyncio
@asyncio.coroutine
def get_work(task, work_queue):
while not work_queue.empty():
print(task)
queue_item = yield from work_queue.get()
print('{0} grabbed item: {1}'.format(task, queue_item))
yield from asyncio.sleep(0.5)
        asyncio.ensure_future(get_work(task, work_queue))
# @asyncio.coroutine
i = 0
async def do_work(task, work_queue):
global i
print(task)
while work_queue.empty():
work_queue.put_nowait(i)
i += 1
await asyncio.sleep(2)
break
# asyncio.async(do_work())
print("Dfgdfg")
asyncio.ensure_future(do_work(task, work_queue))
if __name__ == "__main__":
queue_obj = asyncio.Queue()
loop = asyncio.get_event_loop()
tasks = [
        asyncio.ensure_future(do_work('Run do_work', queue_obj)),
        asyncio.ensure_future(get_work('Run get_work', queue_obj))]
loop.run_until_complete(asyncio.wait(tasks))
loop.run_forever()
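For comparison, here is a minimal self-contained sketch of the same idea (my own rewrite, not the code above), using asyncio.Queue with plain async/await instead of the older decorator style; asyncio.run() needs Python 3.7+:

import asyncio

async def produce(queue):
    for i in range(5):
        await queue.put(i)        # hand a value to the consumer
        await asyncio.sleep(1)    # produce at whatever interval
    await queue.put(None)         # sentinel: no more data

async def consume(queue):
    while True:
        item = await queue.get()  # wakes up as soon as data arrives
        if item is None:
            break
        print('got', item)

async def main():
    queue = asyncio.Queue()
    await asyncio.gather(produce(queue), consume(queue))

asyncio.run(main())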
I have Python 3.4.3, PostgreSQL 9.4 and aiopg 0.7.0. This example of a multi-threaded application was taken from this site. How do I use the pool? The thread hangs on the select operation.
import time
import asyncio
import aiopg
import functools
from threading import Thread, current_thread, Event
from concurrent.futures import Future
class B(Thread):
def __init__(self, start_event):
Thread.__init__(self)
self.loop = None
self.tid = None
self.event = start_event
def run(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
self.tid = current_thread()
self.loop.call_soon(self.event.set)
self.loop.run_forever()
def stop(self):
self.loop.call_soon_threadsafe(self.loop.stop)
def add_task(self, coro):
"""this method should return a task object, that I
can cancel, not a handle"""
def _async_add(func, fut):
try:
ret = func()
fut.set_result(ret)
except Exception as e:
fut.set_exception(e)
f = functools.partial(asyncio.async, coro, loop=self.loop)
if current_thread() == self.tid:
return f() # We can call directly if we're not going between threads.
else:
# We're in a non-event loop thread so we use a Future
# to get the task from the event loop thread once
# it's ready.
fut = Future()
self.loop.call_soon_threadsafe(_async_add, f, fut)
return fut.result()
def cancel_task(self, task):
self.loop.call_soon_threadsafe(task.cancel)
@asyncio.coroutine
def test(pool, name_task):
while True:
print(name_task, 'running')
with (yield from pool.cursor()) as cur:
print(name_task, " select. ")
yield from cur.execute("SELECT count(*) FROM test")
count = yield from cur.fetchone()
print(name_task, ' Result: ', count)
yield from asyncio.sleep(3)
@asyncio.coroutine
def connect_db():
dsn = 'dbname=%s user=%s password=%s host=%s' % ('testdb', 'user', 'passw', '127.0.0.1')
pool = yield from aiopg.create_pool(dsn)
print('create pool type =', type(pool))
# future.set_result(pool)
return (pool)
event = Event()
b = B(event)
b.start()
event.wait() # Let the loop's thread signal us, rather than sleeping
loop_db = asyncio.get_event_loop()
pool = loop_db.run_until_complete(connect_db())
time.sleep(2)
t = b.add_task(test(pool, 'Task1')) # This is a real task
t = b.add_task(test(pool, 'Task2'))
while True:
time.sleep(10)
b.stop()
No result is returned by 'yield from cur.execute("SELECT count(*) FROM test")'.
Long story short: you cannot share an aiopg pool object between different event loops.
Every aiopg.Pool is coupled to an event loop. If you don't specify the loop parameter explicitly, it is taken from an asyncio.get_event_loop() call.
So in your example you have a pool coupled to the event loop of the main thread.
When you execute a db query from a separate thread, you are trying to run it on the thread's loop, not the main one, and that doesn't work.
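A minimal sketch of one way around this, reusing the B thread and the imports from the question: create the pool inside a coroutine that is scheduled on the worker thread's loop, so the pool and the queries are coupled to one and the same loop (the DSN values are the placeholders from the question):

@asyncio.coroutine
def pool_and_query(name_task):
    # everything here runs on the worker thread's loop, so the pool is
    # coupled to that loop rather than to the main thread's loop
    dsn = 'dbname=testdb user=user password=passw host=127.0.0.1'
    pool = yield from aiopg.create_pool(dsn)
    while True:
        with (yield from pool.cursor()) as cur:
            yield from cur.execute("SELECT count(*) FROM test")
            count = yield from cur.fetchone()
            print(name_task, 'Result:', count)
        yield from asyncio.sleep(3)

t = b.add_task(pool_and_query('Task1'))  # scheduled on B's loop, not the main one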
For the producer-consumer problem I have come up with this solution:
import threading
import random
import time
class Bucket:
def __init__(self, size):
self.size = size
self.current_size = 0
self.cond_var = threading.Condition()
def available_for_put(self):
return self.current_size < self.size
def available_for_get(self):
return self.current_size > 0
def put(self):
self.current_size = self.current_size + 1
print(self)
self.cond_var.notify_all()
def get(self):
self.current_size = self.current_size - 1
print(self)
self.cond_var.notify_all()
def acquire(self):
self.cond_var.acquire()
def release(self):
self.cond_var.release()
def wait(self):
self.cond_var.wait()
def __str__(self):
return "Size is {0}".format(self.current_size)
class Worker(threading.Thread):
PRODUCER = 1
CONSUMER = 0
def __init__(self, bucket, kind):
threading.Thread.__init__(self)
self.kind = kind
self.bucket = bucket
def run(self):
while(1):
self.bucket.acquire()
while(((self.kind == Worker.PRODUCER) and (not self.bucket.available_for_put())) or \
((self.kind == Worker.CONSUMER) and (not self.bucket.available_for_get()))):
self.bucket.wait()
### self.bucket.acquire()
if self.kind == Worker.PRODUCER:
self.bucket.put()
else:
self.bucket.get()
time.sleep(0.1)
self.bucket.release()
bucket = Bucket(10)
workers = []
for i in range(10):
workers.append(Worker(bucket, i % 2))
for w in workers:
w.start()
print("Thread started")
for w in workers:
w.join()
Apparently, if I remove the while(1) loop and make every thread run the block inside the loop only once, it reaches a deadlock, and I can't understand why.
The Producer-Consumer pattern can be readily implemented using Python's built-in Queue support:
queue - a synchronised queue class
This could simplify your code. Also very useful is the scheduler:
sched - event scheduler
And since your question is tagged with Python-3.x, you should definitely have a look at the concurrent.futures module:
concurrent.futures - launching parallel tasks
Your workers could be tasks and the bucket could become a queue.
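A minimal sketch of that reformulation with the standard queue module: the maxsize bound plays the role of the bucket's capacity, so put() and get() block exactly where available_for_put() and available_for_get() would say no, and all the hand-written condition-variable bookkeeping disappears:

import queue
import threading

bucket = queue.Queue(maxsize=10)   # the bucket, capacity 10

def producer():
    for i in range(20):
        bucket.put(i)              # blocks while the bucket is full
        print("put", i)

def consumer():
    for _ in range(20):
        item = bucket.get()        # blocks while the bucket is empty
        print("got", item)

threads = [threading.Thread(target=producer), threading.Thread(target=consumer)]
for t in threads:
    t.start()
for t in threads:
    t.join()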
Apparently the problem is that after waking up from wait() the lock has already been reacquired for you, so the now commented-out acquire() would block.
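For reference, here is a condensed sketch of the same bucket logic in the idiomatic shape, using with blocks so that each acquire is balanced by exactly one release:

import threading

cond = threading.Condition()
size, current = 10, 0

def put_one():
    global current
    with cond:                 # acquire once
        while current >= size:
            cond.wait()        # lock is released while waiting,
                               # and held again when wait() returns
        current += 1
        cond.notify_all()

def get_one():
    global current
    with cond:
        while current <= 0:
            cond.wait()
        current -= 1
        cond.notify_all()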