Can't stop aiohttp websocket server - python-3.x

I can't cancel my aiohttp websocket server from within the application. I want to stop the server and shut down when I get a "cancel" string from the client. I do receive it and my coroutine (websocket_handler) finishes, but three coroutines from the aiohttp library keep running.
Of course, I could invoke asyncio.get_event_loop().stop() at the end of my coroutine, but is there a graceful way to stop the aiohttp server?
As you can see from my code, I've tried to use Application().on_shutdown.append(), but it failed.
What is the right way?
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import asyncio
import signal
import weakref

import aiohttp.web
from aiohttp import ClientConnectionError, WSCloseCode

# This restores the default Ctrl+C signal handler, which just kills the process
# https://stackoverflow.com/questions/27480967/why-does-the-asyncios-event-loop-suppress-the-keyboardinterrupt-on-windows
signal.signal(signal.SIGINT, signal.SIG_DFL)

HOST = os.getenv('HOST', 'localhost')
PORT = int(os.getenv('PORT', 8881))
async def testhandle(request):
    # Coroutine handling the HTTP request at "http://127.0.0.1:8881/test"
    print("server: into testhandle()")
    return aiohttp.web.Response(text='Test handle')

async def websocket_handler(request):
    # Coroutine handling the websocket request at "http://127.0.0.1:8881"
    print('Websocket connection starting')
    ws = aiohttp.web.WebSocketResponse()
    await ws.prepare(request)
    request.app['websockets'].add(ws)
    print('Websocket connection ready')
    try:
        async for msg in ws:
            if msg.type == aiohttp.WSMsgType.TEXT:
                if msg.data == 'close':
                    print(msg.data)
                    break
                else:
                    print(msg.data)
                    await ws.send_str("You said: {}".format(msg.data))
            elif msg.type == aiohttp.WSMsgType.ERROR:
                print('ws connection closed with exception %s' % ws.exception())
    except (asyncio.CancelledError, ClientConnectionError):
        pass  # We end up here when the client has disconnected.
              # Resources could be released here in the future.
    finally:
        print('Websocket connection closed')
        request.app['websockets'].discard(ws)
        # pending = asyncio.Task.all_tasks()
        # asyncio.get_event_loop().stop()
    return ws
async def on_shutdown(app):
    for ws in set(app['websockets']):
        await ws.close(code=WSCloseCode.GOING_AWAY, message='Server shutdown')

def main():
    loop = asyncio.get_event_loop()
    app = aiohttp.web.Application()
    app['websockets'] = weakref.WeakSet()
    app.on_shutdown.append(on_shutdown)
    app.add_routes([aiohttp.web.get('/', websocket_handler)])  # , aiohttp.web.get('/test', testhandle)
    try:
        aiohttp.web.run_app(app, host=HOST, port=PORT, handle_signals=True)
        print("after run_app")
    except Exception as exc:
        print("in exception")
    finally:
        loop.close()

if __name__ == '__main__':
    main()

I believe the correct answer is simply:
raise aiohttp.web.GracefulExit()
After catching the exception, run_app calls all the handlers appended to the on_shutdown and on_cleanup signals and exits.
One can see in the source code that aiohttp.web.run_app() waits for two exceptions: GracefulExit and KeyboardInterrupt. While the latter is rather uninteresting, following the trace of GracefulExit leads to the place in web_runner.py that registers SIGINT and SIGTERM signal handlers bound to a function that raises GracefulExit().
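Applied to the handler in the question, a minimal sketch (assuming the client sends the string "cancel" to request the shutdown) could look like this:
async for msg in ws:
    if msg.type == aiohttp.WSMsgType.TEXT:
        if msg.data == 'cancel':
            await ws.close()
            # run_app() catches GracefulExit, runs on_shutdown/on_cleanup and exits
            raise aiohttp.web.GracefulExit()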
Indeed, I also managed to gracefully shut it down by raising signal.SIGINT or signal.SIGTERM from the process itself, e.g.
import signal
signal.raise_signal(signal.SIGINT)
This was tested to work on Fedora Linux 34, Python 3.9.7, aiohttp 3.7.4.

https://docs.aiohttp.org/en/v3.0.1/web_reference.html#aiohttp.web.Application.shutdown
app.shutdown()
app.cleanup()
After shutdown() you should also call cleanup().
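Note that both methods are coroutines, so they have to be awaited from async code; a minimal sketch:
await app.shutdown()
await app.cleanup()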

Related

Flask server using asynchronous Rpc client only answers 1 request out of two

I'm trying to implement an async RPC client within a Flask server.
The idea is that each request spawns a thread with a uuid, and each request waits until there is a response in the RpcClient queue attribute with the correct uuid.
The problem is that one request out of two fails. I think it might be a multi-threading problem, but I don't see where it comes from.
The bug can be seen here.
Using debug prints, it seems that the message with the correct uuid is received in the _on_response callback and updates the queue attribute in that instance correctly, but the queue attribute within the /rpc_call/<payload> endpoint doesn't see the update (so queue[uuid] holds the response in the RpcClient callback but is still None in the scope of the endpoint).
My code:
from flask import Flask, jsonify
from gevent.pywsgi import WSGIServer
import sys
import os
import pika
import uuid
import time
import threading
class RpcClient(object):
    """Asynchronous Rpc client."""
    internal_lock = threading.Lock()
    queue = {}

    def __init__(self):
        self.connection = pika.BlockingConnection(
            pika.ConnectionParameters(host='rabbitmq'))
        self.channel = self.connection.channel()
        self.channel.basic_qos(prefetch_count=1)
        self.channel.exchange_declare(exchange='kaldi_expe', exchange_type='topic')

        # Create all the queues and bind them to the corresponding routing keys
        self.channel.queue_declare('request', durable=True)
        result = self.channel.queue_declare('answer', durable=True)
        self.channel.queue_bind(exchange='kaldi_expe', queue='request', routing_key='kaldi_expe.web.request')
        self.channel.queue_bind(exchange='kaldi_expe', queue='answer', routing_key='kaldi_expe.kaldi.answer')
        self.callback_queue = result.method.queue

        thread = threading.Thread(target=self._process_data_events)
        thread.setDaemon(True)
        thread.start()
    def _process_data_events(self):
        self.channel.basic_consume(self.callback_queue, self._on_response, auto_ack=True)
        while True:
            with self.internal_lock:
                self.connection.process_data_events()
            time.sleep(0.1)

    def _on_response(self, ch, method, props, body):
        """On response we simply store the result in a local dictionary."""
        self.queue[props.correlation_id] = body

    def send_request(self, payload):
        corr_id = str(uuid.uuid4())
        self.queue[corr_id] = None
        with self.internal_lock:
            self.channel.basic_publish(exchange='kaldi_expe',
                                       routing_key="kaldi_expe.web.request",
                                       properties=pika.BasicProperties(
                                           reply_to=self.callback_queue,
                                           correlation_id=corr_id,
                                       ),
                                       body=payload)
        return corr_id
def flask_app():
    app = Flask("kaldi")

    @app.route('/', methods=['GET'])
    def server_is_up():
        return 'server is up', 200

    @app.route('/rpc_call/<payload>')
    def rpc_call(payload):
        """Simple Flask implementation for making asynchronous Rpc calls."""
        corr_id = app.config['RPCclient'].send_request(payload)
        while app.config['RPCclient'].queue[corr_id] is None:
            # print("queue server: " + str(app.config['RPCclient'].queue))
            time.sleep(0.1)
        return app.config['RPCclient'].queue[corr_id]

    return app
if __name__ == '__main__':
    while True:
        try:
            rpcClient = RpcClient()
            app = flask_app()
            app.config['RPCclient'] = rpcClient
            print("Rabbit MQ is connected, starting server", file=sys.stderr)
            app.run(debug=True, threaded=True, host='0.0.0.0')
        except pika.exceptions.AMQPConnectionError as e:
            print("Waiting for RabbitMq startup" + str(e), file=sys.stderr)
            time.sleep(1)
        except Exception as e:
            worker.log.error(e)
            exit(e)
I found where the bug came from:
The debug=True in the line app.run(debug=True, threaded=True, host='0.0.0.0') makes Flask restart the server at startup.
The whole script is then rerun from the beginning, so a second rpcClient is initialized and consumes from the same queue. The problem is that the thread of the first client is still running, so two RpcClient instances consume from the same queue and one of them is effectively useless.
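A minimal way to keep debug mode but avoid the double initialization (a sketch, using Flask's standard use_reloader option) is to disable the reloader:
# Keep debug output but prevent the reloader from re-running the module
# and creating a second RpcClient.
app.run(debug=True, use_reloader=False, threaded=True, host='0.0.0.0')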

Python: manager.Queue() with asyncio. How to resolve deadlock?

I am trying to figure out how to have a websocket based server listen to incoming requests, place them in a queue for another process to do work, then place the results in another queue where the websocket based server can wait for said result and send the response back to the client.
This is just me trying to learn and gain more experience with both asyncio and sharing data between processes. I am using Python 3.9.2 64bit.
Right now I am stuck with a deadlock as commented in the "producer_handler" function in the server code. Here is the code I am playing with:
Server:
import asyncio
import logging
import time
from multiprocessing import Manager, Process

import websockets

logging.root.setLevel(0)

def server(recievequeue, sendqueue):

    async def consumer_handler(websocket, path):
        while True:
            logging.info('Waiting for request')
            try:
                request = await websocket.recv()
            except Exception as exception:
                logging.warning(f'consumer_handler Error: {exception}')
                break
            logging.info(f'Request: {request}')
            recievequeue.put(request)
            logging.info('Request placed in recievequeue')

    async def producer_handler(websocket, path):
        while True:
            logging.info('Waiting for response')
            response = sendqueue.get()  # Deadlock is here.
            try:
                await websocket.send(response)
            except Exception as exception:
                logging.warning(f'producer_handler Error: {exception}')
                break
            logging.info('Response sent')

    async def handler(websocket, path):
        consumer_task = asyncio.ensure_future(consumer_handler(websocket, path))
        producer_task = asyncio.ensure_future(producer_handler(websocket, path))
        done, pending = await asyncio.wait([producer_task, consumer_task], return_when=asyncio.FIRST_COMPLETED)
        for task in done:
            logging.info(f'Canceling: {task}')
            task.cancel()
        for task in pending:
            logging.info(f'Canceling: {task}')
            task.cancel()

    eventloop = asyncio.get_event_loop()
    eventloop.run_until_complete(websockets.serve(handler, 'localhost', 8081, ssl=None))
    eventloop.run_forever()

def message_handler(recievequeue, sendqueue):
    while True:
        # I just want to test getting a message from the recievequeue, and placing it in the sendqueue
        request = recievequeue.get()
        logging.info(f'Request: {request}')
        time.sleep(3)
        data = str(time.time())
        logging.info(f'Work completed # {data}')
        sendqueue.put(data)

def main():
    logging.info('Starting Application')
    manager = Manager()
    sendqueue = manager.Queue()
    recievequeue = manager.Queue()
    test_process_1 = Process(target=server, args=(recievequeue, sendqueue), name='Server')
    test_process_1.start()
    test_process_2 = Process(target=message_handler, args=(recievequeue, sendqueue), name='Message Handler')
    test_process_2.start()
    test_process_1.join()

if __name__ == '__main__':
    main()
And the client:
import asyncio
import logging

import websockets

logging.root.setLevel(0)

URI = "wss://localhost:8081"

async def test():

    async def consumer_handler(connection):
        while True:
            try:
                request = await connection.recv()
            except Exception as exception:
                logging.warning(f'Error: {exception}')
                break
            logging.info(request)

    async def producer_handler(connection):
        while True:
            await asyncio.sleep(5)
            try:
                await connection.send('Hello World')
            except Exception as exception:
                logging.warning(f'Error: {exception}')
                break

    async with websockets.connect(URI, ssl=None) as connection:
        consumer_task = asyncio.ensure_future(consumer_handler(connection))
        producer_task = asyncio.ensure_future(producer_handler(connection))
        while True:
            await asyncio.wait([consumer_task, producer_task], return_when=asyncio.FIRST_COMPLETED)

def main():
    logging.info('Starting Application')
    eventloop = asyncio.get_event_loop()
    try:
        eventloop.run_until_complete(test())
        eventloop.run_forever()
    except Exception as exception:
        logging.warning(f'Error: {exception}')

if __name__ == '__main__':
    main()
If I remove the queues, the server and multiple clients can talk back and forth with no issues. I just can't figure out how to get() and put() the requests and responses. Any help would be appreciated!
After looking through other posts I noticed people talking about deadlocks and run_in_executor. After some more testing I found that replacing the line causing the deadlock with the following code resolved the issue:
response = await eventloop.run_in_executor(None, sendqueue.get)
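For context, a sketch of producer_handler with that fix applied (using the running loop directly instead of the eventloop variable from server()) might look like:
async def producer_handler(websocket, path):
    while True:
        logging.info('Waiting for response')
        # Run the blocking Queue.get() in a worker thread so the event loop stays free.
        response = await asyncio.get_event_loop().run_in_executor(None, sendqueue.get)
        await websocket.send(response)
        logging.info('Response sent')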

Python: concurrently pending on async coroutine and synchronous function

I'd like to establish an SSH SOCKS tunnel (using asyncssh) during the execution of a synchronous function. When the function is done I want to tear down the tunnel and exit.
Apparently some async function has to be awaited to keep the tunnel working, so the important thing is that conn.wait_closed() and the synchronous function are executed concurrently. So I am quite sure that I actually need a second thread.
I first tried some saner things using a ThreadPoolExecutor with run_in_executor but then ended up with the abysmal multithreaded variant below.
#!/usr/bin/env python3
import traceback
from threading import Thread
from concurrent.futures import ThreadPoolExecutor
import asyncio, asyncssh, sys

_server = "127.0.0.1"
_port = 22
_proxy_port = 8080

async def run_client():
    conn = await asyncio.wait_for(
        asyncssh.connect(
            _server,
            port=_port,
            options=asyncssh.SSHClientConnectionOptions(client_host_keysign=True),
        ),
        10,
    )
    listener = await conn.forward_socks('127.0.0.1', _proxy_port)
    return conn
async def do_stuff(func):
    try:
        conn = await run_client()
        print("SSH tunnel active")

        def start_loop(loop):
            asyncio.set_event_loop(loop)
            try:
                loop.run_forever()
            except Exception as e:
                print(f"worker loop: {e}")

        async def thread_func():
            ret = await func()
            print("Func done - tearing down worker thread and SSH connection")
            conn.close()
            # asyncio.get_event_loop().stop()
            return ret

        func_loop = asyncio.new_event_loop()
        func_thread = Thread(target=start_loop, args=(func_loop,))
        func_thread.start()
        print("thread started")
        fut = asyncio.run_coroutine_threadsafe(thread_func(), func_loop)
        print(f"fut scheduled: {fut}")
        done = await asyncio.gather(asyncio.wrap_future(fut), conn.wait_closed())
        print("wait done")
        for ret in done:
            print(f"ret={ret}")
        # Canceling pending tasks and stopping the loop
        # asyncio.gather(*asyncio.Task.all_tasks()).cancel()
        print("stopping func_loop")
        func_loop.call_soon_threadsafe(func_loop.stop())
        print("joining func_thread")
        func_thread.join()
        print("joined func_thread")
    except (OSError, asyncssh.Error) as exc:
        sys.exit('SSH connection failed: ' + str(exc))
    except Exception as exc:
        sys.exit('Unhandled exception: ' + str(exc))
        traceback.print_exc()
async def just_wait():
    print("starting just_wait")
    input()
    print("ending just_wait")
    return 42

asyncio.get_event_loop().run_until_complete(do_stuff(just_wait))
It actually "works" "correctly" until the end, where I get an exception while joining the worker thread, presumably because something I do is not threadsafe.
Exception in callback None()
handle: <Handle>
Traceback (most recent call last):
File "/usr/lib/python3.7/asyncio/events.py", line 88, in _run
self._context.run(self._callback, *self._args)
TypeError: 'NoneType' object is not callable
To test the code you must have a local SSH server running with key files setup for your user. You may want to change the _port variable.
I am looking for the reason for the exception and/or a version of the program that requires less manual intervention in the threading and possibly uses just a single event loop. I don't know how to achieve the latter when I want to await two things at once (as in the asyncio.gather call).
The immediate cause of your error is this line:
# incorrect
func_loop.call_soon_threadsafe(func_loop.stop())
The intention is to call func_loop.stop() in the thread that runs the func_loop event loop. But as written, it invokes func_loop.stop() in the current thread and passes its return value (None) to call_soon_threadsafe as the function to invoke. This causes call_soon_threadsafe to complain that None is not callable. To fix the immediate problem, you should drop the extra parentheses and invoke the method as:
# correct
func_loop.call_soon_threadsafe(func_loop.stop)
However, the code is definitely over-complicated as written:
it doesn't make sense to create a new event loop when you are already inside an event loop
just_wait shouldn't be async def since it doesn't await anything, so it's clearly not async.
sys.exit takes an integer exit status, not a string. Also, it doesn't make much sense to attempt to print a backtrace after the call to sys.exit.
To run a non-async function from asyncio, just use run_in_executor and pass it the non-async function as-is. You don't need an extra thread nor an extra event loop; run_in_executor will take care of the thread and connect it with your current event loop, effectively making the sync function awaitable. For example (untested):
async def do_stuff(func):
    conn = await run_client()
    print("SSH tunnel active")
    loop = asyncio.get_event_loop()
    ret = await loop.run_in_executor(None, func)
    print(f"ret={ret}")
    conn.close()
    await conn.wait_closed()
    print("wait done")

def just_wait():
    # just_wait is a regular function; it can call blocking code,
    # but it cannot await
    print("starting just_wait")
    input()
    print("ending just_wait")
    return 42

asyncio.get_event_loop().run_until_complete(do_stuff(just_wait))
If you need to await things in just_wait, you can make it async and use run_in_executor for the actual blocking code inside it:
async def do_stuff():
    conn = await run_client()
    print("SSH tunnel active")
    loop = asyncio.get_event_loop()
    ret = await just_wait()
    print(f"ret={ret}")
    conn.close()
    await conn.wait_closed()
    print("wait done")

async def just_wait():
    # just_wait is an async function, it can await, but
    # must invoke blocking code through run_in_executor
    print("starting just_wait")
    loop = asyncio.get_event_loop()
    await loop.run_in_executor(None, input)
    print("ending just_wait")
    return 42

asyncio.run(do_stuff())

Python async: Waiting for stdin input while doing other stuff

I'm trying to create a WebSocket command line client that waits for messages from a WebSocket server but waits for user input at the same time.
Regularly polling multiple online sources every second works fine on the server (the one running at localhost:6789 in this example), but instead of using Python's normal sleep() function it uses asyncio.sleep(), which makes sense because sleeping and asynchronously sleeping aren't the same thing, at least not under the hood.
Similarly, waiting for user input and asynchronously waiting for user input aren't the same thing, but I can't figure out how to asynchronously wait for user input the way I can asynchronously wait for an arbitrary number of seconds, so that the client can handle incoming messages from the WebSocket server while simultaneously waiting for user input.
The comment below in the else-clause of monitor_cmd() hopefully explains what I'm getting at:
import asyncio
import json

import websockets

async def monitor_ws():
    uri = 'ws://localhost:6789'
    async with websockets.connect(uri) as websocket:
        async for message in websocket:
            print(json.dumps(json.loads(message), indent=2, sort_keys=True))

async def monitor_cmd():
    while True:
        sleep_instead = False
        if sleep_instead:
            await asyncio.sleep(1)
            print('Sleeping works fine.')
        else:
            # Seems like I need the equivalent of:
            # line = await asyncio.input('Is this your line? ')
            line = input('Is this your line? ')
            print(line)

try:
    asyncio.get_event_loop().run_until_complete(asyncio.wait([
        monitor_ws(),
        monitor_cmd()
    ]))
except KeyboardInterrupt:
    quit()
This code just waits for input indefinitely and does nothing else in the meantime, and I understand why. What I don't understand is how to fix it. :)
Of course, if I'm thinking about this problem in the wrong way, I'd be very happy to learn how to remedy that as well.
You can use the aioconsole third-party package to interact with stdin in an asyncio-friendly manner:
line = await aioconsole.ainput('Is this your line? ')
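Applied to the monitor_cmd() coroutine from the question, a rough sketch (assuming aioconsole has been installed, e.g. via pip install aioconsole) would be:
import aioconsole

async def monitor_cmd():
    while True:
        # ainput is an awaitable replacement for input(), so the event loop
        # can keep servicing monitor_ws() while waiting for the user.
        line = await aioconsole.ainput('Is this your line? ')
        print(line)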
Borrowing heavily from aioconsole, if you would rather avoid using an external library you could define your own async input function:
async def ainput(string: str) -> str:
    await asyncio.get_event_loop().run_in_executor(
        None, lambda s=string: sys.stdout.write(s + ' '))
    return await asyncio.get_event_loop().run_in_executor(
        None, sys.stdin.readline)
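It can then be awaited from monitor_cmd() just like the aioconsole version, e.g. line = await ainput('Is this your line? '), assuming sys and asyncio are imported.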
Borrowing heavily from aioconsole, there are two ways to handle this.
Start a new daemon thread:
import sys
import asyncio
import threading
from concurrent.futures import Future

async def run_as_daemon(func, *args):
    future = Future()
    future.set_running_or_notify_cancel()

    def daemon():
        try:
            result = func(*args)
        except Exception as e:
            future.set_exception(e)
        else:
            future.set_result(result)

    threading.Thread(target=daemon, daemon=True).start()
    return await asyncio.wrap_future(future)

async def main():
    data = await run_as_daemon(sys.stdin.readline)
    print(data)

if __name__ == "__main__":
    asyncio.run(main())
Use a stream reader:
import sys
import asyncio

async def get_steam_reader(pipe) -> asyncio.StreamReader:
    loop = asyncio.get_event_loop()
    reader = asyncio.StreamReader(loop=loop)
    protocol = asyncio.StreamReaderProtocol(reader)
    await loop.connect_read_pipe(lambda: protocol, pipe)
    return reader

async def main():
    reader = await get_steam_reader(sys.stdin)
    data = await reader.readline()
    print(data)

if __name__ == "__main__":
    asyncio.run(main())

Tornado coroutine with websockets not working with python3

The HandlerWebSocket does work fine and at the moment just replies with whatever has been sent, through messageToSockets(msg). However, both attempts to send messages to the websocket from the coroutine of the web application are not working. It looks like everything is blocked by these attempts...
class webApplication(tornado.web.Application):

    def __init__(self):
        handlers = [
            (r'/', HandlerIndexPage),
            (r'/websocket', HandlerWebSocket, dict(msg='start')),
        ]
        settings = {
            'template_path': 'templates'
        }
        tornado.web.Application.__init__(self, handlers, **settings)

    @gen.coroutine
    def generateMessageToSockets(self):
        while True:
            msg = str(randint(0, 100))
            print('new messageToCon: ', msg)
            yield [con.write_message(msg) for con in HandlerWebSocket.connections]
            yield gen.sleep(1.0)

if __name__ == '__main__':
    ws_app = webApplication()
    server = tornado.httpserver.HTTPServer(ws_app)
    port = 9090
    print('Listening on port:' + str(port))
    server.listen(port)
    IOLoop.current().spawn_callback(webApplication.generateMessageToSockets)
    IOLoop.current().set_blocking_log_threshold(0.5)
    IOLoop.instance().start()
Here is the WebSocket handler:
class HandlerWebSocket(tornado.websocket.WebSocketHandler):
    connections = set()

    def initialize(self, msg):
        print('HWS:' + msg)

    def messageToSockets(self, msg):
        print('return message: ', msg)
        [con.write_message(msg) for con in self.connections]

    def open(self):
        self.connections.add(self)
        print('new connection was opened')
        pass

    def on_message(self, message):
        print('from WebSocket: ', message)
        self.messageToSockets(message)

    def on_close(self):
        self.connections.remove(self)
        print('connection closed')
        pass
I am a bit lost in the examples, questions here, documentation, etc. So any hint on how to properly start a continuously running websocket routine is greatly appreciated.
generateMessageToSockets will loop endlessly, generating messages as fast as it can without waiting for those messages to be sent. Since it starts first and never yields, the HTTPServer will never actually be able to accept a connection.
If you really want to send messages as fast as you can, the minimal solution without blocking would be
yield [con.write_message(msg) for con in HandlerWebSocket.connections]
yield gen.moment
But it would probably be better to use gen.sleep to send messages at regular intervals, instead of "as fast as possible".
Unfortunately, none of the gen.coroutine attempts worked for me, so I moved back to threads:
def generateMessageToSockets():
    while True:
        msg = str(randint(0, 100))
        print('new messageToCon: ', msg)
        [con.write_message(msg) for con in HandlerWebSocket.connections]
        sleep(1.0)
class WebApplication(tornado.web.Application):

    def __init__(self):
        handlers = [
            (r'/', HandlerIndexPage),
            (r'/websocket', HandlerWebSocket, dict(msg='start')),
        ]
        settings = {
            'template_path': 'templates'
        }
        tornado.web.Application.__init__(self, handlers, **settings)

if __name__ == '__main__':
    tGenarate = threading.Thread(target=generateMessageToSockets)
    tGenarate.start()

    ws_app = WebApplication()
    server = tornado.httpserver.HTTPServer(ws_app)
    port = 9090
    print('Listening on port:' + str(port))
    server.listen(port)
    ioloop.IOLoop.instance().start()
which works
