Tornado Request with WebSocket Callback - python-3.x

I'm trying to start a long blocking function after receiving an HTTP request. The request must be answered immediately (200 OK or 500 Internal Error), but the process should run in the background and send a notification to a WebSocket once it finishes.
The application should also accept further requests for processing, and these must likewise be answered immediately, without blocking on the previous ones.
I'm using add_callback, but I'm not sure it's the correct way to use Tornado, since it blocks the incoming HTTP requests. I've tried using separate threads, but I got exceptions when trying to call the send_message method of the WebSocket handler.
import time

from tornado import gen
from tornado.ioloop import IOLoop
from tornado.web import Application, RequestHandler, asynchronous
from tornado.websocket import WebSocketHandler


def long_process(id):
    time.sleep(5)


class RequestWeb(RequestHandler):
    @gen.coroutine
    def process(self, id):
        # Trying to call long_process, just like
        # yield gen.Task(IOLoop.current().add_timeout, time.time() + 10)
        # The response must be sent immediately, but the process should run in the background
        IOLoop.current().add_callback(callback=lambda: long_process(id))

    @asynchronous
    @gen.coroutine
    def get(self, id):
        IOLoop.current().add_future(self.process(id), self.process_complete)
        self.write("OK")

    def process_complete(self, future):
        SocketHandler.send_message('Processing complete')


class SocketHandler(WebSocketHandler):
    connections = set()

    def open(self):
        SocketHandler.connections.add(self)

    @classmethod
    def send_message(cls, message):
        for ws in cls.connections:
            ws.write_message(message)


def make_app():
    return Application([
        (r'/api/(?P<id>[a-zA-Z0-9]+)$', RequestWeb),
        (r'/ws', SocketHandler),
    ])


if __name__ == "__main__":
    app = make_app()
    app.listen(8000)
    IOLoop.current().start()
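
One way to get this behaviour (a minimal sketch, not part of the original question, assuming Tornado 4.x or later and a concurrent.futures thread pool) is to push the blocking call onto an executor and then hop back onto the IOLoop with add_callback, the one IOLoop method documented as thread-safe, to notify the WebSocket clients:

import time
from concurrent.futures import ThreadPoolExecutor

from tornado.ioloop import IOLoop
from tornado.web import Application, RequestHandler
from tornado.websocket import WebSocketHandler

executor = ThreadPoolExecutor(max_workers=4)

def long_process(id):
    time.sleep(5)  # stands in for the real blocking work

class RequestWeb(RequestHandler):
    def get(self, id):
        loop = IOLoop.current()
        future = executor.submit(long_process, id)  # runs off the IOLoop thread
        # When the worker finishes, schedule the WebSocket notification on the IOLoop.
        future.add_done_callback(
            lambda f: loop.add_callback(SocketHandler.send_message, 'Processing complete'))
        self.write("OK")  # the HTTP response goes out immediately

Here SocketHandler, make_app and the __main__ block are the same as in the code above.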

Related

How to send ros2 messages from a websocket server to connected clients in tornado

I have a ros2 publisher script that sends custom messages from ros2 nodes. What I need to do is to have a subscriber (which is also my WebSocket server) listen to the messages that the publisher sends, convert them to a dictionary, and send them as JSON from the WebSocket server to a connected WebSocket client. I have already checked the rosbridge repo but I could not make it work; it doesn't have enough documentation and I am new to ROS.
I need something like this:
import rclpy
import sys
from rclpy.node import Node

import tornado.ioloop
import tornado.httpserver
import tornado.web
import tornado.websocket
import threading

from custom.msg import CustomMsg
from .convert import message_to_ordereddict

wss = []

class wsHandler(tornado.websocket.WebSocketHandler):
    def open(self):
        print('Online')
        if self not in wss:
            wss.append(self)

    def on_close(self):
        print('Offline')
        if self in wss:
            wss.remove(self)

def wsSend(message):
    for ws in wss:
        ws.write_message(message)

class MinimalSubscriber(Node):
    def __init__(self):
        super().__init__('minimal_subscriber')
        self.subscription = self.create_subscription(CustomMsg, 'topic', self.CustomMsg_callback, 10)
        self.subscription  # prevent unused variable warning

    def CustomMsg_callback(self, msg):
        ws_message = message_to_ordereddict(msg)
        wsSend(ws_message)

if __name__ == "__main__":
    http_server = tornado.httpserver.HTTPServer(
        tornado.web.Application([(r'/', wsHandler)]))
    http_server.listen(8888)
    main_loop = tornado.ioloop.IOLoop.instance()
    # Start main loop
    main_loop.start()
so the callback function in the MinimalSubscriber class receives the ROS message, converts it to a dictionary and sends it to the WebSocket client. I am a bit confused about how to make these two threads (ROS and WebSocket) communicate with each other.
So I think I got a bit confused myself going through the threading. I changed my approach and made it work using Tornado's periodic callback, with rclpy's spin_once as the callback function. I'll post my solution as it might help people who have the same issue.
import queue

import rclpy
from rclpy.node import Node

import tornado.ioloop
import tornado.httpserver
import tornado.web
import tornado.websocket

from custom.msg import CustomMsg
from .convert import message_to_ordereddict

wss = []

class wsHandler(tornado.websocket.WebSocketHandler):
    @classmethod
    def route_urls(cls):
        return [(r'/', cls, {}), ]

    def open(self):
        print('Online')
        if self not in wss:
            wss.append(self)

    def on_close(self):
        print('Offline')
        if self in wss:
            wss.remove(self)

def make_app():
    myWebHandler = wsHandler.route_urls()
    return tornado.web.Application(myWebHandler)

msg_queue = queue.Queue()

class MinimalSubscriber(Node):
    def __init__(self):
        super().__init__('minimal_subscriber')
        self.subscription = self.create_subscription(CustomMsg, 'topic', self.CustomMsg_callback, 10)
        self.subscription  # prevent unused variable warning

    def CustomMsg_callback(self, msg):
        msg_dict = message_to_ordereddict(msg)
        msg_queue.put(msg_dict)

if __name__ == "__main__":
    rclpy.init()
    minimal_subscriber = MinimalSubscriber()

    def send_ros_to_clients():
        rclpy.spin_once(minimal_subscriber)
        my_msg = msg_queue.get()
        for client in wss:
            client.write_message(my_msg)

    app = make_app()
    server = tornado.httpserver.HTTPServer(app)
    server.listen(8888)
    tornado.ioloop.PeriodicCallback(send_ros_to_clients, 1).start()
    tornado.ioloop.IOLoop.current().start()

    minimal_subscriber.destroy_node()
    rclpy.shutdown()
I also folded the wsSend function into send_ros_to_clients. Some might say that using a global queue is not best practice, but I could not come up with another solution. I would appreciate any suggestions or corrections.
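
A possible alternative to the global queue (a sketch only, reusing the names wss, make_app, MinimalSubscriber and message_to_ordereddict from the code above): spin rclpy in a background thread and hand each converted message to Tornado with IOLoop.add_callback, the one IOLoop method documented as safe to call from another thread.

import threading

import rclpy
import tornado.httpserver
import tornado.ioloop

class ForwardingSubscriber(MinimalSubscriber):
    # Hypothetical variant of the subscriber above that forwards straight to Tornado.
    def __init__(self, main_loop):
        super().__init__()
        self.main_loop = main_loop

    def CustomMsg_callback(self, msg):
        msg_dict = message_to_ordereddict(msg)
        # Hop onto the Tornado thread before touching the WebSocket handlers.
        self.main_loop.add_callback(
            lambda: [ws.write_message(msg_dict) for ws in wss])

if __name__ == "__main__":
    rclpy.init()
    main_loop = tornado.ioloop.IOLoop.current()
    node = ForwardingSubscriber(main_loop)
    # ROS spins in a daemon thread; Tornado keeps the main thread.
    threading.Thread(target=rclpy.spin, args=(node,), daemon=True).start()
    server = tornado.httpserver.HTTPServer(make_app())
    server.listen(8888)
    main_loop.start()

This also avoids calling msg_queue.get() on the IOLoop thread, which can stall the server when no message has arrived yet.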

Flask server using asynchronous Rpc client only answer 1 request out of two

I'm trying to implement an asynchronous RPC client within a Flask server.
The idea is that each request spawns a thread with a UUID, and each request then waits until a response with the matching UUID appears in the RpcClient's queue attribute.
The problem is that one request out of two fails. I think it might be a multi-threading problem, but I don't see where it comes from.
The bug can be seen here.
Using debug prints, it seems that the message with the correct UUID is received in the _on_response callback and updates the queue attribute of that instance correctly, but the queue attribute seen inside the /rpc_call/<payload> endpoint doesn't get synchronized (so queue[uuid] holds the response in the RpcClient callback but is still None in the scope of the endpoint).
My code:
from flask import Flask, jsonify
from gevent.pywsgi import WSGIServer
import sys
import os
import pika
import uuid
import time
import threading

class RpcClient(object):
    """Asynchronous Rpc client."""
    internal_lock = threading.Lock()
    queue = {}

    def __init__(self):
        self.connection = pika.BlockingConnection(
            pika.ConnectionParameters(host='rabbitmq'))
        self.channel = self.connection.channel()
        self.channel.basic_qos(prefetch_count=1)
        self.channel.exchange_declare(exchange='kaldi_expe', exchange_type='topic')

        # Create all the queues and bind them to the corresponding routing keys
        self.channel.queue_declare('request', durable=True)
        result = self.channel.queue_declare('answer', durable=True)
        self.channel.queue_bind(exchange='kaldi_expe', queue='request', routing_key='kaldi_expe.web.request')
        self.channel.queue_bind(exchange='kaldi_expe', queue='answer', routing_key='kaldi_expe.kaldi.answer')
        self.callback_queue = result.method.queue

        thread = threading.Thread(target=self._process_data_events)
        thread.setDaemon(True)
        thread.start()

    def _process_data_events(self):
        self.channel.basic_consume(self.callback_queue, self._on_response, auto_ack=True)
        while True:
            with self.internal_lock:
                self.connection.process_data_events()
            time.sleep(0.1)

    def _on_response(self, ch, method, props, body):
        """On response we simply store the result in a local dictionary."""
        self.queue[props.correlation_id] = body

    def send_request(self, payload):
        corr_id = str(uuid.uuid4())
        self.queue[corr_id] = None
        with self.internal_lock:
            self.channel.basic_publish(exchange='kaldi_expe',
                                       routing_key="kaldi_expe.web.request",
                                       properties=pika.BasicProperties(
                                           reply_to=self.callback_queue,
                                           correlation_id=corr_id,
                                       ),
                                       body=payload)
        return corr_id

def flask_app():
    app = Flask("kaldi")

    @app.route('/', methods=['GET'])
    def server_is_up():
        return 'server is up', 200

    @app.route('/rpc_call/<payload>')
    def rpc_call(payload):
        """Simple Flask implementation for making asynchronous Rpc calls. """
        corr_id = app.config['RPCclient'].send_request(payload)
        while app.config['RPCclient'].queue[corr_id] is None:
            # print("queue server: " + str(app.config['RPCclient'].queue))
            time.sleep(0.1)
        return app.config['RPCclient'].queue[corr_id]

    return app

if __name__ == '__main__':
    while True:
        try:
            rpcClient = RpcClient()
            app = flask_app()
            app.config['RPCclient'] = rpcClient
            print("Rabbit MQ is connected, starting server", file=sys.stderr)
            app.run(debug=True, threaded=True, host='0.0.0.0')
        except pika.exceptions.AMQPConnectionError as e:
            print("Waiting for RabbitMq startup" + str(e), file=sys.stderr)
            time.sleep(1)
        except Exception as e:
            worker.log.error(e)
            exit(e)
I found where the bug came from:
The debug=True in app.run(debug=True, threaded=True, host='0.0.0.0') makes Flask's reloader restart the server right after startup.
The whole script is then executed again from the beginning, so a second RpcClient is initialized and consumes from the same queue while the thread of the first one is still running. That leaves two RpcClients consuming from the same queue, one of which is effectively useless and swallows every other response.
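
A minimal sketch of the corresponding fix, assuming the rest of the script stays the same: keep debug mode but disable the reloader, so the module (and therefore the RpcClient and its consumer thread) is only created once.

app.run(debug=True, use_reloader=False, threaded=True, host='0.0.0.0')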

http.server - send data to the client

Greetings everyone,
I have a main that runs a thread. This thread manages an HTTP server using http.server, like this:
import binascii, threading
from http.server import BaseHTTPRequestHandler, HTTPServer

class Com(threading.Thread):
    def __init__(self, queue):
        MyHandler.queue = queue
        threading.Thread.__init__(self)

    def run(self):
        httpd = HTTPServer(('10.0.0.254', 8000), MyHandler)
        httpd.serve_forever()

class MyHandler(BaseHTTPRequestHandler):
    queue = None

    def do_POST(self):
        " Receive a POST and put the received frame in the queue "

    def send(self, toSend):
        self.send_response(200, "OK")
        self.send_header('Content-type', 'text/html')
        self.end_headers()
        self.wfile.write(toSend)
The do_POST method works well: it receives data and passes it to the main thread via the queue.
What I want is this: sometimes, when I decide, the main thread should order the server to send some data. The sending code itself is fine. My problem is that, from the main thread, I can't access the HTTP server's handler instance to call the send method, because it is managed by http.server.
In the Com class (instantiated in the main), I tried something like:
def sendThroughServer(self, toSend):
    MyHandler.send(toSend)
also as a class method, but it doesn't seem to work, because it requires "self".
How can I do that, please?
Hope it is clear. Thanks in advance.
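
For context (a hedged sketch, not an authoritative fix): BaseHTTPRequestHandler objects only exist while a single request is being handled, so send can only run inside such a request. A long-polling do_GET that blocks on a second, hypothetical outbox queue filled by the main thread illustrates the constraint:

from queue import Queue

class MyHandler(BaseHTTPRequestHandler):
    queue = None       # filled by Com.__init__, as above
    outbox = Queue()   # hypothetical: the main thread puts outgoing frames here

    def do_GET(self):
        # Long-poll: block until the main thread provides data, then send it
        # inside this request; there is no handler instance outside a request.
        data = MyHandler.outbox.get()
        self.send_response(200, "OK")
        self.send_header('Content-type', 'text/html')
        self.end_headers()
        self.wfile.write(data)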

How can I proxy big contents with tornado on Python3?

I am trying to implement an asynchronous HTTP reverse proxy with Tornado on Python 3.
The handler class is as follows:
import tornado.web
import tornado.httpclient

class RProxyHandler(tornado.web.RequestHandler):
    @tornado.web.asynchronous
    def get(self):
        backend_url = 'http://backend-host/content.html'  # temporary fixed
        req = tornado.httpclient.HTTPRequest(
            url=backend_url)
        http_client = tornado.httpclient.AsyncHTTPClient()
        http_client.fetch(req, self.backend_callback)

    def backend_callback(self, response):
        self.write(response.body)
        self.finish()
When content.html is small, this code works fine. But with a large content.html, it raises an exception:
ERROR:tornado.general:Reached maximum read buffer size
I found a way to handle large contents with pycurl, though it does not seem to work with Python 3.
In addition, I added the streaming_callback option to HTTPRequest, but the callback is not called when the backend server has chunked responses disabled.
How can I handle large contents?
Thanks.
You should be able to pass max_buffer_size to the tornado.httpclient.AsyncHTTPClient() call to set the maximum buffer size. For a 150 MB buffer:
import tornado.ioloop
import tornado.web
from tornado.httpclient import AsyncHTTPClient
from tornado import gen
from tornado.web import asynchronous

class MainHandler(tornado.web.RequestHandler):
    client = AsyncHTTPClient(max_buffer_size=1024*1024*150)

    @gen.coroutine
    @asynchronous
    def get(self):
        response = yield self.client.fetch("http://test.gorillaservers.com/100mb.bin", request_timeout=180)
        self.finish("%s\n" % len(response.body))

application = tornado.web.Application([
    (r"/", MainHandler),
])

if __name__ == "__main__":
    application.listen(8888)
    tornado.ioloop.IOLoop.instance().start()
Update: Now a full example program.
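
If raising the buffer size is not enough (for example when the proxied file can be arbitrarily large), another hedged option is the streaming_callback the question already mentions: forward each chunk to the client as it arrives instead of buffering the whole body. A sketch, assuming the same coroutine style as the example above:

from tornado import gen
from tornado.httpclient import AsyncHTTPClient
import tornado.web

class StreamingProxyHandler(tornado.web.RequestHandler):
    @gen.coroutine
    def get(self):
        client = AsyncHTTPClient()

        def on_chunk(chunk):
            # Pass each chunk through instead of accumulating it in memory.
            self.write(chunk)
            self.flush()

        yield client.fetch("http://backend-host/content.html",
                           streaming_callback=on_chunk,
                           request_timeout=180)
        self.finish()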

how to use epoll on tornado

I am trying to make epoll work on Tornado:
import tornado.ioloop
import tornado.web
from tornado.platform.epoll import EPollIOLoop
from tornado import web, gen

class MainHandler(tornado.web.RequestHandler):
    @web.asynchronous
    @gen.engine
    def get(self):
        self.write("Hello, world")

application = tornado.web.Application([
    (r"/", MainHandler),
])

if __name__ == "__main__":
    application.listen(8888)
    EPollIOLoop().start()
But when I start the program and visit localhost:8888/ it doesn't return anything.
Is it that my system doesn't meet the requirements? My Linux version is Ubuntu 12.04.1 LTS.
Just use tornado.ioloop.IOLoop.instance(). It chooses the best IOLoop implementation for your platform.
if __name__ == "__main__":
    application.listen(8888)
    ioloop = tornado.ioloop.IOLoop.instance()
    print(ioloop)  # prints <tornado.platform.epoll.EPollIOLoop object at ..>
    ioloop.start()
You should also call self.finish() if you use the asynchronous decorator:
If this decorator is given, the response is not finished when the method returns. It is up to the request handler to call self.finish() to finish the HTTP request. Without this decorator, the request is automatically finished when the get() or post() method returns.
class MainHandler(tornado.web.RequestHandler):
    @web.asynchronous
    @gen.engine
    def get(self):
        self.write("Hello, world")
        self.finish()
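
For reference, a hedged sketch of the modern equivalent (Tornado 5/6, where web.asynchronous and gen.engine no longer exist): a native coroutine handler finishes automatically when it returns, and IOLoop.current() picks the right event loop implementation for the platform.

import tornado.ioloop
import tornado.web

class MainHandler(tornado.web.RequestHandler):
    async def get(self):
        self.write("Hello, world")

if __name__ == "__main__":
    app = tornado.web.Application([(r"/", MainHandler)])
    app.listen(8888)
    # No explicit EPollIOLoop: current() returns the asyncio-based loop on Tornado 5+.
    tornado.ioloop.IOLoop.current().start()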
