Uvicorn not running sanic "before_server_start" - python-3.x

I have a sanic app like so:
from functools import wraps
import os
from sanic import Sanic
from sanic.response import json
from es_api.client import ElasticEngine
from utils import cleanup
app = Sanic(__name__)

async def setup_es_client(app, loop):
    app.es_client = ElasticEngine.from_bonsai("xkcd_production", test_instance=False)

@app.route("/", methods=["GET"])
async def home(request):
    return json({"hello": "worldddd"})

@app.route("/all", methods=["GET"])
async def display_all_docs(request):
    results = app.es_client.search_all().results()
    return json(
        {"results": results}
    )

if __name__ == "__main__":
    app.register_listener(setup_es_client,
                          'before_server_start')
    app.run(host="0.0.0.0", port=8000, debug=True, workers=20)
When I run uvicorn myapp, it serves the homepage fine: I see the expected json.
But when I hit /all, it says "app has no attribute es_client", which presumably indicates that the before_server_start listener hasn't been run.
How do I fix this? I've looked through the Sanic docs but couldn't find any reference to this issue.
(It works fine when I run the app directly, i.e. python3 myapp.py.)

Fixed. Move app.register_listener(setup_es_client, 'before_server_start') above the if __name__ == "__main__": block, so the listener is registered at import time, which is when uvicorn loads the module.
Better yet, just use Sanic's built-in listener decorator for nicer ergonomics: https://sanic.readthedocs.io/en/latest/sanic/middleware.html
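For reference, a minimal sketch of that decorator approach, reusing the setup from the question; because the listener is registered at import time, it also fires when uvicorn loads the module:
from sanic import Sanic
from sanic.response import json
from es_api.client import ElasticEngine

app = Sanic(__name__)

# registered at import time, so it also runs under `uvicorn myapp:app`
@app.listener('before_server_start')
async def setup_es_client(app, loop):
    app.es_client = ElasticEngine.from_bonsai("xkcd_production", test_instance=False)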

Related

How to send ros2 messages from a websocket server to connected clients in tornado

I have a ROS2 publisher script that sends custom messages from ROS2 nodes. What I need is a subscriber (which is also my websocket server) that listens to the messages the publisher sends, converts them to a dictionary, and sends them as JSON from the websocket server to a connected websocket client. I have already checked the rosbridge repo but could not make it work; it doesn't have enough documentation and I am new to ROS.
I need something like this:
import sys
import threading

import rclpy
from rclpy.node import Node

import tornado.ioloop
import tornado.httpserver
import tornado.web
import tornado.websocket

from custom.msg import CustomMsg
from .convert import message_to_ordereddict

wss = []

class wsHandler(tornado.websocket.WebSocketHandler):
    def open(self):
        print('Online')
        if self not in wss:
            wss.append(self)

    def on_close(self):
        print('Offline')
        if self in wss:
            wss.remove(self)

def wsSend(message):
    for ws in wss:
        ws.write_message(message)

class MinimalSubscriber(Node):
    def __init__(self):
        super().__init__('minimal_subscriber')
        self.subscription = self.create_subscription(CustomMsg, 'topic', self.CustomMsg_callback, 10)
        self.subscription  # prevent unused variable warning

    def CustomMsg_callback(self, msg):
        ws_message = message_to_ordereddict(msg)
        wsSend(ws_message)

if __name__ == "__main__":
    http_server = tornado.httpserver.HTTPServer(tornado.web.Application([(r'/', wsHandler)]))
    http_server.listen(8888)
    main_loop = tornado.ioloop.IOLoop.instance()
    # Start main loop
    main_loop.start()
So the callback function in the MinimalSubscriber class receives the ROS message, converts it to a dictionary, and sends it to the websocket client. I am a bit confused about how to make these two threads (ROS and websocket) communicate with each other.
I think I got a bit confused myself going through the threading, so I changed my approach and made it work using Tornado's periodic callback with rclpy's spin_once as the callback function. I am posting my solution as it might help people who have the same issue.
import queue

import rclpy
from rclpy.node import Node

import tornado.ioloop
import tornado.httpserver
import tornado.web
import tornado.websocket

from custom.msg import CustomMsg
from .convert import message_to_ordereddict

wss = []

class wsHandler(tornado.websocket.WebSocketHandler):
    @classmethod
    def route_urls(cls):
        return [(r'/', cls, {}), ]

    def open(self):
        print('Online')
        if self not in wss:
            wss.append(self)

    def on_close(self):
        print('Offline')
        if self in wss:
            wss.remove(self)

def make_app():
    myWebHandler = wsHandler.route_urls()
    return tornado.web.Application(myWebHandler)

message_queue = queue.Queue()

class MinimalSubscriber(Node):
    def __init__(self):
        super().__init__('minimal_subscriber')
        self.subscription = self.create_subscription(CustomMsg, 'topic', self.CustomMsg_callback, 10)
        self.subscription  # prevent unused variable warning

    def CustomMsg_callback(self, msg):
        msg_dict = message_to_ordereddict(msg)
        message_queue.put(msg_dict)

if __name__ == "__main__":
    rclpy.init()
    minimal_subscriber = MinimalSubscriber()

    def send_ros_to_clients():
        # process one pending ROS callback, then forward the queued message
        rclpy.spin_once(minimal_subscriber)
        my_msg = message_queue.get()
        for client in wss:
            client.write_message(my_msg)

    app = make_app()
    server = tornado.httpserver.HTTPServer(app)
    server.listen(8888)
    tornado.ioloop.PeriodicCallback(send_ros_to_clients, 1).start()
    tornado.ioloop.IOLoop.current().start()

    minimal_subscriber.destroy_node()
    rclpy.shutdown()
I also folded the wsSend function into send_ros_to_clients. Some might say that using a global queue is not best practice, but I could not come up with another solution. I would appreciate any suggestions or corrections.
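As an untested alternative sketch (not part of the solution above): rclpy could spin in a daemon thread and each message could be handed to Tornado with IOLoop.add_callback, which is documented as thread-safe, so no shared queue would be needed. Names here mirror the code above.
import threading
import rclpy
import tornado.ioloop

io_loop = tornado.ioloop.IOLoop.current()

def forward_to_clients(msg_dict):
    # runs on Tornado's thread, so it is safe to touch the handlers here
    for client in wss:
        client.write_message(msg_dict)

def on_ros_message(msg_dict):
    # called from the ROS thread; add_callback is the thread-safe hand-off
    io_loop.add_callback(forward_to_clients, msg_dict)

# CustomMsg_callback would call on_ros_message(msg_dict) instead of queueing,
# and the node would spin in the background, e.g.:
# threading.Thread(target=rclpy.spin, args=(minimal_subscriber,), daemon=True).start()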

Catching flask abort status code within a test_request_context

I want to understand how I can catch an HTTPException raised by flask.abort while using a test_request_context in a test for the calling method only.
# example.py
import flask
from flask import abort, jsonify

@api.route('/', methods=['POST'])
def do_stuff():
    param_a = get_param()
    return jsonify(a=param_a)

# check if request is json, return http error codes otherwise
def get_param():
    if flask.request.is_json:
        try:
            data = flask.request.get_json()
            a = data['param_a']
            return a
        except (ValueError, KeyError):
            abort(400)
    else:
        abort(405)

# test_example.py
from app import app  # where app = Flask(__name__)
from example import get_param
import flask

def test_get_param(app):
    with app.test_request_context('/', data=flask.json.dumps(good_json), content_type='application/json'):
        assert a == get_param()
In the get_param method above, I abort if the is_json check or the get_json() call fails. To test this, I pass a test_request_context without a content_type and, based on this blog and this answer, I tried adding a nested context manager like so:
# test_example.py
from app import app  # where app = Flask(__name__)
from example import get_param
from werkzeug.exceptions import HTTPException
import flask
import pytest

def test_get_param_aborts(app):
    with app.test_request_context('/', data=flask.json.dumps('http://example', 'nope'), content_type=''):
        with pytest.raises(HTTPException) as httperror:
            get_param()
        assert 405 == httperror
but I get an assert 405 == <ExceptionInfo for raises contextmanager> assertion error.
Can someone please explain this and suggest a way to test the abort in this get_param method?
Update:
Based on @tmt's answer, I modified the test. However, even though the test passes, while debugging I noticed that the two assertions are never reached!
# test_example.py
from app import app  # where app = Flask(__name__)
from example import get_param
from werkzeug.exceptions import HTTPException
import flask
import pytest

def test_get_param_aborts(app):
    with app.test_request_context('/', data=flask.json.dumps('http://example', 'nope'), content_type=''):
        with pytest.raises(HTTPException) as httperror:
            get_param()  # <-- this line is reached
            assert 405 == httperror.value.code
            assert 1 == 2
httperror is an instance of ExceptionInfo, which is pytest's own class describing the exception. Once the exception is raised, httperror also has a value property, which is the instance of the HTTPException itself. If my memory is correct, HTTPException has a code property equal to the HTTP status code, so you can use it to do the assertion:
# test_example.py
from app import app
from example import get_param
from werkzeug.exceptions import HTTPException
import flask
import pytest

def test_get_param_aborts(app):
    with app.test_request_context('/', data=flask.json.dumps({}), content_type=''):
        with pytest.raises(HTTPException) as httperror:
            get_param()
        assert 405 == httperror.value.code
Notes:
get_param() needs to be called within pytest.raises() context manager.
The assertion needs to be done outside of the context manager because once an exception is raised the context ends.
I don't know if pytest.raise is your typo or if it really existed in older versions of pytest. AFAIK it should be pytest.raises.

Flask-SocketIO emit not working from different module?

When I invoke socketio.emit('someevent', 'blahblah') from the server.py file, everything works as intended. But when I try to invoke the same method from bot.py, nothing happens.
Code:
server.py:
import eventlet
eventlet.monkey_patch()
import eventlet.wsgi
from flask import Flask, render_template, jsonify, request, abort
from flask_cors import CORS, cross_origin
import threading
from thread_manager import ThreadManager
from flask_socketio import SocketIO, emit, send

cho = Flask(__name__, static_folder="client/dist", template_folder="client/dist")
socketio = SocketIO(cho)
cors = CORS(cho)
threadmanager = ThreadManager()  # Start the thread manager

import bot as bot_module

@cho.route('/api/start_bot', methods=['POST'])
@cross_origin()
def startBot():
    """
    Begins the execution
    :return:
    """
    if request.method == 'POST':
        request_json = request.get_json()
        # .... more code
        bot = bot_module.Bot(some_args_from_request_above)
        bot_thread = threading.Thread(target=bot.run)
        bot_thread.start()
        if threadmanager.check_thread_status(bot_name):
            print('Thread is alive!')
            return ok_res
        else:
            print('Thread seems inactive')
            return bad_res

if __name__ == "__main__":
    eventlet.wsgi.server(eventlet.listen(('0.0.0.0', 5000)), cho, debug=True)
bot.py
import server

class Bot:
    # .....
    def run(self):
        server.socketio.emit('someevent', 'w0w')  # <-- nothing happens
I know I'm using the standard threading mechanism, but it seems not to be related to threads at all: I can create a static method inside the Bot class, invoke it from the main file before creating a separate thread, and nothing happens. The thread_manager module contains nothing that would interfere; I've even removed it from the picture completely and nothing changed. Any clues?
Turns out this was entirely down to the circular import. Splitting the app declaration from the entrypoint fixed it, so that there is a third module from which both server.py and bot.py import socketio.
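A minimal sketch of that layout (the file name extensions.py is illustrative, not from the original project):
# extensions.py -- owns the SocketIO instance and imports nothing from the app
from flask_socketio import SocketIO
socketio = SocketIO()

# server.py -- creates the app and binds the extension to it
from flask import Flask
from extensions import socketio

cho = Flask(__name__)
socketio.init_app(cho)
import bot as bot_module  # safe now: bot no longer imports server

# bot.py -- imports the shared extension, not the server module
from extensions import socketio

class Bot:
    def run(self):
        socketio.emit('someevent', 'w0w')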

How can I proxy big contents with tornado on Python3?

I am trying to implement an asynchronous HTTP reverse proxy with Tornado on Python 3.
The handler class is as follows:
class RProxyHandler(tornado.web.RequestHandler):
    @tornado.web.asynchronous
    def get(self):
        backend_url = 'http://backend-host/content.html'  # temporarily fixed
        req = tornado.httpclient.HTTPRequest(
            url=backend_url)
        http_client = tornado.httpclient.AsyncHTTPClient()
        http_client.fetch(req, self.backend_callback)

    def backend_callback(self, response):
        self.write(response.body)
        self.finish()
When content.html is small, this code works fine. But with a large content.html, it raises an exception:
ERROR:tornado.general:Reached maximum read buffer size
I found a way to handle large content with pycurl, though it does not seem to work with Python 3. In addition, I added the streaming_callback option to HTTPRequest, but the callback is not called when the backend server has chunked responses disabled.
How can I handle large content?
Thanks.
You should be able to pass max_buffer_size to the tornado.httpclient.AsyncHTTPClient() call to raise the limit. The example below uses a 150 MB buffer:
import tornado.ioloop
import tornado.web
from tornado.httpclient import AsyncHTTPClient
from tornado import gen
from tornado.web import asynchronous

class MainHandler(tornado.web.RequestHandler):
    client = AsyncHTTPClient(max_buffer_size=1024*1024*150)

    @gen.coroutine
    @asynchronous
    def get(self):
        response = yield self.client.fetch("http://test.gorillaservers.com/100mb.bin", request_timeout=180)
        self.finish("%s\n" % len(response.body))

application = tornado.web.Application([
    (r"/", MainHandler),
])

if __name__ == "__main__":
    application.listen(8888)
    tornado.ioloop.IOLoop.instance().start()
Update: Now a full example program.
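If buffering the whole body is undesirable, another (hedged, untested) sketch is to forward chunks as they arrive with streaming_callback, so nothing large is ever held in memory; the backend URL here is the fixed one from the question:
import tornado.web
import tornado.httpclient
from tornado import gen

class StreamingRProxyHandler(tornado.web.RequestHandler):
    @gen.coroutine
    def get(self):
        client = tornado.httpclient.AsyncHTTPClient()
        req = tornado.httpclient.HTTPRequest(
            url='http://backend-host/content.html',
            streaming_callback=self.on_chunk)  # invoked for each chunk of the body
        yield client.fetch(req, request_timeout=180)
        self.finish()

    def on_chunk(self, chunk):
        # pass each backend chunk straight through to the downstream client
        self.write(chunk)
        self.flush()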

how to use epoll on tornado

I am trying to make epoll work with Tornado:
import tornado.ioloop
import tornado.web
from tornado.platform.epoll import EPollIOLoop
from tornado import web, gen

class MainHandler(tornado.web.RequestHandler):
    @web.asynchronous
    @gen.engine
    def get(self):
        self.write("Hello, world")

application = tornado.web.Application([
    (r"/", MainHandler),
])

if __name__ == "__main__":
    application.listen(8888)
    EPollIOLoop().start()
But when I start the program and visit localhost:8888/, it doesn't return anything.
Is it that my system doesn't meet the requirements? My Linux version is Ubuntu 12.04.1 LTS.
Just use tornado.ioloop.IOLoop.instance(). It chooses the best IOLoop implementation for your platform.
if __name__ == "__main__":
    application.listen(8888)
    ioloop = tornado.ioloop.IOLoop.instance()
    print(ioloop)  # prints <tornado.platform.epoll.EPollIOLoop object at ..>
    ioloop.start()
You should call self.finish() if you use the asynchronous decorator:
If this decorator is given, the response is not finished when the
method returns. It is up to the request handler to call self.finish()
to finish the HTTP request. Without this decorator, the request is
automatically finished when the get() or post() method returns.
class MainHandler(tornado.web.RequestHandler):
    @web.asynchronous
    @gen.engine
    def get(self):
        self.write("Hello, world")
        self.finish()