aiohttp test with several servers - python-3.x

I am trying to write a test that communicates with several instances of a web server (which also communicate among themselves), but the second server seems to override the first no matter what I try. Any suggestions on how to solve this?
So far I have this:
import os
from aiohttp.test_utils import TestClient, TestServer, loop_context
import pytest
from http import HTTPStatus as hs
from mycode import main

@pytest.fixture
def cli(loop):
    app = main(["-t", "--location", "global", "-g"])
    srv = TestServer(app, port=40080)
    client = TestClient(srv, loop=loop)
    loop.run_until_complete(client.start_server())
    return client
@pytest.fixture
def cli_edge(loop):
    app = main(["-t", "--location", "edge", "-j", "http://127.0.0.1:40080"])
    srv = TestServer(app, port=40081)
    client = TestClient(srv, loop=loop)
    loop.run_until_complete(client.start_server())
    return client
async def test_edge_namespace(cli, cli_edge):
    resp = await cli.put('/do/something', json={})
    assert resp.status in [hs.OK, hs.CREATED, hs.NO_CONTENT]
    resp = await cli_edge.get('/do/something')
    assert resp.status in [hs.OK, hs.CREATED, hs.NO_CONTENT]
The above call to cli.put goes to the server intended for cli_edge. I will have several more tests that should communicate with the servers.
Using Python 3.7 and pytest with asyncio and aiohttp extensions.

The suggested code works; the error was elsewhere, in the server implementation.
You can add:
def fin():
    loop.run_until_complete(srv.close())
    loop.run_until_complete(client.close())
request.addfinalizer(fin)
and take the request parameter in the pytest fixtures to close the connections cleanly.
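Put together, a fixture with this finalizer might look like the following (a sketch based on the code above, not verified against mycode.main):
@pytest.fixture
def cli(loop, request):
    app = main(["-t", "--location", "global", "-g"])
    srv = TestServer(app, port=40080)
    client = TestClient(srv, loop=loop)
    loop.run_until_complete(client.start_server())

    def fin():
        # Shut down the test server and client once the test finishes
        loop.run_until_complete(srv.close())
        loop.run_until_complete(client.close())

    request.addfinalizer(fin)
    return client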

Related

How to send ros2 messages from a websocket server to connected clients in tornado

I have a ros2 publisher script that sends custom messages from ros2 nodes. What I need to do is to have a subscriber (which is also my websocket server) listen to the messages that the publisher sends, convert them to a dictionary, and send them as JSON from the websocket server to a connected websocket client. I have already checked the rosbridge repo but I could not make it work. It doesn't have enough documentation and I am new to ros.
I need something like this:
import rclpy
import sys
from rclpy.node import Node
import tornado.ioloop
import tornado.httpserver
import tornado.web
import tornado.websocket
import threading
from custom.msg import CustomMsg
from .convert import message_to_ordereddict

wss = []

class wsHandler(tornado.websocket.WebSocketHandler):
    def open(self):
        print('Online')
        if self not in wss:
            wss.append(self)

    def on_close(self):
        print('Offline')
        if self in wss:
            wss.remove(self)

def wsSend(message):
    for ws in wss:
        ws.write_message(message)

class MinimalSubscriber(Node):
    def __init__(self):
        super().__init__('minimal_subscriber')
        self.subscription = self.create_subscription(CustomMsg, 'topic', self.CustomMsg_callback, 10)
        self.subscription  # prevent unused variable warning

    def CustomMsg_callback(self, msg):
        ws_message = message_to_ordereddict(msg)
        wsSend(ws_message)

if __name__ == "__main__":
    http_server = tornado.httpserver.HTTPServer(tornado.web.Application([(r'/', wsHandler)]))
    http_server.listen(8888)
    main_loop = tornado.ioloop.IOLoop.instance()
    # Start main loop
    main_loop.start()
so the callback function in the MinimalSubscriber class receives the ros message, converts it to a dictionary and sends it to the websocket client. I am a bit confused about how to make these two threads (ros and websocket) communicate with each other.
So I think I got a bit confused myself going through the threading, so I changed my approach and made it work using the tornado periodic callback, with the spin_once function of rclpy as the callback function. I will post my solution as it might help some people who have the same issue.
import queue
import rclpy
from rclpy.node import Node
import tornado.ioloop
import tornado.httpserver
import tornado.web
import tornado.websocket
from custom.msg import CustomMsg
from .convert import message_to_ordereddict

wss = []

class wsHandler(tornado.websocket.WebSocketHandler):
    @classmethod
    def route_urls(cls):
        return [(r'/', cls, {}),]

    def open(self):
        print('Online')
        if self not in wss:
            wss.append(self)

    def on_close(self):
        print('Offline')
        if self in wss:
            wss.remove(self)

def make_app():
    myWebHandler = wsHandler.route_urls()
    return tornado.web.Application(myWebHandler)

message_queue = queue.Queue()

class MinimalSubscriber(Node):
    def __init__(self):
        super().__init__('minimal_subscriber')
        self.subscription = self.create_subscription(CustomMsg, 'topic', self.CustomMsg_callback, 10)
        self.subscription  # prevent unused variable warning

    def CustomMsg_callback(self, msg):
        msg_dict = message_to_ordereddict(msg)
        message_queue.put(msg_dict)

if __name__ == "__main__":
    rclpy.init()
    minimal_subscriber = MinimalSubscriber()

    def send_ros_to_clients():
        # Let rclpy process at most one pending callback without blocking the IOLoop
        rclpy.spin_once(minimal_subscriber, timeout_sec=0)
        if message_queue.empty():
            return
        my_msg = message_queue.get()
        for client in wss:
            client.write_message(my_msg)

    app = make_app()
    server = tornado.httpserver.HTTPServer(app)
    server.listen(8888)
    tornado.ioloop.PeriodicCallback(send_ros_to_clients, 1).start()
    tornado.ioloop.IOLoop.current().start()
    minimal_subscriber.destroy_node()
    rclpy.shutdown()
I also folded the wsSend function into the send_ros_to_clients function. Some might say that using a global queue is not the best practice, but I could not come up with another solution. I would appreciate any suggestions or corrections on my solution.
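One way to avoid the module-level queue (a sketch under the same assumptions as the code above, showing only the changed parts) is to pass the queue into the node, so the dependency is explicit:
import queue
from rclpy.node import Node
from custom.msg import CustomMsg
from .convert import message_to_ordereddict

class MinimalSubscriber(Node):
    def __init__(self, out_queue):
        super().__init__('minimal_subscriber')
        self.out_queue = out_queue  # injected instead of referencing a global
        self.subscription = self.create_subscription(CustomMsg, 'topic', self.CustomMsg_callback, 10)

    def CustomMsg_callback(self, msg):
        self.out_queue.put(message_to_ordereddict(msg))

# at startup:
message_queue = queue.Queue()
minimal_subscriber = MinimalSubscriber(message_queue)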

Python gRPC service for Envoy ratelimit

I am trying to create a small service to respond to Envoy's rate limiting queries. I have compiled all the relevant protobuf files, and the one relevant for the service I am trying to implement is here:
https://github.com/envoyproxy/envoy/blob/v1.17.1/api/envoy/service/ratelimit/v3/rls.proto
There is a service definition in there, but inside the "compiled" Python file, all I see about it is this:
_RATELIMITSERVICE = _descriptor.ServiceDescriptor(
  name='RateLimitService',
  full_name='envoy.service.ratelimit.v3.RateLimitService',
  file=DESCRIPTOR,
  index=0,
  serialized_options=None,
  create_key=_descriptor._internal_create_key,
  serialized_start=1531,
  serialized_end=1663,
  methods=[
  _descriptor.MethodDescriptor(
    name='ShouldRateLimit',
    full_name='envoy.service.ratelimit.v3.RateLimitService.ShouldRateLimit',
    index=0,
    containing_service=None,
    input_type=_RATELIMITREQUEST,
    output_type=_RATELIMITRESPONSE,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
])
_sym_db.RegisterServiceDescriptor(_RATELIMITSERVICE)

DESCRIPTOR.services_by_name['RateLimitService'] = _RATELIMITSERVICE
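Note that rls_pb2.py only carries these descriptors. The servicer base class and registration helper live in a separate rls_pb2_grpc.py, which is only generated when the proto is compiled with the grpc plugin, for example (a sketch; the include path and file layout are assumptions):
from grpc_tools import protoc

protoc.main([
    'protoc', '-I.',
    '--python_out=.',
    '--grpc_python_out=.',
    'envoy/service/ratelimit/v3/rls.proto',
])
That produces RateLimitServiceServicer and add_RateLimitServiceServicer_to_server, which can replace the hand-written handler registration below.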
Here is my feeble attempt at implementing the service:
import logging
import asyncio
import grpc
from envoy.service.ratelimit.v3.rls_pb2 import RateLimitResponse, RateLimitRequest

class RL:
    def ShouldRateLimit(self, request, context):
        result = RateLimitResponse()

def add_handler(servicer, server):
    rpc_method_handlers = {
        'ShouldRateLimit': grpc.unary_unary_rpc_method_handler(
            servicer.ShouldRateLimit,
            request_deserializer=RateLimitRequest.FromString,
            response_serializer=RateLimitResponse.SerializeToString,
        )
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'envoy.service.ratelimit.v3.RateLimitService',
        rpc_method_handlers
    )
    server.add_generic_rpc_handlers((generic_handler,))

async def serve():
    server = grpc.aio.server()
    add_handler(RL(), server)
    listen_addr = '[::]:5051'
    server.add_insecure_port(listen_addr)
    logging.info(f'Starting server on {listen_addr}')
    await server.start()
    await server.wait_for_termination()

if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    asyncio.run(serve())
How am I supposed to return (or even instantiate) a RateLimitResponse back to the caller?
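For what it's worth, a unary gRPC method handler just builds its response message and returns it; gRPC then runs it through the registered response_serializer. A minimal sketch (overall_code and the Code enum values come from rls.proto above; double-check the names against the generated rls_pb2 module):
class RL:
    def ShouldRateLimit(self, request, context):
        # Build the response and return it; gRPC serializes it for the caller
        result = RateLimitResponse()
        result.overall_code = RateLimitResponse.OK  # or RateLimitResponse.OVER_LIMIT to reject
        return result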

How to put a method into a thread and use it when performing the test

I have this part of code, which does a psubscribe to redis. I want to run it in a thread, working in the background, while the other part of the code checks for notifications from it.
def psubscribe(context, param1, param2, param3):
    context.test_config = load_config()
    RedisConnector(context.test_config["redis_host"],
                   context.test_config["redis_db_index"])
    redis_notification_subscriber_connector = RedisConnector(
        context.test_config["notification__redis_host"],
        int(param3),
        int(context.test_config["notification_redis_port"]))
    context.redis_connectors = redis_notification_subscriber_connector.psubscribe_to_redis_event(
        param1, timeout_seconds=int(param2))
What I have done till now (but it's not running :( ):
context.t = threading.Thread(target=psubscribe, args=['param1', 'param2', 'param3'])
context.t.start()
It is actually working. I think you don't actually need to pass the context variable to your psubscribe function.
Here is an example:
Start an http server that listens on port 8000 in a background thread.
Send http requests to it and validate the responses.
Feature scenario:
Scenario: Run background process and validate responses
    Given Start background process
    Then Validate outputs
background_steps.py file:
import threading
import logging
from behave import *
from features.steps.utils import run_server
import requests

@given("Start background process")
def step_impl(context):
    context.t = threading.Thread(target=run_server, args=[8000])
    context.t.daemon = True  # don't block process exit on this thread
    context.t.start()

@then("Validate outputs")
def step_impl(context):
    response = requests.get('http://127.0.0.1:8000')
    # the bare BaseHTTPRequestHandler implements no methods, so it answers 501
    assert response.status_code == 501
utils.py file:
from http.server import HTTPServer, BaseHTTPRequestHandler

def run_server(port, server_class=HTTPServer, handler_class=BaseHTTPRequestHandler):
    server_address = ('', port)
    httpd = server_class(server_address, handler_class)
    httpd.serve_forever()

Using pytest fixtures in class

I have begun writing unit tests for my Flask API. I got them to work when declared outside of a class. However, for simplicity and OOP constraints, I am trying to have everything run from a class. The issue is I cannot seem to pass any fixtures to my test class. The code I have is as follows:
# conftest.py
import os, json, pytest
from ..app import create_app
from flask import Flask

@pytest.fixture
def env_setup():
    env_name = os.getenv('FLASK_ENV')
    app = create_app(env_name)
    return app
I am trying to import env_setup into the following file.
# test_BaseURL.py
import pytest

@pytest.mark.usefixtures("env_setup")
class TestStaticPages:
    def setUp(self, env_setup):
        """
        Setup Test
        """
        self.client = env_setup.test_client()

    def test_base_route(self, env_setup):
        # client = env_setup.test_client()
        url = '/'
        html1 = b'Welcome to the API. Please visit '
        html2 = b'https://example.com to learn more about this app.'
        response = self.client.get(url)
        assert response.get_data() == html1 + html2
        assert response.status_code == 200
I keep getting the following error when I run this test:
> response = self.client.get(url)
E AttributeError: 'TestStaticPages' object has no attribute 'client'
src/tests/test_BaseURL.py:18: AttributeError
However, if I uncomment the line client = env_setup.test_client() it works. For whatever reason it cannot seem to pick up self.client from the setUp method and keeps erroring out.
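The reason is that setUp is a unittest convention: pytest only calls it on unittest.TestCase subclasses, so in a plain pytest class self.client is never assigned. One way to keep the class-based setup (a sketch reusing the env_setup fixture above) is an autouse fixture:
# test_BaseURL.py (alternative)
import pytest

class TestStaticPages:
    @pytest.fixture(autouse=True)
    def _setup(self, env_setup):
        # runs automatically before each test in this class
        self.client = env_setup.test_client()

    def test_base_route(self):
        response = self.client.get('/')
        assert response.status_code == 200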
Here is how I fixed my issue:
# conftest.py
import os, json, pytest
from ..app import create_app
from flask import Flask

@pytest.fixture
def client():
    env_name = os.getenv('FLASK_ENV')
    app = create_app(env_name)
    client = app.test_client()
    return client
I was then able to use the client fixture in my other test class like so:
# test_StaticView.py
import pytest

@pytest.mark.usefixtures("client")
class TestStaticPages:
    def test_base_route(self, client):
        url = '/'
        html1 = b'Welcome to the API. Please visit '
        html2 = b'https://example.com to learn more about this app.'
        response = client.get(url)
        assert response.get_data() == html1 + html2
        assert response.status_code == 200

Singleton connection pool for aiohttp+aiomysql (python 3.5)

I am playing with aiohttp+aiomysql. I want to share the same connection pool instance between request calls, so I create a global variable and initialize it once on the first coroutine call.
My code:
import asyncio
from aiohttp import web
from aiohttp_session import get_session, session_middleware
from aiohttp_session.cookie_storage import EncryptedCookieStorage
from aiohttp_session import SimpleCookieStorage
# from mysql_pool import POOL
from aiomysql import create_pool

M_POOL = None

async def get_pool(loop):
    global M_POOL
    if M_POOL:
        return M_POOL
    M_POOL = await create_pool(host='127.0.0.1', port=3306, user='user', password='user', db='test', loop=loop)
    return M_POOL

async def query(request):
    loop = asyncio.get_event_loop()
    pool = await get_pool(loop)
    print(id(pool))
    async with pool.acquire() as conn:
        async with conn.cursor() as cur:
            await cur.execute("SELECT 42;")
            value = await cur.fetchone()
            print(value)
    return web.Response(body=str.encode(str(value)))

app = web.Application(middlewares=[session_middleware(SimpleCookieStorage())])
app.router.add_route('GET', '/query', query)
web.run_app(app)
Is this a convenient way of doing it, or is there maybe something better?
I highly discourage global variable usage. Please take a look at the aiohttp demos for the canonical approach; SiteHandler there is a class that implements the website views.
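In practice the canonical aiohttp approach is to create the pool at application startup, store it on the app, and read it from request.app inside handlers. A minimal sketch of the query handler above rewritten that way (untested; credentials copied from the question), using the on_startup/on_cleanup signals:
from aiohttp import web
from aiomysql import create_pool

async def init_pool(app):
    # Runs once at startup; the pool lives on the app instead of in a global
    app['db_pool'] = await create_pool(host='127.0.0.1', port=3306,
                                       user='user', password='user', db='test')

async def close_pool(app):
    app['db_pool'].close()
    await app['db_pool'].wait_closed()

async def query(request):
    pool = request.app['db_pool']
    async with pool.acquire() as conn:
        async with conn.cursor() as cur:
            await cur.execute("SELECT 42;")
            value = await cur.fetchone()
    return web.Response(text=str(value))

app = web.Application()
app.router.add_route('GET', '/query', query)
app.on_startup.append(init_pool)
app.on_cleanup.append(close_pool)
web.run_app(app)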
But you've pointed to demos for the case where a request object is available. I have the same problem using aiohttp. My application is split into two kinds of modules: one for the server functionality and one for the client (a crawler). In the server part it's fine, I can use request.app['dbpool']. But in the crawler part I want to use db connections too, and I can't see a reason to create another connection pool.
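One way to give the crawler the same pool without falling back to a global (a sketch; crawl here is a hypothetical coroutine standing in for the crawler's entry point) is to launch it as a background task from the same startup hook, so both halves share app['db_pool']:
import asyncio

async def start_crawler(app):
    # Hand the shared pool to the crawler explicitly
    app['crawler_task'] = asyncio.ensure_future(crawl(app['db_pool']))

async def stop_crawler(app):
    app['crawler_task'].cancel()

app.on_startup.append(start_crawler)
app.on_cleanup.append(stop_crawler)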
