How to write a pytest for a WebSocket client endpoint - python-3.x

I used WebSocketApp to create a synchronous Python WebSocket client which interacts with a WebSocket server.
Now I am interested in integrating the WebSocket client into an end-to-end test.
I tried to write a pytest for the WebSocket client, but because I am a beginner with pytest, I am blocked and do not know how to continue.
This is my code:
import json

import websocket

no_message = 0
count = 1


def on_error(ws, error):
    print(error)


def on_close(ws, close_status_code, close_msg):
    print("############## closed ###############")


def on_message(ws, message):
    try:
        if message is not None:
            print("RESPONSE FROM SERVER.......", message)
            global no_message
            no_message += 1
            if no_message >= count:
                ws.close()
        if message is None:
            ws.close()
    except Exception as exc:
        print("error")
        on_error(ws, exc)  # ws.on_error() would fail: the callback expects (ws, error)


def on_open(ws):
    global count
    message = {
        "session_id": "String",
        "destination": "127.0.0.1",
        "count": count,
        "command": "LIVE_PING",
    }
    count = message.get("count")
    ws.send(json.dumps(message))


if __name__ == "__main__":
    websocket.enableTrace(True)
    host = "ws://127.0.0.1:8000/"
    ws = websocket.WebSocketApp(
        host,
        on_open=on_open,
        on_message=on_message,
        on_error=on_error,
        on_close=on_close,
    )
    ws.run_forever()
And this is my pytest:
import json
import unittest.mock as mock

import websocket


class Testws():
    def test_on_close(self):
        print("Close")

    def test_on_error(self):
        print('ERROR')

    def test_on_message(self, message):
        assert message == 'RESPONSE FROM SERVER....... {"session_id":"String","sample":0,"elapsed":15}'

    def test_on_open(self, ws):
        fake_message = {
            "session_id": "String",
            "destination": "127.0.0.1",
            "count": 2,
            "command": "LIVE_PING",
        }
        ws.send(json.dumps(fake_message))


c = Testws()
host = "ws://127.0.0.1:8000/"
ws = websocket.WebSocketApp(host, on_open=c.test_on_open, on_message=c.test_on_message)
ws.run_forever()
I also tried monkeypatching, but I am confused and do not think it is the right way:
import json

import websocket
from websocket import WebSocketApp

from wsClient import synch_ws_client as cl


def test_synch_ws_client(monkeypatch):
    def mock_on_message():
        print('RESPONSE FROM SERVER....... {"session_id":"String","sample":0,"elapsed":2}')

    def mock_on_error():
        print('Error')

    def mock_on_close():
        print('############## closed ###############')

    def mock_on_open(ws):
        fake_message = {
            "session_id": "String",
            "destination": "127.0.0.1",
            "count": 1,
            "command": "LIVE_PING"
        }
        ws.send(json.dumps(fake_message))

    monkeypatch.setattr(WebSocketApp, 'on_message', mock_on_message)
    monkeypatch.setattr(WebSocketApp, 'on_close', mock_on_close)
    monkeypatch.setattr(WebSocketApp, 'on_error', mock_on_error)
    monkeypatch.setattr(WebSocketApp, 'on_open', mock_on_open)
    assert cl.on_message() == 'RESPONSE FROM SERVER....... {"session_id":"String","sample":0,"elapsed":2}'
I do not know how to check, for example, that the on_open and on_message callbacks are called successfully.
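One way to get started, without a live server, is to unit-test the callbacks directly with a mock standing in for the WebSocketApp object. A minimal sketch, reusing the import path from the monkeypatch attempt above (so the module layout is an assumption carried over from the question):

import json
import unittest.mock as mock

from wsClient import synch_ws_client as cl


def test_on_open_sends_live_ping():
    fake_ws = mock.Mock()
    cl.on_open(fake_ws)
    fake_ws.send.assert_called_once()                  # exactly one frame goes out
    sent = json.loads(fake_ws.send.call_args[0][0])
    assert sent["command"] == "LIVE_PING"


def test_on_message_closes_after_count():
    fake_ws = mock.Mock()
    cl.no_message = 0                                  # reset the module-level counters
    cl.count = 1
    cl.on_message(fake_ws, '{"session_id": "String", "sample": 0}')
    fake_ws.close.assert_called_once()                 # count reached, so the client closes

Because the callbacks take ws as a plain argument, a Mock can stand in for the connection and pytest can assert on send and close without opening a socket.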

Related

AWS Lambda Function Issue with Slack Webhook

I'm using an AWS Lambda function to send alerts to our Slack channel. But, due to some unknown issue, I'm not getting the Slack alert, and I'm not getting any kind of error message from the Lambda function either. The logs show that the function ran successfully without any error, but I never receive an alert.
Code:
import json, sys, csv, os

import requests


def lambda_handler(event, context):
    def Send2Slack(message):
        if __name__ == '__main__':
            print('inside slack function')
            url = "webhook_URL"
            title = (f"New Incoming Message")
            slack_data = {
                "username": "abc",
                "channel": "xyz",
                "attachments": [
                    {
                        "color": "#ECB22E",
                        "fields": [
                            {
                                "title": title,
                                "value": message,
                                "short": "false",
                            }
                        ]
                    }
                ]
            }
            byte_length = str(sys.getsizeof(slack_data))
            headers = {'Content-Type': "application/json", 'Content-Length': byte_length}
            response = requests.post(url, data=json.dumps(slack_data), headers=headers)
            if response.status_code != 200:
                raise Exception(response.status_code, response.text)

    output = "Hello Slack "
    Send2Slack(output)
Please let me know where I'm going wrong and help me fix this issue.
I was able to answer this issue myself.
def Send2Slack(message):
    if __name__ == '__main__':

Once I removed if __name__ == '__main__': from the Send2Slack function, it worked; otherwise execution never got into the function. In Lambda the handler module is imported rather than run as a script, so __name__ is the module name, not '__main__', and the guarded body is never reached.
Thanks for all your help.
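For reference, a trimmed sketch of the function with the guard removed (the webhook URL is still a placeholder, as in the question):

import json

import requests


def Send2Slack(message):
    # No __name__ guard here: inside Lambda, __name__ is the module name,
    # so the guarded body would never run.
    url = "webhook_URL"  # placeholder
    slack_data = {
        "username": "abc",
        "channel": "xyz",
        "attachments": [
            {"color": "#ECB22E",
             "fields": [{"title": "New Incoming Message",
                         "value": message,
                         "short": "false"}]}
        ]
    }
    # requests computes Content-Length itself; note sys.getsizeof(dict)
    # is the dict's in-memory size, not the JSON byte length.
    headers = {'Content-Type': "application/json"}
    response = requests.post(url, data=json.dumps(slack_data), headers=headers)
    if response.status_code != 200:
        raise Exception(response.status_code, response.text)


def lambda_handler(event, context):
    Send2Slack("Hello Slack ")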

Flask API - create nested json response group by field single table

I have a basic API set up to do a basic POST and GET from a single table. I want to create a nested response, though, grouped by force_element_type.
model.py
from db import db
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy import text as sa_text


class ForceElementModel(db.Model):
    __tablename__ = 'force_element'
    __table_args__ = {'schema': 'force_element'}

    force_element_id = db.Column(UUID(as_uuid=True), primary_key=True, server_default=sa_text("uuid_generate_v4()"))
    name = db.Column(db.String(100), nullable=False)
    force_element_type = db.Column(db.String(20), nullable=False)

    def __init__(self, name, force_element_type):
        self.name = name
        self.force_element_type = force_element_type

    def json(self):
        return {'name': self.name, 'force_element_type': self.force_element_type}

    @classmethod
    def find_by_name(cls, name):
        return cls.query.filter_by(name=name).first()  # simple TOP 1 select

    def save_to_db(self):  # Upserting data
        db.session.add(self)
        db.session.commit()

    def delete_from_db(self):
        db.session.delete(self)
        db.session.commit()
resource.py
from flask_restful import Resource, reqparse
# from flask_jwt import jwt_required
from models.force_element import ForceElementModel


class ForceElement(Resource):
    parser = reqparse.RequestParser()  # only allow price changes, no name changes allowed
    parser.add_argument('force_element_type', type=str, required=True, help='This field cannot be left blank')

    # @jwt_required()
    def post(self, name):
        if ForceElementModel.find_by_name(name):
            return {'message': "A Force Element with name '{}' already exists.".format(name)}, 400
        data = ForceElement.parser.parse_args()
        force_element = ForceElementModel(name, data['force_element_type'])
        try:
            force_element.save_to_db()
        except Exception:
            return {"message": "An error occurred inserting the item."}, 500
        return force_element.json(), 201


class ForceElementList(Resource):
    # @jwt_required()
    def get(self):
        return {'force_elements': [force_element.json() for force_element in ForceElementModel.query.all()]}


class ForceElementType(Resource):
    # @jwt_required()
    def get(self):
        pass  # this is the endpoint I don't know how to implement
The GET endpoint using ForceElementList returns:
{
    "force_elements": [
        {
            "name": "San Antonio",
            "force_element_type": "ship"
        },
        {
            "name": "Nimitz",
            "force_element_type": "ship"
        },
        {
            "name": "Nimitz- Starboard",
            "force_element_type": "Crew"
        },
        {
            "name": "Nimitz- Port",
            "force_element_type": "Crew"
        }
    ]
}
I don't know how to group by force_element_type and return:
[
    "ship": [
        {
            "name": "San Antonio",
            "force_element_id": "xxx1"
        },
        {
            "name": "Nimitz",
            "force_element_id": "xxx2"
        }],
    "crew": [
        {
            "name": "Nimitz- Starboard",
            "force_element_id": "yyy1"
        },
        {
            "name": "Nimitz- Port",
            "force_element_id": "yyy2"
        }
    ]
]
How do I create this separate endpoint?
OK, I got there; here is how I did it. Is there a better way?
Lesson one: use an online parser to check the JSON format. This is what I was actually aiming for; the square bracket at the start had me scratching my head for a while:
{
    "ship": [
        {
            "name": "San Antonio",
            "force_element_id": "xxx1"
        },
        {
            "name": "Nimitz",
            "force_element_id": "xxx2"
        }
    ],
    "crew": [
        {
            "name": "Nimitz- Starboard",
            "force_element_id": "yyy1"
        },
        {
            "name": "Nimitz- Port",
            "force_element_id": "yyy2"
        }
    ]
}
This code creates the correct format for the output:
class ForceElementType(Resource):
    # @jwt_required()
    def get(self):
        types = {}
        force_elements = ForceElementModel.query.order_by(ForceElementModel.force_element_type.desc()).all()
        for force_element in force_elements:
            nested = {'name': force_element.name, 'force_element_id': str(force_element.force_element_id)}
            print(nested)
            if force_element.force_element_type not in types:
                types[force_element.force_element_type] = []
            types[force_element.force_element_type].append(nested)
        response = types
        return response  # the original snippet stopped before returning the response
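On "is there a better way": the same grouping can be written a bit more tightly with collections.defaultdict. A sketch with the same output, not a tested drop-in:

from collections import defaultdict

from flask_restful import Resource

from models.force_element import ForceElementModel


class ForceElementType(Resource):
    def get(self):
        grouped = defaultdict(list)
        # Iterating the query directly avoids the intermediate .all() list.
        for fe in ForceElementModel.query.order_by(ForceElementModel.force_element_type.desc()):
            grouped[fe.force_element_type].append(
                {'name': fe.name, 'force_element_id': str(fe.force_element_id)}
            )
        return dict(grouped)  # plain dict so Flask-RESTful serializes it as JSON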

moto testing for ecs

I have a Python method that lists certain ECS Fargate services based on some tags and names, using a boto3 paginator:
def list_service_name(environment,
                      resource_owner_name,
                      ecs_client):
    list_of_service = list()
    cluster_name = "my cluster name " + environment
    target_string = "-somedummy"
    resource_owner_tag = resource_owner_name
    service_paginator = ecs_client.get_paginator('list_services')
    for page in service_paginator.paginate(cluster=cluster_name,
                                           launchType='FARGATE'):
        for service in page['serviceArns']:
            response = ecs_client.list_tags_for_resource(resourceArn=service)
            for tags in response['tags']:
                if tags['key'] == 'ResourceOwner' and \
                        tags['value'] == resource_owner_tag and \
                        service.endswith(target_string):
                    list_of_service.append(service)
    return list_of_service
Now I would like to test this using moto.
I have therefore created a conftest.py where I define all the moto mock connections to services such as ECS. I have also created a test_main.py file, shown below, where I create dummy ECS Fargate services. But for some reason, when I assert the outcome of the main method in the test file, the returned service list is empty, whereas I would expect to see test-service-for-successful. Is there something I'm missing, or is pagination still not available in moto?
import pytest

from my_code.main import *


@pytest.fixture
def env_name():
    return "local"


@pytest.fixture
def cluster_name(env_name):
    return "my dummy" + env_name + "cluster_name"


@pytest.fixture
def successful_service_name():
    return "test-service-for-successful"


@pytest.fixture
def un_successful_service_name():
    return "test-service-for-un-successful"


@pytest.fixture
def resource_owner():
    return "dummy_tag"


@pytest.fixture
def test_create_service(ecs_client,
                        cluster_name,
                        successful_service_name,
                        un_successful_service_name,
                        resource_owner):
    _ = ecs_client.create_cluster(clusterName=cluster_name)
    _ = ecs_client.register_task_definition(
        family="test_ecs_task",
        containerDefinitions=[
            {
                "name": "hello_world",
                "image": "docker/hello-world:latest",
                "cpu": 1024,
                "memory": 400,
                "essential": True,
                "environment": [
                    {"name": "environment", "value": "local"}
                ],
                "logConfiguration": {"logDriver": "json-file"},
            }
        ],
    )
    ecs_client.create_service(
        cluster=cluster_name,
        serviceName=successful_service_name,
        taskDefinition="test_ecs_task",
        desiredCount=0,
        launchType="FARGATE",
        tags=[{"key": "resource_owner", "value": resource_owner}]
    )
    ecs_client.create_service(
        cluster=cluster_name,
        serviceName=un_successful_service_name,
        taskDefinition="test_ecs_task",
        desiredCount=0,
        launchType="FARGATE",
        tags=[{"key": "resource_owner", "value": resource_owner}]
    )
    yield
def test_list_service_name(env_name,
                           resource_owner,
                           ecs_client):
    objects = list_service_name(env_name,
                                resource_owner,
                                ecs_client)
    # here objects is []
    # whereas I should see successful_service_name
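A few mismatches stand out here, independent of moto's pagination support: the test never requests the test_create_service fixture, so no services are created; the fixture's cluster name ("my dummy" + env + "cluster_name") differs from the one list_service_name builds ("my cluster name " + environment); the tag key is resource_owner while the function filters on ResourceOwner; and neither service name ends with the "-somedummy" suffix the function requires. A sketch of a test with those aligned (fixture and service names here are illustrative, and ecs_client still comes from the question's conftest.py):

import pytest

from my_code.main import list_service_name


@pytest.fixture
def aligned_services(ecs_client, env_name, resource_owner):
    # Cluster name must match what list_service_name builds.
    cluster = "my cluster name " + env_name
    ecs_client.create_cluster(clusterName=cluster)
    ecs_client.register_task_definition(
        family="test_ecs_task",
        containerDefinitions=[{"name": "hello_world",
                               "image": "docker/hello-world:latest",
                               "cpu": 1024, "memory": 400, "essential": True}],
    )
    ecs_client.create_service(
        cluster=cluster,
        serviceName="test-service-for-successful-somedummy",  # ends with the target suffix
        taskDefinition="test_ecs_task",
        desiredCount=0,
        launchType="FARGATE",
        tags=[{"key": "ResourceOwner", "value": resource_owner}],  # key matches the filter
    )


def test_list_service_name(env_name, resource_owner, ecs_client, aligned_services):
    result = list_service_name(env_name, resource_owner, ecs_client)
    assert len(result) == 1
    assert result[0].endswith("-somedummy")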

AttributeError: 'Connection' object has no attribute 'setdefault' || self.__connection_settings.setdefault("charset", "utf8")

I am getting the error below and am unable to find the cause. Please help me find it. Thanks in advance.
File "/home/ec2-user/my_app/env/lib64/python3.7/site-packages/pymysqlreplication/binlogstream.py", line 178, in init
self.__connection_settings.setdefault("charset", "utf8")
AttributeError: 'Connection' object has no attribute 'setdefault'
import json
import boto3
import pymysql
import socket, array
import pandas as pd
from pymysqlreplication import BinLogStreamReader
from pymysqlreplication.row_event import (
    DeleteRowsEvent,
    UpdateRowsEvent,
    WriteRowsEvent,
)

connection = pymysql.connect(host='127.0.0.1', user='root', password='root')


def main():
    kinesis = boto3.client("kinesis", region_name='ap-south-1')
    stream = BinLogStreamReader(
        connection_settings=connection,
        only_events=[DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent],
        only_schemas=["test"],
        log_file='mysql-bin.000003',
        log_pos=2566,
        server_id=100)
    for binlogevent in stream:
        for row in binlogevent.rows:
            event = {"schema": binlogevent.schema,
                     "table": binlogevent.table,
                     "type": type(binlogevent).__name__,
                     "row": row
                     }
            kinesis.put_record(StreamName="<TestStream>", Data=json.dumps(event), PartitionKey="default")
            print(json.dumps(event))


if __name__ == "__main__":
    main()
connection_settings should be a dictionary, not a pymysql Connection object. For example:
connection = {
    "host": "127.0.0.1",
    "port": 3306,
    "user": "root",
    "passwd": "root"
}
An example of correct usage of BinLogStreamReader is here.
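Concretely, the call from the question would become something like this (a sketch using the same binlog coordinates as above; the dict simply replaces the Connection object):

from pymysqlreplication import BinLogStreamReader
from pymysqlreplication.row_event import DeleteRowsEvent, UpdateRowsEvent, WriteRowsEvent

connection_settings = {
    "host": "127.0.0.1",
    "port": 3306,
    "user": "root",
    "passwd": "root",
}

stream = BinLogStreamReader(
    connection_settings=connection_settings,  # a dict, not pymysql.connect(...)
    only_events=[DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent],
    only_schemas=["test"],
    log_file='mysql-bin.000003',
    log_pos=2566,
    server_id=100,
)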

Returning large data from RPC (Crossbar + Autobahn|Python)

I am trying to transmit large amounts of data over WebSockets using Crossbar/Autobahn's RPC. My setup is as follows:
Python 2.7
A crossbar router (version 17.8.1.post1)
A back-end that will try to send a large pandas DataFrame as a json string
A front-end that will want to receive this string
In essence my front-end is trying to call a function that will return a large string.
class MyComponent(ApplicationSession):
    @inlineCallbacks
    def onJoin(self, details):
        print("session ready")
        try:
            res = yield self.call(u'data.get')
And I get this error:
2017-08-09T16:38:10+0200 session closed with reason wamp.close.transport_lost [WAMP transport was lost without closing the session before]
2017-08-09T16:38:10+0200 Cancelling 1 outstanding requests
2017-08-09T16:38:10+0200 call error: ApplicationError(error=<wamp.close.transport_lost>, args=[u'WAMP transport was lost without closing the session before'], kwargs={}, enc_algo=None)
It seems Crossbar is kicking me out because my client session looks dead to it, but I thought that Autobahn would chunk my data and that the call would not block the client reactor.
I enabled a few things in my Crossbar configuration to improve WebSocket handling; thanks to that I was able to transmit a larger amount of data, but eventually I would still hit a limit (config file largely copied and pasted from Sam & Max):
"options": {
"enable_webstatus": false,
"max_frame_size": 16777216,
"auto_fragment_size": 65536,
"fail_by_drop": true,
"open_handshake_timeout": 2500,
"close_handshake_timeout": 1000,
"auto_ping_interval": 10000,
"auto_ping_timeout": 5000,
"auto_ping_size": 4,
"compression": {
"deflate": {
"request_no_context_takeover": false,
"request_max_window_bits": 11,
"no_context_takeover": false,
"max_window_bits": 11,
"memory_level": 4
}
}
}
Any ideas, takes, or things that I am doing wrong?
Thank you.
Client code:
from __future__ import print_function

import pandas as pd
from autobahn.twisted.wamp import ApplicationSession
from twisted.internet.defer import inlineCallbacks


class MyComponent(ApplicationSession):
    @inlineCallbacks
    def onJoin(self, details):
        print("session ready")
        try:
            res = yield self.call(u'data.get')
            print('Got the data')
            data = pd.read_json(res)
            print("call result: {}".format(data.head()))
            print("call result shape: {0}, {1}".format(*data.shape))
        except Exception as e:
            print("call error: {0}".format(e))


if __name__ == "__main__":
    from autobahn.twisted.wamp import ApplicationRunner
    runner = ApplicationRunner(url=u"ws://127.0.0.1:8080/ws", realm=u"realm1")
    runner.run(MyComponent)
Backend code:
from __future__ import absolute_import, division, print_function

from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.wamp import ApplicationSession
from twisted.internet import reactor, defer, threads

# Imports
import pandas as pd


def get_data():
    """Returns a DataFrame of stuff as a JSON

    :return: str, data as a JSON string
    """
    data = pd.DataFrame({
        'col1': pd.np.arange(1000000),
        'col2': "I'm big",
        'col3': 'Like really big',
    })
    print("call result shape: {0}, {1}".format(*data.shape))
    print(data.memory_usage().sum())
    print(data.head())
    return data.to_json()


class MyBackend(ApplicationSession):
    def __init__(self, config):
        ApplicationSession.__init__(self, config)

    @inlineCallbacks
    def onJoin(self, details):
        # Register a procedure for remote calling
        @inlineCallbacks
        def async_daily_price(eqt_list):
            res = yield threads.deferToThread(get_data)
            defer.returnValue(res)

        yield self.register(async_daily_price, u'data.get')


if __name__ == "__main__":
    from autobahn.twisted.wamp import ApplicationRunner
    runner = ApplicationRunner(url=u"ws://127.0.0.1:8080/ws", realm=u"realm1")
    runner.run(MyBackend)
Configuration:
{
    "version": 2,
    "controller": {},
    "workers": [
        {
            "type": "router",
            "realms": [
                {
                    "name": "realm1",
                    "roles": [
                        {
                            "name": "anonymous",
                            "permissions": [
                                {
                                    "uri": "",
                                    "match": "prefix",
                                    "allow": {
                                        "call": true,
                                        "register": true,
                                        "publish": true,
                                        "subscribe": true
                                    },
                                    "disclose": {
                                        "caller": false,
                                        "publisher": false
                                    },
                                    "cache": true
                                }
                            ]
                        }
                    ]
                }
            ],
            "transports": [
                {
                    "type": "universal",
                    "endpoint": {
                        "type": "tcp",
                        "port": 8080
                    },
                    "rawsocket": {},
                    "websocket": {
                        "ws": {
                            "type": "websocket",
                            "options": {
                                "enable_webstatus": false,
                                "max_frame_size": 16777216,
                                "auto_fragment_size": 65536,
                                "fail_by_drop": true,
                                "open_handshake_timeout": 2500,
                                "close_handshake_timeout": 1000,
                                "auto_ping_interval": 10000,
                                "auto_ping_timeout": 5000,
                                "auto_ping_size": 4,
                                "compression": {
                                    "deflate": {
                                        "request_no_context_takeover": false,
                                        "request_max_window_bits": 11,
                                        "no_context_takeover": false,
                                        "max_window_bits": 11,
                                        "memory_level": 4
                                    }
                                }
                            }
                        }
                    },
                    "web": {
                        "paths": {
                            "/": {
                                "type": "static"
                            }
                        }
                    }
                }
            ]
        }
    ]
}
The solution suggested by the Crossbar.io group was to use the progressive result option of the RPC.
A full working example is located at https://github.com/crossbario/autobahn-python/tree/master/examples/twisted/wamp/rpc/progress
In my code I had to add chunking of the result in the backend:
step = 10000
if details.progress and len(res) > step:
    for i in xrange(0, len(res), step):
        details.progress(res[i:i + step])
else:
    defer.returnValue(res)
And in the caller:
res = yield self.call(
    u'data.get',
    options=CallOptions(
        on_progress=partial(on_progress, res=res_list)
    )
)
where my on_progress function appends the chunks to a result list:
def on_progress(x, res):
    res.append(x)
Picking the right chunk size will do the trick.
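The snippets above leave out how details reaches the backend procedure and how the caller reassembles the chunks. A sketch of how they might fit together with the 2017-era Autobahn API (get_data and on_progress are the functions defined above; res_list, the join, and the details_arg registration are assumptions, not part of the original answer):

from functools import partial

from autobahn.twisted.wamp import ApplicationSession
from autobahn.wamp.types import CallOptions, RegisterOptions
from twisted.internet import defer, threads
from twisted.internet.defer import inlineCallbacks


class MyBackend(ApplicationSession):
    @inlineCallbacks
    def onJoin(self, details):
        @inlineCallbacks
        def async_daily_price(eqt_list, details=None):
            res = yield threads.deferToThread(get_data)
            step = 10000
            if details.progress and len(res) > step:
                for i in xrange(0, len(res), step):
                    details.progress(res[i:i + step])  # stream chunk by chunk
            else:
                defer.returnValue(res)                 # small enough to send whole

        # details_arg asks Autobahn to inject CallDetails (incl. .progress)
        yield self.register(async_daily_price, u'data.get',
                            options=RegisterOptions(details_arg='details'))


class MyComponent(ApplicationSession):
    @inlineCallbacks
    def onJoin(self, details):
        res_list = []
        final = yield self.call(
            u'data.get',
            options=CallOptions(on_progress=partial(on_progress, res=res_list)),
        )
        if final is not None:
            res_list.append(final)  # the non-progressive path returns one piece
        res = ''.join(res_list)     # chunks are slices of a single JSON string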
