Store HTTP and WebSocket updates in parallel - python-3.x

I want to receive HTTP updates and WebSocket updates in parallel.
For this I use threading, but I still receive only updates from the WebSocket. It looks like
http_req_update is blocked by the endless stream of messages arriving in on_message.
Could someone please help me with it?
import json
import time
import threading

import requests
from websocket import WebSocketApp


class BithumbWebSocketApp(WebSocketApp):
    def __init__(self, url, **kwargs):
        super(BithumbWebSocketApp, self).__init__(url, **kwargs)

    def _request_orderbookdepth(self, channel, event=None, payload=None, auth_required=True):
        current_time = int(time.time())
        data = {
            "time": current_time,
            "type": "orderbookdepth",
            "symbols": ["BTC_KRW"],
        }
        data = json.dumps(data)
        print('request1', data)
        self.send(data)

    def subscribe(self, channel, payload=None, auth_required=True):
        self._request_orderbookdepth(channel, "subscribe", payload, auth_required)

    def unsubscribe(self, channel, payload=None, auth_required=True):
        self._request_orderbookdepth(channel, "unsubscribe", payload, auth_required)


def on_open(ws):
    print('Connected')
    ws.subscribe("wss://pubwss.bithumb.com/pub/ws", "BTC_KRW", False)


msg_lst = []


def on_message(ws, message):
    print('message', message)
    msg = json.loads(message)
    print('msg1: ', msg)
    msg_lst.append({
        "msg": msg,
        "type": msg['type'],
        "list": msg['content']['list'],
        "datetime": msg['content']['datetime'],
    })
    lst_to_json = json.dumps(msg_lst)


def ws_update():
    app = BithumbWebSocketApp("wss://pubwss.bithumb.com/pub/ws",
                              on_open=on_open,
                              on_message=on_message)
    app.run_forever(ping_interval=5)


def http_req_update():
    currency = 'BTC_KRW'  # ALL
    url = f"https://api.bithumb.com/public/orderbook/{currency}"
    headers = {
        "accept": "application/json",
        "content-type": "application/json"
    }
    response = requests.get(url, headers=headers)
    print('snapshot_response', response.text)


if __name__ == "__main__":
    trd1 = threading.Thread(target=ws_update)
    trd2 = threading.Thread(target=http_req_update)
    trd1.start()
    trd2.start()
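Worth noting: http_req_update as written performs a single GET and returns, so its thread exits almost immediately while the WebSocket thread keeps printing forever; the HTTP side is not actually blocked. A minimal sketch of a polling variant (the 5-second interval is an arbitrary assumption):

def http_req_update():
    # Poll the REST snapshot endpoint repeatedly so this thread
    # keeps producing updates alongside the WebSocket thread.
    currency = 'BTC_KRW'
    url = f"https://api.bithumb.com/public/orderbook/{currency}"
    headers = {"accept": "application/json"}
    while True:
        response = requests.get(url, headers=headers)
        print('snapshot_response', response.text)
        time.sleep(5)  # polling interval; adjust as needed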

Related

Count successful and unsuccessful POST requests for asynchronous POST calls

I need help implementing logic to count the number of successful asynchronous POST calls (status_code == 200) as well as failed calls (status_code != 200).
I am new to coroutines. I would appreciate it if someone could suggest a better way of making an asynchronous POST call that can be retried, polled for status, and that can emit metrics for successful POST requests as well.
Following is my code:
asyncio.get_event_loop().run_in_executor(
    None,
    self.publish_actual,
    event_name,
    custom_payload,
    event_message_params,
)
which calls publish_actual:
def publish_actual(
    self,
    event_name: str,
    custom_payload={},
    event_message_params=[],
):
    """Submits a POST request using the requests library

    :param event_name: name of the event
    :type event_name: str
    :param key: key for a particular application
    :param custom_payload: custom_payload, defaults to {}
    :type custom_payload: dict, optional
    :param event_message_params: event_message_params, defaults to []
    :type event_message_params: list, optional
    """
    json_data = {}
    path = f"/some/path"
    self.request(path, "POST", json=json_data)
which calls the following request function:
def request(self, api_path, method="GET", **kwargs):
    try:
        self._validate_configuration()
        api_endpoint = self.service_uri.to_url(api_path)
        logger.debug(api_endpoint)
        headers = {"Content-Type": "application/json"}
        if "headers" in kwargs and kwargs["headers"]:
            # merge caller-supplied headers; pop so 'headers' is not also
            # passed a second time through **kwargs below
            headers.update(kwargs.pop("headers"))
        begin = datetime.now()

        def build_success_metrics(response, *args, **kwargs):
            tags = {
                "name": "success_metrics",
                "domain": api_endpoint,
                "status_code": 200,
            }
            build_metrics(tags)

        def check_for_errors(response, *args, **kwargs):
            response.raise_for_status()

        response = self.session.request(
            method=method,
            url=api_endpoint,
            headers=headers,
            timeout=self.timeout,
            hooks={"response": [build_success_metrics, check_for_errors]},
            **kwargs,
        )
        end = datetime.now()
        logger.debug(
            f"'{method}' request against endpoint '{api_endpoint}' took "
            f"{round((end - begin).total_seconds() * 1000, 3)} ms"
        )
        logger.debug(f"response: {response}")
    except RequestException as e:
        tags = {
            "name": "error_metrics",
            "domain": api_endpoint,
            "exception_class": e.__class__.__name__,
        }
        build_metrics(tags)
        return f"Exception occurred: {e}"
Let me know if anything else is required from my end to explain what exactly I have done and what I am trying to achieve.
There is not much await/async in your example, so I've just addressed the counting part of your question in general asyncio terms. asyncio.Queue is a good fit because it lets you cleanly separate the counting from the requests that produce the results.
import asyncio
import aiohttp


class Count():
    def __init__(self, queue: asyncio.Queue):
        self.queue = queue
        self.good = 0
        self.bad = 0

    async def count(self):
        while True:
            result = await self.queue.get()
            if result == 'Exit':
                return
            if result == 200:
                self.good += 1
            else:
                self.bad += 1


async def request(q: asyncio.Queue):
    async with aiohttp.ClientSession() as session:
        for _ in range(5):  # just poll 5 times in this instance
            await asyncio.sleep(0.1)
            async with session.get(
                'https://httpbin.org/status/200%2C500', ssl=False
            ) as response:
                q.put_nowait(response.status)
    q.put_nowait('Exit')


async def main():
    q = asyncio.Queue()
    cnt = Count(q)
    tasks = [cnt.count(), request(q)]
    await asyncio.gather(*[asyncio.create_task(t) for t in tasks])
    print(cnt.good, cnt.bad)


if __name__ == "__main__":
    asyncio.run(main())
The output is random given the httpbin responses, but the two counts should add up to 5:
4 1
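If you would rather keep the requests/run_in_executor approach from the question instead of switching to aiohttp, a minimal sketch along the same lines (the httpbin URL is a stand-in for your real endpoint) awaits the executor futures and counts the returned status codes directly:

import asyncio
import requests

def post_once(url):
    # Blocking call, executed in the default thread pool executor.
    return requests.post(url, json={}).status_code

async def main():
    url = 'https://httpbin.org/status/200%2C500'  # stand-in endpoint
    loop = asyncio.get_running_loop()
    futures = [loop.run_in_executor(None, post_once, url) for _ in range(5)]
    statuses = await asyncio.gather(*futures)
    good = sum(1 for s in statuses if s == 200)
    print(good, len(statuses) - good)

if __name__ == "__main__":
    asyncio.run(main())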

Using json.dumps and json_encode with requests.post, server returns Response [500]; server uses tornado.escape.json_decode

Using Python 3.8.10 and requests 2.21.0.
The server's POST handler is below:
from tornado.escape import json_decode

def get_payload(self):
    return json_decode(self.request.body)

async def post(self):
    """ acquire device """
    data = self.get_payload()
    udid = data["udid"]
    idle_timeout = data.get('idleTimeout', 600)
    email = self.current_user.email
    try:
        await D(udid).acquire(email, idle_timeout)
        self.write_json({
            "success": True,
            "description": "Device successfully added"
        })
    except AcquireError as e:
        self.set_status(403)  # forbidden
        self.write_json({
            "success": False,
            "description": "Device add failed: " + str(e),
        })
The client POST function is below:
def post_occupied_device(self, udid=None, idleTimeout=None):
    url = self.SERVER_URL + '/api/v1/user/devices'
    occupied_dict = {
        "udid": udid
    }
    try:
        resp = requests.post(url, json=json.dumps(occupied_dict), headers=self.headers)
    except requests.exceptions.RequestException as e:
        print(e)
        return "failed, network error"
    print(resp)
The server returns: <Response [500]>
Then I changed the json data to this:
resp = requests.post(url, json=json_encode(occupied_dict), headers=self.headers)
The server returns: <Response [500]>
Then I changed the json data to this:
requests.post(url, json=occupied_dict, headers=self.headers)
It is OK, the server returns: <Response [200]>
I checked the result and type of json_encode versus json.dumps:
import json
from tornado.escape import json_encode

occupied_json = {
    "udid": "DFEFDGDSGDF"
}
occupied_dict = {
    'udid': "DFEFDGDSGDF"
}
req1 = json.dumps(occupied_json)
req2 = json_encode(occupied_json)
print(req1, type(req1))
print(req2, type(req2))
they are the same:
{"udid": "DFEFDGDSGDF"} <class 'str'>
{"udid": "DFEFDGDSGDF"} <class 'str'>
So, why?
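A likely explanation, sketched below: the json= parameter of requests.post serializes its argument with json.dumps itself, so passing an already-serialized string double-encodes the body. On the server, json_decode then returns a str instead of a dict, and data["udid"] raises a TypeError, which surfaces as the 500:

import json

occupied_dict = {"udid": "DFEFDGDSGDF"}

# What requests sends for json=json.dumps(occupied_dict): the string is
# serialized again, producing a JSON-encoded string, not a JSON object.
body = json.dumps(json.dumps(occupied_dict))
print(body)  # "{\"udid\": \"DFEFDGDSGDF\"}"

# What the server's json_decode recovers from that body:
data = json.loads(body)
print(type(data))  # <class 'str'>, so data["udid"] raises TypeError

With json=occupied_dict, the dict is serialized exactly once, json_decode returns a dict, and the handler succeeds.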

Google App Engine request log including severity

I am trying to implement the method in this Stack Overflow answer to get my logs grouped by request, showing the highest severity level of the child logs on the request.
This is what I have so far:
custom_logger.py:
import inspect
import json
import os

from flask import g, request
from google.cloud import logging as gcp_logging
from google.cloud.logging.resource import Resource

LOG_LEVELS = ('DEFAULT', 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL')

resource = Resource(type='gae_app',
                    labels={'project_id': os.environ['GOOGLE_CLOUD_PROJECT'],
                            'module_id': os.environ['GAE_SERVICE'],
                            'version_id': os.environ['GAE_VERSION']})

client = gcp_logging.Client()
custom_logger = client.logger('custom_logger')
request_logger = client.logger('request_logger')


def log_request(response):
    trace_id = get_trace_id()
    severity = LOG_LEVELS[g.get('log_level', 0)]
    request_info = {
        'requestMethod': request.method,
        'requestUrl': request.url,
        'status': response.status_code,
        'userAgent': request.headers.get('USER-AGENT'),
        'responseSize': response.content_length,
        'latency': g.request_duration(),
        'remoteIp': request.remote_addr
    }
    if request.method == 'POST':
        payload = request.json or json.loads(request.data.decode())
    else:
        payload = {}
    request_logger.log_struct(payload,
                              trace=trace_id,
                              http_request=request_info,
                              severity=severity)


def default(text):
    _log(text)


def log(text):
    return default(text)


def debug(text, *args):
    _log(text, *args)


def info(text, *args):
    _log(text, *args)


def warning(text, *args):
    _log(text, *args)


def warn(text, *args):
    return warning(text, *args)


def error(text, *args):
    _log(text, *args)


def critical(text, *args):
    _log(text, *args)


def _log(text, *args):
    trace_id = get_trace_id()
    severity = inspect.stack()[1][3].upper()
    new_level = LOG_LEVELS.index(severity)
    previous_level = g.get('log_level', 0)
    g.log_level = max(previous_level, new_level)
    message = text % args
    custom_logger.log_text(message, resource=resource,
                           severity=severity, trace=trace_id)


def get_trace_id():
    return (f"projects/{os.environ['GOOGLE_CLOUD_PROJECT']}/traces/"
            f"{request.headers['X-Cloud-Trace-Context'].split('/')[0]}")
main.py:
import json
import time

from flask import Flask, g, request, make_response
from flask_cors import CORS

import custom_logger as logging

app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'


@app.before_request
def setup_timing():
    g.request_start_time = time.time()
    g.request_duration = lambda: f'{(time.time() - g.request_start_time):.5f}s'


@app.after_request
def log_request(response):
    logging.log_request(response)
    return response


@app.route('/', methods=['GET', 'OPTIONS'])
def _test():
    logging.debug('DEBUG %s', 'TEST')
    logging.info('INFO %s', 'TEST')
    logging.warning('WARNING %s', 'TEST')
    logging.error('ERROR %s', 'TEST')
    logging.critical('CRITICAL %s', 'TEST')
    return 'Success'
It seems like the request_logger.log_struct call does not result in any entry being added to the logs. If I add a request_logger.log_text call before the request_logger.log_struct call, then that entry does end up in the logs. Why do I not see the results of request_logger.log_struct in the logs?
After adding the "resource" property to the log_struct call in the log_request function, I can see the requests in the Logs Viewer. The logs are now grouped, and the highest severity level is added to the parent log entry.
request_logger.log_struct(payload,
                          resource=resource,
                          trace=trace_id,
                          http_request=request_info,
                          severity=severity)

Postman able to use GET request properly but my program cannot

I have GET and PUT endpoints built below:
from flask import Flask
from flask_restful import Api, Resource, reqparse

app = Flask(__name__)
api = Api(app)

userStorage = [
    {
        "id": "1234",
        "currentBot": "BestBot"
    }
]


class User(Resource):
    def get(self, id):
        for user in userStorage:
            if(id == user["id"]):
                return user, 200
        return "User not found", 404

    def put(self, id):
        parser = reqparse.RequestParser()
        parser.add_argument("currentBot")
        args = parser.parse_args()
        for user in userStorage:
            if(id == user["id"]):
                user["currentBot"] = args["currentBot"]
                return user, 200
        user = {
            "id": id,
            "currentBot": args["currentBot"]
        }
        userStorage.append(user)
        return user, 201

    def delete(self, id):
        global userStorage
        userStorage = [user for user in userStorage if user["id"] != id]
        return "{} is deleted.".format(id), 200


api.add_resource(User, "/user/<string:id>")
app.run(debug=True, port=4000)
Postman properly gets a 200 response when I do a simple GET request, but when I try to do the request through my own program it returns a 404:
import requests

payload2Storage = {
    "currentBot": "BestBot"
}
headers = {"Content-Type": "application/json"}
params = {
    "id": "1234"
}
# response = requests.request("PUT", "http://127.0.0.1:4000/user/", data=payload2Storage, params=params, headers=headers)
response2 = requests.request("GET", "http://127.0.0.1:4000/user/", params=params, headers=headers)
Is there something wrong with my request to get the info from userStorage?
In the client code, changing from 127.0.0.1 to localhost worked for me. Try this:
response2 = requests.request("GET", "http://localhost:4000/user/", params=params, headers=headers)
OR in the server code, bind the server to 127.0.0.1 explicitly via the host argument like this:
app.run(debug=True, port=4000, host='127.0.0.1')
Another thing to check: if the route were registered as /user/<int:id>, id would arrive as an int while user["id"] is a str, so the comparison would always fail; in that case, convert before comparing:
def get(self, id):
    for user in userStorage:
        if(id == int(user["id"])):
            return user, 200
    return "User not found", 404

Origin http://localhost:8000 is not allowed by Access-Control-Allow-Origin when sending a request to Flask

I am sending a GET request from localhost:8000 to Flask:
$(document).ready(function() {
    $('#test').click(function() {
        $.getJSON("http://localhost:5000/test/", function() {
        }).done(function() {
            document.location = "http://localhost:5000";
        }).fail(function() {
            console.log("server not ready.");
        });
    });
});
and in "server.py" I am handling GET like:
import app

@server.route('/test/', methods=['GET'])
def test():
    print "in test status check"
    return jsonify({'status': 'OK'})
However I am getting this error:
XMLHttpRequest cannot load http://127.0.0.1:5000/test/. Origin http://127.0.0.1:8000 is not allowed by Access-Control-Allow-Origin.
In Flask you can create a custom decorator to control the Access-Control-Allow-Origin policy. This snippet may help you: http://flask.pocoo.org/snippets/56/
Code from the snippet:
from datetime import timedelta
from flask import make_response, request, current_app
from functools import update_wrapper


def crossdomain(origin=None, methods=None, headers=None,
                max_age=21600, attach_to_all=True,
                automatic_options=True):
    if methods is not None:
        methods = ', '.join(sorted(x.upper() for x in methods))
    if headers is not None and not isinstance(headers, basestring):
        headers = ', '.join(x.upper() for x in headers)
    if not isinstance(origin, basestring):
        origin = ', '.join(origin)
    if isinstance(max_age, timedelta):
        max_age = max_age.total_seconds()

    def get_methods():
        if methods is not None:
            return methods
        options_resp = current_app.make_default_options_response()
        return options_resp.headers['allow']

    def decorator(f):
        def wrapped_function(*args, **kwargs):
            if automatic_options and request.method == 'OPTIONS':
                resp = current_app.make_default_options_response()
            else:
                resp = make_response(f(*args, **kwargs))
            if not attach_to_all and request.method != 'OPTIONS':
                return resp

            h = resp.headers
            h['Access-Control-Allow-Origin'] = origin
            h['Access-Control-Allow-Methods'] = get_methods()
            h['Access-Control-Max-Age'] = str(max_age)
            if headers is not None:
                h['Access-Control-Allow-Headers'] = headers
            return resp

        f.provide_automatic_options = False
        return update_wrapper(wrapped_function, f)
    return decorator
And here is how you can use it:
@app.route('/my_service')
@crossdomain(origin='*')
def my_service():
    return jsonify(foo='cross domain ftw')
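For what it's worth, the same effect is commonly achieved today with the flask-cors extension (the server in the Postman question above already uses it); a minimal sketch, assuming flask-cors is installed:

from flask import Flask, jsonify
from flask_cors import CORS

app = Flask(__name__)
CORS(app)  # by default adds Access-Control-Allow-Origin: * to all routes

@app.route('/test/')
def test():
    return jsonify({'status': 'OK'})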
