Dear community,
I've tried to execute an async def in a FastAPI app.
The workflow is: create a FastAPI service that receives requests from end users and forwards them to another service, such as a DB Writer service.
First, I created an async def that sends the request using the aiosonic library.
Here's the code:
import aiosonic
from aiosonic.timeout import Timeouts

async def db_writer_requests(arrival_input, prediction_out):
    client = aiosonic.HTTPClient()
    timeouts_settings = Timeouts(sock_connect=10, sock_read=10)
    await client.post('http://127.0.0.1:8082/api/motor/writer/test1',
                      headers={'Content-Type': 'application/json'},
                      json=arrival_input,
                      timeouts=timeouts_settings)
    client.shutdown()
Here's the main app:
@app.post('/api/motor/test')
async def manager_api(raw_input: arrival_requests):
    depart_json = dict(raw_input)
    inc_preds, model_error = await predict_requests(depart_json)
    if (inc_preds is None) or (inc_preds['status_code'] != 200):
        return inc_preds if model_error is None else model_error
    else:
        mlid = uuid.uuid4()
        pred_output = model_output(
            predict_uuid=str(mlid),
            net_premium=str(inc_preds["result"]["data"]["net_premium"]),
            car_repair=str(inc_preds["result"]["data"]["car_repair"]),
            place_repair=str(inc_preds["result"]["data"]["place_repair"]),
            insurer_tier=str(inc_preds["result"]["data"]["insurer_tier"])
        )
        send_msg = good_response_msg(
            status='OK',
            status_code=200,
            result=data(
                data=pred_output
            )
        )
        await db_writer_requests(depart_json, dict(pred_output))
        return send_msg
When I try to send a request:
Case 1: if I don't use "await", the program does not send the request and the endpoint service shows no incoming call at all.
Case 2: if I use await, it works normally, but if the endpoint service is not available the main service returns "Internal Server Error".
Thanks in advance.
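For case 2, this is the direction I'm experimenting with (a sketch, untested; safe_db_writer_requests is a name I made up): catching connection failures and scheduling the write as a background task, so the main service does not return a 500 when the writer endpoint is down.

import asyncio

async def safe_db_writer_requests(arrival_input, prediction_out):
    # hypothetical wrapper: swallow connection failures so they cannot
    # surface as "Internal Server Error" in the main endpoint
    try:
        await db_writer_requests(arrival_input, prediction_out)
    except Exception as exc:
        print(f"DB writer unavailable, record dropped: {exc}")

# inside manager_api, instead of awaiting the writer inline:
# asyncio.create_task(safe_db_writer_requests(depart_json, dict(pred_output)))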
I'm using the httpx library and asyncio to try to send about 100K GET requests.
I ran the code and received httpx.ConnectError, so I opened Wireshark and saw a lot of messages saying TCP Retransmission TCP Port numbers reused.
Based on the Wireshark capture and the httpx.ConnectError, I added limits = httpx.Limits(max_connections=10000) to cap the number of active connections at 10,000, but I still get that error.
My code:
import asyncio
import json
import httpx

SOME_URL = "some url"
ANOTHER_URL = "another url"
MAX = 10000

async def search():
    guids = [guid for guid in range(688001, 800000)]  # 688001 - 838611
    timeout = httpx.Timeout(None)
    limits = httpx.Limits(max_connections=MAX)
    async with httpx.AsyncClient(timeout=timeout, limits=limits) as client:
        tasks = [client.get(f"{SOME_URL}{guid}", timeout=timeout) for guid in guids]
        blob_list = await asyncio.gather(*tasks)  # <---- error from here !!!!!
        blob_list = [(res, guid) for res, guid in zip(blob_list, guids)]
        guids = [guid for res, guid in blob_list]
        blob_list = [json.loads(res.text)["blob_name"] for res, guid in blob_list]
    async with httpx.AsyncClient(timeout=timeout, limits=limits) as client:
        tasks = [client.get(f"{ANOTHER_URL}{blob}", timeout=timeout) for blob in blob_list]
        game_results = await asyncio.gather(*tasks)  # <---- error from here !!!!!
        game_results = [(res, guid) for res, guid in zip(game_results, guids)]
        game_results = [guid for res, guid in game_results]
    print(game_results)

def main():
    asyncio.run(search())

if __name__ == '__main__':
    main()
This is a minimal version of my code; there are some steps in between the requests that I deleted, but I didn't touch the code that caused the trouble. The lines where I receive the errors are marked with comments (# <---- error from here !!!!!).
Does anyone know how to solve this, or another way to send about 100K GET requests fast?
I managed to solve my problem with the following code (this is not the entire code, only the parts needed to send the requests; I have some stuff in between):
import asyncio
from aiohttp import ClientSession

SOME_URL = "some url"
ANOTHER_URL = "another url"
MAX_SIM_CONNS = 50
worker_responses = []

async def fetch(url, session):
    async with session.get(url) as response:
        return await response.read()

async def fetch_worker(url_queue: asyncio.Queue):
    global worker_responses
    async with ClientSession() as session:
        while True:
            url = await url_queue.get()
            try:
                if url is None:
                    return
                response = await fetch(url, session)
                worker_responses.append(response)
            finally:
                # calling task_done() is necessary for url_queue.join() to work correctly
                url_queue.task_done()

async def fetch_all(base_url: str, range_: range):
    url_queue = asyncio.Queue(maxsize=10000)
    worker_tasks = []
    for i in range(MAX_SIM_CONNS):
        wt = asyncio.create_task(fetch_worker(url_queue))
        worker_tasks.append(wt)
    for i in range_:
        await url_queue.put(f"{base_url}{i}")
    for i in range(MAX_SIM_CONNS):
        # tell the workers that the work is done
        await url_queue.put(None)
    await url_queue.join()
    await asyncio.gather(*worker_tasks)

if __name__ == '__main__':
    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
    asyncio.run(fetch_all(SOME_URL, range(680_842, 840_423)))
    print(worker_responses)
I used aiohttp instead of httpx and an asyncio.Queue to keep RAM usage down, and it worked for me.
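For completeness, here is an alternative sketch (untested, my own variation rather than the code I ran) that stays with httpx and caps the number of in-flight requests with an asyncio.Semaphore instead of switching libraries:

import asyncio
import httpx

CONCURRENCY = 100  # assumption: a few hundred sockets at a time, far below 10,000

async def bounded_get(client, sem, url):
    # the semaphore lets at most CONCURRENCY requests be in flight at once
    async with sem:
        return await client.get(url)

async def fetch_many(urls):
    sem = asyncio.Semaphore(CONCURRENCY)
    async with httpx.AsyncClient(timeout=None) as client:
        return await asyncio.gather(*(bounded_get(client, sem, url) for url in urls))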
I wanted to write a program that checks the details of my orders on the Bithumb exchange.
So I followed the docs (https://api.bithumb.com/info/orders) and wrote the code below, but the same error code keeps coming back and I don't know what to do.
import time
import math
import base64
import hmac, hashlib
import urllib.parse
import requests

class XCoinAPI:
    api_url = "https://api.bithumb.com"
    api_key = ""
    api_secret = ""

    def __init__(self, api_key, api_secret):
        self.api_key = api_key
        self.api_secret = api_secret

    def body_callback(self, buf):
        self.contents = buf

    def microtime(self, get_as_float=False):
        if get_as_float:
            return time.time()
        else:
            return '%f %d' % math.modf(time.time())

    def usecTime(self):
        mt = self.microtime(False)
        mt_array = mt.split(" ")[:2]
        return mt_array[1] + mt_array[0][2:5]

    def xcoinApiCall(self, endpoint, rgParams):
        # 1. Api-Sign and Api-Nonce information generation.
        # 2. Request related information from the Bithumb API server.
        #
        # - nonce: an arbitrary number that may only be used once.
        # - api_sign: API signature created from a combination of values.
        endpoint_item_array = {
            "endpoint": endpoint
        }
        uri_array = dict(endpoint_item_array, **rgParams)  # Merge the two dicts.
        str_data = urllib.parse.urlencode(uri_array)
        nonce = self.usecTime()
        data = endpoint + chr(1) + str_data + chr(1) + nonce
        utf8_data = data.encode('utf-8')
        key = self.api_secret
        utf8_key = key.encode('utf-8')
        h = hmac.new(bytes(utf8_key), utf8_data, hashlib.sha512)
        hex_output = h.hexdigest()
        utf8_hex_output = hex_output.encode('utf-8')
        api_sign = base64.b64encode(utf8_hex_output)
        utf8_api_sign = api_sign.decode('utf-8')
        headers = {
            "Accept": "application/json",
            "Content-Type": "application/x-www-form-urlencoded",
            "Api-Key": self.api_key,
            "Api-Nonce": nonce,
            "Api-Sign": utf8_api_sign
        }
        url = self.api_url + endpoint
        r = requests.post(url, headers=headers, data=uri_array)
        return r.json()

a = XCoinAPI(api_key="MYKEY1c", api_secret="MYKEY2")
aa = a.xcoinApiCall("/info/orders", {"order_currency": "LN", "payment_currency": "BTC"})
print(aa)
{'status': '5100', 'message': 'Bad Request.(Auth Data)'}
Process finished with exit code 0
The error code 5100, Bad Request, keeps coming up (https://apidocs.bithumb.com/docs/api-%EC%A3%BC%EC%9A%94-%EC%97%90%EB%9F%AC-%EC%BD%94%EB%93%9C).
I really don't know which part of the code to modify.
I think it's a parameter problem in xcoinApiCall, but I'm not sure.
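One thing I would double-check (an assumption on my part, not something I have verified against my own keys): the separator used when building the string that gets signed. Copies of the reference XCoinAPI sample I have seen use a null byte, chr(0), between the endpoint, the urlencoded parameters, and the nonce, while the code above uses chr(1). Since the server recomputes the HMAC from those same three pieces, any separator mismatch would produce exactly a 'Bad Request.(Auth Data)' response. A one-line sketch of the alternative:

# hypothetical fix, assuming the null-byte separator from the reference sample
data = endpoint + chr(0) + str_data + chr(0) + nonce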
I can currently log in with Google with no problems, using Authlib for my Starlette app, but Azure throws an invalid claim "iss" error when doing:
await client.parse_id_token(request, token)
Please, any help would be wonderful. Googling it, I didn't find anything.
The complete code snippet is:
async def do_azure_login(request: Request) -> Any:
    redirect_uri = request.url_for('authz_azure').replace(' ', '')
    azure = OAUTH.create_client('azure')
    return await azure.authorize_redirect(request, redirect_uri)

async def authz_azure(request: Request) -> HTMLResponse:
    return await authz(request, OAUTH.azure)

async def authz(request: Request, client: OAuth) -> HTMLResponse:
    token = await client.authorize_access_token(request)
    user = dict(await client.parse_id_token(request, token))
    request.session['username'] = user['email']
    request.session['first_name'] = user.get('given_name', '')
    request.session['last_name'] = user.get('family_name', '')
    response = TEMPLATING_ENGINE.TemplateResponse(
        name='app.html',
        context={
            'request': request
        }
    )
    return response
I think the problem may be in using these:
AZURE_CONF_URL = (
'https://login.microsoftonline.com/common/v2.0/.well-known/openid-configuration'
)
AZURE_AUTHZ_URL = (
'https://login.microsoftonline.com/common/oauth2/authorize'
)
The problem should be the "issuer": "https://login.microsoftonline.com/{tenantid}/v2.0" entry in the metadata behind the AZURE_CONF_URL link; the literal {tenantid} placeholder can never match the tenant-specific iss claim in the token. I've seen people having this same issue.
I'm still researching. These may be useful:
https://github.com/MicrosoftDocs/azure-docs/issues/38427
https://github.com/authlib/loginpass/issues/65
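A sketch of the direction I'm considering (untested; AZURE_TENANT_ID, AZURE_CLIENT_ID, and AZURE_CLIENT_SECRET are placeholders): registering the client against the tenant-specific metadata URL instead of /common, so the advertised issuer no longer contains the literal {tenantid} template:

AZURE_TENANT_ID = 'your-tenant-guid'  # placeholder

OAUTH.register(
    name='azure',
    client_id=AZURE_CLIENT_ID,
    client_secret=AZURE_CLIENT_SECRET,
    # tenant-specific discovery document; its issuer is a concrete URL
    server_metadata_url=(
        f'https://login.microsoftonline.com/{AZURE_TENANT_ID}'
        '/v2.0/.well-known/openid-configuration'
    ),
    client_kwargs={'scope': 'openid email profile'},
)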
I'm trying to receive data from two endpoints at the same time, but if the websocket stops sending messages, I also stop receiving data from the GET request to "https://www.blabla.com". What is the best way to solve this problem?
import asyncio
import aiohttp

URL = 'wss://www.some_web_socket.io'

async def get_some_data(session):
    url = "https://www.blabla.com"
    async with session.get(url) as response:
        data = await response.text()
        return data

async def ws_handler(url):
    async with aiohttp.ClientSession() as session:
        async with session.ws_connect(url) as ws:
            while True:
                some_data_from_get_request = await get_some_data(session)
                msg_from_websocket = await ws.receive()
                if msg_from_websocket.type == aiohttp.WSMsgType.TEXT:
                    print(msg_from_websocket.data)
                print(some_data_from_get_request)

def _main():
    asyncio.run(ws_handler(URL))

if __name__ == "__main__":
    _main()
This code serializes the return values of HTTP and websocket communication:
while True:
    some_data_from_get_request = await get_some_data(session)
    msg_from_websocket = await ws.receive()
To be able to detect either of the two coroutines returning, you can use asyncio.wait(..., return_when=asyncio.FIRST_COMPLETED):
http_fut = asyncio.ensure_future(get_some_data(session))
ws_fut = asyncio.ensure_future(ws.receive())
pending = {http_fut, ws_fut}
while pending:
    _done, pending = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED)
    if http_fut.done():
        some_data_from_get_request = http_fut.result()
        ...
    if ws_fut.done():
        msg_from_websocket = ws_fut.result()
        ...
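If you need this to keep running, here is a fuller sketch (my extension of the above, untested, assuming the same session and ws objects from the question) that re-creates whichever future completed, so both sources keep being polled:

async def handle_both(session, ws):
    http_fut = asyncio.ensure_future(get_some_data(session))
    ws_fut = asyncio.ensure_future(ws.receive())
    while True:
        done, _pending = await asyncio.wait(
            {http_fut, ws_fut}, return_when=asyncio.FIRST_COMPLETED)
        if http_fut in done:
            print(http_fut.result())  # the HTTP response body
            http_fut = asyncio.ensure_future(get_some_data(session))  # re-arm
        if ws_fut in done:
            msg = ws_fut.result()
            if msg.type == aiohttp.WSMsgType.TEXT:
                print(msg.data)
            ws_fut = asyncio.ensure_future(ws.receive())  # re-arm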
I am using the following Python script to test an application that is running on an AWS instance:
import sys
import requests
import logging
import random
from datetime import datetime
import threading
import os
import time

logger = logging.getLogger('Intrudx')
handle = logging.FileHandler('Intrudx.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
handle.setFormatter(formatter)
logger.addHandler(handle)
logger.setLevel(logging.INFO)

loop_count = int(sys.argv[1])
sleep_time = int(sys.argv[2])

# CHECKING THE HEARTBEAT
def heartbeat(SessionID, SiteID):
    logger.info("Starting heartbeat thread")
    try:
        heart_url = 'http://ec2-instance-address.com/license/heartbeat'
        heart_result = requests.post(heart_url, json={
            "SessionID": str(SessionID),
            "SiteID": str(SiteID)
        })
        if heart_result.status_code == 500:
            logger.error("Heartbeat Failed with 500")
            return "We Got 500"
        response_text = heart_result.json()["ResponseText"]
        logger.info("Heartbeat: " + str(response_text))
    except Exception as e:
        logger.error("Heartbeat Failed" + str(e))

# FINDING THE SERVER IP
def ip(SessionID):
    logger.info("Starting get server info thread")
    try:
        get_server_url = 'http://ec2-instance-address.com/server/getStreamingServer'
        get_server_result = requests.post(get_server_url, json={"SessionID": str(SessionID)})
        result_code = get_server_result.status_code
        if result_code == 500:
            logger.error("GetStreamingServerInfo: " + "Failed")
            return "We Got 500"
        response_text = get_server_result.json()["ResponseText"]
        logger.info("GetStreamingServerInfo: " + str(response_text))
    except Exception as e:
        logger.error("GetStreamingServerInfo: " + str(e))

def main():
    for i in range(loop_count):
        # LOGIN
        try:
            login_url = 'http://ec2-instance-address.com/user/login'
            login_result = requests.post(login_url, json={
                "AccountName": "Account1",
                "UserID": "user2",
                "UserPassword": "test"
            })
            result_code = login_result.status_code
            if result_code == 500:
                logger.error("Login: " + "Failed")
                return "We Got 500"
            SessionID = login_result.json()["SessionID"]
            response_text = login_result.json()["ResponseText"]
            logger.info("Login: " + str(response_text) + ": " + str(SessionID))
            print(str(SessionID) + str(response_text))
        except Exception as e:
            result_code = str(e)
            logger.error("Login: " + str(e))
        # GET NEW SITE
        try:
            get_new_site_url = 'http://ec2-instance-address.com/license/getNewSite'
            get_new_site_result = requests.post(get_new_site_url, json={"SessionID": str(SessionID)})
            result_code = get_new_site_result.status_code
            if result_code == 500:
                logger.error("Login: " + "Failed")
                return "We Got 500"
            response_text = get_new_site_result.json()["ResponseText"]
            site_id = get_new_site_result.json()["NewSiteID"]
            logger.info("getNewSite: " + str(response_text) + ": " + str(site_id))
        except Exception as e:
            result_code = str(e)
            logger.error("getNewSite" + str(e))
        # STARTING HEARTBEAT THREAD
        try:
            # pass the function itself; calling heartbeat(...) here would run it inline
            threading.Thread(target=heartbeat, args=(SessionID, site_id)).start()
        except Exception as e:
            logger.error("Problem starting thread: " + str(e))
        # STARTING GET SERVER INFO THREAD
        try:
            # args must be a tuple, hence the trailing comma
            threading.Thread(target=ip, args=(SessionID,)).start()
        except Exception as e:
            logger.error("Problem while starting Get Server Info Thread" + str(e))
This script uses just one user, creating one session/connection with the server to make API calls.
In a similar way, I want to test the application with 50 or 100 different users (with different accounts/credentials) connected to the server and making API calls, as if 50 or 100 users were using the application concurrently, so I can verify that the application handles 50 users properly.
How can I do this kind of testing with a script?
Update: most of the routes are hidden; they need @login_required.
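The direction I've been sketching so far (untested; the credential list is made up) is to give each user its own thread running the same login flow, but I'm not sure it's the right approach for realistic load:

import threading
import requests

LOGIN_URL = 'http://ec2-instance-address.com/user/login'
# made-up credentials; in reality these would be 50-100 real test accounts
USERS = [("Account1", "user%d" % i, "test") for i in range(1, 51)]

def user_session(account, user_id, password):
    # one independent login per simulated user
    r = requests.post(LOGIN_URL, json={"AccountName": account,
                                       "UserID": user_id,
                                       "UserPassword": password})
    print(user_id, r.status_code)

threads = [threading.Thread(target=user_session, args=u) for u in USERS]
for t in threads:
    t.start()
for t in threads:
    t.join()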
I recommend you try Bees with Machine Guns. It's a Python script that launches micro EC2 instances and sends many requests from them to your application, simulating a large surge in traffic for performance testing.
I heard about it in AWS training videos on CBT Nuggets, where the instructor used it effectively to trigger auto scaling and load test his configuration.
Good luck.
You could also try our little tool k6: https://github.com/loadimpact/k6
You script the behaviour of the virtual users in JavaScript, so it is quite easy to have 50 different users logging in with different credentials. It would look something like this (this code is going to need debugging though :)
import http from "k6/http";
import { check } from "k6";

let login_url = "http://ec2-instance-address.com/user/login";
let get_new_site_url = "http://ec2-instance-address.com/license/getNewSite";
let loop_count = 10; // assumed value; adjust to how many iterations each user should run

let credentials = [
    { "account": "Account1", "username": "joe", "password": "secret" },
    { "account": "Account2", "username": "jane", "password": "verysecret" }
];

export default function() {
    let session_id = doLogin();
    let response = doGetNewSite(session_id);
    let response_text = response["ResponseText"];
    let new_site_id = response["NewSiteID"];
    for (let i = 0; i < loop_count; i++) {
        // do heartbeat stuff?
    }
}

function doLogin() {
    // pick a random credential set so concurrent users log in as different accounts
    let index = Math.floor(Math.random() * credentials.length);
    let post_body = {
        "AccountName": credentials[index]["account"],
        "UserID": credentials[index]["username"],
        "UserPassword": credentials[index]["password"]
    };
    let http_headers = { "Content-Type": "application/json" };
    let res = http.post(login_url, JSON.stringify(post_body), { headers: http_headers });
    check(res, {
        "Response code is 200": (r) => r.status == 200,
        "Login successful": (r) => JSON.parse(r.body).hasOwnProperty("SessionID")
    });
    return JSON.parse(res.body)["SessionID"];
}

function doGetNewSite(session_id) {
    let http_headers = { "Content-Type": "application/json" };
    let post_body = { "SessionID": session_id };
    let res = http.post(get_new_site_url, JSON.stringify(post_body), { headers: http_headers });
    check(res, {
        "Status code was 200": (r) => r.status == 200,
        "Got response text": (r) => JSON.parse(r.body).hasOwnProperty("ResponseText"),
        "Got new site id": (r) => JSON.parse(r.body).hasOwnProperty("NewSiteID")
    });
    return JSON.parse(res.body);
}
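To actually simulate the concurrent users, you would then run the script with the built-in CLI options, e.g. k6 run --vus 50 --duration 60s script.js (50 virtual users for 60 seconds; see the k6 docs for the full set of flags).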