Using httpx to send 100K GET requests - python-3.x

I'm using the httpx library with asyncio to try to send about 100K GET requests.
When I ran the code I received httpx.ConnectError, so I opened Wireshark and saw a lot of messages saying TCP Retransmission TCP Port numbers reused.
Based on the Wireshark capture and the httpx.ConnectError, I added limits = httpx.Limits(max_connections=10000) to cap the number of active connections at 10,000, but I still get that error.
My code:
import asyncio
import json

import httpx

SOME_URL = "some url"
ANOTHER_URL = "another url"
MAX = 10000


async def search():
    guids = [guid for guid in range(688001, 800000)]  # 688001 - 838611
    timeout = httpx.Timeout(None)
    limits = httpx.Limits(max_connections=MAX)

    async with httpx.AsyncClient(timeout=timeout, limits=limits) as client:
        tasks = [client.get(f"{SOME_URL}{guid}", timeout=timeout) for guid in guids]
        blob_list = await asyncio.gather(*tasks)  # <---- error from here !!!!!

    blob_list = [(res, guid) for res, guid in zip(blob_list, guids)]
    guids = [guid for res, guid in blob_list]
    blob_list = [json.loads(res.text)["blob_name"] for res, guid in blob_list]

    async with httpx.AsyncClient(timeout=timeout, limits=limits) as client:
        tasks = [client.get(f"{ANOTHER_URL}{blob}", timeout=timeout) for blob in blob_list]
        game_results = await asyncio.gather(*tasks)  # <---- error from here !!!!!

    game_results = [(res, guid) for res, guid in zip(game_results, guids)]
    game_results = [guid for res, guid in game_results]
    print(game_results)


def main():
    asyncio.run(search())


if __name__ == '__main__':
    main()
This is a minimal version of my code; there are some steps between the requests that I deleted, but I didn't touch the code that causes the trouble. The lines where I receive the errors are marked with comments (# <---- error from here !!!!!).
Does anyone know how to solve this, or another way to send about 100K GET requests fast?

I managed to solve my problem with the following code:
(this is not the entire code, only the parts needed to send the requests; I have some stuff in between)
import asyncio

from aiohttp import ClientSession

SOME_URL = "some url"
ANOTHER_URL = "another url"
MAX_SIM_CONNS = 50

worker_responses = []


async def fetch(url, session):
    async with session.get(url) as response:
        return await response.read()


async def fetch_worker(url_queue: asyncio.Queue):
    global worker_responses
    async with ClientSession() as session:
        while True:
            url = await url_queue.get()
            try:
                if url is None:
                    return
                response = await fetch(url, session)
                worker_responses.append(response)
            finally:
                # calling task_done() is necessary for url_queue.join() to work correctly
                url_queue.task_done()


async def fetch_all(base_url: str, range_: range):
    url_queue = asyncio.Queue(maxsize=10000)
    worker_tasks = []
    for i in range(MAX_SIM_CONNS):
        wt = asyncio.create_task(fetch_worker(url_queue))
        worker_tasks.append(wt)
    for i in range_:
        await url_queue.put(f"{base_url}{i}")
    for i in range(MAX_SIM_CONNS):
        # tell the workers that the work is done
        await url_queue.put(None)
    await url_queue.join()
    await asyncio.gather(*worker_tasks)


if __name__ == '__main__':
    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
    asyncio.run(fetch_all(SOME_URL, range(680_842, 840_423)))
    print(worker_responses)
I used aiohttp instead of httpx and an asyncio.Queue to reduce RAM usage, and it worked for me.
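If staying with httpx is preferable, the same idea of bounding concurrency (instead of firing all 100K requests at once) can also be sketched with a semaphore. This is only a minimal sketch under the question's assumptions; SOME_URL is the placeholder from above and MAX_CONCURRENCY is an illustrative value:
import asyncio

import httpx

SOME_URL = "some url"
MAX_CONCURRENCY = 50  # illustrative value, tune for your server


async def fetch_all(guids):
    semaphore = asyncio.Semaphore(MAX_CONCURRENCY)
    async with httpx.AsyncClient(timeout=None) as client:

        async def fetch(guid):
            # the semaphore keeps at most MAX_CONCURRENCY requests in flight
            async with semaphore:
                return await client.get(f"{SOME_URL}{guid}")

        return await asyncio.gather(*(fetch(guid) for guid in guids))


# responses = asyncio.run(fetch_all(range(688001, 800000)))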

Related

How to post a group of requests to 2 URLs with aiohttp

I have 2 URLs and 60k+ requests. Basically, I need to post every request to both URLs and then compare their responses, but without waiting for a response before posting the next request.
I've tried to do it with aiohttp and asyncio:
import asyncio
import os
import time

import aiohttp
from aiofile import AIOFile

testURL = ""
prodURL = ""
directoryWithRequests = ''
directoryToWrite = ''
headers = {'content-type': 'application/soap+xml'}
i = 1


async def fetch(session, url, request):
    global i
    async with session.post(url=url, data=request.encode('utf-8'), headers=headers) as response:
        if response.status != 200:
            async with AIOFile(directoryToWrite + str(i) + '.xml', 'w') as afp:
                await afp.write(request)
            i += 1
        return await response.text()


async def fetch_all(session, urls, request):
    results = await asyncio.gather(*[asyncio.create_task(fetch(session, url, request)) for url in urls])
    return results


async def asynchronousRequests(requestBody):
    urls = [testURL, prodURL]
    global i
    with open(requestBody) as my_file:
        body = my_file.read()
    async with aiohttp.ClientSession() as session:
        htmls = await fetch_all(session, urls, body)
        # some conditions


async def asynchronous():
    try:
        start = time.time()
        futures = [asynchronousRequests(directoryWithRequests + i) for i in os.listdir(directoryWithRequests)]
        for future in asyncio.as_completed(futures):
            result = await future
        print("Process took: {:.2f} seconds".format(time.time() - start))
    except Exception as e:
        print(str(e))


if __name__ == '__main__':
    try:
        # AsyncronTest
        ioloop = asyncio.ProactorEventLoop()
        ioloop.run_until_complete(asynchronous())
        ioloop.close()
        if i == 1:
            print('Regress is OK')
        else:
            print('Number of requests to check = {}'.format(i))
    except Exception as e:
        print(e)
I believe the code above works, but it creates N futures, where N equals the number of request files. This leads to a sort of DDoS, because the server can't respond to that number of requests at the same time.
Found a suitable solution. Basically it's just 2 async tasks:
tasks = [
    postRequest(testURL, client, body),
    postRequest(prodURL, client, body)
]
await asyncio.wait(tasks)
It's not the same performance as the code in the question with a comparable number of requests, but at least it doesn't DDoS the server as much.
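Note that on newer Python versions, passing bare coroutines to asyncio.wait() is deprecated (and eventually rejected), so a version of this snippet would wrap them in tasks first. A minimal sketch, reusing the postRequest coroutine and client names from the snippet above; asyncio.gather also returns the responses in order, which makes the comparison step easier:
tasks = [
    asyncio.create_task(postRequest(testURL, client, body)),
    asyncio.create_task(postRequest(prodURL, client, body)),
]
# both requests run concurrently; gather preserves submission order
test_response, prod_response = await asyncio.gather(*tasks)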

Handling ensure_future and its missing tasks

I have a streaming application that almost continuously takes the data given as input and sends an HTTP request using that value and does something with the returned value.
Obviously, to speed things up, I've used the asyncio and aiohttp libraries in Python 3.7 to get the best performance, but it becomes hard to debug given how fast the data moves.
This is what my code looks like:
'''
Gets the final requests
'''
async def apiRequest(info, url, session, reqType, post_data=''):
    if reqType:
        async with session.post(url, data=post_data) as response:
            info['response'] = await response.text()
    else:
        async with session.get(url + post_data) as response:
            info['response'] = await response.text()
    logger.debug(info)
    return info


'''
Loops through the batches and sends it for request
'''
async def main(data, listOfData):
    tasks = []
    async with ClientSession() as session:
        for reqData in listOfData:
            try:
                task = asyncio.ensure_future(apiRequest(**reqData))
                tasks.append(task)
            except Exception as e:
                print(e)
                exc_type, exc_obj, exc_tb = sys.exc_info()
                fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
                print(exc_type, fname, exc_tb.tb_lineno)
        responses = await asyncio.gather(*tasks)
    return responses  # list of APIResponses


'''
Streams data in and prepares batches to send for requests
'''
async def Kconsumer(data, loop, batchsize=100):
    consumer = AIOKafkaConsumer(**KafkaConfigs)
    await consumer.start()
    dataPoints = []
    async for msg in consumer:
        try:
            sys.stdout.flush()
            consumedMsg = loads(msg.value.decode('utf-8'))
            if consumedMsg['tid']:
                dataPoints.append(loads(msg.value.decode('utf-8')))
            if len(dataPoints) == batchsize or time.time() - startTime > 5:
                '''
                #1: The task below goes and sends HTTP GET requests in bulk using aiohttp
                '''
                task = asyncio.ensure_future(getRequests(data, dataPoints))
                res = await asyncio.gather(*[task])
                if task.done():
                    outputs = []
                    '''
                    #2: Does some ETL on the returned values
                    '''
                    ids = await asyncio.gather(*[doSomething(**{'tid': x['tid'],
                                                                'cid': x['cid'], 'tn': x['tn'],
                                                                'id': x['id'], 'ix': x['ix'],
                                                                'ac': x['ac'],
                                                                'output': to_dict(xmltodict.parse(x['response'], encoding='utf-8')),
                                                                'loop': loop, 'option': 1}) for x in res[0]])
                    simplySaveDataIntoDataBase(id)  # This is where I see some missing data in the database
                dataPoints = []
        except Exception as e:
            logger.error(e)
            logger.error(traceback.format_exc())
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            logger.error(str(exc_type) + ' ' + str(fname) + ' ' + str(exc_tb.tb_lineno))


if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    asyncio.ensure_future(Kconsumer(data, loop, batchsize=100))
    loop.run_forever()
Does ensure_future need to be awaited?
How does aiohttp handle requests that come a little later than the others? Shouldn't it hold the whole batch back instead of forgetting about it altogether?
Does ensure_future need to be awaited?
Yes, and your code is doing that already. await asyncio.gather(*tasks) awaits the provided tasks and returns their results in the same order.
Note that await asyncio.gather(*[task]) doesn't make sense, because it is equivalent to await asyncio.gather(task), which is again equivalent to await task. In other words, when you need the result of getRequests(data, dataPoints), you can write res = await getRequests(data, dataPoints) without the ceremony of first calling ensure_future() and then calling gather().
In fact, you almost never need to call ensure_future yourself:
if you need to await multiple tasks, you can pass coroutine objects directly to gather, e.g. gather(coroutine1(), coroutine2()).
if you need to spawn a background task, you can call asyncio.create_task(coroutine(...))
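A minimal sketch of both patterns (the coroutine names are placeholders):
import asyncio


async def coroutine1():
    return 1


async def coroutine2():
    return 2


async def main():
    # await several coroutines at once; no ensure_future needed
    first, second = await asyncio.gather(coroutine1(), coroutine2())

    # spawn a background task that keeps running while other work happens
    background = asyncio.create_task(coroutine1())
    # ... do other work ...
    await background  # eventually await (or cancel) the background task


asyncio.run(main())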
How does aiohttp handle requests that come a little later than the others? Shouldn't it hold the whole batch back instead of forgetting about it altogether?
If you use gather, all requests must finish before any of them return. (That is not aiohttp policy, it's how gather works.) If you need to implement a timeout, you can use asyncio.wait_for or similar.
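For example, a per-request timeout could be added like this (a sketch reusing apiRequest and reqData from the question; the 10-second value is arbitrary):
try:
    # give the request at most 10 seconds before giving up on it
    info = await asyncio.wait_for(apiRequest(**reqData), timeout=10)
except asyncio.TimeoutError:
    info = None  # decide how a timed-out request should be represented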

aiohttp and asyncio: how to get responses from an HTTP request and a websocket concurrently?

I'm trying to receive data from two endpoints at the same time, but if the websocket stops sending messages I won't receive the data from the GET request to "https://www.blabla.com". What is the best way to solve this problem?
import asyncio

import aiohttp

URL = 'wss://www.some_web_socket.io'


async def get_some_data(session):
    url = "https://www.blabla.com"
    async with session.get(url) as response:
        data = await response.text()
        return data


async def ws_handler(url):
    async with aiohttp.ClientSession() as session:
        async with session.ws_connect(url) as ws:
            msg = await ws.receive()
            while True:
                some_data_from_get_request = await get_some_data(session)
                msg_from_websocket = await ws.receive()
                if msg.type == aiohttp.WSMsgType.TEXT:
                    print(stream_data)
                    print(some_data_from_get_request)


def _main():
    asyncio.run(ws_handler(URL))


if __name__ == "__main__":
    _main()
This code serializes the return values of HTTP and websocket communication:
while True:
    some_data_from_get_request = await get_some_data(session)
    msg_from_websocket = await ws.receive()
To be able to detect either of the two coroutines returning, you can use asyncio.wait(..., return_when=asyncio.FIRST_COMPLETED):
http_fut = asyncio.ensure_future(get_some_data(session))
ws_fut = asyncio.ensure_future(ws.receive())
pending = {http_fut, ws_fut}
while pending:
    _done, pending = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED)
    if http_fut.done():
        some_data_from_get_request = http_fut.result()
        ...
    if ws_fut.done():
        msg_from_websocket = ws_fut.result()
        ...
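To keep both streams alive across iterations, the finished future has to be replaced with a fresh one before the next asyncio.wait() call. A sketch of that step (an addition, not part of the snippet above):
if ws_fut.done():
    msg_from_websocket = ws_fut.result()
    # handle the message, then immediately start listening for the next one
    ws_fut = asyncio.ensure_future(ws.receive())
    pending.add(ws_fut)
# the HTTP future can be re-armed the same way if it needs to keep polling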

Fetching multiple urls with aiohttp in python

In a previous question, a user suggested the following approach for fetching multiple urls (API calls) with aiohttp:
import asyncio
import aiohttp

url_list = ['https://api.pushshift.io/reddit/search/comment/?q=Nestle&size=30&after=1530396000&before=1530436000',
            'https://api.pushshift.io/reddit/search/comment/?q=Nestle&size=30&after=1530436000&before=1530476000']


async def fetch(session, url):
    async with session.get(url) as response:
        return await response.json()['data']


async def fetch_all(session, urls, loop):
    results = await asyncio.gather(*[loop.create_task(fetch(session, url)) for url in urls], return_exceptions=True)
    return results


if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    urls = url_list
    with aiohttp.ClientSession(loop=loop) as session:
        htmls = loop.run_until_complete(fetch_all(session, urls, loop))
    print(htmls)
However, this only returns AttributeErrors:
[AttributeError('__aexit__',), AttributeError('__aexit__',)]
(which I enabled with return_exceptions=True, otherwise it would just break). I really hope there is somebody here who can help with this; it is still kind of hard to find resources for asyncio etc. The returned data is in JSON format. In the end I would like to put all the JSON dicts in a list.
Working example:
import asyncio
import ssl

import aiohttp

url_list = ['https://api.pushshift.io/reddit/search/comment/?q=Nestle&size=30&after=1530396000&before=1530436000',
            'https://api.pushshift.io/reddit/search/comment/?q=Nestle&size=30&after=1530436000&before=1530476000']


async def fetch(session, url):
    async with session.get(url, ssl=ssl.SSLContext()) as response:
        return await response.json()


async def fetch_all(urls, loop):
    async with aiohttp.ClientSession(loop=loop) as session:
        results = await asyncio.gather(*[fetch(session, url) for url in urls], return_exceptions=True)
        return results


if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    urls = url_list
    htmls = loop.run_until_complete(fetch_all(urls, loop))
    print(htmls)
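On Python 3.7+ the same example can be driven by asyncio.run() without managing the event loop by hand; a minimal sketch, with the explicit loop argument dropped:
import asyncio

import aiohttp


async def fetch(session, url):
    async with session.get(url) as response:
        return await response.json()


async def fetch_all(urls):
    async with aiohttp.ClientSession() as session:
        return await asyncio.gather(*(fetch(session, url) for url in urls),
                                    return_exceptions=True)


# htmls = asyncio.run(fetch_all(url_list))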

Getting a file without saving it aiohttp discord.py

So I have a command that sends whatever the user typed after the command to a website API and then sends back the file the site generates. However, I'm switching over to aiohttp, as it doesn't block like the standard requests functions.
This is how I do it with normal requests, and it works fine:
elif (data[0].lower() == ">signgirl"):
    await bot.send_typing(message.channel)
    tmp = message.content.replace(">signgirl", "")
    m = hashlib.md5()
    m.update(tmp.encode('utf-8'))
    print(tmp, m.hexdigest())
    r = requests.post("http://localhost/sign.php", stream=True, data={'text': tmp})
    if (r.status_code == 200):
        await bot.send_file(destination=message.channel, filename=str(m.hexdigest() + ".png"), fp=r.raw)
However, when I try with aiohttp, I have no idea how to actually get the raw file data.
So I made this function to get it, but it doesn't let me return an image, and I cannot check the HTTP status code without it causing an error.
async def post_data2(url, payload):
    async with aiohttp.ClientSession() as session2:
        async with session2.post(url, data=payload) as response2:
            output = {}
            output['data'] = await Image.open(BytesIO(response2.read()))
            output['status'] = 200  # await str(response2.status())  # Why is this object not callable?
            return output
How else could I do this? Is this possible? aiohttp doesn't seem as easy to understand.
Mister Day "V" Own from the discord.py Discord server sent a perfect example of getting and sending the data:
async with aiohttp.ClientSession() as session:
    # or use a session you already have
    async with session.get("http://example.com") as resp:
        buffer = io.BytesIO(await resp.read())
        # buffer is a file-like object
        await client.send_file(channel, fp=buffer, filename="whatever")
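To also cover the status-code check from the original code: in aiohttp the status is a plain attribute, not a method, so a hedged variant (reusing tmp, m, bot and message from the question's snippet) could look like:
async with aiohttp.ClientSession() as session:
    async with session.post("http://localhost/sign.php", data={'text': tmp}) as resp:
        if resp.status == 200:  # status is an int attribute, not a method
            buffer = io.BytesIO(await resp.read())
            await bot.send_file(destination=message.channel,
                                filename=m.hexdigest() + ".png", fp=buffer)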
