I would simply like to associate responses from asynchronous aiohttp HTTP requests with an identifier. I am using the following code to hit the API and extract the contactproperty object, which requires an external field (contactid) in order to call its API:
def get_contact_properties(self, office_name, api_key, ids, chunk_size=100, **params):
properties_pages = []
batch = 0
while True:
chunk_ids = [ids[i] for i in range(batch * chunk_size + 1, chunk_size * (1 + batch) + 1)]
urls = ["{}/{}".format(self.__get_base_url(), "contacts/{}/properties?api_key={}".format(contactid, api_key))
for contactid in chunk_ids]
responses_raw = self.get_responses(urls, self.get_office_token(office_name), chunk_size)
try:
responses_json = [json.loads(response_raw) for response_raw in responses_raw]
except Exception as e:
print(e)
valid_responses = self.__get_valid_contact_properties_responses(responses_json)
properties_pages.append(valid_responses)
if len(valid_responses) < chunk_size: # this is how we know there are no more pages with data
break
else:
batch = batch + 1
ids is a list of ids. The problem is that I do not know which response corresponds to which id, so that later I can link it to the contact entity using contactid. This is my fetch() function, so I was wondering how to edit it to return the contactid along with the output.
async def __fetch(self, url, params, session):
async with session.get(url, params=params) as response:
output = await response.read()
return (output)
async def __bound_fetch(self, sem, url, params, session):
# Getter function with semaphore.
async with sem:
output = await self.__fetch(url, params, session)
return output
You can return the url (or whatever key identifies your request) together with the output.
Regarding using the data, I think you should read the response directly as JSON, especially since aiohttp can do this for you automatically.
async def __fetch(self, url, params, session):
async with session.get(url, params=params) as response:
try:
data = await response.json()
except ValueError as exc:
print(exc)
return None
return data
async def __bound_fetch(self, sem, url, params, session):
# Getter function with semaphore.
async with sem:
output = await self.__fetch(url, params, session)
return {"url": url, "data": data}
You did not post the get_responses function, but I'm guessing something like this should work:
responses = self.get_responses(urls, self.get_office_token(office_name), chunk_size)
Responses will be a list of {"url": url, "data": data} dicts (data can be None for invalid responses); however, with the code above one invalid request will not affect the others.
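For completeness, here is a hedged sketch of how get_responses might collect those dicts and how they can be mapped back to contact ids. Since the original get_responses was not posted, the event-loop handling and the Authorization header scheme are assumptions:
import asyncio
import aiohttp

# Hypothetical get_responses, assuming the __bound_fetch above that now
# returns {"url": url, "data": data}; the token header name is a guess.
def get_responses(self, urls, token, concurrency):
    async def runner():
        sem = asyncio.Semaphore(concurrency)
        async with aiohttp.ClientSession(headers={"Authorization": token}) as session:
            tasks = [self.__bound_fetch(sem, url, {}, session) for url in urls]
            return await asyncio.gather(*tasks)
    return asyncio.get_event_loop().run_until_complete(runner())
Because each URL embeds its contactid, get_contact_properties can then key the results by id instead of by URL:
url_to_id = dict(zip(urls, chunk_ids))
responses = self.get_responses(urls, self.get_office_token(office_name), chunk_size)
properties_by_id = {url_to_id[r["url"]]: r["data"] for r in responses}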
I need help implementing the logic to count the number of successful asynchronous POST calls (status_code == 200) as well as failed calls (status_code != 200).
I am new to coroutines. I would appreciate it if someone could suggest a better way of making an asynchronous POST call that can be retried, polled for status, and that emits metrics for successful POST requests as well.
Following is my code:
asyncio.get_event_loop().run_in_executor(
None,
self.publish_actual,
event_name,
custom_payload,
event_message_params,
)
which calls publish_actual:
def publish_actual(
self,
event_name: str,
custom_payload={},
event_message_params=[],
):
"""Submits a post request using the request library
:param event_name: name of the event
:type event_name: str
:param key: key for a particular application
:param custom_payload: custom_payload, defaults to {}
:type custom_payload: dict, optional
:param event_message_params: event_message_params, defaults to []
:type event_message_params: list, optional
"""
json_data = {}
path = f"/some/path"
self.request(path, "POST", json=json_data)
which calls the following request function:
def request(self, api_path, method="GET", **kwargs):
try:
self._validate_configuration()
headers = {}
api_endpoint = self.service_uri.to_url(api_path)
logger.debug(api_endpoint)
if "headers" in kwargs and kwargs["headers"]:
headers.update(kwargs["headers"])
headers = {"Content-Type": "application/json"}
begin = datetime.now()
def build_success_metrics(response, *args, **kwargs):
tags = {
"name": "success_metrics",
"domain": api_endpoint,
"status_code": 200,
}
build_metrics(tags)
def check_for_errors(response, *args, **kwargs):
response.raise_for_status()
response = self.session.request(
method=method,
url=api_endpoint,
headers=headers,
timeout=self.timeout,
hooks={"response": [build_success_metrics, check_for_errors]},
**kwargs,
)
end = datetime.now()
logger.debug(
f"'{method}' request against endpoint '{api_endpoint}' took {round((end - begin).total_seconds() * 1000, 3)} ms"
)
logger.debug(f"response: {response}")
except RequestException as e:
tags = {
"name": "error_metrics",
"domain": api_endpoint,
"exception_class": e.__class__.__name__,
}
build_metrics(tags)
return f"Exception occured: {e}"
Let me know if anything else is required from my end to explain what exactly I have done and what I am trying to achieve.
There is not much await and async in your example, so I've just addressed the counting part of your question in general asyncio terms. asyncio.Queue is good for this because you can separate the counting from its cause quite simply.
import asyncio
import aiohttp
class Count():
def __init__(self, queue: asyncio.Queue):
self.queue = queue
self.good = 0
self.bad = 0
async def count(self):
while True:
result = await self.queue.get()
if result == 'Exit':
return
if result == 200:
self.good += 1
else:
self.bad += 1
async def request(q: asyncio.Queue):
async with aiohttp.ClientSession() as session:
for _ in range(5): # just poll 5 times in this instance
await asyncio.sleep(0.1)
async with session.get(
'https://httpbin.org/status/200%2C500', ssl=False
) as response:
q.put_nowait(response.status)
q.put_nowait('Exit')
async def main():
q = asyncio.Queue()
cnt = Count(q)
tasks = [cnt.count(), request(q)]
await asyncio.gather(*[asyncio.create_task(t) for t in tasks])
print(cnt.good, cnt.bad)
if __name__ == "__main__":
asyncio.run(main())
The output is random, given the httpbin response; the two counts should add up to 5.
4 1
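To tie this back to the POST-with-retry part of the question, here is a hedged sketch (the URL, payload, and retry count are placeholders, not the asker's actual endpoint) of an aiohttp POST that retries a few times and reports its final status code to the same queue, so the Count task can tally it:
async def post_with_retry(q: asyncio.Queue, session: aiohttp.ClientSession,
                          url: str, payload: dict, retries: int = 3):
    # Try the POST up to `retries` times; push the last status (0 on network failure)
    # onto the queue so the Count task above can classify it as good or bad.
    status = 0
    for attempt in range(retries):
        try:
            async with session.post(url, json=payload) as response:
                status = response.status
        except aiohttp.ClientError:
            status = 0
        if status == 200:
            break
        await asyncio.sleep(2 ** attempt)  # simple exponential backoff between attempts
    q.put_nowait(status)
You would start Count.count() alongside a batch of post_with_retry() tasks via asyncio.gather, then put 'Exit' on the queue once the batch has finished, exactly as the request() example above does.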
I have synchronous code using requests that I am trying to move to aiohttp.ClientSession.
Indeed, I have a class in which I set up an aiohttp.ClientSession with various headers, among them an API key. The code below works for requesting data (I trimmed the __init__; everything works except this function):
class Client():
def __init__(self):
loop = asyncio.get_event_loop()
self.session = aiohttp.ClientSession(
loop=loop,
headers=self.get_headers()
)
def send_signed_request(self, url_path, payload={}):
session = requests.session()
session.headers.update(self._get_headers())
query_string = urlencode(payload)
url = url_path + '?' + query_string
params = {'url': url, 'params': {}}
response = session.post(**params)
return response.json()
client = Client()
results = client.send_signed_request(url, params)
With that requests session, I obtain a valid response from the server.
For some reason, the code below, with the aiohttp session, does not work, and I have no idea how to adapt it.
async def send_signed_request(self, url_path, payload={}):
query_string = urlencode(payload)
url = url_path + '?' + query_string
params = {'url': url, 'data': {}}
async with self.session.post(**params) as response:
return await response.json()
Does anybody know what my error is?
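For reference, a minimal hedged sketch of how the aiohttp version is usually structured, assuming (as in the working requests version) that the signed payload belongs in the query string rather than in the request body, and that the coroutine is awaited by the caller:
async def send_signed_request(self, url_path, payload=None):
    # Sketch only: keep the signed payload in the query string, send an empty body,
    # and raise early if the server rejects the request.
    query_string = urlencode(payload or {})
    url = url_path + '?' + query_string
    async with self.session.post(url) as response:
        response.raise_for_status()
        return await response.json()
Note that the coroutine must be awaited from inside another coroutine, e.g. results = await client.send_signed_request(url, params); calling it the way the synchronous version is called only creates a coroutine object.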
Question: when I call generateCSVFromIncidentIdsWithArgs(list) twice with two different lists, let's say list1 and list2, the first list's response appears correctly, but the second list's response contains the results of list1 as well. I am not sure which variable to reset before making the second call so that the second call's results are not mixed with the first list's results.
Function definition: the function fetches responses from a URL for the IDs provided in the list.
async def fetch(self, url, incident, session, csv):
async with session.get(url) as response:
self.format_output(incident, await response.read())
async def bound_fetch(self, sem, url, incident, session, csv):
# Getter function with semaphore.
async with sem:
await self.fetch(url, incident, session, csv)
async def run(self, r, csv):
url = self.conversations_url
tasks = []
# create instance of Semaphore
sem = asyncio.Semaphore(1000)
sslcontext = ssl.create_default_context(cafile=certifi.where())
sslcontext.load_cert_chain('certificate.pem',
'plainkey.pem')
# Create client session that will ensure we dont open new connection
# per each request.
async with ClientSession(connector=aiohttp.TCPConnector(ssl=sslcontext)) as session:
for i in r:
# pass Semaphore and session to every GET request
task = asyncio.ensure_future(self.bound_fetch(sem, url + str(i), i, session, csv))
tasks.append(task)
responses = await asyncio.gather(*tasks)
return responses
Function call:
def generateCSVFromIncidentIdsWithArgs(list):
incident_list = list
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
future = asyncio.ensure_future(run(incident_list, True))
loop.run_until_complete(future)
generateCSVFromIncidentIdsWithArgs(list1)
generateCSVFromIncidentIdsWithArgs(list2)
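format_output is not shown, so this is only a guess: if it appends each formatted incident to an instance-level or module-level buffer, that buffer survives between calls and the second run inherits the first run's rows. A minimal sketch of one way to avoid the carry-over, with placeholder names, is to collect results per call instead of accumulating shared state:
# Sketch: pass a fresh buffer through run()/fetch() on every call.
async def fetch(self, url, incident, session, rows):
    async with session.get(url) as response:
        # assumes format_output returns the formatted row instead of storing it
        rows.append(self.format_output(incident, await response.read()))

def generateCSVFromIncidentIdsWithArgs(incident_list):
    rows = []  # new buffer per call, so the two runs cannot mix
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    loop.run_until_complete(run(incident_list, rows))
    return rows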
I have 2 URLs and 60k+ requests. Basically, I need to post every request to both URLs and then compare their responses, but without waiting for a response before posting the next request.
I've tried to do it with aiohttp and asyncio:
import asyncio
import time
import aiohttp
import os
from aiofile import AIOFile
testURL = ""
prodURL = ""
directoryWithRequests = ''
directoryToWrite = ''
headers = {'content-type': 'application/soap+xml'}
i = 1
async def fetch(session, url, reqeust):
global i
async with session.post(url=url, data=reqeust.encode('utf-8'), headers=headers) as response:
if response.status != 200:
async with AIOFile(directoryToWrite + str(i) + '.xml', 'w') as afp:
await afp.write(reqeust)
i += 1
return await response.text()
async def fetch_all(session, urls, request):
results = await asyncio.gather(*[asyncio.create_task(fetch(session, url, request)) for url in urls])
return results
async def asynchronousRequests(requestBody):
urls = [testURL, prodURL]
global i
with open(requestBody) as my_file:
body = my_file.read()
async with aiohttp.ClientSession() as session:
htmls = await fetch_all(session, urls, body)
# some conditions
async def asynchronous():
try:
start = time.time()
futures = [asynchronousRequests(directoryWithRequests + i) for i in os.listdir(directoryWithRequests)]
for future in asyncio.as_completed(futures):
result = await future
print("Process took: {:.2f} seconds".format(time.time() - start))
except Exception as e:
print(str(e))
if __name__ == '__main__':
try:
# AsyncronTest
ioloop = asyncio.ProactorEventLoop()
ioloop.run_until_complete(asynchronous())
ioloop.close()
if i == 1:
print('Regress is OK')
else:
print('Number of requests to check = {}'.format(i))
except Exception as e:
print(e)
I believe the code above works, but it creates N futures, where N equals the number of request files. This amounts to a sort of DDoS, because the server can't respond to that many requests at the same time.
Found a suitable solution. Basically it's just 2 async tasks:
tasks = [
postRequest(testURL, client, body),
postRequest(prodURL, client, body)
]
await asyncio.wait(tasks)
It's not the same performance as the code in the question with a comfortable number of requests, but at least it doesn't DDoS the server as much.
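If you still want several request files in flight at once without flooding the server, a hedged sketch along these lines combines the per-file task from the question with a concurrency cap; the semaphore limit and helper names are placeholders, not part of the original code:
import asyncio
import os
import aiohttp

MAX_IN_FLIGHT = 20  # placeholder cap; tune to what the server tolerates

async def post_both(session, sem, urls, body, headers):
    # Post the same body to both URLs concurrently, holding a semaphore slot so
    # only MAX_IN_FLIGHT request files are being processed at any moment.
    async def post_one(url):
        async with session.post(url, data=body.encode('utf-8'), headers=headers) as resp:
            return await resp.text()
    async with sem:
        return await asyncio.gather(*(post_one(url) for url in urls))

async def run_all(request_dir, urls, headers):
    sem = asyncio.Semaphore(MAX_IN_FLIGHT)
    async with aiohttp.ClientSession() as session:
        tasks = []
        for name in os.listdir(request_dir):
            with open(os.path.join(request_dir, name)) as f:
                body = f.read()
            tasks.append(post_both(session, sem, urls, body, headers))
        # Each entry in the result is [test_response_text, prod_response_text].
        return await asyncio.gather(*tasks)
run_all could be called as asyncio.run(run_all(directoryWithRequests, [testURL, prodURL], headers)), keeping the comparison and file-writing logic from the original code.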
I wrote a script that uses a nursery and the asks module to loop through and call an API based upon the loop variables. I get responses but don't know how to return the data like you would with asyncio.
I also have a question about limiting the API calls to 5 per second.
from datetime import datetime
import asks
import time
import trio
asks.init("trio")
s = asks.Session(connections=4)
async def main():
start_time = time.time()
api_key = 'API-KEY'
org_id = 'ORG-ID'
networkIds = ['id1','id2','idn']
url = 'https://api.meraki.com/api/v0/networks/{0}/airMarshal?timespan=3600'
headers = {'X-Cisco-Meraki-API-Key': api_key, 'Content-Type': 'application/json'}
async with trio.open_nursery() as nursery:
for i in networkIds:
nursery.start_soon(fetch, url.format(i), headers)
print("Total time:", time.time() - start_time)
async def fetch(url, headers):
print("Start: ", url)
response = await s.get(url, headers=headers)
print("Finished: ", url, len(response.content), response.status_code)
if __name__ == "__main__":
trio.run(main)
When I run nursery.start_soon(fetch, ...), I am printing data within fetch, but how do I return the data? I didn't see anything similar to the asyncio.gather(*tasks) function.
Also, I can limit the number of sessions to 1-4, which helps keep me below the 5-calls-per-second limit, but I was wondering if there is a built-in way to ensure that no more than 5 API calls are made in any given second?
Returning data: pass the networkID and a dict to the fetch tasks:
async def main():
…
results = {}
async with trio.open_nursery() as nursery:
for i in networkIds:
nursery.start_soon(fetch, url.format(i), headers, results, i)
## results are available here
async def fetch(url, headers, results, i):
print("Start: ", url)
response = await s.get(url, headers=headers)
print("Finished: ", url, len(response.content), response.status_code)
results[i] = response
Alternately, create a trio.Queue to which you put the results; your main task can then read the results from the queue.
API limit: create a trio.Queue(10) and start a task along these lines:
async def limiter(queue):
while True:
await trio.sleep(0.2)
await queue.put(None)
Pass that queue to fetch, as another argument, and call await limit_queue.get() before each API call.
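A hedged sketch of how that limiter could be wired in, written with trio.open_memory_channel (since trio.Queue is gone from current trio, as the next answer notes) and reusing the s, url, headers, and networkIds names from the question's main(); the 0.2-second spacing approximates at most 5 calls per second:
async def limiter(send_channel, count):
    # Release one token every 0.2 s (roughly 5 per second), then close the channel.
    async with send_channel:
        for _ in range(count):
            await trio.sleep(0.2)
            await send_channel.send(None)

async def fetch(url, headers, results, i, tokens):
    await tokens.receive()  # wait for a rate-limit token before hitting the API
    response = await s.get(url, headers=headers)
    results[i] = response

# Inside the question's main(), replacing the original nursery block:
results = {}
send_channel, receive_channel = trio.open_memory_channel(10)
async with trio.open_nursery() as nursery:
    nursery.start_soon(limiter, send_channel, len(networkIds))
    for i in networkIds:
        nursery.start_soon(fetch, url.format(i), headers, results, i, receive_channel)
# results is fully populated once the nursery exits, one response per network id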
Based on this answer, you can define the following function:
async def gather(*tasks):
async def collect(index, task, results):
task_func, *task_args = task
results[index] = await task_func(*task_args)
results = {}
async with trio.open_nursery() as nursery:
for index, task in enumerate(tasks):
nursery.start_soon(collect, index, task, results)
return [results[i] for i in range(len(tasks))]
You can then use trio in the exact same way as asyncio by simply patching trio (adding the gather function):
import trio
trio.gather = gather
Here is a practical example:
async def child(x):
print(f"Child sleeping {x}")
await trio.sleep(x)
return 2*x
async def parent():
tasks = [(child, t) for t in range(3)]
return await trio.gather(*tasks)
print("results:", trio.run(parent))
Technically, trio.Queue has been deprecated in trio 0.9. It has been replaced by trio.open_memory_channel.
Short example:
sender, receiver = trio.open_memory_channel(len(networkIds))
async with trio.open_nursery() as nursery:
for i in networkIds:
nursery.start_soon(fetch, sender, url.format(i), headers)
async for value in receiver:
# Do your job here
pass
And in your fetch function you should call await sender.send(value) somewhere, and close the sender once everything has been sent so that the async for loop terminates.
When I run nursery.start_soon(fetch, ...), I am printing data within fetch, but how do I return the data? I didn't see anything similar to the asyncio.gather(*tasks) function.
You're asking two different questions, so I'll just answer this one. Matthias already answered your other question.
When you call start_soon(), you are asking Trio to run the task in the background and then keep going. This is why Trio is able to run fetch() several times concurrently. But because Trio keeps going, there is no way to "return" the result the way a Python function normally would. Where would it even return to?
You can use a queue to let fetch() tasks send results to another task for additional processing.
To create a queue:
response_queue = trio.Queue()
When you start your fetch tasks, pass the queue as an argument, and send a sentinel to the queue when you're done:
async with trio.open_nursery() as nursery:
for i in networkIds:
nursery.start_soon(fetch, url.format(i), headers, response_queue)
await response_queue.put(None)
After you download a URL, put the response into the queue:
async def fetch(url, headers, response_queue):
print("Start: ", url)
response = await s.get(url, headers=headers)
# Add responses to queue
await response_queue.put(response)
print("Finished: ", url, len(response.content), response.status_code)
With the changes above, your fetch tasks will put responses into the queue. Now you need to read responses from the queue so you can process them. You might add a new function to do this:
async def process(response_queue):
async for response in response_queue:
if response is None:
break
# Do whatever processing you want here.
You should start this process function as a background task before you start any fetch tasks so that it will process responses as soon as they are received.
Read more in the Synchronizing and Communicating Between Tasks section of the Trio documentation.
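Putting this answer's pieces together, here is a hedged end-to-end sketch. It is written with trio.open_memory_channel rather than trio.Queue (which no longer exists in current trio, as noted above), and the processing step is a placeholder:
import asks
import trio

asks.init("trio")
s = asks.Session(connections=4)

async def fetch(url, headers, send_channel):
    response = await s.get(url, headers=headers)
    await send_channel.send(response)  # hand the response to the processing task

async def process(receive_channel):
    async for response in receive_channel:
        if response is None:  # sentinel: no more responses are coming
            break
        print(len(response.content), response.status_code)  # placeholder processing

async def main():
    api_key = 'API-KEY'
    networkIds = ['id1', 'id2', 'idn']
    url = 'https://api.meraki.com/api/v0/networks/{0}/airMarshal?timespan=3600'
    headers = {'X-Cisco-Meraki-API-Key': api_key, 'Content-Type': 'application/json'}
    send_channel, receive_channel = trio.open_memory_channel(len(networkIds) + 1)
    async with trio.open_nursery() as nursery:
        nursery.start_soon(process, receive_channel)  # start the consumer first
        async with trio.open_nursery() as fetch_nursery:
            for i in networkIds:
                fetch_nursery.start_soon(fetch, url.format(i), headers, send_channel)
        await send_channel.send(None)  # all fetches are done; send the sentinel

trio.run(main)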