From futures to asyncio - python-3.x

I am puzzled trying to get the following to work with asyncio.
The code snippet below queries a number of devices (via SNMP) and returns a dictionary. It works fine, but is limited by multiprocessing.cpu_count():
def do_polling(netelement, snmp_comm):
    msg = {}
    msg.update({
        'bgp' : do_lookup_bgp(netelement, snmp_comm),
        'iface' : do_lookup_iface(netelement, snmp_comm),
        'ifidx' : do_lookup_ifindex(netelement, snmp_comm),
        'agg' : do_lookup_agg(netelement, snmp_comm),
    })
    return msg

def save(netelement, job):
    data[netelement] = job.result()

with concurrent.futures.ProcessPoolExecutor(max_workers=multiprocessing.cpu_count()) as executor:
    for k, v in INFO['dev'].items():
        job = executor.submit(do_polling, k, v['snmp_comm'])
        job.add_done_callback(functools.partial(save, k))
So I would like to migrate to the asyncio approach by changing it like this:
@asyncio.coroutine
def do_polling(netelement, snmp_comm):
    msg = {}
    msg['bgp'] = yield from do_lookup_bgp(netelement, snmp_comm)
    msg['iface'] = yield from do_lookup_iface(netelement, snmp_comm)
    msg['ifidx'] = yield from do_lookup_ifindex(netelement, snmp_comm)
    msg['agg'] = yield from do_lookup_agg(netelement, snmp_comm)

@asyncio.coroutine
def schedule(INFO):
    for k, v in INFO['dev'].items():
        asyncio.async(do_polling(k, v))

asyncio.get_event_loop().run_until_complete(schedule)
But I am getting the following error:
Traceback (most recent call last):
File "/home/app/ip-spotlight/code/ixmac.py", line 60, in <module>
main()
File "/home/app/ip-spotlight/code/ixmac.py", line 16, in main
app.ixmac.initialize.run(INFO)
File "/home/app/ip-spotlight/code/app/ixmac/initialize.py", line 191, in run
asyncio.get_event_loop().run_until_complete(schedule)
File "/usr/lib64/python3.4/asyncio/base_events.py", line 353, in run_until_complete
future = tasks.ensure_future(future, loop=self)
File "/usr/lib64/python3.4/asyncio/tasks.py", line 553, in ensure_future
raise TypeError('A Future, a coroutine or an awaitable is required')
TypeError: A Future, a coroutine or an awaitable is required
Could you please advise what I am doing wrong?

You are not calling the coroutine: run_until_complete is handed the function object schedule itself, which is neither a Future, a coroutine, nor an awaitable, hence the TypeError. Change the last line to:

asyncio.get_event_loop().run_until_complete(schedule(the_info_variable))
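Two further points worth flagging, sketched here under the assumption that the do_lookup_* helpers are themselves coroutines: do_polling never returns msg, and schedule fires tasks with asyncio.async() without waiting for them, so the loop may stop before any polling finishes. Gathering the tasks fixes both (note the original loop also passed v where v['snmp_comm'] was presumably meant):

import asyncio

@asyncio.coroutine
def do_polling(netelement, snmp_comm):
    msg = {}
    msg['bgp'] = yield from do_lookup_bgp(netelement, snmp_comm)
    msg['iface'] = yield from do_lookup_iface(netelement, snmp_comm)
    msg['ifidx'] = yield from do_lookup_ifindex(netelement, snmp_comm)
    msg['agg'] = yield from do_lookup_agg(netelement, snmp_comm)
    return msg  # without this, the polled data is lost

@asyncio.coroutine
def schedule(INFO):
    # one task per device, then wait for all of them before the loop stops
    tasks = {k: asyncio.async(do_polling(k, v['snmp_comm']))
             for k, v in INFO['dev'].items()}
    yield from asyncio.gather(*tasks.values())
    return {k: t.result() for k, t in tasks.items()}

data = asyncio.get_event_loop().run_until_complete(schedule(INFO))

If the do_lookup_* functions are ordinary blocking SNMP calls rather than coroutines, asyncio alone will not run them concurrently; they would have to be handed to a thread or process pool via loop.run_in_executor().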

Related

Is there any way of getting values from keys inside other keys?

(First post, sorry if I do this wrong.) I am making a Discord bot for me and my friends using discord.py, and I've come across this problem: I need to get values from keys INSIDE OTHER keys. How do I do this?
I've tried changing res to res.text, res.json and res.content, and I could only get at "data", but not "id", "name" or "description", which I need.
import discord
from discord.ext.commands import Bot
from discord.ext import commands
import requests, json
import asyncio

Client = discord.Client()
client = commands.Bot(command_prefix='?')

@client.event
async def on_ready():
    print('started')

@client.command()
async def findfriends(ctx, userid):
    res = requests.get("https://friends.roblox.com/v1/users/"+userid+"/friends")
    var = json.loads(res.text)
    def a(a):
        ID = a['id']
        return ID
    def b(b):
        Name = b['name']
        return Name
    def c(c):
        description = c['description']
        return description
    data = var['data'] #I can get this working
    print(data)
    #cv = data['name'] #but this wont work
    #id = a(var) #nor this
    #name = b(var) #nor this
    #desc = c(var) #nor this
    #await ctx.send("\nID: " + id + "\nName: " + name + "\nDesc: " + desc) # this is just sending the message

client.run(BOT TOKEN HERE) #yes i did indeed add it but just for the question i removed it
As I said in the code, I can only get "data" working, not id, name or desc. For id, name and desc it just throws an error:
Ignoring exception in command findfriends:
Traceback (most recent call last):
File "C:\Users\Calculator\PycharmProjects\ryhrthrthrhrebnfbngfbfg\venv\lib\site-packages\discord\ext\commands\core.py", line 79, in wrapped
ret = await coro(*args, **kwargs)
File "C:/Users/Calculator/PycharmProjects/ryhrthrthrhrebnfbngfbfg/a.py", line 277, in findfriends
id = a(var) #nor this
File "C:/Users/Calculator/PycharmProjects/ryhrthrthrhrebnfbngfbfg/a.py", line 266, in a
ID = a['id']
KeyError: 'id'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "C:\Users\Calculator\PycharmProjects\ryhrthrthrhrebnfbngfbfg\venv\lib\site-packages\discord\ext\commands\bot.py", line 863, in invoke
await ctx.command.invoke(ctx)
File "C:\Users\Calculator\PycharmProjects\ryhrthrthrhrebnfbngfbfg\venv\lib\site-packages\discord\ext\commands\core.py", line 728, in invoke
await injected(*ctx.args, **ctx.kwargs)
File "C:\Users\Calculator\PycharmProjects\ryhrthrthrhrebnfbngfbfg\venv\lib\site-packages\discord\ext\commands\core.py", line 88, in wrapped
raise CommandInvokeError(exc) from exc
discord.ext.commands.errors.CommandInvokeError: Command raised an exception: KeyError: 'id'
and
Ignoring exception in command findfriends:
Traceback (most recent call last):
File "C:\Users\Calculator\PycharmProjects\ryhrthrthrhrebnfbngfbfg\venv\lib\site-packages\discord\ext\commands\core.py", line 79, in wrapped
ret = await coro(*args, **kwargs)
File "C:/Users/Calculator/PycharmProjects/ryhrthrthrhrebnfbngfbfg/a.py", line 274, in findfriends
data = var['data']['id'] #I can get this working
TypeError: list indices must be integers or slices, not str
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "C:\Users\Calculator\PycharmProjects\ryhrthrthrhrebnfbngfbfg\venv\lib\site-packages\discord\ext\commands\bot.py", line 863, in invoke
await ctx.command.invoke(ctx)
File "C:\Users\Calculator\PycharmProjects\ryhrthrthrhrebnfbngfbfg\venv\lib\site-packages\discord\ext\commands\core.py", line 728, in invoke
await injected(*ctx.args, **ctx.kwargs)
File "C:\Users\Calculator\PycharmProjects\ryhrthrthrhrebnfbngfbfg\venv\lib\site-packages\discord\ext\commands\core.py", line 88, in wrapped
raise CommandInvokeError(exc) from exc
discord.ext.commands.errors.CommandInvokeError: Command raised an exception: TypeError: list indices must be integers or slices, not str
The https://friends.roblox.com/v1/users/<userid>/friends endpoint returns a list of all the friends the user has, which can be of varying size.
With var = json.loads(res.text) you load the response text into a JSON object, which contains the key data; you access it with data = var['data']. That data variable now holds a list, which is why cv = data['name'] fails: lists are indexed by integers, not strings.
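For illustration, the decoded response has roughly this shape (the values here are invented, only the structure matters): data maps to a list of friend objects, each of which is a dict:

{
    "data": [
        {"id": 1234567, "name": "SomeFriend", "description": "hello"},
        {"id": 7654321, "name": "OtherFriend", "description": None},
    ]
}

So var['data'][0]['name'] would work, but var['data']['name'] cannot.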
You need to iterate over the list to collect the information on each friend. The code below walks the list, pulls the fields for each item, and sends one message with everything once it has gone through all items.
import discord
from discord.ext.commands import Bot
from discord.ext import commands
import requests, json
import asyncio

client = commands.Bot(command_prefix='?')

@client.event
async def on_ready():
    print('started')

@client.command()
async def findfriends(ctx, userid):
    res = requests.get("https://friends.roblox.com/v1/users/" + userid + "/friends")
    var = json.loads(res.text)
    data = var['data']
    print(data)
    friends_msg = 'Friends information:'
    for friend in data:
        id = friend['id']
        name = friend['name']
        desc = friend['description']
        # id is an integer (and desc may be None), so convert before concatenating
        friends_msg = friends_msg + "\nID: " + str(id) + "\nName: " + name + "\nDesc: " + str(desc)
    await ctx.send(friends_msg)

client.run(BOT TOKEN HERE)
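One caveat, not from the original answer: Discord rejects messages longer than 2000 characters, and a large friends list can exceed that, so you may need to chunk the output:

for i in range(0, len(friends_msg), 2000):
    # send the accumulated text in 2000-character slices
    await ctx.send(friends_msg[i:i + 2000])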

KeyboardInterrupt during asyncio.run() raises CancelledError and the code keeps running

I have inspected this SO question on how to gracefully close out the asyncio process. However, when I apply it to my code:
async def ob_main(product_id: str, freq: int) -> None:
    assert freq >= 1, f'The minimum frequency is 1s. Adjust your value: {freq}.'
    save_loc = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data', 'ob', product_id)
    while True:
        close = False
        try:
            full_save_path = create_save_location(save_loc)
            file = open(full_save_path, 'a', encoding='utf-8')
            await ob_collector(product_id, file)
            await asyncio.sleep(freq)
        except KeyboardInterrupt:
            close = True
            task.cancel()
            loop.run_forever()
            task.exception()
        except:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            error_msg = repr(traceback.format_exception(exc_type, exc_value, exc_traceback))
            print(error_msg)
            logger.warning(f'[1]-Error encountered collecting ob data: {error_msg}')
        finally:
            if close:
                loop.close()
                cwow()
                exit(0)
I get the following traceback printed in the terminal:
^C['Traceback (most recent call last):\n', ' File "/anaconda3/lib/python3.7/asyncio/runners.py", line 43, in run\n return loop.run_until_complete(main)\n', ' File "/anaconda3/lib/python3.7/asyncio/base_events.py", line 555, in run_until_complete\n self.run_forever()\n', ' File "/anaconda3/lib/python3.7/asyncio/base_events.py", line 523, in run_forever\n self._run_once()\n', ' File "/anaconda3/lib/python3.7/asyncio/base_events.py", line 1722, in _run_once\n event_list = self._selector.select(timeout)\n', ' File "/anaconda3/lib/python3.7/selectors.py", line 558, in select\n kev_list = self._selector.control(None, max_ev, timeout)\n', 'KeyboardInterrupt\n', '\nDuring handling of the above exception, another exception occurred:\n\n', 'Traceback (most recent call last):\n', ' File "coinbase-collector.py", line 98, in ob_main\n await asyncio.sleep(freq)\n', ' File "/anaconda3/lib/python3.7/asyncio/tasks.py", line 564, in sleep\n return await future\n', 'concurrent.futures._base.CancelledError\n']
and the code keeps running.
task and loop are variables in the global scope, defined in __main__:
loop = asyncio.get_event_loop()
task = asyncio.run(ob_main(args.p, 10))
Applying this question's method solves the issue. So in the above case:
try:
    loop.run_until_complete(ob_main(args.p, 10))
except KeyboardInterrupt:
    cwow()
    exit(0)
However, I do not understand why that works.
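No answer is included above, so a plausible explanation, for what it's worth: Ctrl-C raises KeyboardInterrupt in the main thread while the event loop is blocked in its selector, never inside your coroutine, so the except KeyboardInterrupt branch in ob_main cannot fire (and task.cancel() would not help anyway: asyncio.run() blocks and returns the coroutine's result, not a Task). On Ctrl-C, asyncio.run cancels the still-running task during its shutdown, which surfaces as CancelledError at await asyncio.sleep(freq); the bare except: clause then swallows it, prints it, and lets while True continue, which is exactly the behaviour observed. Catching KeyboardInterrupt around run_until_complete works because the exception propagates out of the loop itself. A sketch of that pattern for Python 3.7 (where the traceback comes from), with the except narrowed so cancellation is no longer swallowed:

from concurrent.futures import CancelledError  # asyncio cancellation on 3.7

async def ob_main(product_id: str, freq: int) -> None:
    while True:
        try:
            ...  # collect and sleep as before
        except CancelledError:
            raise  # let cancellation propagate instead of logging it
        except Exception as e:
            logger.warning(f'[1]-Error encountered collecting ob data: {e!r}')

loop = asyncio.get_event_loop()
try:
    loop.run_until_complete(ob_main(args.p, 10))
except KeyboardInterrupt:
    cwow()  # Ctrl-C propagates out of the event loop and is catchable here
    exit(0)
finally:
    loop.close()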

Python - Multiprocessing Pool map returning can't pickle error

I have the following code, which creates a TestRail client and executes TestRail's GET_SUITES API call.
I have a function that calls the GET_SUITES API, and I am passing the TestRail client and test_rail_project_id as params.
I am trying to use multiprocessing to execute over my list of projects to speed things up, but I am getting a can't-pickle error.
My code:
from itertools import product
from multiprocessing import Pool  # needed for Pool below

def get_suites(client, project_id):
    try:
        path = 'get_suites/{projectid}'.format(projectid=project_id)
        test_rail_response = client.send_get(path)
        return test_rail_response
    except Exception as e:
        raise Exception(str(e))

if __name__ == "__main__":
    testRailClient = APIClient(TESTRAIL_URL)
    pool = Pool(2)
    all_project_ids = [100, 200, 300]
    data = pool.starmap(get_suites, product([testRailClient], all_project_ids))
Error stack:
Traceback (most recent call last):
File "main.py", line 57, in <module>
data = pool.starmap(testrailapi.get_suites, product([testRailClient], all_project_ids))
File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/multiprocessing/pool.py", line 274, in starmap
return self._map_async(func, iterable, starmapstar, chunksize).get()
File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/multiprocessing/pool.py", line 644, in get
raise self._value
File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/multiprocessing/pool.py", line 424, in _handle_tasks
put(task)
File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/multiprocessing/connection.py", line 206, in send
self._send_bytes(_ForkingPickler.dumps(obj))
File "/usr/local/Cellar/python/3.6.5/Frameworks/Python.framework/Versions/3.6/lib/python3.6/multiprocessing/reduction.py", line 51, in dumps
cls(buf, protocol).dump(obj)
TypeError: can't pickle SSLContext objects
Any suggestions, please?
Thank you.
PS: I am using Python 3.6.
UPDATE:
As suggested, I tried removing the API client as a parameter and it worked, but I get the same error when get_suites is a method. Please see my updated code below:
class TestRailExecution:
    def __init__(self, url, username, password):
        self.url = url
        self.username = username
        self.password = password
        self.client = APIClient(self.url)
        self.client.user = username
        self.client.password = password

    def get_suites(self, project_id):
        try:
            path = 'get_suites/{projectid}'.format(projectid=project_id)
            test_rail_response = self.client.send_get(path)
            return test_rail_response
        except Exception as e:
            raise Exception(str(e))

if __name__ == "__main__":
    testRailClient = TestRailExecution(TESTRAIL_URL, user, password)
    pool = Pool(2)
    # the bound method drags the whole client (and its SSLContext) into the pickle
    data = pool.map(testRailClient.get_suites, [100, 200, 300])
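No answer is attached here, but the usual way out (a sketch, not from the original thread) is to stop shipping the client across the process boundary altogether: a bound method like testRailClient.get_suites pickles its instance, and the instance owns an HTTPS connection whose SSLContext cannot be pickled. Creating one client per worker process via the Pool initializer sidesteps that, so only the picklable project IDs travel between processes:

from multiprocessing import Pool

worker = None  # one TestRailExecution instance per worker process

def init_worker(url, username, password):
    global worker
    worker = TestRailExecution(url, username, password)

def get_suites_in_worker(project_id):
    # runs in the worker, using that worker's own client
    return worker.get_suites(project_id)

if __name__ == "__main__":
    with Pool(2, initializer=init_worker, initargs=(TESTRAIL_URL, user, password)) as pool:
        data = pool.map(get_suites_in_worker, [100, 200, 300])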

Celery Workflow Error: TypeError: unsupported operand type(s) for |: 'AsyncResult' and 'AsyncResult'

I am quite a novice with celery and its fundamentals, so please forgive my ignorance. I am trying to design a workflow as follows:
import os
from celery import Celery, group, chord, chain

class CeleryConfig:
    """
    Configuration for Celery
    """
    broker_url = os.environ.get('CELERY_BROKER_URL', 'mongodb://localhost:9001/jobs')
    result_backend = os.environ.get('CELERY_RESULT_BACKEND', 'mongodb://localhost:9001/')
    celery_mongodb_backend_settings = {
        "database": "results",
        "taskmeta_collection": "test",
    }
    enable_utc = True
    timezone = "UTC"
    celery_imports = ('test',)
    task_always_eager = os.environ.get('CELERY_ALWAYS_EAGER', False)
    task_eager_propagates = os.environ.get('CELERY_EAGER_PROPAGATES', False)
    task_serializer = 'json'

celery_app = Celery("test")
celery_app.config_from_object(CeleryConfig)

@celery_app.task(bind=True)
def dummy_task(self, *args, **kwargs):
    return True

@celery_app.task(bind=True)
def a(self, pagination, *args, **kwargs):
    return pagination[0] + pagination[1]

@celery_app.task(bind=True)
def b(self, pagination, *args, **kwargs):
    return pagination[0] + pagination[1]

@celery_app.task(bind=True)
def workflow(self):
    batch_size = 10
    flow_a = chord(group(a.s((k, batch_size), max_retries=None) for k in range(5)), dummy_task.s())()
    flow_b = chord(group(b.si((k, batch_size), immutable=True, max_retries=None) for k in range(5)), dummy_task.s())()
    r = chain(flow_a, flow_b)()
    return r

task = workflow.apply_async()
I use celery -A test worker -l info to run the celery worker. When I try to execute the above script I get the following error:
[2018-07-20 22:51:11,221: ERROR/ForkPoolWorker-1] Task test.workflow[aaea266e-bd5a-4414-a42f-83ccb5d16145] raised unexpected: TypeError("unsupported operand type(s) for |: 'AsyncResult' and 'AsyncResult'",)
Traceback (most recent call last):
File "/Users/sohaibfarooqi/projects/code/env/lib/python3.5/site-packages/celery/app/trace.py", line 382, in trace_task
R = retval = fun(*args, **kwargs)
File "/Users/sohaibfarooqi/projects/code/env/lib/python3.5/site-packages/celery/app/trace.py", line 641, in __protected_call__
return self.run(*args, **kwargs)
File "/Users/sohaibfarooqi/projects/code/test.py", line 42, in workflow
r = chain(flow_a, flow_b)()
File "/Users/sohaibfarooqi/projects/code/env/lib/python3.5/site-packages/celery/canvas.py", line 784, in __new__
return reduce(operator.or_, tasks)
TypeError: unsupported operand type(s) for |: 'AsyncResult' and 'AsyncResult'
Although this error is raised, the tasks execute fine. Can anyone suggest what I am doing wrong and point me in the right direction?
Celery Version: 4.2.0
Python Version: 3.5.0
I was getting a similar error until I did something like:
chain(task1.si(param1, param2), task2.si(paramX))()
If you use si in place of s, the signature is immutable and the result of the first task is not passed to the next one.
More on this: http://docs.celeryproject.org/en/latest/userguide/canvas.html#immutability
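That covers argument passing between chained tasks, but the TypeError in the question has a different root: the trailing () on each chord executes it immediately, so flow_a and flow_b are AsyncResult objects by the time chain() tries to combine them with the | operator. A sketch of the repair, based only on the code above: build the chord signatures without calling them and let the chain launch everything (note too that max_retries=None and immutable=True in a.s(...)/b.si(...) were being passed to the tasks as ordinary keyword arguments, not as options):

@celery_app.task(bind=True)
def workflow(self):
    batch_size = 10
    # signatures, not results: no trailing () here
    flow_a = chord(group(a.s((k, batch_size)) for k in range(5)), dummy_task.s())
    flow_b = chord(group(b.si((k, batch_size)) for k in range(5)), dummy_task.si())
    return chain(flow_a, flow_b).apply_async()

Chords inside chains have their own subtleties in Celery 4.x, so treat this as a starting point rather than a drop-in fix.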

AttributeError: 'module' object has no attribute 'ensure_future'

Hi, I am writing a network-bound server application using Python asyncio which accepts a POST request.
In the POST request I accept a symbols parameter.
Please tell me the best way to deal with a network-bound application where I am collecting data from other web APIs by sending POST requests to them.
Following is the code:
import asyncio
import aiohttp
import json
import logging

# async def fetch_content(url, symbols):
#     yield from aiohttp.post(url, symbols=symbols)

@asyncio.coroutine
def fetch_page(writer, url, data):
    response = yield from aiohttp.post(url, data=data)
    resp = yield from response.read_and_close()
    print(resp)
    writer.write(resp)
    return

@asyncio.coroutine
def process_payload(writer, data, scale):
    tasks = []
    data = data.split('\r\n\r\n')[1]
    data = data.split('\n')
    data = [x.split(':') for x in data]
    print(data)
    data = {x[0]: x[1] for x in data}
    print(data)
    # data = data[0].split(':')[1]
    data = data['symbols']
    print(data)
    data = data.split(',')
    data_len = len(data)
    data_first = 0
    data_last = scale
    url = 'http://xxxxxx.xxxxxx.xxx/xxxx/xxxx'
    while data_last < data_len:
        tasks.append(asyncio.ensure_future(fetch_page(writer, url, {'symbols': ",".join(data[data_first:data_last])})))
        data_first += scale
        data_last += scale
    tasks.append(asyncio.ensure_future(fetch_page(writer, url, {'symbols': ",".join(data[data_first:data_last])})))
    loop.run_until_complete(tasks)
    return

@asyncio.coroutine
def process_url(url):
    pass

@asyncio.coroutine
def echo_server():
    yield from asyncio.start_server(handle_connection, 'xxxxxx.xxxx.xxx', 3000)

@asyncio.coroutine
def handle_connection(reader, writer):
    data = yield from reader.read(8192)
    if data:
        message = data.decode('utf-8')
        print(message)
        yield from process_payload(writer, message, 400)
    writer.write_eof()
    writer.close()

#url = 'http://XXXXXXX.xxxxx.xxx/xxxx/xxxxxx/xxx'
data = {'symbols': 'GD-US,14174T10,04523Y10,88739910,03209R10,46071F10,77543110,92847N10'}
loop = asyncio.get_event_loop()
loop.run_until_complete(echo_server())
try:
    loop.run_forever()
finally:
    loop.close()
But I am receiving the following error:
future: <Task finished coro=<handle_connection() done, defined at fql_server_async_v2.py:53> exception=AttributeError("'module' object has no attribute 'ensure_future'",)>
Traceback (most recent call last):
File "/home/user/anugupta/lib/python3.4/asyncio/tasks.py", line 234, in _step
result = coro.send(value)
File "fql_server_async_v2.py", line 60, in handle_connection
yield from process_payload(writer, message, 400)
File "/home/user/anugupta/lib/python3.4/asyncio/coroutines.py", line 141, in coro
res = func(*args, **kw)
File "fql_server_async_v2.py", line 41, in process_payload
tasks.append(asyncio.ensure_future(fetch_page(writer, url, {'symbols':",".join(data[data_first:data_last])})))
AttributeError: 'module' object has no attribute 'ensure_future'
^CTraceback (most recent call last):
File "fql_server_async_v2.py", line 72, in <module>
loop.run_forever()
File "/home/user/anugupta/lib/python3.4/asyncio/base_events.py", line 236, in run_forever
self._run_once()
File "/home/user/anugupta/lib/python3.4/asyncio/base_events.py", line 1017, in _run_once
event_list = self._selector.select(timeout)
File "/home/user/anugupta/lib/python3.4/selectors.py", line 424, in select
fd_event_list = self._epoll.poll(timeout, max_ev)
ensure_future was added in asyncio 3.4.4; use async for earlier versions.
While async is deprecated now, it will be supported for the foreseeable future.
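If the same code has to run on both older and newer interpreters, note that from Python 3.7 onward async is a keyword, so asyncio.async cannot even be written in source form there. A version-agnostic fallback (a sketch) is to look the function up with getattr:

import asyncio

# prefer ensure_future (asyncio >= 3.4.4); fall back to the old async()
ensure_future = getattr(asyncio, 'ensure_future', None) or getattr(asyncio, 'async')

# then, inside process_payload:
# tasks.append(ensure_future(fetch_page(writer, url, {'symbols': ...})))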
