Python 3.10.6
ManagerTask() is responsible for executing Task() methods.
Note that Task's methods are actually Celery tasks (async).
I'd like to add the option to track the execution of those tasks inside the ManagerTask class.
I managed to get it working, but as this is my first asyncio code I'm not sure I'm doing it right
(I'm aware of Flower).
Second, currently a single Main() orchestrates the execution of Task();
in the real world I need to expand it to support running multiple sessions of Main() in parallel.
class ManagerTask:
    def __init__(self, id: int) -> None:
        self.id = id
        self.tasks: List["Task"] = []
        self.executed_tasks: List["Task"] = []
        self.state = State.SCHEDULED

    def load_tasks(self, configs: List[Dict]):
        # code for loading tasks
        ...

    async def run(self):
        """Execute all tasks and start monitoring the results."""
        execute_task = asyncio.create_task(self.task_exec())
        monitor_progress = asyncio.create_task(self.monitor_progress())
        await execute_task
        await monitor_progress

    async def monitor_progress(self):
        """If any task failed, mark state FAILED; if all succeeded, mark state FINISHED."""
        failure_flag = False
        count_success = 0
        i = 0
        while True:
            state = self.executed_tasks[i].task_run.state
            if state in ('SUCCESS', 'FAILURE'):
                if state == 'SUCCESS':
                    count_success += 1
                else:
                    failure_flag = True
                # print the status before advancing the index, otherwise the
                # last task would raise an IndexError
                print(f'Rule_ID:{self.executed_tasks[i].rule_id}'
                      f'\nCelery_UUID:{self.executed_tasks[i].task_celery_id}'
                      f'\nstatus - {state}')
                i += 1
            # all tasks processed (either failed or succeeded)
            if i == len(self.executed_tasks):
                if failure_flag:
                    self.state = State.FAILED
                elif count_success == len(self.executed_tasks):
                    self.state = State.FINISHED
                break
            # otherwise wait before polling again
            await asyncio.sleep(3)

    async def task_exec(self):
        for task in self.tasks:
            task.run()
client (executes the app):

...code
cust = MainTask(id=1)
cust.load_tasks(configs=rules, db_connections=db_connections)
asyncio.run(cust.run())
print("MainTask State:" + cust.state)
Example of output:
Rule_ID:4
Celery_UUID:fecde27c-b58a-43cd-9498-3478404c248b
status - FAILURE
....
Rule_ID:6
Celery_UUID:85df9bba-3d75-4b00-a533-a81cd3f6afb3
status - SUCCESS
MainTask State:Failed
1. Is this the proper way to execute asyncio?
2. For running multiple MainTask sessions, how should I do it: threads or asyncio?
As this program executes all tasks using Celery, I think I should also run it with asyncio, but I'm not sure.
Second, I'd be thankful for guidance on whether this is the right approach:
async def exec_id(id):
    cust = MainTask(id=id)
    cust.load_tasks(configs=rules, ...)
    await cust.run()

async def main():
    ids = [111, 222, 333]
    for id in ids:
        await exec_id(id)

asyncio.run(main())
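Note that awaiting exec_id(id) inside the for loop runs the sessions one after another, not in parallel. If the goal is concurrent sessions, a minimal sketch using asyncio.gather (same names as above; an assumption about the intent, not tested against the real Task/Celery setup):

async def main():
    ids = [111, 222, 333]
    # gather() schedules all sessions concurrently and waits for all of them
    await asyncio.gather(*(exec_id(id) for id in ids))

asyncio.run(main())

Since the Celery workers do the heavy lifting, asyncio rather than threads is a reasonable fit here: each MainTask session spends most of its time awaiting poll/sleep calls.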
I am using the Python multicall library (https://github.com/banteg/multicall.py) to get ERC20 balances for multiple wallet addresses at once, with multiprocessing.
When the process starts, multicall returns the result in less than 1 second, but once the process has been running for hours it starts returning results in more than 1 minute, sometimes up to 300 seconds.
Can anyone explain the reason behind this latency growing over time?
Below is a code sample:
from multicall import Call, Multicall
from web3 import Web3
from web3.middleware import geth_poa_middleware

block_number = 11374651
GET_BALANCE_FN = "balanceOf(address)(uint256)"

def call_back_obj(success, value):
    """
    Callback to process results from multicall for a function.
    If the call fails, returns False (if changed to a string it throws an error).
    """
    if success is True and type(value) == bytes:
        return value.decode("utf-8")
    elif success is True:
        return value
    else:
        return False

def get_instance():
    web3_instance = Web3(
        Web3.HTTPProvider(
            node_provider,
            request_kwargs={"timeout": 10},
        )
    )
    web3_instance.middleware_onion.inject(geth_poa_middleware, layer=0)
    return web3_instance

w3 = get_instance()

def token_balance_handler(addresses, block_number=None):
    calls = []
    for address_map in addresses:
        contract_addr = address_map.get("tokenAddress")
        wallet_addr = address_map.get("walletAddress")
        calls.append(
            Call(
                contract_addr,
                [GET_BALANCE_FN, wallet_addr],
                [[f"{wallet_addr}-{contract_addr}", call_back_obj]],
            )
        )
    return Multicall(
        calls, _w3=w3, block_id=block_number, require_success=False
    )

print(token_balance_handler(addresses, block_number)())
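One way to narrow down whether the slowdown is in the RPC round-trip itself is to time each Multicall invocation. A minimal diagnostic sketch (timed_multicall is a hypothetical helper of mine, not part of the multicall library):

import time

def timed_multicall(mc):
    # Time a single Multicall round-trip; if this number grows over the hours,
    # the latency is in the RPC call, not in the surrounding multiprocessing code.
    start = time.monotonic()
    result = mc()
    print(f"multicall round-trip: {time.monotonic() - start:.2f}s for {len(result)} entries")
    return result

timed_multicall(token_balance_handler(addresses, block_number))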
I am using the following function to implement a program that changes its behavior depending on the IP of the connected PC.
The problem with this function is that if something tries to log in and fails, it may pick up the IP of the failed connection.
Now that we've encountered that possibility, the program is broken.
What edits do I need to make so that this function behaves as expected?
import psutil

def get_ip(port=3389):
    ip = ""
    for x in psutil.net_connections():
        if x.status == "ESTABLISHED" and x.laddr.port == port:
            ip = x.raddr.ip
            break
    return ip
I changed the function based on Bijay Regmi's comment, thank you. wmi was difficult for me, so I used win32evtlog to read the log out little by little. I am gradually improving readability and finding bugs.
import xml.etree.ElementTree as ET
from datetime import datetime, timedelta, timezone
from typing import Callable, Optional

import win32evtlog

def systime(xml):
    # trim the 7-digit fractional seconds and trailing 'Z' so fromisoformat can parse it
    return datetime.fromisoformat(xml.find(f'{ns}System/{ns}TimeCreated').get('SystemTime')[:-2] + "+00:00")

def last_event(handle,
               event_id,
               condition: Callable[['Event'], bool] = None) -> Optional['Event']:
    now = datetime.now(tz=timezone.utc)
    while True:
        events = win32evtlog.EvtNext(handle, 20)
        if not events:
            break
        for event in events:
            xml_content = win32evtlog.EvtRender(event, win32evtlog.EvtRenderEventXml)
            obj = Event(ET.fromstring(xml_content))
            if obj.EventID == event_id:
                if obj.SystemTime + timedelta(minutes=5) < now:
                    return None
                if condition and not condition(obj):
                    continue
                return obj

class Event:
    def __init__(self, xml: ET.Element):
        self.EventID = xml and xml.find(f'{ns}System/{ns}EventID').text
        self.SystemTime = xml and systime(xml)
        self.xml = xml
        if self.EventID == '24':
            self.IpAddress = xml.find(f'{ns}UserData/{Event_NS}EventXML/{Event_NS}Address').text
        elif self.EventID == '4624':
            self.IpAddress = xml.find(f'{ns}EventData/{ns}Data[@Name="IpAddress"]').text
        else:
            self.IpAddress = None
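For context, a sketch of how last_event() might be driven; the channel name and query flags are my assumptions, not part of the original code:

# Open the Security channel newest-first and look for the most recent
# successful logon (event 4624) within the 5-minute window above.
handle = win32evtlog.EvtQuery(
    'Security',
    win32evtlog.EvtQueryChannelPath | win32evtlog.EvtQueryReverseDirection,
)
event = last_event(handle, '4624')
print(event.IpAddress if event else "no recent logon")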
The problem I am having is that:
To start an async function in the background I need an asyncio event loop.
This event loop usually exists in the main thread and, when started, blocks the execution of that thread (i.e. lines of code after starting the event loop aren't run until the event loop is cancelled).
However, ROS2 has its own event loop (executor) that also usually runs in the main thread and blocks execution. This means it is difficult to have both event loops running.
My attempted solution was to start the asyncio event loop in a separate thread. The thread is started in the Node constructor and stops after the Node is destroyed.
This looks like this:
import asyncio
import threading
import time
from asyncio import Task
from typing import Awaitable, Optional

import rclpy
from rclpy.executors import MultiThreadedExecutor
from rclpy.node import Node
from std_srvs.srv import Trigger

class IncrementPercentDoneServiceNode(Node):
    def __create_task(self, f: Awaitable):
        self.__task = self.__loop.create_task(f)

    def __init__(self):
        super().__init__('increment_percent_done_service_node')
        self.__loop = asyncio.new_event_loop()
        self.__task: Optional[Task] = None
        self.__thread = threading.Thread(target=self.__loop.run_forever)
        self.__thread.start()
        self.done = False
        self.create_service(Trigger, 'start_incrementing',
            callback=lambda request, response: (
                self.get_logger().info("Starting service"),
                self.__loop.call_soon_threadsafe(self.__create_task, self.__increment_percent_complete()),
                Trigger.Response(success=True, message='')
            )[-1]
        )

    def __del__(self):
        print("stopping loop")
        self.done = True
        if self.__task is not None:
            # Task.cancel() is not thread-safe; hop onto the loop's thread
            self.__loop.call_soon_threadsafe(self.__task.cancel)
        # loop.stop() must likewise be called from the loop's own thread
        self.__loop.call_soon_threadsafe(self.__loop.stop)
        self.__thread.join()

    async def __increment_percent_complete(self):
        timeout_start = time.time()
        duration = 5
        while time.time() < (timeout_start + duration):
            time_since_start = time.time() - timeout_start
            percent_complete = (time_since_start / duration) * 100.0
            self.get_logger().info("Percent complete: {}%".format(percent_complete))
            await asyncio.sleep(0.5)
        self.get_logger().info("leaving async function")
        self.done = True

if __name__ == '__main__':
    rclpy.init()
    test = IncrementPercentDoneServiceNode()
    e = MultiThreadedExecutor()
    e.add_node(test)
    e.spin()
Is this a sensible way to do it? Is there a better way? How would I cancel the start_incrementing service with another service? (I know that this is what actions are for, but I cannot use them in this instance).
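On the cancellation question, one option under the same structure is a second service that marshals cancel() onto the loop's thread, the same way the task was created. A sketch with a hypothetical stop_incrementing service (to be added inside __init__; this is my assumption, not from the original code):

self.create_service(Trigger, 'stop_incrementing',
    callback=lambda request, response: (
        # Task.cancel() is not thread-safe, so hop onto the loop's thread first
        self.__loop.call_soon_threadsafe(
            lambda: self.__task.cancel() if self.__task else None),
        Trigger.Response(success=True, message='stopped'),
    )[-1]
)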
I'm trying to simulate processing in threads using asyncio.Queue. However, I'm struggling to turn the threaded processing simulation into an asynchronous loop.
In brief, my script: 1) receives processing requests over a websocket, 2) assigns each request to the requested queue (which simulates a thread), 3) runs the processing queues, which put responses into one shared response queue, and 4) the websocket takes the responses out of the shared queue one by one and sends them to the server.
Simplified version of my code:
# Initialize empty processing queues for the number of threads I want to simulate
processing_queues = [asyncio.Queue() for i in range(n_queues)]

# Initialize the shared response queue
response_q = asyncio.Queue()

# Set up a websocket context manager
async with websockets.connect(f"ws://{host}:{port}") as websocket:
    while True:
        # Read incoming requests
        message = await websocket.recv()
        # Parse message -> get request data and on which thread/queue to process it
        request_data, queue_no = parse_message(message)
        # Put the request data into the requested queue (imitating a thread)
        await processing_queues[queue_no].put(request_data)

        # THIS IS WHERE I THINK ASYNCHRONY BREAKS (AND I NEED HELP)
        # Do the processing in each imitated processing thread
        for proc_q in processing_queues:
            if not proc_q.empty():
                request_data = await proc_q.get()
                # do the processing
                response = process_data(request_data)
                # Add the response to the response queue
                await response_q.put(response)

        # Send responses back to the server
        if not response_q.empty():
            response_data = await response_q.get()
            await websocket.send(response_data)
From the output of the script, I deduced that 1) I seem to receive requests and send out responses asynchronously, and 2) processing in the queues does not happen asynchronously. Correct me if I'm wrong.
I was reading about create_task() in asyncio. Maybe that could be a way to solve my problem?
I'm open to any solution (even a hacky one).
P.S. I would just use threads from the threading library, but I need asyncio for the websockets library.
P.P.S. Here is a threaded version of my idea.
class ProcessingImitationThread(threading.Thread):
    def __init__(self, thread_id, request_q, response_q):
        threading.Thread.__init__(self)
        self.thread_id = thread_id
        self.request_q = request_q
        self.response_q = response_q

    def run(self):
        while True:
            try:
                # non-blocking get, so that Empty can actually be raised
                (x, request_id) = self.request_q.get_nowait()
            except Empty:
                time.sleep(0.2)
            else:
                if x == -1:
                    # EXIT CONDITION
                    break
                else:
                    sleep_time_for_x = count_imitation(x, state)
                    time.sleep(sleep_time_for_x)
                    self.response_q.put(request_id)
                    print(f"request {request_id} executed")

# Set up
processing_qs = [queue.Queue() for i in range(n_processes_simulated)]
response_q = queue.Queue()
processing_thread_handlers = []
for i in range(n_processes_simulated):
    # create and start a thread
    t = ProcessingImitationThread(i, processing_qs[i], response_q)
    processing_thread_handlers.append(t)
    t.start()

# Main loop
while True:
    # receive requests and assign them to the requested queue (so that its thread picks them up)
    if new_request:
        requested_process, x, request_id = parse(new_request)
        processing_qs[requested_process].put((x, request_id))
    ...
    # if there are any new responses, send them out to the server
    if response_q.qsize() > 0:
        request_id = response_q.get()
        # Networking: send to server
        ...

# Close down
...
EDIT: fixes small typos.
Your intuition that you need create_task is correct: create_task is the closest async equivalent of Thread.start, as it creates a task that runs in parallel (in the async sense) with whatever you are doing now.
You need separate coroutines that drain the respective queues, all running in parallel; something like this:
async def main():
    processing_qs = [asyncio.Queue() for i in range(n_queues)]
    response_q = asyncio.Queue()
    async with websockets.connect(f"ws://{host}:{port}") as websocket:
        processing_tasks = [
            asyncio.create_task(processing(processing_q, response_q))
            for processing_q in processing_qs
        ]
        response_task = asyncio.create_task(
            send_responses(websocket, response_q))
        while True:
            message = await websocket.recv()
            requested_process, x, request_id = parse(message)
            await processing_qs[requested_process].put((x, request_id))

async def processing(processing_q, response_q):
    while True:
        x, request_id = await processing_q.get()
        # ... create response ...
        await response_q.put(response)

async def send_responses(websocket, response_q):
    while True:
        msg = await response_q.get()
        await websocket.send(msg)
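One follow-up note (my addition, not part of the original answer): the receive loop exits with an exception when the server closes the connection, so if you need a clean shutdown you can cancel the background tasks at that point:

from websockets.exceptions import ConnectionClosed

try:
    while True:
        message = await websocket.recv()
        requested_process, x, request_id = parse(message)
        await processing_qs[requested_process].put((x, request_id))
except ConnectionClosed:
    pass
finally:
    # stop the background workers so the program can exit cleanly
    for task in [*processing_tasks, response_task]:
        task.cancel()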
I am trying to set up a Discord bot that works on timers. One of them triggers when a user types the '!challenge' command; the bot then waits 60 seconds to see if anyone types the '!accept' command in response. If no one does, it states 'Challenge was ignored. Resetting.' or something along those lines.
Another timer runs during the game itself and is an hour long. However, the hour resets whenever a command is entered. If the game is idle for an hour (one player disconnects or quits), the bot resets the game itself.
I had this working with threading:
# Initiate a challenge to the room. The opponent is whoever uses the !accept command. This command should be
# unavailable the moment someone !accepts, to ensure no one trolls during a fight.
if message[:10] == "!challenge":
    msg, opponent, pOneInfo, new_game, bTimer, playerOne = message_10_challenge(channel, charFolder, message, unspoiledArena, character, self.game)
    if opponent != "":
        self.opponent = opponent
    if pOneInfo is not None:
        self.pOneInfo = pOneInfo
        self.pOneTotalHP = self.pOneInfo['thp']
        self.pOneCurrentHP = self.pOneInfo['thp']
        self.pOneLevel = self.pOneInfo['level']
    if new_game != 0:
        self.game = new_game
    if bTimer is True:
        timeout = 60
        self.timer = Timer(timeout, self.challengeTimeOut)
        self.timer.start()
    if playerOne != "":
        self.playerOne = playerOne
    super().MSG(channel, msg)
and:
# Response used to accept a challenge.
if message == "!accept":
    msg, pTwoInfo, new_game, playerTwo, bTimer, bGameTimer, new_opponent, token = message_accept(channel, charFolder, unspoiledArena, character, self.game, self.opponent, self.pOneInfo)
    if not charFile.is_file():
        super().MSG(channel, "You don't even have a character made to fight.")
    else:
        if new_game is not None:
            self.game = new_game
        if pTwoInfo is not None:
            self.pTwoInfo = pTwoInfo
            self.pTwoTotalHP = self.pTwoInfo['thp']
            self.pTwoCurrentHP = self.pTwoInfo['thp']
            self.pTwoLevel = self.pTwoInfo['level']
        if bTimer:
            self.timer.cancel()
        if bGameTimer:
            gametimeout = 3600
            self.gameTimer = Timer(gametimeout, self.combatTimeOut)
            self.gameTimer.start()
        if new_opponent is not None:
            self.opponent = new_opponent
        if playerTwo is not None:
            self.playerTwo = playerTwo
        if token is not None:
            self.token = token
        for msg_item in msg:
            super().MSG(channel, msg_item)
with functions:
def challengeTimeOut(self):
    super().MSG(unspoiledArena, "Challenge was not accepted. Challenge reset.")
    self.game = 0

def combatTimeOut(self):
    super().PRI('Unspoiled Desire', "!reset")
The above is an example of the same game on a different chat platform, with threading to handle the timers. But threading and discord.py aren't friends, I guess. So I am trying to get the above code to work with discord.py, which uses asyncio.
The thought was to use asyncio.sleep() in the !challenge handler:
if bTimer is True:
    await asyncio.sleep(60)
    self.game = 0
    await ctx.send("Challenge was not accepted. Challenge reset.")
And this works... but it doesn't stop the timer. So even if someone !accepts, changing bTimer to False, which should cancel the timer:

if bTimer:
    self.timer.cancel()
it's still going to say "Challenge was not accepted. Challenge reset."
The same problem occurs with bGameTimer if I try:

if bGameTimer:
    await asyncio.sleep(3600)
    await ctx.send("!reset")

the game will be hardwired to reset after one hour, whether the game is done or not, rather than resetting the one-hour timer after every turn and only resetting if a full hour passes in which no commands are made.
Is there a way to easily cancel or reset sleep cycles?
The asyncio code equivalent to your threading code would be:
...
if bTimer:
    self.timer = asyncio.create_task(self.challengeTimeout())
...

async def challengeTimeout(self):
    await asyncio.sleep(60)
    self.game = 0
    await ctx.send("Challenge was not accepted. Challenge reset.")
asyncio.create_task() creates a light-weight "task" object roughly equivalent to a thread. It runs "in parallel" with your other coroutines, and you can cancel it by invoking its cancel() method:
if bTimer:
    self.timer.cancel()
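The hour-long idle timer works the same way: to "reset" it on every command, cancel the pending task and create a new one. A minimal sketch (combatTimeout and the stored self.ctx are my assumptions, mirroring the code above):

def reset_game_timer(self):
    # cancel any pending timeout and start a fresh one-hour timer
    if self.gameTimer is not None:
        self.gameTimer.cancel()
    self.gameTimer = asyncio.create_task(self.combatTimeout())

async def combatTimeout(self):
    await asyncio.sleep(3600)
    await self.ctx.send("!reset")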