I'm currently working with Python's ThreadPoolExecutor, and I wonder how I can re-execute a task once it has finished.
import random
import threading
import time
from concurrent.futures import as_completed
from concurrent.futures.thread import ThreadPoolExecutor
import requests
URLS = [
    'URL1',
    'URL2',
    'URL3',
    'URL4',
    'URL5',
    'URL6',
    'URL7',
    'URL8',
    'URL9',
    'URL10',
    'URL11',
]
def doRequest(url):
    response = requests.get(f'https://www.google.se/?{url}')
    time.sleep(random.randint(1, 3))
    return response
def ourLoop():
    with ThreadPoolExecutor(max_workers=2) as executor:
        future_tasks = [executor.submit(doRequest, url) for url in URLS]
        for future in as_completed(future_tasks):
            response = future.result()
            print(f"Got result! -> {response}")
while True:
    t = threading.Thread(target=ourLoop)
    t.start()
    print('Joining thread and waiting for it to finish...')
    t.join()
What I wonder is: once we get a response inside the for future in as_completed(future_tasks): loop, I want the task to go back into the "queue" so that it keeps making requests until I close the application. Is that possible with ThreadPoolExecutor?
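One way to do this (a minimal sketch, not the only approach, reusing the doRequest and URLS defined above): instead of a fixed list of futures, keep a mapping from each in-flight future back to its URL, and submit a replacement request whenever one completes.

from concurrent.futures import FIRST_COMPLETED, wait

def ourLoop():
    with ThreadPoolExecutor(max_workers=2) as executor:
        # map each in-flight future back to the URL it is fetching
        future_to_url = {executor.submit(doRequest, url): url for url in URLS}
        while True:  # runs until the application is closed
            done, _ = wait(future_to_url, return_when=FIRST_COMPLETED)
            for future in done:
                url = future_to_url.pop(future)
                print(f"Got result! -> {future.result()}")
                # resubmit the same URL so it goes back into the queue
                future_to_url[executor.submit(doRequest, url)] = url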
The problem is that the last four lines can never be executed because of the infinite loop in the function above them, yet the loop itself is essential. Can anyone suggest a solution?
from aiogram import Bot, types
from aiogram.dispatcher import Dispatcher
from aiogram.utils import executor
from requests import Request, Session
import requests
import json
import pprint
import time
bot = Bot(token='')
dp = Dispatcher(bot)
site = "https://pro-api.coinmarketcap.com/v2/cryptocurrency/quotes/latest"
parameters = {
    'slug': 'bitcoin',
    'convert': 'USD'
}
headers = {
    'Accepts': 'application/json',
    'X-CMC_PRO_API_KEY': ''
}
def main():
    bitcoin_history = []
    while True:
        session = Session()
        session.headers.update(headers)
        response = session.get(site, params=parameters)
        price = json.loads(response.text)['data']['1']['quote']['USD']['price']
        bitcoin_history.append(price)
        print(bitcoin_history)
        if len(bitcoin_history) == 5:
            bitcoin_history = []
        time.sleep(30)

main()
@dp.message_handler(commands=["btcusd"])
async def echo_send(message: types.Message):
    await message.answer("$" + str())
executor.start_polling(dp, skip_updates=True)
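One way out (a minimal sketch, assuming aiogram 2.x, which the imports suggest; price_loop and on_startup are my own names, not from the original): turn the loop into a coroutine that sleeps with asyncio.sleep instead of time.sleep, and schedule it on the bot's event loop through start_polling's on_startup hook, so the price loop and the bot's polling run concurrently.

import asyncio

bitcoin_history = []

async def price_loop():
    while True:
        session = Session()
        session.headers.update(headers)
        # requests is blocking; each call briefly stalls the event loop,
        # which is tolerable on a 30-second cycle
        response = session.get(site, params=parameters)
        price = json.loads(response.text)['data']['1']['quote']['USD']['price']
        bitcoin_history.append(price)
        print(bitcoin_history)
        if len(bitcoin_history) == 5:
            bitcoin_history.clear()
        await asyncio.sleep(30)  # yields control back to the dispatcher

async def on_startup(dp):
    asyncio.create_task(price_loop())

executor.start_polling(dp, skip_updates=True, on_startup=on_startup)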
I created a server that waits for a webhook signal. When a signal arrives, it creates a new process to run the loop() function, and inside loop() I want to call printmessage() asynchronously, so that the next line in the loop runs without waiting for printmessage() to finish. However, I get the following errors; how do I resolve them?
#main.py
import time
from fastapi import Request, FastAPI
import multiprocessing as mp
import uvicorn
import asyncio
async def printmessage(fruit):
    print(fruit)
    time.sleep(5)

async def loop(fruit):
    while True:
        task = asyncio.create_task(printmessage(fruit))
        time.sleep(1)

fruit = "apple"
if __name__ == '__main__':
    print("PROGRAM LAUNCH...")
    print("WEBHOOK RECEIVE READY...")
    app = FastAPI()

    @app.post("/webhook")
    async def webhook(request: Request):
        print("WEBHOOK RECEIVED")
        p = mp.Process(target=loop, args=[fruit])
        p.start()
        print('done')
        return 'WEBHOOK RECEIVED'
The intended output should be printing apple every 1 second.
ERRORS:
RuntimeWarning: coroutine 'loop' was never awaited
self._target(*self._args, **self._kwargs)
RuntimeWarning: Enable tracemalloc to get the object allocation traceback
I tried the following way to avoid the errors, but there is no output at all:
#main.py
import time
from fastapi import Request, FastAPI
import multiprocessing as mp
import uvicorn
import asyncio
async def printmessage(fruit):
    print(fruit)
    time.sleep(5)

async def loop(fruit):
    while True:
        task = asyncio.create_task(printmessage(fruit))
        time.sleep(1)

def preloop(fruit):
    asyncio.run(loop(fruit))

fruit = "apple"
if __name__ == '__main__':
    print("PROGRAM LAUNCH...")
    print("WEBHOOK RECEIVE READY...")
    app = FastAPI()

    @app.post("/webhook")
    async def webhook(request: Request):
        print("WEBHOOK RECEIVED")
        p = mp.Process(target=preloop, args=[fruit])
        p.start()
        print('done')
        return 'WEBHOOK RECEIVED'
Here is how you can call an async function in a new process using multiprocessing. (Your second attempt printed nothing because time.sleep blocks the event loop, so the tasks created with create_task never get a chance to run; the loop has to yield with await asyncio.sleep instead.) In the code below, each request to /webhook creates a new process, which prints apple every 5 seconds.
from __future__ import annotations
import asyncio
from multiprocessing import Process
from fastapi import FastAPI
app = FastAPI()
process_pool: list[Process] = []
async def print_message(fruit):
    print(fruit)

async def loop(fruit):
    while True:
        await print_message(fruit)
        await asyncio.sleep(5)

def run_loop(fruit):
    asyncio.run(loop(fruit))
#app.get("/webhook")
async def webhook():
print("WEBHOOK RECEIVED")
fruit = "apple"
process = Process(target=run_loop, args=(fruit,))
process_pool.append(process)
process.start()
print('done')
return 'WEBHOOK RECEIVED'
@app.on_event("shutdown")
async def shutdown_event():
    for process in process_pool:
        process.kill()
    for process in process_pool:
        process.join()  # wait for the killed process to fully terminate
        process.close()
if __name__ == '__main__':
    print("PROGRAM LAUNCH...")
    print("WEBHOOK RECEIVE READY...")
I am new to asynchronous functions and threads, and I am trying to return a series of values obtained from a WebSocket and pass them to another thread where synchronous code is executing. The code also uses a multi-WebSocket approach. Below I show the code:
"""
This code is designed to run an asynchronous loop
with asyncio in a separate thread. This allows mixing
a synchronous code with an asynchronous one.
"""
import asyncio
from datetime import datetime
from threading import Thread
import websockets
from typing import Tuple, List, Iterable
import json
import time
URLS = [
    "wss://stream.binance.com:9443/ws/xrpusdt#kline_1m",
    "wss://stream.binance.com:9443/ws/btcusdt#kline_1m",
]
def start_background_loop(loop: asyncio.AbstractEventLoop):
    asyncio.set_event_loop(loop)
    loop.run_forever()

async def IndividualSubscription(url: str):
    """An individual subscription to each WebSocket is created."""
    async with websockets.connect(url) as websocket:
        data = await websocket.recv()
        data = json.loads(data)
        print('\n', data)
        return data
async def Subscriptions(URLS: Iterable[str]):
    """All concurrent tickets are subscribed and all are combined
    in a single coroutine."""
    while True:
        tasks = [asyncio.create_task(IndividualSubscription(url)) for url in URLS]
        # All tasks are run in parallel
        await asyncio.gather(*tasks)
        # return tasks
def main():
    loop = asyncio.new_event_loop()
    t = Thread(target=start_background_loop, args=(loop,), daemon=True)
    t.start()
    task = asyncio.run_coroutine_threadsafe(Subscriptions(URLS), loop)
    for i in task.result():
        print(f"{i}")
    # return tasks
def function():
    for i in range(100):
        print("This is out of asynchronous ", i)
        time.sleep(1)
if __name__ == "__main__":
main()
T2 = Thread(target=function,)
T2.start()
I tried just putting return in the async code, but then the async loop only runs once rather than continuously as I would expect. I have also tried calling .result() on the task from .create_task(). Is it possible to return values from an asynchronous function?
If you want interoperability between synchronous and asynchronous code, you need a communication mechanism that won't block the thread running the async code. Queues are commonly used for communication between threads; the janus library implements queues compatible with threads running an asyncio loop by exposing a sync queue interface to sync code and an async queue interface to async code.
Your code is a little chaotic, so I cleaned it up just to show the communication between the sync thread (the main thread) and the async thread (the background thread running the asyncio loop):
import asyncio
from datetime import datetime
from threading import Thread
import websockets
from typing import Tuple, List, Iterable
import json
import time
import janus # pip install janus
URLS = [
    "wss://stream.binance.com:9443/ws/xrpusdt#kline_1m",
    "wss://stream.binance.com:9443/ws/btcusdt#kline_1m",
]
def start_background_loop(loop: asyncio.AbstractEventLoop):
    asyncio.set_event_loop(loop)
    loop.run_forever()

async def IndividualSubscription(url: str):
    """An individual subscription to each WebSocket is created."""
    async with websockets.connect(url) as websocket:
        return json.loads(await websocket.recv())
async def Subscriptions(URLS: Iterable[str], results: asyncio.Queue):
    """All concurrent tickets are subscribed and all are combined
    in a single coroutine."""
    while True:
        tasks = [asyncio.create_task(IndividualSubscription(url)) for url in URLS]
        # gather returns the task results directly
        for result in await asyncio.gather(*tasks):
            await results.put(result)

def async_main(results: asyncio.Queue):
    asyncio.run(Subscriptions(URLS, results))
if __name__ == "__main__":
results = janus.Queue(100) # max size of 100
async_thread = Thread(target=async_main, args=(results.async_q,))
async_thread.daemon = True # exit if main thread exits
async_thread.start()
while True:
print(f"[sync thread] got result from async thread: {results.sync_q.get()}")
What would be the best approach for setting a timeout condition on a task/function that has been submitted to a ThreadPoolExecutor using tornado.concurrent's @run_on_executor decorator? Example Tornado handler below:
import json
import time
import tornado.web
from concurrent.futures import ThreadPoolExecutor
from tornado.concurrent import run_on_executor
class MyHandler(tornado.web.RequestHandler):
    def initialize(self) -> None:
        self.executor = ThreadPoolExecutor(1)

    @run_on_executor
    def blocking_function(self) -> str:
        """Run a blocking function on the ThreadPoolExecutor."""
        seconds = 10
        time.sleep(seconds)
        response = json.dumps({"message": f"Slept for {seconds} seconds."})
        return response

    async def get(self) -> None:
        response = await self.blocking_function()
        self.write(response)
Does something like tornado.gen.with_timeout exist for @run_on_executor?
Thank you for your time.
Since run_on_executor returns a Future object, you can use it with gen.with_timeout:
from datetime import timedelta

from tornado import gen

async def get(self):
    response = await gen.with_timeout(
        timedelta(seconds=5),
        self.blocking_function()
    )
    ...
Don't forget to handle the timeout exception.
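For example (a minimal sketch; gen.TimeoutError is what with_timeout raises when the deadline passes, and the 504 status is just one reasonable choice):

async def get(self):
    try:
        response = await gen.with_timeout(
            timedelta(seconds=5),
            self.blocking_function()
        )
    except gen.TimeoutError:
        # the deadline passed; note the blocking function keeps
        # running on the executor thread until it finishes
        self.set_status(504)
        response = json.dumps({"message": "Request timed out."})
    self.write(response)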
I'm using the falcon framework in Python to form JSON responses of a web API.
For instance, I have a function called logic() that runs for 30-90 minutes. I want something like this:
When an http-client asks for /api/somepath.json, we call somepath_handle()
somepath_handle() runs logic() in another thread/process
When logic() finishes, the thread is closed
somepath_handle() reads the response of logic() from its return value
If somepath_handle() is killed before logic() finishes, the thread running logic() isn't stopped until it completes
The code:
def somepath_handle():
    run_async_logic()
    response = wait_for_async_logic_response()  # read the response of logic()
    return_response(response)
If your process takes that long, I advise you to send the result to the user by email, or maybe through a live notification system.
I am using a simple worker to create a queue in which commands are processed. If you add simple response storage, it becomes possible to process any request without losing it when the connection is lost.
Example:
This is the main function; it uses falcon (falconframework.org) to respond to requests.
main.py:
from flow import Flow
import falcon
import threading
import storage

__version__ = 0.1
__author__ = 'weldpua2008@gmail.com'

app = falcon.API(
    media_type='application/json')
app.add_route('/flow', Flow())
THREADS_COUNT = 1

# adding the workers to process the queue of commands
worker = storage.worker
for _ in range(THREADS_COUNT):
    thread = threading.Thread(target=worker)
    thread.daemon = True
    thread.start()
This is the simple storage with the worker code.
storage.py:
from queue import Queue
import subprocess
import logging

main_queue = Queue()

def worker():
    while True:
        try:
            cmd = main_queue.get()
            # do_work(item)
            # time.sleep(5)
            handler = subprocess.Popen(
                cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
            stdout, stderr = handler.communicate()
            logging.critical("[queue_worker]: stdout:%s, stderr:%s, cmd:%s" % (stdout, stderr, cmd))
            main_queue.task_done()
        except Exception as error:
            logging.critical("[queue_worker:error] %s" % (error))
This is the class that will process any request [POST, GET].
flow.py:
import storage
import json
import falcon
import random

class Flow(object):
    def on_get(self, req, resp):
        storage_value = storage.main_queue.qsize()
        msg = {"qsize": storage_value}
        resp.body = json.dumps(msg, sort_keys=True, indent=4)
        resp.status = falcon.HTTP_200

    # curl -H "Content-Type: application/json" -d '{}' http://10.206.102.81:8888/flow
    def on_post(self, req, resp):
        r = random.randint(1, 10000000000000)
        cmd = 'sleep 1;echo "ss %s"' % str(r)
        storage.main_queue.put(cmd)
        storage_value = cmd
        msg = {"value": storage_value}
        resp.body = json.dumps(msg, sort_keys=True, indent=4)
        resp.status = falcon.HTTP_200
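As a sketch of the "simple response storage" mentioned above (the job_results dict and submit_command helper are hypothetical additions, not part of the code above): the worker stores each command's output under a job id, so a client can fetch the result later even if the original connection was lost.

import uuid

job_results = {}  # hypothetical storage: job_id -> (stdout, stderr)

def submit_command(cmd):
    """Queue a command and hand back an id the client can poll later."""
    job_id = str(uuid.uuid4())
    main_queue.put((job_id, cmd))
    return job_id

def worker():
    while True:
        job_id, cmd = main_queue.get()
        handler = subprocess.Popen(
            cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
        stdout, stderr = handler.communicate()
        job_results[job_id] = (stdout, stderr)  # survives a dropped connection
        main_queue.task_done()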