aiomysql makes random errors in events appear - python-3.x

so far I've been using mysql.connector to manage mysql in my discord bot, however, since that's not async, I'm trying to change to aiomysql. It works, kinda... I'm having a problem where sometimes I get something like
ERROR:asyncio:Task was destroyed but it is pending!
task: <ClientEventTask state=pending event=on_raw_message_delete coro=<bound method LoggingSystem.on_raw_message_delete of <cogs.loggingsystem.LoggingSystem object at 0x059C7A48>>>
In this case it was in on_raw_message_delete of a cog, but the event where this happens isn't always the same and it's only on events, never in commands.
So the question is, am I doing something wrong that may cause that?
# mysqlconnection.py
pool: aiomysql.Pool = None
async def getpool():
global pool
pool = await aiomysql.create_pool(host="", user="", password="", db="", pool_recycle=0)
async def execute_search(search: str, vals: tuple = None): # This function is what I use to get data from the DB across the whole bot
async with pool.acquire() as connection:
async with connection.cursor() as cursor:
if vals is None:
await cursor.execute(search)
else:
await cursor.execute(search, vals)
result = await cursor.fetchall()
return result
async def execute_alter(query: str, vals: tuple): # This function is what I use to alter data in the DB across the whole bot
async with pool.acquire() as connection:
async with connection.cursor() as cursor:
await cursor.execute(query, vals)
await connection.commit()
# bot.py
from mysqlconnection import execute_search, execute_alter, getpool
#bot.listen()
async def on_ready():
await getpool()
...
So, anything wrong with my approach?

Related

websocket messages appears at once rather than individual messages

I want my backend send progress message to UI via websocket.
My problem is all messages,which produced by calling
automate_algorithm()
function appear together at once at the end of process, instead of appear one by one. Is there any wrong with my code.
This class create a dictionary which key is project id, and value is the opened websocket
class ConnectionManager:
def __init__(
self
):
self.connections: dict[str, WebSocket] = {}
async def connect(
self,
id: str,
websocket: WebSocket
):
"""To add new open socket to memory storage
Args:
id:(str)the
"""
await websocket.accept()
self.connections[id] = websocket
async def disconnect(self, id: str):
if id in self.connections:
await self.connections[id].close(code=100,reason=None)
del self.connections[id]
async def send_response(
self,
id: str,
data: str,
status:str='running'
):
print(
f"tries to send response for client with id :{id}. Response is {data}")
try:
await self.connections[id].send_json(data=dict(
timestamp=time.strftime("%H:%M:%S", time.localtime()),
message=data,
id=id,
status=status
)
)
if status=="completed":
await self.disconnect(id)
except Exception as e:
print(str(e))
self.disconnect(id)
manager = ConnectionManager()#create a context for web socket manager
This method get user HTTP request, and start process
#router.websocket("/auto_algo/{client_id}")
async def auto_algo(
websocket: WebSocket,
client_id: str,
):
await manager.connect(client_id, websocket)
# HANDLE FUNCTION*****
await automate_algorithm(idt=client_id)
This is the main method which produce the messages,that should write in websocket.
async def send_message_to_socket(
client_id: str,
what: str,
status:str='running'
):
global manager
await manager.send_response(client_id, what,status)
# automate to algorithm ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
async def automate_algorithm(idt,language='en'):
from controllers.dispatcher_controller import send_message_to_socket
await send_message_to_socket(client_id=idt,what="process starting")#This message appear at start correctly
mds2 = create_mds(idt,mapper=False)
await send_message_to_socket(client_id=idt,what="main_data_structure 2 created...")#the rest of message appear together at the end of process
sample_data = create_sample_data(idt,mapper=False)
await send_message_to_socket(client_id=idt,what="sample data created...")
corr = correlation_matrix(idt,mapper=False)
await send_message_to_socket(client_id=idt,what="correlation created...")
mds3 = accomplish_mds(idt,mapper=False)
await send_message_to_socket(client_id=idt,what="main_data_structure 3 created...")
When the code is executed, the control is never returned to the event loop. There is an easy fix for this; add some await asyncio.sleep(0) to semi-manually return control to the event loop, so it has a chance to execute the send() method of other tasks.So I add await asyncio.sleep(0) right after each await send_message_to_socket(...) line, and the code works correctly

Need to parse two sessions at the same time with telethon on Python

i have some troubles with parsing two or more sessions at the same time with telethon. I have tried this:
class NewSession:
def __init__(self, session_name):
self.client = TelegramClient(session_name, api_id, api_hash)
self.session_name = session_name
async def pool(self):
print("working with:", self.session_name)
#self.client.on(events.NewMessage(outgoing=True))
async def main(event):
message = event.message.to_dict()
msg_text = message['message']
print(msg_text)
try:
await self.client.start()
await self.client.run_until_disconnected()
finally:
await self.client.disconnect()
async def main():
user = NewSession("321")
user2 = NewSession("123")
await user.pool()
await user2.pool()
if __name__ == '__main__':
asyncio.run(main())
But only one is working. Need help :)
The problem is inside your main function. When you await for a coroutine to return it doesn't mean that the execution continues to the next expression. So, in your code the line await user2.pool() is going to be executed only when the user.poll() coroutines returns a value, this is when the session '321' is disconnected.
You need to run the tasks concurrently; you can use the function asyncio.gather. Reworking your main:
async def main():
user = NewSession("321")
user2 = NewSession("123")
await asyncio.gather(user.pool(), user2.pool())

SQLAlchemy async engine with ORM unable to execute basic queries

I have switched my SQLAlchemy database code to use an async engine and am having trouble establishing basic functionality.
I have a class that starts the database like this:
class PostgresDb:
def __init__(self):
self._session = None
self._engine = None
def __getattr__(self, name):
return getattr(self._session, name)
def init(self):
self._engine = create_async_engine(
ENGINE,
echo=True,
future=True)
self._session = sessionmaker(
self._engine, expire_on_commit=False, class_=AsyncSession
)()
async def create_all(self):
async with self._engine.begin() as conn:
await conn.run_sync(Base.metadata.create_all)
# Other methods...
Example of how create_all gets called:
async def init_db_tables():
self.init()
await self.create_all()
asyncio.run(init_db_tables())
When I want to achieve basic functionality, like getting all the tables, I can do something like:
def get_tables(self):
with create_engine(SYNCHRONOUS_ENGINE).connect() as conn:
meta = MetaData(conn, schema=SCHEMA)
meta.reflect(views=True)
table_list = meta.tables
return table_list
This is not ideal as I need to actually pass in a synchronous engine connection instead of the actual async engine I am using in the Class. It is also very verbose and shouldn't need to be initiated like this for every query.
I have tried doing something like this to select the table 'appuser' from the database:
async def get_tables(self):
self.init()
async with self._session() as session:
q = select('appuser')
result = await session.execute(q)
curr = result.scalars()
for i in curr:
print(i)
Which I've tried calling like this
db = PostgresDb()
asyncio.run(db.get_tables())
asyncio.get_event_loop().run_until_complete(db.get_tables())
These both give error:
async with self._session() as session:
TypeError: 'AsyncSession' object is not callable
Calling it with db.get_tables() errors RuntimeWarning: coroutine 'PostgresDb.get_tables' was never awaited db.get_tables() RuntimeWarning: Enable tracemalloc to get the object allocation traceback
Trying to use inspector with run_sync like this:
async def get_tables(self):
await self.init()
async with self._engine.begin() as conn:
inspector = conn.run_sync(inspect(conn))
table_names = await conn.run_sync(inspector.get_table_names())
print(table_names)
Returns error
sqlalchemy.exc.NoInspectionAvailable: Inspection on an AsyncConnection is currently not supported. Please use ``run_sync`` to pass a callable where it's possible to call ``inspect`` on the passed connection.
I have read the documentation at https://docs.sqlalchemy.org/en/14/orm/extensions/asyncio.html#sqlalchemy.ext.asyncio.AsyncConnection.run_sync but I am still unclear about how to work cleanly with async engines.
Thanks for any and all insight you're able to offer on how to execute a simple query get all tables in SQLAlchmey using the async engine!

Tornado websocket client: how to async on_message? (coroutine was never awaited)

How can I make the on_message function work asynchronously in my Tornado WebSocketClient?
I guess I need to await the on_message function, but I don't know how.
Or is there even a fundamental misconception in the way how I try to implement an asynchronous WebSocketClient?
import tornado.websocket
from tornado.queues import Queue
from tornado import gen
import json
q = Queue()
class WebsocketClient():
def __init__(self, url, connections):
self.url = url
self.connections = connections
print("CLIENT started")
print("CLIENT initial connections: ", len(self.connections))
async def send_message(self):
async for message in q:
try:
msg = json.loads(message)
print(message)
await gen.sleep(0.001)
finally:
q.task_done()
async def update_connections(self, connections):
self.connections = connections
print("CLIENT updated connections: ", len(self.connections))
async def on_message(self, message):
await q.put(message)
await gen.sleep(0.001)
async def connect(self):
client = await tornado.websocket.websocket_connect(url=self.url, on_message_callback=self.on_message)
RuntimeWarning: coroutine 'WebsocketClient.on_message' was never awaited
self._on_message_callback(message)
RuntimeWarning: Enable tracemalloc to get the object allocation traceback
on_message_callback is supposed to be a regular function, not a coroutine. And it is meant to be used in old-style code when people used callbacks instead of coroutines.
For the newer async-style code, you don't need this callback. You can just do this:
async def connect(self):
client = await tornado.websocket.websocket_connect(url=self.url)
while True:
message = await client.read_message()
if message is None:
# None message means the connection was closed
break
print("Message received:", message)
await q.put(message)
await gen.sleep(0.001)

RuntimeError when running coroutine from __init__

Here's a sample code.
class Foo:
def __init__(self):
self._run_coro()
def _run_coro(self):
async def init():
bar = #some I/O op
self.bar = bar
loop = asyncio.get_event_loop()
loop.run_until_complete(init())
async def spam(self):
return await #I/O op
async def main():
foo = Foo()
await foo.spam()
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
When I run this code, I get following exception:
RuntimeError: This event loop is already running
If I initialize Foo outside main, the code runs without any exception. I want to initialize Foo such that during initialization it runs a coroutine which creates a class attribute bar.
I am unable to figure how to do it correctly. How can I run a coroutine from __init__.
Any help would be highly appreciated.
class Foo:
def __init__(self):
self.session = requests.Session()
self.async_session = None
#I guess this can be done to initialize it.
s = self.init_async_session()
try:
s.send(None)
except StopIteration:
pass
finally:
s.close()
async def init_async_session(self):
#ClientSession should be created inside a coroutine.
self.async_session = aiohttp.ClientSession()
What would be the right way to initialize self.async_session
If some method uses something asynchronous it should be explicitly defined as asynchronous either. This is a core idea behind asyncio: make you write code a way you always know if some arbitrary method may do something asynchronous.
In your snippet you want to do async thing (bar I/O) inside sync method __init__ and asyncio prohibits it. You should make _run_coro async and initialize Foo asynchronously, for example, using __await__ method:
import asyncio
class Foo:
def __await__(self):
return self._run_coro().__await__()
async def _run_coro(self): # real async initializer
async def init():
await asyncio.sleep(1) # bar I/O
self.bar = 123
await init()
return self
async def spam(self):
return await asyncio.sleep(1) # I/O op
async def main():
foo = await Foo()
await foo.spam()
asyncio.run(main()) # instead of two lines in Python 3.7+
You may be interested in reading this answer to understand better how asyncio works and how to handle it.
Upd:
s = self.init_async_session()
try:
s.send(None)
Don't do such things: generator's method are only details of implementation in regard of coroutines. You can predict how coroutine will react on calling .send() method and you can rely on this behavior.
If you want to execute coroutine use await, if you want to start it "in background" use task or other functions from asyncio doc.
What would be the right way to initialize self.async_session
When it comes to aiohttp.ClientSession it should not only be created, but properly closed also. Best way to do it is to use async context manager as shown in aiohttp doc.
If you want to hide this operation inside Foo you can make it async manager either. Complete example:
import aiohttp
class Foo:
async def __aenter__(self):
self._session = aiohttp.ClientSession()
await self._session.__aenter__()
return self
async def __aexit__(self, *args):
await self._session.__aexit__(*args)
async def spam(self):
url = 'http://httpbin.org/delay/1'
resp = await self._session.get(url)
text = await resp.text()
print(text)
async def main():
async with Foo() as foo:
await foo.spam()
asyncio.run(main())
Upd2:
You can combine ways to init/close object from above to achive result you like. As long as you keep in mind both operations are asynchronous and thus should be awaited, everything should be fine.
One more possible way:
import asyncio
import aiohttp
class Foo:
def __await__(self):
return self._init().__await__()
async def _init(self):
self._session = aiohttp.ClientSession()
await self._session.__aenter__()
return self
async def close(self):
await self._session.__aexit__(None, None, None)
async def spam(self):
url = 'http://httpbin.org/delay/1'
resp = await self._session.get(url)
text = await resp.text()
print(text)
async def main():
foo = await Foo()
try:
await foo.spam()
finally:
await foo.close()
asyncio.run(main())
Here's my solution.
class Session:
def __init__(self, headers):
self._headers = headers
self._session = requests.Session()
self._async_session = None
async def _init(self):
self._session = aiohttp.ClientSession(headers=headers)
async def async_request(self, url):
while True:
try:
async with self._async_session.get(url) as resp:
resp.raise_for_status()
return await resp.text()
except aiohttp.client_exceptions.ClientError:
#retry or raise
except AttributeError:
if isinstance(self._async_session, aiohttp.ClientSession):
raise
await self._init()
def request(self, url):
return self._session.get(url).text
async def close(self):
if isinstance(self._async_session, aiohttp.ClientSession):
await self._session.close()
async def main():
session = Session({})
print(await session.async_request('https://httpstat.us/200')
await session.close()
asyncio.run(main())
I can initialize the Session class and make synchronous as well as asynchronous requests. I do not have to explicitly call await session._init() to initialize self._async_session as when session._async_request is called and self._async_session is None, then await session._init() will be called and the request will be retried.

Resources