Python asyncio: how to mock __aiter__() method?

I have code that listens for messages on a WebSocket using aiohttp.
It looks like:
async for msg in ws:
    await self._ws_msg_handler.handle_message(ws, msg, _services)
Where ws is an instance of aiohttp.web.WebSocketResponse() (original code)
In my test I mock WebSocketResponse() and its __aiter__ method:
def coro_mock(**kwargs):
    return asyncio.coroutine(mock.Mock(**kwargs))

@pytest.mark.asyncio
@mock.patch('aiojsonrpc.request_handler.WebSocketMessageHandler')
async def test_rpc_websocket_handler(
    MockWebSocketMessageHandler,
    rpc_websocket_handler
):
    ws_response = 'aiojsonrpc.request_handler.WebSocketResponse'
    with mock.patch(ws_response) as MockWebSocketResponse:
        MockRequest = mock.MagicMock()
        req = MockRequest()

        ws_instance = MockWebSocketResponse.return_value
        ws_instance.prepare = coro_mock()
        ws_instance.__aiter__ = coro_mock(return_value=iter(range(5)))
        ws_instance.__anext__ = coro_mock()
        handle_msg_result = 'Message processed'
        MockWebSocketMessageHandler.handle_message.side_effect = Exception(
            handle_msg_result)
        msg_handler = MockWebSocketMessageHandler()
        with pytest.raises(Exception) as e:
            await request_handler.RpcWebsocketHandler(msg_handler)(req)
        assert str(e.value) == handle_msg_result
However, when I run the test it fails with this error message:
'async for' requires an object with __aiter__ method, got MagicMock
=================================================================================== FAILURES ===================================================================================
__________________________________________________________________________ test_rpc_websocket_handler __________________________________________________________________________

MockWebSocketMessageHandler = <MagicMock name='WebSocketMessageHandler' id='140687969989632'>
rpc_websocket_handler = <aiojsonrpc.request_handler.RpcWebsocketHandler object at 0x7ff47879b0f0>

    @pytest.mark.asyncio
    @mock.patch('aiojsonrpc.request_handler.WebSocketMessageHandler')
    async def test_rpc_websocket_handler(
        MockWebSocketMessageHandler,
        rpc_websocket_handler
    ):
        ws_response = 'aiojsonrpc.request_handler.WebSocketResponse'
        with mock.patch(ws_response) as MockWebSocketResponse:
            # MockRequest = mock.create_autospec(aiohttp.web_reqrep.Request)
            # req = MockRequest(*[None] * 6)
            MockRequest = mock.MagicMock()
            req = MockRequest()

            ws_instance = MockWebSocketResponse.return_value
            ret = mock.Mock()
            ws_instance.prepare = coro_mock()
            ws_instance.__aiter__ = coro_mock(return_value=iter(range(5)))
            ws_instance.__anext__ = coro_mock()
            handle_msg_result = 'Message processed'
            MockWebSocketMessageHandler.handle_message.side_effect = Exception(
                handle_msg_result)
            msg_handler = MockWebSocketMessageHandler()
            with pytest.raises(Exception) as e:
                await request_handler.RpcWebsocketHandler(msg_handler)(req)
>           assert str(e.value) == handle_msg_result
E           assert "'async for' ...got MagicMock" == 'Message processed'
E             - 'async for' requires an object with __aiter__ method, got MagicMock
E             + Message processed

tests/test_request_handler.py:252: AssertionError
So it behaves like __aiter__() was never mocked.
How am I supposed to accomplish correct mocking in this case?
Update:
For now I've found a workaround to make the code testable, though I would really appreciate it if someone could tell me how to deal with the issue described in the original question.

You can make the mocked class return an object implementing the expected interface:
class AsyncIterator:
    def __init__(self, seq):
        self.iter = iter(seq)

    def __aiter__(self):
        return self

    async def __anext__(self):
        try:
            return next(self.iter)
        except StopIteration:
            raise StopAsyncIteration

MockWebSocketResponse.return_value = AsyncIterator(range(5))
I don't think there is a way (yet) to correctly mock an object implementing __aiter__ with a plain MagicMock; it may be a Python bug, as async for rejects a MagicMock even if hasattr(the_magic_mock, '__aiter__') is True.
EDIT (13/12/2017): the asynctest library supports asynchronous iterators and context managers since version 0.11; asynctest.MagicMock provides this feature for free.
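For example (a minimal sketch, assuming asynctest >= 0.11 is installed):
import asyncio
import asynctest

async def collect(source):
    return [item async for item in source]

# asynctest.MagicMock pre-configures __aiter__, so assigning an iterable
# to its return_value is enough for `async for` to work on the mock.
m = asynctest.MagicMock()
m.__aiter__.return_value = range(5)

print(asyncio.get_event_loop().run_until_complete(collect(m)))  # [0, 1, 2, 3, 4]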

For posterity, I had the same problem of needing to test an async for loop, but the accepted solution doesn't seem to work for Python 3.7. The example below works for 3.6.x and 3.7.0, but not for 3.5.x:
import asyncio

class AsyncIter:
    def __init__(self, items):
        self.items = items

    async def __aiter__(self):
        for item in self.items:
            yield item

async def print_iter(items):
    async for item in items:
        print(item)

if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    things = AsyncIter([1, 2, 3])
    loop.run_until_complete(print_iter(things))
    loop.close()
With the above, mocking it looks something like:
with mock.patch('some.async.iter', return_value=AsyncIter([1, 2, 3])):
    # do test requiring mocked iter

Works for Python 3.8+:
from unittest.mock import MagicMock

async def test_iterable(self):
    loop_iterations = 0
    mock = MagicMock()
    mock.__aiter__.return_value = range(5)
    async for _ in mock:
        loop_iterations += 1
    self.assertEqual(5, loop_iterations)
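For completeness, here is the same test embedded in a runnable test case (a minimal sketch using unittest.IsolatedAsyncioTestCase from Python 3.8; the class name is made up):
import unittest
from unittest.mock import MagicMock

class TestAsyncIteration(unittest.IsolatedAsyncioTestCase):
    async def test_iterable(self):
        loop_iterations = 0
        mock = MagicMock()
        # Python 3.8+ MagicMock supports __aiter__/__anext__, so assigning
        # an iterable to __aiter__.return_value drives the async for loop.
        mock.__aiter__.return_value = range(5)
        async for _ in mock:
            loop_iterations += 1
        self.assertEqual(5, loop_iterations)

if __name__ == '__main__':
    unittest.main()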

I have a Python version that supports AsyncMock and I also leverage pytest-mock. I came up with this solution, combining the patch with an async generator side_effect:
from typing import List

import pytest
import asyncio
from pytest_mock.plugin import MockerFixture

pytestmark = pytest.mark.asyncio

async def async_generator(numbers: List[int]):
    for number in numbers:
        yield number
        await asyncio.sleep(0.1)

async def function_to_test(numbers: List[int]):
    async for thing in async_generator(numbers):
        yield thing * 3
        await asyncio.sleep(0.1)

async def test_async_generator(mocker: MockerFixture):
    mock_numbers = [1, 2, 3, 4, 5]

    async def async_generator_side_effect(numbers: List[int]):
        for number in numbers:
            yield number

    mock_async_generator = mocker.patch("tests.test_async_generator.async_generator")
    mock_async_generator.side_effect = async_generator_side_effect

    actual = []
    async for result in function_to_test(mock_numbers):
        actual.append(result)
    assert actual == [3, 6, 9, 12, 15]

Related

How to implement "new style" Python AsyncIO iterator function from an "old style" class?

I am trying to learn Python AsyncIO but I'm having a lot of trouble finding good tutorials that are up to date.
Let's say we have this "old style" async iterator class:
def chain(sink, *coro_pipeline):
    f = sink
    for coro_func, coro_args, coro_kwargs in coro_pipeline:
        f = coro_func(f, *coro_args, **coro_kwargs)
    return f

class sendable_deque(collections.deque):
    send = collections.deque.append

class AsyncIterator(object):
    def __init__(self, f, buf_size, *coro_pipeline):
        self.events = sendable_deque()
        self.coro = chain(self.events, *coro_pipeline)
        self.coro_finished = False
        self.f = f
        self.buf_size = buf_size

    def __aiter__(self):
        return self

    async def __anext__(self):
        if self.events:
            return self.events.popleft()

        if self.coro_finished:
            raise StopAsyncIteration

        while True:
            data = await self.f.read(self.buf_size)
            try:
                self.coro.send(data)
                if self.events:
                    return self.events.popleft()
            except StopIteration:
                self.coro_finished = True
                if self.events:
                    return self.events.popleft()
                raise StopAsyncIteration

# An example of a pipeline
def _parse_pipeline(parser, config):
    return (
        (parser['parse_basecoro'], [], {}),
        (parser['basic_parse_basecoro'], [], config)
    )
How is this now implemented? As I am led to believe, we use something more like:
async def async_parse(f, buf_size):
    while True:
        yield f.read(buf_size)

async def parse(f, buf_size):
    async with async_parse(f, buf_size) as pf:
        print(pf)
But the code in the class is using a chaining method which I believe is done with asyncio.gather()?
Any good modern guides/tutorials would be a great help also.
Using an async generator the above hand-rolled async iterator can be expressed as follows (untested):
async def async_iterable(f, buf_size, coro_pipeline):
    events = sendable_deque()
    coro = utils.chain(events, *coro_pipeline)
    done = False
    while True:
        while events:
            yield events.popleft()
        if done:
            break
        data = await f.read(buf_size)
        try:
            coro.send(data)
        except StopIteration:
            done = True
In the long run you might want to reconsider the use of bi-directional generators and replace them with native asyncio constructs such as queues.
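For illustration, the buffering could be handed to an asyncio.Queue along these lines (a rough sketch, not from the original answer; parse_chunk stands in for the coroutine pipeline and is hypothetical):
import asyncio

async def produce_events(f, buf_size, parse_chunk, queue):
    # Read chunks, push parsed events onto the queue; None signals the end.
    while True:
        data = await f.read(buf_size)
        if not data:
            break
        for event in parse_chunk(data):
            await queue.put(event)
    await queue.put(None)

async def iter_events(f, buf_size, parse_chunk):
    queue = asyncio.Queue(maxsize=100)
    producer = asyncio.ensure_future(produce_events(f, buf_size, parse_chunk, queue))
    try:
        while True:
            event = await queue.get()
            if event is None:
                break
            yield event
    finally:
        producer.cancel()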

mock multiprocessing Pool.map in testing

I want to write a test case for a method in my code mocking the multiprocessing part using pytest.
This is the code, and I need to write a test for the simulation method.
from multiprocessing import Pool

class Simulation:
    def __init__(self, num):
        self.num = num

    def simulation(self):
        pool = Pool(processes=self.num)
        val = [1, 2, 3, 4, 5]
        res = pool.map(self.sim, val)
        return res

    def sim(self, val):
        return val * val
Here is the test case.
import pytest
from multi import Simulation

@pytest.fixture
def sim():
    return Simulation(2)

@pytest.fixture
def mock_map():
    def map(self, val1=1, val2=2):
        return [val1, val2]
    return map

def test_sim(sim, mock_map, monkeypatch):
    monkeypatch.setattr('multiprocessing.Pool.map', mock_map)
    res = sim.simulation()
    assert res == [1, 2]
When running the test I get the output [1, 4, 9, 16, 25], whereas I need the mocked output [1, 2].
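One possible fix (a sketch, not an authoritative answer) is to patch the Pool name where the module under test imports it, so pool.map never runs the real multiprocessing code:
import pytest
from unittest import mock

from multi import Simulation

@pytest.fixture
def sim():
    return Simulation(2)

def test_sim(sim):
    # Patch the Pool symbol that multi.py imported; the fake pool's map()
    # simply returns the canned result the test expects.
    with mock.patch('multi.Pool') as MockPool:
        MockPool.return_value.map.return_value = [1, 2]
        assert sim.simulation() == [1, 2]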

Appending to merged async generators in Python

I'm trying to merge a bunch of asynchronous generators in Python 3.7 while still adding new async generators on iteration. I'm currently using aiostream to merge my generators:
from asyncio import sleep, run
from aiostream.stream import merge

async def go():
    yield 0
    await sleep(1)
    yield 50
    await sleep(1)
    yield 100

async def main():
    tasks = merge(go(), go(), go())

    async for v in tasks:
        print(v)

if __name__ == '__main__':
    run(main())
However, I need to be able to continue to add to the running tasks once the loop has begun. Something like this:
from asyncio import sleep, run
from aiostream.stream import merge

async def go():
    yield 0
    await sleep(1)
    yield 50
    await sleep(1)
    yield 100

async def main():
    tasks = merge(go(), go(), go())

    async for v in tasks:
        if v == 50:
            tasks.merge(go())
        print(v)

if __name__ == '__main__':
    run(main())
The closest I've got to this is using the aiostream library but maybe this can also be written fairly neatly with just the native asyncio standard library.
Here is an implementation that should work efficiently even with a large number of async iterators:
import asyncio

class merge:
    def __init__(self, *iterables):
        self._iterables = list(iterables)
        self._wakeup = asyncio.Event()

    def _add_iters(self, next_futs, on_done):
        for it in self._iterables:
            it = it.__aiter__()
            nfut = asyncio.ensure_future(it.__anext__())
            nfut.add_done_callback(on_done)
            next_futs[nfut] = it
        del self._iterables[:]
        return next_futs

    async def __aiter__(self):
        done = {}
        next_futs = {}

        def on_done(nfut):
            done[nfut] = next_futs.pop(nfut)
            self._wakeup.set()

        self._add_iters(next_futs, on_done)
        try:
            while next_futs:
                await self._wakeup.wait()
                self._wakeup.clear()
                for nfut, it in done.items():
                    try:
                        ret = nfut.result()
                    except StopAsyncIteration:
                        continue
                    self._iterables.append(it)
                    yield ret
                done.clear()
                if self._iterables:
                    self._add_iters(next_futs, on_done)
        finally:
            # if the generator exits with an exception, or if the caller stops
            # iterating, make sure our callbacks are removed
            for nfut in next_futs:
                nfut.remove_done_callback(on_done)

    def append_iter(self, new_iter):
        self._iterables.append(new_iter)
        self._wakeup.set()
The only change required for your sample code is that the method is named append_iter, not merge.
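Adapted to the question's example, usage would look roughly like this (a sketch reusing go() from the question and the merge class above):
import asyncio

async def go():
    yield 0
    await asyncio.sleep(1)
    yield 50
    await asyncio.sleep(1)
    yield 100

async def main():
    tasks = merge(go(), go(), go())
    async for v in tasks:
        if v == 50:
            # append a new generator while iteration is already running
            tasks.append_iter(go())
        print(v)

asyncio.run(main())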
This can be done using stream.flatten with an asyncio queue to store the new generators.
import asyncio
from aiostream import stream, pipe

async def main():
    queue = asyncio.Queue()
    await queue.put(go())
    await queue.put(go())
    await queue.put(go())

    xs = stream.call(queue.get)
    ys = stream.cycle(xs)
    zs = stream.flatten(ys, task_limit=5)
    async with zs.stream() as streamer:
        async for item in streamer:
            if item == 50:
                await queue.put(go())
            print(item)
Notice that you may tune the number of tasks that can run at the same time using the task_limit argument. Also note that zs can be elegantly defined using the pipe syntax:
zs = stream.call(queue.get) | pipe.cycle() | pipe.flatten(task_limit=5)
Disclaimer: I am the project maintainer.

How to iterate over an asynchronous iterator with a timeout?

I think it's easier to understand in terms of code:
try:
    async for item in timeout(something(), timeout=60):
        await do_something_useful(item)
except asyncio.futures.TimeoutError:
    await refresh()
I want the async for to run at most 60 seconds.
I needed to do something like this to create a websocket (also an async iterator) that times out if it doesn't get a message after a certain duration. I settled on the following:
socket_iter = socket.__aiter__()
try:
    while True:
        message = await asyncio.wait_for(
            socket_iter.__anext__(),
            timeout=10
        )
except asyncio.futures.TimeoutError:
    # streaming is completed
    pass
AsyncTimedIterable could be the implementation of timeout() in your code:
class _AsyncTimedIterator:
    __slots__ = ('_iterator', '_timeout', '_sentinel')

    def __init__(self, iterable, timeout, sentinel):
        self._iterator = iterable.__aiter__()
        self._timeout = timeout
        self._sentinel = sentinel

    async def __anext__(self):
        try:
            return await asyncio.wait_for(self._iterator.__anext__(), self._timeout)
        except asyncio.TimeoutError:
            return self._sentinel

class AsyncTimedIterable:
    __slots__ = ('_factory', )

    def __init__(self, iterable, timeout=None, sentinel=None):
        self._factory = lambda: _AsyncTimedIterator(iterable, timeout, sentinel)

    def __aiter__(self):
        return self._factory()
(original answer)
Or use this class to replace your timeout() function:
class AsyncTimedIterable:
    def __init__(self, iterable, timeout=None, sentinel=None):
        class AsyncTimedIterator:
            def __init__(self):
                self._iterator = iterable.__aiter__()

            async def __anext__(self):
                try:
                    return await asyncio.wait_for(self._iterator.__anext__(),
                                                  timeout)
                except asyncio.TimeoutError:
                    return sentinel

        self._factory = AsyncTimedIterator

    def __aiter__(self):
        return self._factory()
A simple approach is to use an asyncio.Queue, and separate the code into two coroutines:
queue = asyncio.Queue()

async for item in something():
    await queue.put(item)
In another coroutine:
while True:
    try:
        item = await asyncio.wait_for(queue.get(), 60)
    except asyncio.TimeoutError:
        pass
    else:
        if item is None:
            break  # use None or whatever suits you to gracefully exit
        await do_something_useful(item)
    refresh()
Please note that the queue will grow if the handler do_something_useful() is slower than something() generates items. You may set a maxsize on the queue to limit the buffer size.
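For example (a small sketch; the limit is arbitrary):
# put() blocks once 100 items are buffered, applying backpressure
# to the producing coroutine until the consumer catches up.
queue = asyncio.Queue(maxsize=100)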
The answer to your question can differ based on the nature of the refresh function. If it's a very short-running function, it can be freely called inside a coroutine. But if it's a blocking function (due to network or CPU), it should be run in an executor to avoid freezing the asyncio event loop.
The code below shows an example for the first case; changing it to run refresh in an executor is not hard.
The second thing to clarify is the nature of the asynchronous iterator. As far as I understand, you're using it to either get a result from something() or None if a timeout occurred.
If I understand the logic correctly, your code can be written more clearly (in a style similar to non-async code, as asyncio is designed to allow) using the async_timeout context manager and without using an asynchronous iterator at all:
import asyncio
from async_timeout import timeout

async def main():
    while True:
        try:
            async with timeout(60):
                res = await something()
                await do_something_useful(res)
        except asyncio.TimeoutError:
            pass
        finally:
            refresh()
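If refresh happened to be a blocking function, one way to adapt the example (a sketch, assuming refresh is synchronous) is to run it in the default executor instead of calling it directly:
import asyncio

async def call_refresh():
    loop = asyncio.get_event_loop()
    # Runs the blocking refresh() in a thread pool, keeping the event loop responsive.
    await loop.run_in_executor(None, refresh)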
Your question is missing a couple of details, but assuming something() is an async iterator or generator and you want item to be sentinel every time something has not yielded a value within the timeout, here is an implementation of timeout():
import asyncio
from typing import *

T = TypeVar('T')

# async generator, needs python 3.6
async def timeout(it: AsyncIterator[T], timeo: float, sentinel: T) -> AsyncGenerator[T, None]:
    try:
        nxt = asyncio.ensure_future(it.__anext__())
        while True:
            try:
                yield await asyncio.wait_for(asyncio.shield(nxt), timeo)
                nxt = asyncio.ensure_future(it.__anext__())
            except asyncio.TimeoutError:
                yield sentinel
    except StopAsyncIteration:
        pass
    finally:
        nxt.cancel()  # in case we're getting cancelled ourselves
test:
async def something():
    yield 1
    await asyncio.sleep(1.1)
    yield 2
    await asyncio.sleep(2.1)
    yield 3

async def test():
    expect = [1, None, 2, None, None, 3]
    async for item in timeout(something(), 1, None):
        print("Check", item)
        assert item == expect.pop(0)

asyncio.get_event_loop().run_until_complete(test())
When wait_for() times out it will cancel the task. Therefore, we need to wrap it.__anext__() in a task and then shield it, to be able to resume the iterator.
I want the coroutine to execute refresh at least every 60 seconds.
If you need to execute refresh every 60 seconds regardless of what happens with do_something_useful, you can arrange that with a separate coroutine:
import time

async def my_loop():
    # ensure refresh() is invoked at least once in 60 seconds
    done = False

    async def repeat_refresh():
        last_run = time.time()
        while not done:
            await refresh()
            now = time.time()
            await asyncio.sleep(max(60 - (now - last_run), 0))
            last_run = now

    # start repeat_refresh "in the background"
    refresh_task = asyncio.get_event_loop().create_task(repeat_refresh())
    try:
        async for item in something():
            if item is not None:
                await do_something_useful(item)
            await refresh()
    finally:
        done = True

how to cache asyncio coroutines

I am using aiohttp to make a simple HTTP request in python 3.4 like this:
response = yield from aiohttp.get(url)
The application requests the same URL over and over again so naturally I wanted to cache it. My first attempt was something like this:
@functools.lru_cache(maxsize=128)
def cached_request(url):
    return aiohttp.get(url)
The first call to cached_request works fine, but in later calls I end up with None instead of the response object.
I am rather new to asyncio so I tried a lot of combinations of the asyncio.coroutine decorator, yield from and some other things, but none seemed to work.
So how does caching coroutines work?
Maybe a bit late, but I've started a new package that may help: https://github.com/argaen/aiocache. Contributions/comments are always welcome.
An example:
import asyncio
from collections import namedtuple

from aiocache import cached
from aiocache.serializers import PickleSerializer

Result = namedtuple('Result', "content, status")

@cached(ttl=10, serializer=PickleSerializer())
async def async_main():
    print("First ASYNC non cached call...")
    await asyncio.sleep(1)
    return Result("content", 200)

if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    print(loop.run_until_complete(async_main()))
    print(loop.run_until_complete(async_main()))
    print(loop.run_until_complete(async_main()))
    print(loop.run_until_complete(async_main()))
Note that as an extra, it can cache any python object into redis using Pickle serialization. In case you just want to work with memory, you can use the SimpleMemoryCache backend :).
A popular async version of lru_cache exists here: async_lru.
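For example (a minimal sketch, assuming async_lru's alru_cache decorator and aiohttp are installed):
import aiohttp
from async_lru import alru_cache

@alru_cache(maxsize=128)
async def cached_get(url):
    # The awaited result is cached per url, so repeated calls hit the cache.
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            return await resp.text()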
To use functools.lru_cache with coroutines, the following code works.
class Cacheable:
    def __init__(self, co):
        self.co = co
        self.done = False
        self.result = None
        self.lock = asyncio.Lock()

    def __await__(self):
        with (yield from self.lock):
            if self.done:
                return self.result
            self.result = yield from self.co.__await__()
            self.done = True
            return self.result

def cacheable(f):
    def wrapped(*args, **kwargs):
        r = f(*args, **kwargs)
        return Cacheable(r)
    return wrapped

@functools.lru_cache()
@cacheable
async def foo():
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            return await resp.text()
The following is thread-safe:
class ThreadSafeCacheable:
    def __init__(self, co):
        self.co = co
        self.done = False
        self.result = None
        self.lock = threading.Lock()

    def __await__(self):
        while True:
            if self.done:
                return self.result
            if self.lock.acquire(blocking=False):
                self.result = yield from self.co.__await__()
                self.done = True
                return self.result
            else:
                yield from asyncio.sleep(0.005)
I wrote a simple cache decorator myself:
def async_cache(maxsize=128):
    cache = {}

    def decorator(fn):
        def wrapper(*args):
            key = ':'.join(args)

            if key not in cache:
                if len(cache) >= maxsize:
                    del cache[next(iter(cache))]

                cache[key] = yield from fn(*args)

            return cache[key]
        return wrapper
    return decorator

@async_cache()
@asyncio.coroutine
def expensive_io():
    ....
This kind-of-works. But many aspects can probably be improved. For example: If the cached function is called a second time before the first call returns, it will execute a second time.
I'm not that familiar with aiohttp so I'm not sure of exactly what is happening that would cause Nones to be returned, but the lru_cache decorator will not work with async functions.
I use a decorator which does essentially the same thing; note that it is different to tobib's decorator above in that it will always return a future or a task, rather than the value:
import asyncio
from collections import OrderedDict
from functools import _make_key, wraps

def future_lru_cache(maxsize=128):
    # support use as decorator without calling, for this case maxsize will
    # not be an int
    try:
        real_max_size = int(maxsize)
    except (TypeError, ValueError):
        real_max_size = 128

    cache = OrderedDict()

    async def run_and_cache(func, args, kwargs):
        """Run func with the specified arguments and store the result
        in cache."""
        result = await func(*args, **kwargs)
        cache[_make_key(args, kwargs, False)] = result
        if len(cache) > real_max_size:
            cache.popitem(False)
        return result

    def wrapper(func):
        @wraps(func)
        def decorator(*args, **kwargs):
            key = _make_key(args, kwargs, False)
            if key in cache:
                # Some protection against duplicating calls already in
                # progress: when starting the call cache the future, and if
                # the same thing is requested again return that future.
                if isinstance(cache[key], asyncio.Future):
                    return cache[key]
                else:
                    f = asyncio.Future()
                    f.set_result(cache[key])
                    return f
            else:
                task = asyncio.Task(run_and_cache(func, args, kwargs))
                cache[key] = task
                return task
        return decorator

    if callable(maxsize):
        return wrapper(maxsize)
    else:
        return wrapper
I used _make_key from functools as lru_cache does; I guess it's supposed to be private, so it's probably better to copy it over.
This is how I think it's most easily done, using the built-in lru_cache and futures:
import asyncio
import functools

# parameterless decorator
def async_lru_cache_decorator(async_function):
    @functools.lru_cache
    def cached_async_function(*args, **kwargs):
        coroutine = async_function(*args, **kwargs)
        return asyncio.ensure_future(coroutine)
    return cached_async_function

# decorator with options
def async_lru_cache(*lru_cache_args, **lru_cache_kwargs):
    def async_lru_cache_decorator(async_function):
        @functools.lru_cache(*lru_cache_args, **lru_cache_kwargs)
        def cached_async_function(*args, **kwargs):
            coroutine = async_function(*args, **kwargs)
            return asyncio.ensure_future(coroutine)
        return cached_async_function
    return async_lru_cache_decorator

@async_lru_cache(maxsize=128)
async def your_async_function(...): ...
This is basically taking your original function and wrapping it so I can store the Coroutine it returns and convert it into a Future. This way, this can be treated as a regular function and you can lru_cache-it as you would usually do it.
Why is wrapping it in a Future necessary? Python coroutines are low level constructs and you can't await one more than once (You would get RuntimeError: cannot reuse already awaited coroutine). Futures, on the other hand, are handy and can be awaited consecutively and will return the same result.
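A tiny illustration of the difference (a sketch; requires Python 3.7+ for asyncio.run):
import asyncio

async def compute():
    return 42

async def main():
    coro = compute()
    print(await coro)    # works once
    # print(await coro)  # RuntimeError: cannot reuse already awaited coroutine

    fut = asyncio.ensure_future(compute())
    print(await fut)     # 42
    print(await fut)     # 42 again: a finished Future returns the same result

asyncio.run(main())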
One caveat is that caching a Future will also cache the outcome when the original function raised an error. The original lru_cache does not cache interrupted executions, so watch out for this edge case when using the solution above.
Further tweaking can be done to merge both the parameter-less and the parameterized decorators, like the original lru_cache which supports both usages.
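One way to support both usages in a single decorator (a sketch following the callable-check pattern from future_lru_cache above):
import asyncio
import functools

def async_lru_cache(maxsize=128):
    def decorating(async_function):
        # When used bare, maxsize is actually the decorated function, so fall back to 128.
        size = maxsize if isinstance(maxsize, int) else 128

        @functools.lru_cache(maxsize=size)
        def cached_async_function(*args, **kwargs):
            return asyncio.ensure_future(async_function(*args, **kwargs))
        return cached_async_function

    if callable(maxsize):   # used as @async_lru_cache
        return decorating(maxsize)
    return decorating       # used as @async_lru_cache(...)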
Another variant of an LRU decorator, which caches not-yet-finished coroutines; very useful with parallel requests to the same key:
import asyncio
from collections import OrderedDict
from functools import _make_key, wraps

def async_cache(maxsize=128, event_loop=None):
    cache = OrderedDict()
    if event_loop is None:
        event_loop = asyncio.get_event_loop()
    awaiting = dict()

    async def run_and_cache(func, args, kwargs):
        """await func with the specified arguments and store the result
        in cache."""
        result = await func(*args, **kwargs)
        key = _make_key(args, kwargs, False)
        cache[key] = result
        if len(cache) > maxsize:
            cache.popitem(False)
        cache.move_to_end(key)
        return result

    def decorator(func):
        @wraps(func)
        async def wrapper(*args, **kwargs):
            key = _make_key(args, kwargs, False)
            if key in cache:
                return cache[key]
            if key in awaiting:
                task = awaiting[key]
                return await asyncio.wait_for(task, timeout=None, loop=event_loop)
            task = asyncio.ensure_future(run_and_cache(func, args, kwargs), loop=event_loop)
            awaiting[key] = task
            result = await asyncio.wait_for(task, timeout=None, loop=event_loop)
            del awaiting[key]
            return result
        return wrapper

    return decorator


async def test_async_cache(event_loop):
    counter = 0
    n, m = 10, 3

    @async_cache(maxsize=n, event_loop=event_loop)
    async def cached_function(x):
        nonlocal counter
        await asyncio.sleep(0)  # making event loop switch to other coroutine
        counter += 1
        return x

    tasks = [asyncio.ensure_future(cached_function(x), loop=event_loop)
             for x in list(range(n)) * m]
    done, pending = await asyncio.wait(tasks, loop=event_loop, timeout=1)
    assert len(done) == n * m
    assert counter == n

event_loop = asyncio.get_event_loop()
task = asyncio.ensure_future(test_async_cache(event_loop))
event_loop.run_until_complete(task)
I think that the simplest way is to use aiohttp_cache (documentation)
pip install aiohttp-cache
And use it in code:
from aiohttp import web
from aiohttp_cache import cache, setup_cache

@cache()  # <-- DECORATED FUNCTION
async def example_1(request):
    return web.Response(text="Example")

app = web.Application()
app.router.add_route('GET', "/", example_1)

setup_cache(app)  # <-- INITIALIZED aiohttp-cache

web.run_app(app, host="127.0.0.1")
Try async-cache (on PyPI and GitHub) for caching async functions in Python.
It also supports functions with parameters of user-defined, object, or unhashable types, which is not supported by either functools.lru_cache or async_lru.
Usage:
pip install async-cache

from cache import AsyncLRU

@AsyncLRU(maxsize=128)
async def func(*args, **kwargs):
    pass
I wrote a simple package named asyncio-cache - https://github.com/matan1008/asyncio-cache.
I tried to keep the code as close as possible to the original python implementation and as simple as possible.
For example:
from asyncio_cache import lru_cache
import aiohttp

@lru_cache(maxsize=128)
async def cached_get(url):
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            return await resp.text()
