In my tests I ran coverage and ended up with untested parts inside an async with block; what coverage flags as missed is the exception-handling part. My code is like this:
@classmethod
async def update_eda_configuration(
    cls,
    configuration: ConfigurationInputOnUpdate
) -> Union['Configuration', ConfigurationOperationError]:
    async with get_session() as conn:
        result = await conn.execute(
            select(Configuration).where(Configuration.id == configuration.id))
        configuration_to_update = result.scalars().unique().first()
        if configuration_to_update is not None:
            configuration_to_update.attribute = configuration.attribute
            configuration_to_update.value = configuration.value
            try:
                await conn.commit()
                return configuration_to_update
            except Exception as e:
                matches = re.findall(
                    pattern='DETAIL:.*',
                    string=str(e.orig),
                )
Coverage looks like this: [coverage screenshot: the lines in the except block are reported as not covered]
I tried several tests, for example patching conn.commit, but to no avail.
How can I cover those lines?
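One way to reach the except branch is to force conn.commit to raise. Below is a minimal sketch, not tailored to the real project layout: myapp.models is an assumed import path (patch get_session wherever the model module looks it up), and the fake .orig attribute is there only because the handler reads e.orig:

import pytest
from unittest.mock import AsyncMock, MagicMock, patch

from myapp.models import Configuration  # hypothetical import path

@pytest.mark.asyncio  # requires pytest-asyncio
async def test_update_covers_except_branch():
    conn = MagicMock()
    # execute() must resolve to a row so the code reaches conn.commit()
    result = MagicMock()
    result.scalars.return_value.unique.return_value.first.return_value = MagicMock()
    conn.execute = AsyncMock(return_value=result)
    # the handler reads e.orig, so attach one to the raised exception
    error = Exception("integrity error")
    error.orig = "DETAIL: Key (id)=(1) already exists."
    conn.commit = AsyncMock(side_effect=error)

    session_cm = MagicMock()
    session_cm.__aenter__ = AsyncMock(return_value=conn)
    session_cm.__aexit__ = AsyncMock(return_value=False)

    # patch get_session where the model module imports it
    with patch("myapp.models.get_session", return_value=session_cm):
        await Configuration.update_eda_configuration(
            MagicMock(id=1, attribute="a", value="b"))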
Related question:
I'm trying to mock Gino, but I keep getting the error
gino.exceptions.UninitializedError: Gino engine is not initialized.
My code is structured like this:
# __init__.py
@functools.lru_cache
def get_db_service():
    db = Gino(dsn=settings.get_settings().postgresql_conn_url)
    return db

# model
_db = get_db_service()

class EdaTableInstance(_db.Model):
    __tablename__ = "eda_table_instance"
    # ...

    @classmethod
    async def get_all(cls) -> List['EdaTableInstance']:
        async with _db.acquire():
            return await EdaTableInstance.query.gino.all()
How I'm writing the tests (various attempts):
# conftest.py
@pytest.fixture(autouse=True)
def mock_get_db_service(mocker):
    db = Gino(dsn="sqlite//:memory:")
    async_mock = AsyncMock(db)
    mocker.patch("gateway_api.services.get_db_service", return_value=async_mock)
    yield
or
# conftest.py
@pytest.fixture
async def db_initialize():
    await db.set_bind('sqlite:///:memory:')
    await db.gino.create_all()
    await EdaTableInstance.create_eda_table_instance(
        EdaTableInstanceInputOnCreate({"name": "table_server1", "host": "123.123.123.123"})
    )
    yield
or
# test_models.py
@pytest.fixture
def mock_gino_get_all(mocker):
    mocker.patch("gino.api.GinoExecutor.all", return_value=[])

@pytest.mark.asyncio
@pytest.mark.parametrize("id, expected", [(None, None)])
async def test_01_table_instance_get_all(id, expected):
    mock_cursor = MagicMock()
    mock_cursor.configure_mock(
        **{
            "get_one.return_value": [id]
        }
    )
    res = await EdaTableInstance().get_one(mock_cursor)
    assert res == expected
I would like to use an in-memory SQLite database so I don't have to connect to a real one. If you know better ways to mock the database, that would help me a lot.
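Note that Gino only supports PostgreSQL (via asyncpg), so binding it to an in-memory SQLite database will not work. A hedged sketch of an alternative that needs no engine at all, patching both the connection acquisition and the query execution (the gateway_api.models import path is an assumption; adjust it to wherever the model and _db actually live):

import pytest
from unittest.mock import AsyncMock, MagicMock

from gateway_api.models import EdaTableInstance  # assumed module path

@pytest.mark.asyncio
async def test_get_all_without_engine(mocker):
    fake_rows = [MagicMock(), MagicMock()]
    # _db.acquire() is used as an async context manager; a plain MagicMock
    # supports __aenter__/__aexit__ out of the box on Python 3.8+
    mocker.patch("gateway_api.models._db.acquire", return_value=MagicMock())
    # short-circuit query execution so no engine is ever touched
    mocker.patch("gino.api.GinoExecutor.all", new=AsyncMock(return_value=fake_rows))

    rows = await EdaTableInstance.get_all()
    assert rows == fake_rows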
I am learning Python with asyncio here. I have a JavaScript background and was expecting it to work similarly to JS.
Unfortunately, the code that follows this excerpt reports that my coroutine does not have the columns from the chain data frame. My impression is that it is not awaiting correctly.
async def get_chain(symbol):
    try:
        chain = await options.get_options_chain(symbol, req["expiration"])
        return chain
    except Exception as err:
        print(err, flush=True)

async def get_price(symbol):
    try:
        price = await si.get_live_price(symbol)
        return price
    except Exception as err:
        print(err, flush=True)

@app.route("/dev/api/option/rankings", methods=["POST"])
@cross_origin(origin="*")
async def get_option_rankings():
    # get saved db symbols
    req = request.get_json()
    arr = []
    for symbol in req["symbols"]:
        # get stock price
        chain = get_chain(symbol)
        price = get_price(symbol)
        print(chain, flush=True)
        print(price, flush=True)
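What trips this up is that, unlike JS, calling an async function in Python does not start it running; it only creates a coroutine object, and nothing executes until that object is awaited, which is why the prints show coroutine objects instead of data. A minimal sketch of the corrected loop, assuming an async-capable framework such as Quart (plain Flask before 2.0 cannot run async routes), and noting that get_chain also reads req, which is local to the route and would need to be passed in:

import asyncio

@app.route("/dev/api/option/rankings", methods=["POST"])
@cross_origin(origin="*")
async def get_option_rankings():
    req = request.get_json()
    arr = []
    for symbol in req["symbols"]:
        # awaiting is what actually runs the coroutines;
        # gather awaits both concurrently and returns their results in order
        chain, price = await asyncio.gather(get_chain(symbol), get_price(symbol))
        arr.append((chain, price))
    print(arr, flush=True)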
I am trying to create an asyncio task, perform a db query and then a for-loop over the rows, and get the result back from the task. However, in the code sample below, it seems like my result is not being put into total_result.result() but rather just into total_result.
Am I misunderstanding something in my implementation of asyncio below?
from asyncio import create_task, get_event_loop

from sqlalchemy import create_engine

class DatabaseHandler:
    def __init__(self):
        self.loop = get_event_loop()
        self.engine = create_engine("postgres stuffs here")
        self.conn = self.engine.connect()

    async def _fetch_sql_data(self, query):
        return self.conn.execute(query)

    async def get_all(self, item):
        total_result = []
        if item == "all":
            data = create_task(self._fetch_sql_data("select col1 from table1;"))
        else:
            data = create_task(self._fetch_sql_data(f"select col1 from table1 where quote = '{item}';"))
        await data
        for i in data.result().fetchall():
            total_result.append(i[0])
        return total_result

    async def update(self):
        total_result = create_task(self.get_all("all"))
        print(await total_result)  # prints out the result immediately and not the task object;
                                   # this means that total_result.result() produces an error

loop = get_event_loop()
a = DatabaseHandler()
loop.run_until_complete(a.update())
I have a feeling it is because total_result is a list object, but I'm not sure how to resolve this.
task.result() returns the result of your task (the return value of the wrapped coro) and not another Task. This means this
task = asyncio.create_task(coro())
await task
result = task.result()
is actually equivalent to
result = await coro()
Using tasks is especially useful if you want to execute multiple coroutines concurrently. But since you are not doing that here, your code is a bit overcomplicated. You can just do:
async def get_all(self, item):
    total_result = []
    if item == "all":
        result = await self._fetch_sql_data("select col1 from table1;")
    else:
        result = await self._fetch_sql_data(f"select col1 from table1 where quote = '{item}';")
    for i in result.fetchall():
        total_result.append(i[0])
    return total_result  # holds the results of your db query, just as if called from sync code
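For contrast, a small sketch (not part of the original answer) of the concurrent case where tasks, or asyncio.gather, actually pay off; "AAPL" is a made-up quote value:

import asyncio

async def update(self):
    # both queries run concurrently; gather returns results in argument order
    all_rows, aapl_rows = await asyncio.gather(
        self.get_all("all"),
        self.get_all("AAPL"),  # made-up example value
    )
    print(all_rows, aapl_rows)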
I have the following method in my Tornado handler:
async def get(self):
    url = 'url here'
    try:
        async for batch in downloader.fetch(url):
            self.write(batch)
            await self.flush()
    except Exception as e:
        logger.warning(e)
This is the code for downloader.fetch():
async def fetch(url, **kwargs):
    timeout = kwargs.get('timeout', aiohttp.ClientTimeout(total=12))
    response_validator = kwargs.get('response_validator', json_response_validator)
    extractor = kwargs.get('extractor', json_extractor)
    try:
        async with aiohttp.ClientSession(timeout=timeout) as session:
            async with session.get(url) as resp:
                response_validator(resp)
                async for batch in extractor(resp):
                    yield batch
    except aiohttp.client_exceptions.ClientConnectorError:
        logger.warning("bad request")
        raise
    except asyncio.TimeoutError:
        logger.warning("server timeout")
        raise
I would like to yield the "batch" object from multiple downloaders in parallel.
I want the first available batch from whichever downloader produces one, and so on until all downloaders have finished. Something like this (this is not working code):
async for batch in [downloader.fetch(url1), downloader.fetch(url2)]:
....
Is this possible? How can I modify what I am doing in order to be able to yield from multiple coroutines in parallel?
How can I modify what I am doing in order to be able to yield from multiple coroutines in parallel?
You need a function that merges two async sequences into one, iterating over both in parallel and yielding elements from one or the other, as they become available. While such a function is not included in the current standard library, you can find one in the aiostream package.
You can also write your own merge function, as shown in this answer:
async def merge(*iterables):
    iter_next = {it.__aiter__(): None for it in iterables}
    while iter_next:
        for it, it_next in iter_next.items():
            if it_next is None:
                fut = asyncio.ensure_future(it.__anext__())
                fut._orig_iter = it
                iter_next[it] = fut
        done, _ = await asyncio.wait(iter_next.values(),
                                     return_when=asyncio.FIRST_COMPLETED)
        for fut in done:
            iter_next[fut._orig_iter] = None
            try:
                ret = fut.result()
            except StopAsyncIteration:
                del iter_next[fut._orig_iter]
                continue
            yield ret
Using that function, the loop would look like this:
async for batch in merge(downloader.fetch(url1), downloader.fetch(url2)):
....
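For completeness, a hedged sketch of the same handler loop using the aiostream package mentioned above (pip install aiostream); stream.merge interleaves batches from both downloaders as they become available, and url1/url2 are taken from the question:

from aiostream import stream

async def get(self):
    combined = stream.merge(downloader.fetch(url1), downloader.fetch(url2))
    async with combined.stream() as streamer:
        async for batch in streamer:
            self.write(batch)
            await self.flush()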
Edit:
As mentioned in the comments, the method below does not execute the given coroutines in parallel.
Check out the aitertools library.
import asyncio
import aitertools

async def f1():
    await asyncio.sleep(5)
    yield 1

async def f2():
    await asyncio.sleep(6)
    yield 2

async def iter_funcs():
    async for x in aitertools.chain(f2(), f1()):
        print(x)

if __name__ == '__main__':
    loop = asyncio.get_event_loop()
    loop.run_until_complete(iter_funcs())
It seems that the functions being iterated must be coroutines.
I have a situation: I'm using the asyncio package with Python 3.x and persisting data in a with block, something like this:
test_repo = TestRepository()
with (yield from test_repo):
    res = yield from test_repo.get_by_lim_off(
        page_size=int(length),
        offset=start,
        customer_name=customer_name,
        customer_phone=customer_phone,
        return_type=return_type
    )
I need to get res data in the with block, but persistence and fetching data should happen when I exit from the with block. How can I achieve this?
This behavior is only supported in Python 3.5+, via asynchronous context managers (__aenter__/__aexit__) and async with, both of which were added in PEP 492:
class TestRepository:
    # All your normal methods go here

    async def __aenter__(self):
        # You can call coroutines here
        await self.some_init()

    async def __aexit__(self, exc_type, exc, tb):
        # You can call coroutines here
        await self.do_persistence()
        await self.fetch_data()

async def do_work():
    test_repo = TestRepository()
    async with test_repo:
        res = await test_repo.get_by_lim_off(
            page_size=int(length),
            offset=start,
            customer_name=customer_name,
            customer_phone=customer_phone,
            return_type=return_type
        )

asyncio.get_event_loop().run_until_complete(do_work())
Prior to 3.5, you have to use a try/finally block with explicit calls to the init/cleanup coroutines, unfortunately:
@asyncio.coroutine
def do_work():
    test_repo = TestRepository()
    yield from test_repo.some_init()
    try:
        res = yield from test_repo.get_by_lim_off(
            page_size=int(length),
            offset=start,
            customer_name=customer_name,
            customer_phone=customer_phone,
            return_type=return_type
        )
    finally:
        yield from test_repo.do_persistence()
        yield from test_repo.fetch_data()
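As an aside beyond the original answer, on Python 3.7+ the same init/persist/fetch pairing can also be written with contextlib.asynccontextmanager instead of hand-written __aenter__/__aexit__ methods; a minimal sketch reusing the method names from the answer:

import contextlib

@contextlib.asynccontextmanager
async def repo_session(test_repo):
    await test_repo.some_init()
    try:
        yield test_repo
    finally:
        await test_repo.do_persistence()
        await test_repo.fetch_data()

# usage:
# async with repo_session(TestRepository()) as repo:
#     res = await repo.get_by_lim_off(...)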