Graphene: run all resolvers in a context manager - Python

Aiohttp provides a context manager to create a client session, and it's recommended to use one session for many HTTP requests (in most cases, one per application): https://aiohttp.readthedocs.io/en/stable/client_quickstart.html#make-a-request
But graphene resolvers need to be declared as methods on the query class:
http://docs.graphene-python.org/en/latest/execution/execute/
There is also an asyncio executor for graphene: https://github.com/graphql-python/aiohttp-graphql
Is there any way to execute all resolvers inside an async with context?
Example:
async def get_task(session, api_url, id):
    """Function to resolve a task from the REST API."""
    async with session.get(api_url + id) as response:
        return await response.json()

class Query(graphene.ObjectType):
    task = graphene.Field(Task)

    async def resolve_task(self, info, session, id=1):
        """This method needs to run inside an
        async with aiohttp.ClientSession() as session:
        context."""
        return await get_task(session, url, id)
I've thought about a decorator or middleware with a global variable, but that looks ugly. Is there a more elegant and Pythonic way to do it?

I would use context for that. See https://docs.graphene-python.org/en/latest/execution/execute/
Example:
import aiohttp
import asyncio
import graphene
from graphql.execution.executors.asyncio import AsyncioExecutor
from pprint import pprint

async def get_task(session, api_url, id):
    async with session.get(api_url + str(id)) as response:
        print(f'> Retrieving {id} using session {session}')
        return await response.json()

class Query(graphene.ObjectType):
    task = graphene.Field(
        type=graphene.types.json.JSONString,
        id=graphene.Int())

    async def resolve_task(self, info, id=1):
        return await get_task(
            session=info.context['session'],
            api_url=info.context['api_url'],
            id=id)

schema = graphene.Schema(query=Query)

async def main():
    query = '''
        query q1 {
            t1: task(id: 1)
            t2: task(id: 2)
        }
    '''
    async with aiohttp.ClientSession() as session:
        res = await schema.execute(
            query,
            context={
                'session': session,
                'api_url': 'https://jsonplaceholder.typicode.com/todos/',
            },
            executor=AsyncioExecutor(loop=asyncio.get_running_loop()),
            return_promise=True)
        assert not res.errors, repr(res.errors)
        pprint(res.data, width=150)

if __name__ == '__main__':
    asyncio.run(main())
Output:
$ python3 example.py
> Retrieving 2 using session <aiohttp.client.ClientSession object at 0x10917bfd0>
> Retrieving 1 using session <aiohttp.client.ClientSession object at 0x10917bfd0>
OrderedDict([('t1', '{"userId": 1, "id": 1, "title": "delectus aut autem", "completed": false}'),
('t2', '{"userId": 1, "id": 2, "title": "quis ut nam facilis et officia qui", "completed": false}')])
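As a side note: if passing the session through info.context feels too implicit, the standard library's contextvars module sits between the decorator and global-variable ideas from the question. A minimal sketch of that alternative (plain stdlib plus aiohttp, not a graphene API):

import aiohttp
import contextvars

# A ContextVar instead of a plain global: each asyncio task sees the value
# set in its own context, so concurrent executions don't trample each other.
session_var: contextvars.ContextVar[aiohttp.ClientSession] = contextvars.ContextVar('session')

async def get_task(api_url, id):
    session = session_var.get()  # raises LookupError if no session was set
    async with session.get(api_url + str(id)) as response:
        return await response.json()

async def main():
    async with aiohttp.ClientSession() as session:
        session_var.set(session)
        # ...execute the schema here; resolvers call session_var.get()...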

Related

Python: How to use asyncio with huge list of urls to send asynchronous requests?

I have a huge list of URLs to which I need to send requests and retrieve JSON data. The problem is that the list is too big to load at once, so I would like to read the URLs one by one and start a request as each one is loaded. My code works for a small list (~20k) with no problem, but I got stuck with the huge one.
It would be great if you could tell me how to change my code so that it sends asynchronous requests for each URL in the list. Thank you in advance.
Here is my code:
import json
from json.decoder import JSONDecodeError
import urllib
from urllib.parse import quote
import time
import asyncio
import aiohttp

urls = ["url_1", "url_2", "url_3", ..., "url_3,000,000"]

json_results = []  # collected outputs (missing from the original snippet)

START = time.monotonic()

class RateLimiter:
    RATE = 20
    MAX_TOKENS = 10

    def __init__(self, client):
        self.client = client
        self.tokens = self.MAX_TOKENS
        self.updated_at = time.monotonic()

    async def get(self, *args, **kwargs):
        await self.wait_for_token()
        now = time.monotonic() - START
        print(f'{now:.0f}s: ask {args[0]}')
        return self.client.get(*args, **kwargs)

    async def wait_for_token(self):
        while self.tokens < 1:
            self.add_new_tokens()
            await asyncio.sleep(0.1)
        self.tokens -= 1

    def add_new_tokens(self):
        now = time.monotonic()
        time_since_update = now - self.updated_at
        new_tokens = time_since_update * self.RATE
        if self.tokens + new_tokens >= 1:
            self.tokens = min(self.tokens + new_tokens, self.MAX_TOKENS)
            self.updated_at = now

async def fetch_one(client, url):
    # Watch out for the extra 'await' here!
    async with await client.get(url) as response:
        try:
            results = await response.json()
            try:
                answer = results['results'][0]['locations']
                output = {
                    "Provided location": results['results'][0]['providedLocation'].get('location'),
                    "City": answer[0].get('adminArea5'),
                    "State": answer[0].get('adminArea3'),
                    "Country": answer[0].get('adminArea1')
                }
                json_results.append(output)
            except (IndexError, JSONDecodeError):
                output = {
                    "Provided location": 'null',
                    "City": 'null',
                    "State": 'null',
                    "Country": 'null'
                }
                json_results.append(output)
        except Exception:
            output = {
                "Provided location": None,
                "City": 'null',
                "State": 'null',
                "Country": 'null'
            }
            json_results.append(output)
    now = time.monotonic() - START

async def main():
    async with aiohttp.ClientSession() as client:
        client = RateLimiter(client)
        tasks = [asyncio.ensure_future(fetch_one(client, url)) for url in urls]
        await asyncio.gather(*tasks)

if __name__ == '__main__':
    asyncio.run(main())
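For lists this size, a common pattern is to stream the URLs through a bounded asyncio.Queue with a fixed pool of workers, so only a handful of URLs are in memory at any moment instead of 3,000,000 tasks at once. A minimal sketch, assuming the URLs live one per line in a file urls.txt (a hypothetical name, not from the question):

import asyncio
import aiohttp

NUM_WORKERS = 20

async def producer(queue):
    with open('urls.txt') as f:            # hypothetical input file
        for line in f:
            await queue.put(line.strip())  # suspends while the queue is full
    for _ in range(NUM_WORKERS):
        await queue.put(None)              # one stop sentinel per worker

async def worker(queue, session, results):
    while True:
        url = await queue.get()
        if url is None:
            break
        async with session.get(url) as resp:
            results.append(await resp.json())

async def main():
    queue = asyncio.Queue(maxsize=100)     # bounds memory use
    results = []
    async with aiohttp.ClientSession() as session:
        workers = [asyncio.create_task(worker(queue, session, results))
                   for _ in range(NUM_WORKERS)]
        await producer(queue)
        await asyncio.gather(*workers)
    return results

if __name__ == '__main__':
    asyncio.run(main())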

GOG Galaxy 2.0 PSN account (offline)

I have been trying to connect my PSN account to Galaxy 2.0 for a while now, and it keeps telling me that it's (offline). I have tried the solutions that come up first in a Google search, and they didn't work for me.
All the solutions I found are for a different version of the code (which I don't have) than the one the app installs by default or the one I can find on GitHub.
I will provide the code that I have, which is the one the app installed by default.
If you have any solutions or know how to fix this, please help me; thanks in advance.
This is the log, in case it is needed:
https://www.mediafire.com/file/3b3921wgyq9357m/plugin-psn-38087aea-3c30-439f-867d-ddf9fae8fe6f.log/file
import logging  # missing from the posted snippet, but needed by the logger below
import sys
from typing import List, Any, AsyncGenerator

from galaxy.api.consts import Platform, LicenseType
from galaxy.api.errors import InvalidCredentials
from galaxy.api.plugin import Plugin, create_and_run_plugin
from galaxy.api.types import Authentication, Game, NextStep, SubscriptionGame, \
    Subscription, LicenseInfo

from http_client import HttpClient
from http_client import OAUTH_LOGIN_URL, OAUTH_LOGIN_REDIRECT_URL
from psn_client import PSNClient
from version import __version__

AUTH_PARAMS = {
    "window_title": "Login to My PlayStation\u2122",
    "window_width": 536,
    "window_height": 675,
    "start_uri": OAUTH_LOGIN_URL,
    "end_uri_regex": "^" + OAUTH_LOGIN_REDIRECT_URL + ".*"
}

logger = logging.getLogger(__name__)

class PSNPlugin(Plugin):
    def __init__(self, reader, writer, token):
        super().__init__(Platform.Psn, __version__, reader, writer, token)
        self._http_client = HttpClient()
        self._psn_client = PSNClient(self._http_client)
        logging.getLogger("urllib3").setLevel(logging.FATAL)

    async def _do_auth(self, cookies):
        if not cookies:
            raise InvalidCredentials()
        self._http_client.set_cookies_updated_callback(self._update_stored_cookies)
        self._http_client.update_cookies(cookies)
        await self._http_client.refresh_cookies()
        user_id, user_name = await self._psn_client.async_get_own_user_info()
        if user_id == "":
            raise InvalidCredentials()
        return Authentication(user_id=user_id, user_name=user_name)

    async def authenticate(self, stored_credentials=None):
        stored_cookies = stored_credentials.get("cookies") if stored_credentials else None
        if not stored_cookies:
            return NextStep("web_session", AUTH_PARAMS)
        auth_info = await self._do_auth(stored_cookies)
        return auth_info

    async def pass_login_credentials(self, step, credentials, cookies):
        cookies = {cookie["name"]: cookie["value"] for cookie in cookies}
        self._store_cookies(cookies)
        return await self._do_auth(cookies)

    def _store_cookies(self, cookies):
        credentials = {
            "cookies": cookies
        }
        self.store_credentials(credentials)

    def _update_stored_cookies(self, morsels):
        cookies = {}
        for morsel in morsels:
            cookies[morsel.key] = morsel.value
        self._store_cookies(cookies)

    async def get_subscriptions(self) -> List[Subscription]:
        is_plus_active = await self._psn_client.get_psplus_status()
        return [Subscription(subscription_name="PlayStation PLUS", end_time=None, owned=is_plus_active)]

    async def get_subscription_games(self, subscription_name: str, context: Any) -> AsyncGenerator[List[SubscriptionGame], None]:
        yield await self._psn_client.get_subscription_games()

    async def get_owned_games(self):
        def game_parser(title):
            return Game(
                game_id=title["titleId"],
                game_title=title["name"],
                dlcs=[],
                license_info=LicenseInfo(LicenseType.SinglePurchase, None)
            )

        def parse_played_games(titles):
            return [{"titleId": title["titleId"], "name": title["name"]} for title in titles]

        purchased_games = await self._psn_client.async_get_purchased_games()
        played_games = parse_played_games(await self._psn_client.async_get_played_games())
        unique_all_games = {game['titleId']: game for game in played_games + purchased_games}.values()
        return [game_parser(game) for game in unique_all_games]

    async def shutdown(self):
        await self._http_client.close()

def main():
    create_and_run_plugin(PSNPlugin, sys.argv)

if __name__ == "__main__":
    main()

Testing in FastAPI using Tortoise-ORM

I'm trying to write some async tests in FastAPI using Tortoise ORM under Python 3.8, but I keep getting the same errors (shown at the end). I've been trying to figure this out for the past few days, but somehow all my recent attempts at writing tests have been unsuccessful.
I'm following the FastAPI docs and the Tortoise docs on this one.
main.py
# UserPy is a pydantic model
@app.post('/testpost')
async def world(user: UserPy) -> UserPy:
    await User.create(**user.dict())
    # Just returns the user model
    return user
simple_test.py
import json

import pytest
from fastapi.testclient import TestClient
from httpx import AsyncClient

# Presumably also needed here (not shown in the original snippet):
# from main import app
# from tortoise.contrib.test import initializer, finalizer

@pytest.fixture
def client1():
    with TestClient(app) as tc:
        yield tc

@pytest.fixture
def client2():
    initializer(DATABASE_MODELS, DATABASE_URL)
    with TestClient(app) as tc:
        yield tc
    finalizer()

@pytest.fixture
def event_loop(client2):  # Been using client1 and client2 on this
    yield client2.task.get_loop()

# The test
@pytest.mark.asyncio
def test_testpost(client2, event_loop):
    name, age = ['sam', 99]
    data = json.dumps(dict(username=name, age=age))
    res = client2.post('/testpost', data=data)
    assert res.status_code == 200

    # Sample query
    async def getx(id):
        return await User.get(pk=id)
    x = event_loop.run_until_complete(getx(123))
    assert x.id == 123
# end of code
My errors vary depending on whether I'm using client1 or client2.
Using client1, the error is:
RuntimeError: Task <Task pending name='Task-9' coro=<TestClient.wait_shutdown() running at <my virtualenv path>/site-packages/starlette/testclient.py:487> cb=[_run_until_complete_cb() at /usr/lib/python3.8/asyncio/base_events.py:184]> got Future <Future pending> attached to a different loop
Using client2, the error is:
asyncpg.exceptions.ObjectInUseError: cannot drop the currently open database
Oh, and I've also tried using httpx.AsyncClient, but still no success (and more errors). Any ideas? Because I'm out of my own.
It took me about an hour to get the async tests working. Here is an example (Python 3.8+ is required):
conftest.py
import pytest
from httpx import AsyncClient
from tortoise import Tortoise

from main import app

DB_URL = "sqlite://:memory:"

async def init_db(db_url, create_db: bool = False, schemas: bool = False) -> None:
    """Initial database connection"""
    await Tortoise.init(
        db_url=db_url, modules={"models": ["models"]}, _create_db=create_db
    )
    if create_db:
        print(f"Database created! {db_url = }")
    if schemas:
        await Tortoise.generate_schemas()
        print("Success to generate schemas")

async def init(db_url: str = DB_URL):
    await init_db(db_url, True, True)

@pytest.fixture(scope="session")
def anyio_backend():
    return "asyncio"

@pytest.fixture(scope="session")
async def client():
    async with AsyncClient(app=app, base_url="http://test") as client:
        print("Client is ready")
        yield client

@pytest.fixture(scope="session", autouse=True)
async def initialize_tests():
    await init()
    yield
    await Tortoise._drop_databases()
settings.py
import os

from dotenv import load_dotenv

load_dotenv()

DB_NAME = "async_test"
DB_URL = os.getenv(
    "APP_DB_URL", f"postgres://postgres:postgres@127.0.0.1:5432/{DB_NAME}"
)
ALLOW_ORIGINS = [
    "http://localhost",
    "http://localhost:8080",
    "http://localhost:8000",
    "https://example.com",
]
main.py
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from models.users import User, User_Pydantic, User_Pydantic_List, UserIn_Pydantic
from settings import ALLOW_ORIGINS, DB_URL
from tortoise.contrib.fastapi import register_tortoise

app = FastAPI()
app.add_middleware(
    CORSMiddleware,
    allow_origins=ALLOW_ORIGINS,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

@app.post("/testpost", response_model=User_Pydantic)
async def world(user: UserIn_Pydantic):
    return await User.create(**user.dict())

@app.get("/users", response_model=User_Pydantic_List)
async def user_list():
    return await User.all()

register_tortoise(
    app,
    config={
        "connections": {"default": DB_URL},
        "apps": {"models": {"models": ["models"]}},
        "use_tz": True,
        "timezone": "Asia/Shanghai",
        "generate_schemas": True,
    },
)
models/base.py
from typing import List, Set, Tuple, Union

from tortoise import fields, models
from tortoise.queryset import Q, QuerySet

def reduce_query_filters(args: Tuple[Q, ...]) -> Set:
    fields = set()
    for q in args:
        fields |= set(q.filters)
        c: Union[List[Q], Tuple[Q, ...]] = q.children
        while c:
            _c: List[Q] = []
            for i in c:
                fields |= set(i.filters)
                _c += list(i.children)
            c = _c
    return fields

class AbsModel(models.Model):
    id = fields.IntField(pk=True)
    created_at = fields.DatetimeField(auto_now_add=True, description="Created At")
    updated_at = fields.DatetimeField(auto_now=True, description="Updated At")
    is_deleted = fields.BooleanField(default=False, description="Mark as Deleted")

    class Meta:
        abstract = True
        ordering = ("-id",)

    @classmethod
    def filter(cls, *args, **kwargs) -> QuerySet:
        field = "is_deleted"
        if not args or (field not in reduce_query_filters(args)):
            kwargs.setdefault(field, False)
        return super().filter(*args, **kwargs)

    class PydanticMeta:
        exclude = ("created_at", "updated_at", "is_deleted")

    def __repr__(self):
        return f"<{self.__class__.__name__} {self.id}>"
models/users.py
from tortoise.contrib.pydantic import pydantic_model_creator, pydantic_queryset_creator

from .base import AbsModel, fields

class User(AbsModel):
    username = fields.CharField(60)
    age = fields.IntField()

    class Meta:
        table = "users"

    def __str__(self):
        return self.username  # the model defines `username`; `self.name` would raise AttributeError

User_Pydantic = pydantic_model_creator(User)
UserIn_Pydantic = pydantic_model_creator(User, name="UserIn", exclude_readonly=True)
User_Pydantic_List = pydantic_queryset_creator(User)
models/__init__.py
from .users import User # NOQA: F401
tests/test_users.py
import pytest
from httpx import AsyncClient

from models.users import User

@pytest.mark.anyio
async def test_testpost(client: AsyncClient):
    name, age = ["sam", 99]
    assert await User.filter(username=name).count() == 0

    data = {"username": name, "age": age}
    response = await client.post("/testpost", json=data)
    assert response.json() == dict(data, id=1)
    assert response.status_code == 200

    response = await client.get("/users")
    assert response.status_code == 200
    assert response.json() == [dict(data, id=1)]
    assert await User.filter(username=name).count() == 1
The source code of the demo has been posted to GitHub:
https://github.com/waketzheng/fastapi-tortoise-pytest-demo.git
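For reference, with the layout above the whole suite should run from the project root with plain pytest (assuming pytest and anyio are installed; anyio ships the pytest plugin that drives the async fixtures and the @pytest.mark.anyio test):
$ pytest tests/test_users.py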

A blocked Python async function invocation also blocks another async function

I use FastAPI to develop data-layer APIs accessing SQL Server.
No matter whether I use pytds or pyodbc, if a database transaction causes any request to hang, all the other requests get blocked (even those without any database operation).
Reproduce:
Intentionally open a serializable SQL Server session, begin a transaction, and do not roll back or commit:
INSERT INTO [dbo].[KVStore] VALUES ('1', '1', 0)
begin tran
SET TRANSACTION ISOLATION LEVEL Serializable
SELECT * FROM [dbo].[KVStore]
Send a request to the API with an async handler function like this:
def kv_delete_by_key_2_sql():
    conn = pytds.connect(dsn='192.168.0.1', database=cfg.kvStore_db, user=cfg.kvStore_uid,
                         password=cfg.kvStore_upwd, port=1435, autocommit=True)
    engine = conn.cursor()
    try:
        sql = "delete KVStore; commit"
        with concurrent.futures.ThreadPoolExecutor() as executor:
            future = executor.submit(engine.execute, sql)
            rs = future.result()
        j = {
            'success': True,
            'rowcount': rs.rowcount
        }
        return jsonable_encoder(j)
    except Exception as exn:
        j = {
            'success': False,
            'reason': exn_handle(exn)
        }
        return jsonable_encoder(j)

@app.post("/kvStore/delete")
async def kv_delete(request: Request, type_: Optional[str] = Query(None, max_length=50)):
    request_data = await request.json()
    return kv_delete_by_key_2_sql()
Then send a request to another endpoint of the same app, whose async handler looks like this:
async def hangit0(request: Request, t: int = Query(0)):
    print(t, datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3])
    await asyncio.sleep(t)
    print(t, datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3])
    j = {
        'success': True
    }
    return jsonable_encoder(j)

@app.get("/kvStore/hangit/")
async def hangit(request: Request, t: int = Query(0)):
    return await hangit0(request, t)
I expected step 2 to hang and step 3 to return directly after 2 seconds.
However, step 3 never returns as long as the transaction is neither committed nor rolled back...
How do I make these handler functions work concurrently?
The reason is that rs = future.result() is actually a blocking call - see the Python docs. Unfortunately, executor.submit() doesn't return an awaitable object (concurrent.futures.Future is different from asyncio.Future).
You can use asyncio.wrap_future, which takes a concurrent.futures.Future and returns an asyncio.Future (see the Python docs). The new Future object is awaitable, so you can convert your blocking function into an async function.
An Example:
import asyncio
import concurrent.futures

async def my_async():
    with concurrent.futures.ThreadPoolExecutor() as executor:
        future = executor.submit(lambda x: x + 1, 1)
        return await asyncio.wrap_future(future)

print(asyncio.run(my_async()))
In your code, simply change rs = future.result() to rs = await asyncio.wrap_future(future) and make the whole function async. That should do the magic, good luck! :)
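On Python 3.9+ there is an even shorter route: asyncio.to_thread runs a blocking callable in the default thread pool and returns an awaitable directly, so no executor needs to be managed by hand. A minimal sketch of the same idea (the blocking function is a stand-in, not the poster's code):

import asyncio

def blocking(x):
    # stands in for the blocking engine.execute(sql) call
    return x + 1

async def my_async():
    # schedules blocking() on the default ThreadPoolExecutor and awaits it,
    # leaving the event loop free to serve other requests in the meantime
    return await asyncio.to_thread(blocking, 1)

print(asyncio.run(my_async()))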

Asyncio and aiohttp returning task instead of results

I have a script that runs parallel requests against an API within a class. However, what I'm getting back is basically a task instead of the actual results. Any reason why?
I mimicked the modified Client code on https://pawelmhm.github.io/asyncio/python/aiohttp/2016/04/22/asyncio-aiohttp.html.
import asyncio
from aiohttp import ClientSession

class Requestor:
    async def _async_request(self, url, session, sema_sz=10):
        sema = asyncio.Semaphore(sema_sz)
        async with sema:
            async with session.get(url) as response:
                req = await response.json()
                return req

    async def _async_chunk_request(self, url, chunks, headers=None, sema_sz=10):
        async with ClientSession(headers=headers) as session:
            futures = [asyncio.ensure_future(self._async_request(url.format(chunk), session, sema_sz)) for chunk in chunks]
            responses = asyncio.gather(*futures)
            await responses

    def get_request(self, url, chunks):
        loop = asyncio.get_event_loop()
        bulk_req = asyncio.ensure_future(self._async_chunk_request(url, chunks))
        loop.run_until_complete(bulk_req)
        return bulk_req
bulk_req is actually a Task, not the results, and PyCharm shows this: Task finished coro=<Requestor._async_chunk_request() done, defined at ...
When I debug, I see that req has a full and proper response value, so there's no issue with that. I feel like it's something to do with the actual gathering of the futures?
Your _chunk_request does not return anything.
async def _chunk_request(...):
    ...
    ...
    await responses
I made a toy example trying to mimic your process. If I ended _chunk_request like you did, I got the same result - a finished Task with no results. Changing _chunk_request to return something fixed it:
async def _chunk_request(...):
    ...
    ...
    return await responses
If you only need the return values from the tasks, get_request should return the result of the loop.run_until_complete() call.
My toy example
import asyncio
import random
from pprint import pprint

async def response(n):
    await asyncio.sleep(random.choice([1, 3, 5]))  # note: this await was missing in the original snippet
    return f'i am {n}'

async def _request(n):
    req = await response(n)
    # print(req)
    return req

async def _chunk_request(chunks):
    futures = [asyncio.ensure_future(_request(chunk)) for chunk in chunks]
    # pprint(futures)
    responses = asyncio.gather(*futures, return_exceptions=True)
    # pprint(responses)
    return await responses

def get_request(chunks):
    loop = asyncio.get_event_loop()
    bulk_req = asyncio.ensure_future(_chunk_request(chunks))
    return loop.run_until_complete(bulk_req)
In [7]: result = get_request(range(1,6))
In [8]: print(result)
['i am 1', 'i am 2', 'i am 3', 'i am 4', 'i am 5']
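On Python 3.7+, the same flow reads more simply with asyncio.run, which returns the coroutine's result directly; a reworked sketch of the toy example above (not the original poster's code):

import asyncio

async def _request(n):
    await asyncio.sleep(0)  # stand-in for the real HTTP call
    return f'i am {n}'

async def _chunk_request(chunks):
    # gather() is awaited here, so the results (not a Task) propagate up
    return await asyncio.gather(*(_request(c) for c in chunks))

def get_request(chunks):
    return asyncio.run(_chunk_request(chunks))

print(get_request(range(1, 6)))  # ['i am 1', 'i am 2', 'i am 3', 'i am 4', 'i am 5']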
