this is my code:
payload = {'text': input_text,
           'question_info': '',
           'include_intonation': 1,
           'stress_version': stress_version,
           'include_fluency': 1,
           'include_ielts_subscore': 1}
files = [
    ('user_audio_file', open(saved_file_path, 'rb'))
]
headers = {}

form = aiohttp.FormData()
for key, value in payload.items():
    form.add_field(key, value)
form.add_field('user_audio_file', open(saved_file_path, 'rb'))

async with aiohttp.ClientSession() as session:
    async with session.post(url, data=form) as response:
        response_json = await response.json()
and I want to send a file with aiohttp to a URL, but I get this exception:
Can not serialize value type: <class 'int'> headers: {} value: 1
With the requests library I do it like this:
response = request(
    "POST", url, headers=headers, data=payload, files=files)
response_json = response.json()
but I decided to use aiohttp because it should be async.
Please help me with this.
Thanks.
You need to serialize the payload data using data=b'form', e.g.:
async with aiohttp.ClientSession() as session:
    async with session.post(url, data=b'form') as response:
        response_json = await response.json()
By default the session uses Python's standard json module for serialization, but it is possible to use a different serializer: ClientSession accepts a json_serialize parameter. Then you don't need to explicitly serialize your payload.
import ujson

async with aiohttp.ClientSession(
        json_serialize=ujson.dumps) as session:
    async with session.post(url, data=form) as response:
        response_json = await response.json()
        ...
Warning: the above code is not tested.
Update
I tried setting up a local HTTP server and uploading JSON. I am getting past your error and am able to upload data. Are you serializing the form data using b'form'?
As per this GitHub issue discussion, we need asyncio to control the async event loop and execute async/await through a function.
Here's the relevant code.
async def uploadForm():
    async with aiohttp.ClientSession() as session:
        async with session.post(url, data=b'form') as response:  # Converting form to binary payload using b'form'
            response_json = await response.json(content_type='text/html')
            print(response_json)

def main():
    loop = asyncio.get_event_loop()
    loop.run_until_complete(uploadForm())
    loop.close()

if __name__ == '__main__':
    main()
Hope this helps you.
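A different workaround that may also be worth trying (an untested sketch, not part of the original answer): aiohttp's FormData fields have to be str, bytes, or file-like objects, so the integer flags from the original payload can simply be stringified before being added. The names payload, url and saved_file_path are assumed to be the ones from the question:
# Untested sketch: convert non-string values to str so FormData can serialize them.
form = aiohttp.FormData()
for key, value in payload.items():
    form.add_field(key, str(value))
form.add_field('user_audio_file', open(saved_file_path, 'rb'))

async with aiohttp.ClientSession() as session:
    async with session.post(url, data=form) as response:
        response_json = await response.json()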
The following function needs to be tested:
api.py
async def __call__(self, http_request: Request):
    form = await http_request.form()
    garment_image_filename = form["Image1"].filename
    garment_image_contents = await form["Image1"].read()
It is invoked here:
consumeApi.py
form_data = aiohttp.FormData()
form_data.add_field("Image1", open(ImgaePath, 'rb'))
session_timeout = aiohttp.ClientTimeout(total=None, sock_connect=10.0, sock_read=120)
async with aiohttp.ClientSession(timeout=session_timeout) as session:
    async with session.post(
        "http://someurl",
        data=form_data,
        timeout=120
    ) as resp:
        result = await resp.read()
I am facing a challenge in creating a fake HTTP request for the __call__ function so that it catches the form data in http_request.form().
How can I mock the session to test the __call__ function?
I have checked responses, but I'm unable to figure out how to send form data through it.
P.S.: Please let me know if any more details are required.
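One possible approach (a rough, untested sketch, not a confirmed answer): instead of hand-building a fake Request, mount the handler on a FastAPI route and drive it with the test client, which constructs the multipart Request for you. MyHandler, the /predict path, and garment.jpg are hypothetical placeholders:
from fastapi import FastAPI, Request
from fastapi.testclient import TestClient

app = FastAPI()
handler = MyHandler()  # hypothetical: the object whose __call__ is defined in api.py

@app.post("/predict")
async def predict(request: Request):
    # Delegates the real Request (including its multipart form data) to __call__.
    return await handler(request)

def test_call_receives_form():
    client = TestClient(app)
    with open("garment.jpg", "rb") as f:  # hypothetical test fixture
        resp = client.post("/predict", files={"Image1": ("garment.jpg", f, "image/jpeg")})
    assert resp.status_code == 200
With files=..., the test client builds the multipart body, so http_request.form() inside __call__ sees "Image1" with a filename and readable contents.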
I am using aiohttp and asyncio to run multiple requests asynchronously. The problem is that when I try to print the data I receive, I end up getting the data of another request in the task queue. I have tried to debug this and looked at the docs for answers, but I am unable to solve this problem.
Here's my code:
from time import sleep
import aiohttp
import asyncio

async def search(query, session):
    search_params = {
        "query": query
    }
    async with session.get(
        url,
        params=search_params,
    ) as response:
        json_response = await response.json()
        data = json_response["data"]
        print(data)
        """the above line always prints the data from the response of the first task to get executed
        and not the current data from this request with a different query"""

async def main():
    async with aiohttp.ClientSession() as session:
        await init_session(session)
        await enable_search(session)
        while True:
            tasks = [asyncio.create_task(search(query, session=session)) for query in inputs]
            await asyncio.gather(*tasks)
            sleep(5)

if __name__ == "__main__":
    asyncio.run(main())
I am working on a reverse proxy based on FastAPI. I want to transparently forward the data requested via AsyncClient. I have a problem with gzipped pages. Can you please help me prevent the default un-gzipping of resp.content in this example?
@app.get("/{path:path}")
async def _get(path: str, request: Request):
    url = await my_proxy_logic(path, request)
    async with httpx.AsyncClient() as client:
        req = client.build_request("GET", url)
        resp = await client.send(req, stream=False)
        return Response(status_code=resp.status_code, headers=resp.headers, content=resp.content)
It is possible to extract undecoded data from an httpx response only in streaming mode (stream=True or httpx.stream). In the example below, I collect the entire response using aiter_raw and return it from the path operation. Keep in mind that the entire response is loaded into memory; if you want to avoid this, use FastAPI's StreamingResponse (a sketch of that follows the example).
import httpx
from fastapi import FastAPI, Request, Response

app = FastAPI()

@app.get("/pass")
async def root(request: Request):
    async with httpx.AsyncClient() as client:
        req = client.build_request('GET', 'http://httpbin.org/gzip')
        resp = await client.send(req, stream=True)
        return Response(status_code=resp.status_code, headers=resp.headers,
                        content=b"".join([part async for part in resp.aiter_raw()]))
I have a static url, headers, and data.
Is it possible to make a million POST requests simultaneously with Python?
This is the file.py:
import json
import requests
url = "https://abcd.com"
headers = "headers"
body = "body"
resp = requests.post(url, headers=headers, data=body)
json_resp = json.loads(resp.content)["data"]
print(json_resp)
You might want to use a Python tool for that, such as Locust:
https://locust.io/
Your file would look like:
from locust import HttpUser, task, between

class QuickstartUser(HttpUser):
    @task
    def task_name(self):
        self.client.post(url, headers=headers, data=body)
You could feed it to Locust like this:
locust --headless --users <number_of_user> -f <your_file.py>
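For reference, a fuller version of that locustfile might look roughly like this (untested sketch; url, headers and body are placeholders standing in for the static values from the question):
from locust import HttpUser, task, between

# Placeholders for the static request data from the question's file.py.
url = "https://abcd.com"
headers = {"Content-Type": "application/json"}
body = '{"key": "value"}'

class QuickstartUser(HttpUser):
    host = "https://abcd.com"   # base host Locust reports stats against
    wait_time = between(1, 2)   # pause between tasks per simulated user

    @task
    def task_name(self):
        self.client.post(url, headers=headers, data=body)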
You can do this in several ways; the best approach, and the whole idea behind async work, is asyncio with aiohttp. A second option is ThreadPoolExecutor, which I don't particularly recommend.
Here's an example of the async approach:
# modified fetch function with semaphore
import random
import asyncio
from aiohttp import ClientSession

async def fetch(url, session):
    async with session.get(url) as response:
        delay = response.headers.get("DELAY")
        date = response.headers.get("DATE")
        print("{}:{} with delay {}".format(date, response.url, delay))
        return await response.read()

async def bound_fetch(sem, url, session):
    # Getter function with semaphore.
    async with sem:
        await fetch(url, session)

async def run(r):
    url = "http://localhost:8080/{}"
    tasks = []
    # create instance of Semaphore
    sem = asyncio.Semaphore(1000)
    # Create client session that will ensure we dont open new connection
    # per each request.
    async with ClientSession() as session:
        for i in range(r):
            # pass Semaphore and session to every GET request
            task = asyncio.ensure_future(bound_fetch(sem, url.format(i), session))
            tasks.append(task)
        responses = asyncio.gather(*tasks)
        await responses

number = 10000
loop = asyncio.get_event_loop()
future = asyncio.ensure_future(run(number))
loop.run_until_complete(future)
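The example above uses GET; a minimal, untested adaptation of the fetch coroutine to the original static POST request might look like this (url, headers and body are assumed to be the ones from the question's file.py, and session the same ClientSession passed around above):
# Untested sketch: same semaphore/session pattern, but issuing the static POST.
async def post_fetch(session):
    async with session.post(url, headers=headers, data=body) as response:
        json_resp = await response.json()
        return json_resp["data"]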
I'm currently trying to audit a large number of redirect URL handles to make sure that their destinations are still valid.
I'm using aiohttp to go through the large volume in order to produce a report.
try:
    with aiohttp.Timeout(timeout):
        async with session.get(url) as resp:
            return {"Handle URL": url,
                    "Status Code": resp.status,
                    "Redirects": resp.url != url,
                    "Resolving URL": resp.url,
                    "Success": resp.status == 200,
                    "Message": ""}
except asyncio.TimeoutError:
    return {"Handle URL": url,
            "Success": False,
            "Message": "Handle server timed out. >{} seconds".format(timeout)}
For the most part, this has been fine for identifying which URL redirects no longer send to a valid URL. However, I'd really like to know the final address at which the request times out.
Any ideas?
The only way to do it is to disable redirects with allow_redirects=False and perform the redirections manually.
async with aiohttp.ClientSession() as session:
    async with session.get(URL, allow_redirects=False) as response:
        Location = str(response).split("Location': \'")[1].split("\'")[0]
        return Location
I don't think it is necessary anymore to parse that string for a Location. Here is a small example.
Local flask server with a redirect:
from flask import Flask, redirect

app = Flask(__name__)

@app.route('/')
def hello_world():
    return 'Hello World!'

@app.route('/redirect')
def redir():
    return redirect('/')

if __name__ == '__main__':
    app.run()
aiohttp request to that redirect:
# coding: utf-8
import asyncio
import aiohttp

async def fetch(URL):
    async with aiohttp.ClientSession() as session:
        async with session.get(URL, allow_redirects=False) as response:
            print(response.url, response.real_url, 'location' in str(response).lower())
        async with session.get(URL, allow_redirects=True) as response:
            print(response.url, response.real_url, 'location' in str(response).lower())

url = "http://127.0.0.1:5000/redirect"

async def main():
    await fetch(url)

loop = asyncio.new_event_loop()
loop.run_until_complete(main())
prints:
http://127.0.0.1:5000/redirect http://127.0.0.1:5000/redirect True
http://127.0.0.1:5000/ http://127.0.0.1:5000/ False
According to the docs, the difference between url and real_url is that real_url is the unmodified URL of the request, with the URL fragment left unstripped.
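As a side note (an untested sketch, not from the original answers): with redirects disabled, the target of a single redirect hop can also be read directly from the standard Location response header instead of parsing str(response):
# Untested sketch: returns the redirect target, or None if the response is not a redirect.
async def resolve_once(session, url):
    async with session.get(url, allow_redirects=False) as response:
        return response.headers.get('Location')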