503 error when downloading wikipedia dumps - python

I have the following script to download (and later on process) Wikipedia's pageviews dumps. I am getting 503 errors on all the pages (whose urls are correct).
import argparse
import aiohttp
import asyncio
import async_timeout
import re
base_url = "http://dumps.wikimedia.org/other/pagecounts-raw/{year}/{year}-{month:02d}/pagecounts-{year}{month:02d}{day:02d}-{hour:02d}0000.gz"
async def downloadFile(semaphore, session, url):
    """Fetch *url* via *session*, bounded by *semaphore*, and save it locally.

    The local filename is the URL with the 7-char scheme prefix stripped and
    every '/' replaced by '_'. Non-200 statuses and exceptions are printed
    and swallowed.
    """
    try:
        async with semaphore:
            with async_timeout.timeout(10):
                async with session.get(url) as remotefile:
                    if remotefile.status != 200:
                        print(remotefile.status)
                        return
                    payload = await remotefile.read()
                    # Flatten the URL path into a single file name.
                    local_name = url[7:].replace("/", "_")
                    with open(local_name, 'wb') as out:
                        print('Saving')
                        out.write(payload)
    except Exception as exc:
        print(exc)
async def aux(urls):
    """Download every URL in *urls* concurrently, at most 10 in flight."""
    sem = asyncio.Semaphore(10)
    async with aiohttp.ClientSession() as session:
        pending = []
        for u in urls:
            print(u)
            pending.append(asyncio.ensure_future(downloadFile(sem, session, u)))
        await asyncio.gather(*pending)
def main():
    """Build the first few pagecount URLs for --year/--month and download them.

    Bug fix: the original also called asyncio.ensure_future(aux(urls)) right
    before run_until_complete(aux(urls)), scheduling a second, duplicate
    download run; that stray call is removed here.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--year", type=int, default=2016)
    parser.add_argument("--month", type=int, default=4)
    parser.add_argument("--temp_folder", type=str)  # NOTE(review): parsed but never used
    args = parser.parse_args()

    urls = []
    # First 3 days x first 2 hours: range(1, 32)[:3] == range(1, 4), range(24)[:2] == range(2).
    for day in range(1, 4):
        for hour in range(2):
            urls.append(base_url.format(
                year=args.year, month=args.month, day=day, hour=hour))

    loop = asyncio.get_event_loop()
    loop.run_until_complete(aux(urls))


if __name__ == "__main__":
    main()
The error I'm getting is:
<ClientResponse(https://dumps.wikimedia.org/other/pagecounts-raw/2016/2016-04/pagecounts-20160402-000000.gz) [503 Service Temporarily Unavailable]>
<CIMultiDictProxy('Server': 'nginx/1.13.6', 'Date': 'Wed, 24 Oct 2018 21:27:58 GMT', 'Content-Type': 'text/html; charset=utf-8', 'Content-Length': '213', 'Connection': 'keep-alive', 'Strict-Transport-Security': 'max-age=106384710; includeSubDomains; preload')>
But this is really weird as copy-pasting the same urls on my chrome browser does the job!

I played with code and I can say following:
Wikipedia doesn't allow multiple requests per IP
Timeout 10 for this url is too low
To make your code work:
Change asyncio.Semaphore(10) to asyncio.Semaphore(1)
Change async_timeout.timeout(10) to async_timeout.timeout(120)
Completely remove the line asyncio.ensure_future(aux(urls)); you don't need it, since you already pass aux(urls) to run_until_complete
Final version that successfully downloads single archive:
import argparse
import aiohttp
import asyncio
import async_timeout
import re
base_url = "http://dumps.wikimedia.org/other/pagecounts-raw/{year}/{year}-{month:02d}/pagecounts-{year}{month:02d}{day:02d}-{hour:02d}0000.gz"
async def downloadFile(semaphore, session, url):
    """Download *url* (TLS verification disabled) into a flattened local filename.

    Waits on *semaphore* first, allows up to 120 s per request, and reports
    any non-200 status or exception instead of raising.
    """
    try:
        async with semaphore:
            with async_timeout.timeout(120):
                async with session.get(url, ssl=False) as remotefile:
                    if remotefile.status != 200:
                        print('status:', remotefile.status)
                        return
                    body = await remotefile.read()
                    target = url[7:].replace("/", "_")
                    with open(target, 'wb') as fh:
                        print('Saving')
                        fh.write(body)
    except Exception as err:
        print('exception:', type(err), str(err))
async def aux(urls):
    """Download *urls* strictly one at a time (the dump server throttles parallel pulls)."""
    sem = asyncio.Semaphore(1)
    async with aiohttp.ClientSession() as session:
        jobs = []
        for u in urls:
            print('url:', u)
            jobs.append(asyncio.ensure_future(downloadFile(sem, session, u)))
        await asyncio.gather(*jobs)
def main():
    """Download a single pagecounts archive for the given --year/--month."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--year", type=int, default=2016)
    parser.add_argument("--month", type=int, default=4)
    parser.add_argument("--temp_folder", type=str)
    args = parser.parse_args()

    # Only the first day / first hour is requested here.
    days = range(1, 32)[:1]
    hours = range(24)[:1]
    urls = [base_url.format(year=args.year, month=args.month, day=d, hour=h)
            for d in days for h in hours]

    asyncio.get_event_loop().run_until_complete(aux(urls))


if __name__ == "__main__":
    main()

Related

Python, Concurrency and asyncio: Problem adding a rotating proxy

I'm creating an optimized multi-threading app using asyncio and want to add a rotating proxy into the mix.
Starting with a sample taken from this outstanding article:
Speed Up Your Python Program With Concurrency
I added a rotating proxy and it stopped working. The code simply exits the function after touching the line for the proxy.
This little snippet of code works, but not when added to the main script as shown in the screenshot above.
import asyncio
import random as rnd
async def download_site():
    """Pause one second, then pick a proxy at random and print it."""
    proxy_list = [
        ('38.39.205.220:80'),
        ('38.39.204.100:80'),
        ('38.39.204.101:80'),
        ('38.39.204.94:80')
    ]
    await asyncio.sleep(1)
    print(rnd.choice(proxy_list))


asyncio.run(download_site())
And here's the full sample:
import asyncio
import time
import aiohttp
# Sample code taken from here:
# https://realpython.com/python-concurrency/#asyncio-version
# Info for adding headers for the proxy (Scroll toward the bottom)
# https://docs.aiohttp.org/en/stable/client_advanced.html
# Good read to possible improve performance on large lists of URLs
# https://asyncio.readthedocs.io/en/latest/webscraper.html
# RUN THIS METHOD TO SEE HOW IT WORKS.
# # Original Code (working...)
# async def download_site(session, url):
# async with session.get(url, proxy="http://proxy.com") as response:
# print("Read {0} from {1}".format(response.content_length, url))
def get_proxy(self=None):
    """Return a random (weight, 'host:port') proxy tuple, printing its address.

    Bug fixes vs. the original: the surrounding script never imports
    ``random`` (NameError at call time), and the stray ``self`` parameter made
    this free function uncallable without an argument — it now defaults to
    None and is ignored, which stays backward compatible.
    """
    import random  # local import: the surrounding script does not import random
    proxy_list = [
        (754, '38.39.205.220:80'),
        (681, '38.39.204.100:80'),
        (682, '38.39.204.101:80'),
        (678, '38.39.204.94:80')
    ]
    proxy = random.choice(proxy_list)
    print(proxy[1])
    return proxy
async def download_site(session, url):
    """Fetch *url* through a randomly chosen proxy and report the content length.

    Bug fix: the original referenced ``rnd`` without importing it; the
    resulting NameError was silently captured by
    ``asyncio.gather(..., return_exceptions=True)`` in the caller, which is
    why the coroutine appeared to "exit after touching the proxy line".
    """
    import random  # local import: this script never imports random / rnd
    proxy_list = [
        ('38.39.205.220:80'),
        ('38.39.204.100:80'),
        ('38.39.204.101:80'),
        ('38.39.204.94:80')
    ]
    await asyncio.sleep(1)
    proxy = random.choice(proxy_list)
    print(proxy)
    async with session.get(url, proxy="http://" + proxy) as response:
        print("Read {0} from {1}".format(response.content_length, url))
async def download_all_sites(sites):
    """Fan out one download task per site; task exceptions are captured, not raised."""
    async with aiohttp.ClientSession() as session:
        tasks = [asyncio.ensure_future(download_site(session, url))
                 for url in sites]
        await asyncio.gather(*tasks, return_exceptions=True)
# Modified to loop thru only 1 URL to make debugging simple
if __name__ == "__main__":
    sites = [
        "https://www.jython.org",
        # "http://olympus.realpython.org/dice",
    ] #* 80
    started = time.time()
    asyncio.get_event_loop().run_until_complete(download_all_sites(sites))
    duration = time.time() - started
    print(f"Downloaded {len(sites)} sites in {duration} seconds")
Thank you for any help you can offer.
You use return_exceptions=True but you don't actually check the returned results for errors. You can use asyncio.as_completed to handle exceptions and get the earliest next result:
import asyncio
import random
import traceback
import aiohttp
# Target URLs for the demo run.
URLS = ("https://stackoverflow.com",)
# Per-request timeout, in seconds.
TIMEOUT = 5
# Candidate proxies; one is picked uniformly at random per request.
PROXIES = (
"http://38.39.205.220:80",
"http://38.39.204.100:80",
"http://38.39.204.101:80",
"http://38.39.204.94:80",
)
def get_proxy():
    """Pick one proxy URL uniformly at random from the PROXIES pool."""
    pool = PROXIES
    return random.choice(pool)
async def download_site(session, url):
    """GET *url* through a random proxy, print the status, return the body text."""
    proxy = get_proxy()
    print(f"Got proxy: {proxy}")
    request = session.get(url, proxy=f"{proxy}", timeout=TIMEOUT)
    async with request as resp:
        print(f"{url}: {resp.status}")
        return await resp.text()
async def main():
    """Launch one download task per URL and consume results as they finish."""
    async with aiohttp.ClientSession() as session:
        tasks = [asyncio.create_task(download_site(session, url))
                 for url in URLS]
        # as_completed yields in completion order, so errors surface early.
        for coro in asyncio.as_completed(tasks):
            try:
                html = await coro
            except Exception:
                traceback.print_exc()
            else:
                print(len(html))


if __name__ == "__main__":
    asyncio.run(main())

How can I asynchronously request URLs in a growing queue with asyncio?

I have X initial urls that are paginated - in order to get the next set of data, I have to grab the next url from the response header until there is no next url. I am having trouble getting this going right. I'm trying a queue approach that I found here.
import asyncio
from aiohttp import ClientSession, TCPConnector
async def get(session, url):
    """GET *url* with the bearer token; return (parsed JSON body, response object)."""
    headers = {
        'Authorization': 'Bearer KEY',
    }
    async with session.get(url, headers=headers) as response:
        return await response.json(), response
async def process(session, url, q):
    """Fetch *url*, enqueue its pagination 'next' link (if any), then use the data."""
    try:
        try:
            views, response = await get(session, url)
            scode = response.status
            if scode == 404:
                # 404: this pagination chain is exhausted, nothing to process.
                return
        except Exception as e:
            print(e)
            return
        try:
            # Follow pagination: push the 'next' link from the response headers.
            await q.put(str(response.links["next"]["url"]))
        except:
            # Bare except: intended to ignore a missing 'next' link on the last
            # page, but it also hides any other error raised here.
            pass
        <do something with views>  # placeholder from the question — not valid Python
    except Exception as e:
        print(e)
async def fetch_worker(session, q):
    """Worker loop: pull URLs from *q* forever and process each one.

    Every dequeued URL is acknowledged with q.task_done(), even on failure,
    so q.join() can eventually complete.
    """
    while True:
        next_url = await q.get()
        try:
            await process(session, next_url, q)
        except Exception as exc:
            print(exc)
        finally:
            q.task_done()
async def d():
    """Seed the URL queue and run 500 workers against it.

    NOTE(review): ``tasks``, ``stdrows`` and ``url_queue`` are never defined
    in this snippet, so as posted it raises NameError. Also,
    ``await asyncio.gather(*tasks)`` on never-returning workers blocks
    forever, so ``url_queue.join()`` is never reached.
    """
    <code to query and put data into stdrows>  # placeholder from the question
    connector = TCPConnector(limit=500)
    async with ClientSession(connector=connector) as session:
        url = '<some base url>'
        for i in range(500):
            tasks.append(asyncio.create_task(fetch_worker(session, url_queue)))
        for row in stdrows:
            await url_queue.put(url.format(row[1]))
        await asyncio.gather(*tasks)
        await url_queue.join()


asyncio.run(d())
This appears not to be going at 500 tasks/sec. is it even possible to get to this rate without knowing all the URLs ahead of time? I am hoping to fetch the next url from whatever initial url (or from its paginated url) while i work with views.

Using aiohttp to get the status of a number of websites

I have this code i am using to get the status of a list of websites.
import aiohttp
import asyncio
import json
import sys
import time
async def get_statuses(websites):
    """Tally HTTP status codes across *websites* and print the tally as JSON."""
    statuses = {}
    tasks = [get_website_status(website) for website in websites]
    for status in await asyncio.gather(*tasks):
        # First falsy/absent entry is (re)set to 0 before counting,
        # mirroring the original get()-then-increment pattern.
        statuses[status] = (statuses.get(status) or 0) + 1
    print(json.dumps(statuses))
async def get_website_status(url):
    """Return the HTTP status code for *url*.

    NOTE(review): module-level ``aiohttp.get`` is the deprecated API this
    question goes on to replace with a ClientSession-based version.
    """
    response = await aiohttp.get(url)
    status = response.status
    response.close()
    return status
if __name__ == '__main__':
    # File named on the command line: one website URL per line.
    with open(sys.argv[1], 'r') as f:
        websites = f.read().splitlines()
    t0 = time.time()
    asyncio.get_event_loop().run_until_complete(get_statuses(websites))
    elapsed = time.time() - t0
    print("getting website statuses took {0:.1f} seconds".format(elapsed))
and since get is deprecated (await aiohttp.get(url)), I edited the code as follows
import aiohttp
import asyncio
import json
import sys
import time
async def fetch(session, url):
    """Return the response *body text* for *url* — not the response object.

    NOTE(review): returning ``response.text()`` here is the bug the traceback
    below points at — the caller then reads ``.status`` off a ``str``.
    """
    async with session.get(url) as response:
        return await response.text()
async def get_statuses(websites):
    """Count status results for all *websites* and print the counts as JSON."""
    statuses = {}
    results = await asyncio.gather(
        *(get_website_status(w) for w in websites))
    for status in results:
        if not statuses.get(status):
            statuses[status] = 0
        statuses[status] += 1
    print(json.dumps(statuses))
async def get_website_status(url):
    """Intended to return the HTTP status code for *url*.

    NOTE(review): ``fetch`` returns the body text (a ``str``), so the
    ``.status`` / ``.close()`` calls below raise the posted AttributeError.
    """
    async with aiohttp.ClientSession() as session:
        response = await fetch(session, url)
        #response = await aiohttp.get(url)
        status = response.status
        response.close()
        return status
if __name__ == '__main__':
    # Read one website URL per line from the file given on the command line.
    with open(sys.argv[1], 'r') as f:
        websites = f.read().splitlines()
    started = time.time()
    loop = asyncio.get_event_loop()
    loop.run_until_complete(get_statuses(websites))
    print("getting website statuses took {0:.1f} seconds".format(time.time() - started))
I copied the session code from the docs https://aiohttp.readthedocs.io/en/stable/
However when i run my code i get this error:
c:\asyncio>a.py list.txt
Traceback (most recent call last):
File "C:\asyncio\a.py", line 35, in <module>
loop.run_until_complete(get_statuses(websites))
File "C:\Users\user\AppData\Local\Programs\Python\Python37\lib\asyncio\base_ev
ents.py", line 579, in run_until_complete
return future.result()
File "C:\asyncio\a.py", line 14, in get_statuses
for status in await asyncio.gather(*tasks):
File "C:\asyncio\a.py", line 25, in get_website_status
status = response.status
AttributeError: 'str' object has no attribute 'status'
c:\asyncio>
here is a sample list.txt
https://facebook.com/
https://twitter.com/
https://google.com/
https://youtube.com/
https://linkedin.com/
https://instagram.com/
https://pinterest.com/
The get_website_status routine delegates to the fetch function, which returns the text content (response.text()), not the response itself.
That's why, further on, response.status throws the obvious error.
In case if response content is not needed, to fix the error, change fetch function to return the response object:
async def fetch(session, url):
    """Return the aiohttp response object for *url* (body not read here)."""
    return await session.get(url)

Python asyncio / aiohttp error

I am writing a simple producer/consumer app to call multiple URL's asynchronously.
In the following code if I set the conn_count=1, and add 2 items to the Queue it works fine as only one consumer is created. But if I make conn_count=2 and add 4 items to the Queue only 3 request are being made. The other request fails with ClientConnectorError.
Can you please help be debug the reason for failure with multiple consumers? Thank You.
I am using a echo server I created.
Server:
import os
import logging.config
import yaml
from aiohttp import web
import json
def start():
    """Configure logging, wire the GET/POST echo handlers, and run the web app."""
    setup_logging()
    app = web.Application()
    # Root path handles both verbs: GET returns a greeting, POST echoes JSON.
    app.router.add_get('/', do_get)
    app.router.add_post('/', do_post)
    web.run_app(app)
async def do_get(request):
    """GET /: plain-text greeting used as a liveness check."""
    return web.Response(text='hello')
async def do_post(request):
    """POST /: echo the request's JSON body back as serialized text."""
    data = await request.json()
    return web.Response(text=json.dumps(data))
def setup_logging(default_path='logging.yaml',
                  default_level=logging.INFO,
                  env_key='LOG_CFG'):
    """Configure logging from a YAML config file, or fall back to basicConfig.

    The config path comes from the *env_key* environment variable when set,
    otherwise *default_path*; if that file does not exist, plain
    ``logging.basicConfig(level=default_level)`` is used instead.
    """
    path = os.getenv(env_key, None) or default_path
    if os.path.exists(path):
        with open(path, 'rt') as f:
            logging.config.dictConfig(yaml.safe_load(f.read()))
    else:
        logging.basicConfig(level=default_level)


if __name__ == '__main__':
    start()
Client:
import asyncio
import collections
import json
import sys
import async_timeout
from aiohttp import ClientSession, TCPConnector
MAX_CONNECTIONS = 100
URL = 'http://localhost:8080'

# Lightweight record describing one account to price.
InventoryAccount = collections.namedtuple("InventoryAccount", "op_co customer_id")


async def produce(queue, num_consumers):
    """Enqueue two work items per consumer, then one None sentinel per consumer."""
    for i in range(num_consumers * 2):
        account = InventoryAccount(op_co=i, customer_id=i * 100)
        await queue.put(account)
    # One shutdown sentinel per consumer so every worker terminates.
    for _ in range(num_consumers):
        await queue.put(None)
async def consumer(n, queue, session, responses):
    """Worker *n*: pull accounts from *queue*, POST each to URL, collect the bodies.

    Terminates when it dequeues the None sentinel. Every dequeued item is
    matched by exactly one queue.task_done() call so queue.join() can finish.
    """
    print('consumer {}: starting'.format(n))
    while True:
        try:
            account = await queue.get()
            if account is None:
                # Sentinel: acknowledge it and shut this worker down.
                queue.task_done()
                break
            else:
                print(f"Consumer {n}, Updating cloud prices for account: opCo = {account.op_co!s}, customerId = {account.customer_id!s}")
                params = {'opCo': account.op_co, 'customerId': account.customer_id}
                headers = {'content-type': 'application/json'}
                with async_timeout.timeout(10):
                    print(f"Consumer {n}, session state " + str(session.closed))
                    async with session.post(URL,
                                            headers=headers,
                                            data=json.dumps(params)) as response:
                        assert response.status == 200
                        responses.append(await response.text())
                    queue.task_done()
        except:
            # NOTE(review): bare except also catches asyncio.CancelledError,
            # and 'account' may be unbound here if queue.get() itself failed.
            e = sys.exc_info()[0]
            print(f"Consumer {n}, Error updating cloud prices for account: opCo = {account.op_co!s}, customerId = {account.customer_id!s}. {e}")
            queue.task_done()
    print('consumer {}: ending'.format(n))
async def start(loop, session, num_consumers):
    """Spin up the workers, feed the queue, wait for drain, return the responses."""
    queue = asyncio.Queue(maxsize=num_consumers)
    responses = []
    consumers = []
    for i in range(num_consumers):
        worker = consumer(i, queue, session, responses)
        consumers.append(asyncio.ensure_future(loop=loop, coro_or_future=worker))
    await produce(queue, num_consumers)
    await queue.join()
    # Workers loop forever; cancel them once the queue has drained.
    for consumer_future in consumers:
        consumer_future.cancel()
    return responses
async def run(loop, conn_count):
    """Open a pooled client session (pool size = conn_count) and run the job."""
    connector = TCPConnector(verify_ssl=False, limit=conn_count)
    async with ClientSession(loop=loop, connector=connector) as session:
        result = await start(loop, session, conn_count)
        print("Result: " + str(result))
if __name__ == '__main__':
    # Demo driver: two consumers sharing a connection pool of two.
    conn_count = 2
    loop = asyncio.get_event_loop()
    try:
        loop.run_until_complete(run(loop, conn_count))
    finally:
        # Always close the loop, even if run() raised.
        loop.close()
Reference:
https://pymotw.com/3/asyncio/synchronization.html
https://pawelmhm.github.io/asyncio/python/aiohttp/2016/04/22/asyncio-aiohttp.html
https://hackernoon.com/asyncio-for-the-working-python-developer-5c468e6e2e8e

Aiohttp not performing any requests

First of all heres the code:
import random
import asyncio
from aiohttp import ClientSession
import csv
headers =[]
def extractsites(file):
    """Return the second CSV column (the site name) from every row of *file*.

    Bug fix: the original opened the file and never closed it; a ``with``
    block now guarantees the handle is released. A short row (fewer than two
    columns) still raises IndexError, matching the original behavior.
    """
    with open(file, "r") as readfile:
        reader = csv.reader(readfile, delimiter=",")
        return [row[1] for row in reader]
async def bound_fetch(sem, url):
    """Fetch *url* under semaphore *sem* and (intend to) show its headers.

    NOTE(review): ``response.headers`` is a plain property — ``await``-ing it
    is the bug the answer below points out; also this prints the module-level
    ``headers`` list (empty) instead of ``responseheader``.
    """
    async with sem:
        print("doing request for "+ url)
        async with ClientSession() as session:
            async with session.get(url) as response:
                responseheader = await response.headers
                print(headers)
async def run():
    """Queue one bound_fetch task per site from the CSV and wait on them.

    NOTE(review): per the answer below, ``asyncio.wait(*tasks)`` is wrong —
    ``wait`` takes a single list and returns a (done, pending) pair; use
    ``await asyncio.gather(*tasks)`` instead. ``headers`` here is also a new
    local, shadowing the module-level list.
    """
    urls = extractsites("cisco-umbrella.csv")
    tasks = []
    sem = asyncio.Semaphore(100)
    for i in urls:
        task = asyncio.ensure_future(bound_fetch(sem, "http://"+i))
        tasks.append(task)
    headers = await asyncio.wait(*tasks)
    print(headers)
def main():
    """Drive the event loop until every queued fetch completes."""
    loop = asyncio.get_event_loop()
    loop.run_until_complete(asyncio.ensure_future(run()))


if __name__ == '__main__':
    main()
As per my last question I'm following this blog post:
https://pawelmhm.github.io/asyncio/python/aiohttp/2016/04/22/asyncio-aiohttp.html
I tried to adapt my code as closely as possible to the example implementation, but this code is still not making any requests or printing the headers in bound_fetch as I wish.
Can somebody spot whats wrong with this code ?
response.headers is a regular property, no need to put await before the call
asyncio.wait on other hand accepts a list of futures and returns (done, pending) pair.
Looks like you should replace await wait() call with await asyncio.gather(*tasks) (gather doc)

Categories