For learning purposes, I am trying to download all the post images from a BuzzFeed article.
Here is my code:
import lxml.html
import string
import random
import requests
url ='http://www.buzzfeed.com/mjs538/messages-from-creationists-to-people-who-believe-in-evolutio?bftw'
headers = {
    'User-Agent': 'Mozilla/5.0',
    'From': 'admin@jhvisser.com'
}
page = requests.get(url)
tree = lxml.html.fromstring(page.content)
#print(soup.prettify()).encode('ascii', 'ignore')
images = tree.cssselect("div.sub_buzz_content img")

def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
    return ''.join(random.choice(chars) for x in range(size))

for image in images:
    with open(id_generator() + '.jpg', 'wb') as handle:
        request = requests.get(image.attrib['src'], headers=headers, stream=True)
        for block in request.iter_content(1024):
            if not block:
                break
            handle.write(block)
What is retrieved are images all 110 bytes in size, and viewing them shows just an empty image. Am I doing something wrong in my code that is causing the issue? I don't have to use requests if there is an easier way to do this.
If you look closely at the source code of the webpage you are trying to crawl, you'll see that the image URLs you want are not specified in the src attribute of the img tags, but in the rel:bf_image_src attribute.
Changing image.attrib['src'] to image.attrib['rel:bf_image_src'] should fix your problem.
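For reference, here is that one-line change applied to the download loop from the question; everything else stays the same (the attribute name comes from BuzzFeed's markup at the time and may have changed since):
# Same loop as in the question, but reading the image URL from the
# rel:bf_image_src attribute instead of the placeholder src.
for image in images:
    with open(id_generator() + '.jpg', 'wb') as handle:
        request = requests.get(image.attrib['rel:bf_image_src'],
                               headers=headers, stream=True)
        for block in request.iter_content(1024):
            if not block:
                break
            handle.write(block)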
I didn't manage to replicate your code (it claims that cssselect isn't installed), but the following code with BeautifulSoup and urllib2 ran smoothly on my computer and downloaded all 22 pictures.
from itertools import count
from bs4 import BeautifulSoup
import urllib2
from time import sleep

url = 'http://www.buzzfeed.com/mjs538/messages-from-creationists-to-people-who-believe-in-evolutio?bftw'
headers = {
    'User-Agent': 'Non-commercical crawler, Steinar Lima. Contact: https://stackoverflow.com/questions/21616904/images-downloaded-are-blank-images-instead-of-actual-images'
}

r = urllib2.Request(url, headers=headers)
soup = BeautifulSoup(urllib2.urlopen(r))
c = count()

for div in soup.find_all('div', id='buzz_sub_buzz'):
    for img in div.find_all('img'):
        print img['rel:bf_image_src']
        with open('images/{}.jpg'.format(next(c)), 'wb') as img_out:
            req = urllib2.Request(img['rel:bf_image_src'], headers=headers)
            img_out.write(urllib2.urlopen(req).read())
        sleep(5)
I'm new to Python and would like your advice on an issue I've encountered recently. I'm doing a small project where I tried to scrape a comic website to download a chapter (pictures). However, when printing out the page content for testing (because I tried to use BeautifulSoup.select() and got no result), it only showed a single line of HTML:
'document.cookie="VinaHost-Shield=a7a00919549a80aa44d5e1df8a26ae20"+"; path=/";window.location.reload(true);'
Any help would be really appreciated.
from requests_html import HTMLSession
session = HTMLSession()
res = session.get("https://truyenqqpro.com/truyen-tranh/dao-hai-tac-128-chap-1060.html")
res.html.render()
print(res.content)
I also tried this, but the result was the same.
import requests, bs4
url = "https://truyenqqpro.com/truyen-tranh/dao-hai-tac-128-chap-1060.html"
res = requests.get(url, headers={"User-Agent": "Requests"})
res.raise_for_status()
# soup = bs4.BeautifulSoup(res.text, "html.parser")
# onePiece = soup.select(".page-chapter")
print(res.content)
Update: I installed Docker and Splash (on Windows 11) and it worked. I've included the updated code below. Thanks Franz and the others for your help.
import os
import requests, bs4

os.makedirs("OnePiece", exist_ok=True)
url = "https://truyenqqpro.com/truyen-tranh/dao-hai-tac-128-chap-1060.html"
res = requests.get("http://localhost:8050/render.html", params={"url": url, "wait": 5})
res.raise_for_status()

soup = bs4.BeautifulSoup(res.text, "html.parser")
onePiece = soup.find_all("img", class_="lazy")
for element in onePiece:
    imageLink = "https:" + element["data-cdn"]
    res = requests.get(imageLink)
    imageFile = open(os.path.join("OnePiece", os.path.basename(imageLink)), "wb")
    for chunk in res.iter_content(100000):
        imageFile.write(chunk)
    imageFile.close()
import urllib.request
request_url = urllib.request.urlopen('https://truyenqqpro.com/truyen-tranh/dao-hai-tac-128-chap-1060.html')
print(request_url.read())
It will return the HTML code of the page.
By the way, that HTML loads several images; you need to use a regex to track down those image URLs and download them.
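A rough sketch of that regex idea (the src/data-cdn attribute names and the protocol-relative URLs are assumptions about the page's markup, and on this particular site the real HTML only comes back after the cookie/JavaScript check is passed, as the next answer explains):
import re
import urllib.request

url = "https://truyenqqpro.com/truyen-tranh/dao-hai-tac-128-chap-1060.html"
html = urllib.request.urlopen(url).read().decode("utf-8", errors="ignore")

# Pull anything that looks like a protocol-relative image URL out of
# src/data-cdn attributes (attribute names are guesses).
img_urls = re.findall(r'(?:src|data-cdn)="(//[^"]+\.(?:jpg|jpeg|png|webp))"', html)

for i, img_url in enumerate(img_urls):
    urllib.request.urlretrieve("https:" + img_url, "{}.jpg".format(i))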
This response means that the page needs a JavaScript renderer that reloads it using this cookie; to get the content, some workaround must be added.
I commonly use Splash (the Scrapinghub render engine), and simply adding a sleep while the page renders is enough to get all the content. Other tools that render in the same way are Selenium for Python or Puppeteer in JS.
Links for Splash and Puppeteer
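As an illustration, a minimal Selenium sketch along those lines (a sketch only, assuming a local Chrome/chromedriver setup; the sleep gives the cookie-setting script time to reload the page):
import time
from selenium import webdriver
from bs4 import BeautifulSoup

driver = webdriver.Chrome()
driver.get("https://truyenqqpro.com/truyen-tranh/dao-hai-tac-128-chap-1060.html")
time.sleep(5)  # let the cookie-setting JavaScript reload the page

soup = BeautifulSoup(driver.page_source, "html.parser")  # rendered HTML
driver.quit()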
I suspect this has happened due to my misunderstanding of how either lxml or html works, and I'd appreciate it if someone could fill this gap in my knowledge.
My code is:
url = "https://prnt.sc/ca0000"
response = requests.get(url,headers={'User-Agent': 'Chrome'})
# Navigate to the correct img src.
tree = html.fromstring(response.content)
xpath = '/html/body/div[3]/div/div/img/#src'
imageURL = tree.xpath(xpath)[0]
print(imageURL)
I expect when I do this to get a result such as:
data:image/png;base64,iVBORw0KGgoAAA...((THIS IS REALLY LONG))...Jggg==
Which, if I understand correctly, is where the image is stored locally on my computer.
However when I run the code I get:
"https://prnt.sc/ca0000"
Why are these different?
The problem is that this page uses JavaScript to put data:image/png;base64 ... in place of https://prnt.sc/ca0000, but requests can't run JavaScript.
However, there are two img tags with different src values - the first has a standard URL to the image (https://....) and the other has the fake https://prnt.sc/ca0000.
So this XPath works for me even without JavaScript:
xpath = '//img[@id="screenshot-image"]/@src'
This code gets the correct URL and downloads the image:
import requests
from lxml import html

url = "https://prnt.sc/ca0000"

response = requests.get(url, headers={'User-Agent': 'Chrome'})
tree = html.fromstring(response.content)

image_url = tree.xpath('//img[@id="screenshot-image"]/@src')[0]
print(image_url)

# --- download ---
response = requests.get(image_url, headers={'User-Agent': 'Chrome'})
with open('image.png', 'wb') as fh:
    fh.write(response.content)
Result
https://image.prntscr.com/image/797501c08d0a46ae93ff3a477b4f771c.png
I'm trying to download all the PGNs from this site.
I think I have to use urlopen to open each URL and then use urlretrieve to download each PGN by accessing it from the download button near the bottom of each game. Do I have to create a new BeautifulSoup object for each game? I'm also unsure of how urlretrieve works.
import urllib
from urllib.request import urlopen, urlretrieve, quote
from bs4 import BeautifulSoup

url = 'http://www.chessgames.com/perl/chesscollection?cid=1014492'
u = urlopen(url)
html = u.read().decode('utf-8')

soup = BeautifulSoup(html, "html.parser")
for link in soup.find_all('a'):
    urlopen('http://chessgames.com' + link.get('href'))
There is no short answer to your question. I will show you a complete solution and comment on the code.
First, import necessary modules:
from bs4 import BeautifulSoup
import requests
import re
Next, get the index page and create a BeautifulSoup object:
req = requests.get("http://www.chessgames.com/perl/chesscollection?cid=1014492")
soup = BeautifulSoup(req.text, "lxml")
I strongly advise using the lxml parser rather than the common html.parser.
After that, you should prepare the list of game links:
pages = soup.findAll('a', href=re.compile('.*chessgame\?.*'))
You can do this by searching for links that contain the word 'chessgame'.
Now, you should prepare a function that will download the files for you:
def download_file(url):
    path = url.split('/')[-1].split('?')[0]
    r = requests.get(url, stream=True)
    if r.status_code == 200:
        with open(path, 'wb') as f:
            for chunk in r:
                f.write(chunk)
And the final piece of magic is to repeat all the previous steps, preparing links for the file downloader:
host = 'http://www.chessgames.com'
for page in pages:
    url = host + page.get('href')
    req = requests.get(url)
    soup = BeautifulSoup(req.text, "lxml")
    file_link = soup.find('a', text=re.compile('.*download.*'))
    file_url = host + file_link.get('href')
    download_file(file_url)
(first you search for the link containing the text 'download' in its description, then construct the full URL by concatenating the hostname and the path, and finally download the file)
I hope you can use this code without correction!
The accepted answer is fantastic but the task is embarrassingly parallel; there's no need to retrieve these sub-pages and files one at a time. This answer shows how to speed things up.
The first step is to use requests.Session() when sending multiple requests to a single host. Quoting Advanced Usage: Session Objects from the requests docs:
The Session object allows you to persist certain parameters across requests. It also persists cookies across all requests made from the Session instance, and will use urllib3's connection pooling. So if you're making several requests to the same host, the underlying TCP connection will be reused, which can result in a significant performance increase (see HTTP persistent connection).
Next, asyncio, multiprocessing or multithreading are available to parallelize the workload. Each has tradeoffs respective to the task at hand and which you choose is likely best determined by benchmarking and profiling. This page offers great examples of all three.
For the purposes of this post, I'll show multithreading. The impact of the GIL shouldn't be too much of a bottleneck because the tasks are mostly IO-bound, consisting of waiting on in-flight requests for their responses. When a thread is blocked on IO, it can yield to a thread parsing HTML or doing other CPU-bound work.
Here's the code:
import os
import re
import requests
from bs4 import BeautifulSoup
from concurrent.futures import ThreadPoolExecutor


def download_pgn(task):
    session, host, page, destination_path = task
    response = session.get(host + page)
    response.raise_for_status()

    soup = BeautifulSoup(response.text, "lxml")
    game_url = host + soup.find("a", text="download").get("href")
    filename = re.search(r"\w+\.pgn", game_url).group()
    path = os.path.join(destination_path, filename)

    response = session.get(game_url, stream=True)
    response.raise_for_status()

    with open(path, "wb") as f:
        for chunk in response.iter_content(chunk_size=1024):
            if chunk:
                f.write(chunk)


def main():
    host = "http://www.chessgames.com"
    url_to_scrape = host + "/perl/chesscollection?cid=1014492"
    destination_path = "pgns"
    max_workers = 8

    if not os.path.exists(destination_path):
        os.makedirs(destination_path)

    with requests.Session() as session:
        session.headers["User-Agent"] = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.0.0 Safari/537.36"
        response = session.get(url_to_scrape)
        response.raise_for_status()

        soup = BeautifulSoup(response.text, "lxml")
        pages = soup.find_all("a", href=re.compile(r".*chessgame\?.*"))
        tasks = [
            (session, host, page.get("href"), destination_path)
            for page in pages
        ]

        with ThreadPoolExecutor(max_workers=max_workers) as pool:
            pool.map(download_pgn, tasks)


if __name__ == "__main__":
    main()
I used response.iter_content here, which is unnecessary for such tiny text files, but it's a generalization so the code will handle larger files in a memory-friendly way.
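For comparison, a minimal non-streaming equivalent for small files (reusing the same session, game_url, and path names from the code above) would simply be:
# Hold the whole response in memory and write it in one go.
response = session.get(game_url)
response.raise_for_status()
with open(path, "wb") as f:
    f.write(response.content)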
Results from a rough benchmark (the first request is a bottleneck):

max workers   session?   seconds
1             no         126
1             yes        111
8             no         24
8             yes        22
32            yes        16
I am trying to automatically download PDFs from URLs like this to make a library of UN resolutions.
If I use Beautiful Soup or mechanize to open that URL, I get "Your browser does not support frames" -- and I get the same thing if I use the copy-as-cURL feature in Chrome dev tools.
The standard advice for "Your browser does not support frames" when using mechanize or Beautiful Soup is to follow the source of each individual frame and load that frame. But if I do so, I get an error message saying the page is not authorized.
How can I proceed? I guess I could try this in Zombie or PhantomJS, but I would prefer not to use those tools as I am not that familiar with them.
Okay, this was an interesting task to do with requests and BeautifulSoup.
There is a set of underlying calls to un.org and daccess-ods.un.org that are important and that set relevant cookies. This is why you need to maintain a requests.Session() and visit several URLs before getting access to the PDF.
Here's the complete code:
import re
from urlparse import urljoin
from bs4 import BeautifulSoup
import requests
BASE_URL = 'http://www.un.org/en/ga/search/'
URL = "http://www.un.org/en/ga/search/view_doc.asp?symbol=A/RES/68/278"
BASE_ACCESS_URL = 'http://daccess-ods.un.org'
# start session
session = requests.Session()
response = session.get(URL, headers={'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36'})
# get frame links
soup = BeautifulSoup(response.text)
frames = soup.find_all('frame')
header_link, document_link = [urljoin(BASE_URL, frame.get('src')) for frame in frames]
# get header
session.get(header_link, headers={'Referer': URL})
# get document html url
response = session.get(document_link, headers={'Referer': URL})
soup = BeautifulSoup(response.text)
content = soup.find('meta', content=re.compile('URL='))['content']
document_html_link = re.search('URL=(.*)', content).group(1)
document_html_link = urljoin(BASE_ACCESS_URL, document_html_link)
# follow html link and get the pdf link
response = session.get(document_html_link)
soup = BeautifulSoup(response.text)
# get the real document link
content = soup.find('meta', content=re.compile('URL='))['content']
document_link = re.search('URL=(.*)', content).group(1)
document_link = urljoin(BASE_ACCESS_URL, document_link)
print document_link
# follow the frame link with login and password first - would set the important cookie
auth_link = soup.find('frame', {'name': 'footer'})['src']
session.get(auth_link)
# download file
with open('document.pdf', 'wb') as handle:
    response = session.get(document_link, stream=True)
    for block in response.iter_content(1024):
        if not block:
            break
        handle.write(block)
You should probably extract separate blocks of code into functions to make it more readable and reusable.
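For example, the repeated "follow the meta refresh URL" step could become a small helper; a hypothetical sketch (the function name and signature are mine, not from the original code):
def follow_meta_redirect(session, url, base_url, referer=None):
    """Fetch a page and return the absolute URL from its <meta ... URL=...> refresh tag."""
    headers = {'Referer': referer} if referer else {}
    response = session.get(url, headers=headers)
    soup = BeautifulSoup(response.text)
    content = soup.find('meta', content=re.compile('URL='))['content']
    return urljoin(base_url, re.search('URL=(.*)', content).group(1))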
FYI, all of this could be done more easily through a real browser with the help of selenium or Ghost.py.
Hope that helps.
I'm having a very tough time searching Google Image Search with Python. I need to do it using only standard Python libraries (so urllib, urllib2, json, ...).
Can somebody please help? Assume the image is jpeg.jpg and is in the same folder I'm running Python from.
I've tried a hundred different code versions, using headers, user-agent, base64 encoding, different urls (images.google.com, http://images.google.com/searchbyimage?hl=en&biw=1060&bih=766&gbv=2&site=search&image_url={{URL To your image}}&sa=X&ei=H6RaTtb5JcTeiALlmPi2CQ&ved=0CDsQ9Q8, etc....)
Nothing works; it's always an error: 404, 401, or broken pipe :(
Please show me some Python script that will actually search Google Images with my own image as the search data ('jpeg.jpg' stored on my computer/device).
Thank you to whoever can solve this,
Dave:)
I use the following code in Python to search for Google images and download the images to my computer:
import os
import sys
import time
from urllib import FancyURLopener
import urllib2
import simplejson

# Define search term
searchTerm = "hello world"

# Replace spaces ' ' in search term for '%20' in order to comply with request
searchTerm = searchTerm.replace(' ', '%20')

# Start FancyURLopener with defined version
class MyOpener(FancyURLopener):
    version = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11'

myopener = MyOpener()

# Set count to 0
count = 0

for i in range(0, 10):
    # Notice that the start changes for each iteration in order to request a new set of images for each loop
    url = ('https://ajax.googleapis.com/ajax/services/search/images?' + 'v=1.0&q=' + searchTerm + '&start=' + str(i*4) + '&userip=MyIP')
    print url
    request = urllib2.Request(url, None, {'Referer': 'testing'})
    response = urllib2.urlopen(request)

    # Get results using JSON
    results = simplejson.load(response)
    data = results['responseData']
    dataInfo = data['results']

    # Iterate for each result and get unescaped url
    for myUrl in dataInfo:
        count = count + 1
        print myUrl['unescapedUrl']
        myopener.retrieve(myUrl['unescapedUrl'], str(count) + '.jpg')

    # Sleep for one second to prevent IP blocking from Google
    time.sleep(1)
You can also find very useful information here.
The Google Image Search API is deprecated, so we use Google search to download the images, using regex and BeautifulSoup.
from bs4 import BeautifulSoup
import requests
import re
import urllib2
import os

def get_soup(url, header):
    return BeautifulSoup(urllib2.urlopen(urllib2.Request(url, headers=header)))

image_type = "Action"
# you can change the query for the image here
query = "Terminator 3 Movie"
query = query.split()
query = '+'.join(query)
url = "https://www.google.co.in/search?es_sm=122&source=lnms&tbm=isch&sa=X&ei=4r_cVID3NYayoQTb4ICQBA&ved=0CAgQ_AUoAQ&biw=1242&bih=619&q=" + query
print url

header = {'User-Agent': 'Mozilla/5.0'}
soup = get_soup(url, header)

images = [a['src'] for a in soup.find_all("img", {"src": re.compile("gstatic.com")})]
#print images

for img in images:
    raw_img = urllib2.urlopen(img).read()
    # add the directory for your image here
    DIR = "C:\Users\hp\Pictures\\valentines\\"
    cntr = len([i for i in os.listdir(DIR) if image_type in i]) + 1
    print cntr
    f = open(DIR + image_type + "_" + str(cntr) + ".jpg", 'wb')
    f.write(raw_img)
    f.close()