Need help with a Python scraper - python

I am trying to use urllib with Python to make a scraper. I can download the images, but they are only thumbnails (250x250 or less). (I am trying it on 4chan, because I like some of the picture threads.)
How can I get the full image?
Here is my code:
import urllib2, urllib
from BeautifulSoup import BeautifulSoup
import re
import urlparse

i = 0
ext = "'src' : re.compile(r'(jpe?g)|(png)|$'"
url = raw_input("Enter URL here:")
ender = raw_input("Enter File Type Here(For Images enter 'img'):")
if ender == "img":
    ender = 'img', {'src' : re.compile(r'(.jpe?g)|(.png)|(.gif)$')}
else:
    if "." in ender:
        end = ender
    else:
        end = ".%s" % ender
raw = urllib.urlopen(url)
soup = BeautifulSoup(raw)
parse = list(urlparse.urlparse(url))
for ender in soup.findAll(ender):
    links = "%(src)s" % ender
    print links
    str(links)
    if ".jpg" in links:
        end = ".jpg"
    if ".jpeg" in links:
        end = ".jpeg"
    if ".gif" in links:
        end = ".gif"
    if ".png" in links:
        end = ".png"
    i += 1
    urllib.urlretrieve(links, "%s%s" % (i, end))

Because you can click the thumbnail to see the larger image, the URL in the <a href="..."> tag that wraps the image tag points to the full image.
So just read the value of the href attribute, and download that instead of the src attribute of the image.
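For example, a minimal sketch in the same urllib/BeautifulSoup style as the question. It assumes the thread markup wraps each thumbnail <img> in an <a> whose href is the full-size file, and that those links may be protocol-relative; adjust to the markup you actually see:

import urllib
from BeautifulSoup import BeautifulSoup

url = raw_input("Enter URL here:")                 # the thread URL
soup = BeautifulSoup(urllib.urlopen(url))

i = 0
for a in soup.findAll('a'):
    img = a.find('img')                            # only anchors that wrap a thumbnail
    if img is None or not a.get('href'):
        continue
    full = a['href']                               # full-size image behind the thumbnail
    if full.startswith('//'):                      # handle protocol-relative links
        full = 'http:' + full
    if full.lower().endswith(('.jpg', '.jpeg', '.png', '.gif')):
        i += 1
        urllib.urlretrieve(full, "%s%s" % (i, full[full.rfind('.'):]))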

Related

Python How to Scrape Images, texts, and a link to the audio file url

I am trying to scrape data from the following url (http://www.ancient-hebrew.org/m/dictionary/1000.html).
Each Hebrew word section starts with image URLs followed by two pieces of text: the actual Hebrew word and its pronunciation. For example, the first entry on the page is "img1 img2 img3 אֶלֶף e-leph"; the Hebrew word appears as Unicode after downloading the HTML using wget.
I am trying to collect this information in order, so that I get the image files first, then the Hebrew word, and then the pronunciation. Finally I would like to find the URL of the audio file.
Also, each word's line seems to start with an <A> tag.
I am new to web scraping, so the following is all I was able to do.
from urllib.request import urlopen
from bs4 import BeautifulSoup

url = '1000.html'
try:
    page = urlopen(url)
except:
    print("Error opening the URL")

soup = BeautifulSoup(page, 'html.parser')
content = soup.find('<!--501-1000-->', {"<A Name= "})

images = ''
for i in content.findAll('*.jpg'):
    images = images + ' ' + i.text

with open('scraped_text.txt', 'w') as file:
    file.write(images)
As you can see, my code doesn't really do the job. In the end I would like to get this information for every word on the page and save it as a text file or a JSON file, whichever is easier.
For example:
images: URLsOfImages, HebrewWord: txt, Pronunciation: txt, URLtoAudio: txt
and so on for the next word.
I wrote a script that should help you out. It gathers all of the information you requested. The output is written as a text file rather than JSON because of the Hebrew letters (with the default settings they would have been stored as escaped bytes). I know you posted this question a while ago, but I found it today and decided to give it a shot. Anyway, here it is:
import requests
from bs4 import BeautifulSoup
import re
import json

url = 'http://www.ancient-hebrew.org/m/dictionary/1000.html'
page = requests.get(url)
soup = BeautifulSoup(page.text, 'html.parser')

def images():
    # Gathers all the images (this includes unwanted gifs)
    imgs = soup.find_all('img')
    # Gets the src attribute to form the full url
    srcs = [img['src'] for img in imgs]
    base_url = 'https://www.ancient-hebrew.org/files/'
    imgs = {}
    section = 0
    # Goes through each source of all the images
    for src in srcs:
        # Checks if it is a gif, these act as a separator
        if src.endswith('.gif'):
            # If it is a gif, change sections (acts as separator)
            section += 1
        else:
            # If it is a letter image, use regex to extract the part of src we want and form full url
            actual_link = re.search(r'files/(.+\.jpg)', src)
            imgs.setdefault(section, []).append(base_url + actual_link.group(1))
    return imgs

def hebrew_letters():
    # Gets hebrew letters, strips whitespace, reverses letter order since hebrew letters get messed up
    h_letters = [h_letter.text.strip() for h_letter in soup.find_all('font', attrs={'face': 'arial'})]
    return h_letters

def english_letters():
    # Gets english letters by regex, this part was difficult because these letters are not surrounded by tags in the html
    letters = ''.join(str(content) for content in soup.find('table', attrs={'width': '90%'}).td.contents)
    search_text = re.finditer(r'/font>\s+(.+?)\s+<br/>', letters)
    e_letters = [letter.group(1) for letter in search_text]
    return e_letters

def get_audio_urls():
    # Gets all the mp3 hrefs for the audio part
    base_url = 'https://www.ancient-hebrew.org/m/dictionary/'
    links = soup.find_all('a', href=re.compile(r'\d+\s*.mp3$'))
    audio_urls = [base_url + link['href'].replace('\t', '') for link in links]
    return audio_urls

def main():
    # Gathers scraped data
    imgs = images()
    h_letters = hebrew_letters()
    e_letters = english_letters()
    audio_urls = get_audio_urls()
    # Encodes data into utf-8 (due to hebrew letters) and saves it to text file
    with open('scraped_hebrew.txt', 'w', encoding='utf-8') as text_file:
        for img, h_letter, e_letter, audio_url in zip(imgs.values(), h_letters, e_letters, audio_urls):
            text_file.write('Image Urls: ' + ' - '.join(im for im in img) + '\n')
            text_file.write('Hebrew Letters: ' + h_letter + '\n')
            text_file.write('English Letters: ' + e_letter + '\n')
            text_file.write('Audio Urls: ' + audio_url + '\n\n')

if __name__ == '__main__':
    main()
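If you do want JSON output instead of the text file, the Hebrew letters can be kept readable by turning off ASCII escaping. A small sketch reusing the same zipped data as above (the save_json helper name is just for illustration):

import json

def save_json(imgs, h_letters, e_letters, audio_urls, path='scraped_hebrew.json'):
    entries = []
    for img, h_letter, e_letter, audio_url in zip(imgs.values(), h_letters, e_letters, audio_urls):
        entries.append({'images': img,
                        'HebrewWord': h_letter,
                        'Pronunciation': e_letter,
                        'URLtoAudio': audio_url})
    # ensure_ascii=False writes the Hebrew characters directly instead of \u escapes
    with open(path, 'w', encoding='utf-8') as f:
        json.dump(entries, f, ensure_ascii=False, indent=2)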

Scrape image and title from https://www.open2study.com/courses using Python and BeautifulSoup

from bs4 import BeautifulSoup
import urllib

r = urllib.urlopen('https://www.open2study.com/courses').read()
soup = BeautifulSoup(r)
links = soup.find('figure').find_all('img', src=True)
for link in links:
    txt = open('test.txt', "w")
    link = link["src"].split("src=")[-1]
    download_img = urllib.urlopen('https://www.open2study.com/courses')
    txt.write(download_img.read())
    txt.close()
I need to scrape image and title from this website.
Instead of doing a split you can grab the src directly with BeautifulSoup.
Use this to get each div that has the title and image in it:
    for link in soup.find_all("div", attrs={"class": "courses_adblock_start"}):
Then use this to grab the title and image in each div:
    link.find("h2", attrs={"class": "adblock_course_title"}).get_text()
    link.find("img", attrs={"class": "image-style-course-logo-subjects-block"}).get("src")
You also open the page every time in the loop, which you want to avoid; you only need to open it once and then use it for the loop, like so:
url = "http://www.open2study.com/courses"
page = urllib.request.urlopen(url)
soup = BeautifulSoup(page.read())
for link in soup.find_all("div",attrs={"class" : "courses_adblock_start"}):
try:
print("Title : " + link.find("h2",attrs={"class":"adblock_course_title"}).get_text())
print("Image : " + link.find("img", attrs={"class":"image-style-course-logo-subjects-block"}).get("src"))
except:
print("error")
Here is the new output:
Title : World Music
Image : https://www.open2study.com/sites/default/files/styles/course_logo_subjects_block/public/Course%20Tile_world_music.jpg?itok=CG6pvXHp
Title : Writing for the Web
Image : https://www.open2study.com/sites/default/files/styles/course_logo_subjects_block/public/3_writing_for_web_C_0.jpg?itok=exQApr-1
Something like this?
import urllib
from bs4 import BeautifulSoup

titles = []
images = []
r = urllib.urlopen('https://www.open2study.com/courses').read()
soup = BeautifulSoup(r)

for i in soup.find_all('div', {'class': "courses_adblock_rollover"}):
    titles.append(i.h2.text)

for i in soup.find_all('img', {'class': "image-style-course-logo-subjects-block"}):
    images.append(i.get('src'))

with open('test.txt', "w") as f:
    for i in zip(titles, images):
        f.write(i[0].encode('ascii', 'ignore') +
                '\n' + i[1].encode('ascii', 'ignore') +
                '\n\n')

How to extract and download all images from a website using beautifulSoup?

I am trying to extract and download all images from a URL. I wrote a script:
import urllib2
import re
from os.path import basename
from urlparse import urlsplit

url = "http://filmygyan.in/katrina-kaifs-top-10-cutest-pics-gallery/"
urlContent = urllib2.urlopen(url).read()
# HTML image tag: <img src="url" alt="some_text"/>
imgUrls = re.findall('img .*?src="(.*?)"', urlContent)

# download all images
for imgUrl in imgUrls:
    try:
        imgData = urllib2.urlopen(imgUrl).read()
        fileName = basename(urlsplit(imgUrl)[2])
        output = open(fileName, 'wb')
        output.write(imgData)
        output.close()
    except:
        pass
I don't want to extract only the images of this page (see this image: http://i.share.pho.to/1c9884b1_l.jpeg).
I just want to get all the images without clicking on the "Next" button.
I am not getting how I can get all the pics within the "Next" class. What changes should I make in findall?
The following should extract all images from a given page and write them to the directory where the script is being run.
import re
import requests
from bs4 import BeautifulSoup

site = 'http://pixabay.com'
response = requests.get(site)
soup = BeautifulSoup(response.text, 'html.parser')
img_tags = soup.find_all('img')
urls = [img['src'] for img in img_tags]

for url in urls:
    filename = re.search(r'/([\w_-]+[.](jpg|gif|png))$', url)
    if not filename:
        print("Regex didn't match with the url: {}".format(url))
        continue
    with open(filename.group(1), 'wb') as f:
        if 'http' not in url:
            # sometimes an image source can be relative
            # if it is provide the base url which also happens
            # to be the site variable atm.
            url = '{}{}'.format(site, url)
        response = requests.get(url)
        f.write(response.content)
Slight modification to Jonathan's answer (because I can't comment): adding 'www' to the website will fix most "File Type Not Supported" errors.
import re
import requests
from bs4 import BeautifulSoup

site = 'http://www.google.com'
response = requests.get(site)
soup = BeautifulSoup(response.text, 'html.parser')
img_tags = soup.find_all('img')
urls = [img['src'] for img in img_tags]

for url in urls:
    filename = re.search(r'/([\w_-]+[.](jpg|gif|png))$', url)
    if not filename:
        print("Regex didn't match with the url: {}".format(url))
        continue
    with open(filename.group(1), 'wb') as f:
        if 'http' not in url:
            # sometimes an image source can be relative
            # if it is provide the base url which also happens
            # to be the site variable atm.
            url = '{}{}'.format(site, url)
        response = requests.get(url)
        f.write(response.content)
from bs4 import *
import requests
import os

def folder_create(images):
    try:
        folder_name = input("Enter Folder Name:- ")
        # folder creation
        os.mkdir(folder_name)
    except:
        print("Folder Exist with that name!")
        folder_create(images)
    download_images(images, folder_name)

def download_images(images, folder_name):
    count = 0
    print(f"Total {len(images)} Image Found!")
    if len(images) != 0:
        for i, image in enumerate(images):
            # the image URL may live in several different attributes, so try them in turn
            try:
                image_link = image["data-srcset"]
            except:
                try:
                    image_link = image["data-src"]
                except:
                    try:
                        image_link = image["data-fallback-src"]
                    except:
                        try:
                            image_link = image["src"]
                        except:
                            pass
            try:
                r = requests.get(image_link).content
                try:
                    # possibility of decode
                    r = str(r, 'utf-8')
                except UnicodeDecodeError:
                    with open(f"{folder_name}/images{i+1}.jpg", "wb+") as f:
                        f.write(r)
                    count += 1
            except:
                pass
        if count == len(images):
            print("All Images Downloaded!")
        else:
            print(f"Total {count} Images Downloaded Out of {len(images)}")

def main(url):
    r = requests.get(url)
    soup = BeautifulSoup(r.text, 'html.parser')
    images = soup.findAll('img')
    folder_create(images)

url = input("Enter URL:- ")
main(url)
If you want only the pictures then you can just download them without even scraping the webpage. They all have the same URL pattern:
http://filmygyan.in/wp-content/gallery/katrina-kaifs-top-10-cutest-pics-gallery/cute1.jpg
http://filmygyan.in/wp-content/gallery/katrina-kaifs-top-10-cutest-pics-gallery/cute2.jpg
...
http://filmygyan.in/wp-content/gallery/katrina-kaifs-top-10-cutest-pics-gallery/cute10.jpg
So code as simple as this will give you all the images:
import os
import urllib
import urllib2

baseUrl = "http://filmygyan.in/wp-content/gallery/katrina-kaifs-top-10-"\
          "cutest-pics-gallery/cute%s.jpg"

for i in range(1, 11):
    url = baseUrl % i
    urllib.urlretrieve(url, os.path.basename(url))
With BeautifulSoup you will have to click or go to the next page to scrape the images. If you want to scrape each page individually, try to scrape them using their class, which is shutterset_katrina-kaifs-top-10-cutest-pics-gallery; a sketch of that approach is below.
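For example, a minimal sketch of that class-based approach in the question's urllib2/BeautifulSoup style. It assumes the gallery anchors carry the shutterset_... class and that their href points at the full-size image, which I have not verified against the live page:

import urllib2
from BeautifulSoup import BeautifulSoup

page = urllib2.urlopen("http://filmygyan.in/katrina-kaifs-top-10-cutest-pics-gallery/").read()
soup = BeautifulSoup(page)

# gallery anchors carry the shutterset_... class; their href should be the full image
for a in soup.findAll("a", {"class": "shutterset_katrina-kaifs-top-10-cutest-pics-gallery"}):
    print a["href"]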

Downloading an image using Python Mechanize

I'm trying to write a Python script to download an image and set it as my wallpaper. Unfortunately, the Mechanize documentation is quite poor. My script follows the link correctly, but I'm having a hard time actually saving the image on my computer. From what I researched, the .retrieve() method should do the work, but how do I specify the path the file should be downloaded to? Here is what I have...
def followLink(browser, fixedLink):
    browser.open(fixedLink)
    if browser.find_link(url_regex=r'1600x1200'):
        browser.follow_link(url_regex=r'1600x1200')
    elif browser.find_link(url_regex=r'1400x1050'):
        browser.follow_link(url_regex=r'1400x1050')
    elif browser.find_link(url_regex=r'1280x960'):
        browser.follow_link(url_regex=r'1280x960')
    return
import mechanize, os
from BeautifulSoup import BeautifulSoup

browser = mechanize.Browser()
html = browser.open(url)
soup = BeautifulSoup(html)
image_tags = soup.findAll('img')
for image in image_tags:
    filename = image['src'].lstrip('http://')
    filename = os.path.join(dir, filename.replace('/', '_'))
    data = browser.open(image['src']).read()
    browser.back()
    save = open(filename, 'wb')
    save.write(data)
    save.close()
This can help you download all the images from a web page. As for parsing the HTML, you'd better use BeautifulSoup or lxml. Downloading is just reading the data and then writing it to a local file. You should assign your own value to dir; it is where your images will end up.
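For example (a sketch; any writable directory works, the name here is just an illustration):

import os

dir = os.path.join(os.path.expanduser("~"), "wallpapers")   # downloaded images go here
if not os.path.exists(dir):
    os.makedirs(dir)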
Not sure why this solution hasn't come up, but you can use the mechanize.Browser.retrieve function as well. Perhaps this only works in newer versions of mechanize and has thus not been mentioned?
Anyway, if you wanted to shorten the answer by zhangyangyu, you could do this:
import mechanize, os
from BeautifulSoup import BeautifulSoup

browser = mechanize.Browser()
html = browser.open(url)
soup = BeautifulSoup(html)
image_tags = soup.findAll('img')
for image in image_tags:
    filename = image['src'].lstrip('http://')
    filename = os.path.join(dir, filename.replace('/', '_'))
    browser.retrieve(image['src'], filename)
    browser.back()
Also keep in mind that you'll likely want to put all of this into a try except block like this one:
import mechanize, os
from BeautifulSoup import BeautifulSoup

browser = mechanize.Browser()
html = browser.open(url)
soup = BeautifulSoup(html)
image_tags = soup.findAll('img')
for image in image_tags:
    filename = image['src'].lstrip('http://')
    filename = os.path.join(dir, filename.replace('/', '_'))
    try:
        browser.retrieve(image['src'], filename)
        browser.back()
    except (mechanize.HTTPError, mechanize.URLError) as e:
        pass
        # Use e.code and e.read() with HTTPError
        # Use e.reason.args with URLError
Of course you'll want to adjust this to your needs. Perhaps you want it to bomb out if it encounters an issue. It totally depends on what you want to achieve.
You can get/download the image by opening the url of the img src.
image_response = browser.open_novisit(img['src'])
To save the file now, just use open:
with open('image_out.png', 'wb') as f:
    f.write(image_response.read())
It's really crappy, but it "works" for me, building on 0xc0000022l's answer:
import mechanize, os
from BeautifulSoup import BeautifulSoup
import urllib2

def DownloadIMGs(url): # IMPORTANT: URL WITH HTTP OR HTTPS
    print "From", url
    dir = 'F:\Downloadss' # Dir for Downloads
    basicImgFileTypes = ['png','bmp','cur','ico','gif','jpg','jpeg','psd','raw','tif']
    browser = mechanize.Browser()
    html = browser.open(url)
    soup = BeautifulSoup(html)
    image_tags = soup.findAll('img')
    print "N Images:", len(image_tags)
    print
    #---------SAVE PATH
    #check if available
    if not os.path.exists(dir):
        os.makedirs(dir)
    #---------SAVE PATH
    for image in image_tags:
        #---------SAVE PATH + FILENAME (Where It is downloading)
        filename = image['src']
        fileExt = filename.split('.')[-1]
        fileExt = fileExt[0:3]
        if (fileExt in basicImgFileTypes):
            print 'File Extension:', fileExt
            filename = filename.replace('?', '_')
            filename = os.path.join(dir, filename.split('/')[-1])
            num = filename.find(fileExt) + len(fileExt)
            filename = filename[:num]
        else:
            filename = filename.replace('?', '_')
            filename = os.path.join(dir, filename.split('/')[-1]) + '.' + basicImgFileTypes[0]
        print 'File Saving:', filename
        #---------SAVE PATH + FILENAME (Where It is downloading)
        #--------- FULL URL PATH OF THE IMG
        imageUrl = image['src']
        print 'IMAGE SRC:', imageUrl
        if (imageUrl.find('http://') > -1 or imageUrl.find('https://') > -1):
            pass
        else:
            if (url.find('http://') > -1):
                imageUrl = url[:len('http://')]
                imageUrl = 'http://' + imageUrl.split('/')[0] + image['src']
            elif (url.find('https://') > -1):
                imageUrl = url[:len('https://')]
                imageUrl = 'https://' + imageUrl.split('/')[0] + image['src']
            else:
                imageUrl = image['src']
        print 'IMAGE URL:', imageUrl
        #--------- FULL URL PATH OF THE IMG
        #--------- TRY DOWNLOAD
        try:
            browser.retrieve(imageUrl, filename)
            print "Downloaded:", image['src'].split('/')[-1]
            print
        except (mechanize.HTTPError, mechanize.URLError) as e:
            print "Can't Download:", image['src'].split('/')[-1]
            print
            pass
        #--------- TRY DOWNLOAD
    browser.close()

DownloadIMGs('https://stackoverflow.com/questions/15593925/downloading-a-image-using-python-mechanize')

Extract image links from the webpage using Python

So I wanted to get all of the pictures on this page (of the NBA teams):
http://www.cbssports.com/nba/draft/mock-draft
However, my code gives a bit more than that. It gives me
<img src="http://sports.cbsimg.net/images/nba/logos/30x30/ORL.png" alt="Orlando Magic" width="30" height="30" border="0" />
How can I shorten it to give me only http://sports.cbsimg.net/images/nba/logos/30x30/ORL.png?
My code:
import urllib2
from BeautifulSoup import BeautifulSoup
# or if you're using BeautifulSoup4:
# from bs4 import BeautifulSoup

soup = BeautifulSoup(urllib2.urlopen('http://www.cbssports.com/nba/draft/mock-draft').read())
rows = soup.findAll("table", attrs={'class': 'data borderTop'})[0].tbody.findAll("tr")[2:]
for row in rows:
    fields = row.findAll("td")
    if len(fields) >= 3:
        anchor = row.findAll("td")[1].find("a")
        if anchor:
            print anchor
I know this can be "traumatic", but for those automatically generated pages, where you just want to grab the damn images and never come back, a quick-and-dirty regular expression that matches the desired pattern tends to be my choice (no Beautiful Soup dependency is a great advantage):
import urllib, re

source = urllib.urlopen('http://www.cbssports.com/nba/draft/mock-draft').read()

## every image name is an abbreviation composed by capital letters, so...
for link in re.findall('http://sports.cbsimg.net/images/nba/logos/30x30/[A-Z]*.png', source):
    print link
    ## the code above just prints the link;
    ## if you want to actually download, set the flag below to True
    actually_download = False
    if actually_download:
        filename = link.split('/')[-1]
        urllib.urlretrieve(link, filename)
Hope this helps!
To save all the images on http://www.cbssports.com/nba/draft/mock-draft,
import urllib2
import os
from BeautifulSoup import BeautifulSoup

URL = "http://www.cbssports.com/nba/draft/mock-draft"
default_dir = os.path.join(os.path.expanduser("~"), "Pictures")
opener = urllib2.build_opener()
urllib2.install_opener(opener)
soup = BeautifulSoup(urllib2.urlopen(URL).read())
imgs = soup.findAll("img", {"alt": True, "src": True})
for img in imgs:
    img_url = img["src"]
    filename = os.path.join(default_dir, img_url.split("/")[-1])
    img_data = opener.open(img_url)
    f = open(filename, "wb")
    f.write(img_data.read())
    f.close()
To save any particular image on http://www.cbssports.com/nba/draft/mock-draft, use
soup.find("img",{"src":"image_name_from_source"})
You can use these functions to get the list of all image URLs from a URL:
import re
import requests

#
# get_url_images_in_text()
#
# @param html - the html to extract image urls from.
# @param protocol - the protocol of the website, prepended to urls that do not start with a protocol.
#
# @return list of image urls.
#
def get_url_images_in_text(html, protocol):
    urls = []
    all_urls = re.findall(r'((http\:|https\:)?\/\/[^"\' ]*?\.(png|jpg))', html, flags=re.IGNORECASE | re.MULTILINE | re.UNICODE)
    for url in all_urls:
        if not url[0].startswith("http"):
            urls.append(protocol + url[0])
        else:
            urls.append(url[0])
    return urls

#
# get_images_from_url()
#
# @param url - the url to extract image urls from.
#
# @return list of image urls.
#
def get_images_from_url(url):
    protocol = url.split('/')[0]
    resp = requests.get(url)
    return get_url_images_in_text(resp.text, protocol)
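A quick usage sketch (the URL is just an example):

if __name__ == '__main__':
    for image_url in get_images_from_url('http://www.cbssports.com/nba/draft/mock-draft'):
        print(image_url)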
