python save image from url

I have a problem using Python to save an image from a URL, with either a urllib2 request or urllib.urlretrieve. The URL of the image is valid: I can download it manually in a browser. However, when I use Python to download the image, the resulting file cannot be opened. I use macOS Preview to view the image. Thank you!
UPDATE:
The code is as follows:
def downloadImage(self):
    request = urllib2.Request(self.url)
    pic = urllib2.urlopen(request)
    print "downloading: " + self.url
    print self.fileName
    filePath = localSaveRoot + self.catalog + self.fileName + Picture.postfix
    # urllib.urlretrieve(self.url, filePath)
    with open(filePath, 'wb') as localFile:
        localFile.write(pic.read())
The image URL that I want to download is
http://site.meishij.net/r/58/25/3568808/a3568808_142682562777944.jpg
This URL is valid and I can save it through the browser, but the Python code downloads a file that cannot be opened. Preview says "It may be damaged or use a file format that Preview doesn't recognize."
I compared the image downloaded by Python with the one downloaded manually through the browser. The former is several bytes smaller, so it seems the file is incomplete, but I don't know why Python cannot download it completely.

import requests
img_data = requests.get(image_url).content
with open('image_name.jpg', 'wb') as handler:
    handler.write(img_data)

Sample code that works for me on Windows:
import requests
with open('pic1.jpg', 'wb') as handle:
    response = requests.get(pic_url, stream=True)
    if not response.ok:
        print(response)
    for block in response.iter_content(1024):
        if not block:
            break
        handle.write(block)

This is the simplest way to download and save an image from the internet using the urllib.request package.
Simply pass the image URL (where you want to download the image from) and a destination path (where the downloaded image should be saved locally, including the image name with a .jpg or .png extension). Replace "local-filename.jpg" below accordingly.
Python 3
import urllib.request
imgURL = "http://site.meishij.net/r/58/25/3568808/a3568808_142682562777944.jpg"
urllib.request.urlretrieve(imgURL, "D:/abc/image/local-filename.jpg")
You can download multiple images as well if you have all the image URLs: just pass those URLs in a for loop, and the code downloads each image automatically, as sketched below.
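For example, a minimal sketch (the URL list here is hypothetical; the target folder reuses the example path above):
import urllib.request

image_urls = [  # hypothetical URLs
    "http://example.com/image1.jpg",
    "http://example.com/image2.jpg",
]
for i, img_url in enumerate(image_urls):
    # save each image under a numbered local filename
    urllib.request.urlretrieve(img_url, f"D:/abc/image/image_{i}.jpg")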

Python code snippet to download a file from a URL and save it with its own filename:
import requests
url = 'http://google.com/favicon.ico'
filename = url.split('/')[-1]
r = requests.get(url, allow_redirects=True)
open(filename, 'wb').write(r.content)

import random
import urllib.request
def download_image(url):
    name = random.randrange(1, 100)
    fullname = str(name) + ".jpg"
    urllib.request.urlretrieve(url, fullname)
download_image("http://site.meishij.net/r/58/25/3568808/a3568808_142682562777944.jpg")

On Linux, you can use the wget command:
import os
url1 = 'YOUR_URL_WHATEVER'
os.system('wget {}'.format(url1))
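Note that interpolating an arbitrary URL into a shell command can misbehave (or be abused) if the URL contains shell metacharacters. A slightly safer sketch using subprocess, still assuming wget is installed:
import subprocess

url1 = 'YOUR_URL_WHATEVER'
# passing the URL as a separate list element avoids shell interpolation
subprocess.run(['wget', url1], check=True)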

You can pick any arbitrary image from Google Images, copy the URL, and use the following approach to download it.
Note that the extension isn't always included in the URL, as some of the other answers seem to assume. You can automatically detect the correct extension using imghdr from the standard library (note: imghdr is deprecated since Python 3.11 and was removed in Python 3.13).
import requests, imghdr
gif_url = 'https://media.tenor.com/images/eff22afc2220e9df92a7aa2f53948f9f/tenor.gif'
img_url = 'https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQwXRq7zbWry0MyqWq1Rbq12g_oL-uOoxo4Yw&usqp=CAU'
for url, save_basename in [
    (gif_url, 'gif_download_test'),
    (img_url, 'img_download_test'),
]:
    response = requests.get(url)
    response.raise_for_status()  # raises requests.HTTPError on a bad status
    extension = imghdr.what(file=None, h=response.content)
    save_path = f"{save_basename}.{extension}"
    with open(save_path, 'wb') as f:
        f.write(response.content)

If anyone is wondering how to get the image extension, one option is the string split method on the image URL:
str_arr = str(img_url).split('.')
img_ext = '.' + str_arr[3]  # for www.bigbasket.com/patanjali-atta.jpg, 'jpg' comes after the 3rd dot
img_data = requests.get(img_url).content
with open(img_name + img_ext, 'wb') as handler:
    handler.write(img_data)
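Counting dots like this is fragile: it breaks when the domain or path contains a different number of dots, or when the URL carries a query string. A more robust sketch using only the standard library (the URL is hypothetical):
import os
from urllib.parse import urlparse

img_url = 'https://www.bigbasket.com/patanjali-atta.jpg?v=1'  # hypothetical URL
path = urlparse(img_url).path        # '/patanjali-atta.jpg'
img_ext = os.path.splitext(path)[1]  # '.jpg'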

Download and save an image to a directory:
import requests
headers = {"User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"Accept-Language": "en-US,en;q=0.9"
}
img_data = requests.get(url=image_url, headers=headers).content
with open(create_dir() + "/" + 'image_name' + '.png', 'wb') as handler:
handler.write(img_data)
For creating the directory:
import os

def create_dir():
    # Directory
    dir_ = "CountryFlags"
    # Parent directory path
    parent_dir = os.path.dirname(os.path.realpath(__file__))
    # Path
    path = os.path.join(parent_dir, dir_)
    os.makedirs(path, exist_ok=True)  # unlike os.mkdir, this won't fail if the directory already exists
    return path

If you want to stick to two lines:
with open(os.path.join(dir_path, url[0]), 'wb') as f:
    f.write(requests.get(new_url).content)

Related

EASY PYTHON SELENIUM: How do I download an mp4 WITHOUT using urllib?

I'm trying to download this video: https://www.learningcontainer.com/wp-content/uploads/2020/05/sample-mp4-file.mp4
I tried the following but it doesn't work.
link = "https://www.learningcontainer.com/wp-content/uploads/2020/05/sample-mp4-file.mp4"
urllib.request.urlretrieve(link, 'video.mp4')
I'm getting:
urllib.error.HTTPError: HTTP Error 403: Forbidden
Is there another way to download an mp4 file without using urllib?
I have no problem downloading it with the requests module:
import requests
url = 'https://www.learningcontainer.com/wp-content/uploads/2020/05/sample-mp4-file.mp4'
response = requests.get(url)
with open('video.mp4', 'wb') as f:  # use 'b' to open in bytes mode
    f.write(response.content)       # use .content to get bytes
It was a small file (~10 MB), but for bigger files you may want to download in chunks:
import requests
url = 'https://www.learningcontainer.com/wp-content/uploads/2020/05/sample-mp4-file.mp4'
response = requests.get(url, stream=True)
with open('video.mp4', 'wb') as f:
    for chunk in response.iter_content(10000):  # 10_000 bytes
        if chunk:
            #print('.', end='')  # every dot will mean 10_000 bytes
            f.write(chunk)
The documentation shows Streaming Requests, but for text data.
The url is a string, so you can use string functions to get the element after the last /:
filename = url.split('/')[-1]
Or you can try os.path.
At least it works on Linux, maybe because Linux also uses / in local paths.
import os
head, tail = os.path.split(url)
# head: 'https://www.learningcontainer.com/wp-content/uploads/2020/05'
# tail: 'sample-mp4-file.mp4'
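As for the original 403 with urllib: it is most likely the server rejecting Python's default User-Agent. If you prefer to stay with urllib.request, here is a sketch that sends a browser-like User-Agent header (the header value is just an example):
import urllib.request

link = "https://www.learningcontainer.com/wp-content/uploads/2020/05/sample-mp4-file.mp4"
req = urllib.request.Request(link, headers={'User-Agent': 'Mozilla/5.0'})
with urllib.request.urlopen(req) as response, open('video.mp4', 'wb') as f:
    f.write(response.read())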

File corrupted when I try to download via requests.get()

I'm trying to automate the download of docs via Selenium.
I'm using requests.get() to download the file after extracting the url from the website:
import requests
url= 'https://www.schroders.com/hkrprewrite/retail/en/attach.aspx?fileid=e47b0366c44e4f33b04c20b8b6878aa7.pdf'
myfile = requests.get(url)
open('/Users/hemanthj/Downloads/AB Test/' + "A-Acc-USD" + '.pdf', 'wb').write(myfile.content)
time.sleep(3)
The file is downloaded but is corrupted when I try to open. The file size is only a few KB at most.
I tried adding the header info from this thread too but no luck:
Corrupted PDF file after requests.get() with Python
What within the headers makes the download work? Any solutions?
The problem was an incorrect URL: it loaded HTML instead of a PDF.
Looking through the site, I found the URL that you were looking for.
Try this code and then open the document with a PDF reader program.
import requests
import pathlib
def load_pdf_from(url: str, filename: pathlib.Path) -> None:
    response: requests.Response = requests.get(url, stream=True)
    if response.status_code == 200:
        with open(filename, 'wb') as pdf_file:
            for chunk in response.iter_content(chunk_size=1024):
                pdf_file.write(chunk)
    else:
        print(f"Failed to load pdf: {url}")

url: str = 'https://www.schroders.com/hkrprewrite/retail/en/attachment2.aspx?fileid=e47b0366c44e4f33b04c20b8b6878aa7.pdf'
target_filename: pathlib.Path = pathlib.Path.cwd().joinpath('loaded_pdf.pdf')
load_pdf_from(url, target_filename)
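To catch this kind of problem early (HTML coming back instead of a PDF), you can also check the response's Content-Type header before writing anything; a minimal sketch, assuming url holds the attachment link:
import requests

response = requests.get(url, stream=True)
content_type = response.headers.get('Content-Type', '')
if 'application/pdf' not in content_type:
    # the server returned something else, often an HTML error page
    raise ValueError(f"Expected a PDF but got {content_type!r}")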

Python Selenium download images (jpeg, png) or PDF using ChromeDriver

I have a Selenium script in Python (using ChromeDriver on Windows) that fetches the download links of various attachments (of different file types) from a page and then opens these links to download the attachments. This works fine for the file types that ChromeDriver can't preview, as they get downloaded by default. But images (JPEG, PNG) and PDFs are previewed by default and hence aren't automatically downloaded.
The ChromeDriver options I am currently using (work for non preview-able files) :
chrome_options = webdriver.ChromeOptions()
prefs = {'download.default_directory' : 'custom_download_dir'}
chrome_options.add_experimental_option('prefs', prefs)
driver = webdriver.Chrome("./chromedriver.exe", chrome_options=chrome_options)
This downloads the files to 'custom_download_dir', no issues. But the preview-able files are just previewed in the ChromeDriver instance and not downloaded.
Are there any ChromeDriver Settings that can disable this preview behavior and directly download all files irrespective of the extensions?
If not, can this be done using Firefox for instance?
Instead of relying on specific browser/driver options, I would implement a more generic solution using the image URL to perform the download.
You can get the image URL with code similar to:
driver.find_element_by_id("your-image-id").get_attribute("src")
And then I would download the image using, for example, urllib.
Here's some pseudo-code for Python2:
import urllib
url = driver.find_element_by_id("your-image-id").get_attribute("src")
urllib.urlretrieve(url, "local-filename.jpg")
Here's the same for Python3:
import urllib.request
url = driver.find_element_by_id("your-image-id").get_attribute("src")
urllib.request.urlretrieve(url, "local-filename.jpg")
Edit after the comment: just another example of how to download a file once you know its URL:
import requests
from PIL import Image
from io import BytesIO  # r.content is bytes, so use BytesIO rather than StringIO

image_name = 'image.jpg'
url = 'http://example.com/image.jpg'
r = requests.get(url)
i = Image.open(BytesIO(r.content))
i.save(image_name)
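For PDFs specifically, Chrome also exposes a preference that forces downloading instead of opening the built-in viewer. A sketch of such prefs (these preference names are standard Chrome settings, but treat the exact behavior as something to verify against your Chrome version):
from selenium import webdriver

chrome_options = webdriver.ChromeOptions()
prefs = {
    'download.default_directory': 'custom_download_dir',
    # ask Chrome to download PDFs instead of previewing them
    'plugins.always_open_pdf_externally': True,
}
chrome_options.add_experimental_option('prefs', prefs)
driver = webdriver.Chrome(options=chrome_options)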
With the selenium-wire library, it is possible to download images via ChromeDriver.
I have defined the following function to parse each request and save the response body to a file when necessary.
import os
from mimetypes import guess_extension
from seleniumwire import webdriver
def download_assets(requests, asset_dir="temp", default_fname="untitled", exts=[".png", ".jpeg", ".jpg", ".svg", ".gif", ".pdf", ".ico"]):
    asset_list = {}
    for req_idx, request in enumerate(requests):
        # request.headers
        # request.response.body is the raw response body in bytes
        ext = guess_extension(request.response.headers['Content-Type'].split(';')[0].strip())
        if ext is None or ext not in exts:
            # Don't know the file extension, or it is not in the whitelist
            continue
        # Construct a filename
        fname = os.path.basename(request.url.split('?')[0])
        fname = "".join(x for x in fname if (x.isalnum() or x in "._- "))
        if fname == "":
            fname = f"{default_fname}_{req_idx}"
        if not fname.endswith(ext):
            fname = f"{fname}{ext}"
        fpath = os.path.join(asset_dir, fname)
        # Save the file
        print(f"{request.url} -> {fpath}")
        asset_list[fpath] = request.url
        with open(fpath, "wb") as file:
            file.write(request.response.body)
    return asset_list
Let's download some images from Google homepage to temp folder.
# Create a new instance of the Chrome/Firefox driver
driver = webdriver.Chrome()
# Go to the Google home page
driver.get('https://www.google.com')
# Download content to temp folder
asset_dir = "temp"
os.makedirs(asset_dir, exist_ok=True)
download_assets(driver.requests, asset_dir=asset_dir)
driver.close()
Note that the function can be improved to preserve the directory structure as well; a possible sketch follows.
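For example, a sketch that mirrors the URL's path under the asset directory (a hypothetical helper, not part of the function above):
import os
from urllib.parse import urlparse

def local_path_for(url, asset_dir="temp"):
    # e.g. https://example.com/images/logos/a.png -> temp/images/logos/a.png
    rel_path = urlparse(url).path.lstrip("/") or "index"  # fall back for bare '/' paths
    fpath = os.path.join(asset_dir, rel_path)
    os.makedirs(os.path.dirname(fpath), exist_ok=True)
    return fpath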
Here is another simple way, but #Pitto's answer above is slightly more succinct.
import requests
from selenium.webdriver.common.by import By

webelement_img = ff.find_element(By.XPATH, '//img')  # ff is your webdriver instance
url = webelement_img.get_attribute('src') or 'https://someimages.com/path-to-image.jpg'
data = requests.get(url).content
local_filename = 'filename_on_your_computer.jpg'
with open(local_filename, 'wb') as f:
    f.write(data)

Certain image links resulting in corrupt downloads when using requests with shutil/f.write

I've noticed that when I try to download different images, certain images end up corrupt despite being valid image links.
I've used both shutil and f.write (with a context manager) to write images to a file, and I've noticed that depending on which one I use, different images tend to get corrupted.
Here is one such image link that gets corrupted when using shutil:
'http://www.floridagofishing.com/_images/fish/bonito-atlantic.JPG'
Here is a sample of the code I was using (I made some changes to fit this single example, so hopefully there aren't any unintentional errors):
import shutil
import requests
from fake_useragent import UserAgent
ua = UserAgent()
headers = {'User-Agent': ua.firefox}
try:
    r = requests.get(link, stream=True, headers=headers)
except requests.exceptions.RequestException as e:
    print(e)
    continue  # in my full script this is inside a loop over links
with open('testing' + '.jpg', 'wb') as f:
    shutil.copyfileobj(r.raw, f)
print('image downloaded')
If I use f.write in this case, it works, but f.write produces different corrupted images, such as with this link:
'https://journal.amberjack.com/wp-content/uploads/2016/09/pmack-e1473782709607.png' # valid image file
try:
    r = requests.get(link, headers=headers)
except requests.exceptions.RequestException as e:
    print(e)
    continue  # as above, inside a loop in the full script
with open('testing' + '.' + 'jpg', 'wb') as f:
    f.write(r.content)
print('image downloaded: Total = ', image_count)
This is resulting in a corrupt image.
What exactly is going on and how can I avoid the corruption of images?
Thank you.
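A likely culprit for the shutil case: r.raw is the undecoded socket stream, so if the server sent the body gzip- or deflate-compressed, shutil.copyfileobj writes the compressed bytes to disk. Telling urllib3 to decode while copying usually fixes it (the same decode_content point comes up in the requests answer further down); a sketch reusing the variables from the question:
import shutil
import requests

r = requests.get(link, stream=True, headers=headers)
r.raw.decode_content = True  # decompress gzip/deflate while copying
with open('testing.jpg', 'wb') as f:
    shutil.copyfileobj(r.raw, f)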

How to download image using requests

I'm trying to download and save an image from the web using python's requests module.
Here is the (working) code I used:
img = urllib2.urlopen(settings.STATICMAP_URL.format(**data))
with open(path, 'w') as f:
    f.write(img.read())
Here is the new (non-working) code using requests:
r = requests.get(settings.STATICMAP_URL.format(**data))
if r.status_code == 200:
    img = r.raw.read()
    with open(path, 'w') as f:
        f.write(img)
Can you help me on what attribute from the response to use from requests?
You can either use the response.raw file object, or iterate over the response.
Using the response.raw file-like object will not, by default, decode compressed responses (with GZIP or deflate). You can force it to decompress for you anyway by setting the decode_content attribute to True (requests sets it to False to control decoding itself). You can then use shutil.copyfileobj() to have Python stream the data to a file object:
import requests
import shutil
r = requests.get(settings.STATICMAP_URL.format(**data), stream=True)
if r.status_code == 200:
    with open(path, 'wb') as f:
        r.raw.decode_content = True
        shutil.copyfileobj(r.raw, f)
To iterate over the response use a loop; iterating like this ensures that data is decompressed by this stage:
r = requests.get(settings.STATICMAP_URL.format(**data), stream=True)
if r.status_code == 200:
    with open(path, 'wb') as f:
        for chunk in r:
            f.write(chunk)
This'll read the data in 128 byte chunks; if you feel another chunk size works better, use the Response.iter_content() method with a custom chunk size:
r = requests.get(settings.STATICMAP_URL.format(**data), stream=True)
if r.status_code == 200:
    with open(path, 'wb') as f:
        for chunk in r.iter_content(1024):
            f.write(chunk)
Note that you need to open the destination file in binary mode to ensure Python doesn't try to translate newlines for you. We also set stream=True so that requests doesn't download the whole image into memory first.
Get a file-like object from the request and copy it to a file. This will also avoid reading the whole thing into memory at once.
import shutil
import requests
url = 'http://example.com/img.png'
response = requests.get(url, stream=True)
with open('img.png', 'wb') as out_file:
    shutil.copyfileobj(response.raw, out_file)
del response
How about this for a quick solution?
import requests
url = "http://craphound.com/images/1006884_2adf8fc7.jpg"
response = requests.get(url)
if response.status_code == 200:
    with open("/Users/apple/Desktop/sample.jpg", 'wb') as f:
        f.write(response.content)
I have the same need for downloading images using requests. I first tried the answer of Martijn Pieters, and it works well. But when I profiled this simple function, I found that it uses many function calls compared to urllib and urllib2.
I then tried the way recommended by the author of the requests module:
import requests
from PIL import Image
# on Python 2.x, use:
# from StringIO import StringIO
# on Python 3.x, r.content is bytes, so use BytesIO:
from io import BytesIO

r = requests.get('https://example.com/image.jpg')
i = Image.open(BytesIO(r.content))
This reduced the number of function calls considerably and thus sped up my application.
Here is the code of my profiler and the result.
#!/usr/bin/python
import requests
from io import BytesIO  # on Python 2, use: from StringIO import StringIO
from PIL import Image
import profile

def testRequest():
    image_name = 'test1.jpg'
    url = 'http://example.com/image.jpg'
    r = requests.get(url, stream=True)
    with open(image_name, 'wb') as f:
        for chunk in r.iter_content():
            f.write(chunk)

def testRequest2():
    image_name = 'test2.jpg'
    url = 'http://example.com/image.jpg'
    r = requests.get(url)
    i = Image.open(BytesIO(r.content))
    i.save(image_name)

if __name__ == '__main__':
    profile.run('testRequest()')
    profile.run('testRequest2()')
The result for testRequest:
343080 function calls (343068 primitive calls) in 2.580 seconds
And the result for testRequest2:
3129 function calls (3105 primitive calls) in 0.024 seconds
This might be easier than using requests. This is the only time I'll ever suggest not using requests to do HTTP stuff.
Two-liner using urllib:
>>> import urllib.request
>>> urllib.request.urlretrieve("http://www.example.com/songs/mp3.mp3", "mp3.mp3")
There is also a nice Python module named wget that is pretty easy to use. Found here.
This demonstrates the simplicity of the design:
>>> import wget
>>> url = 'http://www.futurecrew.com/skaven/song_files/mp3/razorback.mp3'
>>> filename = wget.download(url)
100% [................................................] 3841532 / 3841532
>>> filename
'razorback.mp3'
Enjoy.
Edit: You can also add an out parameter to specify a path.
>>> out_filepath = <output_filepath>
>>> filename = wget.download(url, out=out_filepath)
The following code snippet downloads a file and saves it with its filename from the specified URL:
import requests
url = "http://example.com/image.jpg"
filename = url.split("/")[-1]
r = requests.get(url, timeout=0.5)
if r.status_code == 200:
    with open(filename, 'wb') as f:
        f.write(r.content)
There are 2 main ways:
Using .content (simplest/official) (see Zhenyi Zhang's answer):
import io  # Note: io.BytesIO is StringIO.StringIO on Python 2.
import requests
from PIL import Image  # needed for Image.open below

r = requests.get('http://lorempixel.com/400/200')
r.raise_for_status()
with io.BytesIO(r.content) as f:
    with Image.open(f) as img:
        img.show()
Using .raw (see Martijn Pieters's answer):
import requests
import PIL.Image  # needed for PIL.Image.open below

r = requests.get('http://lorempixel.com/400/200', stream=True)
r.raise_for_status()
r.raw.decode_content = True  # Required to decompress gzip/deflate compressed responses.
with PIL.Image.open(r.raw) as img:
    img.show()
r.close()  # Safety when stream=True: ensure the connection is released.
Timing both shows no noticeable difference.
As easy as importing Image and requests:
from PIL import Image
import requests

img = Image.open(requests.get(url, stream=True).raw)  # see the decode_content note above for compressed responses
img.save('img1.jpg')
This is how I did it
import requests
from PIL import Image
from io import BytesIO
url = 'your_url'
files = {'file': ("C:/Users/shadow/Downloads/black.jpeg", open('C:/Users/shadow/Downloads/black.jpeg', 'rb'),'image/jpg')}
response = requests.post(url, files=files)
img = Image.open(BytesIO(response.content))
img.show()
Here is a more user-friendly answer that still uses streaming.
Just define these functions and call getImage(). It will use the same file name as the url and write to the current directory by default, but both can be changed.
import requests
from io import BytesIO  # on Python 2, use: from StringIO import StringIO
from PIL import Image

def createFilename(url, name, folder):
    dotSplit = url.split('.')
    if name is None:
        # use the same name as in the url
        slashSplit = dotSplit[-2].split('/')
        name = slashSplit[-1]
    ext = dotSplit[-1]
    file = '{}{}.{}'.format(folder, name, ext)
    return file

def getImage(url, name=None, folder='./'):
    file = createFilename(url, name, folder)
    with open(file, 'wb') as f:
        r = requests.get(url, stream=True)
        for block in r.iter_content(1024):
            if not block:
                break
            f.write(block)

def getImageFast(url, name=None, folder='./'):
    file = createFilename(url, name, folder)
    r = requests.get(url)
    i = Image.open(BytesIO(r.content))
    i.save(file)

if __name__ == '__main__':
    # Uses less memory
    getImage('http://www.example.com/image.jpg')
    # Faster
    getImageFast('http://www.example.com/image.jpg')
The request guts of getImage() are based on the answer here and the guts of getImageFast() are based on the answer above.
I'm going to post an answer as I don't have enough rep to make a comment, but with wget as posted by Blairg23, you can also provide an out parameter for the path.
wget.download(url, out=path)
This is the first response that comes up for google searches on how to download a binary file with requests. In case you need to download an arbitrary file with requests, you can use:
import requests
url = 'https://s3.amazonaws.com/lab-data-collections/GoogleNews-vectors-negative300.bin.gz'
open('GoogleNews-vectors-negative300.bin.gz', 'wb').write(requests.get(url, allow_redirects=True).content)
My approach was to use response.content (blob) and save it to a file in binary mode:
img_blob = requests.get(url, timeout=5).content
with open(destination + '/' + title, 'wb') as img_file:
    img_file.write(img_blob)
Check out my Python project that downloads images from unsplash.com based on keywords.
You can do something like this:
import requests
import random

url = "https://images.pexels.com/photos/1308881/pexels-photo-1308881.jpeg?auto=compress&cs=tinysrgb&dpr=1&w=500"
name = random.randrange(1, 1000)
filename = str(name) + ".jpg"
response = requests.get(url)
if response.ok:  # status_code is an int, so check response.ok (or compare to 200)
    with open(filename, 'wb') as f:  # binary mode, since response.content is bytes
        f.write(response.content)
Agree with Blairg23 that using urllib.request.urlretrieve is one of the easiest solutions.
One note I want to point out here: sometimes it won't download anything because the request was sent via a script (bot), and if you want to parse images from Google Images or other search engines, you need to pass a user-agent in the request headers first and then download the image; otherwise the request will be blocked and will throw an error.
Pass the user-agent and download the image:
import urllib.request

opener = urllib.request.build_opener()
opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582')]
urllib.request.install_opener(opener)
urllib.request.urlretrieve(URL, 'image_name.jpg')
Code in the online IDE that scrapes and downloads images from Google images using requests, bs4, urllib.requests.
Alternatively, if your goal is to scrape images from search engines like Google, Bing, Yahoo!, DuckDuckGo (and other search engines), then you can use SerpApi. It's a paid API with a free plan.
The biggest difference is that there's no need to figure out how to bypass blocks from search engines or how to extract certain parts from the HTML or JavaScript since it's already done for the end-user.
Example code to integrate:
import os, json, urllib.request
from serpapi import GoogleSearch

params = {
    "api_key": os.getenv("API_KEY"),
    "engine": "google",
    "q": "pexels cat",
    "tbm": "isch"
}

search = GoogleSearch(params)
results = search.get_dict()

print(json.dumps(results['images_results'], indent=2, ensure_ascii=False))

# download images
os.makedirs('SerpApi_Images', exist_ok=True)  # the target folder must exist for urlretrieve
for index, image in enumerate(results['images_results']):
    # print(f'Downloading {index} image...')
    opener = urllib.request.build_opener()
    opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582')]
    urllib.request.install_opener(opener)
    # saves the original-resolution image to the SerpApi_Images folder, adding the index to the file name
    urllib.request.urlretrieve(image['original'], f'SerpApi_Images/original_size_img_{index}.jpg')
Part of the output:
'''
[
  ... # other images
  {
    "position": 100, # the 100th image
    "thumbnail": "https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQK62dIkDjNCvEgmGU6GGFZcpVWwX-p3FsYSg&usqp=CAU",
    "source": "homewardboundnj.org",
    "title": "pexels-helena-lopes-1931367 - Homeward Bound Pet Adoption Center",
    "link": "https://homewardboundnj.org/upcoming-event/black-cat-appreciation-day/pexels-helena-lopes-1931367/",
    "original": "https://homewardboundnj.org/wp-content/uploads/2020/07/pexels-helena-lopes-1931367.jpg",
    "is_product": false
  }
]
'''
Disclaimer: I work for SerpApi.
Here is some very simple code:
import requests
response = requests.get("https://i.imgur.com/ExdKOOz.png") ## Making a variable to get image.
file = open("sample_image.png", "wb") ## Creates the file for image
file.write(response.content) ## Saves file content
file.close()
To download an image:
import requests

picture_request = requests.get(url)  # url is the image URL
open('picture.jpg', 'wb').write(picture_request.content)
