NameError: global name 'buf' is not defined

I don't know what this error means. Any advice about the error or the rest of the code is greatly appreciated.
import urllib
import urllib2
import os
import re
from bs4 import BeautifulSoup

def image_scrape():
    url = raw_input("Type url for image scrape: ")
    content = urllib2.urlopen(url).read()
    soup = BeautifulSoup(content)
    name = 0
    for tag in soup.find_all(re.compile("img")):
        path = 'C:\Users\Sorcerer\Downloads'
        name += 1
        filename = name
        file_path = "%s%s" % (path, filename)
        downloaded_image = file(file_path, "wb")
        downloaded_image.write(buf)
        downloaded_image.close()

image_scrape()

You have this line in your code:

downloaded_image.write(buf)

The Python interpreter has not seen a variable named buf anywhere before this point in your code, hence the NameError.
Thoughts on the rest of your code:
It is advisable to use the os module to do what you are doing with this line:

file_path = "%s%s" % (path, filename)

like this:

import os

path = os.path.normpath('C:\\Users\\Sorcerer\\Downloads')
file_path = os.path.join(path, str(name))  # name is an int in your code, so convert it for the path
Looks like you are trying to find all the image links in the page and save each one to the file system at the location referenced by file_path. Assuming the link to the image is in the variable tag, this is what you can do:
import requests

r = requests.get(tag, stream=True)
if r.status_code == 200:
    with open(file_path, 'wb') as f:  # the with block closes the file for you
        for chunk in r.iter_content():
            f.write(chunk)
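Note that soup.find_all returns Tag objects rather than URL strings, so in practice you would pull the address out of each tag's src attribute before fetching it. A minimal sketch of that step (the src handling is an assumption on top of the answer above, not part of it):

for tag in soup.find_all("img"):
    img_url = tag.get("src")  # the image URL lives in the tag's src attribute
    if not img_url:  # skip <img> tags that have no src
        continue
    r = requests.get(img_url, stream=True)
    # ... save the response to file_path as shown above ...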

Related

Download images from URL Python

I have a problem with my script when I try to download images from a web URL. It works on another page (offex.pl), but the images from my shop are not working.
The files are all created, but I can't open them.
My code:
import os
import time
import requests
from termcolor import colored

def get_folder(url):
    all_folders = os.path.dirname(url)
    folder = os.path.basename(all_folders)
    return folder

def filename(url):
    file = url[url.rfind("/") + 1:]
    return file

def download(link):
    error = []
    ok = 0
    fail = 0
    root_folder = get_folder(link)
    path = "{}/{}".format("download", root_folder)
    if not os.path.exists(path):
        os.makedirs(path)
    url = link
    file = filename(link)
    result = requests.get(url, stream=True)
    completeName = os.path.join("download", root_folder, file)
    print(completeName)
    if result.status_code == 200:
        image = result.raw.read()
        open(completeName, "wb").write(image)
        ok += 1
        succes = "{} {} {}".format(ok, colored("Pobrano:", "green"), url)  # "Pobrano" = "Downloaded"
        print(succes)
        time.sleep(1)
    else:
        found_error = "{} {}".format(colored("Brak pliku!:", "red"), url)  # "Brak pliku!" = "File missing!"
        print(found_error)
        fail += 1
        error.append("ID:{} NUMBER:{} link: {}".format(id, url))
        with open("log.txt", "w") as filehandle:
            for listitem in error:
                filehandle.write('%s\n' % listitem)
    print(colored("Pobrano plików: ", "green"), ok)  # "Files downloaded"
    print(colored("Błędy pobierania: ", "red"), fail)  # "Download errors"

img_url = "https://sw19048.smartweb-static.com/upload_dir/shop/misutonida_ec-med-384-ix.jpg"
download(img_url)
What am I doing wrong?
For example, https://offex.pl/images/detailed/11/94102_jeep_sbhn-8h.jpg downloads OK,
but the URL from my shop, https://sw19048.smartweb-static.com/upload_dir/shop/misutonida_ec-med-384-ix.jpg, is not working.
If you want to use the requests module, you can use this:

import requests

response = requests.get("https://sw19048.smartweb-static.com/upload_dir/shop/misutonida_ec-med-384-ix.jpg")
with open('./Image.jpg', 'wb') as f:
    f.write(response.content)
The issue is with the URL you are using to download. It is not really an issue, just a difference from the other URL you mentioned. Let me explain.
The URL https://offex.pl/images/detailed/11/94102_jeep_sbhn-8h.jpg returns the image as a response without any compression.
On the other hand, the shop URL https://sw19048.smartweb-static.com/upload_dir/shop/misutonida_ec-med-384-ix.jpg returns the image with gzip compression enabled in the headers.
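You can verify this by inspecting the Content-Encoding response header; a quick check (the expected output is my assumption about this server, based on the explanation above):

import requests

r = requests.get("https://sw19048.smartweb-static.com/upload_dir/shop/misutonida_ec-med-384-ix.jpg", stream=True)
print(r.headers.get("Content-Encoding"))  # expected to print 'gzip' for this server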
So the raw response you get is compressed with gzip. If you know the compression is always gzip, you can decompress the response with the gzip module, like below:
import gzip
import io

image = result.raw.read()  # result is the streamed requests response from the question
buffer = io.BytesIO(image)
deflatedContent = gzip.GzipFile(fileobj=buffer)
open("D:/sample.jpg", "wb").write(deflatedContent.read())
Or you can use a library like urllib2, which does not ask the server for gzip encoding in the first place, so the response arrives uncompressed. I was trying to explain why it failed for your URL but not for the other one. Hope this makes sense.
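Alternatively, requests itself can undo the compression on the streamed body for you, instead of you decompressing by hand. A minimal sketch, assuming result and completeName are the variables from the question's code:

result.raw.decode_content = True  # tell urllib3 to strip the gzip encoding while reading
image = result.raw.read()
open(completeName, "wb").write(image)

(Using result.content instead of result.raw would also sidestep the problem, since requests decompresses that automatically, as the first answer above does.)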
Try:

import urllib2

def download_web_image(url):
    request = urllib2.Request(url)
    img = urllib2.urlopen(request).read()
    with open('test.jpg', 'wb') as f:
        f.write(img)

download_web_image("https://sw19048.smartweb-static.com/upload_dir/shop/misutonida_ec-med-384-ix.jpg")

It is working for your URL. I think the issue is with how the library you used handles the response.
from io import BytesIO

import requests
from PIL import Image

fileRequest = requests.get("https://sw19048.smartweb-static.com/upload_dir/shop/misutonida_ec-med-384-ix.jpg")
doc = Image.open(BytesIO(fileRequest.content))  # Pillow decodes the image from the downloaded bytes
doc.save("newFile.jpg")  # re-saving writes a clean, openable JPEG

How to download images from a web page?

I have a Python script that searches for images on a web page, and it's supposed to download them to a folder named 'downloaded'. The last 2-3 lines are problematic; I don't know how to write the correct 'with open' code.
The biggest part of the script is fine, but lines 42-43 give an error:
import os
import requests
from bs4 import BeautifulSoup

downloadDirectory = "downloaded"
baseUrl = "http://pythonscraping.com"

def getAbsoluteURL(baseUrl, source):
    if source.startswith("http://www."):
        url = "http://" + source[11:]
    elif source.startswith("http://"):
        url = source
    elif source.startswith("www."):
        url = source[4:]
        url = "http://" + source
    else:
        url = baseUrl + "/" + source
    if baseUrl not in url:
        return None
    return url

def getDownloadPath(baseUrl, absoluteUrl, downloadDirectory):
    path = absoluteUrl.replace("www.", "")
    path = path.replace(baseUrl, "")
    path = downloadDirectory + path
    directory = os.path.dirname(path)
    if not os.path.exists(directory):
        os.makedirs(directory)
    return path

html = requests.get("http://www.pythonscraping.com")
bsObj = BeautifulSoup(html.content, 'html.parser')
downloadList = bsObj.find_all(src=True)

for download in downloadList:
    fileUrl = getAbsoluteURL(baseUrl, download["src"])
    if fileUrl is not None:
        print(fileUrl)
        with open(fileUrl, getDownloadPath(baseUrl, fileUrl, downloadDirectory), 'wb') as out_file:
            out_file.write(fileUrl.content)
It creates the downloaded folder on my computer, and a misc folder within it, but then it gives a traceback error.
Traceback:

http://pythonscraping.com/misc/jquery.js?v=1.4.4
Traceback (most recent call last):
  File "C:\Python36\kodovi\downloaded.py", line 43, in <module>
    with open(fileUrl, getDownloadPath(baseUrl, fileUrl, downloadDirectory), 'wb') as out_file:
TypeError: an integer is required (got type str)
It seems your downloadList includes some URLs that aren't images. You could instead look for any <img> tags in the HTML:
downloadList = bsObj.find_all('img')
Then use this to download those images:
for download in downloadList:
    fileUrl = getAbsoluteURL(baseUrl, download["src"])
    r = requests.get(fileUrl, allow_redirects=True)
    filename = os.path.join(downloadDirectory, fileUrl.split('/')[-1])
    with open(filename, 'wb') as f:
        f.write(r.content)
EDIT: I've updated the filename = ... line so that it writes a file of the same name into the directory named by the string downloadDirectory. By the way, the normal convention for Python variable names is snake_case, not camelCase.

Python script that gets images from a webpage throwing error: expected a character buffer object

I have a script that is supposed to copy all image files from a website and then write them to a certain file in the directory. Code goes as follows:
import urllib2
import re
from os.path import basename
from urlparse import urlsplit

url = 'https://www.google.com'
response = urllib2.urlopen(url)
source = response.read
file = open("google.txt", "w")
# .seek(0) is for the search
file.seek(0)
file.write(source)
file.close()

patten = '(http)?s?:?(\/\/[^"]*\.(?:png|jpg|jpeg|gif|png|svg))'
for line in open('google.txt'):
    for m in re.findall(pattern, line):
        filename = basename(urlsplit(m[1])[2])
        try:
            img = urllib2.urlopen('https:' + m[1]).read()
            file = open(filename, "w")
            file.write(img)
            file.close()
        except:
            pass
    break
The problem is that when it is executed, it throws expected a character buffer object on the line that contains file.write(source).
I tried putting file.seek(0) before file.write(source), because I read online that this usually fixes the issue, but it does not work for me.
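The error most likely comes from source = response.read, which assigns the method object itself instead of calling it, so file.write() receives a bound method rather than a string. A minimal sketch of the fix (my reading of the code above, not a posted answer):

source = response.read()  # call read(); source is now the page content as a string
file = open("google.txt", "w")
file.write(source)
file.close()

Note that the script also assigns the regex to patten but refers to it later as pattern, which would raise a NameError once this first error is fixed.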

How to batch read and then write a list of weblink .JSON files to specified locations on C drive in Python v2.7

I have a long list of .json files that I need to download to my computer. I want to download them as .json files (so no parsing or anything like that at this point).
I have some code that works for small files, but it is pretty buggy. It also doesn't handle multiple links well.
I'd appreciate any advice to fix up this code:
import os
filename = 'test.json'
path = "C:/Users//Master"
fullpath = os.path.join(path, filename)

import urllib2
url = 'https://www.premierlife.com/secure/index.json'
response = urllib2.urlopen(url)
webContent = response.read()

f = open(fullpath, 'w')
f.write(webContent)
f.close
It's creating a blank file because the f.close at the end should be f.close(); without the parentheses the method is never actually called, so the buffered data never gets flushed to disk.
I took your code, made it into a little function, and then called it in a little loop to go through a .txt file named "list_of_urls.txt" that has one URL per line (you can change the delimiter in the split function if you want to format it differently).
def save_json(url):
    import os
    filename = url.replace('/', '').replace(':', '')  # this replaces / and : in urls
    path = "C:/Users/Master"
    fullpath = os.path.join(path, filename)
    import urllib2
    response = urllib2.urlopen(url)
    webContent = response.read()
    f = open(fullpath, 'w')
    f.write(webContent)
    f.close()
And then the loop:

f = open('list_of_urls.txt')
p = f.read()
url_list = p.split('\n')  # here '\n' is the line break delimiter, which can be changed
for url in url_list:
    save_json(url)
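For reference, list_of_urls.txt is just a plain text file with one URL per line; a hypothetical example of its contents (the second URL is made up to show the format):

https://www.premierlife.com/secure/index.json
https://www.premierlife.com/secure/other.json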

How to download a file using python in a 'smarter' way?

I need to download several files via http in Python.
The most obvious way to do it is just using urllib2:
import urllib2

u = urllib2.urlopen('http://server.com/file.html')
localFile = open('file.html', 'w')
localFile.write(u.read())
localFile.close()
But I'll have to deal with URLs that are nasty in some way, say like this: http://server.com/!Run.aspx/someoddtext/somemore?id=121&m=pdf. When downloaded via the browser, the file has a human-readable name, i.e. accounts.pdf.
Is there any way to handle that in python, so I don't need to know the file names and hardcode them into my script?
Download scripts like that tend to push a header telling the user-agent what to name the file:
Content-Disposition: attachment; filename="the filename.ext"
If you can grab that header, you can get the proper filename.
There's another thread that has a little bit of code to offer up for Content-Disposition-grabbing.
remotefile = urllib2.urlopen('http://example.com/somefile.zip')
remotefile.info()['Content-Disposition']
Based on comments and Oli's answer, I made a solution like this:
import urllib2
from os.path import basename
from urlparse import urlsplit

def url2name(url):
    return basename(urlsplit(url)[2])

def download(url, localFileName=None):
    localName = url2name(url)
    req = urllib2.Request(url)
    r = urllib2.urlopen(req)
    if r.info().has_key('Content-Disposition'):
        # If the response has Content-Disposition, we take the file name from it
        localName = r.info()['Content-Disposition'].split('filename=')[1]
        if localName[0] == '"' or localName[0] == "'":
            localName = localName[1:-1]
    elif r.url != url:
        # if we were redirected, take the real file name from the final URL
        localName = url2name(r.url)
    if localFileName:
        # we can force saving the file under the specified name
        localName = localFileName
    f = open(localName, 'wb')
    f.write(r.read())
    f.close()
It takes the file name from Content-Disposition; if that header is not present, it uses the filename from the URL (and if a redirection happened, the final URL is taken into account).
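A usage sketch with the nasty URL from the question (the resulting name depends on what the server actually sends):

download('http://server.com/!Run.aspx/someoddtext/somemore?id=121&m=pdf')
# saved under e.g. accounts.pdf if the server sent a Content-Disposition header
download('http://server.com/!Run.aspx/someoddtext/somemore?id=121&m=pdf', 'report.pdf')
# localFileName forces the local name regardless of the headers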
Combining much of the above, here is a more Pythonic solution:

import urllib2
import shutil
import urlparse
import os

def download(url, fileName=None):
    def getFileName(url, openUrl):
        if 'Content-Disposition' in openUrl.info():
            # If the response has Content-Disposition, try to get filename from it
            cd = dict(map(
                lambda x: x.strip().split('=') if '=' in x else (x.strip(), ''),
                openUrl.info()['Content-Disposition'].split(';')))
            if 'filename' in cd:
                filename = cd['filename'].strip("\"'")
                if filename:
                    return filename
        # if no filename was found above, parse it out of the final URL.
        return os.path.basename(urlparse.urlsplit(openUrl.url)[2])

    r = urllib2.urlopen(urllib2.Request(url))
    try:
        fileName = fileName or getFileName(url, r)
        with open(fileName, 'wb') as f:
            shutil.copyfileobj(r, f)
    finally:
        r.close()
To kender:

if localName[0] == '"' or localName[0] == "'":
    localName = localName[1:-1]

This is not safe: a web server can pass a badly formatted name such as "file.ext (unmatched opening quote) or file.ext' (stray trailing quote), or even an empty string, in which case localName[0] will raise an exception. Correct code could look like this:

localName = localName.replace('"', '').replace("'", "")
if localName == '':
    localName = SOME_DEFAULT_FILE_NAME
Using wget:

import wget

custom_file_name = "/custom/path/custom_name.ext"
wget.download(url, custom_file_name)

Using urlretrieve:

import urllib

urllib.urlretrieve(url, custom_file_name)

Note that neither call creates missing parent directories, so the target directory must exist before you write to it.
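If the directory might be missing, create it first; a small sketch (the path is the made-up example from above):

import os
import urllib

custom_file_name = "/custom/path/custom_name.ext"
target_dir = os.path.dirname(custom_file_name)
if not os.path.exists(target_dir):
    os.makedirs(target_dir)  # create /custom/path if it does not exist yet
urllib.urlretrieve(url, custom_file_name)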
You need to look into the 'Content-Disposition' header; see the solution by kender above ("How to download a file using python in a 'smarter' way?"). Posting his solution, modified with the capability to specify an output folder:
from os.path import basename
import os
from urllib.parse import urlsplit
import urllib.request

def url2name(url):
    return basename(urlsplit(url)[2])

def download(url, out_path):
    localName = url2name(url)
    req = urllib.request.Request(url)
    r = urllib.request.urlopen(req)
    if 'Content-Disposition' in r.info():
        # If the response has Content-Disposition, we take the file name from it
        # (has_key is gone in Python 3, so use the in operator)
        localName = r.info()['Content-Disposition'].split('filename=')[1]
        if localName[0] == '"' or localName[0] == "'":
            localName = localName[1:-1]
    elif r.url != url:
        # if we were redirected, take the real file name from the final URL
        localName = url2name(r.url)
    localName = os.path.join(out_path, localName)
    f = open(localName, 'wb')
    f.write(r.read())
    f.close()

download("https://example.com/demofile", '/home/username/tmp')
I have just updated kender's answer for Python 3.
