On a django server, I process uploaded zip files sent from a python script. But I am getting "" (a blank string) for file.content_type. What am I doing wrong?
@csrf_exempt
def Import( request ):
    if request.method != 'POST':
        return HttpResponseNotAllowed('Only POST here')
    if not request.FILES or not request.FILES.get( u'file' ):
        return HttpResponse('Must upload a file')
    file = request.FILES[u'file']
    if file.content_type == 'application/zip':
        unzipped_dir = unzip_file( file )
        uid = create_project( unzipped_dir )
        shutil.rmtree( unzipped_dir )
        py_ob = { }
        py_ob['success'] = uid is not None
        if uid is not None:
            py_ob['id'] = uid
        json_ob = simplejson.dumps(py_ob)
        return HttpResponse( json_ob, mimetype="application/json" )
    else:
        return HttpResponseNotAllowed( 'Only POST zip files here' )
This is the script which sends the zip file up:
import sys
import os
import requests

if len(sys.argv) < 5:
    print "pass in url, username, password, file"
else:
    url = sys.argv[1]
    username = sys.argv[2]
    password = sys.argv[3]
    phile = sys.argv[4]
    if os.path.exists(phile):
        files = {'file': open( phile, 'rb' )}
        r = requests.post( url, files=files, auth=( username, password ) )
        if r.status_code == 200:
            json_response = r.json()
            if json_response['success']:
                print "id: " + str( json_response['id'] )
            else:
                print "failure in processing bundle"
        else:
            print "server problem: " + str(r.status_code)
            print r.text
    else:
        print "cannot find file to upload"
The Content-Type header is completely arbitrary (and optional) and not a good way to detect whether or not you're dealing with a valid ZIP file. Have you made sure your client is actually supplying it?
Django's documentation tells us the same:
UploadedFile.content_type
The content-type header uploaded with the file (e.g. text/plain or application/pdf). Like any data supplied by the user, you shouldn’t trust that the uploaded file is actually this type. You’ll still need to validate that the file contains the content that the content-type header claims – “trust but verify.”
You should be using zipfile.is_zipfile instead.
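For example, a minimal sketch of that check inside your view (assuming unzip_file can work with the file object; note that zipfile.is_zipfile reads from the file, so rewind it afterwards):

import zipfile

file = request.FILES[u'file']
if zipfile.is_zipfile(file):
    file.seek(0)  # is_zipfile moves the file position, so rewind before unzipping
    unzipped_dir = unzip_file(file)
else:
    return HttpResponseNotAllowed('Only POST zip files here')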
Related
I want to send an email with an attachment (of any file extension) using the SendGrid API in Django. Here is my code for sending the mail.
views.py
def submitTicket(request):
    try:
        if request.method == 'POST':
            name = request.POST.get('name')
            email = request.POST.get('email')
            subject = request.POST.get('subject')
            comment = request.POST.get('comment')
            atchfile = request.FILES['fileinput']
            allinfo = " Name : " + name + "\n E-Mail : " + email + "\n Comment : " + comment
            recipients_list = ['abc#gmail.com']
            if allinfo:
                message = Mail(from_email='xxx#gmail.com',
                               to_emails=recipients_list,
                               subject=subject,
                               html_content=allinfo)
                with atchfile.open() as f:
                    data = f.read()
                    f.close()
                encoded_file = base64.b64encode(data).decode()
                attachedFile = Attachment(
                    FileContent(encoded_file),
                    FileName(atchfile.name),
                    FileType(atchfile.content_type),
                    Disposition('attachment')
                )
                message.attachment = attachedFile
                sg = SendGridAPIClient('0000000000000000000000000000000')
                sg.send(message)
        return HttpResponseRedirect('submitTicket')
    except Exception as e:
        print("Exception = ", e)
        return render(request, 'submitTicket.html')
I am getting the below error while trying to do this.
TypeError at /submitTicket expected str, bytes or os.PathLike object, not InMemoryUploadedFile
I think the issue is that the built-in open() function does not take an InMemoryUploadedFile as an argument; it normally expects a path to open.
However, because your atchfile is an InMemoryUploadedFile, which inherits from File, you can call open() on the file object itself. So I think you can do this instead:
with atchfile.open() as f:
    data = f.read()
encoded_file = base64.b64encode(data).decode()
attachedFile = Attachment(
    FileContent(encoded_file),
    FileName(atchfile.name),
    FileType(atchfile.content_type),
    Disposition('attachment')
)
Try this.
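As a side note, a Django uploaded file is already open for reading, so a slightly shorter sketch (same SendGrid helper classes assumed) could read it directly without the with block:

# read the uploaded file's bytes directly and base64-encode them for SendGrid
data = atchfile.read()
encoded_file = base64.b64encode(data).decode()
attachedFile = Attachment(
    FileContent(encoded_file),
    FileName(atchfile.name),
    FileType(atchfile.content_type),
    Disposition('attachment')
)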
I am writing a python script to fetch mail attachments through Graph API.
In the Graph Explorer, I can perfectly download file attachments by manually pressing the download button after calling:
https://graph.microsoft.com/v1.0/me/messages/{message-id}/attachments/{attachment-id}/$value
However, when trying to make the same request in my Python script, all I get returned is 'Response [200]' (so the request works, but the file is not reachable).
I try to make the request like this:
def get_mails_json():
    requestHeaders = {'Authorization': 'Bearer ' + result["access_token"], 'Content-Type': 'application/json'}
    queryResults = msgraph_request(graphURI + "/v1.0/me/messages?$filter=isRead ne true", requestHeaders)
    return json.dumps(queryResults)

try:
    data = json.loads(mails)
    values = data['value']
    for i in values:
        mail_id = i['id']
        mail_subj = i['subject']
        if i['hasAttachments'] != False:
            attachments = o365.get_attachments(mail_id)
            attachments = json.loads(attachments)
            attachments = attachments['value']
            for i in attachments:
                details = o365.get_attachment_details(mail_id, i["id"])
except Exception as e:
    print(e)

def get_attachment_details(mail, attachment):
    requestHeaders = {'Authorization': 'Bearer ' + result["access_token"], 'Content-Type': 'application/json'}
    queryResults = msgraph_request(graphURI + "/v1.0/me/messages/" + mail + "/attachments/" + attachment + '/$value', requestHeaders)
    return json.dumps(queryResults)
Is there a way for me to download the file at all through my Python script?
I found a simple solution to downloading a file through a python script!
I used chip's answer, found on this thread:
thread containing chip's answer
I make the request for the attachment like so:
def get_attachment_details(mail, attachment):
    requestHeaders = {'Authorization': 'Bearer ' + result["access_token"], 'Content-Type': 'application/file'}
    resource = graphURI + "/v1.0/me/messages/" + mail + "/attachments/" + attachment + '/$value'
    payload = {}
    results = requests.request("GET", resource, headers=requestHeaders, data=payload, allow_redirects=False)
    return results.content
This gets me the raw bytes of the file, which I then write to a file like so:
for i in attachments:
    details = o365.get_attachment_details(mail_id, i["id"])
    toread = io.BytesIO()
    toread.write(details)
    with open(i['name'], 'wb') as f:
        f.write(toread.getbuffer())
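As a side note, the BytesIO buffer isn't strictly necessary here; writing the returned bytes straight to the file should behave the same (a sketch under the same assumptions):

for i in attachments:
    details = o365.get_attachment_details(mail_id, i["id"])
    # results.content is already bytes, so it can be written directly
    with open(i['name'], 'wb') as f:
        f.write(details)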
I have the below code and want to write the stream of tweets to a text file. Is there a way to write the output to a text file from within the same code and save it in the working directory? I am an IDE lover and really don't like using the console. I am new to Python (2 weeks); I am an R / RStudio user.
I know I could use:
python filename.py > output.txt
I am currently using Rodeo, Python 3.6.1.
import oauth2 as oauth
import urllib.request as urllib

api_key = "##"
api_secret = "##"
access_token_key = "##-##"
access_token_secret = "##"

_debug = 0

oauth_token = oauth.Token(key=access_token_key, secret=access_token_secret)
oauth_consumer = oauth.Consumer(key=api_key, secret=api_secret)

signature_method_hmac_sha1 = oauth.SignatureMethod_HMAC_SHA1()

http_method = "GET"

http_handler = urllib.HTTPHandler(debuglevel=_debug)
https_handler = urllib.HTTPSHandler(debuglevel=_debug)

'''
Construct, sign, and open a twitter request
using the hard-coded credentials above.
'''
def twitterreq(url, method, parameters):
    req = oauth.Request.from_consumer_and_token(oauth_consumer,
                                                token=oauth_token,
                                                http_method=http_method,
                                                http_url=url,
                                                parameters=parameters)
    req.sign_request(signature_method_hmac_sha1, oauth_consumer, oauth_token)
    headers = req.to_header()

    if http_method == "POST":
        encoded_post_data = req.to_postdata()
    else:
        encoded_post_data = None
        url = req.to_url()

    opener = urllib.OpenerDirector()
    opener.add_handler(http_handler)
    opener.add_handler(https_handler)

    response = opener.open(url, encoded_post_data)
    return response

f = open("output.txt", "wb")

def fetchsamples():
    url = "https://stream.twitter.com/1.1/statuses/sample.json"
    parameters = []
    response = twitterreq(url, "GET", parameters)
    for line in response:
        f.write(line)

if __name__ == '__main__':
    fetchsamples()
    # f.close()
Besides the comment I made previously, I would suggest checking out this Stack Overflow question: how to direct output into a txt file in python in windows
To quote:
If you want to do it in Python then you would write:
with open('out.txt', 'w') as f:
    f.write(something)
Obviously this is just a trivial example. You'd clearly do more inside the with block.
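Applied to your script, a minimal sketch (same twitterreq function and URL assumed) would open the output file inside fetchsamples rather than at module level:

def fetchsamples():
    url = "https://stream.twitter.com/1.1/statuses/sample.json"
    response = twitterreq(url, "GET", [])
    # the stream yields raw bytes, so the file is opened in binary mode
    with open("output.txt", "wb") as f:
        for line in response:
            f.write(line)

if __name__ == '__main__':
    fetchsamples()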
I'm trying to use Google's app API to authorize my Python program to query google-cloud-print queues. I'm using all of the information from https://developers.google.com/cloud-print/docs/pythonCode. After including my login, password, and the client-id I created for the cloud app in Google, I still get 404 errors. The Gaia method returns nothing in the token dictionary. Has anyone had experience with this, or with using their new OAuth2 system? I can't seem to find anything on Google about this problem.
Here is my program with my login details redacted.
import base64
import httplib
import sys
import os
import time
import logging
import mimetools
import mimetypes  # used by Base64Encode below
import urllib
import urllib2
import optparse
import string
import ConfigParser
import json
CRLF = '\r\n'
BOUNDARY = mimetools.choose_boundary()
# The following are used for authentication functions.
FOLLOWUP_HOST = 'www.google.com/cloudprint'
FOLLOWUP_URI = 'select%2Fgaiaauth'
GAIA_HOST = 'www.google.com'
LOGIN_URI = '/accounts/ServiceLoginAuth'
LOGIN_URL = 'https://www.google.com/accounts/ClientLogin'
SERVICE = 'cloudprint'
OAUTH = '175351968146.apps.googleusercontent.com'
# The following are used for general backend access.
CLOUDPRINT_URL = 'http://www.google.com/cloudprint'
# CLIENT_NAME should be some string identifier for the client you are writing.
CLIENT_NAME = 'google-cloud-print'
# The following object is used in the sample code, but is not necessary.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def EncodeMultiPart(fields, files, file_type='application/xml'):
    """Encodes list of parameters and files for HTTP multipart format.
    Args:
        fields: list of tuples containing name and value of parameters.
        files: list of tuples containing param name, filename, and file contents.
        file_type: string if file type different than application/xml.
    Returns:
        A string to be sent as data for the HTTP post request.
    """
    lines = []
    for (key, value) in fields:
        lines.append('--' + BOUNDARY)
        lines.append('Content-Disposition: form-data; name="%s"' % key)
        lines.append('')  # blank line
        lines.append(value)
    for (key, filename, value) in files:
        lines.append('--' + BOUNDARY)
        lines.append(
            'Content-Disposition: form-data; name="%s"; filename="%s"'
            % (key, filename))
        lines.append('Content-Type: %s' % file_type)
        lines.append('')  # blank line
        lines.append(value)
    lines.append('--' + BOUNDARY + '--')
    lines.append('')  # blank line
    return CRLF.join(lines)
def GetUrl(url, tokens, data=None, cookies=False, anonymous=False):
    """Get URL, with GET or POST depending data, adds Authorization header.
    Args:
        url: Url to access.
        tokens: dictionary of authentication tokens for specific user.
        data: If a POST request, data to be sent with the request.
        cookies: boolean, True = send authentication tokens in cookie headers.
        anonymous: boolean, True = do not send login credentials.
    Returns:
        String: response to the HTTP request.
    """
    request = urllib2.Request(url)
    if not anonymous:
        if cookies:
            logger.debug('Adding authentication credentials to cookie header')
            request.add_header('Cookie', 'SID=%s; HSID=%s; SSID=%s' % (
                tokens['SID'], tokens['HSID'], tokens['SSID']))
        else:  # Don't add Auth headers when using Cookie header with auth tokens.
            request.add_header('Authorization', 'GoogleLogin auth=%s' % tokens['Auth'])
    request.add_header('X-CloudPrint-Proxy', 'api-prober')
    if data:
        request.add_data(data)
        request.add_header('Content-Length', str(len(data)))
        request.add_header('Content-Type', 'multipart/form-data;boundary=%s' % BOUNDARY)
    # In case the gateway is not responding, we'll retry.
    retry_count = 0
    while retry_count < 5:
        try:
            result = urllib2.urlopen(request).read()
            return result
        except urllib2.HTTPError, e:
            # We see this error if the site goes down. We need to pause and retry.
            err_msg = 'Error accessing %s\n%s' % (url, e)
            logger.error(err_msg)
            logger.info('Pausing %d seconds', 60)
            time.sleep(60)
            retry_count += 1
            if retry_count == 5:
                return err_msg
def GetCookie(cookie_key, cookie_string):
    """Extract the cookie value from a set-cookie string.
    Args:
        cookie_key: string, cookie identifier.
        cookie_string: string, from a set-cookie command.
    Returns:
        string, value of cookie.
    """
    logger.debug('Getting cookie from %s', cookie_string)
    id_string = cookie_key + '='
    cookie_crumbs = cookie_string.split(';')
    for c in cookie_crumbs:
        if id_string in c:
            cookie = c.split(id_string)
            return cookie[1]
    return None

def ConvertJson(json_str):
    """Convert json string to a python object.
    Args:
        json_str: string, json response.
    Returns:
        dictionary of deserialized json string.
    """
    j = {}
    try:
        j = json.loads(json_str)
        j['json'] = True
    except ValueError, e:
        # This means the format from json_str is probably bad.
        logger.error('Error parsing json string %s\n%s', json_str, e)
        j['json'] = False
        j['error'] = e
    return j
def GetKeyValue(line, sep=':'):
    """Return value from a key value pair string.
    Args:
        line: string containing key value pair.
        sep: separator of key and value.
    Returns:
        string: value from key value string.
    """
    s = line.split(sep)
    return StripPunc(s[1])

def StripPunc(s):
    """Strip punctuation from string, except for - sign.
    Args:
        s: string.
    Returns:
        string with punctuation removed.
    """
    for c in string.punctuation:
        if c == '-':  # Could be negative number, so don't remove '-'.
            continue
        else:
            s = s.replace(c, '')
    return s.strip()

def Validate(response):
    """Determine if JSON response indicated success."""
    if response.find('"success": true') > 0:
        return True
    else:
        return False
def GetMessage(response):
    """Extract the API message from a Cloud Print API json response.
    Args:
        response: json response from API request.
    Returns:
        string: message content in json response.
    """
    lines = response.split('\n')
    for line in lines:
        if '"message":' in line:
            msg = line.split(':')
            return msg[1]
    return None

def ReadFile(pathname):
    """Read contents of a file and return content.
    Args:
        pathname: string, (path)name of file.
    Returns:
        string: contents of file.
    """
    try:
        f = open(pathname, 'rb')
        try:
            s = f.read()
        except IOError, e:
            logger.error('Error reading %s\n%s', pathname, e)
        finally:
            f.close()
        return s
    except IOError, e:
        logger.error('Error opening %s\n%s', pathname, e)
        return None
def WriteFile(file_name, data):
    """Write contents of data to a file_name.
    Args:
        file_name: string, (path)name of file.
        data: string, contents to write to file.
    Returns:
        boolean: True = success, False = errors.
    """
    status = True
    try:
        f = open(file_name, 'wb')
        try:
            f.write(data)
        except IOError, e:
            logger.error('Error writing %s\n%s', file_name, e)
            status = False
        finally:
            f.close()
    except IOError, e:
        logger.error('Error opening %s\n%s', file_name, e)
        status = False
    return status

def Base64Encode(pathname):
    """Convert a file to a base64 encoded file.
    Args:
        pathname: path name of file to base64 encode..
    Returns:
        string, name of base64 encoded file.
    For more info on data urls, see:
        http://en.wikipedia.org/wiki/Data_URI_scheme
    """
    b64_pathname = pathname + '.b64'
    file_type = mimetypes.guess_type(pathname)[0] or 'application/octet-stream'
    data = ReadFile(pathname)
    # Convert binary data to base64 encoded data.
    header = 'data:%s;base64,' % file_type
    b64data = header + base64.b64encode(data)
    if WriteFile(b64_pathname, b64data):
        return b64_pathname
    else:
        return None
def GaiaLogin(email, password):
    """Login to gaia using HTTP post to the gaia login page.
    Args:
        email: string,
        password: string
    Returns:
        dictionary of authentication tokens.
    """
    tokens = {}
    cookie_keys = ['SID', 'LSID', 'HSID', 'SSID']
    email = email.replace('+', '%2B')
    # Needs to be some random string.
    galx_cookie = base64.b64encode('%s%s' % (email, time.time()))
    # Simulate submitting a gaia login form.
    form = ('ltmpl=login&fpui=1&rm=hide&hl=en-US&alwf=true'
            '&continue=https%%3A%%2F%%2F%s%%2F%s'
            '&followup=https%%3A%%2F%%2F%s%%2F%s'
            '&service=%s&Email=%s&Passwd=%s&GALX=%s' % (FOLLOWUP_HOST,
            FOLLOWUP_URI, FOLLOWUP_HOST, FOLLOWUP_URI, SERVICE, email,
            password, galx_cookie))
    login = httplib.HTTPS(GAIA_HOST, 443)
    login.putrequest('POST', LOGIN_URI)
    login.putheader('Host', GAIA_HOST)
    login.putheader('content-type', 'application/x-www-form-urlencoded')
    login.putheader('content-length', str(len(form)))
    login.putheader('Cookie', 'GALX=%s' % galx_cookie)
    logger.info('Sent POST content: %s', form)
    login.endheaders()
    logger.info('HTTP POST to https://%s%s', GAIA_HOST, LOGIN_URI)
    login.send(form)
    (errcode, errmsg, headers) = login.getreply()
    login_output = login.getfile()
    logger.info(headers)
    login_output.close()
    login.close()
    logger.info('Login complete.')
    if errcode != 302:
        logger.error('Gaia HTTP post returned %d, expected 302', errcode)
        logger.error('Message: %s', errmsg)
    for line in str(headers).split('\r\n'):
        if not line: continue
        (name, content) = line.split(':', 1)
        if name.lower() == 'set-cookie':
            for k in cookie_keys:
                if content.strip().startswith(k):
                    tokens[k] = GetCookie(k, content)
    if not tokens:
        logger.error('No cookies received, check post parameters.')
        return None
    else:
        logger.debug('Received the following authorization tokens.')
        for t in tokens:
            logger.debug(t)
        return tokens
def GetAuthTokens(email, password):
    """Assign login credentials from GAIA accounts service.
    Args:
        email: Email address of the Google account to use.
        password: Cleartext password of the email account.
    Returns:
        dictionary containing Auth token.
    """
    # First get GAIA login credentials using our GaiaLogin method.
    logger.debug("GetAuthTokens")
    tokens = GaiaLogin(email, password)
    print tokens
    if tokens:
        # We still need to get the Auth token.
        params = {'accountType': 'GOOGLE',
                  'Email': email,
                  'Passwd': password,
                  'service': SERVICE,
                  'source': CLIENT_NAME}
        stream = urllib.urlopen(LOGIN_URL, urllib.urlencode(params))
        for line in stream:
            if line.strip().startswith('Auth='):
                tokens['Auth'] = line.strip().replace('Auth=', '')
    return tokens
# All of the calls to GetUrl assume you've run something like this:
tokens = GetAuthTokens('email', 'password')
All of this code is straight from the google-cloud-print developer site.
Here is the last bit of the output.
INFO:__main__:Login complete.
ERROR:__main__:Gaia HTTP post returned 404, expected 302
ERROR:__main__:Message: Not Found
ERROR:__main__:No cookies received, check post parameters.
None
Thanks in advance!
Replace this code:
GAIA_HOST = 'www.google.com'
LOGIN_URI = '/accounts/ServiceLoginAuth'
with this:
GAIA_HOST = 'accounts.google.com'
LOGIN_URI = '/ServiceLoginAuth'
I had the same problem.
If you wish you can use a simple library and command line program I just published.
https://github.com/escube/GoogleCloudSpooler
I’m trying to make a program that will do the following:
- check if auth_file exists
- if yes -> read the file and try to log in using the data from that file
  - if the data is wrong -> request new data
- if no -> request the data, then create the file and fill it with the requested data
So far:
import json
import getpass
import os
import requests

filename = ".auth_data"
auth_file = os.path.realpath(filename)
url = 'http://example.com/api'
headers = {'content-type': 'application/json'}

def load_auth_file():
    try:
        f = open(auth_file, "r")
        auth_data = f.read()
        r = requests.get(url, auth=auth_data, headers=headers)
        if r.reason == 'OK':
            return auth_data
        else:
            print "Incorrect login..."
            req_auth()
    except IOError:
        f = file(auth_file, "w")
        f.write(req_auth())
        f.close()

def req_auth():
    user = str(raw_input('Username: '))
    password = getpass.getpass('Password: ')
    auth_data = (user, password)
    r = requests.get(url, auth=auth_data, headers=headers)
    if r.reason == 'OK':
        return user, password
    elif r.reason == "FORBIDDEN":
        print "Incorrect login information..."
        req_auth()
    return False
I have the following problem (understanding and applying the correct way):
I can't find a correct way of storing the data returned from req_auth() in auth_file, in a format that can be read and used by load_auth_file().
PS: Of course I'm a beginner in Python and I'm sure I have missed some key elements here :(
To read and write data, you can use json:
>>> with open('login.json','w') as f:
...     f.write(json.dumps({'user': 'abc', 'pass': '123'}))
>>> with open('login.json','r') as f:
...     data = json.loads(f.read())
>>> print data
{u'user': u'abc', u'pass': u'123'}
A few improvements I'd suggest:
(1) Have a function that tests the login (arguments: user, pwd) and returns True/False.
(2) Save the data inside req_auth, because req_auth is called only when you have incorrect/missing data.
(3) Add an optional argument tries=0 to req_auth, and test against it for a maximum number of tries.
(1):
def check_login(user, pwd):
    r = requests.get(url, auth=(user, pwd), headers=headers)
    return r.reason == 'OK'
for (2), you can use json (as described above), csv, etc. Both of those are extremely easy, though json might make more sense since you're already using it.
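For example, a minimal sketch of (2), assuming the same auth_file path from your question (save_auth and load_auth are just hypothetical helper names):

import json

def save_auth(user, pwd):
    # store the verified credentials so load_auth_file can reuse them later
    with open(auth_file, 'w') as f:
        json.dump({'user': user, 'pass': pwd}, f)

def load_auth():
    with open(auth_file) as f:
        data = json.load(f)
    return data['user'], data['pass']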
for (3):
def req_auth(tries=0):  # accept an optional argument for no. of tries
    # your existing code here
    if check_login(user, password):
        pass  # save data here
    else:
        if tries < 3:  # an exit condition and an error message:
            req_auth(tries + 1)  # increment no. of tries on every failed attempt
        else:
            print "You have exceeded the number of failed attempts. Exiting..."
There are a couple of things I would approach differently, but you're off to a good start.
Instead of trying to open the file initially, I'd check for its existence:
if not os.path.isfile(auth_file):
Next, when you're writing the output you should use a context manager:
with open(auth_file, 'w') as fh:
    fh.write(data)
And finally, as a storage option (not terribly secure), it might work well to put the information you're saving in JSON format:
userdata = dict()
userdata['username'] = raw_input('Username: ')
userdata['password'] = getpass.getpass('Password: ')

# saving
with open(auth_file, 'w') as fho:
    fho.write(json.dumps(userdata))

# loading
with open(auth_file) as fhi:
    userdata = json.loads(fhi.read())