Google Drive API Won't Download Files - Python

I am trying to download the latest version of my software from Google Drive (I don't care about security, I'm the only one running it, and I'm not sharing it), but it says the file is not found when it's downloading.
Here is my code:
import os
import re
import sys
import functions
from functions import *
import json
from Google import Create_Service
import io
from googleapiclient.http import MediaIoBaseDownload

versionList = []
onlineVersionList = []
version = ""

# Google API setup
CLIENT_SECRET_FILE = 'client_secret_GoogleCloud.json'
API_NAME = 'drive'
API_VERSION = 'v3'
SCOPES = ['https://www.googleapis.com/auth/drive', 'https://www.googleapis.com/auth/drive.install']
service = Create_Service(CLIENT_SECRET_FILE, API_NAME, API_VERSION, SCOPES)

# Searches for the highest version on Google Drive
page_token = None
while True:
    response = service.files().list(q="mimeType = 'application/vnd.google-apps.folder' and name contains 'Version' "
                                      "and not name contains 'ClockOS'",
                                    fields='nextPageToken, files(id, name)',
                                    pageToken=page_token).execute()
    for file in response.get('files', []):
        # Process change
        onlineVersionList.append(file.get('name'))
    page_token = response.get('nextPageToken', None)
    if page_token is None:
        break

for filename in os.listdir('Versions'):
    if filename.startswith('Version'):
        versionList.append(filename)
        print(f"Loaded {filename}")

def major_minor_micro(version):
    major, minor, micro = re.search(r'(\d+)\.(\d+)\.(\d+)', version).groups()
    return int(major), int(minor), int(micro)

def major_minor(version):
    major, minor, micro = re.search(r'(\d+)\.(\d+)\.(\d+)', version).groups()
    return int(major), int(minor)

def major(version):
    major, minor, micro = re.search(r'(\d+)\.(\d+)\.(\d+)', version).groups()
    return int(major)

if versionType() == "stable":
    latest = str(max(versionList, key=major))
    onlineLatest = str(max(onlineVersionList, key=major))
elif versionType() == "standard":
    latest = str(max(versionList, key=major_minor))
    onlineLatest = str(max(onlineVersionList, key=major_minor))
elif versionType() == "beta":
    latest = str(max(versionList, key=major_minor_micro))
    onlineLatest = str(max(onlineVersionList, key=major_minor_micro))
else:
    print("An error has occurred and a wrong version type was picked.")
    sys.exit()

if str(onlineLatest) > str(latest):
    # Gets the ID of the highest version
    page_token = None
    while True:
        response = service.files().list(q=f"mimeType = 'application/vnd.google-apps.folder' and name contains '{onlineLatest}'",
                                        fields='nextPageToken, files(id, name)',
                                        pageToken=page_token).execute()
        for file in response.get('files', []):
            # Process change
            print('Found file id: %s (%s)' % (file.get('name'), file.get('id')))
            onlineVersionID = file.get('name')
        page_token = response.get('nextPageToken', None)
        if page_token is None:
            break
    request = service.files().get_media(fileId=onlineVersionID)
    fh = io.BytesIO()
    downloader = MediaIoBaseDownload(fh, request)
    done = False
    while done is False:
        status, done = downloader.next_chunk()
        print("Download %d%%." % int(status.progress() * 100))

print("Ran", latest)
setInfo("version", latest)
os.chdir("Versions/" + latest)
cwd = os.getcwd()
sys.path.append(cwd)
import main
And I get this error after I log into Google:
googleapiclient.errors.HttpError:
<HttpError 404 when requesting (link to file)?alt=media returned "File not found: Version 0.1.1.". Details: "[
{'domain': 'global', 'reason': 'notFound', 'message': 'File not found: Version 0.1.1.',
'locationType': 'parameter', 'location': 'fileId'}
]">
It clearly finds it, as it returns the name I gave it, so why won't it download? Does anyone know how to fix this?
I already gave it the scopes it needs, added my account as a tester, and the file is stored in the same Google account as the one I log into.

From "It clearly finds it, as it returns the name I gave it", I thought that you might be able to retrieve the file. And when I saw your script, it seems that the filename is used as the file ID, with onlineVersionID = file.get('name') and request = service.files().get_media(fileId=onlineVersionID). So in this case, how about the following modification?
From:
onlineVersionID = file.get('name')
To:
onlineVersionID = file.get('id')
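With that one-line change applied, the end of the search loop and the download request line up like this (a minimal sketch of the corrected section, assuming the rest of the script above is unchanged):
page_token = None
while True:
    response = service.files().list(q=f"mimeType = 'application/vnd.google-apps.folder' and name contains '{onlineLatest}'",
                                    fields='nextPageToken, files(id, name)',
                                    pageToken=page_token).execute()
    for file in response.get('files', []):
        print('Found file id: %s (%s)' % (file.get('name'), file.get('id')))
        onlineVersionID = file.get('id')  # keep the ID, not the display name
    page_token = response.get('nextPageToken', None)
    if page_token is None:
        break

request = service.files().get_media(fileId=onlineVersionID)  # now receives a real file ID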

Related

Python Google Drive API not creating csv with list of all files within sub-folders

I am trying to get a list of files with names, dates, etc. into a CSV file from the files in my Google Drive folder, which has around 15k sub-folders (one level below the main folder), each containing about 1-10 files, amounting to around 65k files in total.
I used the following Python code to create my CSV file. It generates the information for all the sub-folders, but only for about 18k of the individual files within those sub-folders (the most recently uploaded files).
I am not quite sure why my code is not able to get the list of all the files in those sub-folders. Is there a limit I am hitting that stops me from getting the information for all the files?
Note: The folder I am storing the files in is a shared folder, but I don't think that should affect anything.
import httplib2
import os
from apiclient import discovery
from oauth2client import client
from oauth2client import tools
from oauth2client.file import Storage

try:
    import argparse
    flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
    flags = None

import pandas as pd

# If modifying these scopes, delete your previously saved credentials
# at ~/.credentials/drive-python-quickstart.json
SCOPES = 'https://www.googleapis.com/auth/drive.metadata.readonly'
CLIENT_SECRET_FILE = 'credentials.json'
APPLICATION_NAME = 'Drive API Python Quickstart'

folder_id = '########'  # Set to id of the parent folder you want to list (should be the content folder)
folder_list = []
all_folders = []
file_list = []

def get_credentials():
    """Gets valid user credentials from storage.

    If nothing has been stored, or if the stored credentials are invalid,
    the OAuth2 flow is completed to obtain the new credentials.

    Returns:
        Credentials, the obtained credential.
    """
    home_dir = os.path.expanduser('~')
    credential_dir = os.path.join(home_dir, '.credentials')
    if not os.path.exists(credential_dir):
        os.makedirs(credential_dir)
    credential_path = os.path.join(credential_dir, 'drive-python-quickstart.json')
    store = Storage(credential_path)
    credentials = store.get()
    if not credentials or credentials.invalid:
        flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
        flow.user_agent = APPLICATION_NAME
        if flags:
            credentials = tools.run_flow(flow, store, flags)
        else:  # Needed only for compatibility with Python 2.6
            credentials = tools.run(flow, store)
        print('Storing credentials to ' + credential_path)
    return credentials

def get_root_folder():  # Gets folder list from original root folder
    credentials = get_credentials()
    http = credentials.authorize(httplib2.Http())
    service = discovery.build('drive', 'v3', http=http)
    results = service.files().list(
        q="mimeType = 'application/vnd.google-apps.folder' and '" + folder_id + "' in parents",
        pageSize=1000, fields="nextPageToken, files(id, mimeType)",
        supportsAllDrives=True, includeItemsFromAllDrives=True).execute()
    folders = results.get('files', [])
    if not folders:
        print('No folders found.')
    else:
        for folder in folders:
            id = folder.get('id')
            folder_list.append(id)

def get_all_folders(folder_list):  # Creates list of all sub-folders under root; keeps going until no folders underneath
    for folder in folder_list:
        additional_folders = []
        credentials = get_credentials()
        http = credentials.authorize(httplib2.Http())
        service = discovery.build('drive', 'v3', http=http)
        results = service.files().list(
            q="mimeType = 'application/vnd.google-apps.folder' and '" + folder + "' in parents",
            pageSize=1000, fields="nextPageToken, files(id, mimeType)",
            supportsAllDrives=True, includeItemsFromAllDrives=True).execute()
        items = results.get('files', [])
        for item in items:
            id = item.get('id')
            additional_folders.append(id)
        if not additional_folders:
            pass
        else:
            all_folders.extend(additional_folders)
            folder_list = additional_folders
            get_all_folders(folder_list)

def merge():  # Merges sub-folder list with full list
    global full_list
    full_list = all_folders + folder_list
    full_list.append(folder_id)

def get_file_list():  # Runs over each folder generating the file list; for folders with over 1000 files uses nextPageToken to run additional requests; picks up metadata included in the request
    for folder in full_list:
        credentials = get_credentials()
        http = credentials.authorize(httplib2.Http())
        service = discovery.build('drive', 'v3', http=http)
        page_token = None
        while True:
            results = service.files().list(
                q="'" + folder + "' in parents",
                pageSize=1000,
                fields="nextPageToken, files(name, md5Checksum, mimeType, size, createdTime, modifiedTime, id, parents, trashed)",
                pageToken=page_token, supportsAllDrives=True, includeItemsFromAllDrives=True).execute()
            items = results.get('files', [])
            for item in items:
                name = item['name']
                checksum = item.get('md5Checksum')
                size = item.get('size', '-')
                id = item.get('id')
                mimeType = item.get('mimeType', '-')
                createdTime = item.get('createdTime', 'No date')
                modifiedTime = item.get('modifiedTime', 'No date')
                parents = item.get('parents')
                trashed = item.get('trashed')
                file_list.append([name, checksum, mimeType, size, createdTime, modifiedTime, id, parents, trashed])
            page_token = results.get('nextPageToken', None)
            if page_token is None:
                break
    files = pd.DataFrame(file_list, columns=['file_name', 'checksum_md5', 'mimeType', 'size', 'date_created', 'date_last_modified', 'google_id', 'google_parent_id', 'trashed'])
    files.drop(files[files['trashed'] == True].index, inplace=True)  # removes files with trashed == True, i.e. files that had been moved to the recycle bin
    foldernumbers = files['mimeType'].str.contains('application/vnd.google-apps.folder').sum()
    filenumbers = (~files['mimeType'].str.contains('application/vnd.google-apps.folder')).sum()
    print('Number of folders is: ', foldernumbers)
    print('Number of files is: ', filenumbers)
    files.to_csv('D:/GoogleAPIMetadata.csv', index=False)

if __name__ == '__main__':
    print('Collecting folder id list')
    get_root_folder()
    get_all_folders(folder_list)
    merge()
    print('Generating file metadata list')
    get_file_list()
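It may or may not be the cause here, but note that get_file_list() follows nextPageToken while get_root_folder() and get_all_folders() each request a single page (pageSize=1000) and never read nextPageToken, so any level with more than 1,000 sub-folders would be silently truncated. A paginated variant of the folder query, modeled on the loop get_file_list() already uses, might look like this sketch:
def list_subfolders(service, parent_id):
    # Return the IDs of all folders directly under parent_id,
    # following nextPageToken so results beyond the first page are kept.
    subfolder_ids = []
    page_token = None
    while True:
        results = service.files().list(
            q="mimeType = 'application/vnd.google-apps.folder' and '" + parent_id + "' in parents",
            pageSize=1000, fields="nextPageToken, files(id)",
            pageToken=page_token,
            supportsAllDrives=True, includeItemsFromAllDrives=True).execute()
        subfolder_ids.extend(f['id'] for f in results.get('files', []))
        page_token = results.get('nextPageToken')
        if page_token is None:
            break
    return subfolder_ids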

Unable to download large files from Google Drive using Python

I want to download large files from Google Drive using Python, and I did this using the code below:
import pickle
import os
import requests
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request

class DriveAPI:
    global SCOPES
    SCOPES = ['https://www.googleapis.com/auth/drive.file']

    def __init__(self):
        self.creds = None
        if os.path.exists('token.pickle'):
            with open('token.pickle', 'rb') as token:
                self.creds = pickle.load(token)
        if not self.creds or not self.creds.valid:
            if self.creds and self.creds.expired and self.creds.refresh_token:
                self.creds.refresh(Request())
            else:
                flow = InstalledAppFlow.from_client_secrets_file('credentials.json', SCOPES)
                self.creds = flow.run_local_server(port=0)
            with open('token.pickle', 'wb') as token:
                pickle.dump(self.creds, token)
        self.service = build('drive', 'v3', credentials=self.creds)
        results = self.service.files().list(pageSize=100, fields='files(id,name,createdTime)').execute()
        items = results.get('files', [])

    def download_file_from_google_drive(self, id, destination):
        def get_confirm_token(response):
            for key, value in response.cookies.items():
                if key.startswith('download_warning'):
                    return value
            return None

        def save_response_content(response, destination):
            CHUNK_SIZE = 32768
            with open(destination, "wb") as f:
                for chunk in response.iter_content(CHUNK_SIZE):
                    if chunk:  # filter out keep-alive new chunks
                        f.write(chunk)

        URL = "https://docs.google.com/uc?export=download"
        session = requests.Session()
        response = session.get(URL, params={'id': id}, stream=True)
        token = get_confirm_token(response)
        if token:
            params = {'id': id, 'confirm': token}
            response = session.get(URL, params=params, stream=True)
        save_response_content(response, destination)

if __name__ == "__main__":
    obj = DriveAPI()
    f_id = "File ID"
    file_name = "File Name"
    obj.service.permissions().create(body={"role": "reader", "type": "anyone"}, fileId=f_id).execute()
    obj.download_file_from_google_drive(f_id, file_name)
Using the above code I was able to download 2 GB files for a certain period of time, roughly two months, but now I'm unable to download large files.
If I run this code now, only a 2.2 kB file is downloaded, and no errors are printed in the terminal.
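For what it's worth, a tiny file in place of the real content usually means the docs.google.com/uc endpoint returned an HTML interstitial (such as a virus-scan warning page) rather than the file itself. Since the script already builds an authorized Drive v3 service, one alternative worth trying is downloading through the API itself, as the first question above does; a minimal sketch, assuming f_id is reachable under the granted drive.file scope:
import io
from googleapiclient.http import MediaIoBaseDownload

def download_via_api(service, file_id, destination):
    # Chunked download through the Drive v3 API; no confirm-token handling needed.
    request = service.files().get_media(fileId=file_id)
    with open(destination, 'wb') as fh:
        downloader = MediaIoBaseDownload(fh, request)
        done = False
        while not done:
            status, done = downloader.next_chunk()
            print("Download %d%%." % int(status.progress() * 100))

# Usage: download_via_api(obj.service, f_id, file_name)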

When listing files in Google Drive, I get extra files

With this code I get extra files that are not in my Google Drive. I found these files on another disk, and now they show up when I request files from Google Drive, but I can't find them in my Google Drive itself. How can I get only the files that actually belong to my Google Drive?
from __future__ import print_function
import httplib2
import io
from apiclient import discovery, errors
from oauth2client import client, tools, file
from apiclient import http

try:
    import argparse
    flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
    flags = None

import auth

SCOPES = 'https://www.googleapis.com/auth/drive'
CLIENT_SECRET_FILE = 'credentials.json'
APPLICATION_NAME = 'Python Task "Cloud"'

authInst = auth.auth(SCOPES, CLIENT_SECRET_FILE, APPLICATION_NAME)
credentials = authInst.getCredentials()
http_for_drive = credentials.authorize(httplib2.Http())
drive_service = discovery.build('drive', 'v3', http=http_for_drive)

if __name__ == '__main__':
    store = file.Storage('token.json')
    creds = store.get()
    if not creds or creds.invalid:
        flow = client.flow_from_clientsecrets('credentials.json', SCOPES)
        creds = tools.run_flow(flow, store)
    service = discovery.build('drive', 'v3', http=creds.authorize(http_for_drive))

    # Call the Drive v3 API
    results = service.files().list(
        pageSize=10, fields="nextPageToken, files(id, name)").execute()
    items = results.get('files', [])
    dictItems = {}
    if not items:
        print('No files found.')
    else:
        print('Files:')
        for item in items:
            dictItems[item['name']] = item['id']
            print('{0} ({1})'.format(item['name'], item['id']))
I made a function which outputs the files and folders that are in your own Google Drive:
def retrieve_all_files(service):
    """RETURNS a list of files, where each file is a dictionary containing
    keys: [name, id, parents]
    """
    query = "trashed=false"
    page_token = None
    L = []
    while True:
        response = service.files().list(q=query,
                                        spaces='drive',
                                        fields='nextPageToken, files(id, name, parents, size, owners)',
                                        pageToken=page_token).execute()
        for file in response.get('files', []):  # The second argument is the default
            if file.get('owners')[0]['me']:
                L.append({"name": file.get('name'), "id": file.get('id'),
                          "parents": file.get('parents'), "size": file.get('size'),
                          "owners": file.get('owners')})
        page_token = response.get('nextPageToken', None)  # The second argument is the default
        if page_token is None:  # The base My Drive folder has None
            break
    return L
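A short usage sketch, assuming the drive_service client built earlier in the script; the file.get('owners')[0]['me'] check is what filters out items owned by other accounts:
my_files = retrieve_all_files(drive_service)
for f in my_files:
    print('{0} ({1})'.format(f['name'], f['id']))
print('Total files owned by me:', len(my_files))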

Get createdDate from Google Drive Python API for downloads

I want to create a Python script to back up Google Drive files as a bit of fun/learning, but I am stuck. My script below did work, but it set the last-modified and created dates of all the backed-up files on my local drive to the date they were backed up, instead of preserving the original created/modified dates from Google Drive.
Here is my script:
from __future__ import print_function
import sys, httplib2, os, datetime, io
from time import gmtime, strftime
from apiclient import discovery
import oauth2client
from oauth2client import client
from oauth2client import tools
from datetime import date

#########################################################################
# Fixing OSX El Capitan bug -> AttributeError: 'Module_six_moves_urllib_parse' object has no attribute 'urlencode'
os.environ["PYTHONPATH"] = "/Library/Python/2.7/site-packages"
#########################################################################

CLIENT_SECRET_FILE = 'client_secrets.json'
TOKEN_FILE = "drive_api_token.json"
SCOPES = 'https://www.googleapis.com/auth/drive'
APPLICATION_NAME = 'Drive File API - Python'
OUTPUT_DIR = str(date.today()) + "_drive_backup"

try:
    import argparse
    flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
    flags = None

def get_credentials():
    home_dir = os.path.expanduser('~')
    credential_dir = os.path.join(home_dir, '.credentials')
    if not os.path.exists(credential_dir):
        os.makedirs(credential_dir)
    credential_path = os.path.join(credential_dir, TOKEN_FILE)
    store = oauth2client.file.Storage(credential_path)
    credentials = store.get()
    if not credentials or credentials.invalid:
        flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
        flow.user_agent = APPLICATION_NAME
        if flags:
            credentials = tools.run_flow(flow, store, flags)
        else:  # Needed only for compatibility with Python 2.6
            credentials = tools.run(flow, store)
        print('Storing credentials to ' + credential_path)
    return credentials

def prepDest():
    if not os.path.exists(OUTPUT_DIR):
        os.makedirs(OUTPUT_DIR)
        return True
    return False

def downloadFile(file_name, file_id, file_createdDate, mimeType, service):
    request = service.files().get_media(fileId=file_id)
    if "application/vnd.google-apps" in mimeType:
        if "document" in mimeType:
            request = service.files().export_media(fileId=file_id,
                mimeType='application/vnd.openxmlformats-officedocument.wordprocessingml.document')
            file_name = file_name + ".docx"
        else:
            request = service.files().export_media(fileId=file_id, mimeType='application/pdf')
            file_name = file_name + ".pdf"
    print("Downloading -- " + file_name)
    response = request.execute()
    with open(os.path.join(OUTPUT_DIR, file_name), "wb") as wer:
        wer.write(response)

def listFiles(service):
    def getPage(pageTok):
        return service.files().list(q="mimeType != 'application/vnd.google-apps.folder'",
            pageSize=1000, pageToken=pageTok,
            fields="nextPageToken, files(id, name, createdDate, mimeType)").execute()
    pT = ''; files = []
    while pT is not None:
        results = getPage(pT)
        pT = results.get('nextPageToken')
        files = files + results.get('files', [])
    return files

def main():
    credentials = get_credentials()
    http = credentials.authorize(httplib2.Http())
    service = discovery.build('drive', 'v3', http=http)
    for item in listFiles(service):
        downloadFile(item.get('name'), item.get('id'), item.get('createdDate'), item.get('mimeType'), service)

if __name__ == '__main__':
    main()
To try to get the created date, you can see in the above script that I added createdDate, which looks like some of the metadata I can grab from the file:
https://developers.google.com/drive/v2/reference/files
But I don't know if I am grabbing that metadata correctly, and if so, how to actually assign it to my downloaded file.
EDIT: Really sorry, but I didn't specify an OS - this is for a Mac.
File v2 createdDate renamed in v3 to createdTime
The File reference you linked is for v2, but your code connects to the v3 service. When I ran your code, which uses createdDate from the v2 API, an error occurred (createdDate was an invalid metadata field).
I switched to the v3 File API, which lists the creation time as createdTime, and was able to retrieve the time without error.
File creation time changeable in Windows only
Linux/Unix does not allow setting a file's creation time, but it allows modification to the file's modified and access times via os.utime() (both times required by this function). The Drive API provides createdTime and modifiedTime but nothing for access time (which probably wouldn't make sense there), although the modification time could serve just as well for the access time.
In Windows, the file creation time could be set with win32file.SetFileTime.
Time conversion
Note that the times that are passed to the timestamp functions above are in seconds since epoch. The Drive API returns an ISO 8601 string that we convert to seconds with:
dt = datetime.datetime.strptime(dateTime, "%Y-%m-%dT%H:%M:%S.%fZ")
secs = int(dt.strftime("%s"))
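An aside: strftime("%s") is not a documented format code; it falls through to the platform C library, so it works on macOS/Linux but interprets the naive datetime as local time and is unavailable on Windows. A portable variant that treats the Drive timestamp as UTC (which the trailing Z indicates) would be something like:
import calendar, datetime

def dateToSeconds(dateTime):
    # Parse the ISO 8601/RFC 3339 string returned by the Drive API and
    # convert to seconds since the epoch, interpreting the time as UTC.
    dt = datetime.datetime.strptime(dateTime, "%Y-%m-%dT%H:%M:%S.%fZ")
    return calendar.timegm(dt.timetuple())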
Modifications
Replace all instances of createdDate with createdTime.
In listFiles() > getPage(), add modifiedTime to metadata fields:
def listFiles(service):
    def getPage(pageTok):
        return service.files().list(q="mimeType != 'application/vnd.google-apps.folder'",
            pageSize=1000, pageToken=pageTok,
            fields="nextPageToken, files(id, name, createdTime, modifiedTime, mimeType)").execute()
In main()'s for-loop, pass modifiedTime to downloadFile():
downloadFile(item.get('name'), item.get('id'), item.get('createdTime'), item.get('modifiedTime'), item.get('mimeType'), service)
In downloadFile(), add modifiedTime to the parameter list after file_createdTime.
Add these functions to set file timestamps:
def dateToSeconds(dateTime):
    return int(datetime.datetime.strptime(dateTime, "%Y-%m-%dT%H:%M:%S.%fZ").strftime("%s"))

def setFileTimestamps(fname, createdTime, modifiedTime):
    ctime = dateToSeconds(createdTime)
    mtime = dateToSeconds(modifiedTime)
    setFileCreationTime(fname, ctime)
    setFileModificationTime(fname, mtime)

def setFileModificationTime(fname, newtime):
    # Set access time to same value as modified time,
    # since Drive API doesn't provide access time
    os.utime(fname, (newtime, newtime))

def setFileCreationTime(fname, newtime):
    """http://stackoverflow.com/a/4996407/6277151"""
    if os.name != 'nt':
        # file creation time can only be changed in Windows
        return
    import pywintypes, win32file, win32con
    wintime = pywintypes.Time(newtime)
    winfile = win32file.CreateFile(
        fname, win32con.GENERIC_WRITE,
        win32con.FILE_SHARE_READ | win32con.FILE_SHARE_WRITE | win32con.FILE_SHARE_DELETE,
        None, win32con.OPEN_EXISTING,
        win32con.FILE_ATTRIBUTE_NORMAL, None)
    win32file.SetFileTime(winfile, wintime, None, None)
    winfile.close()
In downloadFile(), call setFileTimestamps() right after writing the file (as the last line of the function):
def downloadFile(file_name, file_id, file_createdTime, modifiedTime, mimeType, service):
    request = service.files().get_media(fileId=file_id)
    if "application/vnd.google-apps" in mimeType:
        if "document" in mimeType:
            request = service.files().export_media(fileId=file_id,
                mimeType='application/vnd.openxmlformats-officedocument.wordprocessingml.document')
            file_name = file_name + ".docx"
        else:
            request = service.files().export_media(fileId=file_id, mimeType='application/pdf')
            file_name = file_name + ".pdf"
    print("Downloading -- " + file_name)
    response = request.execute()
    prepDest()
    fname = os.path.join(OUTPUT_DIR, file_name)
    with open(fname, "wb") as wer:
        wer.write(response)
    setFileTimestamps(fname, file_createdTime, modifiedTime)
GitHub repo

Is it possible to use OAUTH 2 with the Google Reporting API?

I am currently using OAuth 1 for auth with the Reporting API with GData and Python. Is it possible to use OAuth 2? I can't find a reference that this is doable.
I wasn't able to find any reference for OAuth 2 and the Reporting API either, but by following the samples for the GData libraries (http://code.google.com/p/gdata-python-client/source/browse/#hg%2Fsamples%2Fapps) I was able to cobble this together:
#!/usr/bin/python
import sys
import os
import time
import gdata.gauth
import gdata.client
import httplib2
import oauth2client.file
import oauth2client.tools

REPORTING_URI = 'https://www.google.com/hosted/services/v1.0/reports/ReportingData'
REPORTING_XML_TEMPLATE = '''<?xml version="1.0" encoding="UTF-8"?>
<rest xmlns="google:accounts:rest:protocol"
    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
    <type>Report</type>
    <domain>%s</domain>
    <date>%s</date>
    <page>%s</page>
    <reportType>daily</reportType>
    <reportName>%s</reportName>
</rest>'''

OAUTH2FILENAME = 'oauth_20.dat'
OAUTH2JSONFILE = 'client_secrets.json'
OAUTH2SCOPES = 'https://www.google.com/hosted/services/v1.0/reports/ReportingData'
OAUTH2USERAGENT = 'REPORTING'
CLIENTSOURCE = 'REPORTING'

MISSING_OAUTHJSON_FILE_MESSAGE = """
WARNING: Please configure OAuth 2.0

To continue you will need to populate the client_secrets.json file:
%s
with information from the APIs Console <https://code.google.com/apis/console>.
""" % os.path.join(os.path.dirname(__file__), OAUTH2JSONFILE)

### Reporting
def RunReport(http_object, domain, report=None, date=None):
    if date is None:
        now = time.time()
        report_time = time.gmtime(now)
        date = time.strftime("%Y-%m-%d", report_time)
    if report is None:
        report = 'accounts'
    report_data = RequestReport(http_object, domain=domain, report=report, date=date)
    if not report_data:
        print 'No report data'
    return report_data

def RequestReport(http_object, domain=None, report=None, date=None):
    """Retrieves a report

    Args:
        domain: string
        report: string: accounts, activity, disk_space, email_clients, summary
        date: string: YYYY-MM-DD

    Returns:
        String, the report data
    """
    report_data = ''
    uri = REPORTING_URI
    if not report or report is None:
        return report_data
    if not date or date is None:
        return report_data
    if not domain or domain is None:
        return report_data
    page = 1
    while True:
        report_xml = REPORTING_XML_TEMPLATE % (domain, date, page, report)
        response = ''
        report_page = ''
        try:
            response, report_page = http_object.request(
                uri, method='POST', body=report_xml)
        except Exception, rexcept:
            print 'Exception: ', rexcept
            report_page = ''
            break
        if response.status != 200:
            print 'Error: ', response.status
            report_page = ''
            break
        if not report_page or report_page == 'End-Of-Report':
            break
        else:
            report_data += report_page
            page = page + 1
    return report_data

scopes = OAUTH2SCOPES
user_agent = OAUTH2USERAGENT
client_source = CLIENTSOURCE
str_oauth2file = OAUTH2FILENAME
str_oauthjsonfile = OAUTH2JSONFILE
domain = 'somedomain'
report_name = 'accounts'
client_id = 'string'
client_secret = 'string'
report_data = ''
oauth2_flow = ''

now = time.time()
report_time = time.gmtime(now)
report_date = time.strftime("%Y-%m-%d", report_time)

if not os.path.isfile(str_oauth2file):
    token = gdata.gauth.OAuth2Token(client_id=client_id,
        client_secret=client_secret, scope=scopes, user_agent=user_agent)
    uri = token.generate_authorize_url()
    print 'Please visit this URL to authorize the application:'
    print uri
    # Get the verification code from the standard input.
    code = raw_input('What is the verification code? ').strip()
    token.get_access_token(code)
    oauth2_flow = oauth2client.client.flow_from_clientsecrets(str_oauthjsonfile,
        scope=scopes, message=MISSING_OAUTHJSON_FILE_MESSAGE)

storage = oauth2client.file.Storage(str_oauth2file)
oauth2_credentials = storage.get()
if oauth2_credentials is None or oauth2_credentials.invalid:
    if not oauth2_flow:
        oauth2_flow = oauth2client.client.flow_from_clientsecrets(str_oauthjsonfile,
            scope=scopes, message=MISSING_OAUTHJSON_FILE_MESSAGE)
    print '\nYou must authorize access to the request APIS.\n'
    # Save the credentials in storage to be used in subsequent runs.
    oauth2_credentials = oauth2client.tools.run(oauth2_flow, storage)

http_oauth2_object = httplib2.Http()
http_oauth2_object = oauth2_credentials.authorize(http_oauth2_object)

report_data = RunReport(
    http_oauth2_object, domain, report=report_name, date=report_date)
if report_data:
    print report_data

sys.exit(0)
