Gdata Export Document----Conflict Error - python

So this is rather worrying--I hope that someone can give me a hand with this one.
I am using a Python script to download Google Docs spreadsheets and then back them up to our servers. Most of the time it works well, but every so often I get an error that looks like this:
gdata.service.RequestError: {'status': 409, 'body': '', 'reason': 'Conflict'}
Here is all of the code that I am using. Does somebody know if the Export function has some strange behavior that could be causing this?
"""QC_GoogleDoc_Spreadsheet_AutoLog

Author: Christopher James Johnson
Date: May 22, 2012
"""
try:
    from xml.etree import ElementTree
except ImportError:
    from elementtree import ElementTree
import gdata.spreadsheet.service
import gdata.service
import atom.service
import gdata.spreadsheet
import gdata.docs.service
import atom
import getopt
import sys
import string
import time
import shutil
import os
import getpass
import tempfile
import csv
import time
import datetime
import glob
def main():
    archiver = backUpper()

class backUpper():
    def __init__(self):
        gd = gdata.docs.service.DocsService()
        self.gd_client = gdata.docs.service.DocsService()
        self.gd_client.email = 'xxxx.xxxx'
        self.gd_client.password = 'xxxxxxxx'
        self.gd_client.source = 'Spreadsheets GData Sample'
        self.gd_client.ProgrammaticLogin()
        self.curr_key = ''
        self.curr_wksht_id = ''
        self.list_feed = None
        self.autoLogPath = ""

        spreadsheets_client = gdata.spreadsheet.service.SpreadsheetsService()
        spreadsheets_client.email = self.gd_client.email
        spreadsheets_client.password = self.gd_client.password
        spreadsheets_client.source = "My Fancy Spreadsheet Downloader"
        spreadsheets_client.ProgrammaticLogin()

        feed = spreadsheets_client.GetSpreadsheetsFeed()
        for i, entry in enumerate(feed.entry):
            if isinstance(feed, gdata.spreadsheet.SpreadsheetsSpreadsheetsFeed):
                if isinstance(entry, gdata.spreadsheet.SpreadsheetsSpreadsheet):
                    print entry.title.text
                    x = entry.id.text
                    print x
                    self.Download(entry)
        self.DeleteTemporaryFiles()
    def Download(self, entry):
        line = entry.id.text
        title = entry.title.text
        splitLine = line.split('/')
        key = splitLine[-1]

        backUpDir = R'\\cob-hds-1\compression\QC\QCing\otherFiles\GoogleDocBackUp' + '\\'
        now = datetime.datetime.now()
        hour = now.hour
        today = datetime.date.today()
        if not os.path.exists(backUpDir + str(today)):
            os.mkdir(backUpDir + str(today))
        if not os.path.exists(backUpDir + str(today) + '\\' + str(hour)):
            os.mkdir(backUpDir + str(today) + '\\' + str(hour))
        backupDir = backUpDir + str(today) + '\\' + str(hour)

        tempfile.tempdir = backupDir
        file_path = tempfile.mkstemp(suffix='.xls')
        uri = 'http://docs.google.com/feeds/documents/private/full/%s' % key

        spreadsheets_client = gdata.spreadsheet.service.SpreadsheetsService()
        spreadsheets_client.email = self.gd_client.email
        spreadsheets_client.password = self.gd_client.password
        spreadsheets_client.source = "My Fancy Spreadsheet Downloader"
        spreadsheets_client.ProgrammaticLogin()
        # ...
        docEntry = self.gd_client.GetDocumentListEntry(uri)
        docs_auth_token = self.gd_client.GetClientLoginToken()
        self.gd_client.SetClientLoginToken(spreadsheets_client.GetClientLoginToken())
        self.gd_client.Export(docEntry, file_path[1])
        shutil.copy(file_path[1], backupDir + '//' + title + '.xls')
        os.close(file_path[0])
        self.gd_client.SetClientLoginToken(docs_auth_token)
if __name__ == '__main__':
    main()
So the scary part: this just started happening THIS MORNING! Everything was fine before, and this morning something started going wrong with this and other Python scripts that use gdata. Please help!
Thanks!
EDIT: A co-worker of mine was working on one of these spreadsheets at the time, and both of our programs crashed. (Mine backs up the Google Docs and his writes to them.) If we are both working on the same spreadsheet at the same time, could that create this problem?
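If the 409 really is triggered by a concurrent edit, one workaround is to retry the export after a short back-off. This is only a sketch, not part of the original script: the helper name and retry policy are illustrative, and it assumes the conflict is transient and clears once the concurrent edit settles.
import time

import gdata.service

def export_with_retry(gd_client, doc_entry, out_path, retries=5, delay=2):
    """Retry gd_client.Export when the server answers 409 Conflict."""
    for attempt in range(retries):
        try:
            gd_client.Export(doc_entry, out_path)
            return
        except gdata.service.RequestError, e:
            # e.args[0] is the dict shown in the traceback, e.g. {'status': 409, ...}
            if e.args[0].get('status') != 409 or attempt == retries - 1:
                raise  # not a conflict, or out of retries: give up
            time.sleep(delay * (attempt + 1))  # back off, then try again
The Download method above could then call export_with_retry(self.gd_client, docEntry, file_path[1]) instead of calling Export directly.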

Related

Print command response into .csv table

I don't have much programming experience, but I need to send the output of a command to a .csv table. I managed to create this, but it prints only the first line of the table instead of the whole table, and I don't know how to proceed further with turning it into CSV.
Any help would be much appreciated.
from __future__ import print_function
from datetime import date
import sys
import os
import time
today1 = date.today().strftime('%Y_%m_%d')
strTime = time.strftime('%Y_%m_%d')
command = 'My command here'
cmd = session.command()
response = cmd.execute(command)
element_group = response.get_output()
table = element_group.groups()[0]
for cell in table[0]:
    print(cell.labels()[0] + ' , ' + '\t', end='')
print('\n')
for cell in table[5]:
    print(cell.value() + ' , ', end='')
print('\n')
I have tried the script in the description; I was expecting it to print the whole table and turn it into a .csv file.
I have figured it out. Here is the script I wanted:
from __future__ import print_function
import csv
from datetime import date
import sys
import os
import time
today1 = date.today().strftime('%Y_%m_%d')
strTime = time.strftime('%Y_%m_%d')
command = 'My command here'
cmd = session.command()
response = cmd.execute(command)
element_group = response.get_output()
table = element_group.groups()[0]
header = [cell.labels()[0] for cell in table[0]]
rows = [[cell.value() for cell in row] for row in table]
directory = 'location'
filename = directory + 'filename' + today1 + '.csv'
with open(filename, mode='w') as file:
    writer = csv.writer(file)
    writer.writerow(header)
    writer.writerows(rows)

CSV File not showing up at file location

So I am writing a script that runs alongside a simulation running in AirSim. It is (theoretically) continuously collecting data and writing it to a CSV file. However, when I go to the location where it should be saved, there is no file. Even if there were an error in my method of saving the data, the file itself should at least exist, but it is not there.
import setup_path
import airsim
import numpy as np
import os
import os.path
import string
import csv
import tempfile
import pprint
import csv
import datetime
client = airsim.MultirotorClient()
client.confirmConnection()
'''
First create a directory for all the csv files to store into.
'''
dirmain = r"C:\AirSimData"
if not os.path.exists(dirmain):
    os.mkdir(dirmain)
'''
Create format for file names
'''
run_date_and_time = datetime.datetime.now()
run_date_and_time_string = str(run_date_and_time)
extension = ".csv"
file_name_base = run_date_and_time_string + extension
imu = "imu"
gps = "gps"
magnetometer = "magnetometer"
barometer = "barometer"
gps_file_name = gps + file_name_base
'''Create csv files
'''
gps_file = open(r"gps_file_name",'w')
gps_header = ['lat','lon','alt']
with open(r"gps_file_name",'w') as gpscsvfile:
gpscsvwriter = csv.writer(gpscsvfile)
gpscsvwriter = gpscsvwriter.writerow(gps_header)
gpscsvfile.close()
while True:
    #state = client.getMultirotorState()
    #s = pprint.pformat(state)
    #print("state: %s" % s)
    #imu_data = client.getImuData()
    #s = pprint.pformat(imu_data)
    #print("imu_data: %s" % s)
    #barometer_data = client.getBarometerData()
    #s = pprint.pformat(barometer_data)
    #print("barometer_data: %s" % s)
    #magnetometer_data = client.getMagnetometerData()
    #s = pprint.pformat(magnetometer_data)
    #print("magnetometer_data: %s" % s)
    gps_data = client.getGpsData().gnss.geo_point
    alt = (gps_data.altitude)
    lat = (gps_data.latitude)
    lon = (gps_data.longitude)
    gps_data_struct = [lat,lon,alt]
    with open(r"gps_file_name",'w') as gpscsvfile:
        gpscsvwriter = csv.writer(gpscsvfile)
        gpscsvwriter = gpscsvwriter.writerow(gps_data_struct)
    gpscsvfile.close()
    #print("Altitude: %s\nLatitude %s\nLongitude %s" %(alt,lat,lon) )
    if False:
        break
Here you are creating a file literally named "gps_file_name":
with open(r"gps_file_name",'w') as gpscsvfile:
    gpscsvwriter = csv.writer(gpscsvfile)
    gpscsvwriter = gpscsvwriter.writerow(gps_header)
You should instead use the variables with the name elements that you created. os.path.join() is a safe way to join filenames with path names.
gps_file_name = gps + file_name_base
output_file = os.path.join(dirmain, gps_file_name)
# Should read something like this "C:\AirSimData\gps2021-01-21 13:37:39.867152.csv"
Then you can use it here.
with open(output_file, 'w') as gpscsvfile:
    gpscsvwriter = csv.writer(gpscsvfile)
    gpscsvwriter.writerow(gps_header)
# no close() needed: the with block closes the file automatically
The next problem is that your datetime string contains characters that are invalid in file names: colons (:) cannot be used in Windows file names, so you need to re-think that part.
One option could be to drop the colons and have your timestamp look like this:
run_date_and_time_string = run_date_and_time.strftime('%y-%m-%d_%H%M%S')
# 'C:\\AirSimData\\gps21-01-21_134531.csv'
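Putting the two fixes together, a minimal sketch of the GPS logging part might look like this. It assumes the same airsim client calls as in the question; note that the question's loop also reopens the file with mode 'w' on every iteration, which truncates it each time, so this sketch opens the file once and keeps writing to it.
import csv
import datetime
import os

import airsim

client = airsim.MultirotorClient()
client.confirmConnection()

dirmain = r"C:\AirSimData"
if not os.path.exists(dirmain):
    os.mkdir(dirmain)

# Colon-free timestamp, so the name is valid on Windows
stamp = datetime.datetime.now().strftime('%y-%m-%d_%H%M%S')
gps_file_name = os.path.join(dirmain, "gps" + stamp + ".csv")

# Open once; newline='' stops the csv module writing blank rows on Windows
with open(gps_file_name, 'w', newline='') as gpscsvfile:
    writer = csv.writer(gpscsvfile)
    writer.writerow(['lat', 'lon', 'alt'])
    while True:
        gps_data = client.getGpsData().gnss.geo_point
        writer.writerow([gps_data.latitude, gps_data.longitude, gps_data.altitude])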

Uploading local images to Microsoft Cognitive Face

(Error screenshot)
import sys
import os, time
import cognitive_face as CF
import global_variables as global_var
import urllib
import sqlite3
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
Key = global_var.key
CF.Key.set(Key)
BASE_URL = global_var.BASE_URL # Replace with your regional Base URL
CF.BaseUrl.set(BASE_URL)
def get_person_id():
    person_id = ''
    extractId = str(sys.argv[1])[-2:]
    connect = sqlite3.connect("Face-DataBase")
    c = connect.cursor()
    cmd = "SELECT * FROM Students WHERE ID = " + extractId
    c.execute(cmd)
    row = c.fetchone()
    person_id = row[3]
    connect.close()
    return person_id

if len(sys.argv) is not 1:
    currentDir = os.path.dirname(os.path.abspath(__file__))
    imageFolder = os.path.join(currentDir, "dataset/" + str(sys.argv[1]))
    person_id = get_person_id()
    for filename in os.listdir(imageFolder):
        if filename.endswith(".jpg"):
            print(filename)
            imgurl = urllib.request.pathname2url(os.path.join(imageFolder, filename))
            imgurl = imgurl[3:]
            print("imageurl = {}".format(imgurl))
            res = CF.face.detect(imgurl)
            if len(res) != 1:
                print("No face detected in image")
            else:
                res = CF.person.add_face(imgurl, global_var.personGroupId, person_id)
                print(res)
                time.sleep(6)
else:
    print("supply attributes please from dataset folder")
I think the images should be converted to a byte array, but I don't know how to do it. The local images have to be uploaded to the Cognitive API. I have tried many ways but cannot resolve the error. The line below is where the error occurs:
imgurl = urllib.request.pathname2url(os.path.join(imageFolder, filename))
Welcome to Stack Overflow, @arun.
First of all, as per here, the API you're using is deprecated, and you should switch to this one instead.
Second, in this new API there is a method called detect_with_stream (ref here) that makes the request to the Face Recognition endpoint using a byte stream instead of a URL (it uses different request headers than the URL-based method). This method accepts a stream of bytes containing your image. I've worked with another Cognitive Services API that performs text recognition, so I've faced this same choice between sending an image URL and sending the image byte stream. You can generate a byte stream from the file as follows:
image_data = open(image_path, "rb").read()
The variable image_data can be passed to the method.
Edit: Instructions on how to use the new API with the image bytestream
First, install the following pip package:
pip install azure-cognitiveservices-vision-face
Then, you can try this approach.
import sys
import os, time
import global_variables as global_var
from azure.cognitiveservices.vision.face import FaceClient
from msrest.authentication import CognitiveServicesCredentials
import urllib
import sqlite3
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning

requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

KEY = global_var.key
ENDPOINT = global_var.endpoint
face_client = FaceClient(ENDPOINT, CognitiveServicesCredentials(KEY))

def get_person_id():
    person_id = ''
    extractId = str(sys.argv[1])[-2:]
    connect = sqlite3.connect("Face-DataBase")
    c = connect.cursor()
    cmd = "SELECT * FROM Students WHERE ID = " + extractId
    c.execute(cmd)
    row = c.fetchone()
    person_id = row[3]
    connect.close()
    return person_id

if len(sys.argv) is not 1:
    currentDir = os.path.dirname(os.path.abspath(__file__))
    imageFolder = os.path.join(currentDir, "dataset/" + str(sys.argv[1]))
    person_id = get_person_id()
    for filename in os.listdir(imageFolder):
        if filename.endswith(".jpg"):
            print(filename)
            img_data = open(filename, "rb").read()
            res = face_client.face.detect_with_stream(img_data)
            if not res:
                print('No face detected from image {}'.format(filename))
                continue
            res = face_client.person_group_person.add_face_from_stream(global_var.personGroupId, person_id, img_data)
            print(res)
            time.sleep(6)
else:
    print("supply attributes please from dataset folder")
Edit 2: Notes on traversing all the files in a directory
OK @arun, your current problem stems from the fact that you're using os.listdir, which only lists the file names, so you don't have their paths. The quickest solution is to open each image inside the loop with:
img_data = open(os.path.join(imageFolder, filename), "rb").read()

Unable to open docx in python

The problem is that some files I created with Microsoft Word 2010 can't be opened with Python. Some open and some don't, and they're all created the same way.
At first I tried to open them via their path; that didn't work, so I tried to do it the simple way. Still no success. This is the error that I get: docx.opc.exceptions.PackageNotFoundError: Package not found at 'COMANDA_TRANSPORT_-_Grecia_SRL.docx'
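For context (this note is not from the original post): python-docx raises PackageNotFoundError both when the path does not resolve from the current working directory and when the file exists but is not a valid .docx zip package; Word also drops '~$...' lock files next to open documents, and those are not valid packages. A minimal check, reusing the question's own director idea:
import os
from docx import Document
from docx.opc.exceptions import PackageNotFoundError

# Build an absolute path so the lookup no longer depends on the
# current working directory
director = os.path.dirname(os.path.abspath(__file__))
path = os.path.join(director, 'COMANDA_TRANSPORT_-_Grecia_SRL.docx')
print(os.path.exists(path))  # confirm the file is actually there

try:
    document = Document(path)
except PackageNotFoundError:
    print('Missing, or not a valid .docx package: ' + path)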
Here's my spaghetti code:
import os
import re
import Database
import mysql.connector as mysql
from docx import Document
from docx.shared import Inches
from Database import tables

#=============================================#
# == Search for file == #
director = os.path.dirname(os.path.abspath(__file__))
lista_directoare = os.listdir(director)
print(lista_directoare)
print(lista_directoare.sort())

# == Last file == #
ultimul_fisier = lista_directoare[-1]
print('Last file: ' + ultimul_fisier)

def sort(fisier):
    fisier = re.search(r'\d+', ultimul_fisier).group()
    print(fisier)

string_ultimFisier = str(ultimul_fisier)
print(string_ultimFisier)
print(director + "\\" + string_ultimFisier)

#fisier = open('{}'.format(ultimul_fisier),"rb")
#fisier = open(director + "\\" + string_ultimFisier,"rb")
#document = Document(fisier)
document = Document('COMANDA_TRANSPORT_-_Grecia_SRL.docx')

for paragraph in document.paragraphs:
    if "pip" in paragraph.text:
        print("Am gasit")  # Romanian: "Found it"
        break
    else:
        print('Nu am gasit')  # Romanian: "Didn't find it"
        break

for table in tables:
    print(table)

document.save('test.docx')

Return XML Values from Multiple Files in Python?

So I am working on a browser-based program, written in Python, that parses XML data from multiple files in a directory and then returns the values of certain XML tags on the page. I have successfully returned the values from one of the XML files, but I am hoping to collect data from every file within the directory and return the values in spreadsheet format. How do I parse the data from every XML file? Also, the XML files are not static; there will be new files coming and going. Thanks! Below is my code:
from xml.dom.minidom import parseString
import os

path = 'C:\Vestigo\XML'
listing = os.listdir(path)
for infile in listing:
    print infile
    file = open(os.path.join(path,infile),'r')
    data = file.read()
    file.close()
    dom = parseString(data)
    xmlTag0 = dom.getElementsByTagName('Extrinsic')[0].toxml()
    xmlData0 = xmlTag0.replace('<Extrinsic>','').replace('</Extrinsic>','')
    xmlTag1 = dom.getElementsByTagName('DeliverTo')[0].toxml()
    xmlData1 = xmlTag1.replace('<DeliverTo>','').replace('</DeliverTo>','')
    xmlTag2 = dom.getElementsByTagName('Street1')[0].toxml()
    xmlData2 = xmlTag2.replace('<Street1>','').replace('</Street1>','')
    xmlTag3 = dom.getElementsByTagName('City')[0].toxml()
    xmlData3 = xmlTag3.replace('<City>','').replace('</City>','')
    xmlTag4 = dom.getElementsByTagName('State')[0].toxml()
    xmlData4 = xmlTag4.replace('<State>','').replace('</State>','')
    xmlTag5 = dom.getElementsByTagName('PostalCode')[0].toxml()
    xmlData5 = xmlTag5.replace('<PostalCode>','').replace('</PostalCode>','')
import cherrypy

class Root(object):
    def index(self):
        return ('Order Number:', ' ', xmlData0, '<br>Name: ', xmlData1, '<br>Street Address: ', xmlData2, '<br>City/State/Zip: ', xmlData3, ', ', xmlData4, ' ', xmlData5, ' ', """<br><br>Quit""")
    index.exposed = True

    def exit(self):
        raise SystemExit(0)
    exit.exposed = True

def start():
    import webbrowser
    cherrypy.tree.mount(Root(), '/')
    cherrypy.engine.start_with_callback(
        webbrowser.open,
        ('http://localhost:8080/',),
    )
    cherrypy.engine.block()

if __name__=='__main__':
    start()
EDIT: Updated with my solution below.
In order to pull data from every file in the directory I used this code below:
from xml.dom.minidom import parse, parseString
import os, glob, re
import cherrypy

class Root(object):
    def index(self):
        path = 'C:\Vestigo\XML'
        TOTALXML = len(glob.glob(os.path.join(path, '*.xml')))
        print TOTALXML
        output = []
        for XMLFile in glob.glob(os.path.join(path, '*.xml')):
            xmldoc = parse(XMLFile)
            order_number = xmldoc.getElementsByTagName('Extrinsic')[0].firstChild.data
            order_name = xmldoc.getElementsByTagName('DeliverTo')[0].firstChild.data
            street1 = xmldoc.getElementsByTagName('Street1')[0].firstChild.data
            state = xmldoc.getElementsByTagName('State')[0].firstChild.data
            zip_code = xmldoc.getElementsByTagName('PostalCode')[0].firstChild.data
            OUTPUTi = order_number+' '+order_name+' '+street1+' '+state+' '+zip_code
            output.append(OUTPUTi)  # collect every file's line, not just the last one
            print OUTPUTi
        return ('<br>'.join(output), """<br><br>Quit""")
    index.exposed = True

    def exit(self):
        raise SystemExit(0)
    exit.exposed = True

def start():
    import webbrowser
    cherrypy.tree.mount(Root(), '/')
    cherrypy.engine.start_with_callback(
        webbrowser.open,
        ('http://localhost:8080/',),
    )
    cherrypy.engine.block()

if __name__=='__main__':
    start()
Thanks for your help everyone, and for the tip on answering my own question Sheena!
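For reference, the same per-file extraction can also be written with the standard library's xml.etree.ElementTree instead of minidom. This is a sketch under the same tag-name assumptions as the question, not part of the original post:
import glob
import os
import xml.etree.ElementTree as ET

path = 'C:\Vestigo\XML'
for xml_file in glob.glob(os.path.join(path, '*.xml')):
    root = ET.parse(xml_file).getroot()
    # findtext('.//Tag') returns the text of the first matching descendant
    fields = [root.findtext('.//' + tag, '') for tag in
              ('Extrinsic', 'DeliverTo', 'Street1', 'City', 'State', 'PostalCode')]
    print ' '.join(fields)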
