Optimize Python processing of JSON retrieved from the Facebook Graph API

I'm getting JSON data from the Facebook Graph API about:
my relationship with my friends
my friends' relationships with each other.
Right now my program looks like this (in Python pseudo code; note that some variables have been changed for privacy):
import json
import requests

# protected
_accessCode = "someAccessToken"
_accessStr = "?access_token=" + _accessCode
_myID = "myIDNumber"

r = requests.get("https://graph.facebook.com/" + _myID + "/friends/" + _accessStr)
raw = json.loads(r.text)
terminate = len(raw["data"])

# list used to store the friend/friend relationships
a = list()

for j in range(0, terminate + 1):
    # calculate terminating displacement:
    term_displacement = terminate - (j + 1)
    print("Currently processing: " + str(j) + " of " + str(terminate))
    for dj in range(1, term_displacement + 1):
        # construct urls based on the raw data:
        url = "https://graph.facebook.com/" + raw["data"][j]["id"] + "/friends/" + raw["data"][j + dj]["id"] + "/" + _accessStr
        # visit site *THIS IS THE BOTTLENECK*:
        reqTemp = requests.get(url)
        rawTemp = json.loads(reqTemp.text)
        if len(rawTemp["data"]) != 0:
            # data dumps to list which dumps to file
            a.append(str(raw["data"][j]["id"]) + "," + str(rawTemp["data"][0]["id"]))

outputFile = "C:/Users/franklin/Documents/gen/friendsRaw.csv"
output = open(outputFile, "w")

# write all me/friend relationships to file
for k in range(0, terminate):
    output.write(_myID + "," + raw["data"][k]["id"] + "\n")

# write all friend/friend relationships to file
for i in range(0, len(a)):
    output.write(a[i] + "\n")  # newline added so each pair lands on its own row

output.close()
So what it's doing is: first it calls my page and gets my friend list (this is allowed through the Facebook API using an access_token). Calling a friend's friend list is NOT allowed, but I can work around that by requesting a relationship between a friend on my list and another friend on my list. So in part two (the double for loops) I'm making another request to see if some friend, a, is also a friend of b (both of whom are on my list); if so, there will be a JSON object of length one with friend a's name.
But with about 357 friends there are literally thousands of page requests that need to be made; in other words, the program spends most of its time just waiting around for the JSON responses.
My question, then: can this be rewritten to be more efficient? Currently, due to security restrictions, calling a friend's friend list attribute is disallowed, and it doesn't look like the API will allow this. Are there any Python tricks that can make this run faster? Maybe parallelism?
Update: modified code is pasted below in the answers section.

Update: this is the solution I came up with. Thanks @DMCS for the FQL suggestion, but I decided to just use what I had. I will post the FQL solution when I get a chance to study the implementation. As you can see, this method just makes use of more condensed API calls.
Incidentally, for future reference, the API call limit is 600 calls per 600 seconds, per token and per IP, so for every unique IP address with a unique access token the number of calls is limited to 1 call per second. I'm not sure what that means for asynchronous calling, @Gerrat, but there is that.
import json
import requests

# protected
_accessCode = "someaccesscode"
_accessStr = "?access_token=" + _accessCode
_myID = "someidnumber"

r = requests.get("https://graph.facebook.com/"
                 + _myID + "/friends/" + _accessStr)
raw = json.loads(r.text)
terminate = len(raw["data"])

a = list()
for k in range(0, terminate - 1):
    friendID = raw["data"][k]["id"]
    friendName = raw["data"][k]["name"]
    url = ("https://graph.facebook.com/me/mutualfriends/"
           + friendID + _accessStr)
    req = requests.get(url)
    temp = json.loads(req.text)
    print("Processing: " + str(k + 1) + " of " + str(terminate))
    for j in range(0, len(temp["data"])):
        a.append(friendID + "," + temp["data"][j]["id"] + ","
                 + friendName + "," + temp["data"][j]["name"])

# dump contents to file:
outputFile = "C:/Users/franklin/Documents/gen/friendsRaw.csv"
output = open(outputFile, "w")
print("Dumping to file...")

# write all me/friend relationships to file
for k in range(0, terminate):
    output.write(_myID + "," + raw["data"][k]["id"]
                 + ",me," + str(raw["data"][k]["name"].encode("utf-8", "ignore")) + "\n")

# write all friend/friend relationships to file
for i in range(0, len(a)):
    output.write(str(a[i].encode("utf-8", "ignore")) + "\n")

output.close()
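For future readers: the waiting can also be overlapped using the standard library while roughly keeping to the 1-call-per-second budget mentioned above. This is only a sketch that reuses _accessStr and raw from the code above; I have not run it against the API:

import time
import json
import requests
from concurrent.futures import ThreadPoolExecutor

def fetch_mutual(friend):
    # One mutualfriends call per friend; friend is one entry from raw["data"].
    url = ("https://graph.facebook.com/me/mutualfriends/"
           + friend["id"] + _accessStr)
    return friend, json.loads(requests.get(url).text)

pairs = []
with ThreadPoolExecutor(max_workers=5) as pool:
    futures = []
    for friend in raw["data"]:
        futures.append(pool.submit(fetch_mutual, friend))
        time.sleep(1)  # submit roughly one call per second to stay inside 600 calls / 600 s
    for future in futures:
        friend, temp = future.result()
        for mutual in temp.get("data", []):
            pairs.append(friend["id"] + "," + mutual["id"] + ","
                         + friend["name"] + "," + mutual["name"])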

This isn't likely optimal, but I tweaked your code a bit to use Requests async method (untested):
import json
import requests
from requests import async

# protected
_accessCode = "someAccessToken"
_accessStr = "?access_token=" + _accessCode
_myID = "myIDNumber"

r = requests.get("https://graph.facebook.com/" + _myID + "/friends/" + _accessStr)
raw = json.loads(r.text)
terminate = len(raw["data"])

# list used to store the friend/friend relationships
a = list()

def add_to_list(reqTemp):
    rawTemp = json.loads(reqTemp.text)
    if len(rawTemp["data"]) != 0:
        # data dumps to list which dumps to file
        a.append(str(raw["data"][j]["id"]) + "," + str(rawTemp["data"][0]["id"]))

async_list = []
for j in range(0, terminate + 1):
    # calculate terminating displacement:
    term_displacement = terminate - (j + 1)
    print("Currently processing: " + str(j) + " of " + str(terminate))
    for dj in range(1, term_displacement + 1):
        # construct urls based on the raw data:
        url = "https://graph.facebook.com/" + raw["data"][j]["id"] + "/friends/" + raw["data"][j + dj]["id"] + "/" + _accessStr
        req = async.get(url, hooks={'response': add_to_list})
        async_list.append(req)

# gather up all the results
async.map(async_list)

outputFile = "C:/Users/franklin/Documents/gen/friendsRaw.csv"
output = open(outputFile, "w")
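A note for later readers: the async module shown above was eventually removed from requests; its closest descendant is the separate grequests package. Under that assumption, the same idea looks roughly like the sketch below, where urls stands for the relationship URLs built in the loop above and a is the results list:

import json
import grequests  # pip install grequests

def record(response, **kwargs):
    # Response hook; newer requests versions pass extra keyword arguments to hooks.
    data = json.loads(response.text)
    if len(data["data"]) != 0:
        a.append(data["data"][0]["id"])

reqs = [grequests.get(u, hooks={"response": record}) for u in urls]
grequests.map(reqs, size=10)  # size caps how many requests are in flight at once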

Related

Change API call in for loop python

I am trying to call several API datasets within a for loop, changing the call each time, and then append those datasets together into a larger dataframe.
I have written this code, which works for the first dataset but then returns this error on the next call:
`url = base + "max=" + maxrec + "&" "type=" + item + "&" + "freq=" + freq + "&" + "px=" +px + "&" + "ps=" + str(ps) + "&" + "r="+ r + "&" + "p=" + p + "&" + "rg=" +rg + "&" + "cc=" + cc + "&" + "fmt=" + fmt
TypeError: must be str, not Response`
Here is my current code
import requests
import pandas as pd

base = "http://comtrade.un.org/api/get?"
maxrec = "50000"
item = "C"
freq = "A"
px = "H0"
ps = "all"
r = "all"
p = "0"
rg = "2"
cc = "AG2"
fmt = "json"

comtrade = pd.DataFrame(columns=[])

for year in range(1991, 2018):
    ps = "{}".format(year)
    url = base + "max=" + maxrec + "&" "type=" + item + "&" + "freq=" + freq + "&" + "px=" +px + "&" + "ps=" + str(ps) + "&" + "r="+ r + "&" + "p=" + p + "&" + "rg=" +rg + "&" + "cc=" + cc + "&" + "fmt=" + fmt
    r = requests.get(url)
    x = r.json()
    new = pd.DataFrame(x["dataset"])
    comtrade = comtrade.append(new)
Let requests assemble the URL for you.
common_params = {
    "max": maxrec,
    "type": item,
    "freq": freq,
    # etc
}

for year in range(1991, 2018):
    response = requests.get(base, params=dict(common_params, ps=str(year)))
    response_data = response.json()
    new = pd.DataFrame(response_data["dataset"])
    comtrade = comtrade.append(new)
Disclaimer: the other answer is correct and you should use it.
However, your actual problem stems from the fact that you are overwriting r here:
r = requests.get(url)
x = r.json()
On the next iteration, r will still be that Response object and not the string you initialized it with in the first place. You could simply rename it to result to avoid that problem, though it is better to let the requests library do the work.
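For completeness, the minimal change along those lines keeps the hand-built URL and simply stops reusing r; a sketch of the same loop:

for year in range(1991, 2018):
    ps = str(year)
    url = (base + "max=" + maxrec + "&type=" + item + "&freq=" + freq + "&px=" + px
           + "&ps=" + ps + "&r=" + r + "&p=" + p + "&rg=" + rg + "&cc=" + cc + "&fmt=" + fmt)
    result = requests.get(url)  # a new name, so r keeps meaning "all"
    comtrade = comtrade.append(pd.DataFrame(result.json()["dataset"]))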

Automatic labeling of LDA generated topics

I'm trying to categorize customer feedback. I ran an LDA in Python and got the following output for 10 topics:
(0, u'0.559*"delivery" + 0.124*"area" + 0.018*"mile" + 0.016*"option" + 0.012*"partner" + 0.011*"traffic" + 0.011*"hub" + 0.011*"thanks" + 0.010*"city" + 0.009*"way"')
(1, u'0.397*"package" + 0.073*"address" + 0.055*"time" + 0.047*"customer" + 0.045*"apartment" + 0.037*"delivery" + 0.031*"number" + 0.026*"item" + 0.021*"support" + 0.018*"door"')
(2, u'0.190*"time" + 0.127*"order" + 0.113*"minute" + 0.075*"pickup" + 0.074*"restaurant" + 0.031*"food" + 0.027*"support" + 0.027*"delivery" + 0.026*"pick" + 0.018*"min"')
(3, u'0.072*"code" + 0.067*"gps" + 0.053*"map" + 0.050*"street" + 0.047*"building" + 0.043*"address" + 0.042*"navigation" + 0.039*"access" + 0.035*"point" + 0.028*"gate"')
(4, u'0.434*"hour" + 0.068*"time" + 0.034*"min" + 0.032*"amount" + 0.024*"pay" + 0.019*"gas" + 0.018*"road" + 0.017*"today" + 0.016*"traffic" + 0.014*"load"')
(5, u'0.245*"route" + 0.154*"warehouse" + 0.043*"minute" + 0.039*"need" + 0.039*"today" + 0.026*"box" + 0.025*"facility" + 0.025*"bag" + 0.022*"end" + 0.020*"manager"')
(6, u'0.371*"location" + 0.110*"pick" + 0.097*"system" + 0.040*"im" + 0.038*"employee" + 0.022*"evening" + 0.018*"issue" + 0.015*"request" + 0.014*"while" + 0.013*"delivers"')
(7, u'0.182*"schedule" + 0.181*"please" + 0.059*"morning" + 0.050*"application" + 0.040*"payment" + 0.026*"change" + 0.025*"advance" + 0.025*"slot" + 0.020*"date" + 0.020*"tomorrow"')
(8, u'0.138*"stop" + 0.110*"work" + 0.062*"name" + 0.055*"account" + 0.046*"home" + 0.043*"guy" + 0.030*"address" + 0.026*"city" + 0.025*"everything" + 0.025*"feature"')
Is there a way to automatically label them? I do have a CSV file with manually labeled feedback, but I do not want to supply these labels myself. I want the model to create labels. Is that possible?
The comments here link to another SO answer that links to a paper. Let's say you wanted to do the minimum to try to make this work. Here is an MVP-style solution that has worked for me: search Google for the terms, then look for keywords in the response.
Here is some working, though hacky, code:
pip install cssselect
then
from urllib.parse import urlencode, urlparse, parse_qs
from lxml.html import fromstring
from requests import get
from collections import Counter

def get_srp_text(search_term):
    # Fetch the Google search results page for the topic terms
    # (note: the query string is not URL-encoded here).
    raw = get(f"https://www.google.com/search?q={search_term}").text
    page = fromstring(raw)
    blob = ""
    for result in page.cssselect("a"):
        for res in result.findall("div"):
            blob += ' '
            blob += res.text if res.text else " "
            blob += ' '
    return blob

def blob_cleaner(blob):
    # Keep only alphanumeric characters and whitespace.
    return ''.join(e for e in blob if e.isalnum() or e.isspace())

def get_name_from_srp_blob(clean_blob):
    # Drop very short tokens, then name the topic after the two most common words.
    blob_tokens = list(filter(bool, map(lambda x: x if len(x) > 2 else '', clean_blob.split(' '))))
    c = Counter(blob_tokens)
    most_common = c.most_common(10)
    name = f"{most_common[0][0]}-{most_common[1][0]}"
    return name

pipeline = lambda x: get_name_from_srp_blob(blob_cleaner(get_srp_text(x)))
Then you can just get the topic words from your model, e.g.
topic_terms = "delivery area mile option partner traffic hub thanks city way"
name = pipeline(topic_terms)
print(name)
>>> City-Transportation
and
topic_terms = "package address time customer apartment delivery number item support door"
name = pipeline(topic_terms)
print(name)
>>> Parcel-Package
You could improve this a lot. For example, you could use POS tags to only find the most common nouns, then use those for the name. Or find the most common adjective and noun, and make the name "Adjective Noun". Even better, you could get the text from the linked sites, then run YAKE to extract keywords.
Regardless, this demonstrates a simple way to automatically name clusters, without directly using machine learning (though, Google is most certainly using it to generate the search results, so you are benefitting from it).
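As a rough sketch of the POS-tag idea (assuming NLTK plus its tokenizer and tagger data are installed; name_from_nouns is a made-up helper, not part of the code above):

import nltk
from collections import Counter

def name_from_nouns(blob, top_n=2):
    # Keep only noun tokens, then join the most frequent ones into a label.
    tokens = nltk.word_tokenize(blob)
    nouns = [word for word, tag in nltk.pos_tag(tokens) if tag.startswith("NN") and len(word) > 2]
    return "-".join(word for word, _ in Counter(nouns).most_common(top_n))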

Why Python program execution slows down when using functions?

So I have a rather general question I was hoping to get some help with. I put together a Python program that runs through and automates workflows at the state level for all the different counties. The entire program was created for research at school, not actual state work. Anyway, I have two designs, shown below. The first is an updated version; it takes about 40 minutes to run. The second shows the original work. Note that it is not a well structured design, yet it takes about five minutes to run the entire program. Could anybody give any insight into why there is such a difference between the two? The updated version is still ideal, as it is much more reusable (it can run and grab any dataset at the URL) and easier to understand. Furthermore, 40 minutes to get about a hundred workflows completed is still a plus. Also, this is still a work in progress; a couple of minor issues still need to be addressed in the code, but it is still a pretty cool program.
Updated Design
import os, sys, urllib2, urllib, zipfile, arcpy
from arcpy import env

path = os.getcwd()

def pickData():
    myCount = 1
    path1 = 'path2URL'
    response = urllib2.urlopen(path1)
    print "Enter the name of the files you need"
    numZips = raw_input()
    numZips2 = numZips.split(",")
    myResponse(myCount, path1, response, numZips2)

def myResponse(myCount, path1, response, numZips2):
    myPath = os.getcwd()
    for each in response:
        eachNew = each.split(" ")
        eachCounty = eachNew[9].strip("\n").strip("\r")
        try:
            myCountyDir = os.mkdir(os.path.expanduser(myPath + "\\counties" + "\\" + eachCounty))
        except:
            pass
        myRetrieveDir = myPath + "\\counties" + "\\" + eachCounty
        os.chdir(myRetrieveDir)
        myCount += 1
        response1 = urllib2.urlopen(path1 + eachNew[9])
        for all1 in response1:
            allNew = all1.split(",")
            allFinal = allNew[0].split(" ")
            allFinal1 = allFinal[len(allFinal)-1].strip(" ").strip("\n").strip("\r")
            numZipsIter = 0
            path8 = path1 + eachNew[9][0:len(eachNew[9])-2] + "/" + allFinal1
            downZip = eachNew[9][0:len(eachNew[9])-2] + ".zip"
            while numZipsIter < len(numZips2):
                if (numZips2[numZipsIter][0:3].strip(" ") == "NWI") and ("remap" not in allFinal1):
                    numZips2New = numZips2[numZipsIter].split("_")
                    if (numZips2New[0].strip(" ") in allFinal1 and numZips2New[1] != "remap" and numZips2New[2].strip(" ") in allFinal1) and (allFinal1[-3:] == "ZIP" or allFinal1[-3:] == "zip"):
                        urllib.urlretrieve(path8, allFinal1)
                        zip1 = zipfile.ZipFile(myRetrieveDir + "\\" + allFinal1)
                        zip1.extractall(myRetrieveDir)
                # maybe just have numzips2 (raw input) as the values before the county number
                # numZips2[numZipsIter][0:-7].strip(" ") in allFinal1 or numZips2[numZipsIter][0:-7].strip(" ").lower() in allFinal1) and (allFinal1[-3:]=="ZIP" or allFinal1[-3:]=="zip"
                elif (numZips2[numZipsIter].strip(" ") in allFinal1 or numZips2[numZipsIter].strip(" ").lower() in allFinal1) and (allFinal1[-3:] == "ZIP" or allFinal1[-3:] == "zip"):
                    urllib.urlretrieve(path8, allFinal1)
                    zip1 = zipfile.ZipFile(myRetrieveDir + "\\" + allFinal1)
                    zip1.extractall(myRetrieveDir)
                numZipsIter += 1

pickData()
# client picks shapefiles to add to map
# section for geoprocessing operations
# get the data frames
# add new data frame, title
# check spaces in ftp crawler

os.chdir(path)
env.workspace = path + "\\symbology\\"
zp1 = os.listdir(path + "\\counties\\")

def myGeoprocessing(layer1, layer2):
    # the code in this function is used for geoprocessing operations
    # it returns whatever output is generated from the tools used in the map
    try:
        arcpy.Clip_analysis(path + "\\symbology\\Stream_order.shp", layer1, path + "\\counties\\" + layer2 + "\\Streams.shp")
    except:
        pass
    streams = arcpy.mapping.Layer(path + "\\counties\\" + layer2 + "\\Streams.shp")
    arcpy.ApplySymbologyFromLayer_management(streams, path + '\\symbology\\streams.lyr')
    return streams
def makeMap():
    # original wetlands layers need to be entered as NWI_line or NWI_poly
    print "Enter the layer or layers you wish to include in the map"
    myInput = raw_input()
    counter1 = 1
    for each in zp1:
        print each
        print path
        zp2 = os.listdir(path + "\\counties\\" + each)
        for eachNew in zp2:
            # print eachNew
            if (eachNew[-4:] == ".shp") and ((myInput in eachNew[0:-7] or myInput.lower() in eachNew[0:-7]) or ((eachNew[8:12] == "poly" or eachNew[8:12] == 'line') and eachNew[8:12] in myInput)):
                print eachNew[0:-7]
                theMap = arcpy.mapping.MapDocument(path + '\\map.mxd')
                df1 = arcpy.mapping.ListDataFrames(theMap, "*")[0]
                # this is where we add our layers
                layer1 = arcpy.mapping.Layer(path + "\\counties\\" + each + "\\" + eachNew)
                if eachNew[7:11] == "poly" or eachNew[7:11] == "line":
                    arcpy.ApplySymbologyFromLayer_management(layer1, path + '\\symbology\\' + myInput + '.lyr')
                else:
                    arcpy.ApplySymbologyFromLayer_management(layer1, path + '\\symbology\\' + eachNew[0:-7] + '.lyr')
                # Assign legend variable for map
                legend = arcpy.mapping.ListLayoutElements(theMap, "LEGEND_ELEMENT", "Legend")[0]
                # add wetland layer to map
                legend.autoAdd = True
                try:
                    arcpy.mapping.AddLayer(df1, layer1, "AUTO_ARRANGE")
                    # geoprocessing steps
                    streams = myGeoprocessing(layer1, each)
                    # more geoprocessing options, add the layers to map and assign if they should appear in legend
                    legend.autoAdd = True
                    arcpy.mapping.AddLayer(df1, streams, "TOP")
                    df1.extent = layer1.getExtent(True)
                    arcpy.mapping.ExportToJPEG(theMap, path + "\\counties\\" + each + "\\map.jpg")
                    # Save map document to path
                    theMap.saveACopy(path + "\\counties\\" + each + "\\map.mxd")
                    del theMap
                    print "done with map " + str(counter1)
                except:
                    print "issue with map or already exists"
                counter1 += 1

makeMap()
Original Design
import os, sys, urllib2, urllib, zipfile, arcpy
from arcpy import env

response = urllib2.urlopen('path2URL')
path1 = 'path2URL'
myCount = 1
for each in response:
    eachNew = each.split(" ")
    myCount += 1
    response1 = urllib2.urlopen(path1 + eachNew[9])
    for all1 in response1:
        # print all1
        allNew = all1.split(",")
        allFinal = allNew[0].split(" ")
        allFinal1 = allFinal[len(allFinal)-1].strip(" ")
        if allFinal1[-10:-2] == "poly.ZIP":
            response2 = urllib2.urlopen('path2URL')
            zipcontent = response2.readlines()
            path8 = 'path2URL' + eachNew[9][0:len(eachNew[9])-2] + "/" + allFinal1[0:len(allFinal1)-2]
            downZip = str(eachNew[9][0:len(eachNew[9])-2]) + ".zip"
            urllib.urlretrieve(path8, downZip)
# Set the path to the directory where your zipped folders reside
zipfilepath = 'F:\Misc\presentation'
# Set the path to where you want the extracted data to reside
extractiondir = 'F:\Misc\presentation\counties'
# List all data in the main directory
zp1 = os.listdir(zipfilepath)
# Creates a loop which gives us each zipped folder automatically
# Concatenates zipped folder to original directory in variable done
for each in zp1:
    print each[-4:]
    if each[-4:] == ".zip":
        done = zipfilepath + "\\" + each
        zip1 = zipfile.ZipFile(done)
        extractiondir1 = extractiondir + "\\" + each[:-4]
        zip1.extractall(extractiondir1)
path = os.getcwd()
counter1 = 1
# get the data frames
# Create new layer for all files to be added to map document
env.workspace = "E:\\Misc\\presentation\\symbology\\"
zp1 = os.listdir(path + "\\counties\\")
for each in zp1:
    zp2 = os.listdir(path + "\\counties\\" + each)
    for eachNew in zp2:
        if eachNew[-4:] == ".shp":
            wetlandMap = arcpy.mapping.MapDocument('E:\\Misc\\presentation\\wetland.mxd')
            df1 = arcpy.mapping.ListDataFrames(wetlandMap, "*")[0]
            # print eachNew[-4:]
            wetland = arcpy.mapping.Layer(path + "\\counties\\" + each + "\\" + eachNew)
            # arcpy.Clip_analysis(path + "\\symbology\\Stream_order.shp", wetland, path + "\\counties\\" + each + "\\Streams.shp")
            streams = arcpy.mapping.Layer(path + "\\symbology\\Stream_order.shp")
            arcpy.ApplySymbologyFromLayer_management(wetland, path + '\\symbology\\wetland.lyr')
            arcpy.ApplySymbologyFromLayer_management(streams, path + '\\symbology\\streams.lyr')
            # Assign legend variable for map
            legend = arcpy.mapping.ListLayoutElements(wetlandMap, "LEGEND_ELEMENT", "Legend")[0]
            # add the layers to map and assign if they should appear in legend
            legend.autoAdd = True
            arcpy.mapping.AddLayer(df1, streams, "TOP")
            legend.autoAdd = True
            arcpy.mapping.AddLayer(df1, wetland, "AUTO_ARRANGE")
            df1.extent = wetland.getExtent(True)
            # Export the map to a JPEG
            arcpy.mapping.ExportToJPEG(wetlandMap, path + "\\counties\\" + each + "\\wetland.jpg")
            # Save map document to path
            wetlandMap.saveACopy(path + "\\counties\\" + each + "\\wetland.mxd")
            del wetlandMap
            print "done with map " + str(counter1)
            counter1 += 1
Have a look at this guide:
https://wiki.python.org/moin/PythonSpeed/PerformanceTips
Let me quote:
Function call overhead in Python is relatively high, especially compared with the execution speed of a builtin function. This strongly suggests that where appropriate, functions should handle data aggregates.
Effectively, this suggests not factoring something out into a function if it is going to be called hundreds of thousands of times.
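To see that per-call overhead directly, here is a rough micro-benchmark (exact numbers will vary by machine and Python version):

import timeit

# The same trivial work, inline versus wrapped in a function call.
inline = timeit.timeit("x = 2 + 2", number=10000000)
wrapped = timeit.timeit("f()", setup="def f(): return 2 + 2", number=10000000)
print("inline: %.2f s, as a function: %.2f s" % (inline, wrapped))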
In Python, functions won't be inlined, and calling them is not cheap. If in doubt, use a profiler to find out how many times each function is called and how long it takes on average. Then optimize.
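For example, the standard library profiler will show whether the time is really going into function calls or into the network and arcpy work; a minimal sketch (the profile file name is arbitrary):

import cProfile
import pstats

cProfile.run("makeMap()", "profile.out")  # or profile the whole script: python -m cProfile -o profile.out script.py
pstats.Stats("profile.out").sort_stats("cumulative").print_stats(15)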
You might also give PyPy a shot, as they have certain optimizations built in. Reducing the function call overhead in some cases seems to be one of them:
Python equivalence to inline functions or macros
http://pypy.org/performance.html

Twitter API 404 errors - Python (continued)

I'm trying to use GET users/lookup from the Twitter API in a Python script to identify screen names based on a list of user IDs. The script doesn't seem to be able to handle 404 errors; I assume the whole request with 100 user IDs is not found by Twitter, so the for loop doesn't even begin. How do I iterate over 100 user IDs at a time, while still respecting the rate limit, if one of the 100 IDs causes the whole request to 404? Is there a way to handle this error while still getting the response for the other IDs in the same request? My experiments with "except ValueError" didn't seem to solve this...
I'd very much appreciate any advice or tips you can give!
while total_request_counter <= request_total:
    while list_counter_last <= len(list) + 100:  # while last group of 100 items is lower or equal than total number of items in list
        while current_request_counter < 180:
            response = twitter.users.lookup(include_entities="false", user_id=",".join(list[list_counter_first:list_counter_last]))  # API query for 100 users
            for item in list[list_counter_first:list_counter_last]:  # parses twitter IDs in the list by groups of 100 to record results
                try:  # necessary for handling errors
                    results = str(response)
                    results = results[results.index("u'screen_name': u'", results.index(item)) + 18:results.index("',", results.index("u'screen_name': u'", results.index(item)) + 18)]  # looks for username section in the query output
                    text_file = open("output_IDs.txt", "a")  # opens current txt output / change path to desired output
                    text_file.write(str(item) + "," + results + "\n")  # adds twitter ID, info lookup result, and a line skip to the txt output
                    text_file.close()
                    error_counter = error_counter + 1
                    print str(item) + " = " + str(results)
                except:  # creates exception to handle errors
                    text_file = open("output_IDs.txt", "a")  # opens current txt output / change path to desired output
                    text_file.write(str(item) + "," + "ERROR " + str(response.headers.get('h')) + "\n")  # adds twitter ID, error code, and a line skip to the txt output
                    text_file.close()
                    print str(item) + " = ERROR"
                    continue
            print "Successfully processed " + str(error_counter) + " users of request number " + str(current_request_counter + 1) + ". " + str(179 - current_request_counter) + " requests left until break."  # confirms latest batch processing
            print ""
            list_counter_first = list_counter_first + 100  # updates list navigation to move on to next group of 100
            list_counter_last = list_counter_last + 100
            error_counter = 0  # resets error counter for the next round
            current_request_counter = current_request_counter + 1  # updates current request counter to stay below rate limit of 180
        t = str(datetime.now())
        print ""
        print "Taking a break, back in 16 minutes. Approximate time left: " + str((len(list)/18000*15) - (15*total_request_counter)) + " minutes. Current timestamp: " + t
        print ""
        current_request_counter = 0
        total_request_counter = total_request_counter + 1
        time.sleep(960)  # suspends activities for 16 minutes to respect rate limit
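For reference, a bare-bones sketch of the batching pattern described above, with the lookup call itself guarded so that one bad batch does not stop the loop (ids and twitter come from the surrounding script; the broad except is a placeholder for whatever HTTP error class the client library raises):

import time

for start in range(0, len(ids), 100):
    batch = ids[start:start + 100]
    try:
        response = twitter.users.lookup(include_entities="false", user_id=",".join(batch))
    except Exception as err:  # placeholder: catch the client's 404/HTTP error here
        print("batch starting at %d failed: %s" % (start, err))
        continue
    for user in response:
        print(user["id_str"] + "," + user["screen_name"])
    time.sleep(5)  # roughly 180 requests per 15-minute window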

Python's csv reader keep reading the same file

I'm having a problem with Python's csv reader: I want to open and read different CSV files, but it keeps reading the same one.
from csv import reader

alphabet = ["a", "b", "c"]
for letter in alphabet:
    csv_file = open('/home/desktop/csv/' + letter + '.csv', 'r')
    csv_data = reader(csv_file)
It seems to open the other files, but it keeps reading from the first one.
Is there a way to "clean" the reader or make it read another file? I even tried closing the CSV file, but it didn't work.
The full code is this:
from csv import reader

# Orario (timetable)
orario_csv_file = '/home/andrea/Scrivania/orario.csv'
orario_csv = open(orario_csv_file)
orario_data = reader(orario_csv)
orario = []

# Corsi (courses)
corsi = ["EDILIZIA", "EDILE-ARCHIT", "ELETTRONICA", "TECNOLOGIE_DI_INTERNET", "INFORMATICA", "GESTIONALE", "ENERGETICA", "MECCANICA", "CIVILE_ED_AMBIENTALE", "MEDICA", "ENGINEERING_SCIENCES"]
giorni = ["Lun", "Mar", "Mer", "Gio", "Ven"]

for row in orario_data:
    orario.append(row)

for corso in corsi:
    nome_corso_file = '/home/andrea/Scrivania/xml/' + corso + '.xml'
    nome_corso_xml = open(nome_corso_file, 'wt')
    nome_corso_xml.write('<?xml version="1.0"?>' + "\n")
    nome_corso_xml.write('<orario>' + "\n")
    nome_csv = corso + '_csv'
    nome_csv = '/home/andrea/Scrivania/csv/' + corso + '.csv'
    nome_corso_csv = open(nome_csv, 'rt')
    corso_data = reader(nome_corso_csv)
    nome_corso_xml.write('  <corso name="' + corso + '">' + "\n")
    for a in range(0, 3):
        nome_corso_xml.write('    <anno num="' + str(a+1) + '">' + "\n")
        for j in range(1, 6):
            nome_corso_xml.write('      <giorno name="' + orario[2][j] + '">' + "\n")
            for i in range(3, 12):
                lez = orario[i + a*12][j]
                if lez == "":
                    nome_corso_xml.write('        <lezione>' + "-" + '</lezione>' + "\n")
                else:
                    for riga in corso_data:
                        if riga[0] == lez:
                            if riga[2] == "":
                                nome_corso_xml.write('        <lezione name="' + lez + '">' + riga[1] + '</lezione>' + "\n")
                            else:
                                for g in range(0, len(riga)):
                                    if riga[g].lower() == orario[2][j].lower():
                                        nome_corso_xml.write('        <lezione name="' + lez + '">' + riga[g+1] + '</lezione>' + "\n")
                nome_corso_csv.seek(0)
            nome_corso_xml.write('      </giorno>' + "\n")
        nome_corso_xml.write('    </anno>' + "\n")
    nome_corso_xml.write('  </corso>' + "\n")
    nome_corso_xml.write('</orario>' + "\n")
    nome_corso_xml.close()
It opens "EDILIZIA.csv" and compiles "EDILIZIA.xml"; then it should open "EDILE-ARCHIT.csv" and compile its XML, but when it reads, it keeps reading from "EDILIZIA.csv".
Here are the .csv files that you need:
http://pastebin.com/kJhL8HpK
If you make it read EDILIZIA.csv first and then EDILE-ARCHIT.csv, it will keep using EDILIZIA.csv to compile the XML, but it should first open EDILIZIA.csv, compile EDILIZIA.xml, then read EDILE-ARCHIT.csv and compile EDILE-ARCHIT.xml.
If you take a look at the final XMLs, you'll see that EDILE-ARCHIT.xml only displays the subjects common to EDILIZIA.csv and EDILE-ARCHIT.csv.
It took a long time to figure out what you are doing here. To tell the truth your code is a mess - there are many unused variables and lines that make no sense at all. Anyway, your code reads the appropriate csv file each time, thus the error is not where you thought it was.
If I am right, orario.csv contains the timetable of each course (stored in corsi list) for three semesters or years, and the corso.csv files contain the room where subjects are held. So you want to merge the information into an XML file.
You only forgot one thing: to advance through orario.csv. Your code always merges the very first three anno blocks with the current corso. To fix it, you have to make two changes.
First in this for loop header:
for corso in corsi:
Modify to:
for num, corso in enumerate(corsi):
And when you assign lez:
lez = orario[i + a*12][j]
Modify to:
lez = orario[i + a*12*(num+1)][j]
Now it should work.
This code produces exactly the same result, but it uses Python's XML module to build the output file:
from csv import reader
import xml.etree.cElementTree as ET
import xml.dom.minidom as DOM

corsi = ["EDILIZIA", "EDILE-ARCHIT", "ELETTRONICA", "TECNOLOGIE_DI_INTERNET", "INFORMATICA", "GESTIONALE", "ENERGETICA", "MECCANICA", "CIVILE_ED_AMBIENTALE", "MEDICA", "ENGINEERING_SCIENCES"]

with open('orario.csv', 'r') as orario_csv:
    orario = reader(orario_csv)
    orario_data = [row for row in orario]

for num, corso in enumerate(corsi):
    with open(corso + '.csv', 'r') as corso_csv:
        corso_raw = reader(corso_csv)
        corso_data = [row for row in corso_raw]
    root_elem = ET.Element('orario')
    corso_elem = ET.SubElement(root_elem, 'corso')
    corso_elem.set('name', corso)
    for anno in range(0, 3):
        anno_elem = ET.SubElement(corso_elem, 'anno')
        anno_elem.set('num', str(anno + 1))
        for giorno in range(1, 6):
            giorno_elem = ET.SubElement(anno_elem, 'giorno')
            giorno_elem.set('name', orario_data[2][giorno])
            for lezione in range(3, 12):
                lez = orario_data[lezione + anno * 12 * (num + 1)][giorno]
                if lez == '':
                    lezione_elem = ET.SubElement(giorno_elem, 'lezione')
                    lezione_elem.text = '-'
                else:
                    for riga in corso_data:
                        if riga[0] == lez:
                            if riga[2] == '':
                                lezione_elem = ET.SubElement(giorno_elem, 'lezione')
                                lezione_elem.set('name', lez)
                                lezione_elem.text = riga[1]
                            else:
                                for g in range(0, len(riga)):
                                    if riga[g].lower() == orario_data[2][giorno].lower():
                                        lezione_elem = ET.SubElement(giorno_elem, 'lezione')
                                        lezione_elem.set('name', lez)
                                        lezione_elem.text = riga[g + 1]
    with open(corso + '_new.xml', 'w') as corso_xml:
        xml_data = DOM.parseString(ET.tostring(root_elem, method='xml')).toprettyxml(indent=' ')
        corso_xml.write(xml_data)
Cheers.
I think I may have spotted the cause of your problem.
The second item in your corsi list ends with a full stop. This means that you will be looking for the file "EDILE-ARCHIT..csv", which almost certainly does not exist. When you try to open the file, the open() call will throw an exception, and your program will terminate.
Try removing the trailing full stop, and running it again.
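For example, a defensive one-liner before the loop (this normalises the names rather than fixing the reader logic):

corsi = [corso.strip(" .") for corso in corsi]  # drop stray spaces and full stops from the course names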
