I want to create a json file like
{
"a":["12","34","23",...],
"b":["13","14","45",....],
.
.
.
}
key should come from the list:
lis = ['a','b',...]
and value from the sql query "select id from" + i , where I am iterating through the list through "i". This query simply returns the column id.
Here is the sample code:
lis = ['a','b','c']
len_obj = len(lis)
with open("Dataset.json", 'w') as file:
for i in lis:
file.write(i)
obj_query = i + '_query'
obj_query = sf.query("select id from " + i)
jsondata = json.loads(json.dumps(obj_query['records']))
length = len(jsondata)
i = {}
k = 0
for j in range(length):
obj_id = jsondata[j]['Id']
# print("id " + obj_id)
if k == 0:
ids = "\"" + obj_id + "\""
k = 1
else:
ids = ids + ",\"" + obj_id + "\""
if count != len_obj - 1:
file.write(ids)
else:
file.write(ids)
count += 1
file.write("}")
final output should be like:
{
"a":["12","23",...],
"b":["234","456",...],
}
This is my first blog and 1st program also.
Please guide me through this.
Please forgive the indentation of the program as I am not able to write it here properly.
You should be able to condense the whole thing down to just this:
import json
tables = ["a", "b", "c", "d"]
data = {}
for t in tables:
results = sf.query("select id from %s" % t)["records"]
data[t] = [r["id"] for r in results]
with open("Dataset.json", "w") as f:
json.dump(data, f)
You can simply create a dictionary containing the values you are after and then convert it to json using json.dumps
import json
data = {}
data['a'] = ["12","34","23"]
data['b'] = ["13","14","45"]
json_data = json.dumps(data)
print json_data
#Jaco
lis = ['a','b','c']
with open("Dataset.json", 'w') as file:
for i in lis:
obj_query = i + '_query'
obj_query = sf.query("select id from " + i)
jsondata = json.loads(json.dumps(obj_query['records']))
length = len(jsondata)
# create dict
data1 = {}
k = 0
for j in range(length):
obj_id = jsondata[j]['Id']
# print("id " + obj_id)
if k == 0:
ids = obj_id
k = 1
else:
ids = ids + "," + obj_id
data1[i] = [ids]
json_data = json.dumps(data1)
file.write(json_data)
the response i got is
{"a":["12,23,34.."]}{"b":["23,45,..."]}{...}
Related
I have to save a dictionary on a seprate json file. The values for the dictionary are being scraped forom a website. I want the values to add up but with every new one the old one is replaced.
gar = -1
Pirmasfilmasvaroni = varoni[gar]
while (gar < 7):
gar = gar + 1
#atdaliju varonus atkariba no filnmas
#cik varoni
garums = len(Pirmasfilmasvaroni)
z = (garums-1)
u = (z-1)
count = -1
while (count < z):
count = count + 1
pirmais = Pirmasfilmasvaroni[count]
Psaturs = requests.get(pirmais)
if Psaturs.status_code == 200:
Pdati = Psaturs.text
Pinfo = json.loads(Pdati)
var = Pinfo['result']['properties']['name']
dic = {gar:[var]}
with open("Filmas_un_varoni_kas_tajas_piedalas.json", "w") as js:
json.dump(dic, js, indent=4)
varoni={}
gar = -1
Pirmasfilmasvaroni = varoni[gar]
# create an empty dictionary
dic = {}
while (gar < 7):
gar = gar + 1
#atdaliju varonus atkariba no filnmas
#cik varoni
garums = len(Pirmasfilmasvaroni)
z = (garums-1)
u = (z-1)
count = -1
while (count < z):
count = count + 1
pirmais = Pirmasfilmasvaroni[count]
Psaturs = requests.get(pirmais)
if Psaturs.status_code == 200:
Pdati = Psaturs.text
Pinfo = json.loads(Pdati)
var = Pinfo['result']['properties']['name']
# add the new key-value pair to the dictionary
dic[gar] = var
# write the dictionary to the json file
with open("Filmas_un_varoni_kas_tajas_piedalas.json", "w") as js:
json.dump(dic, js, indent=4)
Below is my code i am reading .txt file then making a list and then saving it to excel but in excel i am getting ('ip subnet/mask', ) but i want only (ip subnet/mask) in out put
Below are my code blocks
1.I read routing Table output from Txt file and create a list
2.then from 10.0.0.0/8 address space i remove routing table sybnets
3.I save the available IP,s to Available.txt file
4.Create List from Available.txt file
5.then i create excel file and then i save the list out put to excel in specific 10.x.x.x/16 sheet
import os
import re
import xlsxwriter
from netaddr import *
from openpyxl import load_workbook
def ip_adresses():
lst = []
for line in fstring:
for word in line.split():
result = pattern.search(word)
if result:
lst.append(word)
return lst
def write_excel(aaa, bbb, num):
bbb = sorted(bbb)
work11 = load_workbook(r'C:\Users\irfan\PycharmProjects\pythonProject\irfan4.xlsx')
sheet11 = work11[aaa]
count = sheet11.max_row
max1 = sheet11.max_row
for row1, entry in enumerate(bbb, start=1):
sheet11.cell(row=row1 + max1, column=1, value=entry)
work11.save("irfan4.xlsx")
os.chdir(r'C:\Users\irfan\PycharmProjects\pythonProject')
file = open('RR-ROUTING TABLE.txt')
fstring = file.readlines()
# declaring the regex pattern for IP addresses
pattern = re.compile(r'(10\.\d{1,3}\.\d{1,3}\.\d{1,3}[/])')
# initializing the list object
unique = []
# extracting the IP addresses
IPs = ip_adresses()
unique = list(dict.fromkeys(IPs))
ipv4_addr_space = IPSet(['10.0.0.0/8'])
ip_list = IPSet(list(unique))
print(ip_list)
available = ipv4_addr_space ^ ip_list
print()
f = open("Available.txt", "a")
f.write(str(available))
f.close
print(available)
workbook = xlsxwriter.Workbook('irfan4.xlsx')
worksheet = workbook.add_worksheet()
for row_num, data in enumerate(available):
worksheet.write(row_num, 0, data)
num = 0
while num <= 255:
worksheet = workbook.add_worksheet("10." + str(num) + ".0.0")
num += 1
workbook.close()
# CREATE AUDIT BOOK
##################################################
os.chdir(r'C:\Users\irfan\PycharmProjects\pythonProject')
file_2 = open('Available.txt')
fstring_2 = file_2.readlines()
def ip_adresses1():
lst = []
for line in fstring_2:
for word in line.split():
result = pattern.search(word)
if result:
lst.append(word)
return lst
List_A=ip_adresses1()
print(List_A[1])
get_list = []
num = 0
while num <= 255:
pattern_sheet = re.compile(r'(10\.' + str(num) + '\.\d{1,3}\.\d{1,3}[/])')
for get_ips in fstring_2:
result_ip = pattern_sheet.search(get_ips)
if result_ip:
get_list.append(get_ips)
sheet_name = ("10." + str(num) + ".0.0")
write_excel(sheet_name, get_list, num)
get_list = []
num += 1
enter code here
I have used re.sub function to remove characters from string:
def ip_adresses1():
lst = []
for line in fstring_2:
for word in line.split():
word = re.sub("IPSet", " ", word)
word = re.sub(",", " ", word)
word = re.sub("'", " ", word)
word = re.sub("\(", " ", word)
word = re.sub("\)", " ", word)
word = re.sub("\]", " ", word)
word = re.sub("\[", " ", word)
result = pattern.search(word)
if result:
lst.append(word)
return lst
I need help sorting my key-value pair. My output is in this url http://pastebin.com/ckKAtP5y.
However, what I've been trying to do is.
{
"courses": [
{
"professors": [
{
"first_name": "Zvezdelina",
"last_name": "Stankova",
"professor_url": "http://www.ratemyprofessors.com/ShowRatings.jsp?tid=375269",
"helpfullness": 4.3,
"clarity": 4.3,
"overall_rating": 4.3
}],
"course_name": "CHEM 1",
"course_mentioned_times": 37
},
{
"professors": [
{
"first_name": "Alan",
"last_name": "Shabel",
"professor_url": "http://www.ratemyprofessors.com/ShowRatings.jsp?tid=1309831",
"helpfullness": 3.9,
"clarity": 3.5,
"overall_rating": 3.7
}],
"course_name": "CHEMISTRY 231",
"course_mentioned_times": 50
}
]
So what I want to do is I want to compare 'CHEM' and 'CHEMISTRY' in "course_name" and just get me the most 'course_mentioned_times' and remove the other one. In this case I'd want CHEMISTRY 231 because it's mentioned 50 times.
Here's what I've been helped with so far.
if __name__ == "__main__":
import json
#'output_info.json is http://pastebin.com/ckKAtP5y
with open('output_info.json') as data_file:
data = json.load(data_file)
temp_data = data
greater = []
len1 = len(data['courses'])
len2 = len1
for i in range(0,len1):
for j in range(0, len2):
if i==j:
continue
if data['courses'][i]['course_name'][0] == temp_data['courses'][j]['course_name'][0]:
if data['courses'][i]['course_name'][1] == temp_data['courses'][j]['course_name'][1]:
if data['courses'][i]['course_name'][2] == temp_data['courses'][j]['course_name'][2]:
if data['courses'][i]['course_mentioned_times']> temp_data['courses'][j]['course_mentioned_times']:
greater.append(i)
else:
greater.append(j)
final = []
for i in greater:
if i not in final:
final.append(i)
list_order = []
for i in range(0,len(data['courses'])):
list_order.append(i)
new_final = []
for i in list_order:
if i not in final:
new_final.append(i)
for i in new_final:
if i!=new_final[0]:
i=i-1
data['courses'].pop(i)
# Writing the new json data back to data.json file.
with open('data.json', 'w') as f:
json.dump(data, f)
This code gives me an IndexError
data['courses'].pop(i)
IndexError: pop index out of range
After a lot of back and forth in question's comments:
#coding:utf-8
import json
filename = 'data.json'
with open(filename, 'r') as f:
data = json.load(f)
courses = data.get('courses', None)
if courses:
keys = sorted(set([course.get('course_name', None).strip().split()[0][0:3] for course in courses]))
results = {'courses': {}}
for key in keys:
results['courses'][key] = []
temp = {}
for course in courses:
course_name = course.get('course_name', None)
professors = course.get('professors', None)
if course_name.strip().split()[0][0:3] == key:
course_mentioned_times = course.get('course_mentioned_times')
temp[course_name] = {'course_mentioned_times':course_mentioned_times, 'professors': professors}
results['courses'][key] = temp
else:
raise Exception('No courses could be found on {}'.format(filename))
def get_most_mentioned(name):
name = name[0:3]
data = results.get('courses', None).get(name)
max_mentioned_times = max(map(lambda m: data.get(m, None).get('course_mentioned_times'), data.keys()))
most_mentioned = []
for course_name, values in data.items():
course_mentioned_times = values.get('course_mentioned_times', None)
if course_mentioned_times == max_mentioned_times:
most_mentioned.append({'course_name': course_name, 'course_mentioned_times': course_mentioned_times, \
'professors': values.get('professors')})
return most_mentioned
print "Course with most mentioned times:"
print "---------------------------------"
for key in keys:
print "[*] For Key '{}':".format(key)
for item in get_most_mentioned(key):
course_name = item.get('course_name', None)
print " Course Name: {}".format(course_name)
print " Mentioned Times: {}\n".format(item.get('course_mentioned_times'))
print " Professors:\n"
for i, professor in enumerate(item.get('professors', None), start=1):
print " {}) Full name: {} {}".format(i, professor.get('first_name'), professor.get('last_name'))
print " URL: {}".format(professor.get('professor_url'))
print " Helpfullness: {}".format(professor.get('helpfullness'))
print " Clarity: {}".format(professor.get('clarity'))
print " Overall_rating: {}".format(professor.get('overall_rating'))
print ""
print ""
import json
import collections
with open('output_info.json') as data_file:
data = json.load(data_file)
courses = data['courses']
courses_by_prefix = collections.defaultdict(list)
for course in courses:
prefix = course['course_name'].split(' ', 2)[0].upper()[:3]
courses_by_prefix[prefix].append(course)
results = []
for prefix, courselist in courses_by_prefix.items():
mosttimes = max(courselist, key=lambda c: c['course_mentioned_times'])
results.append(mosttimes)
print(results)
I'm programming a script that connects to an Oracle database and get the results into a log file. I want to get a output like this:
FEC_INCLUSION = 2005-08-31 11:43:48,DEBITO_PENDIENTE = None,CAN_CUOTAS = 1.75e-05,COD_CUENTA = 67084,INT_TOTAL = None,CAN_CUOTAS_ANTERIOR = None,COD_INVERSION = 1,FEC_MODIFICACION = 10/04/2012 09:45:22,SAL_TOT_ANTERIOR = None,CUOTA_COMISION = None,FEC_ULT_CALCULO = None,MODIFICADO_POR = CTAPELA,SAL_TOTAL = 0.15,COD_TIPSALDO = 1,MONTO_COMISION = None,COD_EMPRESA = 1,SAL_INFORMATIVO = None,COD_OBJETIVO = 5,SAL_RESERVA = None,INCLUIDO_POR = PVOROPE,APORTE_PROM = 0.0,COSTO_PROM = None,CREDITO_PENDIENTE = None,SAL_PROM = 0.0,
FEC_INCLUSION = 2005-08-31 11:43:49,DEBITO_PENDIENTE = None,CAN_CUOTAS = 0.0,COD_CUENTA = 67086,INT_TOTAL = None,CAN_CUOTAS_ANTERIOR = None,COD_INVERSION = 9,FEC_MODIFICACION = 25/02/2011 04:38:52,SAL_TOT_ANTERIOR = None,CUOTA_COMISION = None,FEC_ULT_CALCULO = None,MODIFICADO_POR = OPEJAMO,SAL_TOTAL = 0.0,COD_TIPSALDO = 1,MONTO_COMISION = None,COD_EMPRESA = 1,SAL_INFORMATIVO = None,COD_OBJETIVO = 5,SAL_RESERVA = None,INCLUIDO_POR = PVOROPE,APORTE_PROM = 0.0,COSTO_PROM = None,CREDITO_PENDIENTE = None,SAL_PROM = 0.0,
I created a dictionary with the query results:
def DictFactory(description,data):
column_names = [col[0] for col in description]
results = []
for row in data:
results.append(dict(zip(column_names,row)))
return results
Then I created this function which finally save the results into my log:
def WriteLog(log_file,header,data):
file_exist = os.path.isfile(log_file)
log = open(log_file,'a')
if not file_exist:
print "File does not exist, writing new log file"
open(log_file,'w').close()
mydata = DictFactory(header,data)
checkpoint_name = ReadCheckpointName()
string = ''
for m in mydata:
for k,v in m.items():
string = string + k + ' = ' + str(v) + ','
if k == checkpoint_name:
#print "KEY FOUND"
cur_checkpoint = v
cur_checkpoint = str(cur_checkpoint)
#print string
string = string + '\n'
print cur_checkpoint
log.write(string + '\n')
WriteCheckpoint(cur_checkpoint,checkpoint_file)
log.close()
This is the main function:
def GetInfo():
mypool = PoolToDB()
con = mypool.acquire()
cursor = con.cursor()
GetLastCheckpoint()
sql = ReadQuery()
#print sql
cursor.execute(sql)
data = cursor.fetchall()
WriteLog(log_file,cursor.description,data)
#WriteCsvLog(log_file,cursor.description,data)
cursor.close()
But I realized that it works if I use a query that fetch few records, however if I try to fetch many records my script never ends.
This is my output when I executed a query with 5000 records. As you can see it takes too long.
jballesteros#SplunkPorvenir FO_TIPSALDOS_X_CUENTA]$ python db_execution.py
Starting connection: 5636
GetLastCheckpoint function took 0.073 ms
GetLastCheckpoint function took 0.025 ms
ReadQuery function took 0.084 ms
File does not exist, writing new log file
DictFactory function took 23.050 ms
ReadCheckpointName function took 0.079 ms
WriteCheckpoint function took 0.204 ms
WriteLog function took 45112.133 ms
GetInfo function took 46193.033 ms
I'm pretty sure you know a much better way to do what I am trying to do.
This is the complete code:
#!/usr/bin/env python
# encoding: utf-8
import re
import sys
try:
import cx_Oracle
except:
print "Error: Oracle module required to run this plugin."
sys.exit(0)
import datetime
import re
import commands
import os
from optparse import OptionParser
import csv
import time
#################################
#### Database Variables ####
#################################
Config = {
"host" : "",
"user" : "",
"password" : "",
"instance" : "",
"port" : "",
}
Query = {
"sql" : "",
"checkpoint_datetype" : "",
"checkpoint_name" : "",
}
dir = '/home/jballesteros/PENS2000/FO_TIPSALDOS_X_CUENTA/'
connection_dir = '/home/jballesteros/PENS2000/Connection'
checkpoint_file = dir + 'checkpoint.conf'
log_file = '/var/log/Pens2000/FO_TIPSALDOS_X_CUENTA.csv'
internal_log = '/var/log/Pens2000/internal.log'
query = dir + 'query'
sys.path.append(os.path.abspath(connection_dir))
from db_connect_pool import *
def Timing(f):
def wrap(*args):
time1 = time.time()
ret = f(*args)
time2 = time.time()
print "%s function took %0.3f ms" % (f.func_name,(time2- time1)*1000.0)
return ret
return wrap
#Timing
def InternalLogWriter(message):
now = datetime.datetime.now()
log = open(internal_log, 'a')
log.write("%s ==> %s" % (now.strftime("%Y-%m-%d %H:%M:%S"),message))
log.close()
return
#Timing
def GetLastCheckpoint():
global cur_checkpoint
conf = open(checkpoint_file, 'r')
cur_checkpoint = conf.readline()
cur_checkpoint = cur_checkpoint.rstrip('\n')
cur_checkpoint = cur_checkpoint.rstrip('\r')
conf.close()
#Timing
def ReadQuery():
global cur_checkpoint
GetLastCheckpoint()
qr = open(query, 'r')
line = qr.readline()
line = line.rstrip('\n')
line = line.rstrip('\r')
Query["sql"], Query["checkpoint_datetype"],Query["checkpoint_name"] = line.split(";")
sql = Query["sql"]
checkpoint_datetype = Query["checkpoint_datetype"]
checkpoint_name = Query["checkpoint_name"]
if (checkpoint_datetype == "DATETIME"):
sql = sql + " AND " + checkpoint_name + " >= " + "TO_DATE('%s','YYYY-MM-DD HH24:MI:SS') ORDER BY %s" % (cur_checkpoint,checkpoint_name)
if (checkpoint_datetype == "NUMBER"):
sql = sql + " AND " + checkpoint_name + " > " + "%s ORDER BY %s" % (cur_checkpoint,checkpoint_name)
qr.close()
return str(sql)
#Timing
def ReadCheckpointName():
qr = open(query, 'r')
line = qr.readline()
line = line.rstrip('\n')
line = line.rstrip('\r')
Query["sql"], Query["checkpoint_datetype"],Query["checkpoint_name"] = line.split(";")
checkpoint_name = Query["checkpoint_name"]
return str(checkpoint_name)
#Timing
def LocateCheckPoint(description):
description
checkpoint_name = ReadCheckpointName()
#print checkpoint_name
#print description
startcounter = 0
finalcounter = 0
flag = 0
for d in description:
prog = re.compile(checkpoint_name)
result = prog.match(d[0])
startcounter = startcounter + 1
if result:
finalcounter = startcounter - 1
counterstr = str(finalcounter)
print "Checkpoint found in the array position number: " + counterstr
flag = 1
if (flag == 0):
print "Checkpoint did not found"
return finalcounter
#Timing
def DictFactory(description,data):
column_names = [col[0] for col in description]
results = []
for row in data:
results.append(dict(zip(column_names,row)))
return results
#Timing
def WriteCsvLog(log_file,header,data):
checkpoint_index = LocateCheckPoint(header)
file_exists = os.path.isfile(log_file)
with open(log_file,'ab') as csv_file:
headers = [i[0] for i in header]
csv_writer = csv.writer(csv_file,delimiter='|')
if not file_exists:
print "File does not exist, writing new CSV file"
csv_writer.writerow(headers) # Writing headers once
for d in data:
csv_writer.writerow(d)
cur_checkpoint = d[checkpoint_index]
cur_checkpoint = str(cur_checkpoint)
WriteCheckpoint(cur_checkpoint,checkpoint_file)
csv_file.close()
#Timing
def WriteLog(log_file,header,data):
file_exist = os.path.isfile(log_file)
log = open(log_file,'a')
if not file_exist:
print "File does not exist, writing new log file"
open(log_file,'w').close()
mydata = DictFactory(header,data)
checkpoint_name = ReadCheckpointName()
#prin #string = ''
for m in mydata:
for k,v in m.items():
string = string + k + ' = ' + str(v) + ','
if k == checkpoint_name:
#print "KEY FOUND"
cur_checkpoint = v
cur_checkpoint = str(cur_checkpoint)
#print string
string = string + '\n'
print cur_checkpoint
log.write(string + '\n')
WriteCheckpoint(cur_checkpoint,checkpoint_file)
log.close()
#Timing
def WriteCheckpoint(cur_checkpoint,conf_file):
conf = open(conf_file,'w')
conf.write(cur_checkpoint)
conf.close()
#Timing
def GetInfo():
mypool = PoolToDB()
con = mypool.acquire()
cursor = con.cursor()
GetLastCheckpoint()
sql = ReadQuery()
#print sql
cursor.execute(sql)
#data = cursor.fetchall()
#WriteLog(log_file,cursor.description,data)
#WriteCsvLog(log_file,cursor.description,data)
cursor.close()
def __main__():
parser = OptionParser()
parser.add_option("-c","--change- password",dest="pass_to_change",help="Change the password for database connection",metavar="1")
(options, args) = parser.parse_args()
if (options.pass_to_change):
UpdatePassword()
else:
GetInfo()
__main__()
This is a query sample:
SELECT COD_EMPRESA, COD_TIPSALDO, COD_INVERSION, COD_CUENTA, COD_OBJETIVO, CAN_CUOTAS, SAL_TOTAL, INT_TOTAL, SAL_RESERVA, APORTE_PROM, SAL_PROM, COSTO_PROM, SAL_TOT_ANTERIOR, FEC_ULT_CALCULO, INCLUIDO_POR, FEC_INCLUSION, MODIFICADO_POR, TO_CHAR(FEC_MODIFICACION,'DD/MM/YYYY HH24:MI:SS') AS FEC_MODIFICACION, CUOTA_COMISION, MONTO_COMISION, SAL_INFORMATIVO, CREDITO_PENDIENTE, DEBITO_PENDIENTE, CAN_CUOTAS_ANTERIOR FROM FO.FO_TIPSALDOS_X_CUENTA WHERE ROWNUM <=100000 AND FEC_INCLUSION >= TO_DATE('2005-08-31 11:43:49','YYYY-MM-DD HH24:MI:SS') ORDER BY FEC_INCLUSION
PS: I've really been searching in google and this forum about my question but I haven't found anything similar.
I have dictionary that takes data from a file and puts it in list. I want to make a search engine that when I type name or quantity or price of a component it will find all with that name and print info that it holds (price, quantity, category).
Input
I just can't make my script read info from lines in the file. The file's text looks like:
AMD A4-3300 2.5GHz 2-Core Fusion APU Box|5.179,00 din|58|opis|Procesor
AMD Athlon II X2 340 3.2GHz Box|4.299,00 din|8|opis|Procesor
INTEL Celeron G465 1.9GHz Box|3.339,00 din|46|opis|Procesor
INTEL Celeron Dual Core G550 2.6GHz Box|1.439,00 din|13|opis|Procesor
Output
Here is my code which should be a search engine for my components, I just don't know how I can take form list data and target that data full info for example I type key word like AMD and seach engine print all AMD components that have AMD in their name or price I put price range and I got all prices in that range. I tried some things but it wont work.Sorry for long time to respond.I translated my code, there may be some lines left out but I hope you get the picture.
def option_p_components():
option = 0
#component = []
components = []
while option == 0 :
option_comp = option_p_components_str()
option_k = int(raw_input("Chose option : ")
print "" \
""
if option_k != 1 and option_k != 2 :
error = "!!!Error!!!"
error_p = " you typed wrong command please try again ."
print "-" * 80
print error.center(80)
print error_p.center(80)
print "-" * 80
option = 0
if option_k == 1 :
option_p_d = 0
print "Components search "
print"-" * 80
cu = temp_comp(components)
print cu
print "X)Working with components(editing, deleting )"
print"-" * 80
print "1)Change components "
print "2)Editing components"
print "3)Delating componetns"
print "4)Components search "
print "5)Back"
print"-" * 80
option_p_d = int(raw_input("Chose option :"))
if Option_p_d == 2 :
option_d = 0
for I in range(5):
u_component_name = raw_input("Unesite naziv komponente :")
u_component_price= raw_input("Unestie cenu komponente:")
u_component_quantity = raw_input("Unesite kolicinu komponente :")
u_component_opis = raw_input("Unesite opis komponente :")
u_component_category = raw_input("Unesite kategoriju komponente:")
component = {"name_compo":u_komponenta_ime,
"price":u_komponenta_cena,
"quantity":u_komponenta_kolicina,
"opis":u_komponenta_opis,
"category":u_komponenta_kategorija}
upis_komponente = saving_components(component)
components.append(saving_components)
print"-" * 80
print "1)New component"
print "2)Back"
print"-" * 80
option_d = int(raw_input("Odaberite opciju :"))
if option_d == 1 :
option_k = 0
elif option_d == 2 :
option_p_komponenti()
elif option_k == 2 :
print "Back"
def saving_components(component):
final_komponenta = component["name_compo"] + "|" + component["price"] + "|" + componenta["quantity"] + "|"\
+ component["opis"] + "|" + component["category"]
file = open("Data/component.txt", "a")
file.write(final_component)
file.close
def reading_component(component):
file = open("Data/component.txt", "r")
for line in file :
name_comp, price, quantity, opis, category = line.split("|")
komponenta = {"name_compo": name_comp,
"price": price,
"quantity": quantity,
"opis" : opis,
"category": category}
# ovo izvlaci samo pojedinacne vrednosti iz recnika
compon_info = "Name: " + component["name_compo"] + "\n" + "price: " + component["Price"]+"\n" +\
"Quantity:" + component["quantity"] + "\n" + "Opis: " + komponenta["opis"] + \
"\n" + "category: " + component["category"] + "\n"
#print compon_info
component.append(component)
#print sortiranje(kompon_info)
#print sorted([compon_info])
#print compon_info.sort()
#Vrti koliko ima u fajlu for ...a to je 7
file.close()
return component
def temp_comp(components):
pretraga_po_opisu(komponente)
def pretraga_po_opisu(komponente):
kolicina = str(raw_input("Unesite kolicinu:"))
for komponenta in komponente:
if komponenta["kolicina"] == kolicina:
print komponenta["kolicina"]
return None
def pera(komponente, cena):
ulaz = input("Unesi")
list = komponente.pera("cena",cena)
All you need is csv.DictReader() together with a sequence of key names for each column:
with open(inputfilename, 'rb') as fileobj:
reader = csv.DictReader(fileobj,
('name_compon', 'price', 'quantity', 'something_else', 'category'),
delimiter='|')
for row in reader:
print row
where row is the dictionary you wanted.
If you want to look into using zip, you could always use it here:
component_dicts = []
components = ("name_compon", "price", "quanity", "category")
with open('/path/to/data') as f:
for line in f.readlines():
components_dicts.append(dict(zip(components, line.split("|")[:4])))
#slicing the first four elements because you never say which 4 out of 5 you wanted.
for c in components_dict:
print c
Here the line.split("|") method is creating a list of str's, dividing the string being read wherever the "|" character is found.
Then zip will return a list of tuples which you then feed into a dict:
# This is what it would look like after you zip the components tuple and the line.split("|") data
[(name_compon, 'AMD A4-3300 2.5GHz 2-Core Fusion APU Box'), (price, '5.179,00 din'), (quanity, 58), (type, opis)]