write zip array vertical in csv - python

Is there a way to display zipped text vertically in a CSV? I tried many different combinations of '\n' and ',' but still can't get the array to be written vertically.
if __name__ == '__main__':  # start of program
    master = Tk()
    newDirRH = "C:/VSMPlots"
    FileName = "J123"
    TypeName = "1234"
    Field = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    Court = [5, 4, 1, 2, 3, 4, 5, 1, 2, 3]
    for field, court in zip(Field, Court):
        stringText = ','.join((str(FileName), str(TypeName), str(Field), str(Court)))
        newfile = newDirRH + "/Try1.csv"
        text_file = open(newfile, "w")
        x = stringText
        text_file.write(x)
        text_file.close()
    print "Done"
This is the kind of output I am looking for. With your code I can't seem to add new columns, as all the columns repeat 10 times.

You are not writing CSV data; you are writing Python string representations of lists. You write the whole Field and Court lists on each iteration of your loop, instead of writing field and court, and Excel sees the commas in the Python string representation:
J123,1234,[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],[5, 4, 1, 2, 3, 4, 5, 1, 2, 3]
J123,1234,[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],[5, 4, 1, 2, 3, 4, 5, 1, 2, 3]
etc.
while you wanted to write:
J123,1234,1,5
J123,1234,2,4
etc.
Use the csv module to produce CSV files:
import csv

with open(newfile, "wb") as csvfile:
    writer = csv.writer(csvfile)
    for field, court in zip(Field, Court):
        writer.writerow([FileName, TypeName, field, court])
Note the with statement; it takes care of closing the open file object for you. The csv module also makes sure everything is converted to strings.
If you want to write something only on the first row, keep a counter with your items; enumerate() makes that easy:
with open(newfile, "wb") as csvfile:
writer = csv.writer(csvfile)
# row of headers
writer.writerow(['FileName', 'TypeName', 'field', 'court'])
for i, (field, court) in enumerate(zip(Field, Court)):
row = [[FileName, TypeName] if i == 0 else ['', '']
writer.writerow(row + [field, court])
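For reference, here is a minimal, self-contained Python 3 sketch of the same fix (the Tk() call is dropped since it is not needed for the CSV output, and text mode with newline='' replaces Python 2's 'wb'):

import csv

FileName = "J123"
TypeName = "1234"
Field = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
Court = [5, 4, 1, 2, 3, 4, 5, 1, 2, 3]

with open("C:/VSMPlots/Try1.csv", "w", newline="") as csvfile:
    writer = csv.writer(csvfile)
    for field, court in zip(Field, Court):
        # write the individual paired items, not the whole lists
        writer.writerow([FileName, TypeName, field, court])

This writes J123,1234,1,5 on the first data row, J123,1234,2,4 on the second, and so on down the file.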


How to store data from text files into lists of integers [duplicate]

Hi, hope everyone is okay.
I am trying to find the simplest method to take data from a text file and store it in different variables. Below is the format of the text file:
TEXT FILE:
min:1,2,3,4,5,7,8,9
avg:1,2,3,4
max:1,2,3,4,5,1,2,3,44,55,32,12
I want to take each of these lines, remove the part before the numbers start (min, avg, max and the ':'), and store all the number data in separate variables under the appropriate names.
NOTE: the amount of numbers in each line may differ and shouldn't affect the code.
Desired result in Python:
min = [1,2,3,4,5,7,8,9]
avg = [1,2,3,4]
max = [1,2,3,4,5,1,2,3,44,55,32,12]
The code I have tried:
with open('input.txt', 'r') as input:
    input = input.read()
    input = input.strip().split(',')
After this part I am unsure which method would be best to achieve what I am trying to do.
Any help is appreciated!
There's no reasonable way to generate variables (by name) dynamically. Better to use a dictionary. Something like this:
my_dict = {}
with open('input.txt') as data:
    for line in map(str.strip, data):
        try:
            key, vals = line.split(':')
            my_dict[key.rstrip()] = list(map(int, vals.split(',')))
        except ValueError:
            pass

print(my_dict)
Output:
{'min': [1, 2, 3, 4, 5, 7, 8, 9], 'avg': [1, 2, 3, 4], 'max': [1, 2, 3, 4, 5, 1, 2, 3, 44, 55, 32, 12]}
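If you then want convenient names for the lists, just pull them out of the dictionary explicitly; a tiny sketch (names like min_vals are used here because min and max would shadow the built-ins):

min_vals = my_dict['min']   # [1, 2, 3, 4, 5, 7, 8, 9]
avg_vals = my_dict['avg']   # [1, 2, 3, 4]
max_vals = my_dict['max']   # [1, 2, 3, 4, 5, 1, 2, 3, 44, 55, 32, 12]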
Using exec for string evaluation. Only do this on trusted data, to avoid injection attacks.
with open('input.txt', 'r') as fd:
    data = fd.read()

# list of lines
lines = data.split('\n')
# python code format
code_format = '\n'.join("{} = [{}]".format(*line.partition(':')[::2]) for line in lines if line)
# execute the string as python code
exec(code_format)

print(avg)
# [1, 2, 3, 4]
Notice that there is a further side effect in this code evaluation: some of the variable identifiers shadow the built-in functions min and max. So, if after the execution of the code you try to call those built-in functions, you will get TypeError: 'list' object is not callable.
One way to re-approach the problem would be to pickle the objects: use pickle.dump to save an object to a file and pickle.load to retrieve it; see the docs.
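A minimal sketch of the pickle idea, assuming the dictionary built above:

import pickle

with open('data.pkl', 'wb') as f:
    pickle.dump(my_dict, f)      # serialize the dict to a binary file

with open('data.pkl', 'rb') as f:
    restored = pickle.load(f)    # read it back as the same dict

assert restored == my_dict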
This is how you store it in a python dictionary:
txtdict = {}
with open('input.txt', 'r') as f:
    for line in f:
        if line.strip():
            name = line.split(':')[0]
            txtdict[name] = [int(i) for j in line.strip().split(':')[1:] for i in j.split(',')]
Output:
{'min': [1, 2, 3, 4, 5, 7, 8, 9],
'avg': [1, 2, 3, 4],
'max': [1, 2, 3, 4, 5, 1, 2, 3, 44, 55, 32, 12]}

How to write several commas in a CSV file in python

I'm converting a .txt file with annotations into another annotation format in a .csv file. The annotation format is as follows: filepath,x1,y1,x2,y2,classname. For pictures which don't have an instance of any class in them, the annotation looks like this: filepath,,,,,.
The problem is that the .writerow method of the csv.writer class doesn't write more than one comma in succession.
My code is like this:
with open(annotation_file, 'r') as file:
    lines = file.readlines()
    splitted_lines = [line.split(' ') for line in lines]

with open(out_file, 'w', newline='') as out:
    csv_writer = csv.writer(out, delimiter=';')
    for l in splitted_lines:
        if len(l) == 1:
            # indicate empty images
            csv_writer.writerow([l[0] + ',,,,,'])
l is a list that contains a single string, so with l[0] + ',,,,,' I want to concatenate it with five commas.
Thank you in advance
Set missing values as empty strings and pad the list:
with open(annotation_file, 'r') as file:
    lines = file.readlines()
    splitted_lines = [line.split(' ') for line in lines]

with open(out_file, 'w', newline='') as out:
    csv_writer = csv.writer(out, delimiter=';')
    for l in splitted_lines:
        if len(l) == 1:
            # indicate empty images
            csv_writer.writerow(l + ['' for _ in range(5)])
        else:
            csv_writer.writerow(l)
Given sample data:
data = [
    [1, 2, 3, 4, 5, 6],
    [1, 2, 3, 4, 5, 6],
    [1, 2, 3, 4, 5, 6],
    [1],
]
it outputs:
1;2;3;4;5;6
1;2;3;4;5;6
1;2;3;4;5;6
1;;;;;
which is in line with what you want.
I discovered my problem: l is a string which contained a '\n' at the end. Because of this, the writer wasn't able to append the five commas to the string. I changed the code as shown below, which fixed the problem.
with open(annotation_file, 'r') as file:
    lines = file.readlines()
    splitted_lines = [line.split(' ') for line in lines]

with open(out_file, 'w', newline='') as out:
    csv_writer = csv.writer(out, delimiter=';')
    for l in splitted_lines:
        if len(l) == 1:
            # indicate empty images
            l[0] = l[0].replace('\n', '')
            csv_writer.writerow([l[0] + ',,,,,'])
        else:
            csv_writer.writerow(['something else'])
Thanks anyway @DelphiX
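As an aside, since the goal is really one filepath followed by five empty fields, you can let csv.writer emit the commas itself by using ',' as the delimiter and padding the row. A small sketch using the question's variable names, which also sidesteps the trailing-newline issue because strip() removes it:

import csv

with open(annotation_file, 'r') as f:
    splitted_lines = [line.strip().split(' ') for line in f]

with open(out_file, 'w', newline='') as out:
    csv_writer = csv.writer(out, delimiter=',')
    for l in splitted_lines:
        if len(l) == 1:
            # pad the single filepath with five empty fields: filepath,,,,,
            csv_writer.writerow(l + [''] * 5)
        else:
            csv_writer.writerow(l)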

Creating, then appending data on to the end of each line on a text file in python

I am trying to create a database to store the information generated by my code in the form of a 1 x 21 vector. I have called this prices, and would like to store each element of this vector in a new line of a text file. Then, the second time I run the program, I wish to append the new entries of the vector prices onto each respective line in the text file. The purpose of this is so that once a large set of results has been gathered, it is easy to produce a plot to see how each of these 21 elements changed over time.
I tried to copy (with a bit of modification to the condition in the if statement, as well as the overall for loop) the method shown in this answer, but for some reason I get a blank text file when I run the code. I changed one thing, the w to w+, but it doesn't work with the w either. What am I doing wrong?
prices = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1]  # this is just a sample output

for exp in range(21):
    with open("price_database.txt", 'r') as f:
        lines = f.readlines()
    with open("price_database.txt", 'w+') as f:
        for x, line in enumerate(lines):
            if x == exp:
                f.write(str(prices[exp]))
            f.write(line)
Edit 1:
prices = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1]

for exp in range(21):
    with open("price_database.txt", 'r') as f:
        lines = f.readlines()
    with open("price_database.txt", 'a') as f:
        for x, line in enumerate(lines):
            if x == exp:
                f.write(str(prices[exp]))
You need to close the file every time you open it to read or write results, with f.close(). But I can't understand why you use a txt file for this job; you should really use CSV or even MySQL, it would be much better.
Edited:
Open the file with append mode so you can write at the end:
with open('something.txt', 'a') as f:
    f.write('text to be appended')
    f.close()  # redundant: the with block already closes the file
If you want to be a little less careful, then do it this way; it is slightly faster because you don't have to keep closing the file yourself.
app = 'append text'
with open('something.txt', 'a') as f:
    f.write(app)
    f.close()  # again redundant inside a with block
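For the actual goal in the question (appending each new price onto its respective line so every line becomes a time series), one workable sketch, using the question's names, reads the old lines once and rewrites the file; the try/except covers the very first run, when the file does not exist yet:

prices = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1]

try:
    with open("price_database.txt", 'r') as f:
        lines = [line.rstrip('\n') for line in f]
except FileNotFoundError:
    lines = [''] * len(prices)   # first run: start with 21 empty lines

with open("price_database.txt", 'w') as f:
    for old, price in zip(lines, prices):
        # append the new value, comma-separated, to whatever the line held before
        f.write((old + ',' if old else '') + str(price) + '\n')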

Write multiple rows from dict using csv

Update: I do not want to use pandas because I have a list of dicts and want to write each one to disk as they come in (part of a web-scraping workflow).
I have a dict that I'd like to write to a csv file. I've come up with a solution, but I'd like to know if there's a more pythonic solution available. Here's what I envisioned (but doesn't work):
import csv

test_dict = {"review_id": [1, 2, 3, 4],
             "text": [5, 6, 7, 8]}

with open('test.csv', 'w') as csvfile:
    fieldnames = ["review_id", "text"]
    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
    writer.writeheader()
    writer.writerows(test_dict)
Which would ideally result in:
review_id text
1 5
2 6
3 7
4 8
The code above doesn't seem to work the way I'd expect it to and throws a ValueError. So, I've turned to the following solution (which does work, but seems verbose).
with open('test.csv', 'w') as csvfile:
    fieldnames = ["review_id", "text"]
    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
    writer.writeheader()
    response = test_dict
    cells = [{x: {key: val}} for key, vals in response.items()
             for x, val in enumerate(vals)]
    rows = {}
    for d in cells:
        for key, val in d.items():
            if key in rows:
                rows[key].update(d.get(key, None))
            else:
                rows[key] = d.get(key, None)
    for row in [val for _, val in rows.items()]:
        writer.writerow(row)
Again, to reiterate what I'm looking for: the block of code directly above works (i.e., produces the desired result mentioned earlier in the post), but seems verbose. So, is there a more pythonic solution?
Thanks!
Your first example will work with minor edits. DictWriter expects a list of dicts rather than a dict of lists. Assuming you can't change the format of the test_dict:
import csv

test_dict = {"review_id": [1, 2, 3, 4],
             "text": [5, 6, 7, 8]}

def convert_dict(mydict, numentries):
    data = []
    for i in range(numentries):
        row = {}
        for k, l in mydict.items():  # .iteritems() in Python 2
            row[k] = l[i]
        data.append(row)
    return data

with open('test.csv', 'w') as csvfile:
    fieldnames = ["review_id", "text"]
    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
    writer.writeheader()
    writer.writerows(convert_dict(test_dict, 4))
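For clarity, convert_dict(test_dict, 4) builds exactly the list-of-dicts shape DictWriter expects (shown here for Python 3.7+, where dicts preserve insertion order):

>>> convert_dict(test_dict, 4)
[{'review_id': 1, 'text': 5}, {'review_id': 2, 'text': 6},
 {'review_id': 3, 'text': 7}, {'review_id': 4, 'text': 8}]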
Try using Python's pandas. Here is a simple example:
import pandas as pd

test_dict = {"review_id": [1, 2, 3, 4],
             "text": [5, 6, 7, 8]}
d1 = pd.DataFrame(test_dict)
d1.to_csv("output.csv")
Cheers
The built-in zip function can join together different iterables into tuples which can be passed to writerows. Try this as the last line:
writer.writerows(zip(test_dict["review_id"], test_dict["text"]))
You can see what it's doing by making a list:
>>> list(zip(test_dict["review_id"], test_dict["text"]))
[(1, 5), (2, 6), (3, 7), (4, 8)]
Edit: In this particular case, you probably want a regular csv.writer, since what you effectively have now is a list of rows.
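Putting those two pieces together, a minimal sketch with the same test_dict:

import csv

test_dict = {"review_id": [1, 2, 3, 4],
             "text": [5, 6, 7, 8]}

with open('test.csv', 'w', newline='') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerow(["review_id", "text"])  # header row
    writer.writerows(zip(test_dict["review_id"], test_dict["text"]))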
If you don't mind using a 3rd-party package, you could do it with pandas.
import pandas as pd
pd.DataFrame(test_dict).to_csv('test.csv', index=False)
Update: So, you have several dictionaries, and all of them seem to come from a scraping routine.
import pandas as pd

test_dict = {"review_id": [1, 2, 3, 4],
             "text": [5, 6, 7, 8]}

pd.DataFrame(test_dict).to_csv('test.csv', index=False)

list_of_dicts = [test_dict, test_dict]

for d in list_of_dicts:
    pd.DataFrame(d).to_csv('test.csv', index=False, mode='a', header=False)
This time, you would be appending to the file and without the header.
The output is:
review_id,text
1,5
2,6
3,7
4,8
1,5
2,6
3,7
4,8
1,5
2,6
3,7
4,8
The problem is that with DictWriter.writerows() you are forced to have a dict for each row. Instead, you can simply write the values by changing your csv creation:
with open('test.csv', 'w') as csvfile:
    fieldnames = test_dict.keys()
    fieldvalues = zip(*test_dict.values())
    writer = csv.writer(csvfile)
    writer.writerow(fieldnames)
    writer.writerows(fieldvalues)
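The zip(*test_dict.values()) call transposes the column lists into row tuples (assuming Python 3.7+ dict ordering):

>>> list(zip(*test_dict.values()))
[(1, 5), (2, 6), (3, 7), (4, 8)]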
You have two different problems in your question:
1. Create a csv file from a dictionary where the values are containers and not primitives.
For the first problem, the solution is generally to transform the container type into a primitive type. The most common method is creating a json-string. So for example:
>>> import json
>>> x = [2, 4, 6, 8, 10]
>>> json_string = json.dumps(x)
>>> json_string
'[2, 4, 6, 8, 10]'
So your data conversion might look like:
import csv
import json

def convert(datadict):
    '''Generator which converts a dictionary of containers into a dictionary of json-strings.

    args:
        datadict(dict): dictionary which needs conversion

    yields:
        tuple: key and string
    '''
    for key, value in datadict.items():
        yield key, json.dumps(value)

def dump_to_csv_using_dict(datadict, fields=None, filepath=None, delimiter=None):
    '''Dumps a list of dictionaries into csv

    args:
        datadict(list): list of dictionaries to dump
        fields(list): field sequence to use from the dictionary [default: sorted keys of the first dict]
        filepath(str): filepath to save to [default: 'tmp.csv']
        delimiter(str): delimiter to use in csv [default: '|']
    '''
    fieldnames = sorted(datadict[0].keys()) if fields is None else fields  # datadict is a list, so inspect its first dict
    filepath = 'tmp.csv' if filepath is None else filepath
    delimiter = '|' if not delimiter else delimiter
    with open(filepath, 'w') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames, restval='', extrasaction='ignore', delimiter=delimiter)
        writer.writeheader()
        for each_dict in datadict:
            writer.writerow(each_dict)
So the naive conversion looks like this:
# Conversion code
test_data = {
    "review_id": [1, 2, 3, 4],
    "text": [5, 6, 7, 8]
}
converted_data = dict(convert(test_data))
data_list = [converted_data]
dump_to_csv_using_dict(data_list)
2. Create a final value that is actually some sort of a merging of two disparate data sets.
To do this, you need to find a way to combine data from different keys. This is not an easy problem to generically solve.
That said, it's easy to combine two lists with zip.
>>> x = [2, 4, 6]
>>> y = [1, 3, 5]
>>> zip(y, x)
[(1, 2), (3, 4), (5, 6)]
In addition, in the event that your lists are not the same size, Python's itertools package provides izip_longest (renamed zip_longest in Python 3), which will yield back the full zip even if one list is shorter than another. Note izip_longest returns a generator.
>>> from itertools import izip_longest
>>> x = [2, 4]
>>> y = [1, 3, 5]
>>> z = izip_longest(y, x, fillvalue=None)  # default fillvalue is None
>>> list(z)  # z is a generator
[(1, 2), (3, 4), (5, None)]
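For reference, the Python 3 equivalent uses zip_longest:

>>> from itertools import zip_longest
>>> list(zip_longest([1, 3, 5], [2, 4], fillvalue=None))
[(1, 2), (3, 4), (5, None)]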
So we could add another function here:
from itertools import izip_longest

def combine(data, fields=None, default=None):
    '''Combines fields within data

    args:
        data(dict): a dictionary with lists as values
        fields(list): a list of keys to combine [default: all fields in random order]
        default: default fill value [default: None]

    yields:
        tuple: columns combined into rows
    '''
    fields = data.keys() if fields is None else fields
    columns = [data.get(field) for field in fields]
    for values in izip_longest(*columns, fillvalue=default):
        yield values
And now we can use this to update our original conversion.
def dump_to_csv(data, filepath=None, delimiter=None):
    '''Dumps a list of rows into csv

    args:
        data(list): list of row tuples to dump
        filepath(str): filepath to save to [default: 'tmp.csv']
        delimiter(str): delimiter to use in csv [default: '|']
    '''
    filepath = 'tmp.csv' if filepath is None else filepath
    delimiter = '|' if not delimiter else delimiter
    with open(filepath, 'w') as csvfile:
        writer = csv.writer(csvfile, delimiter=delimiter)
        for each_row in data:
            writer.writerow(each_row)

# Conversion code
test_data = {
    "review_id": [1, 2, 3, 4],
    "text": [5, 6, 7, 8]
}
combined_data = combine(test_data)
data_list = list(combined_data)
dump_to_csv(data_list)

Python - Parsing Columns and Rows

I am running into some trouble with parsing the contents of a text file into a 2D array/list. I cannot use built-in libraries, so have taken a different approach. This is what my text file looks like, followed by my code:
1,0,4,3,6,7,4,8,3,2,1,0
2,3,6,3,2,1,7,4,3,1,1,0
5,2,1,3,4,6,4,8,9,5,2,1
def twoDArray():
    network = [[]]

    filename = open('twoDArray.txt', 'r')
    for line in filename.readlines():
        col = line.split(line, ',')
        row = line.split(',')
        network.append(col, row)

    print "Network = "
    print network

if __name__ == "__main__":
    twoDArray()
I ran this code but got this error:
Traceback (most recent call last):
  File "2dArray.py", line 22, in <module>
    twoDArray()
  File "2dArray.py", line 8, in twoDArray
    col = line.split(line, ',')
TypeError: an integer is required
I am using the comma to separate both rows and columns, as I am not sure how I would differentiate between the two. I am confused about why it is telling me that an integer is required when the file is made up of integers.
Well, I can explain the error. You're using str.split() and its usage pattern is:
str.split(separator, maxsplit)
You're using str.split(string, separator) and that isn't a valid call to split. Here is a direct link to the Python docs for this:
http://docs.python.org/library/stdtypes.html#str.split
To directly answer your question, there is a problem with the following line:
col = line.split(line, ',')
If you check the documentation for str.split, you'll find the description to be as follows:
str.split([sep[, maxsplit]])
Return a list of the words in the string, using sep as the delimiter string. If maxsplit is given, at most
maxsplit splits are done (thus, the list will have at most maxsplit+1 elements). If maxsplit is not specified, then there is no limit on the number of splits (all possible splits are made).
This is not what you want. You are not trying to specify the number of splits you want to make.
Consider replacing your for loop and network.append with this:
for line in filename.readlines():
    # line is a string representing the values for this row
    row = line.split(',')
    # row is the list of number strings for this row, such as ['1', '0', '4', ...]
    cols = [int(x) for x in row]
    # cols is the list of numbers for this row, such as [1, 0, 4, ...]
    network.append(cols)
    # Put this row into network, such that network is [[1, 0, 4, ...], [...], ...]
"""I cannot use built-in libraries""" -- do you really mean "cannot" as in you have tried to use the csv module and failed? If so, say so. Do you mean that "may not" as in you are forbidden to use a built-in module by the terms of your homework assignment? If so, say so.
Here is an answer that works. It doesn't leave a newline attached to the end of the last item in each row. It converts the numbers to int so that you can use them for whatever purpose you have. It fixes other errors that nobody else has mentioned.
def twoDArray():
    network = []
    # filename = open('twoDArray.txt', 'r')
    # "filename" is a very weird name for a file HANDLE
    f = open('twoDArray.txt', 'r')
    # for line in filename.readlines():
    # readlines reads the whole file into memory at once.
    # That is quite unnecessary.
    for line in f:  # just iterate over the file handle
        line = line.rstrip('\n')  # remove the newline, if any
        # col = line.split(line, ',')
        # wrong args, as others have said.
        # In any case, only 1 split call is necessary
        row = line.split(',')
        # now convert string to integer
        irow = [int(item) for item in row]
        # network.append(col,row)
        # list.append expects only ONE arg
        # indentation was wrong; you need to do this once per line
        network.append(irow)

    print "Network = "
    print network

if __name__ == "__main__":
    twoDArray()
Omg...
network = []
filename = open('twoDArray.txt', 'r')
for line in filename.readlines():
    network.append(line.split(','))
You get:
[
[1,0,4,3,6,7,4,8,3,2,1,0],
[2,3,6,3,2,1,7,4,3,1,1,0],
[5,2,1,3,4,6,4,8,9,5,2,1]
]
or do you need some other structure as output? Please add what you need as output.
class TwoDArray(object):
    @classmethod
    def fromFile(cls, fname, *args, **kwargs):
        splitOn = kwargs.pop('splitOn', None)
        mode = kwargs.pop('mode', 'r')
        with open(fname, mode) as inf:
            return cls([line.strip('\r\n').split(splitOn) for line in inf], *args, **kwargs)

    def __init__(self, data=[[]], *args, **kwargs):
        dataType = kwargs.pop('dataType', lambda x: x)
        super(TwoDArray, self).__init__()
        self.data = [[dataType(i) for i in line] for line in data]

    def __str__(self, fmt=str, endrow='\n', endcol='\t'):
        return endrow.join(
            endcol.join(fmt(i) for i in row) for row in self.data
        )

def main():
    network = TwoDArray.fromFile('twodarray.txt', splitOn=',', dataType=int)
    print("Network =")
    print(network)

if __name__ == "__main__":
    main()
The input format is simple, so the solution should be simple too:
network = [map(int, line.split(',')) for line in open(filename)]
print network
The csv module doesn't provide an advantage in this case:
import csv
print [map(int, row) for row in csv.reader(open(filename, 'rb'))]
If you need float instead of int:
print list(csv.reader(open(filename, 'rb'), quoting=csv.QUOTE_NONNUMERIC))
If you are working with numpy arrays:
import numpy
print numpy.loadtxt(filename, dtype='i', delimiter=',')
See Why NumPy instead of Python lists?
All examples produce arrays equal to:
[[1 0 4 3 6 7 4 8 3 2 1 0]
[2 3 6 3 2 1 7 4 3 1 1 0]
[5 2 1 3 4 6 4 8 9 5 2 1]]
Read the data from the file. Here's one way:
f = open('twoDArray.txt', 'r')
buffer = f.read()
f.close()
Parse the data into a table
table = [map(int, row.split(',')) for row in buffer.strip().split("\n")]
>>> print table
[[1, 0, 4, 3, 6, 7, 4, 8, 3, 2, 1, 0], [2, 3, 6, 3, 2, 1, 7, 4, 3, 1, 1, 0], [5, 2, 1, 3, 4, 6, 4, 8, 9, 5, 2, 1]]
Perhaps you want the transpose instead:
transpose = zip(*table)
>>> print transpose
[(1, 2, 5), (0, 3, 2), (4, 6, 1), (3, 3, 3), (6, 2, 4), (7, 1, 6), (4, 7, 4), (8, 4, 8), (3, 3, 9), (2, 1, 5), (1, 1, 2), (0, 0, 1)]
