I have a script that parses two CSV files and compares the first column from one file with the second column from the other. The problem is that those files are big and it takes some time to finish the process. The question is how to improve the speed. I tried to use yield from lines before the for loop, but then I have to convert lines[1:] with list(lines[1:]), so it gains nothing.
def pk():
    with open('way/to/first.csv') as csv_file:
        lines = csv_file.readlines()
        full_list = []
        for line in lines[1:]:
            array = line.split(',')
            list_pk = array[0].replace('"', '')
            full_list.append(list_pk)
        return full_list
def fk():
    with open('way/to/second.csv') as csv_file:
        lines = csv_file.readlines()
        full_list = []
        for line in lines[1:]:
            array = line.split(',')
            list_fk = array[1].replace('"', '')
            full_list.append(list_fk)
        return full_list
def res():
    f = fk()
    p = pk()
    for i in f:
        if i not in p:
            raise AssertionError(f'{i} not found')
Try using Python's set difference to find the elements in set A that do not have a match in set B:
def res():
    fset = set(fk())
    pset = set(pk())
    print('items in F that are missing from P:')
    print(fset - pset)
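If you still want the per-item AssertionError rather than a printed set, the main speed-up is doing the membership test against a set instead of a list; a minimal sketch reusing the pk() and fk() helpers above:

def res():
    pset = set(pk())        # set membership is O(1), unlike scanning a list
    for i in fk():
        if i not in pset:
            raise AssertionError(f'{i} not found')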
I started out with a 4d list, something like
tokens = [[[["a"], ["b"], ["c"]], [["d"]]], [[["e"], ["f"], ["g"]],[["h"], ["i"], ["j"], ["k"], ["l"]]]]
So I converted this to a csv file using the code
import csv

def export_to_csv(tokens):
    csv_list = [["A", "B", "C", word]]
    for h_index, h in enumerate(tokens):
        for i_index, i in enumerate(h):
            for j_index, j in enumerate(i):
                csv_list.append([h_index, i_index, j_index, j])
    with open('TEST.csv', 'w') as f:
        # using csv.writer method from CSV package
        write = csv.writer(f)
        write.writerows(csv_list)
But now I want to do the reverse process: convert a CSV file in this format back to the list format mentioned above.
Assuming you wanted your csv file to look something like this (there were a couple typos in the posted code):
A,B,C,word
0,0,0,a
0,0,1,b
0,0,2,c
...
here's one solution:
import csv

def import_from_csv(filename):
    retval = []
    with open(filename) as fh:
        reader = csv.reader(fh)
        # discard header row
        next(reader)
        # process data rows
        for (x, y, z, word) in reader:
            x = int(x)
            y = int(y)
            z = int(z)
            # grow each level of the nested list until the index fits
            retval.extend([[[]]] * (x + 1 - len(retval)))
            retval[x].extend([[]] * (y + 1 - len(retval[x])))
            retval[x][y].extend([0] * (z + 1 - len(retval[x][y])))
            retval[x][y][z] = [word]
    return retval
def import_from_csv(file):
    import ast
    import csv
    data = []
    # Read the CSV file
    with open(file) as fp:
        reader = csv.reader(fp)
        # Skip the first line, which contains the headers
        next(reader)
        for line in reader:
            # Read the first 3 elements of the line
            a, b, c = [int(i) for i in line[:3]]
            # When we read it back, everything comes in as strings. Use
            # `literal_eval` to convert it to a Python list
            value = ast.literal_eval(line[3])
            # Extend the list to accommodate the new element
            if len(data) < a + 1:
                data.append([[[]]])
            if len(data[a]) < b + 1:
                data[a].append([[]])
            if len(data[a][b]) < c + 1:
                data[a][b].append([])
            data[a][b][c] = value
    return data

# Test
assert import_from_csv("TEST.csv") == tokens
First, I'd make writing this structure to CSV independent of the number of dimensions:
import csv

def deep_iter(seq):
    for i, val in enumerate(seq):
        if type(val) is list:
            for others in deep_iter(val):
                yield i, *others
        else:
            yield i, val

with open('TEST.csv', 'w') as f:
    csv.writer(f).writerows(deep_iter(tokens))
Next, we can use the lexicographic order of the indices to recreate the structure. All we have to do is sequentially move deeper into the output list according to the indices of a word. We stop at the penultimate index to get the innermost list, because the last index only points at the word's position within that list and doesn't matter thanks to the natural ordering:
with open('TEST.csv', 'r') as f:
    rows = [*csv.reader(f)]

res = []
for r in rows:
    index = r[:-2]  # skip the last index and the word
    e = res
    while index:
        i = int(index.pop(0))  # get the next part of the current index
        if i < len(e):
            e = e[i]
        else:
            e.append([])  # add a new record at this level
            e = e[-1]
    e.append(r[-1])  # append the word to the corresponding list
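Assuming TEST.csv was produced by the deep_iter writer above (and that the files are opened with newline='' so no stray blank rows appear on Windows), the rebuilt structure should match the original:

# Sanity check: the reconstruction should reproduce the original nested list
assert res == tokens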
def pk():
    with open('doc.csv') as csv_file:
        lines = csv_file.readlines()
        for line in lines[1::]:
            array = line.split(',')
            list_pk = array[1].replace('"', '')
            # print(list_pk)
            return list_pk

print(pk())
I wonder why, when I print list_pk right in front of the return, it prints all the extracted data, but when I print pk() it prints only the first row. What should I do to get the full list of data when I print pk()?
You are currently returning after only a single run through your for loop; a return statement exits the function, which ends the loop immediately. You could accumulate results in your for loop and then move the return statement to after it:
def pk():
    with open('doc.csv') as csv_file:
        lines = csv_file.readlines()
        full_list = []
        for line in lines[1::]:
            array = line.split(',')
            list_pk = array[1].replace('"', '')
            full_list.append(list_pk)  # accumulate results
        return full_list  # return the full result
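As a side note, the csv module handles the comma splitting and quote stripping for you; a minimal sketch of the same function under that assumption (same doc.csv, second column wanted):

import csv

def pk():
    with open('doc.csv', newline='') as csv_file:
        reader = csv.reader(csv_file)
        next(reader)                       # skip the header row
        return [row[1] for row in reader]  # second column, quotes already handled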
Goal: Return a list of the names and sequences
def ReadFastaFile(filename):
    fileObj = open(filename, 'r')
    sequences = []
    seqFragments = []
    for line in fileObj:
        if line.startswith('>'):
            if seqFragments:
                sequence = ''.join(seqFragments)
                sequences.append(sequence)
                seqFragments = []
        else:
            seq = line.rstrip()
            seqFragments.append(seq)
    if seqFragments:
        sequence = ''.join(seqFragments)
        sequences.append(sequence)
    fileObj.close()
    return sequences
I want to get a list with the name and the sequence.
This code gives me a list with only the sequences, because I first thought I would not need the names for what I wanted to do. But now I realized that it would be good to also include the names, ideally in dictionary form, so that it looks like: dict = {'name': sequence}. Does somebody have an idea how to alter the code to achieve this?
This should be pretty straightforward:
def ReadFastaFile(filename):
    fileObj = open(filename, 'r')
    sequences = dict()
    seqFragments = []
    for line in fileObj:
        if line.startswith('>'):
            if seqFragments:
                sequence = ''.join(seqFragments)
                sequences[id] = sequence
                seqFragments = []
            id = line.rstrip()[1:]
        else:
            seq = line.rstrip()
            seqFragments.append(seq)
    if seqFragments:
        sequence = ''.join(seqFragments)
        sequences[id] = sequence
    fileObj.close()
    return sequences
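As a quick illustration of what the dictionary version returns, given a hypothetical example.fasta containing two records:

# example.fasta (hypothetical contents):
# >seq1
# ATGCGT
# ACGT
# >seq2
# TTTAA

result = ReadFastaFile('example.fasta')
print(result)  # {'seq1': 'ATGCGTACGT', 'seq2': 'TTTAA'}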
I'd like to join every 4th line together so I thought something like this would work:
import csv

filename = "mycsv.csv"
f = open(filename, "rb")
new_csv = []
count = 1
for i, line in enumerate(file(filename)):
    line = line.rstrip()
    print line
    if count % 4 == 0:
        new_csv.append(old_line_1 + old_line_2 + old_line_3 + line)
    else:
        old_line_1 = line[i-2]
        old_line_2 = line[i-1]
        old_line_3 = line
    count += 1
print new_csv
But line[i-1] and line[i-2] do not give me the previous line and the one before it, as I thought. So how can I access the current line minus 1 and minus 2?
The variable line contains only the line for the current iteration, so accessing line[i-1] will only give you one character within the current line. The other answer is probably the tersest way to put it but, building on your code, you could do something like this instead:
filename = "mycsv.csv"
with open(filename, "rb") as f:
    new_csv = []
    lines = []
    for i, line in enumerate(f):
        # strip the trailing newline and collect the line
        lines.append(line.rstrip())
        if (i + 1) % 4 == 0:
            new_csv.append("".join(lines))
            lines = []
print new_csv
This should do as you require
join_every_n = 4
all_lines = [line.rstrip() for line in file(filename)]  # `file` is the Python 2 built-in the OP used; use open() in Python 3
transposed_lines = zip(*[all_lines[n::join_every_n] for n in range(join_every_n)])
joined = [''.join([l1,l2,l3,l4]) for (l1,l2,l3,l4) in transposed_lines]
Likewise, you could also do
joined = map(''.join, transposed_lines)
Explanation
This returns every i-th element of your_list, starting at offset n:
your_list[n::i]
Then you can combine this across range(4) to collect every 4th line into its own list, so that you get
[[line0, line4, ...], [line1, line5, ...], [line2, line6, ...], [line3, line7, ...]]
The zip(*...) then transposes this structure so that it becomes
[[line0, line1, line2, line3], [line4, line5, line6, line7], ...]
Now you can simply unpack and join each individual group.
Example
all_lines = map(str, range(100))
transposed_lines = zip(*[all_lines[n::4] for n in range(4)])
joined = [''.join([l1,l2,l3,l4]) for (l1,l2,l3,l4) in transposed_lines]
gives
['0123',
'4567',
'891011',
...
So I have a text file consisting of one column, where each line consists of two numbers:
190..255
337..2799
2801..3733
3734..5020
5234..5530
5683..6459
8238..9191
9306..9893
I would like to discard the very 1st and the very last number, in this case, 190 and 9893.
and basically shift the rest of the numbers one spot forward, like this.
My desired output
255..337
2799..2801
3733..3734
5020..5234
5530..5683
6459..8238
9191..9306
I hope that makes sense; I'm not sure how to approach this.
lines = """190..255
337..2799
2801..3733"""
values = [int(v) for line in lines.split() for v in line.split('..')]
# values = [190, 255, 337, 2799, 2801, 3733]
pairs = zip(values[1:-1:2], values[2:-1:2])
# pairs = [(255, 337), (2799, 2801)]
out = '\n'.join('%d..%d' % pair for pair in pairs)
# out = "255..337\n2799..2801"
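To run the same idea against the actual file rather than an inline string, something like this should work (assuming the input file is called ranges.txt, a name made up for the example):

with open('ranges.txt') as f:
    values = [int(v) for line in f for v in line.split('..')]  # int() tolerates the trailing newline

pairs = zip(values[1:-1:2], values[2:-1:2])
print('\n'.join('%d..%d' % pair for pair in pairs))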
Try this:
with open(filename, 'r') as f:
    lines = f.readlines()

numbers = []
for row in lines:
    # strip the newline so it doesn't end up inside the joined pairs
    numbers.extend(row.strip().split('..'))

numbers = numbers[1:len(numbers)-1]
newLines = ['..'.join(numbers[idx:idx+2]) for idx in range(0, len(numbers), 2)]

with open(filename, 'w') as f:
    for line in newLines:
        f.write(line)
        f.write('\n')
Try this:
Read all of them into one list, split each line into two numbers, so you have one list of all your numbers.
Remove the first and last item from your list
Write out your list, two items at a time, with dots in between them.
Here's an example:
a = """190..255
337..2799
2801..3733
3734..5020
5234..5530
5683..6459
8238..9191
9306..9893"""
a_list = a.replace('..','\n').split()
b_list = a_list[1:-1]
b = ''
for i in range(len(b_list) // 2):
    b += '..'.join(b_list[2*i:2*i+2]) + '\n'
temp = []
with open('temp.txt') as ofile:
    for x in ofile:
        temp.append(x.rstrip("\n"))

for x in range(0, len(temp) - 1):
    print temp[x].split("..")[1] + ".." + temp[x+1].split("..")[0]
Maybe this will help:
def makeColumns(listOfNumbers):
    n = 0
    while n < len(listOfNumbers):
        print(listOfNumbers[n], '..', listOfNumbers[n+1])
        n += 2

def trim(listOfNumbers):
    listOfNumbers.pop(0)
    listOfNumbers.pop()

listOfNumbers = [190, 255, 337, 2799, 2801, 3733, 3734, 5020, 5234, 5530, 5683, 6459, 8238, 9191, 9306, 9893]

makeColumns(listOfNumbers)
print()
trim(listOfNumbers)
makeColumns(listOfNumbers)
I think this might be useful too. I am reading the data from a file named list.
data = open("list", "r")
temp = []
value = []
print data
for line in data:
    temp = line.split("..")
    value.append(temp[0])
    value.append(temp[1])
for i in range(1, (len(value) - 1), 2):
    print value[i].strip() + ".." + value[i+1]
print value
After reading the data, I split each line and store it in the temporary list. After that, I copy the data into the main list value, which holds all of the data. Then I iterate from the second element to the second-to-last element to get the output of interest. The strip function is used to remove the '\n' character from the value.
You can later write these values to a file instead of printing them out.
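For example, a minimal sketch of that last step, writing to a hypothetical output.txt instead of printing (reusing the value list built above):

with open("output.txt", "w") as out:
    for i in range(1, len(value) - 1, 2):
        out.write(value[i].strip() + ".." + value[i+1] + "\n")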