Getting 'coordinates' of a CSV list - Python

I have a CSV file with values that can change and the file can be appended using a module I made. I want to make a module that can search whether a value is contained in the file and its location in the file.
What I have right now is:
import csv

def GET_ROW_COUNT():
    with open('battle_royale.csv', 'r') as source:
        battleRoyaleData = csv.reader(source, delimiter=',')
        row_count = sum(1 for row in battleRoyaleData)
    return row_count

def DISPLAY_PLAYERS():
    with open('battle_royale.csv', 'r') as source:
        battleRoyaleData = csv.reader(source, delimiter=',')
        for row in battleRoyaleData:
            print(row)

def WRITE_PLAYER(avatarName, name):
    csv_list = []
    rowCount = GET_ROW_COUNT()
    with open('battle_royale.csv', 'r') as source:
        battleRoyaleData = csv.reader(source, delimiter=',')
        for row in battleRoyaleData:
            csv_list.append(row)
    csv_list.append([f"'{avatarName}'", f"'{name}'", f"'{rowCount}'"])
    with open('battle_royale.csv', 'w', newline='') as csvfile:
        newWrite = csv.writer(csvfile, delimiter=',')
        newWrite.writerows(csv_list)
I'm thinking I would use
data = []
with open('battle_royale.csv', 'r') as source:
    battleRoyaleData = csv.reader(source, delimiter=',')
    for row in battleRoyaleData:
        row = [t for t in row]
        data.append(row)
and then include something like
if (value) in data:
    coordinates = #some way to get the position of the value in the list from index
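A minimal sketch of one way that position lookup could work, scanning the rows with enumerate (value here stands for whatever is being searched for; data is the row list built above):

coordinates = None
for row_index, row in enumerate(data):
    for col_index, cell in enumerate(row):
        if cell == value:
            coordinates = (row_index, col_index)  # (row, column) of the first match
            break
    if coordinates is not None:
        break

if coordinates is not None:
    print(f"found at row {coordinates[0]}, column {coordinates[1]}")
else:
    print("value not found in the file")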
----Edit----
How can I get the position of a value and know whether the value exists in a list?

As far as I can see, you are trying to implement CRUD operations for CSV files:
Create
Read
Update
Delete
For this purpose, you can use the sqlite3 database and simple SQL queries.
It won't be any harder than your idea with CSV files.
https://docs.python.org/3/library/sqlite3.html
It also has a nice client to interact with the data.
https://sqlitebrowser.org/
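For example, a minimal sqlite3 sketch of the same idea (the table and column names here are illustrative, not taken from the original code):

import sqlite3

conn = sqlite3.connect('battle_royale.db')
cur = conn.cursor()

# Create
cur.execute('CREATE TABLE IF NOT EXISTS players (avatar TEXT, name TEXT)')
cur.execute('INSERT INTO players VALUES (?, ?)', ('SomeAvatar', 'SomeName'))
conn.commit()

# Read: rowid gives you the "position" of a matching record
cur.execute('SELECT rowid, avatar, name FROM players WHERE name = ?', ('SomeName',))
print(cur.fetchall())

# Update / Delete work the same way
cur.execute('UPDATE players SET avatar = ? WHERE name = ?', ('NewAvatar', 'SomeName'))
cur.execute('DELETE FROM players WHERE name = ?', ('SomeName',))
conn.commit()
conn.close()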

Related

How to replace a blank cell with the previous cell value using Python

I want to replace a blank cell with the previous value in Python.
Example:
data = [AAA, , ,BBB,CCC, ,DDD]
expected data = [AAA,AAA,AAA,BBB,CCC,CCC,DDD]
import csv
filename = 'some.csv'
with open(filename, newline='') as f:
    reader = csv.reader(f)
    rows = list(reader)
print(rows)
Try this:
The code:
import csv

filename = 'Book1.csv'
with open(filename, newline='') as f:
    reader = csv.reader(f)
    rows = list(reader)

with open(filename, 'w', newline='') as file:
    csvwriter = csv.writer(file)
    previous = rows[0]
    for row in rows:
        if row == []:
            # blank row: repeat the previous non-blank row
            csvwriter.writerow(previous)
        else:
            csvwriter.writerow(row)
            previous = row
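The code above repeats the whole previous row whenever a row is completely blank. If the blanks are instead empty cells inside otherwise filled rows, a per-cell forward fill along the same lines could look like this (a sketch, assuming the same Book1.csv file):

import csv

filename = 'Book1.csv'
with open(filename, newline='') as f:
    rows = list(csv.reader(f))

previous = rows[0]
filled = []
for row in rows:
    # take each empty cell from the same column of the previous row
    new_row = [cell if cell != '' else prev for cell, prev in zip(row, previous)]
    filled.append(new_row)
    previous = new_row

with open(filename, 'w', newline='') as f:
    csv.writer(f).writerows(filled)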

Compare 2 different csv files and output all the changes into a new csv

I have 2 CSVs which are New.csv and Old.csv shown below:
Old.csv
longName,shortName,eventType,number,severity
ACTAGENT201,ACAT201,RES,1,INFO
ACTAGENT202,ACAT202,RES,2,ALERT
ACODE801,AC801,ADMIN,1,MINOR
ACODE802,AC802,ADMIN,2,MINOR
ACODE102,AC102,COMM,2,CRITICAL
ACODE103,AC103,COMM,3,CRITICAL
ACODE104,AC104,COMM,4,CRITICAL
ACODE105,AC105,COMM,5,CRITICAL
ACODE106,AC106,COMM,6,CRITICAL
New.csv
longName,shortName,eventType,number,severity
ACTAGENT201,ACAT201,RES,1,INFO
ACTAGENT202,ACAT202,RES,2,ALERT
ACODE801,AC801,ADMIN,1,MINOR
ACODE802,AC802,ThisHasBeenChanged,2,MINOR
ACODE102,AC102,COMM,2,CRITICAL
ACODE103,AC103,COMM,3,CRITICAL
ACODE104,AC104,COMM,4,THISHASBEENCHANGED
ACODE105,AC105,COMM,5,CRITICAL
ACODE106,AC106,COMM,6,CRITICAL
If data in any of the columns of a row has been modified/changed between old.csv and new.csv, then that whole row should be appended to changes.csv, with each column from old.csv and new.csv beside each other.
I know how to find new and deleted items in the csv, but could not figure out how to get the modified items. Code below:
import csv

def DeletedItems(old_csv, new_csv, changes_csv):
    with open(new_csv, newline="", encoding="utf8") as new_fp:
        csv_reader = csv.reader(new_fp)
        csv_headings = next(csv_reader)
        new_long_names = {row[0] for row in csv.reader(new_fp)}
    with open(old_csv, newline="", encoding="utf8") as old_fp:
        with open(changes_csv, "a", newline="", encoding="utf8") as changes_fp:
            writer = csv.writer(changes_fp)
            writer.writerow("")
            for row in csv.reader(old_fp):
                if row[0] not in new_long_names:
                    writer.writerow(row)

def NewItems(old_csv, new_csv, changes_csv):
    with open(old_csv, newline="", encoding="utf8") as old_fp:
        csv_reader = csv.reader(old_fp)
        csv_headings = next(csv_reader)
        old_long_names = {row[0] for row in csv.reader(old_fp)}
    with open(new_csv, newline="", encoding="utf8") as new_fp:
        with open(changes_csv, "w", newline="", encoding="utf8") as changes_fp:
            writer = csv.writer(changes_fp)
            for row in csv.reader(new_fp):
                if row[0] not in old_long_names:
                    writer.writerow(row)

NewItems("old.csv", "new.csv", "changes.csv")
DeletedItems("old.csv", "new.csv", "changes.csv")
First, read both CSV files into a dictionary, using the longName values as keys.
import csv

with open(old_csv_file, "r") as fh:
    reader = csv.reader(fh)
    old_csv = {row[0]: row for row in reader}

with open(new_csv_file, "r") as fh:
    reader = csv.reader(fh)
    new_csv = {row[0]: row for row in reader}
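Note that the header line is read like any ordinary row here (it ends up keyed by the literal string "longName", which is identical in both files and so never flagged as a change). If you would rather keep the header out of the dictionaries and reuse it for the output files, a small optional adjustment could be:

with open(old_csv_file, "r") as fh:
    reader = csv.reader(fh)
    header = next(reader)   # pull the heading row off first
    old_csv = {row[0]: row for row in reader}

with open(new_csv_file, "r") as fh:
    reader = csv.reader(fh)
    next(reader)            # skip the heading row in the second file
    new_csv = {row[0]: row for row in reader}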
Then, it's easy to find newly added and deleted keys using set operations.
old_longNames = set(old_csv.keys())
new_longNames = set(new_csv.keys())
# common: set intersection
common_longNames = old_longNames.intersection(new_longNames)
# removed: whatever's in old but not in new
removed_longNames = old_longNames - new_longNames
# added: whatever's in new but not in old
added_longNames = new_longNames - old_longNames
Finally, iterate over the common set to find where there are changes:
changed_longNames = []
for key in common_longNames:
    old_row = old_csv[key]
    new_row = new_csv[key]
    # if any(o != n for o, n in zip(old_row, new_row)):
    if old_row != new_row:
        # this row has at least one column changed. Do whatever
        print(f"LongName {key} has changes")
        changed_longNames.append(key)
Or, as a list comprehension:
changed_longNames = [key for key in common_longNames if old_csv[key] != new_csv[key]]
Writing everything to a new csv file is also fairly trivial. Note that the sets don't preserve the order, so you might not get the result in the same order.
with open("deleted.csv", "w") as fh:
writer = csv.writer(fh)
for key in removed_longNames:
writer.writerow(old_csv[key])
with open("inserted.csv", "w") as fh:
writer = csv.writer(fh)
for key in added_longNames:
writer.writerow(new_csv[key])
with open("changed.csv", "w") as fh:
writer = csv.writer(fh)
for key in changed_longNames:
old_row = old_csv[key]
new_row = new_csv[key]
merged_row = []
for oi, ni in zip(old_row, new_row):
merged_row.append(oi)
merged_row.append(ni)
writer.writerow(merged_row)
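Since sets are unordered, the rows in these output files can come out in any order. If a stable order matters, sorting the keys before writing is enough (a minor tweak, assuming plain alphabetical order is acceptable):

with open("changed.csv", "w") as fh:
    writer = csv.writer(fh)
    for key in sorted(changed_longNames):  # deterministic, alphabetical order
        merged_row = [value for pair in zip(old_csv[key], new_csv[key]) for value in pair]
        writer.writerow(merged_row)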

How to read csv file with emails and passwords and save results in dictionary?

Maybe a simple one, but I can't get it to work by googling.
I have a CSV file with emails in column A and passwords in column B.
I want to save those in a dict {Email:Password}.
The code I have so far:
import csv

f = open('email_list_test.csv', 'r')
with f:
    reader = csv.DictReader(f)
    for row in reader:
        print(row['Email'], row['Password'])
But it is not doing anything. Ideally I would just have a dict, that I can then iterate through later, as I want to pass those email password combinations on to a function.
My code now:
dict = {}
f = open('email_list_test.csv', 'r')
with f:
    reader = csv.DictReader(f)
    for row in reader:
        dict[row['Email']] = row['Password']
print(dict)
Sadly the dict is empty, even though I have a few test records in my csv.
Try this:
credentials = {}
with open('email_list_test.csv', 'r') as f:
    reader = csv.DictReader(f, delimiter=';')  # replace the delimiter character with the one you're using
    for row in reader:
        credentials[row['Email']] = row['Password']  # map each email to its password
Easy, try this:
my_dict = {}
with open('email_list_test.csv', 'r') as f:
    reader = csv.DictReader(f)
    for row in reader:
        my_dict[row['Email']] = row['Password']
print(my_dict)
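Either way, the resulting dictionary can then be iterated later and each pair passed on to whatever function needs it (process_login here is just a hypothetical placeholder):

def process_login(email, password):
    # hypothetical stand-in for the real function the pairs get passed to
    print(f"logging in {email}")

for email, password in my_dict.items():
    process_login(email, password)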

How to access rows in a CSV file

I'm trying to get the values of the first and the third field of each row in a CSV file.
My approach gives me the first and the third character of the row instead of the fields in positions 1 and 3. Would be great if someone could give me a tip about what I'm doing wrong!
import csv

lang_tags = []
tweets = []
# open and read csv file
with open("tweet-corpus.csv", "r") as csv_file:
    reader = csv.DictReader(csv_file)
    for row in csv_file:  # note: this loops over the raw file lines, not over the reader
        lang_tags = row[0]
        tweets = row[2]

for lan in lang_tags:
    print("lang: ", lang_tags)
    print("tweet: ", tweets)
Use the csv reader object.
Ex:
with open("tweet-corpus.csv", "r") as csv_file:
reader = csv.reader(csv_file)
for row in reader:
lang_tags = row[0]
or
with open("tweet-corpus.csv", "r") as csv_file:
reader = csv.DictReader(csv_file)
for row in reader:
lang_tags = row['YOURCOL_NAME']
tweets = row['YOURCOL_NAME']
If your data looks anything remotely like:
col_name0, col_name1, col_name2, ...
value0, value1, value2, ...
value0, value1, value2, ...
I recommend using pandas.
import pandas as pd # by convention, we always import pandas as pd
df = pd.read_csv(filename)
column = df[column_name]
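For the concrete goal here, that might look like the following (the column names are hypothetical, they depend on the header of tweet-corpus.csv):

import pandas as pd

df = pd.read_csv("tweet-corpus.csv")
lang_tags = df["lang"].tolist()    # hypothetical column name for the language tags
tweets = df["tweet"].tolist()      # hypothetical column name for the tweet text

for lang, tweet in zip(lang_tags, tweets):
    print("lang: ", lang)
    print("tweet: ", tweet)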

Replacing specific data in a csv file

I'm currently in the process of producing a quiz as a competition between me and my friends, and to learn a bit more about programming, which I am relatively new to. My program is intended to keep the last 3 results for each user of the quiz and replace the oldest result with the newest. The current stage I have reached is being able to check whether the user has their name in the file, and if not, writing to the file as normal.
if team == 'Team 1':
    path = 'team1scores.csv'
elif team == 'Team 2':
    path = 'team2scores.csv'
elif team == 'Team 3':
    path = 'team3scores.csv'
else:
    print("--Error Defining File Path--")

with open(path, 'rt') as csvfile:
    ver_read = csv.reader(csvfile, delimiter=",")
    ver_write = csv.writer(csvfile, delimiter=",")
    for row in ver_read:
        if user in row:
            row_data = list(ver_read)
            row_len = len(row_data)
            if row_len >= 3:
                # >>> The Problem is here
        else:
            with open(path, 'a+', newline='') as csvfile:
                csvwriter = csv.writer(csvfile, delimiter=',')
                csvwriter.writerows(datacsv)
The problem I have with the program is being able to replace the result; say I had the data below in my CSV file, with 3 entries already. The name and score need to be kept in two different columns, as I plan to include a sorting feature.
Jake,5
Jake,7
Jake,2
Max,9
Lee,8
I have experimented several times with the basis of the code above but I am confused once the program reaches the situation of replacing the information. So far I have been able to overwrite the entire file but not specific pieces of data.
Will the ver_write be necessary in the next steps?
Edit:
I now have an updated version but still have the same problem. This program is adapted from 2ps's answer to fit my criteria. It still needs to overwrite, and it needs to print the name and the score to two different cells. The basis of what I need is there, but it won't work.
from collections import OrderedDict

user_data = OrderedDict()
data_to_write = []
with open(path, 'r+') as csvfile:
    ver_read = csv.reader(csvfile, delimiter=";")
    for x, row in enumerate(ver_read):
        if user == row[0]:
            user_data[x] = row
        else:
            data_to_write.append(row)

if len(user_data) > 2:
    keys = user_data.keys()[-2:]
    for x in keys:
        data_to_write.append(user_data[x])
    data_to_write.append(datacsv)
    with open(path, 'w') as csvfile:
        ver_write = csv.writer(csvfile, delimiter=",")
        ver_write.writerows(data_to_write)
else:
    with open(path, 'a+', newline='') as csvfile:
        csvwriter = csv.writer(csvfile, delimiter=',')
        csvwriter.writerows(datacsv)
Am I doing something fundamentally wrong here?
As far as I know, you cannot change one row in a file. So you'll have to rewrite the complete file.
I do not know how you insert new data, but you could do the following:
import csv

# Assuming new score
new_score = ['Jake', '3']

# Open the file containing the scores
with open('scores.csv', 'r') as csvfile:
    ver_read = csv.reader(csvfile, delimiter=',')
    # Make a dict that will contain the scores per person
    names = {}
    for row in ver_read:
        # Grab the name and the score
        name, score = list(row)
        # If it's not in names yet, put it in and make it a list
        if name not in names:
            names[name] = []
        # Append the score to the name list
        names[name].append(score)

# Add the new score
names[new_score[0]].append(new_score[1])

with open('scores.csv', 'w', newline='') as csvfile:
    ver_write = csv.writer(csvfile)
    # Loop the names in the names dict
    for name in names:
        # If the person has more than 3 scores, only take the last 3
        if len(names[name]) > 3:
            names[name] = names[name][-3:]
        # For each score, print it and write it back to the file
        for score in names[name]:
            print('{},{}'.format(name, score))
            ver_write.writerow([name, score])
In:
Jake,5
Jake,7
Jake,2
Max,9
Lee,8
New score:
Jake,3
Out:
Jake,7
Jake,2
Jake,3
Max,9
Lee,8
from collections import OrderedDict

user_data = OrderedDict()  # dict to hold all matching rows for the user, keyed off of line number
data_to_write = []
with open(path, 'r') as csvfile:  # read the existing contents first
    ver_read = csv.reader(csvfile, delimiter=",")
    for x, row in enumerate(ver_read):
        if user == row[0]:
            user_data[x] = row
        else:
            data_to_write.append(row)  # store it for afterwards

if len(user_data) >= 3:
    keys = list(user_data.keys())[-2:]  # Grab the last two scores in the file
    for x in keys:
        data_to_write.append(user_data[x])
    # Write the contents of the new score here:
    data_to_write.append(. . . . .)

with open(path, 'w') as csvfile:
    # finally write the changes to file
    ver_write = csv.writer(csvfile, delimiter=",")
    ver_write.writerows(data_to_write)
You could try something like this maybe:
data_to_write = []
with open(path, 'r+') as csvfile:  # read-write mode for file
    ver_read = csv.reader(csvfile, delimiter=",")
    row_data = list(ver_read)
    for row in row_data:
        if user in row:
            if row_data.index(row) >= 3:  # if we found the user in a row after 3
                row = []  # change the information here to whatever suits you
            else:
                row = []  # or here depending on your logic
        data_to_write.append(row)  # store it for afterwards
    # rewind and clear the old contents before writing the changes back
    csvfile.seek(0)
    csvfile.truncate()
    # finally write the changes to file
    ver_write = csv.writer(csvfile, delimiter=",")
    ver_write.writerows(data_to_write)
