I want to replace each blank cell with the previous value in Python.
Example:
data = [AAA, , ,BBB,CCC, ,DDD]
expected data = [AAA,AAA,AAA,BBB,CCC,CCC,DDD]
import csv

filename = 'some.csv'
with open(filename, newline='') as f:
    reader = csv.reader(f)
    rows = list(reader)  # was list(csv_reader), which is undefined
print(rows)
Try this:
Before running the code, the CSV file contains blank rows (screenshot omitted).
The code:
import csv

filename = 'Book1.csv'
with open(filename, newline='') as f:
    reader = csv.reader(f)
    rows = list(reader)

with open(filename, 'w', newline='') as file:
    csvwriter = csv.writer(file)
    previous = rows[0]
    for row in rows:
        if row == []:
            csvwriter.writerow(previous)
        else:
            csvwriter.writerow(row)
            previous = row
After running the code, the blank rows are filled with the previous row's values (screenshot omitted).
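For the flat list shown in the question, the same forward-fill idea can be sketched without the csv module; the list literal below is just the question's example written out as strings:

data = ['AAA', '', '', 'BBB', 'CCC', '', 'DDD']
filled = []
previous = ''
for value in data:
    if value == '':
        # blank cell: reuse the previous non-blank value
        filled.append(previous)
    else:
        filled.append(value)
        previous = value
print(filled)  # ['AAA', 'AAA', 'AAA', 'BBB', 'CCC', 'CCC', 'DDD']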
I have a CSV file whose values can change, and the file can be appended to using a module I made. I want to write a module that can check whether a value is contained in the file and find its location in the file.
What I have right now is:
import csv


def GET_ROW_COUNT():
    with open('battle_royale.csv', 'r') as source:
        battleRoyaleData = csv.reader(source, delimiter=',')
        row_count = sum(1 for row in battleRoyaleData)
    return row_count


def DISPLAY_PLAYERS():
    with open('battle_royale.csv', 'r') as source:
        battleRoyaleData = csv.reader(source, delimiter=',')
        for row in battleRoyaleData:
            print(row)


def WRITE_PLAYER(avatarName, name):
    csv_list = []
    rowCount = GET_ROW_COUNT()
    with open('battle_royale.csv', 'r') as source:
        battleRoyaleData = csv.reader(source, delimiter=',')
        for row in battleRoyaleData:
            csv_list.append(row)
    csv_list.append([f"'{avatarName}'", f"'{name}'", f"'{rowCount}'"])
    with open('battle_royale.csv', 'w', newline='') as csvfile:
        newWrite = csv.writer(csvfile, delimiter=',')
        newWrite.writerows(csv_list)
I'm thinking I would use
data = []
with open('battle_royale.csv', 'r') as source:
    battleRoyaleData = csv.reader(source, delimiter=',')
    for row in battleRoyaleData:
        row = [t for t in row]
        data.append(row)
and then include something like:
if value in data:
    coordinates = ...  # some way to get the position of the value in the list from its index
----Edit----
How can I get the position of a value and know whether the value exists in a list?
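A minimal sketch of one way to do that with the nested list built above; the variable names data and value come from the question, while the helper name find_value is my own:

def find_value(data, value):
    """Return (row_index, column_index) of the first match, or None if absent."""
    for row_index, row in enumerate(data):
        for column_index, cell in enumerate(row):
            if cell == value:
                return row_index, column_index
    return None

coordinates = find_value(data, value)
if coordinates is not None:
    print(f"found at row {coordinates[0]}, column {coordinates[1]}")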
As I can see, you are trying to implement CRUD operations on CSV files:
Create
Read
Update
Delete
For this purpose, you can use the sqlite3 database and simple SQL queries.
It won't be any harder than your idea with CSV.
https://docs.python.org/3/library/sqlite3.html
There is also a nice client for interacting with the data:
https://sqlitebrowser.org/
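A minimal sketch of that idea, assuming the same avatarName/name fields the question writes to battle_royale.csv; the players table and its column names are my own invention:

import sqlite3

connection = sqlite3.connect('battle_royale.db')
cursor = connection.cursor()

# Create
cursor.execute('CREATE TABLE IF NOT EXISTS players (avatar_name TEXT, name TEXT)')
cursor.execute('INSERT INTO players VALUES (?, ?)', ('SomeAvatar', 'SomeName'))
connection.commit()

# Read: check whether a value exists and where it is
cursor.execute('SELECT rowid, avatar_name, name FROM players WHERE name = ?', ('SomeName',))
for rowid, avatar_name, name in cursor.fetchall():
    print(f'found {name} (avatar {avatar_name}) at rowid {rowid}')

connection.close()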
I have two CSVs, New.csv and Old.csv, shown below:
Old.csv
longName,shortName,eventType,number,severity
ACTAGENT201,ACAT201,RES,1,INFO
ACTAGENT202,ACAT202,RES,2,ALERT
ACODE801,AC801,ADMIN,1,MINOR
ACODE802,AC802,ADMIN,2,MINOR
ACODE102,AC102,COMM,2,CRITICAL
ACODE103,AC103,COMM,3,CRITICAL
ACODE104,AC104,COMM,4,CRITICAL
ACODE105,AC105,COMM,5,CRITICAL
ACODE106,AC106,COMM,6,CRITICAL
New.csv
longName,shortName,eventType,number,severity
ACTAGENT201,ACAT201,RES,1,INFO
ACTAGENT202,ACAT202,RES,2,ALERT
ACODE801,AC801,ADMIN,1,MINOR
ACODE802,AC802,ThisHasBeenChanged,2,MINOR
ACODE102,AC102,COMM,2,CRITICAL
ACODE103,AC103,COMM,3,CRITICAL
ACODE104,AC104,COMM,4,THISHASBEENCHANGED
ACODE105,AC105,COMM,5,CRITICAL
ACODE106,AC106,COMM,6,CRITICAL
If data in one of the columns of a row has been modified/changed between old.csv and new.csv, then that whole row should be appended to changes.csv, with each column from old.csv and new.csv placed beside each other.
I know how to find new and deleted items in the CSV, but I could not figure out how to get the modified items. Code below:
import csv


def DeletedItems(old_csv, new_csv, changes_csv):
    with open(new_csv, newline="", encoding="utf8") as new_fp:
        csv_reader = csv.reader(new_fp)
        csv_headings = next(csv_reader)
        new_long_names = {row[0] for row in csv.reader(new_fp)}
    with open(old_csv, newline="", encoding="utf8") as old_fp:
        with open(changes_csv, "a", newline="", encoding="utf8") as changes_fp:
            writer = csv.writer(changes_fp)
            writer.writerow("")
            for row in csv.reader(old_fp):
                if row[0] not in new_long_names:
                    writer.writerow(row)


def NewItems(old_csv, new_csv, changes_csv):
    with open(old_csv, newline="", encoding="utf8") as old_fp:
        csv_reader = csv.reader(old_fp)
        csv_headings = next(csv_reader)
        old_long_names = {row[0] for row in csv.reader(old_fp)}
    with open(new_csv, newline="", encoding="utf8") as new_fp:
        with open(changes_csv, "w", newline="", encoding="utf8") as changes_fp:
            writer = csv.writer(changes_fp)
            for row in csv.reader(new_fp):
                if row[0] not in old_long_names:
                    writer.writerow(row)


NewItems("old.csv", "new.csv", "changes.csv")
DeletedItems("old.csv", "new.csv", "changes.csv")
First, read both CSV files into dictionaries, using the longName values as keys.
import csv

with open(old_csv_file, "r") as fh:
    reader = csv.reader(fh)
    old_csv = {row[0]: row for row in reader}

with open(new_csv_file, "r") as fh:
    reader = csv.reader(fh)
    new_csv = {row[0]: row for row in reader}
Then, it's easy to find newly added and deleted keys using set operations.
old_longNames = set(old_csv.keys())
new_longNames = set(new_csv.keys())
# common: set intersection
common_longNames = old_longNames.intersection(new_longNames)
# removed: whatever's in old but not in new
removed_longNames = old_longNames - new_longNames
# added: whatever's in new but not in old
added_longNames = new_longNames - old_longNames
Finally, iterate over the common set to find where there are changes:
changed_longNames = []
for key in common_longNames:
    old_row = old_csv[key]
    new_row = new_csv[key]
    # if any(o != n for o, n in zip(old_row, new_row)):
    if old_row != new_row:
        # this row has at least one column changed. Do whatever
        print(f"LongName {key} has changes")
        changed_longNames.append(key)
Or, as a list comprehension:
changed_longNames = [key for key in common_longNames if old_csv[key] != new_csv[key]]
Writing everything to a new csv file is also fairly trivial. Note that the sets don't preserve the order, so you might not get the result in the same order.
with open("deleted.csv", "w") as fh:
writer = csv.writer(fh)
for key in removed_longNames:
writer.writerow(old_csv[key])
with open("inserted.csv", "w") as fh:
writer = csv.writer(fh)
for key in added_longNames:
writer.writerow(new_csv[key])
with open("changed.csv", "w") as fh:
writer = csv.writer(fh)
for key in changed_longNames:
old_row = old_csv[key]
new_row = new_csv[key]
merged_row = []
for oi, ni in zip(old_row, new_row):
merged_row.append(oi)
merged_row.append(ni)
writer.writerow(merged_row)
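If the original file order matters, one option (not part of the original answer) is to filter the dictionary keys, which keep insertion order in Python 3.7+, instead of iterating the raw sets:

# Iterate in the order the rows appear in the source files.
removed_in_order = [key for key in old_csv if key in removed_longNames]
added_in_order = [key for key in new_csv if key in added_longNames]
changed_in_order = [key for key in old_csv if key in changed_longNames]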
Maybe a simple one, but I can't get it to work by googling.
I have a CSV file with emails in column A and passwords in column B.
I want to save those in a dict as {Email: Password}.
The code I have so far:
import csv

f = open('email_list_test.csv', 'r')
with f:
    reader = csv.DictReader(f)
    for row in reader:
        print(row['Email'], row['Password'])
But it is not doing anything. Ideally I would just have a dict that I can iterate through later, since I want to pass those email/password combinations on to a function.
My code now:
dict = {}
f = open('email_list_test.csv', 'r')
with f:
    reader = csv.DictReader(f)
    for row in reader:
        dict[row['Email']] = row['Password']
print(dict)
Sadly, the dict is empty, even though I have a few test records in my CSV.
Try this:
credentials = {}
with open('email_list_test.csv', 'r') as f:
    reader = csv.DictReader(f, delimiter=';')  # replace the delimiter character with the one you're using.
    for row in reader:
        credentials[row['Email']] = row['Password']  # map each email to its password
Easy, try this:
my_dict = {}
with open('email_list_test.csv', 'r') as f:
    reader = csv.DictReader(f)
    for row in reader:
        my_dict[row['Email']] = row['Password']
print(my_dict)
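Since the goal is to pass each email/password pair on to a function, iterating the resulting dict could look like this; login is a placeholder name for whatever function you want to call:

def login(email, password):
    # placeholder for the real function the pairs get passed to
    print(f"logging in {email}")

for email, password in my_dict.items():
    login(email, password)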
How can I export my scraped data to a CSV file? My code below prints out all of the data correctly, but I would like to export it the same way to a CSV file, line by line.
How can I write the data to a CSV file?
import requests
import json
import csv

with open('Links.csv', 'r') as csvfile:
    readCSV = csv.reader(csvfile, delimiter=',', quotechar='"')
    for row in readCSV:
        data = row[0]
        for b in row:
            r = requests.get(b)
            json_object = json.loads('{"data":%s}}' % (r.content.decode("utf-8").replace("jQuery111002521088376353553_1491736907010(", "")[:-2].replace("\'", "")))
            for game in json_object["data"]["docs"]:
                print("Name: %s, Price: %s, CatalogId: %s, slug: %s" % (game["name"], game["minPrice"], game["catalogId"], game["slug"]))
You can write the data to a row in a csv file like:
writeCSV.writerow([game["name"], game["minPrice"], game["catalogId"], game["slug"]])
Here it is added to your code, with the initialization code needed:
import csv
import json
import requests

with open('Links.csv', 'r') as r_csvfile, open('outp.csv', 'w') as w_csvfile:
    readCSV = csv.reader(r_csvfile, delimiter=',', quotechar='"')
    writeCSV = csv.writer(w_csvfile, delimiter=',', quotechar='"')
    writeCSV.writerow("Name Price CatalogId slug".split())

    for row in readCSV:
        data = row[0]
        for b in row:
            r = requests.get(b)
            json_object = json.loads('{"data":%s}}' % (
                r.content.decode("utf-8").replace(
                    "jQuery111002521088376353553_1491736907010(", "")[:-2]
                .replace("\'", "")))
            for game in json_object["data"]["docs"]:
                writeCSV.writerow([game["name"], game["minPrice"],
                                   game["catalogId"], game["slug"]])
You did not give any actual data, so I could not test this, but it will be close.
I think pandas is the package you're looking for.
Use pandas.DataFrame.from_dict or pandas.read_json. Once you have your pandas DataFrame, writing a CSV file is as easy as it gets.
https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.to_csv.html
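A minimal sketch of that approach, reusing the game fields from the question; the json_object parsing is assumed to work exactly as in the code above:

import pandas as pd

# Collect one dict per game, then let pandas handle the CSV writing.
games = [
    {"Name": game["name"], "Price": game["minPrice"],
     "CatalogId": game["catalogId"], "slug": game["slug"]}
    for game in json_object["data"]["docs"]
]
df = pd.DataFrame(games)
df.to_csv("outp.csv", index=False)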