How to save multiple CSV files using to_csv in Python?

Need to create multiple CSV files at the location
csv_data = "D:\\Desktop\\DS\\DSDA\\Assignment\\"
for a in range(60):
    csv_data = csv_data + str(a)
    csv_data = DataFrameDict[a].to_csv(csv_data.csv, index=False, header=True)
    csv_data = ""
I have tried:
file.with_suffix('.csv'), index = False) also didn't work

A first (and better) path for me would be to use pathlib; however, let's stick to your question. I would suggest adding the '.csv' to the string concatenation:
csv_data = "D:\\Desktop\\DS\\DSDA\\Assignment\\"
for a in range(60):
    # add the '.csv' here
    csv_data = csv_data + str(a) + '.csv'
    csv_data = DataFrameDict[a].to_csv(csv_data, index=False, header=True)
    # what is this line for?
    csv_data = ""
Let's see if it works.
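Since pathlib was mentioned above, here is a minimal sketch of that route, assuming the same DataFrameDict and output folder from the question:
from pathlib import Path

out_dir = Path(r"D:\Desktop\DS\DSDA\Assignment")
out_dir.mkdir(parents=True, exist_ok=True)  # create the folder if it is missing

for a in range(60):
    # builds e.g. D:\Desktop\DS\DSDA\Assignment\0.csv, 1.csv, ...
    out_path = out_dir / f"{a}.csv"
    DataFrameDict[a].to_csv(out_path, index=False, header=True)
This avoids rebuilding and resetting the path string inside the loop.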

This code is able to create 60 CSV files using one dataframe:
csv_data = "D:\\Desktop\\DS\\DSDA\\Assignment\\"
for a in range(60):
    csv_data = csv_data + str(a)
    csv_data = DataFrameDict[a].to_csv(csv_data + ".csv", index=False, header=True)
    csv_data = ""

Related

Split large DataFrame into Dataframes containing records of unique values in a column

A CSV file has 90 million rows. One of the columns is named "State". It has 12 unique values at present. (The count of unique values in the "State" column is dynamic and can change with each CSV file.)
I want to split the DataFrame into smaller chunks and then save State-wise files.
The code below is not working.
source_path = "DataJune.txt"
for i,chunk in enumerate(pd.read_csv(source_path, sep = '|',chunksize=1000000)):
dfs = dict(tuple(chunk.groupby('State')))
for i, df in dfs.items():
df = df.append(df)
df.to_csv("tempcsv/" + i +".csv",sep=",", index = False)
IIUC, Try:
source_path = "DataJune.txt"
from collections import defaultdict
def def_value():
return pd.DataFrame()
# Defining the dict
d = defaultdict(def_value)
for i,chunk in enumerate(pd.read_csv(source_path, sep = '|',chunksize=2)):
chunk_states = chunk['State'].unique()
for state in chunk_states:
d[state]=d[state].append(chunk[chunk['State']==state])
for i, df in d.items():
df.to_csv("tempcsv/" + str(i) +".csv",sep=",", index = False)
Another version, based on the @Corralien comment:
source_path = "DataJune.txt"
for i,chunk in enumerate(pd.read_csv(source_path, sep = '|',chunksize=2)):
chunk_states = chunk['State'].unique()
for state in chunk_states:
with open("tempcsv/" + str(state) +".csv",mode='a+') as file:
for i, row in chunk[chunk['State']==state].iterrows():
file.write(','.join([str(x) for x in row]))
file.write('\n')
Another version:
source_path = "DataJune.txt"
from os.path import exists
import csv

for i, chunk in enumerate(pd.read_csv(source_path, sep='|', chunksize=2)):
    chunk_states = chunk['State'].unique()
    for state in chunk_states:
        path = "tempcsv/" + str(state) + ".csv"
        file_exists = exists(path)
        if not file_exists:
            with open(path, newline='', mode='a+') as file:
                writer = csv.writer(file)
                writer.writerow(chunk.columns)
                print(chunk.columns)
        with open(path, newline='', mode='a+') as file:
            writer = csv.writer(file)
            writer.writerows(chunk[chunk['State'] == state].values)
You can use:
import pandas as pd
import os

source_path = 'DataJune.txt'
fps = {}

for chunk in pd.read_csv(source_path, sep='|', chunksize=1000000, dtype=object):
    for state, df in chunk.groupby('State'):
        # New state, create a new file and write headers
        if state not in fps:
            fps[state] = open(f'tempcsv/{state}.csv', 'w')
            fps[state].write(f"{','.join(df.columns)}{os.linesep}")
        # Write data without headers
        df.to_csv(fps[state], index=False, header=False)

# Close files properly
for fp in fps.values():
    fp.close()
del fps
Update
Try to replace:
# Write data without headers
df.to_csv(fps[state], index=False, header=False)
By
# Write data without headers
g = (row.strip() for row in df.to_csv(index=False, header=None, sep=',').split(os.linesep) if row)
print(*g, sep=os.linesep, file=fps[state])
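For completeness, a simpler variant worth considering (a sketch, not benchmarked against the 90-million-row file) is to let to_csv handle append mode and only write the header the first time each state is seen:
import pandas as pd

source_path = "DataJune.txt"
seen_states = set()

for chunk in pd.read_csv(source_path, sep='|', chunksize=1000000):
    for state, df in chunk.groupby('State'):
        path = f"tempcsv/{state}.csv"
        # write the header only on the first chunk that contains this state
        df.to_csv(path, mode='a', index=False, header=state not in seen_states)
        seen_states.add(state)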

How can I convert Cell of Openpyxl from Text to Number format?

I wrote code to convert a text file into an Excel file using the openpyxl extension of Python.
Although the values are set properly into the columns, they are showing as text instead of numbers. Although I tried to convert them, it seems it is not working.
Can anyone please correct the code?
import csv
import openpyxl
import openpyxl as oxl

input_file = r'C:\Python\Test.txt'
output_file = r'C:\Python\Test.xlsx'

wb = oxl.Workbook()
ws = wb.active
ws.number_format = 'General'
ws.title = "Waveform"
#ws = wb.create_sheet(title='Waveform')

with open(input_file, 'r') as data:
    reader = csv.reader(data, delimiter='\t')
    for row in reader:
        ws.append(row)

for row in range(2, ws.max_row + 1):
    ws["{}{}".format("A", row)].number_format = 'General'
    ws["{}{}".format("B", row)].number_format = 'General'

wb.save(output_file)
Here is the output excel file
The data read from the txt file will be strings. So, as suggested by jezza, you need to convert the list to float. You don't need the `number_format` lines you have. Updated code is below. Note that the conversion map assumes all data can be converted to float (no text). The try/except will basically skip a row if there is text anywhere in it.
import csv
#import openpyxl
import openpyxl as oxl

input_file = r'C:\Python\Test.txt'
output_file = r'C:\Python\Test.xlsx'

wb = oxl.Workbook()
ws = wb.active
#ws.number_format = 'General'
ws.title = "Waveform"
#ws = wb.create_sheet(title='Waveform')

with open(input_file, 'r') as data:
    reader = csv.reader(data, delimiter='\t')
    for row in reader:
        try:
            # convert every string in the row to float before appending
            row = list(map(float, row))
            ws.append(row)
        except ValueError:
            print("Skipping row ", row)

#for row in range(2, ws.max_row+1):
#    ws["{}{}".format("A", row)].number_format = 'General'
#    ws["{}{}".format("B", row)].number_format = 'General'

wb.save(output_file)
Output
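If you also want Excel to display those numbers with a specific format (rather than the default 'General'), here is a minimal sketch of setting number_format per cell, assuming the ws and wb objects from the answer above and placed before wb.save():
# assumes ws already contains the numeric rows appended above
for row in ws.iter_rows(min_row=1, max_col=2):
    for cell in row:
        cell.number_format = '0.000'  # e.g. show three decimal places

wb.save(output_file)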

Add CR & LF in all lines python

First of all, thanks for reading my post. I hope you guys can help me; I'm really new to Python, so sorry if the answer is really easy.
I read several posts about adding [CR][LF] to all lines, but the main issue is that my script (I didn't create it) needs [CR][LF] integrated into all the lines.
At the moment the script only adds [LF] but not the [CR]. The script goes to SQL to extract some tables, converts the information to CSV (at this point the information keeps [CR][LF]), and after that converts it to JSON (in this step it only gives me the [LF]).
import pyodbc
import fileinput
import csv
import pandas as pd
import json
import os
import sys

conn = pyodbc.connect('Driver={SQL Server};'
                      'Server=TEST;'
                      'UID=test;'
                      'PWD=12345;'
                      'Database=TEST;'
                      'Trusted_Connection=no;')
cursor = conn.cursor()
query = "SELECT * FROM placeholder"

with open(r"D:\Test.txt") as file:
    lines = file.readlines()
    print(lines)

for user_input in lines:
    result = query.replace("placeholder", user_input)
    print(result)
    sql_query = pd.read_sql(result, conn)
    df = pd.DataFrame(sql_query)
    user_inputs = user_input.strip("\n")
    filename = os.path.join('D:\\', user_inputs + '.csv')
    df.to_csv(filename, index=False, encoding='utf-8', sep='~', quotechar="`", quoting=csv.QUOTE_ALL)
    print(filename)
    filename_json = os.path.join('D:\\', user_inputs + '.jsonl')
    csvFilePath = filename
    jsonFilePath = filename_json
    print(filename_json)
    df_o = df.astype(str)
    df_o = df_o.applymap(lambda x: x.strip() if isinstance(x, str) else x)
    df_o.to_json(filename_json, orient="records", lines=bool, date_format="iso", double_precision=15, force_ascii=False, date_unit='ms', default_handler=str)

dir_name = "D:\\"
test = os.listdir(dir_name)
for item in test:
    if item.endswith(".csv"):
        os.remove(os.path.join(dir_name, item))

cursor.close()
conn.close()
So, I don't know where I need to add this instruction.
Again, thanks so much to all of you guys for always helping me!!!
Kind regards.
pandas.DataFrame.to_json uses the newline rules of the underlying file object when writing records. If you pass in a file name, pandas will open the file in the default "\n" newline mode. Alternatively, you could open the file yourself, choosing the newline policy you want.
import pandas as pd
df = pd.DataFrame([[1,2,3],[4,5,6],[7,8,9]])
df.to_json(open("test.json", "w", newline="\r\n"), orient="records", lines=True)
print(open("test.json", "rb").read())
Output
b'{"0":1,"1":2,"2":3}\r\n{"0":4,"1":5,"2":6}\r\n{"0":7,"1":8,"2":9}'
(Note also that lines should be True or False, not bool - which happens to be "truthy" so it works, but not correct).
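Applied to the question's own loop, that means opening the JSONL file yourself before handing it to to_json; a sketch reusing the filename_json and df_o names from the posted script:
# open the target file with CRLF newline translation, then let pandas write to it
with open(filename_json, "w", newline="\r\n", encoding="utf-8") as f:
    df_o.to_json(f, orient="records", lines=True, date_format="iso",
                 double_precision=15, force_ascii=False, date_unit='ms',
                 default_handler=str)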

Want to read a file and write to an existing Excel sheet using Python

I have a text file with pipe-delimited data and I want to write it to an Excel sheet.
I have created the code below, but it takes around 700 seconds to execute since the data in the input file is large (about 45 KB).
Is there any way to optimize it?
Thanks.
Please find the code as follows:
import csv
from time import process_time
from openpyxl import Workbook  # needed for Workbook() below

def create_sheet():
    wb1 = Workbook()
    wb1.create_sheet("A")
    wb1.create_sheet("B")
    src_sheet = wb1.create_sheet("C")
    print(type(src_sheet))
    wb1.create_sheet("D")
    sheet = wb1['Sheet']
    wb1.remove(sheet)
    write_Data(src_sheet)
    wb1.save('outfile.xlsx')

def write_Data(src_sheet):
    csv.register_dialect('myDialect', delimiter='|', quoting=csv.QUOTE_ALL)
    data_list = []
    with open("C:/Users/atapadar/input_text.txt", "r") as csvfile:
        reader = csv.reader(csvfile, dialect='myDialect')
        count: int = 1
        for i in reader:
            if count == 1:
                i.append("New col")
            else:
                i.append(i[0] + i[1] + i[3])
            count = count + 1
            src_sheet.append(i)

t1_start = process_time()
create_sheet()
t1_stop = process_time()
print("Elapsed time during the whole program in seconds:", t1_stop - t1_start)
Doing this via dataframes would be much simpler:
import pandas as pd

df = pd.read_csv("filename.csv", sep="|")
df['New col'] = df['C1'].astype(int) + df['C2'].astype(int) + df['C3'].astype(int)
df.to_excel("output.xlsx", sheet_name='Sheet_name_1')

Taking Same Worksheet from a Folder of xlsm Files with Python

I'm new to pandas/Python and I've come up with the following code to extract data from a specific part of a worksheet.
import openpyxl as xl
import pandas as pd

rows_with_data = [34, 37, 38, 39, 44, 45, 46, 47, 48, 49, 50, 54, 55, 57, 58, 59, 60, 62, 63, 64, 65, 66, 70, 71, 72, 76, 77, 78, 79, 80, 81, 82, 83, 84, 88, 89, 90, 91, 92]

path = r'XXX'
xpath = input('XXX')
file = r'**.xlsm'
xfile = input('Change file name, current is ' + file + ' :')
sheetname = r'Summary'

wb = xl.load_workbook(filename=xpath + '\\' + file, data_only=True)
sheet = wb.get_sheet_by_name(sheetname)

rows = len(rows_with_data)

line_items = []
for i in range(rows):
    line_items.append(sheet.cell(row=rows_with_data[i], column=13).value)

period = []
for col in range(17, 35):
    period.append(sheet.cell(row=20, column=col).value)

print(line_items)

vals = []
x = []
for i in range(rows):
    if i != 0:
        vals.append(x)
        x = []
    for col in range(17, 35):
        x.append(sheet.cell(row=rows_with_data[i], column=col).value)
vals.append(x)

all_values = {}
all_values['Period'] = period
for i in range(rows):
    print(line_items[i])
    all_values[line_items[i]] = vals[i]

print(all_values)

period_review = input('Enter a period (i.e. 2002): ')
item = input('Enter a period (i.e. XXX): ')
time = period.index(period_review)
display_item = str(all_values[item][time])
print(item + ' for ' + period_review + " is " + display_item)

Summary_Dataframe = pd.DataFrame(all_values)

writer = pd.ExcelWriter(xpath + '\\' + 'values.xlsx')
Summary_Dataframe.to_excel(writer, 'Sheet1')
writer.save()
writer.close()
I have the same worksheet (summary results) across a library of 60 xlsm files and I'm having a hard time figuring out how to iterate this across the entire folder of files. I also want to change this from extracting specific rows to taking the entire "Summary" worksheet, pasting it into the new file, and naming the worksheet after its source filename (e.g. "Experiment_A") when pasted into the new Excel file. Any advice?
I was having a hard time reading your code to understand what you ultimately want to do, so this is just advice, not a full solution. You can iterate through all the files in the folder using os, read the files into one dataframe, then save the single big dataframe to CSV. I usually avoid Excel, but I guess you need the Excel conversion. In the example below I have read all txt files from a directory, put them into a list of dataframes, then stored the big dataframe as JSON. You can also store it as Excel/CSV.
import os
import pandas as pd

def process_data():
    # input file path in 2 parts in case it is very long
    input_path_1 = r'\\path\to\the\folder'
    input_path_2 = r'\second\part\of\the\path'
    # assembling the full folder path
    file_path = input_path_1 + input_path_2
    # listing all files in the folder
    file_list = os.listdir(os.path.join(file_path))
    # selecting only the .txt files into a list object
    file_list = [file_name for file_name in file_list if '.txt' in file_name]
    # selecting the fields we need ('sent_date' included because it is used below)
    field_names = ['country', 'ticket_id', 'sent_date']
    # defining a list to collect all the dataframes
    pd_list = []
    inserted_files = []
    # looping over txt files and storing them in the list
    for file_name in file_list:
        # creating the file path to read the file
        file_path_ = file_path + '\\' + file_name
        df_ = pd.read_csv(os.path.join(file_path_), sep='\t', usecols=field_names)
        # converting the datetime to date
        # a few internal data transformation examples before writing
        df_['sent_date'] = pd.to_datetime(df_['sent_date'])
        df_['sent_date'] = df_['sent_date'].values.astype('datetime64[M]')
        # adding each dataframe to the list
        pd_list.append(df_)
        # adding the file name to the inserted list to print later
        inserted_files.append(file_name)
        print(inserted_files)
    # SQL-like union all dataframes into a single data source
    df_ = pd.concat(pd_list)
    output_path_1 = r'\\path\to\output'
    output_path_2 = r'\path\to\output'
    output_path = output_path_1 + output_path_2
    # put the file name
    file_name = 'xyz.json'
    # adding the day the file was processed
    df_['etl_run_time'] = pd.to_datetime('today').strftime('%Y-%m-%d')
    # write file to json
    df_.to_json(os.path.join(output_path, file_name), orient='records')
    return print('Data stored as json successfully')

process_data()
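Since the question is specifically about pulling the "Summary" sheet from a folder of xlsm files into one new workbook, here is a minimal sketch of that, assuming every file has a "Summary" sheet and that openpyxl is installed so pandas can read .xlsm; the folder path is a placeholder:
from pathlib import Path
import pandas as pd

folder = Path(r'XXX')  # folder containing the 60 .xlsm files
output = folder / 'combined_summaries.xlsx'

with pd.ExcelWriter(output) as writer:
    for xlsm_file in sorted(folder.glob('*.xlsm')):
        # read the whole "Summary" sheet of each workbook
        summary = pd.read_excel(xlsm_file, sheet_name='Summary', header=None)
        # name the new sheet after the source file, e.g. "Experiment_A"
        # (Excel sheet names are limited to 31 characters)
        summary.to_excel(writer, sheet_name=xlsm_file.stem[:31], index=False, header=False)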
