I have 10 CSV files, each with the same number of columns, from which I am reading data one by one into pandas data frames. I want that data displayed in a window, in some table form, so that each new file's data is appended as new rows. Any suggestions on this?
Below is my sample CSV file:
Like this, there are 10 or more CSV files; I will be reading data from those files one by one and want to display it in a GUI.
Brief Introduction to my Application
I have a machine that generates CSV files into a folder at certain intervals. I am using the Watchdog library to put a watch on the folder where the CSV files are generated. When I receive a CSV file, I read it into a pandas data frame. A sample CSV file is given above.
As long as the machine is running, it will keep generating CSV files. So if I want to see the data, I would need to open each and every CSV file; instead, I want a view in which the data gets updated whenever a new CSV file is generated.
So, technically, one CSV file is read, converted into a data frame, and inserted into some sort of table view. This process happens again when a new CSV file is generated, but now the data should be placed in the next rows of the same table view.
Here is my main file :
import time
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
import pandas as pd
from Append_Function import append_df_to_excel
import os.path
import sys
class Watcher:
def __init__(self, args):
self.watch_dir = os.getcwd()
print(args[0])
self.directory_to_watch = os.path.join(self.watch_dir, args[1])
self.observer = Observer()
self.event_handler = Handler(patterns=["*.CSV"], ignore_patterns=["*.tmp"], ignore_directories=True)
def run(self):
self.observer.schedule(self.event_handler, self.directory_to_watch, recursive=False)
self.observer.start()
try:
while True:
time.sleep(1)
except:
self.observer.stop()
print("Error")
self.observer.join()
class Handler(PatternMatchingEventHandler):
    @staticmethod
def on_any_event(event):
if event.is_directory:
return None
elif event.event_type == 'created':
# Take any action here when a file is first created.
print("Received created event - %s." % event.src_path)
df = pd.read_csv(event.src_path, header=1, index_col=0)
append_df_to_excel(os.path.join(os.getcwd(), "myfile.xlsx"), df)
elif event.event_type == 'modified':
            # Take any action here when a file is modified.
df = pd.read_csv(event.src_path, header=0, index_col=0)
append_df_to_excel(os.path.join(os.getcwd(), "myfile.xlsx"), df)
print("Received modified event - %s." % event.src_path)
if __name__ == '__main__':
print(sys.argv)
w = Watcher(sys.argv)
w.run()
Here is my Append Function:
import pandas as pd
import openpyxl as ox
def append_df_to_excel(filename, df, sheet_name='Sheet1', startrow=None,
truncate_sheet=False,
**to_excel_kwargs):
# ignore [engine] parameter if it was passed
if 'engine' in to_excel_kwargs:
to_excel_kwargs.pop('engine')
writer = pd.ExcelWriter(filename, engine='openpyxl')
# Python 2.x: define [FileNotFoundError] exception if it doesn't exist
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
try:
# try to open an existing workbook
writer.book = ox.load_workbook(filename,keep_vba=True)
# get the last row in the existing Excel sheet
# if it was not specified explicitly
if startrow is None and sheet_name in writer.book.sheetnames:
startrow = writer.book[sheet_name].max_row
# truncate sheet
if truncate_sheet and sheet_name in writer.book.sheetnames:
# index of [sheet_name] sheet
idx = writer.book.sheetnames.index(sheet_name)
# remove [sheet_name]
writer.book.remove(writer.book.worksheets[idx])
# create an empty sheet [sheet_name] using old index
writer.book.create_sheet(sheet_name, idx)
# copy existing sheets
writer.sheets = {ws.title: ws for ws in writer.book.worksheets}
except FileNotFoundError:
# file does not exist yet, we will create it
pass
if startrow is None:
startrow = 0
# write out the new sheet
df.to_excel(writer, sheet_name, startrow=startrow, **to_excel_kwargs, header=True)
# save the workbook
writer.save()
You have to add the dataframe through a loop:
import pandas as pd
from PyQt5 import QtCore, QtWidgets
class DataFrameTableWidget(QtWidgets.QTableWidget):
def append_dataframe(self, df):
df = df.copy()
if df.columns.size > self.columnCount():
self.setColumnCount(df.columns.size)
r = self.rowCount()
self.insertRow(r)
for c, column in enumerate(df):
it = QtWidgets.QTableWidgetItem(column)
self.setItem(r, c, it)
i = self.rowCount()
for r, row in df.iterrows():
self.insertRow(self.rowCount())
            for c, (column, value) in enumerate(row.items()):  # Series.iteritems was removed in pandas 2.0
it = QtWidgets.QTableWidgetItem(str(value))
self.setItem(i+r , c, it)
if __name__ == '__main__':
import sys
app = QtWidgets.QApplication(sys.argv)
import numpy as np
w = DataFrameTableWidget()
df = pd.DataFrame(np.random.randint(0, 100,size=(4, 4)), columns=list('ABCD'))
w.append_dataframe(df)
def after_show():
df = pd.DataFrame(np.random.randint(0, 100,size=(4, 4)), columns=list('ABCD'))
w.append_dataframe(df)
QtCore.QTimer.singleShot(2*1000, after_show)
w.resize(640, 480)
w.show()
sys.exit(app.exec_())
Update:
The observer runs on another thread and cannot update the GUI from that thread, so a signal must be used to transmit the information:
import os
import time
import pandas as pd
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
from PyQt5 import QtCore, QtWidgets
from Append_Function import append_df_to_excel
class Emitter(QtCore.QObject):
newDataFrameSignal = QtCore.pyqtSignal(pd.DataFrame)
class Watcher:
def __init__(self, filename):
self.watch_dir = os.getcwd()
self.directory_to_watch = os.path.join(self.watch_dir, filename)
self.emitter = Emitter()
self.observer = Observer()
self.event_handler = Handler(
emitter=self.emitter,
patterns=["*.CSV"],
ignore_patterns=["*.tmp"],
ignore_directories=True
)
def run(self):
self.observer.schedule(self.event_handler, self.directory_to_watch, recursive=False)
self.observer.start()
class Handler(PatternMatchingEventHandler):
def __init__(self, *args, emitter=None, **kwargs):
super(Handler, self).__init__(*args, **kwargs)
self._emitter = emitter
def on_any_event(self, event):
if event.is_directory:
return None
elif event.event_type == 'created':
# Take any action here when a file is first created.
print("Received created event - %s." % event.src_path)
df = pd.read_csv(event.src_path, header=1)
self._emitter.newDataFrameSignal.emit(df.copy())
df.set_index(df.columns.values.tolist()[0], inplace=True)
append_df_to_excel(os.path.join(os.getcwd(), "myfile.xlsx"), df)
elif event.event_type == 'modified':
            # Take any action here when a file is modified.
df = pd.read_csv(event.src_path, header=1)
self._emitter.newDataFrameSignal.emit(df.copy())
df.set_index(df.columns.values.tolist()[0], inplace=True)
append_df_to_excel(os.path.join(os.getcwd(), "myfile.xlsx"), df)
print("Received modified event - %s." % event.src_path)
class DataFrameTableWidget(QtWidgets.QTableWidget):
    @QtCore.pyqtSlot(pd.DataFrame)
def append_dataframe(self, df):
df = df.copy()
if df.columns.size > self.columnCount():
self.setColumnCount(df.columns.size)
r = self.rowCount()
self.insertRow(r)
for c, column in enumerate(df):
it = QtWidgets.QTableWidgetItem(column)
self.setItem(r, c, it)
i = self.rowCount()
for r, row in df.iterrows():
self.insertRow(self.rowCount())
            for c, (column, value) in enumerate(row.items()):  # Series.iteritems was removed in pandas 2.0
it = QtWidgets.QTableWidgetItem(str(value))
self.setItem(i+r , c, it)
if __name__ == '__main__':
import sys
app = QtWidgets.QApplication(sys.argv)
w = DataFrameTableWidget()
w.resize(640, 480)
w.show()
watcher = Watcher(sys.argv[1])
watcher.run()
watcher.emitter.newDataFrameSignal.connect(w.append_dataframe)
sys.exit(app.exec_())
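Note that because the signal is emitted from the watchdog thread, Qt's default AutoConnection already delivers it as a queued call into the GUI thread. If you want to state that intent explicitly, a purely optional variant of the connect call above is:
watcher.emitter.newDataFrameSignal.connect(
    w.append_dataframe, QtCore.Qt.QueuedConnection)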
You might be looking for:
Jupyter notebooks, which can display pandas dataframes as HTML-formatted tables.
JupyterLab, which includes a GUI CSV viewer.
The qgrid extension for Jupyter notebooks, which will allow you to interactively filter and edit data.
If your CSV files have the same headers, you might want to concatenate the data to create one single table for review, as sketched below.
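A minimal sketch of that concatenation, assuming the files sit in a data/ folder (a hypothetical path) and really do share identical headers:
import glob
import pandas as pd

# read every CSV in the folder into one frame; the files are assumed to share headers
files = sorted(glob.glob('data/*.csv'))
df = pd.concat((pd.read_csv(f) for f in files), ignore_index=True)
df  # in a notebook cell this renders as an HTML table; qgrid.show_grid(df) makes it interactive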
Related
I have a script that collects Reddit comments. It pulls from a CSV file with a list of links in it. Some of the links are dead and I get 404/403/etc. errors. The code below correctly identifies them and skips them, but it then exits the loop and completes the process of making the CSV file without continuing on to the next link.
import praw
import pprint
import csv
import os
import pandas as pd
from collections import namedtuple
from datetime import datetime
from pathlib import Path
def scrape_comments(reddit_api, csv_file, dest):
df = pd.read_csv(csv_file)
data = []
try:
for pid in df.id:
# post_comment = []
submission = reddit_api.submission(id=pid)
submission.comments.replace_more(limit=None)
for comment in submission.comments.list():
# post_comment.append(comment.body)
data.append((pid, comment.id, comment.parent_id, comment.body, comment.link_id,comment.author, comment.score, comment.created_utc, comment.subreddit))
# data.append((pid, ";".join(post_comment)))
except:
print ("Error! Skip the Current subreddit")
df = pd.DataFrame(data, columns=["post_id", "comment_id", "comment_parent_id", "comment_body", "comment_link_id","comment_author", "comment_score","comment_created","comment_subreddit"]) # append tuple
df.to_csv(dest, index=False, encoding='utf-8')
if __name__ == "__main__":
reddit_api = praw.Reddit(
client_id="####",
client_secret="####",
user_agent="####",
username="####",
password="####"
)
# reddit_api = init_praw(client_id, client_secret, user_agent, username, password)
csv_file = "####"
dest_dir = "####"
dest_name = "reddits_comments.csv"
Path(dest_dir).mkdir(parents=True, exist_ok=True)
dest = os.path.join(dest_dir, dest_name)
scrape_comments(reddit_api, csv_file, dest)
You should put the try/except around a smaller portion of your code, as said in the comments. Here's an illustration of that:
def scrape_comments(reddit_api, csv_file, dest):
df = pd.read_csv(csv_file)
data = []
for pid in df.id:
try:
# post_comment = []
submission = reddit_api.submission(id=pid)
submission.comments.replace_more(limit=None)
for comment in submission.comments.list():
# post_comment.append(comment.body)
data.append((pid, comment.id, comment.parent_id, comment.body, comment.link_id,comment.author, comment.score, comment.created_utc, comment.subreddit))
# data.append((pid, ";".join(post_comment)))
except Exception:
print ("Error! Skip the Current subreddit")
df = pd.DataFrame(data, columns=["post_id", "comment_id", "comment_parent_id", "comment_body", "comment_link_id","comment_author", "comment_score","comment_created","comment_subreddit"]) # append tuple
df.to_csv(dest, index=False, encoding='utf-8')
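If you want to be stricter than a blanket Exception, PRAW surfaces dead links as specific prawcore exceptions (Forbidden for 403, NotFound for 404). A sketch of the same loop with that change, so genuine bugs still propagate:
from prawcore.exceptions import Forbidden, NotFound

for pid in df.id:
    try:
        submission = reddit_api.submission(id=pid)
        submission.comments.replace_more(limit=None)
        for comment in submission.comments.list():
            data.append((pid, comment.id, comment.parent_id, comment.body,
                         comment.link_id, comment.author, comment.score,
                         comment.created_utc, comment.subreddit))
    except (Forbidden, NotFound):
        # 403/404: the link is dead, so skip this id and keep iterating
        print(f"Skipping dead link {pid}")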
In my code I need to be able to filter column names (in the left component) and show only the filtered columns with their data after the user has uploaded a CSV file.
This is my code, but I am stuck on it at the moment and have no idea how to implement the requirements.
try:
from enum import Enum
from io import BytesIO, StringIO
from typing import Union
import pandas as pd
import streamlit as st
except Exception as e:
print(e)
STYLE = """
<style>
img {
max-width: 100%;
}
</style>
"""
class FileUpload(object):
def __init__(self):
self.fileTypes = ["csv", "png", "jpg"]
def run(self):
"""
Upload File on Streamlit Code
:return:
"""
st.info(__doc__)
st.markdown(STYLE, unsafe_allow_html=True)
st.sidebar.title("Filter data")
st.set_option('deprecation.showfileUploaderEncoding', False)
file = st.file_uploader("Upload file", type=self.fileTypes)
show_file = st.empty()
if not file:
show_file.info("Please upload a file of type: " +
", ".join(["csv", "png", "jpg"]))
return
content = file.getvalue()
if isinstance(file, BytesIO):
show_file.image(file)
else:
data = pd.read_csv(file)
st.dataframe(data.head(10))
file.close()
if __name__ == "__main__":
helper = FileUpload()
helper.run()
If you read in a csv-file, you can use the multi-select widget to select the columns you want to show:
import streamlit as st
import pandas as pd
# load csv
uploaded_file = st.file_uploader('upload')
# only read once a file has actually been uploaded
if uploaded_file is not None:
    # into dataframe
    df = pd.read_csv(uploaded_file)
    '## multi select'
    cols = st.multiselect('select columns:', df.columns, default=[])
    st.write('You selected:', cols)
    # show dataframe with the selected columns
    st.write(df[cols])
In the current version (v0.68.1) the file uploader is buggy, so if you get an error on rerun, consider downgrading to v0.66.
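Since your original code puts the filter widget in the sidebar (the left component), the same multiselect can live there instead. A sketch of that variant, replacing the selection lines inside the block above:
# sidebar variant: the column filter sits in the left panel
cols = st.sidebar.multiselect('select columns:', df.columns, default=[])
st.write(df[cols] if cols else df)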
Given an existing Excel file, with data in a long format
Automate creating the following pivot table in Excel with the Python win32com module
Following is code to setup test.xlsx with data and connect to create a Excel com object
Imports
import win32com.client as win32
from pathlib import Path
import sys
import pandas as pd
import numpy as np
import random
from datetime import datetime
from pywintypes import com_error  # needed for the except com_error clause in run_excel
win32c = win32.constants
Function to create test.xlsx
This function is only to provide test data and a file
def create_test_excel_file(f_path: Path, f_name: str, sheet_name: str):
filename = f_path / f_name
random.seed(365)
np.random.seed(365)
number_of_data_rows = 1000
# create list of 31 dates
dates = pd.bdate_range(datetime(2020, 7, 1), freq='1d', periods=31).tolist()
data = {'date': [random.choice(dates) for _ in range(number_of_data_rows)],
'expense': [random.choice(['business', 'personal']) for _ in range(number_of_data_rows)],
'products': [random.choice(['book', 'ribeye', 'coffee', 'salmon', 'alcohol', 'pie']) for _ in range(number_of_data_rows)],
'price': np.random.normal(15, 5, size=(1, number_of_data_rows))[0]}
pd.DataFrame(data).to_excel(filename, index=False, sheet_name=sheet_name, float_format='%.2f')
Function to create Excel com object
def run_excel(f_path: Path, f_name: str, sheet_name: str):
filename = f_path / f_name
# create excel object
excel = win32.gencache.EnsureDispatch('Excel.Application')
# excel can be visible or not
excel.Visible = True # False
# try except for file / path
try:
wb = excel.Workbooks.Open(filename)
except com_error as e:
if e.excepinfo[5] == -2146827284:
print(f'Failed to open spreadsheet. Invalid filename or location: {filename}')
else:
raise e
sys.exit(1)
# set worksheet
ws1 = wb.Sheets('data')
# wb.Close(True)
# excel.Quit()
Main
def main():
# sheet name for data
sheet_name = 'data' # update with sheet name from your file
# file path
f_path = Path.cwd() # file in current working directory
# f_path = Path(r'c:\...\Documents') # file located somewhere else
# excel file
f_name = 'test.xlsx'
# function calls
create_test_excel_file(f_path, f_name, sheet_name) # remove when running your own file
run_excel(f_path, f_name, sheet_name)
A helpful way to figure out the proper Excel methods to use is to record a step-by-step macro in Excel while creating a pivot table in the form you want.
This is useful for creating a pivot table that has to be run on a routine basis in a file with existing data.
Uses the imports and methods from the question
To modify this code for a new data file
Update def main
sheet_name
f_path
f_name
Update def run_excel
ws1
ws2_name
pt_name
pt_rows
pt_cols
pt_filters
pt_fields
Call main() to run code
pivot_table function
def pivot_table(wb: object, ws1: object, pt_ws: object, ws_name: str, pt_name: str, pt_rows: list, pt_cols: list, pt_filters: list, pt_fields: list):
"""
wb = workbook1 reference
ws1 = worksheet1
pt_ws = pivot table worksheet number
ws_name = pivot table worksheet name
pt_name = name given to pivot table
pt_rows, pt_cols, pt_filters, pt_fields: values selected for filling the pivot tables
"""
# pivot table location
pt_loc = len(pt_filters) + 2
# grab the pivot table source data
pc = wb.PivotCaches().Create(SourceType=win32c.xlDatabase, SourceData=ws1.UsedRange)
# create the pivot table object
pc.CreatePivotTable(TableDestination=f'{ws_name}!R{pt_loc}C1', TableName=pt_name)
    # select the pivot table worksheet and the location at which to create the pivot table
pt_ws.Select()
pt_ws.Cells(pt_loc, 1).Select()
# Sets the rows, columns and filters of the pivot table
for field_list, field_r in ((pt_filters, win32c.xlPageField), (pt_rows, win32c.xlRowField), (pt_cols, win32c.xlColumnField)):
for i, value in enumerate(field_list):
pt_ws.PivotTables(pt_name).PivotFields(value).Orientation = field_r
pt_ws.PivotTables(pt_name).PivotFields(value).Position = i + 1
# Sets the Values of the pivot table
for field in pt_fields:
pt_ws.PivotTables(pt_name).AddDataField(pt_ws.PivotTables(pt_name).PivotFields(field[0]), field[1], field[2]).NumberFormat = field[3]
    # Visibility True or False
pt_ws.PivotTables(pt_name).ShowValuesRow = True
pt_ws.PivotTables(pt_name).ColumnGrand = True
Update run_excel to call pivot_table
def run_excel(f_path: Path, f_name: str, sheet_name: str):
filename = f_path / f_name
# create excel object
excel = win32.gencache.EnsureDispatch('Excel.Application')
# excel can be visible or not
excel.Visible = True # False
# try except for file / path
try:
wb = excel.Workbooks.Open(filename)
except com_error as e:
if e.excepinfo[5] == -2146827284:
print(f'Failed to open spreadsheet. Invalid filename or location: {filename}')
else:
raise e
sys.exit(1)
# set worksheet
ws1 = wb.Sheets('data')
# Setup and call pivot_table
ws2_name = 'pivot_table'
wb.Sheets.Add().Name = ws2_name
ws2 = wb.Sheets(ws2_name)
pt_name = 'example'
pt_rows = ['expense']
pt_cols = ['products']
pt_filters = ['date']
    # [0]: field name  [1]: pivot table column name  [2]: calculation method  [3]: number format
pt_fields = [['price', 'price: mean', win32c.xlAverage, '$#,##0.00'],
['price', 'price: sum', win32c.xlSum, '$#,##0.00'],
['price', 'price: count', win32c.xlCount, '0']]
pivot_table(wb, ws1, ws2, ws2_name, pt_name, pt_rows, pt_cols, pt_filters, pt_fields)
# wb.Close(True)
# excel.Quit()
Resources
Jupyter Notebook: How to Create a Pivot Table in Excel with the Python win32com Module
Automate Excel with Python
Examples with Pivot Table
Using Python win32com to get list of Excel worksheets
Excel VBA reference
Workbook object (Excel)
Worksheet object (Excel)
Currently I am grabbing an Excel file from a folder with Python just fine in the code below, and pushing it to a web form via Selenium.
However, I am trying to modify this to continue through a directory of multiple files (there will be many Excel files in my 'directory' or 'folder').
main.py
from data.find_pending_records import FindPendingRecords
from vital.vital_entry import VitalEntry
if __name__ == "__main__":
try:
#Instantiates FindPendingRecords then gets records to process
PENDING_RECORDS = FindPendingRecords().get_excel_data()
#Reads excel to map data from excel to vital
MAP_DATA = FindPendingRecords().get_mapping_data()
#Configures Driver for vital
VITAL_ENTRY = VitalEntry()
#Start chrome and navigate to vital website
VITAL_ENTRY.instantiate_chrome()
#Begin processing Records
VITAL_ENTRY.process_records(PENDING_RECORDS, MAP_DATA)
print("All done, Bill")
except Exception as exc:
print(exc)
config.py
FILE_LOCATION = r"C:\Zip\2019.02.12 Data Docs.zip"
UNZIP_LOCATION = r"C:\Zip\Pending"
VITAL_URL = 'http://boringdatabasewebsite:8080/Horrible'
HEADLESS = False
PROCESSORS = 4
MAPPING_DOC = ".//map/mapping.xlsx"
find_pending_records.py
"""Module used to find records that need to be inserted into Horrible website"""
from zipfile import ZipFile
import math
import pandas
import config
class FindPendingRecords:
"""Class used to find records that need to be inserted into Site"""
    @classmethod
def find_file(cls):
""""Finds the excel file to process"""
archive = ZipFile(config.FILE_LOCATION)
for file in archive.filelist:
            if 'Horrible Data Log ' in file.filename:
return archive.extract(file.filename, config.UNZIP_LOCATION)
        raise FileNotFoundError
def get_excel_data(self):
"""Places excel data into pandas dataframe"""
excel_data = pandas.read_excel(self.find_file())
columns = pandas.DataFrame(columns=excel_data.columns.tolist())
excel_data = pandas.concat([excel_data, columns])
excel_data.columns = excel_data.columns.str.strip()
excel_data.columns = excel_data.columns.str.replace("/", "_")
excel_data.columns = excel_data.columns.str.replace(" ", "_")
num_valid_records = 0
for row in excel_data.itertuples():
person = row.PERSON
            if person in ("", " ", None) or math.isnan(person):
print(f"Invalid record: {row}")
excel_data = excel_data.drop(excel_data.index[row.Index])
else:
num_valid_records += 1
print(f"Processing #{num_valid_records} records")
return self.clean_data_frame(excel_data)
def clean_data_frame(self, data_frame):
"""Cleans up dataframes"""
for col in data_frame.columns:
if "date" in col.lower():
data_frame[col] = pandas.to_datetime(data_frame[col],
errors='coerce', infer_datetime_format=True)
data_frame[col] = data_frame[col].dt.date
data_frame['PERSON'] = data_frame['PERSON'].astype(int).astype(str)
return data_frame
def get_mapping_data(self):
map_data = pandas.read_excel(config.MAPPING_DOC, sheet_name='main')
columns = pandas.DataFrame(columns=map_data.columns.tolist())
return pandas.concat([map_data, columns])
One way is as below (pseudocode)
class FindPendingRecords:
    @classmethod
def find_file(cls):
return ["file1", "file2", "file3"]
def __init__(self):
self.files = self.find_file()
def get_excel_data(self):
for excel_data in self.files:
# process your excel_data
yield excel_data
Your main should be
if __name__ == "__main__":
try:
for PENDING_RECORDS in FindPendingRecords().get_excel_data():
# Do operations on PENDING_RECORDS
print (PENDING_RECORDS)
print("All done, Bill")
except Exception as exc:
print(exc)
Your find_file method will be
@classmethod
def find_file(cls):
    """Finds the excel files to process"""
    all_files = []
archive = ZipFile(config.FILE_LOCATION)
for file in archive.filelist:
        if 'Horrible Data Log ' in file.filename:
all_files.append(archive.extract(file.filename, config.UNZIP_LOCATION))
return all_files
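If the workbooks eventually sit loose in a folder rather than inside the zip archive, the same generator pattern works with glob. A sketch, reusing config.UNZIP_LOCATION and the pandas import from find_pending_records.py:
import glob
import os

def find_files():
    """Yield the path of every extracted Excel workbook, one at a time."""
    for path in sorted(glob.glob(os.path.join(config.UNZIP_LOCATION, '*.xlsx'))):
        yield path

for path in find_files():
    excel_data = pandas.read_excel(path)
    # hand each data frame to the existing processing pipeline here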
I am trying to put a bunch of CSV files into one workbook and here is my code:
import csv
import glob
import openpyxl
import os, sys
import pandas as pd
import xlsxwriter as xlwr
def main():
list_of_files = []
names = []
for csv_file in glob.glob(os.path.join('.', '*.csv')):
bleh = csv_file[2:]
name = bleh[:-4]
names.append(name)
df = pd.read_csv(csv_file, index_col=None, header=0)
list_of_files.append(df)
writer = pd.ExcelWriter('non_concussed_game_logs.xlsx')
for n, df in enumerate(list_of_files):
df.to_excel(writer, '%s' % names[n])
writer.save
if __name__ == "__main__":
main()
I am getting the error mentioned in the title of my post, but I am unsure why: I have used this script before and it worked, but it does not now. Any help is appreciated!
I figured it out: my CSV files were encoded in UTF-8, so I had to change the read_csv() call to
df = pd.read_csv(csv_file, index_col=None, header=0, encoding='utf-8')
and also add the parentheses to the writer.save line, as shown below.
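For completeness, the corrected tail of main() with both fixes applied looks like:
    writer = pd.ExcelWriter('non_concussed_game_logs.xlsx')
    for n, df in enumerate(list_of_files):
        df.to_excel(writer, '%s' % names[n])
    writer.save()  # parentheses added: writer.save alone is just a reference and never writes the file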