How can I concatenate multiple rows of excel data into one? - python

I'm currently facing an issue where I need to bring all of the data shown in the images below into one line only.
So using Python and Openpyxl, I tried to write a parsing script that reads the line and only copies when values are non-null or non-identical, into a new workbook.
I get out of range errors, and the code does not keep just the data I want. I've spent multiple hours on it, so I thought I would ask here to see if I can get unstuck.
I've read some documentation on Openpyxl and about making lists in python, tried a couple of videos on youtube, but none of them did exactly what I was trying to achieve.
import openpyxl
from openpyxl import Workbook

path = "sample.xlsx"
wb = openpyxl.load_workbook(path)
ws = wb.active

wb2 = Workbook()
ws2 = wb2.active

rows = ws.max_row
columns = ws.max_column

# One output list per input row.
listab = [[] for _ in range(rows)]

# BUG FIX: the original swapped the two ranges (the outer loop ran
# ``range(1, rows + 1)`` over the *columns* and vice versa), which caused the
# out-of-range errors.  Walk each row left-to-right instead, resetting the
# previous-value tracker at the start of every row.
for r in range(1, rows + 1):
    prevCell = " "
    for c in range(1, columns + 1):
        cellValue = ws.cell(row=r, column=c).value
        # "NULL" cells and repeats keep the previous value; anything else is
        # copied through as-is.
        if cellValue == "NULL" or cellValue == prevCell:
            listab[r - 1].append(prevCell)
        else:
            listab[r - 1].append(cellValue)
        prevCell = cellValue

# Copy the cleaned rows into the new workbook; every row now has exactly
# ``columns`` entries, so the indexing can no longer go out of range.
for r in range(1, rows + 1):
    for c in range(1, columns + 1):
        ws2.cell(row=r, column=c).value = listab[r - 1][c - 1]

print(listab)
wb2.save("output.xlsx")
There should be one line with the below information:
ods_service_id | service_name| service_plan_name| CPU | RAM | NIC | DRIVE |

Personally I would go with pandas.
import pandas as pd
# Load the sheet and replace NaN cells with the sentinel "NO DATA".
df_data = pd.read_excel('sample.xlsx')
df_data.fillna("NO DATA", inplace=True)

unique_ids = df_data.ods_service_ids.unique()

# Work with plain dicts, one per spreadsheet row.
records_list = df_data.to_dict('records')
keys_to_check = ['service_name', 'service_plan_name', 'CPU', 'RAM', 'NIC', 'DRIVE']

processed = {}
# Merge every row belonging to the same id into a single dict.
for service_id in unique_ids:
    merged = {}
    for record in records_list:
        if record['ods_service_ids'] != service_id:
            continue
        merged['ods_service_ids'] = service_id
        # For each key to check, keep the value only when it is non-null.
        for detail_key in keys_to_check:
            if record[detail_key] != "NO DATA":
                merged[detail_key] = record[detail_key]
    processed[service_id] = merged
# Note: duplicate values for the same key still overwrite each other.

# Back to a DataFrame with a fixed column order, then out to Excel.
output_data = list(processed.values())
df = pd.DataFrame(output_data)[['ods_service_ids', 'service_name', 'service_plan_name', 'CPU', 'RAM', 'NIC', 'DRIVE']]
df.to_excel("output.xlsx", sheet_name='Sheet_name_1', index=False)
The above should work but I wasn't really sure on how you wanted to save duplicated records for the same id. Do you look to store them as DRIVE_0, DRIVE_1, DRIVE_2 ?
EDIT:
df could be exported in a different way. Replaced below #export to Excel with the following :
df.to_excel("output.xlsx",sheet_name='Sheet_name_1')
EDIT 2:
With no input data it was hard to see any flaws. I corrected the code above using fake data.

To be honest, I think you've managed to get confused by data structures and come up with something far more complicated than you need.
One approach that would suit would be to use Python dictionaries for each service, updating them row by row.
wb = load_workbook("sample.xlsx")
ws = wb.active

# One dict per service id, keyed by the first column's value.
objs = {}
headers = next(ws.iter_rows(min_row=1, max_row=1, values_only=True))
for row in ws.iter_rows(min_row=2, values_only=True):
    if row[0] not in objs:
        obj = {key: value for key, value in zip(headers, row)}
        objs[obj['ods_service_id']] = obj
    else:
        # BUG FIX: look up the record that matches THIS row's id; the original
        # reused whatever ``obj`` happened to be bound to from an earlier row.
        obj = objs[row[0]]
        # update dict with non-None values (first three columns are identity
        # columns and never need updating)
        extra = {key: value for key, value in zip(headers[3:], row[3:]) if value != "NULL"}
        obj.update(extra)

# write to new workbook
wb2 = Workbook()
ws2 = wb2.active
ws2.append(headers)
for obj in objs.values():  # do they need sorting?
    # BUG FIX: the original iterated ``objs.values()`` into ``row`` but then
    # indexed the stale ``obj`` variable, writing the same record repeatedly.
    ws2.append([obj[key] for key in headers])
Note how you can do everything without using counters.

Related

Python: populate list in dictionary from data in excel sheet using openpyxl

I am trying to take the below data from excel sheet and create a dictionary with a list of mac address values in this format siteinv_dict = {1741 : [XX:XX:XX:XX:XX:XX, YY:YY:YY:YY:YY:YY]}
The list of mac addresses vary from col to col.
When trying to do it for just one number to try get it working i was trying something like this and failed but really i would like it to get each header number make it the key then list of mac addresses as values
wb = openpyxl.load_workbook('C:\Site_Details.xlsx', data_only=True)
sh = wb.active
siteno = "1741"
siteinv_dict = {}
for row in sh.rows:
if row[3].value == "1741":
# NOTE(review): nothing below ever reads the cell values, so no MAC
# addresses are collected.
for i in range(4):
# BUG: re-binding the whole dict on every pass discards anything gathered
# earlier; the loop always ends with {siteno: []}.
siteinv_dict ={siteno : [ ]}
You can construct a deque by column of the workbook, then left pop everything off the deque using non-strings as keys and strings as a values list into a new dict.
import openpyxl
from collections import deque
wb = openpyxl.load_workbook('C:\Site_Details.xlsx', data_only=True)
sh = wb.active

# Flatten the sheet column-by-column into a FIFO queue of non-empty cells.
siteinv_queue = deque(
    cell.value
    for col in sh.iter_cols()
    for cell in col
    if cell.value is not None
)

# Non-string entries start a new group (they are the site numbers); string
# entries (the MAC addresses) accumulate under the current site number.
siteinv_dict = {}
current_key = siteinv_queue.popleft()
current_macs = []
while siteinv_queue:
    entry = siteinv_queue.popleft()
    if isinstance(entry, str):
        current_macs.append(entry)
    else:
        siteinv_dict[current_key] = current_macs
        current_key, current_macs = entry, []
siteinv_dict[current_key] = current_macs  # flush the final group
print(siteinv_dict)
Output
{1741: ['xx:xx:xx:f0:9b:3f', 'xx:xx:xx:f0:5b:3f', 'xx:xx:xx:f0:4b:3f', 'xx:xx:xx:f0:3b:3f'], 250: ['yy:yy:yy:f0:9b:3f', 'yy:yy:yy:f0:9b:4f'], 731: ['zz:zz:zz:zz:2a:23:12', 'zz:zz:zz:zz:2a:23:13', 'zz:zz:zz:zz:2a:23:14']}

python excel subtract with 2 worksheet

Is it possible to create a Python script that automatically subtracts cell values between 2 worksheets in one Excel file?
I have checked some documents, and seem that use the method of pandas or openpyxl to do so. But I can't to do that. Do you have any suggestion to me? Many thanks.
Script:
from datetime import datetime
import pandas as pd
import openpyxl as xl;
currDateTime = datetime.now()
Sheet1 ="C:\\Users\\peter\\Downloads\\" + currDateTime.strftime('%Y%m%d') + "\\5250A" + "\\5250A.xlsx"
# NOTE(review): the same workbook file is loaded three separate times below;
# edits made through one handle are invisible to the other two, and only
# wb5 is ever saved.
wb3 = xl.load_workbook(Sheet1)
ws3 = wb3.worksheets[0]
wb4 = xl.load_workbook(Sheet1)
ws4 = wb4.worksheets[1]
wb5 = xl.load_workbook(Sheet1)
ws5 = wb5.create_sheet("Done")
# BUG: openpyxl Workbook has no ``subtract`` method -- this line raises
# AttributeError.  The subtraction has to be done cell by cell.
wb4.subtract(wb3)
wb5.save(str(Sheet1))
Expected Result:
Do so in excel coule be way easier I think. There could be a smarter way to write this code.
[NOTE] I just do the subtraction cell by cell, so any mismatch — like the same row but a different dept. id, or the same column but a different item — will cause errors. If you run into this situation, you'll have to change some of the following code.
import openpyxl as xl
def get_row_values(worksheet):
    """Return every cell value of *worksheet* as a list of row lists.

    return data structure:
    [
        [A1, B1, C1, ...],
        [A2, B2, C2, ...],
        ...
    ]
    """
    return [[cell.value for cell in row] for row in worksheet.rows]
if __name__ == '__main__':
    # load excel file and grab the first two worksheets
    wb = xl.load_workbook('test1.xlsx')
    first_sheet = wb.worksheets[0]
    second_sheet = wb.worksheets[1]

    # pull both sheets into plain lists of row values
    rows_a = get_row_values(first_sheet)
    rows_b = get_row_values(second_sheet)

    # results go into a freshly created sheet
    result_sheet = wb.create_sheet('Done')
    # the header row is copied through unchanged
    result_sheet.append(rows_a[0])

    for row_idx in range(1, len(rows_a)):
        # subtract cell by cell
        new_row = []
        for col_idx, cell_value in enumerate(rows_a[row_idx]):
            if col_idx == 0:
                # first column is the row label -- copied, not subtracted
                new_row.append(cell_value)
            elif rows_a[row_idx][0] == rows_b[row_idx][0]:
                # process only when the first-column labels match
                new_row.append(cell_value - rows_b[row_idx][col_idx])
        result_sheet.append(new_row)

    wb.save('test2.xlsx')
here's my sample excel file
first sheet:
second sheet:
generated sheet:

openpyxl: Formulas read as blanks in some (key use) cases

My code downloads a .xlsx file from google drive (using pydrive), finds some blank cells with pandas, and fills in those blank cells with openpyxl.
When I open the openpyxl altered file, everything looks great. However, when I use the pandas read_excel function, all cells that have equations are read as blanks. I suspect the issue is with openpyxl because when I preview the file on drive, those cells are blank. There is no issue with a file that openpyxl hasn't touched.
It looks like my issue is very similar to this one, but since my objective is just to leave the formulas untouched (I only want to fill blank cells), I don't really want to parse the formulas and I'm not really sure how or if to apply Felipe's fix.
I'd like to be able to download the file to plot it with bokeh, and users and python will both be editing the program, so I'd really like pandas to be able to read the equations whether its a user modified file or an openpyxl modified file. The equations in the file were click and drag "shared equations", and I'd like to keep it that way if possible so ideally I'd like to avoid using data_only=True. I tried specifying data_only=False, but this didn't appear to change anything.
I'm using openpyxl 2.4 (upgraded from 2.3.5), and I keep Excel closed while the code is running.
Versions of the file before and after openpyxl modification are available here.
My code is here, all openpyxl code is isolated to :
# Import Libraries
import datetime
import imp
import os
import pandas as pd
from openpyxl import load_workbook
from itertools import islice
# Relative imports for bokeh interaction
# imp.load_source loads the sibling project modules by file path
# (``imp`` is deprecated in Python 3; kept as-is here).
dl = imp.load_source('downloader', os.getcwd() +
'/Project/downloader.py')
gdu = imp.load_source('googledriveutils', os.getcwd() +
'/Project/googledriveutils.py')
# Local aliases for the google-drive helper functions.
remove_file = gdu.remove_file
find_folderid = gdu.find_folderid
get_file_list = gdu.get_file_list
# Define constants
COL_LABEL = '\nProbe - '  # substring that marks probe data columns
# TODO: ORP PROBE: REVISE THIS DATE when orp probe is added
IGNORE_BEFORE = pd.to_datetime('5.24.2016')  # rows before this date are skipped
# Maps spreadsheet probe column labels to the downloader's probe names.
PROBE_DICT = {'DO (mg/L)': 'DO mg/L',
'pH': 'pH',
'NH4+ (mgN/L)': 'Ammonium',
'ORP (mV)': 'ORP mV'}
TS = '\nTimestamps'  # substring that marks timestamp columns
def save_to_workbook(newval,
                     date,
                     header,
                     rows_to_skip=12,
                     wbname='temp.xlsx',
                     sheet_name='Reactor Data'):
    """Write *newval* into the cell addressed by *header* and *date*.

    The header row is row ``rows_to_skip + 1``; *date* is matched in
    column A below it.  Raises ValueError when either cannot be found
    (resolving the two "Error if ... isn't found" TODOs).
    """
    wb = load_workbook(wbname)
    ws = wb[sheet_name]

    colno = None
    for cell in ws[rows_to_skip + 1]:
        if cell.value == header:
            colno = cell.col_idx
            break
    if colno is None:
        raise ValueError('header %r not found' % (header,))

    rowno = None
    for row in ws.iter_rows(min_row=rows_to_skip + 1, min_col=1, max_col=1):
        for cell in row:
            if cell.value == date:
                rowno = cell.row
                break
        if rowno is not None:
            # BUG FIX: the original ``break`` only left the inner loop, so the
            # outer scan kept running over the whole column.
            break
    if rowno is None:
        raise ValueError('date %r not found' % (date,))

    ws.cell(row=rowno, column=colno).value = newval
    wb.save(wbname)
    # BUG FIX: the original ended with ``return df`` although no ``df`` exists
    # in this scope, so every call raised NameError after saving.
def find_r1masterfile():
    """Descend the Drive folder tree and return the 'AMX RCT.xlsx' file.

    Returns None if no file with that name exists in the final folder.
    """
    # Navigate through the directories, one level at a time.
    folder = find_folderid('Winkler Lab', 'root')
    folder = find_folderid('KathrynsProjects', folder)
    folder = find_folderid('Anammox Reactor', folder)
    folder = find_folderid('Reactor Trials', folder)
    # Scan the final folder's listing for the master file.
    # TODO: error if there was no file with that name
    for candidate in get_file_list(folder):
        if candidate['title'] == 'AMX RCT.xlsx':
            return candidate
def save_r1masterfile(csv, rows_to_skip=12, filename='temp.xlsx', sheet_name='Reactor Data'):
# Download the Drive master file to *filename*.  If *csv* is truthy, return
# the raw Drive file object; otherwise parse it into a pandas DataFrame,
# delete the temp copy, and return the frame.  (Python 2 code.)
# Get the file we want
master_file = find_r1masterfile()
try:
master_file.GetContentFile(filename)
except Exception, e:
print "Warning: Something wrong with file R1 Master File."
print str(e)
# TODO: add an email alarm to responsible user
if csv:
return master_file
else:
# convert to dataframe
# data_only=True makes formula cells yield their *cached* values -- None if
# the file was last written by a program that never evaluated them.
wb = load_workbook(filename, data_only=True)
ws = wb[sheet_name]
print ws["B14"].value
data = ws.values
# Drop the metadata rows above the real header row.
data = list(data)[rows_to_skip:]
cols = list(data[0])
del cols[0]
del data[0]
# First column becomes the index; remaining columns become the data.
idx = [r[0] for r in data]
data = (islice(r, 1, None) for r in data)
df = pd.DataFrame(data, index=idx, columns=cols)
print df.dropna(how='all')
remove_file(filename)
return df
def upload_r1masterfile(filename='temp.xlsx'):
# Push the locally modified temp file back up to the Drive master file.
# (Python 2 code; failures are only printed, not raised.)
# Get the file we want
master_file = find_r1masterfile()
try:
master_file.SetContentFile(filename)
master_file.Upload()
except Exception, e:
print "Warning: Something wrong with file R1 Master File."
print str(e)
# TODO: add an email alarm to responsible user
def populate_r1masterfile(rows_to_skip=12, filename='temp.xlsx'):
# Download the master file, locate blank probe cells, fetch replacement
# values from the downloader, and write each one back to the workbook.
# Returns the probe DataFrame with the blanks filled in.  (Python 2 code.)
# Get the R1 master file as a file
save_r1masterfile(True)
# Convert the juicy stuff to a dataframe
masterdf = pd.read_excel(filename,
sheetname='Reactor Data',
encoding="utf-16",
skiprows=rows_to_skip,
sep='\t',
index_col='Date',
keep_default_na=False,
na_values=['-1.#IND', '1.#QNAN', '1.#IND',
'-1.#QNAN', '','N/A', '#NA', 'NA'
'NULL', 'NaN', '-NaN', 'nan', '-nan'])
# NOTE(review): 'NA' and 'NULL' above have no comma between them, so Python
# concatenates them into the single token 'NANULL' -- probably unintended.
# Find what we will populate with probe data
# Find timestamps
ts_columns = [col for col in masterdf.columns if TS in col]
tsdf = masterdf[ts_columns]
# Find probes, ignore before given date
probe_columns = [col for col in masterdf.columns if COL_LABEL in col]
probedf = masterdf[probe_columns]
probedf = probedf[masterdf.index > IGNORE_BEFORE]
# Find Indices and column labels of blank values
stackdf = probedf.stack(dropna=False)
empty = stackdf[stackdf.isnull()].index.tolist()
# For each blank look for the probe, time & date of cycle, and return val
for each in empty:
# each is a (date, column-label) pair; the label encodes the probe name.
probe, time = each[1].split(COL_LABEL)
time = tsdf.loc[each[0], time+TS]
ts = each[0]+pd.DateOffset(hour=time.hour, minute=time.minute)
val = dl.get_val_from(1, ts, PROBE_DICT.get(probe))
probedf.set_value(each[0], each[1], val)
# Save that value to the workbook
save_to_workbook(val, each[0], each[1])
upload_r1masterfile()
print 'Master file updated. ' + str(datetime.datetime.now())
remove_file('temp.xlsx')
return probedf
UPDATE
I modified my code as per Charlie's suggestions (updated above). But I'm still getting Nones in the dataframe. To provide a more specific example, why is it when I run this code:
from openpyxl import load_workbook
# data_only=True returns the *cached* result of each formula cell.  openpyxl
# itself never evaluates formulas, so if the file was last written by openpyxl
# the cache is empty and the value comes back as None -- as shown below.
wb = load_workbook('AMX RCT mod.xlsx', data_only=True)
ws = wb['Reactor Data']
print 'Value of B14 Formula is: ' + str(ws["B14"].value)
With this file, I get back?:
Value of B14 Formula is: None
Is there a workaround?
Using openpyxl 2.4 you might be able to do what you need in a single pass. I've taken your first function and adapted it.
from itertools import islice
from pandas import DataFrame
def save_to_workbook(newval,
                     date,
                     header,
                     rows_to_skip=12,
                     wbname='temp.xlsx',
                     sheet_name='Reactor Data'):
    """Update one cell (found via *header* column and *date* row) and
    return the whole sheet as a DataFrame, in a single pass.

    Resolves the original's "TODO: Fix this" -- it stored undefined /
    wrong objects into ``colno`` and ``rowno``.
    """
    wb = load_workbook(wbname)
    ws = wb[sheet_name]

    rowno = None
    colno = None
    for cell in ws[1]:
        # TODO: Error if header isn't found
        if cell.value == header:
            # BUG FIX: the original assigned the undefined name ``col``;
            # the numeric column index lives on the cell itself.
            colno = cell.col_idx
            break
    for row in ws.iter_rows(min_row=rows_to_skip+1, min_col=1, max_col=1):
        for cell in row:
            # TODO: Error if date isn't found
            if cell.value == date:
                # BUG FIX: the original stored the row *tuple*; ws.cell()
                # needs the numeric row index.
                rowno = cell.row
                break
    if rowno is not None and colno is not None:
        ws.cell(row=rowno, column=colno).value = newval

    # convert to dataframe: first row is the header, first column the index
    data = ws.values
    cols = next(data)[1:]
    data = list(data)
    idx = [r[0] for r in data]
    data = (islice(r, 1, None) for r in data)
    df = DataFrame(data, index=idx, columns=cols)
    return df
This probably doesn't do everything you want but will hopefully get you started. It also avoids saving and parsing a whole workbook which could make it quite a bit faster.
To work with openpyxl 2.4 you will either need to do pip install -U --pre openpyxl or work with a checkout.
For further information on using openpyxl and pandas together please see the documentation.
Charlie's Answer from the mailing list:
So, if you want to keep the formulae then you must not use data only mode.
As previously stated, openpyxl never evaluates formulae so if you want to
know value of A3 you must pass the file to an application such as Excel
or OpenOffice — you can run OpenOffice headless for this kind of thing or
use xlwings for Excel — that does do formula evaluation. You could then
read this file in data only mode to see the result of the calculation.
Alternatively you could try using something like PyCel to do the
evaluation for you. But, basically if you want to do calculations: do them
in Python.
As per his suggestion, my workaround was to redo all the calculations column by column as they are done in the excel file. I.E. For excel File like this:
col1 col2 col3 col4
row1 1 3 =A1+B1 =1+3
row2 2 4 =A2+B2 =2+4
I import it as a dataframe like this (to maintain equations as a string):
# data_only=False keeps the formula *strings* (e.g. '=A1+B1') instead of values.
wb = load_workbook(filename, data_only=False)
ws = wb[sheet_name]

rows = ws.values
cols = next(rows)[1:]           # header row, minus the index column
rows = list(rows)
idx = [row[0] for row in rows]  # first column becomes the DataFrame index
body = (islice(row, 1, None) for row in rows)
df = DataFrame(body, index=idx, columns=cols)
And then do the following:
# SECURITY WARNING: eval() executes whatever text is in the cell -- only use
# this on fully trusted workbooks.  str(x)[1:] strips the leading '=' so the
# rest of the formula is evaluated as a Python expression.
parse_excel = lambda x: eval(str(x)[1:]) if isinstance(x, str) else x
for col in df.columns:
    try:
        df[col] = df[col].map(parse_excel)
    # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit still
    # propagate; columns whose formulas cannot be evaluated are left as-is.
    except Exception:
        pass
df['col3'] = df['col2']+df['col1']
I'm certain this is maybe the clumsiest way to do this, but it works for now.

Using xlwt vs openpyxl

I need some help with openpyxl in PYTHON. I have been using xlwt quite successfully, but now I have some files (in MySQL Workbench) that contain more than 65,000 rows. I know I can create a CSV file, but XLSX is the preferred output. I am able to create a workbook using openpyxl, but I have not been successful placing the MySQL data into the table. The main portion of the program using xlwt is pretty straightforward (see below). I just cannot seem to figure out how to do the same thing using openpyxl. I've tried a number of different combinations and solutions. I just get stuck after the "for x in result:".
file_dest = "c:\home\test.xls"
result = dest.execute("select a, b, c, d from filea")
for x in result:
# BUG: ``rw`` is never initialised before this increment (NameError).
rw = rw + 1
# BUG: should be ``book.add_sheet`` -- and creating the sheet inside the
# loop would re-create it for every database row anyway.
sheet1 = book.add.sheet("Sheet 1")
row1 = sheet1.row(rw)
# BUG: all four writes target column 1, so x[1..3] overwrite x[0].
row1.write(1, x[0])
row1.write(1, x[1])
row1.write(1, x[2])
row1.write(1, x[3])
book.save(file_dest)
This is a good use case for using append():
Appends a group of values at the bottom of the current sheet.
If it’s a list: all values are added in order, starting from the first
column
import openpyxl

file_dest = "test.xlsx"

workbook = openpyxl.Workbook()
# ``Workbook.active`` replaces the long-deprecated get_active_sheet(),
# matching how the other openpyxl snippets on this page access the sheet.
worksheet = workbook.active

result = dest.execute("select a, b, c, d from filea")
for x in result:
    # append() writes one whole row at a time, starting at the first column.
    worksheet.append(list(x))

workbook.save(file_dest)
A little example:
# Updated to the current openpyxl API: Workbook() no longer accepts an
# ``encoding`` argument, and cells are addressed as ws['A1'] / ws.cell(...)
# rather than the removed ws.cell('A1') coordinate form.
wb = Workbook()
ws = wb.active
row = 2
ws.title = "Report"
ws['A1'] = "Value"
ws['B1'] = "Note"
for item in results:
    ws.cell(row=row, column=1, value=item[0])
    ws.cell(row=row, column=2, value=item[1])
    row += 1
http://pythonhosted.org//openpyxl/

Reading Excel File using Python, how do I get the values of a specific column with indicated column name?

I've an Excel File:
Arm_id DSPName DSPCode HubCode PinCode PPTL
1 JaVAS 01 AGR 282001 1,2
2 JaVAS 01 AGR 282002 3,4
3 JaVAS 01 AGR 282003 5,6
I want to save a string in the form Arm_id,DSPCode,Pincode. This format is configurable, i.e. it might change to DSPCode,Arm_id,Pincode. I save it in a list like:
FORMAT = ['Arm_id', 'DSPName', 'Pincode']
How do I read the content of a specific column with provided name, given that the FORMAT is configurable?
This is what I tried. Currently I'm able to read all the content in the file
from xlrd import open_workbook
wb = open_workbook('sample.xls')
# Collect every cell of every sheet into a list of row lists.
for s in wb.sheets():
#print 'Sheet:',s.name
values = []
for row in range(s.nrows):
col_value = []
for col in range(s.ncols):
value = (s.cell(row,col).value)
# Numeric cells come back from xlrd as floats; render whole numbers as
# int strings, leave everything else untouched.
try : value = str(int(value))
except : pass
col_value.append(value)
values.append(col_value)
print values
My output is :
[
[u'Arm_id', u'DSPName', u'DSPCode', u'HubCode', u'PinCode', u'PPTL'],
['1', u'JaVAS', '1', u'AGR', '282001', u'1,2'],
['2', u'JaVAS', '1', u'AGR', '282002', u'3,4'],
['3', u'JaVAS', '1', u'AGR', '282003', u'5,6']
]
Then I loop around values[0] trying to find out the FORMAT content in values[0] and then getting the index of Arm_id, DSPname and Pincode in the values[0] and then from next loop I know the index of all the FORMAT factors , thereby getting to know which value do I need to get .
But this is such a poor solution.
How do I get the values of a specific column with name in excel file?
A somewhat late answer, but with pandas, it is possible to get directly a column of an excel file:
import pandas
# read_excel loads the first sheet; its first row supplies the column names.
df = pandas.read_excel('sample.xls')
#print the column names
print df.columns
#get the values for a given column (as a numpy array)
values = df['Arm_id'].values
#get a data frame with selected columns, in the order given by FORMAT
FORMAT = ['Arm_id', 'DSPName', 'Pincode']
df_selected = df[FORMAT]
Make sure you have installed xlrd and pandas:
pip install pandas xlrd
This is one approach:
from xlrd import open_workbook
class Arm(object):
    """Plain record type holding the six fields of one spreadsheet row."""

    def __init__(self, id, dsp_name, dsp_code, hub_code, pin_code, pptl):
        self.id = id
        self.dsp_name = dsp_name
        self.dsp_code = dsp_code
        self.hub_code = hub_code
        self.pin_code = pin_code
        self.pptl = pptl

    def __str__(self):
        # Same layout as before, built from a single named template.
        template = ("Arm object:\n"
                    " Arm_id = {0}\n"
                    " DSPName = {1}\n"
                    " DSPCode = {2}\n"
                    " HubCode = {3}\n"
                    " PinCode = {4} \n"
                    " PPTL = {5}")
        return template.format(self.id, self.dsp_name, self.dsp_code,
                               self.hub_code, self.pin_code, self.pptl)
wb = open_workbook('sample.xls')
# Build one Arm object per data row (row 0 is the header and is skipped).
for sheet in wb.sheets():
number_of_rows = sheet.nrows
number_of_columns = sheet.ncols
items = []
# NOTE(review): ``rows`` is created but never used anywhere below.
rows = []
for row in range(1, number_of_rows):
values = []
for col in range(number_of_columns):
value = (sheet.cell(row,col).value)
# Whole-number floats are normalised to int strings; other values pass
# through unchanged thanks to the finally block.
try:
value = str(int(value))
except ValueError:
pass
finally:
values.append(value)
item = Arm(*values)
items.append(item)
# Print every record and demonstrate dot-notation attribute access.
for item in items:
print item
print("Accessing one single value (eg. DSPName): {0}".format(item.dsp_name))
print
You don't have to use a custom class, you can simply take a dict(). If you use a class however, you can access all values via dot-notation, as you see above.
Here is the output of the script above:
Arm object:
Arm_id = 1
DSPName = JaVAS
DSPCode = 1
HubCode = AGR
PinCode = 282001
PPTL = 1
Accessing one single value (eg. DSPName): JaVAS
Arm object:
Arm_id = 2
DSPName = JaVAS
DSPCode = 1
HubCode = AGR
PinCode = 282002
PPTL = 3
Accessing one single value (eg. DSPName): JaVAS
Arm object:
Arm_id = 3
DSPName = JaVAS
DSPCode = 1
HubCode = AGR
PinCode = 282003
PPTL = 5
Accessing one single value (eg. DSPName): JaVAS
So the key parts are to grab the header ( col_names = s.row(0) ) and when iterating through the rows, to skip the first row which isn't needed for row in range(1, s.nrows) - done by using range from 1 onwards (not the implicit 0). You then use zip to step through the rows holding 'name' as the header of the column.
from xlrd import open_workbook
wb = open_workbook('Book2.xls')
values = []
for s in wb.sheets():
#print 'Sheet:',s.name
for row in range(1, s.nrows):
col_names = s.row(0)
col_value = []
for name, col in zip(col_names, range(s.ncols)):
value = (s.cell(row,col).value)
try : value = str(int(value))
except : pass
col_value.append((name.value, value))
values.append(col_value)
print values
By using pandas we can read excel easily.
import pandas as pd
from pandas import ExcelWriter
from pandas import ExcelFile
# sheet_name selects the worksheet; the first row becomes DataF.columns.
DataF=pd.read_excel("Test.xlsx",sheet_name='Sheet1')
print("Column headings:")
print(DataF.columns)
Test at :https://repl.it
Reference: https://pythonspot.com/read-excel-with-pandas/
Here is the code to read an excel file and and print all the cells present in column 1 (except the first cell i.e the header):
import xlrd
# Open the workbook and grab its first sheet.
file_location="C:\pythonprog\xxx.xlsv"
workbook = xlrd.open_workbook(file_location)
sheet = workbook.sheet_by_index(0)

# Header cell first, then every data cell in column 0.
print(sheet.cell_value(0, 0))
for row_num in range(1, sheet.nrows):
    print(sheet.cell_value(row_num, 0))
The approach I took reads the header information from the first row to determine the indexes of the columns of interest.
You mentioned in the question that you also want the values output to a string. I dynamically build a format string for the output from the FORMAT column list. Rows are appended to the values string separated by a new line char.
The output column order is determined by the order of the column names in the FORMAT list.
In my code below the case of the column name in the FORMAT list is important. In the question above you've got 'Pincode' in your FORMAT list, but 'PinCode' in your excel. This wouldn't work below, it would need to be 'PinCode'.
from xlrd import open_workbook
wb = open_workbook('sample.xls')
FORMAT = ['Arm_id', 'DSPName', 'PinCode']
values = ""
for s in wb.sheets():
headerRow = s.row(0)
columnIndex = [x for y in FORMAT for x in range(len(headerRow)) if y == firstRow[x].value]
formatString = ("%s,"*len(columnIndex))[0:-1] + "\n"
for row in range(1,s.nrows):
currentRow = s.row(row)
currentRowValues = [currentRow[x].value for x in columnIndex]
values += formatString % tuple(currentRowValues)
print values
For the sample input you gave above this code outputs:
>>> 1.0,JaVAS,282001.0
2.0,JaVAS,282002.0
3.0,JaVAS,282003.0
And because I'm a python noob, props be to:
this answer,
this answer,
this question,
this question
and this answer.
I have read using openpyxl library,
import openpyxl
from pathlib import Path
# Build the path and open the workbook.
xlsx_file = Path('C:\\Users\\Amit\\Desktop\\ReadExcel', 'ReadData.xlsx')
wb_obj = openpyxl.load_workbook(xlsx_file)

# Read the active sheet:
sheet = wb_obj.active

# Walk the sheet column by column, printing each column's cells in turn.
for col_idx in range(sheet.max_column):
    print(f'i = {col_idx}')
    for row in sheet.iter_rows():
        print(row[col_idx].value)
Although I almost always just use pandas for this, my current little tool is being packaged into an executable and including pandas is overkill. So I created a version of poida's solution that resulted in a list of named tuples. His code with this change would look like this:
from xlrd import open_workbook
from collections import namedtuple
from pprint import pprint
wb = open_workbook('sample.xls')

FORMAT = ['Arm_id', 'DSPName', 'PinCode']
# One namedtuple class whose fields mirror the requested columns.
OneRow = namedtuple('OneRow', ' '.join(FORMAT))

all_rows = []
for sheet in wb.sheets():
    header = sheet.row(0)
    # Header-cell indices whose labels appear in FORMAT, in FORMAT order.
    wanted = [idx
              for label in FORMAT
              for idx in range(len(header))
              if label == header[idx].value]
    for row_num in range(1, sheet.nrows):
        cells = sheet.row(row_num)
        all_rows.append(OneRow(*[cells[idx].value for idx in wanted]))

pprint(all_rows)

Categories