Upload dataframe, show content and allow editing in dash - python

I am coding a dataframe viewer. On the main page there is an upload input that reads a file and stores the content in a dcc.Store(id="store").
@app.callback(
    Output("store", "data"),
    Input("upload-data", "contents"),
    Input("upload-data", "filename"),
)
def read_df(contents, filename):
    if contents:  # only if a file has been uploaded
        df = parse_data(contents, filename)
        return df.to_dict('records')
The dataframe content is then shown in a dash_table.DataTable. I would like the stored data to be updated when the table is edited. Is it possible to do that while avoiding circular dependencies, duplicate outputs, or nonexistent-object errors?
@app.callback(
    Output("table_frame", "children"),
    [Input("store", "data"),
     Input("upload-data", "filename")]
)
def update_table_frame(df, filename):
    table = html.Div()
    if df is not None and len(df) > 0:
        df = pd.DataFrame(df)
        table = html.Div(
            [
                html.H5(filename),
                dash_table.DataTable(
                    id="table",
                    data=df.to_dict('records'),
                    columns=[{"name": i, "id": i, 'deletable': True,
                              'renamable': True} for i in df.columns],
                    page_current=0,
                    page_size=PAGE_SIZE,
                    page_action='custom',
                    page_count=int(np.ceil(len(df) / PAGE_SIZE)),
                    sort_action='custom',
                    sort_mode='multi',
                    sort_by=[],
                    filter_action='custom',
                    filter_query='',
                    # row_deletable=True,
                    editable=True,
                    style_table={'overflowX': 'auto'}
                ),
                html.Button('Download dataframe', id='download', n_clicks=0, className="btn btn-primary"),
                dcc.Download(id="download-df")
            ]
        )
    return table
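A side note on the "nonexistent objects" part of the question: the DataTable with id="table" only exists after a file is uploaded, so callbacks that reference it fail validation against the initial layout. A minimal sketch of the usual workaround, assuming the standard Dash constructor:

import dash

# Because id="table" is created inside a callback rather than in the initial
# layout, tell Dash not to validate callback ids against the initial layout.
app = dash.Dash(__name__, suppress_callback_exceptions=True)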
I tried to update the stored data in the same callback that reads the dataframe, but it throws all kinds of errors.
@app.callback(
    Output("store", "data"),
    Input("upload-data", "contents"),
    Input("upload-data", "filename"),
    Input("store", "data"),
    Input('table', "data"),
    Input('table', "columns"),
    Input('table', "page_current"),
    Input('table', "page_size")
)
def read_df(contents, filename, df, rows, columns, page_current, page_size):
    ctx = dash.callback_context
    if not ctx.triggered:
        input_id = ''
    else:
        input_id = ctx.triggered[0]['prop_id'].split('.')[0]
    if input_id == 'table':
        df = pd.DataFrame(df)
        if len(rows) <= page_size:
            step = abs(page_current - previous_page)
            if previous_page < page_current:
                return pd.DataFrame(pd.concat([df[0:((page_current - step) * page_size)], pd.DataFrame(rows),
                                               df[((page_current - step + 1) * page_size):len(df)]], axis=0),
                                    columns=[c['name'] for c in columns]).to_dict('records')
            else:
                return pd.DataFrame(pd.concat([df[0:((page_current + step) * page_size)], pd.DataFrame(rows),
                                               df[((page_current + step + 1) * page_size):len(df)]], axis=0),
                                    columns=[c['name'] for c in columns]).to_dict('records')
    elif input_id == 'upload-data':
        if contents:  # only if a file has been uploaded
            df = parse_data(contents, filename)
            return df.to_dict('records')
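For what it is worth, a minimal sketch of one way to keep a single callback that owns the store without circular dependencies or duplicate outputs: make only the upload contents and the table data Inputs, read everything else as State, and branch on the trigger. The paging props, parse_data, and PAGE_SIZE are taken from the question; the sketch ignores custom sort/filter and assumes the column set is unchanged, so the visible page maps onto a contiguous slice of the stored frame.

import dash
import pandas as pd
from dash.dependencies import Input, Output, State


@app.callback(
    Output("store", "data"),
    Input("upload-data", "contents"),
    Input("table", "data"),                 # fires when a cell is edited
    State("upload-data", "filename"),
    State("store", "data"),
    State("table", "page_current"),
    State("table", "page_size"),
    prevent_initial_call=True,
)
def sync_store(contents, edited_rows, filename, stored, page_current, page_size):
    trigger = dash.callback_context.triggered[0]["prop_id"].split(".")[0]
    if trigger == "upload-data" and contents:
        # New file: replace the whole store.
        return parse_data(contents, filename).to_dict("records")
    if trigger == "table" and edited_rows and stored:
        # Edited page: write the visible rows back over the corresponding slice.
        df = pd.DataFrame(stored)
        edited = pd.DataFrame(edited_rows)
        start = (page_current or 0) * page_size
        df.iloc[start:start + len(edited)] = edited.values
        return df.to_dict("records")
    return dash.no_update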

ValueError: ('Lengths must match to compare', (5854,), (0,))

I am trying to create something based on the idea here: DataTable Interactivity. However, I am getting this error message:
ValueError: ('Lengths must match to compare', (5854,), (0,))
Below is the code I tried:
dbc.Col([
    html.P("Table:",
           style={"textDecoration": "underline"}),
    dbc.Col([
        html.Table([
            html.Td('Sub', id='', style=header_column_cell_style),    # title of the column
            html.Td('', id='subtotal', style=body_column_cell_style)  # data of the column
        ]),
        dash_table.DataTable(id='table',
                             columns=[
                                 {'name': 'Today', "id": 'Date'},
                                 {'name': 'Product', "id": 'Product'},
                                 {'name': 'Sale', "id": 'Sale'},
                             ],
                             sort_action='native',  # "custom",
                             sort_mode="multi",
                             filter_action="native",
                             row_selectable='multi',
                             # selected_rows = []
                             # data=df.to_dict('records')
                             ),
    ])
]),
])
# table
@app.callback(
    Output('table', 'data'),
    Input('date_dd', 'value')
)
def update_table(selection):
    if len(selection) == 0:
        return dash.no_update
    else:
        selection = datetime.strptime(selection, '%Y-%m-%d').date()
        dff = df[df['Date'] == selection]
        columns = dff[['Date', 'Product', 'Sale']]
        data = columns.to_dict('records')
        return data
@app.callback(
    Output('subtotal', 'children'),
    Input('table', 'derived_virtual_data'),
    Input('table', 'derived_virtual_selected_rows'),
    Input('date_dd', 'value')
)
def update_table(rows, derived_virtual_selected_rows, selection):
    if derived_virtual_selected_rows is None and len(selection) == 0:
        derived_virtual_selected_rows = []
    dff = df[df['Date'] == selection]
    dff1 = dff if rows is None else pd.DataFrame(rows)
    subt = dff1['Sale'].sum()
    return subt
Can anyone assist?
I had also tried this:
dff = df[df['Date'].eq(selection)]
but got another error message:
ValueError: Lengths must be equal
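A hedged sketch of one possible fix: pandas raises "Lengths must match to compare" when a Series is compared against a list-like of a different length, which is what happens if the dropdown value arrives as a list (possibly empty) rather than a single string. Coercing the selection to one scalar date, and bailing out early when it is empty, avoids both errors; names follow the question's code.

from datetime import datetime

import dash
import pandas as pd
from dash.dependencies import Input, Output


@app.callback(Output('table', 'data'), Input('date_dd', 'value'))
def update_table(selection):
    if not selection:                    # None or [] -> leave the table untouched
        return dash.no_update
    if isinstance(selection, list):      # a multi=True dropdown returns a list
        selection = selection[0]
    selected = datetime.strptime(selection, '%Y-%m-%d').date()
    # Compare date against date to avoid dtype surprises.
    dff = df[pd.to_datetime(df['Date']).dt.date == selected]
    return dff[['Date', 'Product', 'Sale']].to_dict('records')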

Output a graph and table from a single dash callback

I would like to generate a graph and table from a dash callback, but the code outputs one or the other.
Below is the final part of the code. The data is filtered by chained callbacks (two drop-downs - LGA and SMA) and a radio button (standard deviation picker).
Is there a simple way to generate the two outputs or do I need to add in another callback and define additional functions?
    html.Div(
        id='graph-container',
        children=[]),
    dash_table.DataTable(
        id='table-container',
        columns=[{"name": i, "id": i} for i in df],
        data=df.to_dict('records'),
    ),
])
# Populate the SMA dropdown with options and values
@app.callback(
    Output('SMA-dpdn', 'options'),
    Output('SMA-dpdn', 'value'),
    Input('LGA-dpdn', 'value'),
)
def set_LGA_options(chosen_LGA):
    dff = df[df.LGA == chosen_LGA]
    SMAs_of_LGAs = [{'label': c, 'value': c} for c in sorted(dff.SMA.unique())]
    values_selected = [x['value'] for x in SMAs_of_LGAs]
    return SMAs_of_LGAs, values_selected

# Create graph/table component and populate
@app.callback(
    Output('graph-container', 'children'),
    Output('table-container', 'data'),
    Input('radio_items', 'value'),
    Input('SMA-dpdn', 'value'),
    Input('LGA-dpdn', 'value'),
    prevent_initial_call=True
)
def graph(max_deviations, selected_SMA, selected_LGA):
    if len(selected_SMA) == 0:
        return dash.no_update
    else:
        dff = df[(df.LGA == selected_LGA) & (df.SMA.isin(selected_SMA))]
        data = pd.DataFrame(data=dff)
        x = dff.TIME
        y = dff.CHANGE
        mean = np.mean(y)
        standard_deviation = np.std(y)
        distance_from_mean = abs(y - mean)
        not_outlier = distance_from_mean < max_deviations * standard_deviation
        no_outliers = y[not_outlier]
        trim_outliers = pd.DataFrame(data=no_outliers)
        dfd = pd.merge(trim_outliers, dff, left_index=True, right_index=True)
        dfd['CHANGE'] = dfd['CHANGE_x']
        fig = px.scatter(dfd, x='TIME', y='CHANGE', color='SMA', trendline='ols', size='PV', height=500, width=800, hover_name='SMA')
        return dfd.to_dict('records')
        return dcc.Graph(id='display-map', figure=fig)

if __name__ == '__main__':
    app.run_server(debug=False)
You have correctly defined two Outputs on your callback, but you are only returning a single value, which is incorrect.
The code below is your example stripped down, and it demonstrates the correct way to return multiple values. I have also switched the no_update logic around to show a cleaner way to structure the code, which reduces the risk of introducing a bug on return:
@app.callback(
    Output('graph-container', 'children'),
    Output('table-container', 'data'),
    Input('radio_items', 'value'),
    Input('SMA-dpdn', 'value'),
    Input('LGA-dpdn', 'value'),
    prevent_initial_call=True
)
def graph(max_deviations, selected_SMA, selected_LGA):
    if len(selected_SMA) > 0:
        # Do processing to create a dfd record and the figure for the Graph
        return dcc.Graph(id='display-map', figure=fig), dfd.to_dict('records')

    # No update if length was zero.
    return dash.no_update, dash.no_update
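Pulling the question's own processing back into that skeleton, the full corrected callback could look roughly like the sketch below (same filtering and outlier trimming as the question; the only change is that the figure and the table data are returned together):

@app.callback(
    Output('graph-container', 'children'),
    Output('table-container', 'data'),
    Input('radio_items', 'value'),
    Input('SMA-dpdn', 'value'),
    Input('LGA-dpdn', 'value'),
    prevent_initial_call=True
)
def graph(max_deviations, selected_SMA, selected_LGA):
    if len(selected_SMA) == 0:
        return dash.no_update, dash.no_update

    dff = df[(df.LGA == selected_LGA) & (df.SMA.isin(selected_SMA))]
    y = dff.CHANGE
    not_outlier = abs(y - np.mean(y)) < max_deviations * np.std(y)
    trim_outliers = pd.DataFrame(data=y[not_outlier])
    dfd = pd.merge(trim_outliers, dff, left_index=True, right_index=True)
    dfd['CHANGE'] = dfd['CHANGE_x']
    fig = px.scatter(dfd, x='TIME', y='CHANGE', color='SMA', trendline='ols',
                     size='PV', height=500, width=800, hover_name='SMA')
    # One return, two values: the Graph for the children Output,
    # the records for the data Output.
    return dcc.Graph(id='display-map', figure=fig), dfd.to_dict('records')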

Create a chart in Dash/Plotly based on a dropdown and date picker for a certain unique value

I have a simple database, as in the picture below,
and the query looks like:
SELECT
[Date]
,[eNodeBName]
,[Downlinkbandwidth]
,[DownlinkEARFCN]
,[CellName]
,[LocalCellId]
,[PhysicalcellID]
,[LRRCConnReqAtt]
,[RRCSetupSuccessRate]
,[InterFreqSuccessRate4G]
,[IntraRATHOSucccessRate]
,[IntraFreqSuccessRate4G]
,[CellDLMaxThroughputMbps]
,[CellDownlinkAverageThroughputMbps]
FROM [myDB].[dbo].[input]
Now I need to create an interactive chart that depends on inputs such as a date picker and a drop-down list.
This is the sample GUI I created for drawing the chart from these inputs, shown in the picture below.
I am creating the chart based on the column names shown in the picture below,
and this is the related query:
SELECT
[Date]
,[CellName]
,[LRRCConnReqAtt]
,[RRCSetupSuccessRate]
,[InterFreqSuccessRate4G]
,[IntraRATHOSucccessRate]
,[IntraFreqSuccessRate4G]
,[CellDLMaxThroughputMbps]
,[CellDownlinkAverageThroughputMbps]
FROM [myDB].[dbo].[input]
So the x-axis should be the Date column, and the y-axis should come from the KPI columns below:
SELECT
[LRRCConnReqAtt]
,[RRCSetupSuccessRate]
,[InterFreqSuccessRate4G]
,[IntraRATHOSucccessRate]
,[IntraFreqSuccessRate4G]
,[CellDLMaxThroughputMbps]
,[CellDownlinkAverageThroughputMbps]
FROM [myDB].[dbo].[input]
We also have a column of unique values called CellName. For one of these cell names I want to create a simple chart based on the Date column and a KPI column.
For example, I want to show a line chart for CellName = 2002334 and KPI LRRCConnReqAtt using data from the 27th of December to the 9th of January. I need a chart like the picture below, which is an example chart created in Excel.
This is my code:
import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
from sqlalchemy import create_engine
import datetime
from datetime import datetime as dt
from dash.dependencies import Input, Output

# connect db
engine = create_engine('mssql+pyodbc://xxxxxx\SMxxxxxARTRNO_EXPRESS/myDB?driver=SQL+Server+Native+Client+11.0')
cursor = engine.raw_connection().cursor()

start = datetime.datetime(2019, 12, 2)
end = datetime.datetime(2019, 12, 15)

external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)

lte_kpis = pd.read_sql('SELECT * FROM [myDB].[dbo].[input]',
                       engine)
lte_kpis_raw = pd.read_sql('SELECT LRRCConnReqAtt, RRCSetupSuccessRate, InterFreqSuccessRate4G, IntraRATHOSucccessRate, IntraFreqSuccessRate4G, CellDLMaxThroughputMbps, CellDownlinkAverageThroughputMbps FROM [myDB].[dbo].[input]',
                           engine)

scale_1 = ['LRRCConnReqAtt']
scale_2 = ['RRCSetupSuccessRate', 'InterFreqSuccessRate4G', 'IntraRATHOSucccessRate', 'IntraFreqSuccessRate4G']
scale_3 = ['CellDLMaxThroughputMbps', 'CellDownlinkAverageThroughputMbps']

pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)

availble_cell = lte_kpis['CellName'].unique()
# availble_cell = lte_kpis.unique(lte_kpis[['Date', 'Site Name', 'Cell CI', 'Cell LAC']].values.ravel('K'))

app.layout = html.Div([
    dcc.Dropdown(
        id='cell-name-xaxis-column',
        options=[{'label': i, 'value': i} for i in availble_cell],
        value='2205516'
    ),
    dcc.Dropdown(
        id='myColumns',
        options=[{'label': col, 'value': col} for col in lte_kpis_raw.columns],
        multi=True,
        value='LRRCConnReqAtt'
    ),
    dcc.DatePickerRange(
        id='my-date-picker-range',
        min_date_allowed=dt(1995, 8, 5),
        max_date_allowed=dt(2030, 9, 19),
        initial_visible_month=dt(2019, 10, 5),
        start_date=dt(2019, 10, 1),
        end_date=dt(2020, 1, 1)
    ),
    html.Div(id='output-container-date-picker-range'),
    dcc.Graph(
        style={'height': 300},
        id='my-graph'
    )
])


@app.callback(
    Output('my-graph', 'figure'),
    [Input('cell-name-xaxis-column', 'value'),
     Input('myColumns', 'value')])
def update_graph(xaxis_column_name, yaxis_column_name, date_value):
    dff = lte_kpis[lte_kpis['Date'] == date_value]
    return {
        'data': [dict(
            x=dff[dff['Date'] == xaxis_column_name]['Value'],
            y=dff[dff['Date'] == yaxis_column_name]['Value'],
            text=dff[dff['Date'] == yaxis_column_name]['CellName'],
            mode='line',
            line={
                'size': 15,
                'opacity': 0.5
            }
        )],
    }


if __name__ == '__main__':
    app.run_server(debug=True)
Note that I want to put more than one KPI in one chart, as different traces.
As the scale values of those KPIs differ a little, I created three groups of column names by scale, as in the code below:
scale_1 = ['LRRCConnReqAtt']
scale_2 = ['RRCSetupSuccessRate', 'InterFreqSuccessRate4G', 'IntraRATHOSucccessRate', 'IntraFreqSuccessRate4G']
scale_3 = ['CellDLMaxThroughputMbps', 'CellDownlinkAverageThroughputMbps']
and this is the error I found:
TypeError: update_graph() missing 1 required positional argument: 'date_value'
Traceback (most recent call last)
File "C:\Users\mwx825326\PycharmProjects\MyReference\venv\Lib\site-packages\dash\dash.py", line 1337, in add_context
output_value = func(*args, **kwargs) # %% callback invoked %%
TypeError: update_graph() missing 1 required positional argument: 'date_value'
Traceback (most recent call last):
File "C:\Users\mwx825326\PycharmProjects\MyReference\venv\Lib\site-packages\dash\dash.py", line 1337, in add_context
output_value = func(*args, **kwargs) # %% callback invoked %%
TypeError: update_graph() missing 1 required positional argument: 'date_value'
Any assistance is appreciated!
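For context, that TypeError simply means the decorator supplies fewer arguments than update_graph declares: there are two Inputs but three parameters. A minimal sketch of one way to line them up, wiring the DatePickerRange's start_date and end_date in as extra Inputs (the date filtering shown is an assumption about how the Date column compares to the picker's ISO strings):

@app.callback(
    Output('my-graph', 'figure'),
    [Input('cell-name-xaxis-column', 'value'),
     Input('myColumns', 'value'),
     Input('my-date-picker-range', 'start_date'),
     Input('my-date-picker-range', 'end_date')])
def update_graph(cell_name, kpi_columns, start_date, end_date):
    if isinstance(kpi_columns, str):          # a multi dropdown may hand back a single string
        kpi_columns = [kpi_columns]
    dates = pd.to_datetime(lte_kpis['Date'])
    mask = ((lte_kpis['CellName'] == cell_name)
            & (dates >= start_date) & (dates <= end_date))
    dff = lte_kpis[mask]
    # One line trace per selected KPI, all sharing the Date x-axis.
    return {
        'data': [dict(x=dff['Date'], y=dff[col], mode='lines', name=col)
                 for col in kpi_columns]
    }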
This question is now solved, but I need it in a more static way: I have to remove one drop-down and create the chart from a single drop-down plus the date picker, since the removed drop-down only selects which DB columns go on the y-axis.
First I created arrays of the columns that should go on the y-axis, because I want static charts for every single or multiple column in one chart, driven by another column (or columns) in the database,
as in the code below:
SHOW_COLUMNS1 = [
    'lrrc_re_est_succ',
    'cell_dl_max_throughput'
]
SHOW_COLUMNS2 = [
    'interfreq_success_rate_4g',
    'intrarat_ho_success_rate'
]
SHOW_COLUMNS3 = [
    'rcc_setup_success_rate',
    'interfreq_success_rate_4g'
]
SHOW_COLUMNS4 = [
    'cell_downlink_average_throughput'
]
After that comes the connection to the database:
# connect db
engine = create_engine('mssql+pyodbc://xxxxxx\zzzzzz/myDB?driver=SQL+Server+Native+Client+11.0')
cursor = engine.raw_connection().cursor()
lte_kpis = pd.read_sql('SELECT * FROM [myDB].[dbo].[lte_details]', engine)
After that I created a GUI for the interactive charts and the pandas read, as in the code below:
lte_kpis = pd.read_sql('SELECT * FROM [myDB].[dbo].[lte_details]', engine)
pd.set_option('display.max_columns', 10000)
print(lte_kpis)

lte_kpis.set_index('date', inplace=True)
availble_cell = lte_kpis['cell_name'].unique()
plots = []

app.layout = html.Div([
    html.H5(
        'Huawei KPI Dashbord'),
    html.Label('Choose the "Cell-Name"'),
    dcc.Dropdown(
        id='cell-name-xaxis-column',
        options=[{'label': i, 'value': i} for i in availble_cell],
        value=availble_cell[0]
    ),
    html.Label('Choose Date Destination'),
    dcc.DatePickerRange(
        id='date-picker-range',
        min_date_allowed=dt(1995, 8, 5),
        max_date_allowed=dt(2030, 9, 19),
        initial_visible_month=dt(2019, 10, 5),
        start_date=dt(2019, 10, 1),
        end_date=dt(2020, 1, 1)
    ),
    html.Div(id='output-container-date-picker-range-%s'),
    dcc.Dropdown(
        id='yaxis-columns',
        options=[{'label': col, 'value': col} for col in SHOW_COLUMNS1],
        multi=True,
        disabled=True,
        value=[SHOW_COLUMNS1[0], SHOW_COLUMNS1[1]]
    ),
    dcc.Graph(
        style={'height': 300},
        id='my-graph'
    ),
    dcc.Dropdown(
        id='yaxis-columns2',
        options=[{'label': col, 'value': col} for col in SHOW_COLUMNS2],
        multi=True,
        disabled=True,
        value=[SHOW_COLUMNS2[0], SHOW_COLUMNS2[1]]
    ),
    dcc.Graph(
        style={'height': 300},
        id='my-graph2'
    ),
    dcc.Dropdown(
        id='yaxis-columns3',
        options=[{'label': col, 'value': col} for col in SHOW_COLUMNS3],
        multi=True,
        disabled=True,
        value=[SHOW_COLUMNS3[0], SHOW_COLUMNS3[1]]
    ),
    dcc.Graph(
        style={'height': 300},
        id='my-graph3'
    ),
    dcc.Dropdown(
        id='yaxis-columns4',
        options=[{'label': col, 'value': col} for col in SHOW_COLUMNS4],
        multi=True,
        disabled=True,
        value=[SHOW_COLUMNS4[0]]
    ),
    dcc.Graph(
        style={'height': 300},
        id='my-graph4'
    ),
])
and this is the callback:
@app.callback(
    Output(component_id='my-graph2', component_property='figure'),
    [Input(component_id='cell-name-xaxis-column', component_property='value'),
     Input(component_id='yaxis-columns2', component_property='value'),
     Input(component_id='date-picker-range', component_property='start_date'),
     Input(component_id='date-picker-range', component_property='end_date')])
def update_graph(cell_name, yaxis_cols, start_date, end_date):
    if not isinstance(yaxis_cols, list):
        yaxis_cols = [yaxis_cols]
    print(yaxis_cols)
    print((start_date, end_date))
    sql_statement = "SELECT date, %s, %s FROM [myDB].[dbo].[lte_details] WHERE ([cell_name]='%s' AND [date]>='%s' AND [date]<='%s')" \
                    % (SHOW_COLUMNS2[0], SHOW_COLUMNS2[1], cell_name, start_date, end_date)
    df = pd.read_sql(sql_statement, engine)
    scatters = []
    for col in yaxis_cols:
        if col == 'lrrc_conn_req_att':
            scatters.append(go.Bar(
                x=df['date'],
                y=df[col],
                name=col
            ))
        else:
            scatters.append(go.Scatter(
                x=df['date'],
                y=df[col],
                name=col
            ))
    figure = {
        'data': scatters,
    }
    return figure
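One small improvement worth suggesting for the callback above: build the SQL with bound parameters instead of Python %-formatting, so user-supplied values never get interpolated into the statement. A sketch under the question's names (same engine, same SHOW_COLUMNS2 list for the trusted column names):

from sqlalchemy import text
import pandas as pd

sql_statement = text(
    f"SELECT date, {SHOW_COLUMNS2[0]}, {SHOW_COLUMNS2[1]} "
    "FROM [myDB].[dbo].[lte_details] "
    "WHERE [cell_name] = :cell AND [date] >= :start AND [date] <= :end"
)
# Only the values are bound; the column names come from the fixed list above.
df = pd.read_sql(sql_statement, engine,
                 params={"cell": cell_name, "start": start_date, "end": end_date})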
Note that the previous error was related to missing data types in the database when importing the Excel file, which is handled by the code below:
col_dict = {
    'date': 'Date',
    'enodeb_name': 'eNodeB Name',
    'downlink_bandwidth': 'Downlink bandwidth',
    ...........................
}

LTE_DETAILS_TABLE = 'lte_details'
cols = list(col_dict.keys())

# connect db
engine = create_engine('mssql+pyodbc://xxxxx\xxxxxx/myDB?driver=SQL+Server+Native+Client+11.0')
connection = engine.connect()
meta = MetaData()

ltedetails = Table(
    LTE_DETAILS_TABLE, meta,
    Column('id', Integer, primary_key=True),
    Column(cols[0], Date),
    Column(cols[1], String),
    Column(cols[2], String),
    .................................
)

if engine.dialect.has_table(engine, LTE_DETAILS_TABLE):
    ltedetails.drop(engine)
meta.create_all(engine)

lte_df = pd.read_excel(os.path.join(os.path.dirname(__file__), 'input.xlsx'), sheet_name='LTE Details', dtype={
    col_dict[cols[0]]: str,
    col_dict[cols[1]]: str,
    col_dict[cols[2]]: str,
    .....................................
})
lte_df['Date'] = pd.to_datetime(lte_df['Date'], errors='coerce')

query = db.insert(ltedetails)
values_list = []
row_count = 1

for i in lte_df.index:
    row = lte_df.loc[i]
    record = {'id': row_count}
    for col in col_dict.keys():
        if col == cols[3] or col == cols[4] or col == cols[5] or col == cols[6] or col == cols[7] or col == cols[8]:
            record[col] = int(row[col_dict[col]])
        elif col == cols[9] or col == cols[10] or col == cols[11] or col == cols[12] or col == cols[13] or col == cols[14]:
            record[col] = float(row[col_dict[col]])
        else:
            record[col] = row[col_dict[col]]
    values_list.append(record)
    row_count += 1

ResultProxy = engine.execute(query, values_list)
connection.close()
engine.dispose()
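Regarding the performance question raised below: the row-by-row conversion loop above is the slow part. A hedged sketch of the usual pandas shortcut, assuming the same lte_df, col_dict, and engine (renaming replaces the manual per-cell casts; explicit types can still be applied with astype if needed):

# Rename the Excel headers to the DB column names, then bulk-insert.
db_df = lte_df.rename(columns={excel: db for db, excel in col_dict.items()})
db_df.insert(0, 'id', range(1, len(db_df) + 1))
db_df.to_sql(LTE_DETAILS_TABLE, engine, if_exists='append', index=False)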
So I am stuck now: I want to remove the drop-down with id='yaxis-columns'.
If there are any comments on how to improve this code's performance and complete the solution to my issue,
I would be glad to hear them.

How can we create DataTable columns dynamically in Dash, using a callback with a function that provides the dataframe?

I am trying to create a Dash table on the web from user inputs. The issue is that the data is created from the database inside the callback, and a priori
I do not know the names of the columns until the pandas dataframe has been created by the callback function.
I have checked that I am getting the correct data, but I am not able to display it. I have used the multiple-output option (using Dash 0.41).
My code looks as follows (I have not provided the details of someFunc, the function that generates the pandas dataframe in the callback,
as it is not important for troubleshooting this Dash code):
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_table
import dash_table as dt
from dash.dependencies import Input, Output, State


def someFunc(ID, pattern_desc, file_path):
    ## do something
    return df  # pandas dataframe


external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
app = dash.Dash(__name__)
app.config.suppress_callback_exceptions = True
app.css.config.serve_locally = True
app.scripts.config.serve_locally = True
app.layout = html.Div(
    children=[
        html.Div(
            id='title',
            children=appTitle,
            className='titleDiv'
        ),
        html.Div(
            children=[
                html.Div(
                    children="Enter ID:",
                    className='textDiv'
                ),
                dcc.Input(
                    id='ID',
                    type='text',
                    value='ABCER1',
                    size=8),
                html.Div(
                    children="Enter Test Pattern",
                    className='textDiv'
                ),
                dcc.Input(
                    id='pattern_desc',
                    type='text',
                    value='Sample',
                    size=20),
                html.Div(
                    children="Enter File OutPut Path:",
                    className='textDiv'
                ),
                dcc.Input(
                    id='file_path',
                    type='text',
                    value='',
                    size=30),
                html.Button(
                    id='submit',
                    n_clicks=0,
                    children='Search'
                )
            ]
        ),
        html.Div(
            id='tableDiv',
            children=dash_table.DataTable(
                id='table',
                style_table={'overflowX': 'scroll'},
                style_as_list_view=True,
                style_header={'backgroundColor': 'white',
                              'fontWeight': 'bold'},
            ),
            className='tableDiv'
        )
    ]
)
# callback to update the table
@app.callback([Output('table', 'data'), Output('table', 'columns')],
              [Input('submit', 'n_clicks')],
              [State('ID', 'value'), State('pattern_desc', 'value'),
               State('file_path', 'value')])
def update_table(n_clicks, ID, pattern_desc, file_path):
    df = someFunc(ID, pattern_desc, file_path)
    mycolumns = [{'name': i, 'id': i} for i in df.columns]
    return html.Div([
        dt.DataTable(
            id='table',
            columns=mycolumns,
            data=df.to_dict("rows")
        )
    ])
In this case the function someFunc, which takes the 3 input arguments, returns a pandas dataframe that can have different columns depending on the inputs. The app layout should therefore display
those columns dynamically, as given by the output of the callback function.
I should be getting the webpage populated with the table and columns, but instead I get an error. When I run this, the data generated by the function is written to the file, but Dash is not able to
generate the table on the webpage. I get the following error:
dash.exceptions.InvalidCallbackReturnValue: The callback ..table.data...table.columns.. is a multi-output.
Expected the output type to be a list or tuple but got Div([DataTable(columns=[{'name': 'pattern_desc', 'id': 'pattern_desc'}, ......
Not sure how I can achieve that. Any help will be appreciated.
In your Dash callback you are supposed to be returning 2 separate values to the 2 separate outputs:
[Output('table', 'data'),Output('table', 'columns')]
You are returning:
return html.Div([
    dt.DataTable(
        id='table',
        columns=mycolumns,
        data=df.to_dict("rows")
    )
])
which is only 1 output.
Dash expects 2 return values, in either a list or a tuple, like so:
return ("output1", outputVariable2)
or
return [html.Div("text"), "output Text 2"]
To fix the problem, either return 2 values in a tuple or list, or edit your output requirements so that only one value is necessary.
From the looks of it you are trying to return a Div with a Datatable in it, so you could just make the following changes:
html.Div(
    id='tableDiv',
    className='tableDiv'
)
...

@app.callback(Output('tableDiv', 'children'),
              [Input('submit', 'n_clicks')],
              [State('ID', 'value'), State('pattern_desc', 'value'),
               State('file_path', 'value')])
def update_table(n_clicks, ID, pattern_desc, file_path):
    df = someFunc(ID, pattern_desc, file_path)
    mycolumns = [{'name': i, 'id': i} for i in df.columns]
    return html.Div([
        dt.DataTable(
            id='table',
            columns=mycolumns,
            data=df.to_dict("rows")
        )
    ])
If I've understood you correctly, then you can simply create another callback which outputs the updated value for the columns prop. You could also use a multi-output callback to update both at the same time.
@app.callback(Output('table', 'columns'),
              [Input('submit', 'n_clicks')],
              [State('ID', 'value'), State('pattern_desc', 'value'),
               State('file_path', 'value')])
def update_table(n_clicks, ID, pattern_desc, file_path):
    mydata = someFunc(ID, pattern_desc, file_path)
    # here you would use the dataframe columns to create the new column values
    return new_column_values
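A short sketch of the multi-output variant mentioned above, reusing the question's own names (someFunc and the layout's DataTable with id='table'): one callback returns data and columns together, so the table already present in the layout is updated in place.

@app.callback([Output('table', 'data'), Output('table', 'columns')],
              [Input('submit', 'n_clicks')],
              [State('ID', 'value'), State('pattern_desc', 'value'),
               State('file_path', 'value')])
def update_table(n_clicks, ID, pattern_desc, file_path):
    df = someFunc(ID, pattern_desc, file_path)
    columns = [{'name': i, 'id': i} for i in df.columns]
    # Two values for the two Outputs, in the same order as declared.
    return df.to_dict('records'), columns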

How to update dataframes each time a new date is passed

I am building a dashboard that generates a report based on the input dates. Five CSVs are generated as part of the report; they are displayed via a drop-down and made downloadable. The problem is that the dataframes reading the CSVs are not updated once a new report is generated; they still hold the data of the last run. How do I fix this?
import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
from datetime import datetime
from dash.dependencies import Input, Output

from kontekst_report_server_datewise import kontekst


def generate_table(dataframe, max_rows=10):
    return html.Table(
        # Header
        [html.Tr([html.Th(col) for col in dataframe.columns])] +
        # Body
        [html.Tr([
            html.Td(dataframe.iloc[i][col]) for col in dataframe.columns
        ]) for i in range(min(len(dataframe), max_rows))]
    )
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
# external_stylesheets = ['https://maxcdn.bootstrapcdn.com/bootstrap/3.3.0/css/bootstrap.min.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)

app.layout = html.Div([
    dcc.DatePickerRange(id='date-picker-range',
                        start_date=datetime(2019, 4, 3), end_date=datetime(2019, 4, 4)),
    dcc.Dropdown(
        id='my-dropdown',
        options=[
            {'label': 'Returns Summary', 'value': 'df1'},
            {'label': 'Return Guide Doc Details', 'value': 'df2'},
            {'label': 'Return Guide id Counts', 'value': 'df3'},
            {'label': 'Non Return Guide id Counts', 'value': 'df4'},
            {'label': 'Order Creation Details', 'value': 'df5'}
        ],
        # value='df1'
        # multi = True
    ),
    html.Div(id='output-container'),
    html.A('Download CSV', id='my-link'),
    html.Div(id='kontekst')
])
@app.callback(
    dash.dependencies.Output('output-container', 'children'),
    [dash.dependencies.Input('my-dropdown', 'value')])
def update_output(value):
    df1 = pd.read_csv('returns_summary.csv')
    df2 = pd.read_csv('ReturnGuideDocDetails.csv')
    df3 = pd.read_csv('Return_guideid_counts.csv')
    df4 = pd.read_csv('NonReturn_guideid_counts.csv')
    df5 = pd.read_csv('OrderCreationdetails.csv')
    if value == 'df1':
        return generate_table(df1)
    elif value == 'df2':
        return generate_table(df2)
    elif value == 'df3':
        return generate_table(df3)
    elif value == 'df4':
        return generate_table(df4)
    elif value == 'df5':
        return generate_table(df5)


@app.callback(Output('my-link', 'href'), [Input('my-dropdown', 'value')])
def update_link(value):
    return '/dash/urlToDownload?value={}'.format(value)


if __name__ == '__main__':
    app.run_server(debug=True)
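No answer was posted, but one hedged observation: nothing in this layout ties the DatePickerRange to the report generation, so the CSVs on disk never change between drop-down clicks. Below is a sketch of one way to wire it up; how kontekst is invoked is an assumption (the question only shows the import), so treat its call signature as hypothetical.

@app.callback(
    dash.dependencies.Output('output-container', 'children'),
    [dash.dependencies.Input('my-dropdown', 'value'),
     dash.dependencies.Input('date-picker-range', 'start_date'),
     dash.dependencies.Input('date-picker-range', 'end_date')])
def update_output(value, start_date, end_date):
    # Hypothetical call: assume kontekst(start_date, end_date) regenerates the CSVs.
    kontekst(start_date, end_date)
    files = {'df1': 'returns_summary.csv',
             'df2': 'ReturnGuideDocDetails.csv',
             'df3': 'Return_guideid_counts.csv',
             'df4': 'NonReturn_guideid_counts.csv',
             'df5': 'OrderCreationdetails.csv'}
    if value in files:
        # Re-read the freshly written CSV on every callback run.
        return generate_table(pd.read_csv(files[value]))
    return dash.no_update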
