I have written this function to display CSV data in a Streamlit UI. The CSV file is opened in 'w+' mode and its data is refreshed every 3 minutes, and I want the UI to show the new data at the same time. The file is updated on a fixed schedule, e.g. 9:15, 9:18, 9:21 AM.
import os
import pandas as pd
import streamlit as st

# directory_of_python_script is defined elsewhere in the app
def strike_details():
    col1, col2 = st.columns(2)
    with col1:
        st.header("NIFTY")
        data1 = pd.read_csv(os.path.join(directory_of_python_script, 'strike_data_csv', "NIFTY_strike.csv"), on_bad_lines='skip')
        st.table(data1)
    with col2:
        st.header("BANKNIFTY")
        data2 = pd.read_csv(os.path.join(directory_of_python_script, 'strike_data_csv', "BANKNIFTY_strike.csv"), on_bad_lines='skip')
        st.table(data2)

strike_details()
This function displays tables like in the image below.
Did you try the schedule module?
import os
import time
import pandas as pd
import streamlit as st
from schedule import every, repeat, run_pending

with st.empty():
    @repeat(every(3).minutes)
    def strike_details():
        col1, col2 = st.columns(2)
        with col1:
            st.header("NIFTY")
            data1 = pd.read_csv(os.path.join(directory_of_python_script, 'strike_data_csv', "NIFTY_strike.csv"), on_bad_lines='skip')
            st.table(data1)
        with col2:
            st.header("BANKNIFTY")
            data2 = pd.read_csv(os.path.join(directory_of_python_script, 'strike_data_csv', "BANKNIFTY_strike.csv"), on_bad_lines='skip')
            st.table(data2)

    while True:
        run_pending()
        time.sleep(1)
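If you'd rather not add the schedule dependency, a minimal sketch of the same idea with a plain sleep loop, reusing the undecorated strike_details from the question (the 180-second interval is my assumption to match the 3-minute refresh):

import time
import streamlit as st

placeholder = st.empty()
while True:
    # redraw both tables inside the single placeholder so the old
    # tables are replaced rather than appended below
    with placeholder.container():
        strike_details()
    time.sleep(180)  # wait 3 minutes before re-reading the CSVs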
I get an error (KeyError: '') on the lines where Vec1 and Vec2 are assigned whenever the text inputs are empty, and I can't manage to get rid of it. I want the variables Vec1 and Vec2 to be stored only when there is a text input for both widgets. To run the code, you can load any xlsx table, like this one:
Var1  Var2
5     7
6     8
Here is my code:
import numpy as np
import pandas as pd
import streamlit as st

st.set_page_config(layout="wide")

# Import file
xlsx_file = st.sidebar.file_uploader('Import File', type='xlsx')

# Select variables of interest
Vec1Name = st.sidebar.text_input("First Variable Name")
Vec2Name = st.sidebar.text_input("Second Variable Name")

st.title('Data')
col1, col2 = st.columns((3, 1))

if xlsx_file is not None:
    df = pd.read_excel(xlsx_file)
    col1.write(''' #### Dataframe''')
    col1.write(df)
    if all(var is not None for var in [Vec1Name, Vec2Name]):
        # Store variables
        Vec1 = df[str(Vec1Name)]
        Vec2 = df[str(Vec2Name)]
        # Variables of interest
        col2.write(''' #### Variables of Interest''')
        col2.write(df[[str(Vec1Name), str(Vec2Name)]])
Thank you for your help!
The error you're facing occurs because the text_input value cannot be found among the columns of df. If you already know the input should be one of the columns, why not use st.selectbox instead and pass df.columns as the options? Let me know if this code works better:
import numpy as np
import pandas as pd
import streamlit as st

st.set_page_config(layout="wide")

# Import file
xlsx_file = st.sidebar.file_uploader('Import File', type='xlsx')

st.title('Data')
col1, col2 = st.columns((3, 1))

if xlsx_file is not None:
    df = pd.read_excel(xlsx_file)
    # Select variables of interest
    Vec1Name = st.sidebar.selectbox("First Variable Name", df.columns)
    Vec2Name = st.sidebar.selectbox("Second Variable Name", df.columns)
    col1.write(''' #### Dataframe''')
    col1.write(df)
    # Store variables
    Vec1 = df[str(Vec1Name)]
    Vec2 = df[str(Vec2Name)]
    # Variables of interest
    col2.write(''' #### Variables of Interest''')
    col2.write(df[[str(Vec1Name), str(Vec2Name)]])
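If you do want to keep text_input, a sketch of a guard that only stores the variables when both inputs are non-empty and actually match columns; it would replace the inner if block in your original script (the membership check is my addition):

if Vec1Name and Vec2Name:  # empty strings are falsy, so blank inputs are skipped
    if Vec1Name in df.columns and Vec2Name in df.columns:
        # Store variables
        Vec1 = df[Vec1Name]
        Vec2 = df[Vec2Name]
        # Variables of interest
        col2.write(''' #### Variables of Interest''')
        col2.write(df[[Vec1Name, Vec2Name]])
    else:
        col2.warning("Both names must match columns in the uploaded file.")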
I have a simple Flask app that queries a database to write a CSV, then uses pyplot to create a chart from it.
I would like to refresh the data in the background every 10 minutes while the app is running. The page doesn't need to refresh its HTML automatically; it just needs to have fresh data when someone opens it.
Can I do that in a single script, or do I need to run a separate script from crontab or something?
I would just restart the container every 10 minutes, but the query takes about 5 minutes, so that would be a 5-minute outage. Not a great idea. I'd prefer it to fetch in the background.
Here is what I'm working with:
import os
from datetime import date
import teradatasql
import pandas as pd
import matplotlib.pyplot as plt
from flask import Flask, render_template
import time
import multitasking
### variables
ausername = os.environ.get('dbuser')
apassword = os.environ.get('dbpassword')
ahost = os.environ.get('dbserver')
systems = ["prd1", "prd2", "frz1", "frz2", "devl"]
qgsystems = ["", "#Tera_Prd2_v2", "#Tera_Frz1_v2", "#Tera_Frz2_v2", "#Tera_Devl_v2"]
weeks = ["0", "7", "30"]
query = """{{fn teradata_write_csv({system}_{week}_output.csv)}}select (bdi.infodata) as sysname,
to_char (thedate, 'MM/DD' ) || ' ' || Cast (thetime as varchar(11)) as Logtime,
sum(drc.cpuuexec)/sum(drc.secs) (decimal(7,2)) as "User CPU",
sum(drc.cpuuserv)/sum(drc.secs) (decimal(7,2)) as "System CPU",
sum(drc.cpuiowait)/sum(drc.secs) (decimal(7,2)) as "CPU IO Wait"
from dbc.resusagescpu{qgsystem} as drc
left outer join boeing_tables.dbcinfotbl{qgsystem} as bdi
on bdi.infokey = 'sysname'
where drc.thedate >= (current_date - {week})
order by logtime asc
Group by sysname,logtime
;
"""
### functions
@multitasking.task
def fetch(system, qgsystem, week):
    with teradatasql.connect(host=ahost, user=ausername, password=apassword) as con:
        with con.cursor() as cur:
            cur.execute(query.format(system=system, qgsystem=qgsystem, week=week))
            for row in cur.fetchall():
                print(row)

@multitasking.task
def plot(system, week):
    # plot one system/week combination per call
    df = pd.read_csv(system + "_" + week + "_output.csv")
    df.pop('sysname')
    df.plot.area(x="Logtime")
    figure = plt.gcf()
    figure.set_size_inches(12, 6)
    plt.savefig("/app/static/" + system + "_" + week + "_webchart.png", bbox_inches='tight', dpi=100)
### main
for week in weeks:
    for system, qgsystem in zip(systems, qgsystems):
        fetch(system, qgsystem, week)
for week in weeks:
    for system in systems:
        plot(system, week)

app = Flask(__name__, template_folder='templates')

@app.route('/')
def index():
    return render_template("index.html")
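On the single-script question: one option is a daemon thread that re-runs the fetch/plot pipeline on a timer, so Flask keeps serving the previous PNGs while fresh ones are generated. A minimal sketch reusing the names above (refresh_all, refresh_loop, and the 600-second interval are mine):

import threading
import time

def refresh_all():
    # Re-run the fetch/plot pipeline defined above.
    for week in weeks:
        for system, qgsystem in zip(systems, qgsystems):
            fetch(system, qgsystem, week)
    for week in weeks:
        for system in systems:
            plot(system, week)

def refresh_loop():
    while True:
        time.sleep(600)  # wait 10 minutes between refreshes
        refresh_all()

# A daemon thread dies with the main process, so it never blocks shutdown.
threading.Thread(target=refresh_loop, daemon=True).start()

Note that pyplot keeps global state and is not thread-safe, so it is safest to do all plotting from a single background thread.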
Dears,
I have a Python script with a query that reads from a DB, stores the result in a DataFrame, then exports it to MS Access.
In the loop, it divides the result into 3 files (each file holds a different month).
The issue is in the LI_DESC column: it contains Arabic letters that display correctly in Jupyter, but show as incorrect characters when exported to Access.
Here are the columns displaying correctly in Jupyter:
Here are the columns as shown in the Access file:
Python code:
import os
from datetime import datetime
import cx_Oracle
import accessdb  # imported for the DataFrame.to_accessdb extension used below
import pandas as pd

dsn_tns = cx_Oracle.makedsn('10.112.**.****', '1521', service_name='cdwn10g.hq')
conn = cx_Oracle.connect(user='BI', password='BI', dsn=dsn_tns, encoding='utf-8')
sql_query = pd.read_sql_query("""SELECT MONTH1,LI_DESC,PORT,REGS_NUM,REG_DT,CTRY_CD,TAR_CD,UNS_QTY,UN_CD,KGN,KGG,CIF_AMT,CURCY_CD,CURCY_RT
FROM STTS.CDS
WHERE SUBSTR(REG_DT_G,1,6) BETWEEN to_number(extract(year from add_months(sysdate,-3)) || '' || to_char(add_months(sysdate,-3), 'MM')) AND to_number(extract(year from add_months(sysdate,-1)) || '' || to_char(add_months(sysdate,-1), 'MM'))
ORDER BY PORT, REGS_NUM, REG_DT""", conn)
df = pd.DataFrame(sql_query)

today = datetime.now()
if not os.path.exists(r'C:\Users\nalkar\Documents\Python Scripts\RUNDATE' + today.strftime('%Y%m%d')):
    os.makedirs(r'C:\Users\nalkar\Documents\Python Scripts\RUNDATE' + today.strftime('%Y%m%d'))
    months = df['MONTH1'].unique().tolist()
    for month in months:
        mydf = df.loc[df.MONTH1 == month]
        mydf.to_accessdb(r"C:\Users\nalkar\Documents\Python Scripts\RUNDATE" + today.strftime('%Y%m%d') + "\%s.accdb" % month, "Data")
    print('done')
else:
    print(r'directory already exist')
I have working code, but it takes a LONG time to run. Looking at my code, is there any way to improve the speed? I am thinking of looping over the query, putting it all in a dictionary, and then writing it to a CSV file, but I am having issues. Here is my code:
# Import the appropriate models and functions needed for our script
from cbapi.response import *
import logging
import csv

# Connect to our CB server
conn = CbResponseAPI()

# Sample query
q = "ipAddress:192.0.0.5"

# Initialize our query
process_query = conn.select(Process).where(q).group_by("id")

# Set your path
my_path = '/Users/path/123.csv'

# All object properties for the event
objects=['childproc_count',
'cmdline',
'comms_ip',
'crossproc_count',
'filemod_count',
'filtering_known_dlls',
'group',
'host_type',
'hostname',
'id',
'interface_ip',
'last_server_update',
'last_update',
'modload_count',
'netconn_count',
'os_type',
'parent_id',
'parent_md5',
'parent_name',
'parent_pid',
'parent_unique_id',
'path',
'process_md5',
'process_name',
'process_pid',
'process_sha256',
'processblock_count',
'regmod_count',
'segment_id',
'sensor_id',
'start',
'terminated',
'unique_id',
'username']
with open(my_path, 'w', newline='') as file:
    header = objects  # add columns
    writer = csv.DictWriter(file, fieldnames=header)
    writer.writeheader()
    for x in process_query:
        dd = {'id': x.id,
              'childproc_count': x.childproc_count,
              'cmdline': x.cmdline,
              'comms_ip': x.comms_ip,
              'crossproc_count': x.crossproc_count,
              'filemod_count': x.filemod_count,
              'filtering_known_dlls': x.filtering_known_dlls,
              'group': x.group,
              'host_type': x.host_type,
              'hostname': x.hostname,
              'interface_ip': x.interface_ip,
              'last_server_update': x.last_server_update,
              'last_update': x.last_update,
              'modload_count': x.modload_count,
              'netconn_count': x.netconn_count,
              'os_type': x.os_type,
              'parent_id': x.parent_id,
              'parent_md5': x.parent_md5,
              'parent_name': x.parent_name,
              'parent_pid': x.parent_pid,
              'parent_unique_id': x.parent_unique_id,
              'path': x.path,
              'process_md5': x.process_md5,
              'process_name': x.process_name,
              'process_pid': x.process_pid,
              'process_sha256': x.process_sha256,
              'processblock_count': x.processblock_count,
              'regmod_count': x.regmod_count,
              'segment_id': x.segment_id,
              'sensor_id': x.sensor_id,
              'start': x.start,
              'terminated': x.terminated,
              'unique_id': x.unique_id,
              'username': x.username}
        writer.writerow(dd)
The query returns about 465,000 records, and the script takes 30+ minutes to run to completion, which is not very efficient.
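Since the row dictionary repeats every attribute by hand, one option is to build it from the objects list instead, which also avoids typos like parnet_md5. A sketch that would replace the with open(...) block above, assuming each name in objects exists as an attribute on the process object:

with open(my_path, 'w', newline='') as file:
    writer = csv.DictWriter(file, fieldnames=objects)
    writer.writeheader()
    for x in process_query:
        # getattr with a default keeps the row aligned with the header
        # even if an attribute is missing on a given record
        writer.writerow({attr: getattr(x, attr, '') for attr in objects})

That said, most of the runtime is likely spent fetching 465,000 records from the server rather than writing the CSV, so the wall-clock gain may be modest.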
This code correctly gets a live quote from Interactive Brokers and then writes the current price to a CSV text file. Now, how do I make this code keep running automatically? I am using a sleep command of 15 seconds.
import csv
from ib_insync import *

# assumes a connection made earlier, e.g. ib = IB(); ib.connect('127.0.0.1', 7497, clientId=1)
stocks = ['TVIX']
test = list()
for stock in stocks:
    stock = Stock(stock, 'SMART', 'USD')
    contract = ib.qualifyContracts(stock)
    test.append(ib.reqMktData(contract[0], snapshot=True))
ib.sleep(15)
for stock in test:
    f = open('tvix-price.csv', 'w')
    f.write(str(stock.last))
    f.close()
Below is my code for what I have so far.
I get no errors, which is kind of strange; nothing at all happens. I will restart my kernel to make sure I am connected.
On second thought, is doing a while True even the best method?
while True:
    stocks = ['TVIX']
    test = list()
    for stock in stocks:
        stock = Stock(stock, 'SMART', 'USD')
        contract = ib.qualifyContracts(stock)
        test.append(ib.reqMktData(contract[0], snapshot=True))
    ib.sleep(15)

for stock in test:
    f = open('tvix-price.csv', 'w')
    f.write(str(stock.last))
    f.close()
Part of your code wasn't indented correctly, so the file-writing part ended up outside the loop.
You should have done:
import csv
from ib_insync import *

stocks = ['TVIX']
while True:
    test = list()
    for stock in stocks:
        stock = Stock(stock, 'SMART', 'USD')
        contract = ib.qualifyContracts(stock)
        test.append(ib.reqMktData(contract[0], snapshot=True))
    ib.sleep(15)
    for stock in test:
        f = open('tvix-price.csv', 'w')
        f.write(str(stock.last))
        f.close()
The while True loop only repeats the indented section. Because the file-writing part was not indented under the loop, and the loop never exits, that part was never reached.
Hope this helps.
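As a side note, open('tvix-price.csv', 'w') overwrites the file on every pass. If you'd rather keep a running history of prices, a sketch of an append-mode variant of the inner loop (the timestamp column is my addition):

from datetime import datetime

for stock in test:
    with open('tvix-price.csv', 'a', newline='') as f:
        writer = csv.writer(f)
        # one timestamped row per snapshot instead of overwriting the file
        writer.writerow([datetime.now().isoformat(), stock.last])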