Make a SQL select with a list in Flask - Python

I'm working on a Flask app that uses SQL. I'm making a JSON request to the database, but I only need some columns, so I have their names in a list and need to pass them as parameters.
@app.route('/ajax-request', methods=['POST'])
def ajax_request():
    table = request.form['table']
    columns = request.form.getlist('columns[]')
    Model = ''
    if table == 'Customer':
        Model = Customer
    elif table == 'Tile':
        Model = Tile
    elif table == 'Kind_of_tile':
        Model = Kind_of_tile
    elif table == 'Tile_example':
        Model = Tile_example
    columns_query = []
    for x in columns:
        columns_query.append(getattr(Model, x))
    columns_query = tuple(columns_query)
    for x in columns_query:
        print(x)
    # this is the line that fails:
    query = Model.query.add_columns(', '.join(map(str, columns_query))).first()
    exit()
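Joining the attributes into one comma-separated string hands SQLAlchemy a single text fragment rather than separate column expressions, which is what breaks the marked line. A minimal sketch of a fix, assuming the Flask-SQLAlchemy models above: unpack the tuple of attributes instead of joining it.

    # sketch: select only the requested columns; columns_query holds the
    # attributes already built with getattr(Model, x) above
    query = Model.query.with_entities(*columns_query).first()

with_entities replaces the selected entities with exactly the listed columns, so the result is a plain tuple of the requested values.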

Multiple for loops in a Python query, saving the results separately

I need to save the results into different data frames.

query = '''select name
           from my_table
           where class = {}
           and student_number > {}
           and student_number <= {} + 10
           group by name'''
inputs = list(range(0, 100, 10))
classes = [1, 2, 3, 4]

The expected result is running these batches for each class individually, e.g. df_class1, df_class2, df_class3, df_class4.
I'm not sure about this part:

for i in inputs:
    for c in classes:
        query.format(c, i, i)

results = pd.DataFrame()
for input, query in queries.items():
    res = my_db.execute(query)
    results = results.append(pd.DataFrame(res))

Each result should end up as something like df_class1, df_class2, df_class3, df_class4.
You can use a formatted string to save the resulting dataframe for each iteration:

inputs = list(range(0, 100, 10))
classes = [1, 2, 3, 4]
for i in inputs:
    for c in classes:
        q = query.format(c, i, i)  # .format() returns a new string; assign it instead of discarding it
        res = my_db.execute(q)
        df = pd.DataFrame(res)
        df.to_csv(f'result_{i}_{c}.csv')
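If the goal is one dataframe per class (df_class1, df_class2, ...) rather than one CSV per batch, a small variation could collect the batches and concatenate them per class. A sketch, assuming the query template and the my_db connection from the question:

import pandas as pd

dfs = {}
for c in classes:
    parts = []
    for i in inputs:
        res = my_db.execute(query.format(c, i, i))  # one batch of students for class c
        parts.append(pd.DataFrame(res))
    dfs[c] = pd.concat(parts, ignore_index=True)  # dfs[1] plays the role of df_class1, and so on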

SQLAlchemy - Filtering with multiple conditions

I append a number of queries to a list and then filter the table using SQLAlchemy's query function and or_. The table consists of warehouses, and I want to query them by name, available storage, price, and services. The logic should be correct, but I get the error
subquery must return only one column
@search.route('/search/filter', methods=['POST'])
def filter():
    name = request.form.get('name')
    n_storage = request.form.get('n_storage')
    # MIN PRICE / MAX PRICE
    min_p = request.form.get('min_p')
    max_p = request.form.get('max_p')
    # SERVICES
    labelling = True if request.form.get('labelling') else False
    manual_geo_data_entry = True if request.form.get('manual_geo_data_entry') else False
    item_packaging = True if request.form.get('item_packaging') else False
    palette_packaging = True if request.form.get('palette_packaging') else False
    filters = []
    if name:
        filters.append(Warehouse.query.filter(Warehouse.name.match(name)))
    if n_storage:
        filters.append(Warehouse.query.filter(Warehouse.volume_available > n_storage))
    # FILTERING BASED ON SERVICES
    if labelling:
        filters.append(Warehouse.query.filter(Warehouse.labelling.is_(True)))
    if manual_geo_data_entry:
        filters.append(Warehouse.query.filter(Warehouse.manual_geo_data_entry.is_(True)))
    if item_packaging:
        filters.append(Warehouse.query.filter(Warehouse.item_packaging.is_(True)))
    if palette_packaging:
        filters.append(Warehouse.query.filter(Warehouse.palette_packaging.is_(True)))
    results = Warehouse.query.filter(or_(*filters)).all()
    return render_template('search/search.html', title='Search', data=results)
sqlalchemy.exc.ProgrammingError: (psycopg2.errors.SyntaxError) subquery must return only one column
LINE 3: WHERE (SELECT "PilotApp_warehouse_test".id, "PilotApp_wareho...
^
[SQL: SELECT "PilotApp_warehouse_test".id AS "PilotApp_warehouse_test_id", "PilotApp_warehouse_test".name AS "PilotApp_warehouse_test_name", "PilotApp_warehouse_test".volume_available AS "PilotApp_warehouse_test_volume_available", "PilotApp_warehouse_test".volume_total AS "PilotApp_warehouse_test_volume_total", "PilotApp_warehouse_test".labelling AS "PilotApp_warehouse_test_labelling", "PilotApp_warehouse_test".manual_geo_data_entry AS "PilotApp_warehouse_test_manual_geo_data_entry", "PilotApp_warehouse_test".item_packaging AS "PilotApp_warehouse_test_item_packaging", "PilotApp_warehouse_test".palette_packaging AS "PilotApp_warehouse_test_palette_packaging", "PilotApp_warehouse_test".address AS "PilotApp_warehouse_test_address", "PilotApp_warehouse_test".email AS "PilotApp_warehouse_test_email", "PilotApp_warehouse_test".phone AS "PilotApp_warehouse_test_phone", "PilotApp_warehouse_test".owner AS "PilotApp_warehouse_test_owner"
FROM "PilotApp_warehouse_test"
WHERE (SELECT "PilotApp_warehouse_test".id, "PilotApp_warehouse_test".name, "PilotApp_warehouse_test".volume_available, "PilotApp_warehouse_test".volume_total, "PilotApp_warehouse_test".labelling, "PilotApp_warehouse_test".manual_geo_data_entry, "PilotApp_warehouse_test".item_packaging, "PilotApp_warehouse_test".palette_packaging, "PilotApp_warehouse_test".address, "PilotApp_warehouse_test".email, "PilotApp_warehouse_test".phone, "PilotApp_warehouse_test".owner
FROM "PilotApp_warehouse_test"
WHERE "PilotApp_warehouse_test".manual_geo_data_entry IS true) OR (SELECT "PilotApp_warehouse_test".id, "PilotApp_warehouse_test".name, "PilotApp_warehouse_test".volume_available, "PilotApp_warehouse_test".volume_total, "PilotApp_warehouse_test".labelling, "PilotApp_warehouse_test".manual_geo_data_entry, "PilotApp_warehouse_test".item_packaging, "PilotApp_warehouse_test".palette_packaging, "PilotApp_warehouse_test".address, "PilotApp_warehouse_test".email, "PilotApp_warehouse_test".phone, "PilotApp_warehouse_test".owner
FROM "PilotApp_warehouse_test"
WHERE "PilotApp_warehouse_test".item_packaging IS true)]
You should pass only conditions to filter(), not whole queries; each query passed in is rendered as a subquery, which is what triggers the error. I think this should work for you:
@search.route('/search/filter', methods=['POST'])
def filter():
    name = request.form.get('name')
    n_storage = request.form.get('n_storage')
    # MIN PRICE / MAX PRICE
    min_p = request.form.get('min_p')
    max_p = request.form.get('max_p')
    # SERVICES
    labelling = True if request.form.get('labelling') else False
    manual_geo_data_entry = True if request.form.get('manual_geo_data_entry') else False
    item_packaging = True if request.form.get('item_packaging') else False
    palette_packaging = True if request.form.get('palette_packaging') else False
    filters = []
    if name:
        filters.append(Warehouse.name.match(name))
    if n_storage:
        filters.append(Warehouse.volume_available > n_storage)
    # FILTERING BASED ON SERVICES
    if labelling:
        filters.append(Warehouse.labelling.is_(True))
    if manual_geo_data_entry:
        filters.append(Warehouse.manual_geo_data_entry.is_(True))
    if item_packaging:
        filters.append(Warehouse.item_packaging.is_(True))
    if palette_packaging:
        filters.append(Warehouse.palette_packaging.is_(True))
    results = Warehouse.query.filter(or_(*filters)).all()
    return render_template('search/search.html', title='Search', data=results)
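One note on semantics: or_(*filters) returns warehouses matching any one of the accumulated conditions. If instead every supplied condition must hold at once (the question doesn't say which is intended), a sketch of the conjunctive form is to combine them with and_, or simply pass them as separate arguments to filter(), which conjoins them:

from sqlalchemy import and_

# all accumulated conditions must hold at once;
# equivalent to Warehouse.query.filter(*filters)
results = Warehouse.query.filter(and_(*filters)).all()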

How do I insert into a Table one Primary Key and Two Foreign Keys?

I work with rolls of plastic film in different lengths and widths, and I'm creating a database to store all the orders. To avoid repetition, I created separate tables for length (class Comprimento) and width (class Largura), and I used UUIDs to create distinct IDs.
Now I want to cross both tables in a model class, which is:
class Largura(Base):
    __tablename__ = 'largura'
    id = Column(GUID(), primary_key=True, default=lambda: str(uuid.uuid4()))
    largura = Column(String)
    modelos_l = relationship('Modelo', back_populates='larguras', cascade='all, delete')

    def __repr__(self):
        return f"<Largura {self.largura}>"


class Comprimento(Base):
    __tablename__ = 'comprimento'
    id = Column(GUID(), primary_key=True, default=lambda: str(uuid.uuid4()))
    comprimento = Column(String)
    modelos_c = relationship('Modelo', back_populates='comprimentos', cascade='all, delete')

    def __repr__(self):
        return f"<Comprimento {self.comprimento}>"


class Modelo(Base):
    __tablename__ = 'modelo'
    id = Column(GUID(), primary_key=True, default=lambda: str(uuid.uuid4()))
    descricao = Column(String(50))
    largura_id = Column(GUID(), ForeignKey("largura.id"), default=lambda: str(uuid.uuid4()))
    comprimento_id = Column(GUID(), ForeignKey("comprimento.id"), default=lambda: str(uuid.uuid4()))
    larguras = relationship('Largura', back_populates='modelos_l')
    comprimentos = relationship('Comprimento', back_populates='modelos_c')

    def __repr__(self):
        return f"<Modelo {self.id}>"
Then I created a file dedicated to inserting data into this table:
from DBModelPy3 import Comprimento, Largura, Modelo, session
from sqlalchemy import create_engine
import pandas as pd

# Pre-loading my CSV file
df = pd.read_csv("dataorged.csv", sep=',')
pd.set_option('display.float_format', '{:.0f}'.format)  # change the number format to hide the ','

cnx = create_engine('sqlite:///data_hub2.db', echo=True).connect()

# the dataframe with the orders; only the columns needed for this insertion
df_modelo = df[['larg_ajustada', 'comp']]

# Loading the tables from my database
df_largura = pd.read_sql_table('largura', cnx)
df_comprimento = pd.read_sql_table('comprimento', cnx)
With everything loaded, I combined all the lengths and widths I already had in my two tables (df_largura and df_comprimento) and then filtered them against the original file containing the orders.
# COMBINING ALL THE LENGTHS AND WIDTHS FROM MY TABLES
model_num = []
for n_larg in range(len(df_largura)):
    db_larg = str(df_largura['largura'][n_larg])
    for n_comp in range(len(df_comprimento)):
        db_comp = df_comprimento['comprimento'][n_comp]
        combined = str(db_larg) + "x" + str(db_comp)
        model_num.append([db_larg, db_comp, combined])

df_modelos_ex = pd.DataFrame(model_num)
df_modelos_ex.columns = ['larg', 'comp', 'combined']
With this I had all possible combinations in my dataframe, and I created the combined column to match against later.
modelos_existentes = []
# COMBINATIONS THAT APPEAR IN THE ORDER DATAFRAME
for item in range(len(df_modelo)):
    mod_larg = df_modelo['larg_ajustada'][item]
    mod_comp = df_modelo['comp'][item]
    mod_comb = str(mod_larg) + "x" + str(mod_comp)
    modelos_existentes.append([mod_larg, mod_comp, mod_comb])

df_mod_existentes = pd.DataFrame(modelos_existentes)
df_mod_existentes.columns = ['ex_larg', 'ex_comp', 'ex_comb']

df_limpo = df_mod_existentes.drop_duplicates(subset=['ex_comb'])
df_limpo.reset_index(drop=True, inplace=True)
With all my elements in place, the madness began. I started a loop to run through all my dataframes:
for l_row in range(len(df_limpo)):  # for each row in the dataframe containing the orders,
    larg = df_limpo['ex_larg'][l_row]  # create a variable for width
    comp = df_limpo['ex_comp'][l_row]  # create a variable for length
    comb = df_limpo['ex_comb'][l_row]  # create a variable for the combination of both
    for n_row in range(len(df_largura)):  # for each row in my width table from the DB,
        db_larg_id = df_largura['id'][n_row]  # a variable for the width PK
        db_larg_largura = df_largura['largura'][n_row]  # a variable for the value
        lar = session.query(Largura).filter(Largura.id == db_larg_id).first()
        if db_larg_largura == larg:  # if the table value matches the order row,
            for m_row in range(len(df_comprimento)):  # for each length in the DB table,
                db_comp_id = df_comprimento['id'][m_row]
                db_comp_comprimento = df_comprimento['comprimento'][m_row]
                compr = session.query(Comprimento).filter(Comprimento.id == db_comp_id).first()
                if db_comp_comprimento == comp:  # if the table value matches the order row
                    # NOTE: n_linha is not defined in this loop (l_row was presumably meant)
                    new_model = Modelo(descricao=df_limpo['ex_comb'][n_linha], larguras=lar, comprimentos=compr)
From here, I would only add session.add(new_model) and session.commit() to finish my code. But it's not adding.
What I would like is for my Modelo table to look like:

MODELO table
ID (PK) | DESCRIPTION (combined values, string) | largura_id (width id, FK) | comprimento_id (length id, FK)

Sorry about the long explanation. I tried my best!
If anyone has the same trouble:
##########################
# ADDING TO THE DATABASE #
##########################
lista_a = []  # created an empty list
for n_linha in range(len(df_limpo)):  # ran through my dataframe
    larg_a = df_limpo['ex_larg'][n_linha]  # extracted width and length from it
    comp_a = df_limpo['ex_comp'][n_linha]
    for m_linha in range(len(df_largura)):  # ran through my width table from the database
        db_larg_id = df_largura['id'][m_linha]
        db_larg_largura = df_largura['largura'][m_linha]
        if larg_a == db_larg_largura:  # checked whether the dataframe width matches the one in the table
            lista_a.append([larg_a, comp_a, db_larg_id])  # appended it to lista_a

df_lista_a = pd.DataFrame(lista_a)  # created a new dataframe
df_lista_a.columns = ['larg', 'comp', 'id_larg']

lista_b = []  # created a new list
for n_row in range(len(df_lista_a)):  # ran through the new dataframe
    larg_b = df_lista_a['larg'][n_row]  # extracted each column from it
    comp_b = df_lista_a['comp'][n_row]
    larg_b_id = df_lista_a['id_larg'][n_row]
    for m_row in range(len(df_comprimento)):  # ran through my length table
        db_comp_id = df_comprimento['id'][m_row]
        db_comp_comprimento = df_comprimento['comprimento'][m_row]
        if comp_b == db_comp_comprimento:  # checked whether the dataframe length matches the one in the table
            lista_b.append([larg_b, comp_b, larg_b_id, db_comp_id])  # appended the length id to the list
            break

df_lista_b = pd.DataFrame(lista_b)  # converted to a dataframe
df_lista_b.columns = ['larg', 'comp', 'id_larg', 'id_comp']

# HERE'S THE ACTUAL INSERTION
for n_model in range(len(df_lista_b)):  # for each model in the list, extract the values and add a new_model
    mod_largura = df_lista_b['larg'][n_model]
    mod_comprimento = df_lista_b['comp'][n_model]
    mod_largura_id = df_lista_b['id_larg'][n_model]
    mod_comprimento_id = df_lista_b['id_comp'][n_model]
    # look up the parent rows by the ids matched above
    lar = session.query(Largura).filter(Largura.id == mod_largura_id).first()
    compr = session.query(Comprimento).filter(Comprimento.id == mod_comprimento_id).first()
    new_model = Modelo(descricao=df_limpo['ex_comb'][n_model], larguras=lar, comprimentos=compr)
    print("Model " + df_limpo['ex_comb'][n_model] + " with ids " + mod_largura_id + " and " + mod_comprimento_id + " added!")
    session.add(new_model)
session.commit()
Then it's done.
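For what it's worth, pandas can do the matching itself: merging the order combinations against the two lookup tables yields the ids directly, without the nested loops. A sketch under the same schema, assuming the df_limpo, df_largura, df_comprimento, and session objects above, and that the width/length values compare equal across frames (types may need aligning first):

# sketch: match widths and lengths by merging instead of nested loops
merged = (df_limpo
          .merge(df_largura, left_on='ex_larg', right_on='largura')
          .merge(df_comprimento, left_on='ex_comp', right_on='comprimento',
                 suffixes=('_larg', '_comp')))  # the two 'id' columns become id_larg / id_comp

for row in merged.itertuples(index=False):
    lar = session.query(Largura).filter(Largura.id == row.id_larg).first()
    compr = session.query(Comprimento).filter(Comprimento.id == row.id_comp).first()
    session.add(Modelo(descricao=row.ex_comb, larguras=lar, comprimentos=compr))

session.commit()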

can't adapt type 'data'

I have a data class:
class data:
    def __init__(self, ReadTime, Concentration_PM10, Concentration_SO2, Concentration_O3,
                 Concentration_NO2, Concentration_CO, AQI_PM10, AQI_SO2, AQI_O3, AQI_NO2,
                 AQI_CO, AQI_AQIIndex, AQI_ContaminantParameter, AQI_State, AQI_Color):
        self.ReadTime = ReadTime
        self.Concentration_PM10 = Concentration_PM10
        self.Concentration_SO2 = Concentration_SO2
        self.Concentration_O3 = Concentration_O3
        self.Concentration_NO2 = Concentration_NO2
        self.Concentration_CO = Concentration_CO
        self.AQI_PM10 = AQI_PM10
        self.AQI_SO2 = AQI_SO2
        self.AQI_O3 = AQI_O3
        self.AQI_NO2 = AQI_NO2
        self.AQI_CO = AQI_CO
        self.AQI_AQIIndex = AQI_AQIIndex
        self.AQI_ContaminantParameter = AQI_ContaminantParameter
        self.AQI_State = AQI_State
        self.AQI_Color = AQI_Color
I'm sending a request to an API and collecting the objects into a list:
list = []
for i in result:
    list.append(data(i['ReadTime'], i['Concentration']['PM10'], i['Concentration']['SO2'],
                     i['Concentration']['O3'], i['Concentration']['NO2'], i['Concentration']['CO'],
                     i['AQI']['PM10'], i['AQI']['SO2'], i['AQI']['O3'], i['AQI']['NO2'],
                     i['AQI']['CO'], i['AQI']['AQIIndex'], i['AQI']['ContaminantParameter'],
                     i['AQI']['State'], i['AQI']['Color']))
Then I want to insert this list into a PostgreSQL table, but I get the error "can't adapt type 'data'":
list_record = ", ".join(["%s"] * len(list))
query_insert = (f"INSERT INTO hava_kalitesi (ReadTime, Concentration_PM10, Concentration_SO2, "
                f"Concentration_O3, Concentration_NO2, Concentration_CO, AQI_PM10, AQI_SO2, "
                f"AQI_O3, AQI_NO2, AQI_CO, AQI_AQIIndex, AQI_ContaminantParameter, AQI_State, "
                f"AQI_Color) VALUES {list_record}")
cursor.execute(query_insert, list)
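psycopg2 can adapt built-in types such as numbers, strings, and tuples, but not instances of an arbitrary class, which is why the data objects are rejected. A sketch of one way around it, assuming the class and cursor above: convert each object to a plain tuple and let execute_values from psycopg2.extras expand the rows (note that list here is the question's variable, which shadows the built-in):

from psycopg2.extras import execute_values

# one plain tuple of column values per data object
rows = [(d.ReadTime, d.Concentration_PM10, d.Concentration_SO2, d.Concentration_O3,
         d.Concentration_NO2, d.Concentration_CO, d.AQI_PM10, d.AQI_SO2, d.AQI_O3,
         d.AQI_NO2, d.AQI_CO, d.AQI_AQIIndex, d.AQI_ContaminantParameter,
         d.AQI_State, d.AQI_Color) for d in list]

execute_values(cursor,
               "INSERT INTO hava_kalitesi (ReadTime, Concentration_PM10, Concentration_SO2, "
               "Concentration_O3, Concentration_NO2, Concentration_CO, AQI_PM10, AQI_SO2, "
               "AQI_O3, AQI_NO2, AQI_CO, AQI_AQIIndex, AQI_ContaminantParameter, AQI_State, "
               "AQI_Color) VALUES %s",
               rows)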

Generalize Getting Data From SQL Server to Python

I'm working on a task where I have to get data from SQL Server, and because I'm running time series analysis, I need to specify a date field that can change with every table or query. I may also be reading either a simple query or a stored procedure. I want to generalize the code below, which is field- and database-specific. I thought I could define an empty dictionary in the class and then use it in the read method below, but I'm conflicted.
class DataPrep:
    def __init__(self, conn):
        self.df = pd.DataFrame()
        self.mega_projects = set()
        self.mega_project_to_df = {}
        self.mega_project_to_df_pvt = {}
        self.conn = {}

    def read_data(self):
        self.conn = pyodbc.connect({'driver': None, 'server': None, 'database': None,
                                    'uid': None, 'pwd': None})
        self.df = pd.read_sql_query('''exec [dbo].[ML_WorkLoad]''', self.conn,
                                    parse_dates={'CreatedDate': '%d/%m/%Y %H.%M.%S'})
        # self.df = self.df[['EstimateManDay', 'CreatedDate', 'MegaProject', 'ProjectName']]
        self.df['month'] = pd.DatetimeIndex(self.df['CreatedDate']).month
        self.df['year'] = pd.DatetimeIndex(self.df['CreatedDate']).year
        self.df['quarter'] = pd.DatetimeIndex(self.df['CreatedDate']).quarter
        self.df['week'] = pd.DatetimeIndex(self.df['CreatedDate']).week
        self.df['dayorg'] = pd.DatetimeIndex(self.df['CreatedDate']).day
        self.df['day'] = 1
        self.df['year_quarter'] = self.df['year'].astype(str) + "_" + self.df['quarter'].astype(str)
        self.df['year_month'] = self.df['year'].astype(str) + "_" + self.df['month'].astype(str)
        self.df['year_week'] = self.df['year'].astype(str) + "_" + self.df['week'].astype(str)
        self.df['date'] = pd.to_datetime(self.df[['year', 'month', 'day']])
        self.df = self.df[self.df['CreatedDate'] <= datetime.strptime("2020-01-01", "%Y-%m-%d")]
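Since the question asks how to generalize, here is one possible shape, sketched under assumptions: the query (plain SELECT or an exec of a stored procedure), the date column, its parse format, and a cutoff date all become parameters of the read method. The names query, date_field, date_format, and cutoff are hypothetical, not from the original code.

import pandas as pd
import pyodbc


class DataPrep:
    def __init__(self, conn_str):
        # conn_str is an ordinary pyodbc connection string
        self.conn = pyodbc.connect(conn_str)

    def read_data(self, query, date_field, date_format=None, cutoff=None):
        df = pd.read_sql_query(
            query, self.conn,
            parse_dates={date_field: date_format} if date_format else None)
        dates = pd.DatetimeIndex(df[date_field])
        df['year'], df['month'], df['quarter'] = dates.year, dates.month, dates.quarter
        df['year_month'] = df['year'].astype(str) + "_" + df['month'].astype(str)
        df['year_quarter'] = df['year'].astype(str) + "_" + df['quarter'].astype(str)
        if cutoff is not None:
            df = df[df[date_field] <= pd.Timestamp(cutoff)]
        return df

# usage, mirroring the original call:
# prep = DataPrep(conn_str)
# df = prep.read_data('exec [dbo].[ML_WorkLoad]', 'CreatedDate',
#                     date_format='%d/%m/%Y %H.%M.%S', cutoff='2020-01-01')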
