Not an executable object: 'SELECT * FROM LoanParcel' [duplicate] - python

This question already has an answer here:
AttributeError: 'str' object has no attribute '_execute_on_connection'
(1 answer)
Closed 4 days ago.
I want to read the database into a dataframe, and I've used SQLAlchemy's create_engine, but I'm stuck on the error not an executable object: 'SELECT * FROM LoanParcel', where LoanParcel is the name of the database I want to turn into a dataframe. How should I fix it?
views.py
from django.shortcuts import render
from sqlalchemy import create_engine
import pandas as pd

engine = create_engine("mysql+pymysql://mariadb:mariadb@localhost:9051/mariadb")

def user_detail(req, id):
    conn = engine.connect()
    QLoanParcel = "SELECT * FROM LoanParcel"
    dfParcel = pd.read_sql(QLoanParcel, conn)
    conn.close()
    df = dfParcel.drop(["id", "date_add", "start_date"], axis=1)
    return render(req, 'pages/user_detail.html')

SQLAlchemy 2 introduced breaking changes: you need to wrap your SQL query in text() to create an executable object.
Also, LoanParcel is a table, not a database, in your example.
from django.shortcuts import render
from sqlalchemy import create_engine, text
import pandas as pd

engine = create_engine("mysql+pymysql://mariadb:mariadb@localhost:9051/mariadb")

def user_detail(req, id):
    conn = engine.connect()
    QLoanParcel = text("SELECT * FROM LoanParcel")
    dfParcel = pd.read_sql(QLoanParcel, conn)
    conn.close()
    df = dfParcel.drop(["id", "date_add", "start_date"], axis=1)
    return render(req, 'pages/user_detail.html')
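As a side note, text() also lets you bind parameters safely instead of building SQL strings by hand; a minimal sketch, assuming you want to filter on the id column from your drop() call (the value 1 is just an example):
from sqlalchemy import create_engine, text
import pandas as pd

engine = create_engine("mysql+pymysql://mariadb:mariadb@localhost:9051/mariadb")

query = text("SELECT * FROM LoanParcel WHERE id = :loan_id")
with engine.connect() as conn:
    # pandas forwards the params dict to SQLAlchemy, which binds :loan_id
    df = pd.read_sql(query, conn, params={"loan_id": 1})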

Related

How to cache a function that takes a database connection as an input using streamlit

I have a database connection:
import sqlalchemy as sa
engine = sa.create_engine('my_info')
connection = engine.connect()
Subsequently I have a function:
import pandas as pd
def load_data(connection):
    sql = 'select * from tablename'
    df = pd.read_sql(sql, con=connection)
    return df
This is part of a Streamlit app I'm working on, and I need Streamlit to cache the output of my load_data function. I thought it worked like this:
import pandas as pd
import streamlit as st

@st.cache()
def load_data(connection):
    sql = 'select * from tablename'
    df = pd.read_sql(sql, con=connection)
    return df
But this gives me the following error:
UnhashableTypeError: Cannot hash object of type builtins.weakref, found in the arguments of load_data().
The error is much longer, and if it helps I will post it. The error also contains a link to the streamlit documentation. I read it and reformulated my code to look like this:
@st.cache()
def DBConnection():
    engine = sa.create_engine("my_info")
    conn = engine.connect()
    return conn

conn = DBConnection()

@st.cache(hash_funcs={DBConnection: id})
def load_data(connection):
    sql = 'select * from tablename'
    df = pd.read_sql(sql, con=connection)
    return df
But this gives me a NameError:
NameError: name 'DBConnection' is not defined
I've run out of ideas to try; any help would be highly appreciated. It is very possible that I misunderstood the documentation, as it assumes a lot of prior knowledge about hashing and caching.
Combine the two methods and use:
@st.cache(allow_output_mutation=True)
Code:
import pandas as pd
import sqlalchemy as sa
import streamlit as st

@st.cache(allow_output_mutation=True)
def load_data():
    engine = sa.create_engine("my_info")
    conn = engine.connect()
    sql = 'select * from tablename'
    df = pd.read_sql(sql, con=conn)
    return df
For more, see the Streamlit caching documentation.
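As a follow-up: on newer Streamlit releases (roughly 1.18+), st.cache is deprecated in favour of st.cache_resource (for things like connections) and st.cache_data (for returned data). A rough sketch under that assumption, reusing the placeholder "my_info" URL from the question:
import pandas as pd
import sqlalchemy as sa
import streamlit as st

@st.cache_resource  # cache the connection object itself across reruns
def get_connection():
    engine = sa.create_engine("my_info")  # placeholder URL from the question
    return engine.connect()

@st.cache_data  # cache the returned DataFrame; the leading underscore tells
def load_data(_connection):  # Streamlit not to hash the connection argument
    sql = 'select * from tablename'
    return pd.read_sql(sql, con=_connection)

df = load_data(get_connection())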

Open database files (.db) using python

I have a .db database file in SQLite3 format and I was attempting to open it to look at the data inside. Below is my attempt in Python.
import sqlite3
# Create a SQL connection to our SQLite database
con = sqlite3.connect(dbfile)
cur = con.cursor()
# The result of a "cursor.execute" can be iterated over by row
for row in cur.execute("SELECT * FROM "):
    print(row)
# Be sure to close the connection
con.close()
For the line ("SELECT * FROM "), I understand that you have to put the header of the table after the word "FROM"; however, since I can't even open the file in the first place, I have no idea what header to put. How can I write the code so that I can open the database file and read its contents?
So, you analyzed it correctly: after the FROM you have to put in the table names. You can find them out like this:
SELECT name FROM sqlite_master WHERE type = 'table'
In code this looks like this:
# loading in modules
import sqlite3
# creating file path
dbfile = '/home/niklas/Desktop/Stuff/StockData-IBM.db'
# Create a SQL connection to our SQLite database
con = sqlite3.connect(dbfile)
# creating cursor
cur = con.cursor()
# reading all table names
table_list = [a for a in cur.execute("SELECT name FROM sqlite_master WHERE type = 'table'")]
# here is you table list
print(table_list)
# Be sure to close the connection
con.close()
That worked very well for me. You already have the data-reading part right; just paste in the table names.
If you want to see the data as a pandas dataframe for visual analysis, the approach below could also be used.
import pandas as pd
import sqlite3

try:
    conn = sqlite3.connect("file.db")
except Exception as e:
    print(e)

# Now, in order to read into a pandas dataframe, we need to know the table name
cursor = conn.cursor()
cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
print(f"Table Name : {cursor.fetchall()}")

df = pd.read_sql_query('SELECT * FROM Table_Name', conn)
conn.close()
from flask import Flask
from sqlalchemy import create_engine, select, MetaData, Table
from sqlalchemy.sql import and_, or_

app = Flask(__name__)
engine = create_engine('sqlite://username:password@host/databasename')

class UserModel():
    def __init__(self):
        try:
            self.meta = MetaData()
            self.users = Table("users", self.meta, autoload=True, autoload_with=engine)
        except Exception as e:
            print(e)

    def get(self):
        stmt = select([self.users.c.name, self.users.c.email, self.users.c.password])
        print(stmt)
        result = engine.execute(stmt)
        temp = [dict(r) for r in result] if result else None
        print(temp)
        return temp
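A quick usage sketch for the class above (assuming the reflected users table actually exists in the database):
user_model = UserModel()
rows = user_model.get()  # list of dicts like {'name': ..., 'email': ..., 'password': ...}
print(rows)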

Cannot drop table in pandas to_sql using SQLAlchemy

I'm trying to drop an existing table, do a query, and then recreate the table using the pandas to_sql function. This query works in pgAdmin, but not here. Any idea whether this is a pandas bug or whether my code is wrong?
The specific error is ValueError: Table 'a' already exists.
import pandas.io.sql as psql
from sqlalchemy import create_engine

engine = create_engine(r'postgresql://user@localhost:port/dbname')
c = engine.connect()
conn = c.connection

sql = """
drop table a;
select * from some_table limit 1;
"""
df = psql.read_sql(sql, con=conn)
print(df.head())
df.to_sql('a', engine)
conn.close()
Why are you doing it like that? There is a shorter way: the if_exists kwarg in to_sql. Try this:
import pandas.io.sql as psql
from sqlalchemy import create_engine

engine = create_engine(r'postgresql://user@localhost:port/dbname')
c = engine.connect()
conn = c.connection

sql = """
select * from some_table limit 1;
"""
df = psql.read_sql(sql, con=conn)
print(df.head())

# Notice how the line below is different. You forgot the schema argument
df.to_sql('a', con=conn, schema=schema_name, if_exists='replace')
conn.close()
According to the docs:
replace: If table exists, drop it, recreate it, and insert data.
P.S. Additional tip:
This is a better way to handle the connection:
with engine.connect() as conn, conn.begin():
    sql = """select * from some_table limit 1"""
    df = psql.read_sql(sql, con=conn)
    print(df.head())
    df.to_sql('a', con=conn, schema=schema_name, if_exists='replace')
Because it ensures that your connection is always closed, even if your program exits with an error. This is important to prevent data corruption. Further, I would just use this:
import pandas as pd
...
pd.read_sql(sql, conn)
instead of the way you are doing it.
So, if I was in your place writing that code, it would look like this:
import pandas as pd
from sqlalchemy import create_engine

engine = create_engine(r'postgresql://user@localhost:port/dbname')

with engine.connect() as conn, conn.begin():
    df = pd.read_sql('select * from some_table limit 1', con=conn)
    print(df.head())
    df.to_sql('a', con=conn, schema=schema_name, if_exists='replace')
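For reference, a quick sketch of the three accepted if_exists values (same table and engine names as in the example above):
df.to_sql('a', con=engine, if_exists='fail')     # default: raise ValueError if table 'a' exists
df.to_sql('a', con=engine, if_exists='replace')  # drop 'a', recreate it, then insert the data
df.to_sql('a', con=engine, if_exists='append')   # keep 'a' and insert the new rows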

(flask) python - mysql - using where clause in a select query with variable from URL [duplicate]

This question already has answers here:
How to use variables in SQL statement in Python?
(5 answers)
Closed 4 years ago.
@app.route('/select/<username>')
def select(username):
    db = MySQLdb.connect("localhost", "myusername", "mypassword", "mydbname")
    cursor = db.cursor()
    cursor.execute("SELECT * FROM p_shahr")
    data = cursor.fetchall()
    db.close()
    return render_template('select.html', data=data)
I want to edit the select query in this script so that it becomes
SELECT * FROM p_shahr WHERE os = username
How should I edit the query to include the WHERE clause above, setting os to the username coming from the URL?
Use placeholders in the query and pass the parameters as a tuple to execute.
@app.route('/select/<username>')
def select(username):
    db = MySQLdb.connect("localhost", "myusername", "mypassword", "mydbname")
    cursor = db.cursor()
    query_string = "SELECT * FROM p_shahr WHERE os = %s"
    cursor.execute(query_string, (username,))
    data = cursor.fetchall()
    db.close()
    return render_template('select.html', data=data)
But be aware that this [passing data from the URL directly to the DB] is a very naive and attack-prone approach.
See
https://stackoverflow.com/a/21734918/4183498
https://stackoverflow.com/a/902417/4183498
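To make the placeholder advice concrete, here is a minimal contrast (the values are hypothetical):
# UNSAFE: string interpolation lets a crafted username inject arbitrary SQL
cursor.execute("SELECT * FROM p_shahr WHERE os = '%s'" % username)

# SAFE: the driver escapes the value bound to the %s placeholder
cursor.execute("SELECT * FROM p_shahr WHERE os = %s", (username,))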

Converting SQL commands to Python's ORM

How would you convert the following codes to Python's ORM such as by SQLalchemy?
#1 Putting data to Pg
import os, pg, sys, re, psycopg2
#conn = psycopg2.connect("dbname='tkk' host='localhost' port='5432' user='noa' password='123'")
conn = psycopg2.connect("dbname=tk user=naa password=123")
cur = conn.cursor()
cur.execute("""INSERT INTO courses (course_nro)
               VALUES ( %(course_nro)s )""", dict(course_nro='abcd'))
conn.commit()
#2 Fetching
cur.execute("SELECT * FROM courses")
print(cur.fetchall())
Examples of the two commands in SQLAlchemy:
insert
sqlalchemy.sql.expression.insert(table, values=None, inline=False, **kwargs)
select
sqlalchemy.sql.expression.select(columns=None, whereclause=None, from_obj=[], **kwargs)
After the initial declarations, you can do something like this:
o = Course(course_nro='abcd')
session.add(o)
session.commit()
and
print session.query(Course).all()
The declarations could look something like this:
from sqlalchemy import *
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

# create an engine, and a base class
engine = create_engine('postgresql://naa:123@localhost/tk')
DeclarativeBase = declarative_base(bind=engine)
metadata = DeclarativeBase.metadata

# create a session
Session = sessionmaker(bind=engine)
session = Session()

# declare the models
class Course(DeclarativeBase):
    __tablename__ = 'courses'

    # declarative mappings need a primary key; course_nro serves that role here
    course_nro = Column('course_nro', CHAR(12), primary_key=True)
This declarative method is just one way of using sqlalchemy.
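If you would rather stay close to the insert()/select() expression constructs quoted in the question, here is a rough Core-style sketch, assuming a reasonably recent SQLAlchemy (1.4+, where select() accepts the table directly) and the same table and credentials as above:
from sqlalchemy import create_engine, MetaData, Table, Column, CHAR, insert, select

engine = create_engine('postgresql://naa:123@localhost/tk')
metadata = MetaData()
courses = Table('courses', metadata, Column('course_nro', CHAR(12)))

# engine.begin() opens a transaction and commits it automatically on success
with engine.begin() as conn:
    conn.execute(insert(courses).values(course_nro='abcd'))
    rows = conn.execute(select(courses)).fetchall()
    print(rows)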
Even though this is old, more examples can't hurt, right? I thought I'd demonstrate how to do this with PyORMish.
from pyormish import Model

class Course(Model):
    _TABLE_NAME = 'courses'
    _PRIMARY_FIELD = 'id'  # or whatever your primary field is
    _SELECT_FIELDS = ('id', 'course_nro')
    _COMMIT_FIELDS = ('course_nro',)

Model.db_config = dict(
    DB_TYPE='postgres',
    DB_CONN_STRING='postgre://naa:123@localhost/tk'
)
To create:
new_course = Course().create(course_nro='abcd')
To select:
# return the first row WHERE course_nro='abcd'
new_course = Course().get_by_fields(course_nro='abcd')
