How to make a SELECT with SQLAlchemy? - python

I am using SQLAlchemy and Python, without Flask. I run an infinite loop and invoke the select that SQLAlchemy offers, and it returns the rows of my table, but when I change the value of a specific column from phpMyAdmin, the change is not reflected in the Python script. Could someone give me some advice or help? Thank you in advance.
PS: I leave the code for you to analyze:
from sqlalchemy import Table, Column, Integer, String, MetaData, ForeignKey, create_engine, Float, DateTime, update, Date, Time
from sqlalchemy.sql import select
import time
import os

metadata = MetaData()

ahora = Table('ahora', metadata,
    Column('id', Integer, primary_key=True),
    Column('temperatura', Float()),
    Column('humedad', Float()),
    Column('canal1', Float()),
    Column('canal2', Float()),
    Column('canal3', Float()),
    Column('canal4', Float()),
    Column('hora', Time()),
)

engine = create_engine('mysql+pymysql://javi:javiersolis12#10.0.0.20/Tuti')
connection = engine.connect()

while True:
    # Select the single row in the Configuracion table
    query = select([configuracion])
    confi_actual = connection.execute(query).fetchone()

    query_aux = select([ahora])
    datos_actuales = connection.execute(query_aux).fetchone()
    print(datos_actuales)
    time.sleep(8)

You can specify the components you need to select in a Query on your Session, and then use, for example, all() to get all the rows. See the following example:
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session

Base = declarative_base()
...

class Ahora(Base):
    __tablename__ = 'ahora'
    id = sa.Column(sa.Integer, primary_key=True)
    temperatura = sa.Column(sa.Float)
    ...

...
engine = sa.create_engine('mysql+pymysql://javi:javiersolis12#10.0.0.20/Tuti')
session = Session(engine)

query = session.query(Ahora)
# some actions with query, for example filter(), limit(), etc.
ahora = query.all()
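If the loop still shows stale data, a common cause is that the session or connection keeps one transaction open, and under MySQL's default REPEATABLE READ isolation every query in that transaction reads the same snapshot. A minimal sketch, assuming the Ahora model and engine above, that ends the transaction after each read so changes made from phpMyAdmin become visible:

import time
from sqlalchemy.orm import Session

session = Session(engine)
while True:
    datos_actuales = session.query(Ahora).first()
    print(datos_actuales.id, datos_actuales.temperatura, datos_actuales.humedad)
    # commit (or rollback) ends the current transaction; the next query
    # starts a new one and therefore reads a fresh snapshot
    session.commit()
    time.sleep(8)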

Related

SQL-Alchemy: having problems creating relationships without ForeignKeys

I'm trying to create relations, but without foreign key constraints in the db, quite similar to this post:
sqlalchemy: create relations but without foreign key constraint in db?
However, I'm trying to do it with classical mapping, and I can't figure out what I'm doing wrong with it.
from sqlalchemy import (
    Table,
    MetaData,
    Column,
    String,
)
from sqlalchemy.orm import mapper, relationship
from uuid import uuid4


class InspectionRecord:
    def __init__(self, equipment):
        self.equipment = equipment


class InspectedItem:
    def __init__(self, item):
        self.item = item


metadata = MetaData()

inspected_items = Table(
    'inspected_items',
    metadata,
    Column('inspection_id', String(50)),
    Column('inspected_item_id', String(50), primary_key=True),
    Column('item', String(50))
)

inspection_records = Table(
    'inspection_records',
    metadata,
    Column('inspection_id', String(50), primary_key=True, default=uuid4),
    Column('equipment', String(50))
)


def start_mappers():
    inspected_items_mapper = mapper(InspectedItem, inspected_items)
    inspection_records_mapper = mapper(InspectionRecord, inspection_records, properties={
        "inspected_items": relationship(inspected_items_mapper,
            primaryjoin='foreign(inspected_items.inspection_id) == inspection_records.inspection_id',
            uselist=False)}
    )  # this is the part where I'm having difficulties with


if __name__ == '__main__':
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker

    engine = create_engine('sqlite:///foo.db')
    metadata.drop_all(bind=engine)
    metadata.create_all(engine)
    start_mappers()
    Session = sessionmaker(bind=engine)
    session = Session()
    inspection_record = InspectionRecord(equipment='equipment_01')
    session.add(inspection_record)
After so many attempts, even with additional tinkering, I only get this error:
sqlalchemy.exc.InvalidRequestError: When initializing mapper mapped class InspectionRecord->inspection_records, expression '[InspectedItem.inspection_id]' failed to locate a name ("name 'InspectedItem' is not defined")
Any help would be really appreciated :)
Got this working by changing mapper to mapper_registry.map_imperatively:

from sqlalchemy.orm import registry, relationship
mapper_registry = registry()

def start_mappers():
    inspected_items_mapper = mapper_registry.map_imperatively(InspectedItem, inspected_items)
    inspection_records_mapper = mapper_registry.map_imperatively(
        InspectionRecord, inspection_records,
        properties={
            "inspected_items": relationship(InspectedItem,
                primaryjoin='foreign(InspectedItem.inspection_id) == InspectionRecord.inspection_id',
                uselist=False)})
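With that mapping in place, the relationship should resolve without any foreign key constraint existing in the schema. A small usage sketch, reusing the __main__ block from the question purely as an illustration:

start_mappers()
Session = sessionmaker(bind=engine)
session = Session()

record = session.query(InspectionRecord).first()
# the foreign()-annotated primaryjoin drives this lazy load; no FK exists in the db
print(record.inspected_items)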

Exporting a pandas df to sqlite leads to duplicate datasets instead of one updated dataset

I'm uploading a pandas dataframe from a csv file into a sqlite database via sqlalchemy.
The initial filling works just fine, but when I rerun the following code, the same data is exported again and the database contains two identical datasets.
How can I change the code so that only new or changed data is uploaded into the database?
import sqlalchemy
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, Numeric, DateTime
from sqlalchemy.orm import sessionmaker
from datetime import datetime
import pandas as pd

# Set up of the engine to connect to the database
# the urlquote is used for passing the password which might contain special characters such as "/"
engine = create_engine('sqlite:///historical_data3.db')
conn = engine.connect()
Base = declarative_base()


# Declaration of the class in order to write into the database. This structure is standard and should align with SQLAlchemy's doc.
class Timeseries_Values(Base):
    __tablename__ = 'Timeseries_Values'
    #id = Column(Integer)
    Date = Column(DateTime, primary_key=True)
    ProductID = Column(Integer, primary_key=True)
    Value = Column(Numeric)

    def __repr__(self):
        return "(Date='%s', ProductID='%s', Value='%s')" % (self.Date, self.ProductID, self.Value)


fileToRead = r'V:\PYTHON\ProjectDatabase\HistoricalDATA_V13.csv'
tableToWriteTo = 'Timeseries_Values'

# Panda to create a dataframe with ; as separator.
df = pd.read_csv(fileToRead, sep=';', decimal=',', parse_dates=['Date'], dayfirst=True)

# The orient='records' is the key of this, it allows to align with the format mentioned in the doc to insert in bulks.
listToWrite = df.to_dict(orient='records')

metadata = sqlalchemy.schema.MetaData(bind=engine, reflect=True)
table = sqlalchemy.Table(tableToWriteTo, metadata, autoload=True)

# Open the session
Session = sessionmaker(bind=engine)
session = Session()

# Insert the dataframe into the database in one bulk
conn.execute(table.insert(), listToWrite)

# Commit the changes
session.commit()

# Close the session
session.close()
This is working now, I've added the df.to_sql code:
import sqlalchemy
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, Numeric, DateTime
from sqlalchemy.orm import sessionmaker
from datetime import datetime
import pandas as pd

# Set up of the engine to connect to the database
# the urlquote is used for passing the password which might contain special characters such as "/"
engine = create_engine('sqlite:///historical_data3.db')
conn = engine.connect()
Base = declarative_base()


# Declaration of the class in order to write into the database. This structure is standard and should align with SQLAlchemy's doc.
class Timeseries_Values(Base):
    __tablename__ = 'Timeseries_Values'
    #id = Column(Integer)
    Date = Column(DateTime, primary_key=True)
    ProductID = Column(Integer, primary_key=True)
    Value = Column(Numeric)


fileToRead = r'V:\PYTHON\ProjectDatabase\HistoricalDATA_V13.csv'
tableToWriteTo = 'Timeseries_Values'

# Panda to create a dataframe with ; as separator.
df = pd.read_csv(fileToRead, sep=';', decimal=',', parse_dates=['Date'], dayfirst=True)

# The orient='records' is the key of this, it allows to align with the format mentioned in the doc to insert in bulks.
listToWrite = df.to_dict(orient='records')

df.to_sql(name='Timeseries_Values', con=conn, if_exists='replace')

metadata = sqlalchemy.schema.MetaData(bind=engine, reflect=True)
table = sqlalchemy.Table(tableToWriteTo, metadata, autoload=True)

# Open the session
Session = sessionmaker(bind=engine)
session = Session()

# Insert the dataframe into the database in one bulk
conn.execute(table.insert(), listToWrite)

# Commit the changes
session.commit()

# Close the session
session.close()
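For reference, the df.to_sql call is what prevents the duplicates across reruns: with if_exists='replace' the table is dropped and rebuilt from the dataframe on every run. If the goal is simply to mirror the CSV into the table each time, a minimal sketch relying on to_sql alone (assuming the whole CSV should overwrite the table) could look like this:

import pandas as pd
from sqlalchemy import create_engine

engine = create_engine('sqlite:///historical_data3.db')

fileToRead = r'V:\PYTHON\ProjectDatabase\HistoricalDATA_V13.csv'
df = pd.read_csv(fileToRead, sep=';', decimal=',', parse_dates=['Date'], dayfirst=True)

# 'replace' drops and recreates Timeseries_Values from the dataframe,
# so rerunning the script does not append a second copy of the data
df.to_sql(name='Timeseries_Values', con=engine, if_exists='replace', index=False)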

Autoflush error and filter_by() query giving unexpected result

My goal is to read data off of an excel sheet and create a database on a SQL server. I am trying to write sample code using SQLAlchemy and I am new to it. The code that I have so far is:
import time
from sqlalchemy import create_engine, Column, Integer, Date, String, Table, MetaData, table
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

engine = create_engine('sqlite:///:memory:', echo=False)
Base = declarative_base()


class blc(Base):
    __tablename__ = 'BLC_Databse'
    date = Column(String, primary_key=True)
    RES = Column(String)
    BTTLCOLUMN = Column(String)
    CS_HR = Column(Integer)


Base.metadata.create_all(engine)

sample = blc(date=time.strftime("%m/%d/%y"), RES='BDY_21', BTTLCOLUMN='2075', CS_HR=563)

Session = sessionmaker(bind=engine)
session = Session()

sample2 = blc(date=time.strftime("%m/%d/%y"), RES='BDY_21', BTTLCOLUMN='2076', CS_HR=375)
session.add(sample2)
session.commit()

with session.no_autoflush:
    result = session.query(blc).filter_by(RES='BDY_21').first()
    print(result)
When I perform a filter query (which I am assuming is similar to a WHERE clause in SQL), it prints <__main__.blc object at 0x00705770> instead of the column values.
Eventually, I plan to have the insert clause in a loop that reads data from an excel sheet.
result is an object of the mapped class blc, so printing it shows its default repr. To get the desired column, I had to do result.ColName.
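For example, with the column names from the model above (a minimal sketch):

result = session.query(blc).filter_by(RES='BDY_21').first()
# access the individual mapped columns on the returned blc instance
print(result.date, result.RES, result.BTTLCOLUMN, result.CS_HR)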

Updating row in SqlAlchemy ORM

I am trying to obtain a row from the DB, modify that row and save it again.
Everything using SQLAlchemy.
My code:
from sqlalchemy import Column, DateTime, Integer, String, Table, MetaData
from sqlalchemy.orm import mapper
from sqlalchemy import create_engine, orm

metadata = MetaData()

product = Table('product', metadata,
    Column('id', Integer, primary_key=True),
    Column('name', String(1024), nullable=False, unique=True),
)


class Product(object):
    def __init__(self, id, name):
        self.id = id
        self.name = name


mapper(Product, product)

db = create_engine('sqlite:////' + db_path)
sm = orm.sessionmaker(bind=db, autoflush=True, autocommit=True, expire_on_commit=True)
session = orm.scoped_session(sm)

result = session.execute("select * from product where id = :id", {'id': 1}, mapper=Product)
prod = result.fetchone()  # there are many products in db so query is ok
prod.name = 'test'  # <- here I got AttributeError: 'RowProxy' object has no attribute 'name'
session.add(prod)
session.flush()
Unfortunately it does not work, because I am trying to modify a RowProxy object. How can I do what I want (load, change and save/update a row) the SQLAlchemy ORM way?
I assume that your intention is to use the Object-Relational API.
To update a row in the db, you load the mapped object from the table record and update the object's property.
Please see the code example below.
Note that I have also added example code for creating a new mapped object and inserting the first record in the table; there is commented-out code at the end for deleting the record.
from sqlalchemy import Column, DateTime, Integer, String, Table, MetaData
from sqlalchemy.orm import mapper
from sqlalchemy import create_engine, orm

metadata = MetaData()

product = Table('product', metadata,
    Column('id', Integer, primary_key=True),
    Column('name', String(1024), nullable=False, unique=True),
)


class Product(object):
    def __init__(self, id, name):
        self.id = id
        self.name = name

    def __repr__(self):
        return "%s(%r, %r)" % (self.__class__.__name__, self.id, self.name)


mapper(Product, product)

db = create_engine('sqlite:////temp/test123.db')
metadata.create_all(db)

sm = orm.sessionmaker(bind=db, autoflush=True, autocommit=True, expire_on_commit=True)
session = orm.scoped_session(sm)

# create a new Product record if it does not exist yet:
if session.query(Product).filter(Product.id == 1).count() == 0:
    new_prod = Product("1", "Product1")
    print("Creating new product: %r" % new_prod)
    session.add(new_prod)
    session.flush()
else:
    print("product with id 1 already exists: %r" % session.query(Product).filter(Product.id == 1).one())

print("loading Product with id=1")
prod = session.query(Product).filter(Product.id == 1).one()
print("current name: %s" % prod.name)

prod.name = "new name"
print(prod)

prod.name = 'test'
session.add(prod)
session.flush()
print(prod)

#session.delete(prod)
#session.flush()
PS: SQLAlchemy also provides a SQL Expression API that allows working with table records directly, without creating mapped objects. In my practice we use the Object-Relational API in most applications; sometimes we use the SQL Expression API when we need to perform low-level db operations efficiently, such as inserting or updating thousands of records with one query.
Direct links to SQLAlchemy documentation:
Object Relational Tutorial
SQL Expression Language Tutorial
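For comparison, a minimal sketch of the same update done through the SQL Expression API, using the product table and db engine defined above:

from sqlalchemy import update

# emit a single UPDATE statement without loading a mapped object
stmt = update(product).where(product.c.id == 1).values(name='test')
with db.begin() as conn:
    conn.execute(stmt)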

How to join the same table in sqlalchemy

I'm trying to join the same table in sqlalchemy. This is a minimal version of what I tried:
#!/usr/bin/env python
import sqlalchemy as sa
from sqlalchemy import create_engine
from sqlalchemy.orm import mapper, sessionmaker, aliased

engine = create_engine('sqlite:///:memory:', echo=True)
metadata = sa.MetaData()

device_table = sa.Table("device", metadata,
    sa.Column("device_id", sa.Integer, primary_key=True),
    sa.Column("name", sa.String(255), nullable=False),
    sa.Column("parent_device_id", sa.Integer, sa.ForeignKey('device.device_id')),
)


class Device(object):
    device_id = None

    def __init__(self, name, parent_device_id=None):
        self.name = name
        self.parent_device_id = parent_device_id

    def __repr__(self):
        return "<Device(%s, '%s', %s)>" % (self.device_id,
                                           self.name,
                                           self.parent_device_id)


mapper(Device, device_table)
metadata.create_all(engine)

db_session = sessionmaker(bind=engine)()

parent = Device('parent')
db_session.add(parent)
db_session.commit()

child = Device('child', parent.device_id)
db_session.add(child)
db_session.commit()

ParentDevice = aliased(Device, name='parent_device')
q = db_session.query(Device, ParentDevice)\
    .outerjoin(ParentDevice,
               Device.parent_device_id == ParentDevice.device_id)

print(list(q))
This gives me this error:
ArgumentError: Can't determine join between 'device' and 'parent_device'; tables have more than one foreign key constraint relationship between them. Please specify the 'onclause' of this join explicitly.
But I am specifying an onclause for the join. How should I be doing this?
For query.[outer]join, you specify a list of joins (which is different from expression.[outer]join), so I needed to put the two elements of the join, the target and the onclause, in a tuple, like this:
q = db_session.query(Device, ParentDevice)\
.outerjoin(
(ParentDevice, Device.parent_device_id==ParentDevice.device_id)
)
Your mapper should specify the connection between the two items; here's an example: adjacency list relationships.
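A minimal sketch of such a self-referential (adjacency list) relationship with the classical mapping from the question; the remote_side argument tells SQLAlchemy which side of the self-join is the parent:

from sqlalchemy.orm import mapper, relationship

mapper(Device, device_table, properties={
    # many-to-one from a child device to its parent device
    'parent': relationship(Device, remote_side=[device_table.c.device_id]),
})

A child device's parent row is then available as device.parent.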
