gloabl name cx not defined - python

I wanna call an Oracle function returning an objectby using cx_Oracle`s cursor.callfunc(). But this is not working
Here you can see my code:
import cx_Oracle
import json
import web
urls = (
"/", "index",
"/grid", "grid",
)
app = web.application(urls, globals(),web.profiler )
web.config.debug = True
connection = cx_Oracle.Connection("TEST_3D/limo1013#10.40.33.160:1521/sdetest")
typeObj = connection.gettype("MDSYS.SDO_GEOMETRY")
class index:
def GET(self):
return "hallo moritz "
class grid:
def GET(self):
web.header('Access-Control-Allow-Origin', '*')
web.header('Access-Control-Allow-Credentials', 'true')
web.header('Content-Type', 'application/json')
cursor = connection.cursor()
cursor.arraysize = 10000 # default = 50
cursor.execute("""SELECT a.id AS building_nr, c.Geometry AS geometry, d.Classname FROM building a, THEMATIC_SURFACE b, SURFACE_GEOMETRY c, OBJECTCLASS d WHERE a.grid_id_400 = 4158 AND a.id = b.BUILDING_ID AND b.LOD2_MULTI_SURFACE_ID = c.ROOT_ID AND c.GEOMETRY IS NOT NULL AND b.OBJECTCLASS_ID = d.ID""")
obj = cursor.fetchone()
obj = obj[1]
print obj
cursor.callfunc("SDO2GEOJSON", cx.Oracle.OBJECT, [obj])
# Aufruf der App
if __name__ == "__main__":
app.run(web.profiler)
Error message:
at /grid
global name 'cx' is not defined
But I am sure that cx_Oracle is correct installed. Furthermore I use import cx_Oracle at the beginning and this is working.
What is wrong?

Simple typo. In the line
cursor.callfunc("SDO2GEOJSON", cx.Oracle.OBJECT, [obj])
You should use cx_Oracle.OBJECT

Related

How to improvise my DB connection code in python for best practices

I Have a config file to read different variables for different sources for Oracle and Mongo for different environments and the Configparser is defined in a module named config_utils.
I have the below code in a db_conn module in my Python framework. I would need suggestions on how to improvise the code for best practices
Note: Connectorx module will be used whenever I want to execute SQL and the module has no open or close connection methods
db_conn.py
from pymongo import MongoClient
import config.config_utils as config
import reusables.dataframes.mongo_utils as mongo_utils
oracle_conn_str = oracle_host = ''
schema_name = table_name = obj_name = batch_name = pk = sur_key = oracle_audit_col_lst = ''
max_updated_date = ''
mongo_job_lst = mongo_raw_job_col_lst = []
mongo_db_name = mongo_collection_name = ''
#------ Oracle -----
def oracle():
global oracle_conn_str, oracle_host
global schema_name, table_name, obj_name, batch_name, pk, sur_key, oracle_audit_col_lst
global max_updated_date
oracle_conn_str = config.test_env['oracle_conn_str']
# Similarly assigning values to other oracle related variables from config file
#------ Mongo ------
def mongo_conn_open():
global mongo_collection_name, mongo_db_name, mongo_job_lst, mongo_raw_job_col_lst
conn_str = config.test_env['mongo_con_string']
# Similarly assigning values to other mongo related variables from config file
return mongo_client
def mongo_query():
df = mongo_utils.mongo_df()
return df
def mongo_conn_close(client):
client.close()
def mongo():
client = mongo_conn_open()
df = mongo_query()
mongo_conn_close(client)
return df
mongo.py
import pandas as pd
import reusables.db.db_conn as db_conn
# Function to query in Mongo DB
def base_df():
db = db_conn.mongo_db_name
# mongo_query
return df
# Function to do the df operations/manipulations in the base_df
def mongo_df():
df = base_df()
# df operations
df.reset_index(inplace=True)
return df

Error: objects can be used in same thread

I'm working on a project very similar to this one: GitHub
I have a class:
class DBfunctions:
def __init__(self, dbname = '../example.db'):
self.debname = dbname
self.conn = sqlite3.connect(dbname)
def search_db(self, telegram_id):
telegram_id = (telegram_id,)
sql = 'SELECT * FROM user WHERE id = ?;'
row = self.conn.execute(sql,telegram_id)
return row
def newuser_db(self, tele_id, name, nick):
par = (tele_id, name, nick, 0)
sql = 'INSERT INTO user VALUES(?,?,?,?);'
self.conn.execute(sql, par)
self.conn.commit()
than i have the main project:
from file import DBfunctions
db = DBfunction()
def start(update: Update, context: CallbackContext): #befor edit: somethingtodo
flag = db.search_db(update.effective_user.id) # here problems start
if flag == None:
db.newuser_db(update.effective_user.id, update.effective_user.first_name, update.effective_user.username)
update.message.reply_text(
'Hi!',
reply_markup=markup,
)
else:
update.message.reply_text(
'Hey! Welcome back!',
reply_markup=markup,
)
def main():
db.setup() # this function is to create tables if not exist yet
dispatcher.add_handler(CommandHandler('start', start))
# other function but nothing correlated
if __name__ == '__main__':
main()
And than the error appears:
File "filefolder/file.py", line 29, in search_db
row = self.conn.execute(sql,telegram_id)
sqlite3.ProgrammingError: SQLite objects created in a thread can only be used in that same thread. The object was created in thread id 15004 and this is thread id 11036.
I can't figure out what i can do to fix it... and don't understand what is different from the project that I find on github (linked)

Homework project. Can't figure out how sum values via flask_sqlalchemy. Flask tossing 'AttributeError: 'BaseQuery' object has no attribute 'sum''

This is the problem. I can do count() (the count of this query is 1617) but can't figure out how to do a sum. FWIW, this is from a job satisfaction survey. Lots of 1 and 0 depending on whether they provided a response to a specific question.
This works:
#app.route('/list') def list_respondents(): all_working = Jf_Q1.query.filter((Jf_Q1.working==1) & (Jf_Q1.good_job==1)).count() return render_template('list.html', all_working=all_working)
This code above works, but what I need to be able to replicate this from postgres:
select sum(moderatewellbeing)/sum(good_job) from jf_q1
where working=1
and
good_job=1;
I've tried:
all_working = Jf_Q1.query.filter(Jf_Q1.working==1).sum()
return render_template('list.html', all_working=all_working)
But flask tosses me:
'AttributeError: 'BaseQuery' object has no attribute 'sum'
Here is all my code:
from flask import Flask,render_template, url_for, redirect
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import func
from flask_migrate import Migrate
######################################
#### SET UP OUR SQLite DATABASE #####
####################################
app = Flask(__name__)
app.config['SECRET_KEY'] = 'mysecretkey'
# Connects our Flask App to our Database
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:M1keD8nJ0e#localhost:5432/project2'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
Migrate(app,db)
#####################################
####################################
###################################
# Let's create our first model!
# We inherit from db.Model class
class Jf_Q1(db.Model):
__tablename__ = 'jf_q1'
#########################################
## CREATE THE COLUMNS FOR THE TABLE ####
#######################################
# Primary Key column, unique id for each puppy
id = db.Column(db.Integer,primary_key=True)
respondent_id = db.Column(db.Text)
good_job = db.Column(db.Numeric)
mediocre_job = db.Column(db.Numeric)
bad_job = db.Column(db.Numeric)
highwellbeing = db.Column(db.Numeric)
moderatewellbeing = db.Column(db.Numeric)
lowwellbeing = db.Column(db.Numeric)
working = db.Column(db.Numeric)
# This sets what an instance in this table will have
# Note the id will be auto-created for us later, so we don't add it here!
def __init__(self,respondent_id,good_job,mediocre_job,bad_job,highwellbeing,moderatewellbeing,lowwellbeing,working):
self.respondent_id = respondent_id
self.good_job = good_job
self.mediocre_job = mediocre_job
self.bad_job = bad_job
self.highwellbeing = highwellbeing
self.moderatewellbeing = moderatewellbeing
self.lowwellbeing = lowwellbeing
self.working = working
# def __repr__(self):
#app.route('/')
def index():
return render_template('home.html')
#app.route('/list')
def list_respondents():
# all_working = Jf_Q1.query.filter((Jf_Q1.working==1) & (Jf_Q1.good_job==1)).count()
# return render_template('list.html', all_working=all_working)
all_working = Jf_Q1.query.filter(Jf_Q1.working==1).sum()
return render_template('list.html', all_working=all_working)
# all_working = select([func.sum(Jf_Q1.working)]).\
# where(Jf_Q1.working==1)
# return render_template('list.html', all_working=all_working)
if __name__ == '__main__':
app.run(debug=True)
you can try this:-
from sqlalchemy.sql import func
all_working = session.query(func.sum(Jf_Q1.working)).filter(Jf_Q1.working==1)
or also you can use with_entities
all_working = Jf_Q1.with_entities(func.sum(jf_Q1.working)).filter(Jf_Q1.working==1)

process pool executor hangs using map

I'm having an issue using python 3 and concurrent.futures ProcessPoolExecutor and the map function.
My code is this:
from elasticsearch import Elasticsearch
from elasticsearch_dsl import Search
import psycopg2
import psycopg2.extensions
import psycopg2.extras
from asq import query
import select
import concurrent.futures
import asyncio
class UpdateElastic:
def __init__(self):
conn = psycopg2.connect(
"dbname=db user=mad password=hat host=blah",
async_=True
)
self.wait(conn)
cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
cur.execute("SELECT * FROM table",)
self.wait(cur.connection)
self.report_files = cur.fetchall()
cur.execute("SELECT * FROM othertable",)
self.wait(cur.connection)
self.payment_events = cur.fetchall()
cur.close()
conn.close()
self.esconn = Elasticsearch([{'host':'elasticsearch.example.com','port':1234}])
# pass
def wait(self, conn):
while 1:
state = conn.poll()
if state == psycopg2.extensions.POLL_OK:
break
elif state == psycopg2.extensions.POLL_WRITE:
select.select([], [conn.fileno()], [])
elif state == psycopg2.extensions.POLL_READ:
select.select([conn.fileno()], [], [])
else:
raise psycopg2.OperationalError("poll() returned %s" % state)
def get_es_indices(self):
indices = self.esconn.indices.get_alias("digital-sales-csv*")
return list(indices.keys())
def update_documents(self, index, scroll_id=None):
print(index)
# return index
# documents = _get_es_documents(conn, index)
# print(documents['_scroll_id'])
# scroll_id = documents['_scroll_id']
# for document in documents['hits']['hits']:
# ids = {
# "report_id": document['_source']['report_id'],
# "payment_id": document['_source']['payment_id'],
# "document_id": document['_id']
# }
# asyncio.run(_update_es_document(conn, index, report_files, payment_events, ids))
# update_documents(index, conn, report_files, payment_events, scroll_id)
def main():
print('main called')
print('instantiating UpdateElastic')
us = UpdateElastic()
print('UpdateElastic instantiated')
print('setting up ProcessPoolExecutor')
blah = ['abc', 'def', 'ghi']
with concurrent.futures.ProcessPoolExecutor(max_workers=5) as executor:
print('calling executor.map')
executor.map(us.update_documents, blah, timeout=10)
if __name__ == "__main__":
main()
With this code, all I'm expecting it to do is print out the values of the array that I've passed, so:
'abc'
'def'
'ghi'
However, after printing: calling executor.map, it hangs.
When i change my constructor to be:
class UpdateElastic:
def __init__(self):
# conn = psycopg2.connect(
# "dbname=db user=mad password=hat host=blah",
# async_=True
# )
# self.wait(conn)
# cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
# cur.execute("SELECT * FROM table",)
# self.wait(cur.connection)
# self.report_files = cur.fetchall()
# cur.execute("SELECT * FROM othertable",)
# self.wait(cur.connection)
# self.payment_events = cur.fetchall()
# cur.close()
# conn.close()
# self.esconn = Elasticsearch([{'host':'elasticsearch.example.com','port':1234}])
pass
(containing only a "pass" in the constructor), it will actually print out the values of the array, as expected.
I'm running this on python 3.7.3, on OSX Mojave 10.14.2.

Insert into Odoo db with a specific id using cursor.commit and psycopg2

I'm trying to migrate some models from OpenERP 7 to Odoo 8 by code. I want to insert objects into new table maintaining the original id number, but it doesn't do it.
I want to insert the new object including its id number.
My code:
import openerp
from openerp import api, modules
from openerp.cli import Command
import psycopg2
class ImportCategory(Command):
"""Import categories from source DB"""
def process_item(self, model, data):
if not data:
return
# Model structure
model.create({
'id': data['id'],
'parent_id': None,
'type': data['type'],
'name': data['name']
})
def run(self, cmdargs):
# Connection to the source database
src_db = psycopg2.connect(
host="127.0.0.1", port="5432",
database="db_name", user="db_user", password="db_password")
src_cr = src_db.cursor()
try:
# Query to retrieve source model data
src_cr.execute("""
SELECT c.id, c.parent_id, c.name, c.type
FROM product_category c
ORDER BY c.id;
""")
except psycopg2.Error as e:
print e.pgerror
openerp.tools.config.parse_config(cmdargs)
dbname = openerp.tools.config['db_name']
r = modules.registry.RegistryManager.get(dbname)
cr = r.cursor()
with api.Environment.manage():
env = api.Environment(cr, 1, {})
# Define target model
product_category = env['product.category']
id_ptr = None
c_data = {}
while True:
r = src_cr.fetchone()
if not r:
self.process_item(product_category, c_data)
break
if id_ptr != r[0]:
self.process_item(product_category, c_data)
id_ptr = r[0]
c_data = {
'id': r[0],
'parent_id': r[1],
'name': r[2],
'type': r[3]
}
cr.commit()
How do I do that?
The only way I could find was to use reference attributes in others objects to relate them in the new database. I mean create relations over location code, client code, order number... and when they are created in the target database, look for them and use the new ID.
def run(self, cmdargs):
# Connection to the source database
src_db = psycopg2.connect(
host="localhost", port="5433",
database="bitnami_openerp", user="bn_openerp", password="bffbcc4a")
src_cr = src_db.cursor()
try:
# Query to retrieve source model data
src_cr.execute("""
SELECT fy.id, fy.company_id, fy.create_date, fy.name,
p.id, p.code, p.company_id, p.create_date, p.date_start, p.date_stop, p.special, p.state,
c.id, c.name
FROM res_company c, account_fiscalyear fy, account_period p
WHERE p.fiscalyear_id = fy.id AND c.id = fy.company_id AND p.company_id = fy.company_id
ORDER BY fy.id;
""")
except psycopg2.Error as e:
print e.pgerror
openerp.tools.config.parse_config(cmdargs)
dbname = openerp.tools.config['db_name']
r = modules.registry.RegistryManager.get(dbname)
cr = r.cursor()
with api.Environment.manage():
env = api.Environment(cr, 1, {})
# Define target model
account_fiscalyear = env['account.fiscalyear']
id_fy_ptr = None
fy_data = {}
res_company = env['res.company']
r = src_cr.fetchone()
if not r:
self.process_fiscalyear(account_fiscalyear, fy_data)
break
company = res_company.search([('name','like',r[13])])
print "Company id: {} | Company name: {}".format(company.id,company.name)
The previous code is only an extract from the whole source code.

Categories