Python using PRE-FETCH on Oracle 10

import cx_Oracle
import wx

# Timing marker before the fetch (wx.Now() returns the current date/time).
print("Start..." + str(wx.Now()))

# Build the DSN and connect.
# NOTE(review): `port`, `username` and `password` are placeholders and must
# be defined before this script runs (the original had the invalid token
# `user name`, which is a syntax error).
base = cx_Oracle.makedsn('xxx', port, 'yyyy')
connection = cx_Oracle.connect(username, password, base)

cursor = connection.cursor()
# Pre-fetch tuning: raise arraysize so each round trip to the database
# returns up to 10000 rows instead of the small default, which is the
# main lever cx_Oracle offers for fetching large result sets quickly.
cursor.arraysize = 10000
cursor.execute('select data from t_table')
li_row = cursor.fetchall()

# Keep only the first (and only) selected column of each row.
data = [row[0] for row in li_row]

cursor.close()
connection.close()

print("End..." + str(wx.Now()))
print("DONE!!!")
Is there a way to integrate pre-fetch in this program? My goal is to get data from database as quick as possible.

Fetching 10000 rows...
# Pre-fetch tuning: fetch up to 10000 rows per database round trip.
cursor.arraysize = 10000

Related

How to connect to Sybase using Python Pycharm

I tried the two code samples below, but neither of them worked. I am using PyCharm to write the Python code. The Python version is 3.7 and the database is Sybase ASE.
import pyodbc

# pyodbc.connect() takes the raw ODBC connection string directly;
# urllib.parse.quote_plus is only needed when embedding the string in an
# SQLAlchemy "odbc_connect" URL, so it is dropped here.
# NOTE(review): the driver name must be wrapped in braces -- the original
# string was missing the opening '{' in 'DRIVER=FreeTDS}'.
conn_str = (
    'DRIVER={FreeTDS};Server=ee;Database=w;UID=sw2;PWD=Liw9;'
    'TDS_Version=8.0;Port=5000;'
)
db_connection = pyodbc.connect(conn_str)

cursor = db_connection.cursor()
# Rows fetched per round trip by fetchall()/fetchmany().
cursor.arraysize = 5000
cursor.execute('SELECT top 2 * FROM dbo.rat')
dataset = cursor.fetchall()
if dataset:
    for row in dataset:
        print('D_PK : ', row[0])
        print('D_ID : ', row[1])
cursor.close()
db_connection.close()
import pyodbc

serv = 'Ddsad5'
usr = 'dsda'
passwd = 'dfd9'
db = 'rrg'
prt = '5000'
driver = 'FreeTDS'

# Keyword-style connection.
# NOTE(review): the original passed TDS_Version=9.5, which FreeTDS does not
# define; Sybase ASE speaks TDS protocol 5.0.
conn = pyodbc.connect(driver=driver, server=serv, database=db, port=prt,
                      uid=usr, pwd=passwd, TDS_Version=5.0)
print(conn)

cursor = conn.cursor()
cursor.execute('SELECT top 2 * FROM dbo.tt1')
row = cursor.fetchall()
print(row)
Please let me know what code works to connect.

Fetching data from postgres database in batch (python)

I have the following Postgres query where I am fetching data from table1 with rows ~25 million and would like to write the output of the below query into multiple files.
query = """ WITH sequence AS (
SELECT
a,
b,
c
FROM table1 )
select * from sequence;"""
Below is the Python script that fetches the complete dataset. How can I modify the script so the output is written to multiple files (e.g. 10000 rows per file)?
# --- imports ----------------------------------------------------------
import psycopg2
from pandas import DataFrame

# --- open the database connection -------------------------------------
connection = psycopg2.connect(
    "dbname='x' user='x' host='x' password='x' port = x"
)
connection.autocommit = True
cursor = connection.cursor()

# Run the query (defined earlier) and load every row into a DataFrame.
cursor.execute(query)
df = DataFrame(cursor.fetchall())
Thanks
Here are 3 methods that may help
use psycopg2 named cursor cursor.itersize = 2000
snippet
# Server-side (named) cursor: rows are streamed from the server in chunks
# of `itersize`, so the client never holds the whole result set in memory.
with conn.cursor(name='fetch_large_result') as cursor:
    cursor.itersize = 20000
    query = "SELECT * FROM ..."
    cursor.execute(query)
    for row in cursor:
        ...  # process each row here (original had the invalid token "....")
use psycopg2 named cursor fetchmany(size=2000)
snippet
conn = psycopg2.connect(conn_url)
# Naming the cursor makes it server-side, so fetchmany() pulls only
# `size` rows per round trip instead of the whole table at once.
cursor = conn.cursor(name='fetch_large_result')
cursor.execute('SELECT * FROM <large_table>')
while True:
    # consume result over a series of iterations
    # with each iteration fetching 2000 records
    records = cursor.fetchmany(size=2000)
    if not records:
        break
    for r in records:
        ...  # process each record here (original had the invalid token "....")
cursor.close()  # cleanup
conn.close()
Finally, you could define a SCROLL CURSOR.
Define a SCROLL CURSOR
snippet
-- NOTE(review): BEGIN/COMMIT take no user-defined name in PostgreSQL
-- (the original "BEGIN MY_WORK" / "COMMIT MY_WORK" is invalid syntax).
BEGIN WORK;
-- Set up a cursor:
DECLARE scroll_cursor_bd SCROLL CURSOR FOR SELECT * FROM My_Table;
-- Fetch the first 5 rows in the cursor scroll_cursor_bd:
FETCH FORWARD 5 FROM scroll_cursor_bd;
CLOSE scroll_cursor_bd;
COMMIT WORK;
Please note: not naming the cursor in psycopg2 makes it a client-side cursor rather than a server-side one.

Batch downloading of table using cx_oracle

I need to download a large table from an oracle database into a python server, using cx_oracle to do so. However, the ram is limited on the python server and so I need to do it in a batch way.
I know already how to do generally a whole table
import cx_Oracle
import pandas as pd

usr = ''
pwd = ''
tns = '(Description = ...'

# NOTE(review): the original passed the undefined name `user`;
# the credential variable is `usr`.
orcl = cx_Oracle.connect(usr, pwd, tns)

tabletoget = 'BIGTABLE'
sql = "SELECT * FROM " + "SCHEMA." + tabletoget

# pandas executes the statement itself, so no manual cursor/execute is
# needed (the original ran the query twice: once via curs.execute and
# again inside read_sql).
data = pd.read_sql(sql, orcl)
data.to_csv(tabletoget + '.csv')  # original was missing the closing ')'
I'm not sure what to do though to load say a batch of 10000 rows at a time and then save it off to a csv and then rejoin.
You can use cx_Oracle directly to perform this sort of batch:
curs.arraysize = 10000  # rows fetched per round trip by fetchmany()
curs.execute(sql)
while True:
    # NOTE(review): the original called `cursor.fetchmany()` but the
    # variable in scope is named `curs`.
    rows = curs.fetchmany()
    if rows:
        write_to_csv(rows)
    # A batch shorter than arraysize means the result set is exhausted.
    if len(rows) < curs.arraysize:
        break
If you are using Oracle Database 12c or higher you can also use the OFFSET and FETCH NEXT ROWS options, like this:
offset = 0
numRowsInBatch = 10000
while True:
    # Oracle 12c+ row-limiting clause.
    # NOTE(review): the ROWS keywords were missing in the original
    # ("offset :offset fetch next :nrows only" is invalid Oracle SQL).
    curs.execute(
        "select * from tabletoget "
        "offset :offset rows fetch next :nrows rows only",
        offset=offset, nrows=numRowsInBatch)
    rows = curs.fetchall()
    if rows:
        write_to_csv(rows)
    # A short batch means we have consumed the whole table.
    if len(rows) < numRowsInBatch:
        break
    offset += len(rows)
This option isn't as efficient as the first one and involves giving the database more work to do but it may be better for you depending on your circumstances.
None of these examples use pandas directly. I am not particularly familiar with that package, but if you (or someone else) can adapt this appropriately, hopefully this will help!
You can achieve your result like this. Here I am loading data to df.
import cx_Oracle
import time
import pandas

user = "test"
pw = "test"
dsn = "localhost:port/TEST"

con = cx_Oracle.connect(user, pw, dsn)
start = time.time()

cur = con.cursor()
# Fetch up to 10000 rows per round trip to cut network overhead.
cur.arraysize = 10000
try:
    cur.execute("select * from test_table")
    # Column names for the DataFrame come from the cursor description.
    names = [x[0] for x in cur.description]
    rows = cur.fetchall()
    df = pandas.DataFrame(rows, columns=names)
    print(df.shape)
    print(df.head())
finally:
    # Always release the cursor, even if the query fails.
    if cur is not None:
        cur.close()

elapsed = time.time() - start
print(elapsed, "seconds")

Python script to read single value from MS SQL Server 2008

I just need to read a single value from an MS SQL Server 2008 connection, but I'm not sure how to do this. Here is the code
import pyodbc

querystring = """SELECT USER_NAME
FROM sem6.sem_computer, [sem6].[V_SEM_COMPUTER], sem6.IDENTITY_MAP, sem6.SEM_CLIENT
WHERE [sem6].[V_SEM_COMPUTER].COMPUTER_ID = SEM_COMPUTER.COMPUTER_ID
AND sem6.SEM_CLIENT.GROUP_ID = IDENTITY_MAP.ID
AND sem6.SEM_CLIENT.COMPUTER_ID = SEM_COMPUTER.COMPUTER_ID
AND [IP_ADDR1_TEXT] = '10.10.10.10'
"""
con = pyodbc.connect('DRIVER={SQL Server};SERVER=10.10.10.100;DATABASE=databasename;UID=username;PWD=password')
cur = con.cursor()
cur.execute(querystring)
# NOTE(review): the original never read the result. Fetch the single row;
# a SELECT needs no commit, so the original con.commit() is dropped.
row = cur.fetchone()
if row is not None:
    print(row[0])
con.close()
You need to fetch the result after executing the query. See PEP 249 for how the dbapi exposes this.
In your case, username = cur.fetchone()[0] will work.
According to python's DB-API, you need to do fetchone to retrieve the first row:
import pyodbc

# Look up the user name recorded for the client at 10.10.10.10.
sql = """SELECT USER_NAME
FROM sem6.sem_computer, [sem6].[V_SEM_COMPUTER], sem6.IDENTITY_MAP, sem6.SEM_CLIENT
WHERE [sem6].[V_SEM_COMPUTER].COMPUTER_ID = SEM_COMPUTER.COMPUTER_ID
AND sem6.SEM_CLIENT.GROUP_ID = IDENTITY_MAP.ID
AND sem6.SEM_CLIENT.COMPUTER_ID = SEM_COMPUTER.COMPUTER_ID
AND [IP_ADDR1_TEXT] = '10.10.10.10'
"""

connection = pyodbc.connect('DRIVER={SQL Server};SERVER=10.10.10.100;DATABASE=databasename;UID=username;PWD=password')
cursor = connection.cursor()
cursor.execute(sql)
# fetchone() returns the first row; print its first (only) column.
first_row = cursor.fetchone()
print(first_row[0])
connection.close()

Python equivalent of PHP mysql_fetch_array

I would like to fetch an array in MySQL. Can someone please tell me how to use Python using MySQLdb to do so?
For example, this is what I would like to do in Python:
<?php
// Load the database configuration / connection setup.
require_once('Config.php');
// Run the query (legacy mysql_* API).
$q = mysql_query("SELECT * FROM users WHERE firstname = 'namehere'");
// Fetch one row as an array keyed by column name.
$data = mysql_fetch_array($q);
echo $data['lastname'];
?>
Thanks.
In Python you can pass dictionary=True (tested on Python 3). This returns each row as a dictionary, which is very similar to an associative array in PHP.
eg.
import mysql.connector

# dictionary=True makes each fetched row a dict keyed by column name --
# the closest Python equivalent of PHP's associative fetch.
connection = mysql.connector.connect(user='root', password='',host='127.0.0.1',database='test1')
cur = connection.cursor(dictionary=True)
cur.execute("SELECT * FROM `users` WHERE id>0")
rows = cur.fetchall()
print(rows)
You can use this (dictionary=True):
import mysql.connector

# dictionary=True => rows come back as dicts keyed by column name.
connection = mysql.connector.connect(user='root', password='',host='127.0.0.1', database='test1')
cur = connection.cursor(dictionary=True)
cur.execute("SELECT * FROM table")
for record in cur:
    print(record['column'])
Install MySQLdb (the drivers for MySQL for Python). Type pip install mysql-python
Read up on the Python DB API, which is the standard way to access databases in Python.
Then, try this:
import MySQLdb

connection = MySQLdb.connect(database='test')
cursor = connection.cursor()
# Parameterised query -- never interpolate values into the SQL string.
cursor.execute('SELECT * FROM users WHERE firstname = %s', ('somename',))
results = cursor.fetchall()
for i in results:
    # NOTE(review): the original transcript used Python 2 `print i`
    # with broken indentation.
    print(i)
I would use SQLAlchemy. Something like this would do the trick:
# SQLAlchemy alternative (requires: from sqlalchemy import create_engine).
# NOTE(review): the original URL used '#' before the host; the separator
# between credentials and host must be '@'.
engine = create_engine('mysql://username:password@host:port/database')
connection = engine.connect()
result = connection.execute("select username from users")
for row in result:
    print("username:", row['username'])
connection.close()
Try:
import MySQLdb

connection = MySQLdb.connect(host="localhost",  # your host
                             user="root",       # username
                             passwd="password", # password
                             db="frateData")    # name of the database

# DictCursor returns each row as a dict keyed by column name.
cursor = connection.cursor(MySQLdb.cursors.DictCursor)
cursor.execute('SELECT * FROM users WHERE firstname = %s', ['namehere'])
data = cursor.fetchall()
# NOTE(review): fetchall() returns a list of row dicts, so a row must be
# indexed first -- the original `data['lastname']` would raise TypeError.
print(data[0]['lastname'])
Please note that by initializing your cursor with the parameter "MySQLdb.cursors.DictCursor", a list of dictionaries is returned, so you can reference each row's data by column name — in your case 'lastname'.

Categories