Basically I'm using MySQLdb to take daily snapshots of my tables and I want to save them as .csv, but one of the fields contains line terminators (\n) and I can't figure out how to get rid of them so my CSV doesn't break.
Here is the Python I'm using:
import csv
import MySQLdb

db = MySQLdb.connect(host="",
                     user="",
                     passwd="",
                     db="")
cur = db.cursor()
sql = """ big query here """
results = cur.execute(sql)
with open("out.csv", "wb") as csv_file:
csv_writer = csv.writer(csv_file)
csv_writer.writerow([i[0] for i in cur.description])
csv_writer.writerow(cur)
Is there an easy way to replace the \n chars with just spaces?
Try this:
import csv
import sys
csv_writer = csv.writer(sys.stdout, lineterminator='\n')
Or:
with open("out.csv","wb",newline='') as csv_file:
If the newline is appearing in the text of your column, maybe something like this would work:
csv_writer.writerow([i[0].replace('\n',' ') for i in cur.description])
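Note that the line above only cleans the header names; to strip newlines from the data rows themselves, a similar per-cell replace can be applied while writing (a sketch, assuming the text columns come back as str):

for row in cur:
    csv_writer.writerow([
        value.replace('\n', ' ') if isinstance(value, str) else value
        for value in row
    ])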
I am making a program that fetches column names and dumps the data into CSV format.
Everything is working fine and the data is being dumped into the CSV; the problem is
that I am not able to get the headers into the CSV. If I open the exported CSV file in Excel, only the data shows up, not the column headers. How do I do that?
Here's my code:
import cx_Oracle
import csv
dsn_tns = cx_Oracle.makedsn(--Details--)
conn = cx_Oracle.connect(--Details--)
d = conn.cursor()
csv_file = open("profile.csv", "w")
writer = csv.writer(csv_file, delimiter=',', lineterminator="\n", quoting=csv.QUOTE_NONNUMERIC)
d.execute("""
select * from all_tab_columns where OWNER = 'ABBAS'
""")
tables_tu = d.fetchall()
for row in tables_tu:
    writer.writerow(row)
conn.close()
csv_file.close()
What code do I use to export the headers to the CSV as well?
Place this just above your for loop:
writer.writerow(i[0] for i in d.description)
Because d.description is a read-only attribute containing 7-tuples that look like:
(name,
type_code,
display_size,
internal_size,
precision,
scale,
null_ok)
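Putting it together with the question's code, the header write sits between the execute and the data loop, roughly like this:

d.execute("select * from all_tab_columns where OWNER = 'ABBAS'")
writer.writerow(i[0] for i in d.description)  # header row from the cursor metadata
for row in d.fetchall():
    writer.writerow(row)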
I am running a SQL query from a Python API and want to collect the data in a structured (column-wise, with values under their headers) .csv format.
This is the code I have so far.
sql = "SELECT id,author From researches WHERE id < 20 "
cursor.execute(sql)
data = cursor.fetchall()
print (data)
with open('metadata.csv', 'w', newline='') as f_handle:
    writer = csv.writer(f_handle)
    header = ['id', 'author']
    writer.writerow(header)
    for row in data:
        writer.writerow(row)
Now the data is being printed to the console but is not getting into the .csv file.
What am I missing?
Here is a simple example of what you are trying to do:
import sqlite3 as db
import csv
# Run your query; the result is stored as `data`
with db.connect('vehicles.db') as conn:
    cur = conn.cursor()
    sql = "SELECT make, style, color, plate FROM vehicle_vehicle"
    cur.execute(sql)
    data = cur.fetchall()

# Create the csv file
with open('vehicle.csv', 'w', newline='') as f_handle:
    writer = csv.writer(f_handle)
    # Add the header/column names
    header = ['make', 'style', 'color', 'plate']
    writer.writerow(header)
    # Iterate over `data` and write to the csv file
    for row in data:
        writer.writerow(row)
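As a side note, if you'd rather not hard-code the header, the column names are also available on the cursor after the execute, via cursor.description (a sketch):

# Build the header from the cursor metadata instead of hard-coding it
header = [col[0] for col in cur.description]
writer.writerow(header)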
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
from urllib.parse import quote_plus
params = quote_plus(r'Driver={SQL Server};Server=server_name; Database=DB_name;Trusted_Connection=yes;')
engine = create_engine("mssql+pyodbc:///?odbc_connect=%s" % params)
sql_string = '''SELECT id,author From researches WHERE id < 20 '''
final_data_fetch = pd.read_sql_query(sql_string, engine)
final_data_fetch.to_csv('file_name.csv')
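One thing to watch: by default to_csv also writes the DataFrame's integer index as an unnamed first column. If you only want the query columns, pass index=False:

final_data_fetch.to_csv('file_name.csv', index=False)  # omit the index column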
Hope this helps!
With MySQL: exporting a CSV with the mysqlclient library (utf8).
import csv
import MySQLdb as mariadb
import sys

tablelue = "extracted_table"

try:
    conn = mariadb.connect(
        host="127.0.0.1",
        port=3306,
        user="me",
        password="mypasswd",
        database="mydb")
    cur = conn.cursor()

    # Collect the column names
    instruction = "show columns from " + tablelue
    cur.execute(instruction)
    myresult = cur.fetchall()
    work = list()
    for x in myresult:
        work.append(x[0])

    # Fetch the data
    wsql = "SELECT * FROM " + tablelue
    cur.execute(wsql)
    wdata = cur.fetchall()

    # Create the csv file
    fichecrit = tablelue + ".csv"
    with open(fichecrit, 'w', newline='', encoding="utf8") as f_handle:
        writer = csv.writer(f_handle, delimiter=";")
        # Add the header/column names
        writer.writerow(work)
        # Iterate over the data and write to the csv file
        for row in wdata:
            writer.writerow(row)

    conn.close()
except Exception as e:
    print(f"Error: {e}")
    sys.exit(0)
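As an aside, the separate show columns query isn't strictly needed; once the SELECT has run, the same column names are available on the cursor (a sketch using cur.description):

cur.execute("SELECT * FROM " + tablelue)
wdata = cur.fetchall()
work = [col[0] for col in cur.description]  # column names straight from the cursor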
You can dump all results to the csv file without looping:
data = cursor.fetchall()
...
writer.writerows(data)
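And since most DB-API cursors are themselves iterable, for large result sets you can skip fetchall() entirely and stream rows straight from the cursor:

cursor.execute(sql)
writer.writerows(cursor)  # streams row by row instead of materialising everything first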
I am trying to import data from a CSV file into a SQLite DB using a Python script.
I have a DB table with the name NSETCS:
import sqlite3
import csv
with open(r'F:\mypractise_python\day11\NSE-TCS.csv', 'r') as infile:
    content = [csv.DictReader(infile, delimiter=',')] # read the whole file at once
    try:
        db = sqlite3.connect('NSETCS')
        cursor = db.cursor()
        for line in content:
            date = line['Date']
            open_stock = float(line['Open'])
            high = float(line['High'])
            low = float(line['Low'])
            last = float(line['Last'])
            close = float(line['Close'])
            tot_trade_qt = float(line['TotTrQt'])
            turnover = float(line['Turnover (Lacs)'])
            cursor.execute('''insert into NSETCS values (:date, :open_stock, :high, :low, :last, :close, :tot_trade_qt, :turnover)''',
                           {'date': date, 'open_stock': open_stock, 'high': high, 'low': low, 'last': last, 'close': close,
                            'tot_trade_qt': tot_trade_qt, 'turnover': turnover})
    except Exception as E:
        print "Error:", E
    else:
        db.commit()
        db.close()
On running the code, I get an error:
Error: DictReader instance has no attribute __getitem__
You are not reading the contents into a list here:
content = [csv.DictReader(infile, delimiter=',')] # read the whole file at once
That only puts the reader into a list. Use list() instead:
content = list(csv.DictReader(infile, delimiter=','))
Not that you need to do this; you could just loop over the object directly:
reader = csv.DictReader(infile, delimiter=',')
then
for line in reader:
Do indent the code that loops to be part of the with statement, because you need to keep the file open to read until the loop is done:
with open(r'F:\mypractise_python\day11\NSE-TCS.csv', 'r') as infile:
    reader = csv.DictReader(infile, delimiter=',')
    try:
        db = sqlite3.connect('NSETCS')
        cursor = db.cursor()
        for line in reader:
            # ...
I'd not catch exceptions; just use the connection as a context manager (just like you do for the file) so that your inserts are committed only if no exceptions took place:
import sqlite3
import csv
db = sqlite3.connect('NSETCS')
# use both file and database connection as context managers
with open(r'F:\mypractise_python\day11\NSE-TCS.csv', 'r') as infile, db:
    reader = csv.DictReader(infile, delimiter=',')
    cursor = db.cursor()
    for line in reader:
        line['turnover'] = line['Turnover (Lacs)']
        cursor.execute('''
            insert into NSETCS values (
                :Date, :Open, :High, :Low, :Last, :Close,
                :TotTrQt, :turnover)
            ''', line)
SQLite doesn't really care if you pass in float() objects or str(); the database is happy to accept either for numeric columns. You can use your line dictionaries directly if you don't convert to float(), except for the Turnover (Lacs) column (I used an alias in the dictionary for that one, since a named parameter can't contain spaces or parentheses). This simplifies your loop somewhat.
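If you also want to drop the explicit loop, here is a sketch using executemany (assuming, as above, that the CSV headers match the NSETCS columns apart from the aliased Turnover (Lacs)):

reader = csv.DictReader(infile, delimiter=',')
rows = [dict(line, turnover=line['Turnover (Lacs)']) for line in reader]
cursor.executemany('''
    insert into NSETCS values (
        :Date, :Open, :High, :Low, :Last, :Close,
        :TotTrQt, :turnover)
    ''', rows)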
I am trying to connect to an Oracle table and execute a SQL query. I need to export the result set to a CSV file. My code is below:
import pyodbc
import csv
cnxn = pyodbc.connect("DSN=11g;UID=test101;PWD=passwd")
cursor = cnxn.cursor()
cursor.execute(sql)
row = cursor.fetchall()
with open('data.csv', 'w', newline='') as fp:
    a = csv.writer(fp, delimiter=',')
    for line in row:
        a.writerows(line)
cursor.close()
When I print line within the for loop, I get something like this:
('Production', 'farm1', 'dc1prb01', 'web')
('Production', 'farv2', 'dc2pr2db01', 'app.3')
('Production', 'farm5', 'dc2pr2db02', 'db.3')
This is not working. Any ideas what I might be missing?
It would be writerow for a single row:
a.writerow(line)
writerows expects an iterable of rows, so when you pass it a single tuple of strings it treats each string as a row, writing each character as a separate field.
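You can see the difference with a quick experiment:

import csv
import sys

w = csv.writer(sys.stdout)
w.writerows(('Production', 'farm1'))  # wrong: each string becomes a row of one-char fields
# P,r,o,d,u,c,t,i,o,n
# f,a,r,m,1
w.writerow(('Production', 'farm1'))   # right: one row with two fields
# Production,farm1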
If you want to use writerows call it on row:
row = cursor.fetchall()

with open('data.csv', 'w', newline='') as fp:
    a = csv.writer(fp, delimiter=',')
    a.writerows(row)
If you are using Python 2, remove newline=''; it is a Python 3 keyword argument:
row = cursor.fetchall()

with open('data.csv', 'w') as fp:
    a = csv.writer(fp, delimiter=',')
    a.writerows(row)
Here is what I am trying to achieve: my current code works fine and runs the query against my SQL Server, but I need to gather information from several servers. How would I add a column with the DB server listed in it?
import pyodbc
import csv
f = open("dblist.ini")
dbserver,UID,PWD = [ variable[variable.find("=")+1 :] for variable in f.readline().split("~")]
connectstring = "DRIVER={SQL server};SERVER=" + dbserver + ";DATABASE=master;UID="+UID+";PWD="+PWD
cnxn = pyodbc.connect(connectstring)
cursor = cnxn.cursor()
fd = open('mssql1.txt', 'r')
sqlFile = fd.read()
fd.close()
cursor.execute(sqlFile)
with open("out.csv", "wb") as csv_file:
csv_writer = csv.writer(csv_file, delimiter = '!')
csv_writer.writerow([i[0] for i in cursor.description]) # write headers
csv_writer.writerows(cursor)
You could add the extra information in your SQL query. For example:
select 'dbServerName' as db_server, * from table;
Your cursor will return an extra column in front of your real data that contains the DB server name (the single quotes make it a string literal in T-SQL, and the alias gives the new column a header). The downside to this method is that you're transferring a little more data.
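Alternatively, you could leave the query untouched and prepend the server name on the Python side while writing, since dbserver is already in scope in your script (a sketch):

csv_writer.writerow(['db_server'] + [i[0] for i in cursor.description])  # headers
for row in cursor:
    csv_writer.writerow([dbserver] + list(row))  # tag each row with its source server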