I am running a SQL query from a Python API and want to collect the data in a structured CSV format (column-wise, with each column under its header).
This is the code I have so far:
sql = "SELECT id,author From researches WHERE id < 20 "
cursor.execute(sql)
data = cursor.fetchall()
print (data)
with open('metadata.csv', 'w', newline='') as f_handle:
writer = csv.writer(f_handle)
header = ['id', 'author']
writer.writerow(header)
for row in data:
writer.writerow(row)
Now the data is being printed to the console, but it is not getting into the .csv file.
What is it that I am missing?
Here is a simple example of what you are trying to do:
import sqlite3 as db
import csv

# Run your query; the result is stored as `data`
with db.connect('vehicles.db') as conn:
    cur = conn.cursor()
    sql = "SELECT make, style, color, plate FROM vehicle_vehicle"
    cur.execute(sql)
    data = cur.fetchall()

# Create the csv file
with open('vehicle.csv', 'w', newline='') as f_handle:
    writer = csv.writer(f_handle)
    # Add the header/column names
    header = ['make', 'style', 'color', 'plate']
    writer.writerow(header)
    # Iterate over `data` and write to the csv file
    for row in data:
        writer.writerow(row)
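If you would rather not hard-code the header, any DB-API cursor exposes the column names of the last query through cursor.description, so the header row can be derived from the query itself. A small variation on the sketch above (same `cur`):

header = [col[0] for col in cur.description]  # first field of each entry is the column name
writer.writerow(header)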
import pandas as pd
from sqlalchemy import create_engine
from urllib.parse import quote_plus

params = quote_plus(r'Driver={SQL Server};Server=server_name;Database=DB_name;Trusted_Connection=yes;')
engine = create_engine("mssql+pyodbc:///?odbc_connect=%s" % params)

sql_string = '''SELECT id, author FROM researches WHERE id < 20'''
final_data_fetch = pd.read_sql_query(sql_string, engine)
final_data_fetch.to_csv('file_name.csv')
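One small note: to_csv also writes the DataFrame's index as an unnamed first column by default; pass index=False if you only want the query columns in the file:

final_data_fetch.to_csv('file_name.csv', index=False)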
Hope this helps!
With MySQL: export a CSV with the mysqlclient library (UTF-8)
import csv
import MySQLdb as mariadb
import sys

tablelue = "extracted_table"

try:
    conn = mariadb.connect(
        host="127.0.0.1",
        port=3306,
        user="me",
        password="mypasswd",
        database="mydb")
    cur = conn.cursor()

    # Get the column names for the header row
    instruction = "show columns from " + tablelue
    cur.execute(instruction)
    myresult = cur.fetchall()
    work = [x[0] for x in myresult]

    # Fetch the table contents
    wsql = "SELECT * FROM " + tablelue
    cur.execute(wsql)
    wdata = cur.fetchall()

    # Create the csv file
    fichecrit = tablelue + ".csv"
    with open(fichecrit, 'w', newline='', encoding="utf8") as f_handle:
        writer = csv.writer(f_handle, delimiter=";")
        # Add the header/column names
        writer.writerow(work)
        # Iterate over `wdata` and write to the csv file
        for row in wdata:
            writer.writerow(row)

    conn.close()
except Exception as e:
    print(f"Error: {e}")
    sys.exit(0)
You can dump all results to the csv file without looping:
data = cursor.fetchall()
...
writer.writerows(data)
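Putting that together with the header row, a minimal end-to-end sketch (assuming `cursor` is an already-open DB-API cursor and the same id/author query as in the question):

import csv

sql = "SELECT id, author FROM researches WHERE id < 20"
cursor.execute(sql)
data = cursor.fetchall()

with open('metadata.csv', 'w', newline='') as f_handle:
    writer = csv.writer(f_handle)
    writer.writerow(['id', 'author'])  # header
    writer.writerows(data)             # all data rows at once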
I have a database in MS Access. I am trying to export one column from one table to a CSV file, with Python, using pypyodbc.
In the CSV file obtained, there are no commas in numbers greater than 1. Any idea how to solve this?
Screen from MS Access: (screenshot of the MS Access database)
Screen from the obtained CSV: (screenshot of the CSV)
Code:
import pypyodbc
import csv
import os
from pathlib import Path
import re

data_folder1 = Path("/Users/sfulc/Desktop/FileProva/")
data_folder2 = Path("/Users/sfulc/Desktop/FileOutput/")

for filename in os.listdir("/Users/sfulc/Desktop/FileProva/"):
    file1 = r"Dbq=" + os.path.abspath(data_folder1 / filename) + r";"
    file2 = re.sub("mdb", "csv", os.path.abspath(data_folder2 / filename))

    pypyodbc.lowercase = False
    conn = pypyodbc.connect(r"Driver={Microsoft Access Driver (*.mdb, *.accdb)};" + file1)
    cur = conn.cursor()
    cur.execute("SELECT LoadValue FROM OriginalData")

    with open(file2, 'w', newline='') as f:
        writer = csv.writer(f)
        for row in cur.fetchall():
            writer.writerow(row)

    cur.close()
    conn.close()
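If the commas shown in Access are a thousands-separator display format rather than part of the stored value, the CSV will contain the raw number, and the formatting would have to be reapplied when writing. A sketch of that idea, replacing the writer loop above (assuming LoadValue is numeric):

for row in cur.fetchall():
    writer.writerow([f"{row[0]:,}"])  # the ',' format option inserts thousands separators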
import csv
import psycopg2

conn = psycopg2.connect(database=" ", user=" ", password=" ", host=" ", port= )
cur = conn.cursor()

with open('21.csv', 'r') as f:
    next(f)
    cur.copy_from(f, 'temp_questions', sep=',')
conn.commit()
I have tried to insert the data into my db and I got this error:

cur.copy_from(f, 'temp_questions', sep=',')
psycopg2.errors.QueryCanceled: COPY from stdin failed: error in .read() call: exceptions.ValueError Mixing iteration and read methods would lose data
CONTEXT: COPY temp_questions, line 1

My csv file has 18 columns, and the table in the database has an id plus the same 18 columns. I don't know how to insert the data.
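The ValueError in that traceback comes from mixing file iteration (next(f)) with the .read() calls copy_from makes on the same file object. A common workaround is to consume the header with f.readline() instead, so the file never enters iteration mode. A sketch under the same table and file names as above:

import psycopg2

conn = psycopg2.connect(database=" ", user=" ", password=" ", host=" ", port=5432)  # fill in your settings; 5432 is just the Postgres default port
cur = conn.cursor()

with open('21.csv', 'r') as f:
    f.readline()  # skip the header row without mixing iteration and read
    cur.copy_from(f, 'temp_questions', sep=',')
conn.commit()

Since the table has an id column in addition to the 18 CSV columns, you may also need to pass the 18 column names to copy_from via its columns= argument so the id can be generated by the database.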
import csv
import sqlite3

db = sqlite3.connect('test.db')
print("connected successfully")

csv_file = "test.csv"
with open(csv_file, 'r') as f:
    csvreader = csv.reader(f)
    fields = next(csvreader)  # skip the header row
    sql_insert_query = 'INSERT INTO Test (name, age) VALUES (?, ?)'
    db.executemany(sql_insert_query, csvreader)
    db.commit()  # persist the inserts
    print("inserted")

data = db.execute("SELECT * FROM Test")
for i in data:
    print(i)
Read the data from the csv file and use executemany to insert the array of rows into the database.
I am trying to read a file which contains a list of table names, and I want to execute a simple query:

SELECT *
FROM $TABLE_NAME

against each SQL Server database. The results for each table need to be stored in a separate .csv file.
Can you please help with how to achieve this?
You have to read the data from the server and write it into the csv file:
Get the data from SQL:
import pyodbc
import csv

mydb = pyodbc.connect("Driver={SQL Server Native Client 11.0};"
                      "Server=Server;"
                      "Database=Database;"
                      "uid=username;pwd=password")
cursor = mydb.cursor()

sql = """SELECT * FROM $TABLE_NAME"""
cursor.execute(sql)
rows = cursor.fetchall()
Write the data into the csv:
with open('test.csv', 'w', newline='') as f:
    a = csv.writer(f, delimiter=',')
    a.writerow(["Header 1", "Header 2"])  # etc.
    a.writerows(rows)
Give this code a try.
import pyodbc
import csv

# SQL Server connection settings
conn = pyodbc.connect("Driver={SQL Server Native Client 11.0};"
                      "Server=server;"
                      "Database=dbName;"
                      "uid=User;pwd=password;"
                      "Trusted_Connection=yes;")
cursor = conn.cursor()

inputFile = open("absolute_inputfile_path", "r")
outputDataLocation = "absolute_outputfile_path"

# Read the input file line by line, assuming each line is a table name
line = inputFile.readline()
while line:
    tableName = line.strip()
    line = inputFile.readline()

    query = "SELECT * FROM " + tableName

    # Read query data
    cursor.execute(query)
    rows = cursor.fetchall()

    # Write to file as CSV
    fileWriter = open(outputDataLocation + "/" + tableName + ".csv", 'w', newline='')
    myFile = csv.writer(fileWriter)
    myFile.writerows(rows)
    fileWriter.close()

inputFile.close()
I am running a SQL query from a Python API and want to collect the data in a structured CSV format (column-wise, with each column under its own header).
This is the code I have so far:
import pymysql.cursors
import csv

conn = pymysql.connect(host='159.XXX.XXX.XXX',
                       user='proXXX',
                       password='PXX',
                       db='pXX',
                       charset='utf8mb4',
                       cursorclass=pymysql.cursors.DictCursor)
cursor = conn.cursor()
print(type(conn))

sql = "SELECT id, author FROM researches WHERE id < 20"
cursor.execute(sql)
data = cursor.fetchall()
print(data)

with open('metadata.csv', 'w', newline='') as f_handle:
    writer = csv.writer(f_handle, delimiter=',')
    header = ['id', 'author']
    writer.writerow(header)
    for row in data:
        writer.writerow(row)
Now the data is being printed on the console but is not getting into the .csv file. What is it that I am missing? Please help.
with open('metadata.csv', 'w', newline='') as f_handle:
    fieldnames = ['id', 'author']
    writer = csv.DictWriter(f_handle, fieldnames=fieldnames)
    writer.writeheader()
    for row in data:
        writer.writerow(row)
So the thing is, your data is in the form of dictionaries, while the Writer object expects tuples. You should be using the DictWriter object instead.
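To make the mismatch concrete: with pymysql.cursors.DictCursor, fetchall() returns rows like {'id': 1, 'author': 'Smith'} rather than tuples, and iterating a dict yields its keys, which is why csv.writer produced the wrong output. A self-contained sketch with made-up rows in that shape:

import csv

# Rows shaped the way a DictCursor returns them (made-up values)
data = [{'id': 1, 'author': 'Smith'}, {'id': 2, 'author': 'Jones'}]

with open('metadata.csv', 'w', newline='') as f_handle:
    writer = csv.DictWriter(f_handle, fieldnames=['id', 'author'])
    writer.writeheader()
    writer.writerows(data)  # each dict is mapped onto the fieldnames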
I want to insert the data in my CSV file into the table that I created before.
So let's say I created a table named T.
The csv file is the following:
Last,First,Student Number,Department
Gonzalez,Oliver,1862190394,Chemistry
Roberts,Barbara,1343146197,Computer Science
Carter,Raymond,1460039151,Philosophy
Building on what was shared by Mumpo.
This has worked for me when inserting a CSV to SQL Server. You just need to provide your connection details, filepath, and the table you want to write to. The only caveat is your table must already exist, as this code will insert a CSV to an existing table.
import pyodbc
import csv

# DESTINATION CONNECTION
drivr = ""
servr = ""
db = ""
username = ""
password = ""
my_cnxn = pyodbc.connect('DRIVER={};SERVER={};DATABASE={};UID={};PWD={}'.format(drivr, servr, db, username, password))
my_cursor = my_cnxn.cursor()

def insert_records(table, yourcsv, cursor, cnxn):
    # INSERT SOURCE RECORDS TO DESTINATION
    with open(yourcsv) as csvfile:
        csvFile = csv.reader(csvfile, delimiter=',')
        header = next(csvFile)
        headers = map((lambda x: x.strip()), header)
        insert = 'INSERT INTO {} ('.format(table) + ', '.join(headers) + ') VALUES '
        for row in csvFile:
            values = map((lambda x: "'" + x.strip() + "'"), row)
            cursor.execute(insert + '(' + ', '.join(values) + ');')
        cnxn.commit()  # must commit unless your sql database auto-commits

table = <sql-table-here>  # e.g. 'T' from the question above
mycsv = '...T.csv'  # SET YOUR FILEPATH
insert_records(table, mycsv, my_cursor, my_cnxn)
my_cursor.close()
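One caveat with building the VALUES clause by string concatenation: any quote character in the data breaks the statement, and the pattern is open to SQL injection. A sketch of the same insert_records using pyodbc's ? placeholders and executemany instead (table and column names still have to be formatted in, since identifiers cannot be parameterized):

import csv

def insert_records(table, yourcsv, cursor, cnxn):
    with open(yourcsv, newline='') as csvfile:
        reader = csv.reader(csvfile)
        header = [h.strip() for h in next(reader)]
        placeholders = ', '.join('?' * len(header))
        insert = 'INSERT INTO {} ({}) VALUES ({})'.format(table, ', '.join(header), placeholders)
        cursor.executemany(insert, list(reader))  # the driver quotes the values
    cnxn.commit()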