Appending data from Excel to an existing SQL Server table using Python

I have some CSV files with recurring data, so I need to update SQL Server using this Python script.
I have tried updating the Microsoft ODBC driver for SQL Server, but that didn't help.
Here is my Python code:
import pandas as pd
import numpy as np
import seaborn as sns
import scipy.stats as stats
import matplotlib.pyplot as plt
from datetime import time
from datetime import date
import pandas.io.sql
import pyodbc
import xlrd
server = 'asd'
db = 'asd'
conn = pyodbc.connect('DRIVER={SQL Server};SERVER=' + server + ';DATABASE=' + db + ';UID=asd;PWD=asd')
cursor = conn.cursor()
query = """
INSERT INTO Db.table (
    Emp_ID,
    Global_ID,
    Emp_NAME,
    Org,
    SBU,
    BU,
    Sub_BU,
    HR_Location,
    Swipe_Loc,
    Descp,
    InOutDate,
    InTime,
    OutTime,
    ActHrs,
    ShiftCode,
    AttendanceClassification,
    ActualHrs
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"""
InOutDate = date.today()
InTime = time(11, 11, 11)
OutTime = time(11, 11, 11)
ActHrs = time(11, 11, 11)
ActualHrs = time(11, 11, 11)
values = ('2134123', '123213', 'Eqqwe', 'Org', 'SBU', 'BU ', 'Sub_BU', 'HR_Location', 'Swipe_Loc', ' Descp', InOutDate, InTime, OutTime, ActHrs, 'ShiftCode', 'AttendanceClassification', ActualHrs)
cursor.execute(query, values)
conn.close()
I get the following error when executing the query:
Traceback (most recent call last):
File "update.py", line 97, in <module>
cursor.execute(query, values)
pyodbc.Error: ('HYC00', '[HYC00] [Microsoft][ODBC SQL Server Driver]Optional feature not implemented (0) (SQLBindParameter)')

Make sure the date/time values are passed in a form that is compatible between Python and SQL Server; this SQLBindParameter error means the ODBC driver cannot bind one of the parameter types you are sending.
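This particular HYC00 error often comes from binding datetime.time objects through the legacy {SQL Server} ODBC driver, which predates SQL Server's time type. A sketch of two possible workarounds (the ISO-string version assumes the target columns accept 'HH:MM:SS' strings):

# Workaround 1: bind the times as plain strings the old driver can handle
InTime = time(11, 11, 11).isoformat()        # '11:11:11'
OutTime = time(11, 11, 11).isoformat()
ActHrs = time(11, 11, 11).isoformat()
ActualHrs = time(11, 11, 11).isoformat()

# Workaround 2: connect through a driver that supports the time type, if installed
conn = pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server};SERVER=' + server +
                      ';DATABASE=' + db + ';UID=asd;PWD=asd')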

You forgot to add cursor.commit() after execute(). execute() on its own is enough only for SELECTs and other read-only queries; pyodbc opens connections with autocommit off, so if you want to change something you should call cursor.commit() (or conn.commit(), which is equivalent) afterwards.
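In this script that means committing before closing, for example:

cursor.execute(query, values)
conn.commit()   # pyodbc starts with autocommit off; without this the insert is rolled back on close
conn.close()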

Related

Problem while trying to insert multiple values into a SQLite database

I have to make a request to a Brazilian ZIP-code API to get JSON data and insert it into a SQLite database using Python. I'm currently using PyCharm. I need to insert a lot of columns, but somehow the code doesn't insert the values. Here's the code:
import requests
import sqlite3
import json

CEPC = input("Please type the zipcode:")
print("Identifying the ZIP CODE")
Requisicao = requests.get(f"https://viacep.com.br/ws/{CEPC}/json")
if Requisicao.status_code == 200:
    data = Requisicao.json()
    # Database
    con = sqlite3.connect("Banco de dados/CEPS.db")
    cur = con.cursor()
    cur.execute("DROP TABLE IF EXISTS Requisicao")
    cur.execute("CREATE TABLE Requisicao (cep, logradouro, bairro, uf, ddd, siafi, validation, created json)")
    cur.executemany("insert into Requisicao values (?, ?, ?, ?, ?, ?, ?, ?)",
                    (data["cep"], json.dumps(data)))
    con.commit()
    con.close()
else:
    print(f"Request failed with status code {Requisicao.status_code} ")
The output for the zipcode is:
{
"cep": "05565-000",
"logradouro": "Avenida General Asdrúbal da Cunha",
"complemento": "",
"bairro": "Jardim Arpoador",
"localidade": "São Paulo",
"uf": "SP",
"ibge": "3550308",
"gia": "1004",
"ddd": "11",
"siafi": "7107"
}
I need to insert all of these columns: "cep, logradouro, complemento, bairro, localidade, uf, ibge, gia, ddd, siafi". When I try to run the code, it gives me the error:
Traceback (most recent call last):
File "C:\Users\Gui\PycharmProjects\pythonProject\main.py", line 19, in <module>
cur.executemany("insert into Requisicao values (?, ?, ?, ?, ?, ?, ?, ?)", (data["cep"],
json.dumps(data)))
sqlite3.ProgrammingError: Incorrect number of bindings supplied. The current statement
uses 8, and there are 9 supplied
When I instead use exactly as many "?" placeholders as there are columns, the error says "uses 8, and there are 7 supplied".
This code will insert all 10 values from the JSON into the Requisicao table, plus 0 for both validation and created, though that can be changed.
import requests
import sqlite3
import json

CEPC = input("Please type the zipcode:")
print("Identifying the ZIP CODE")
Requisicao = requests.get(f"https://viacep.com.br/ws/{CEPC}/json")
if Requisicao.status_code == 200:
    data = Requisicao.json()
    # Database
    con = sqlite3.connect("CEPS.db")
    cur = con.cursor()
    cur.execute("DROP TABLE IF EXISTS Requisicao")
    cur.execute("CREATE TABLE Requisicao (cep, logradouro, complemento, bairro, localidade, uf, ibge, gia, ddd, siafi, validation, created)")
    cur.execute("insert into Requisicao values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
                tuple(data.values()) + (0, 0))
    con.commit()
    con.close()
else:
    print(f"Request failed with status code {Requisicao.status_code} ")

Error using method and chunksize in pandas to_sql

I am trying to speed up the pandas .to_sql() function, as it currently takes ~30 minutes to dump a table of 22 columns and 100K rows to a MS SQL Server DB. I've tried using method='multi' and chunksize=1000 (which I've read is the max for SQL Server), but I get the following error, with a bunch of ?s in the error and my data in the [parameters: section of the error:
DBAPIError: (pyodbc.Error) ('07002', '[07002] [Microsoft][ODBC Driver 17 for SQL Server]COUNT field incorrect or syntax error (0) (SQLExecDirectW)')
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
[parameters: (one big tuple)]
Here is the code I am using:
from sqlalchemy import create_engine

user_name = 'username'
cred = open('filename', 'r').read()
server_name = 'XXXXXXXX'
port = 'XXXX'
DB = 'database'
driver = 'ODBC Driver 17 for SQL Server'
conn = create_engine('mssql+pyodbc://' + user_name + ':' + cred + '@' + server_name + ':' + port + '/' + DB + '?driver=' + driver)
df.to_sql('test_table', con=conn, if_exists='replace', schema='dbo', method='multi', chunksize=1000)
Any ideas on what is happening here, or an alternative way to speed this up?
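One likely explanation (an assumption, not confirmed in this thread): SQL Server caps a single statement at 2100 bound parameters, and method='multi' folds chunksize rows into one INSERT, so 1000 rows x 22 columns = 22,000 parameters, which matches the "COUNT field incorrect" complaint. A sketch of two fixes:

# Keep method='multi' but size chunks under the 2100-parameter cap
safe_chunk = 2100 // len(df.columns) - 1      # 2100 // 22 - 1 = 94 rows per chunk
df.to_sql('test_table', con=conn, if_exists='replace', schema='dbo',
          method='multi', chunksize=safe_chunk)

# Or drop method='multi' and let pyodbc batch the inserts instead
# (SQLAlchemy 1.3+); this is usually faster against SQL Server
conn = create_engine(engine_url, fast_executemany=True)   # engine_url: the URL string built above
df.to_sql('test_table', con=conn, if_exists='replace', schema='dbo', chunksize=1000)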

Returning "KeyError: " in Python when trying to populate a SQlite database

I am currently getting the following error:
Traceback (most recent call last):
File "/Users/Stephen/Desktop/projects/Option_History/snapshot.py", line 44, in <module>
data = (current_timestamp, option['underlying'], option['symbol'], option['description'], option['strike'], option['bid'], option['ask'], option['volume'], option['greeks']['delta'], option['greeks']['gamma'], option['greeks']['theta'], option['greeks']['vega'], option['greeks']['rho'], option['greeks']['phi'], option['greeks']['mid_iv'], option['greeks']['smv_vol'])
KeyError: 'greeks'
My code is shown below:
import config, requests, pprint, sqlite3
from datetime import datetime

connection = sqlite3.connect('option_history.db')
cursor = connection.cursor()
try:
    cursor.execute("""
        CREATE TABLE option_history (
            timestamp text,
            underlying text,
            symbol text,
            description text,
            strike real,
            bid real,
            ask real,
            volume real,
            delta real,
            gamma real,
            theta real,
            vega real,
            rho real,
            phi real,
            mid_iv real,
            smv_vol real
        )
    """)
except:
    pass

response = requests.get(config.OPTION_CHAIN_URL,
                        params={'symbol': 'SPY', 'expiration': '2020-12-04', 'greeks': 'true'},
                        headers=config.HEADERS
                        )
json_response = response.json()
options = json_response['options']['option']
current_timestamp = datetime.now().replace(second=0, microsecond=0).isoformat()
print(options)

for option in options:
    data = (current_timestamp, option['underlying'], option['symbol'],
            option['description'], option['strike'], option['bid'],
            option['ask'], option['volume'],
            option['greeks']['delta'], option['greeks']['gamma'],
            option['greeks']['theta'], option['greeks']['vega'],
            option['greeks']['rho'], option['greeks']['phi'],
            option['greeks']['mid_iv'], option['greeks']['smv_vol'])
    print(",".join(map(str, data)))
    cursor.execute("""
        INSERT INTO option_history (
            timestamp, underlying, symbol, description, strike, bid, ask, volume, delta, gamma, theta, vega, rho, phi, mid_iv, smv_vol
        )
        VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
    """, data)

connection.commit()
connection.close()
I ran this code every 5 minutes for an entire day via cron and it inserted all the data I needed with no errors. When I tried to run it again today using crontab, the DB was not populated with any new data.
The interesting part is that even though I am getting the KeyError, the data variable still appears to be populated properly, because the
print(",".join(map(str, data)))
line still prints to the console without issue on each new API call.
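A KeyError: 'greeks' means at least one option dict in that day's response has no greeks key at all; the print line succeeds for the options that come before the offending one, which is why output still appears. A defensive sketch, assuming missing greeks should be stored as NULL:

for option in options:
    greeks = option.get('greeks') or {}   # handles both a missing key and a null value
    data = (current_timestamp, option['underlying'], option['symbol'],
            option['description'], option['strike'], option['bid'], option['ask'],
            option['volume'],
            greeks.get('delta'), greeks.get('gamma'), greeks.get('theta'),
            greeks.get('vega'), greeks.get('rho'), greeks.get('phi'),
            greeks.get('mid_iv'), greeks.get('smv_vol'))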

Python insert to SQL Server from multiple CSV files

Newbie here, trying to import multiple CSVs into SQL Server. The code ran, but no data was inserted into the database.
My code is attached; maybe the error lies in the loop.
Please help.
import csv
import pyodbc as p
import os

# Database Connection Info
server = "cld-077\eform"
database = "E-form"
username = "wsmeform"
password = "M1loA1s!"
connStr = (
    'DRIVER={ODBC Driver 13 for SQL Server};SERVER=' + server + ';DATABASE=' + database + ';UID=' + username + ';PWD=' + password)

# Open connection to SQL Server Table
conn = p.connect(connStr)
# Get cursor
cursor = conn.cursor()

# Assign path to Excel files
print("Inserting!")
folder_to_import = 'C:/Users/ck.law/Desktop/VBFU_NOV/'
print("path")
l_files_to_import = os.listdir(folder_to_import)
print("inside loop")
for file_to_import in l_files_to_import:
    if file_to_import.endswith('.csv'):
        csv_files = os.path.join(folder_to_import, file_to_import)
        csv_data = csv.reader(csv_files)
        for row in csv_data:
            if len(row) >= 19:
                cursor.execute(
                    "INSERT INTO VesselBFUData(ShortCode,DocDT,PostDT,DocNo,LineItm,GlCode,ExpType,InvRef,VBaseCurrcy,VBaseAmt,DocCurrcy,DocAmt,VendorCode,Description,InvFilePath,InvCreateDT,InvAppvDT,InvArriDT,PoRef)" " VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
                    row)
print("Loop!")
cursor.close()
conn.commit()
conn.close()
print("Script has successfully run!")

Error in Python script writing multiple CSV files into a SQL Server table

I'm trying to write an entire folder of CSV files into a SQL Server table.
I'm getting the following error, and I'm really stumped:
Traceback (most recent call last):
File "C:\\Projects\Import_CSV.py", line 37, in <module>
cursor.execute("INSERT INTO HED_EMPLOYEE_DATA(Company, Contact, Email, Name, Address, City, CentralCities, EnterpriseZones, NEZ, CDBG)" "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", row)
DataError: ('22001', '[22001] [Microsoft][SQL Server Native Client 10.0][SQL Server]String or binary data would be truncated. (8152) (SQLExecDirectW); [01000] [Microsoft][SQL Server Native Client 10.0][SQL Server]The statement has been terminated. (3621)')
I'm not sure what's wrong with my code. I also need it to skip the first row of each CSV file, since that's the header row. Any help would be greatly appreciated. Thank you.
# Import arcpy module
import csv
import arcpy
import pyodbc as p
import os

# Database Connection Info
server = "myServer"
database = "myDB"
connStr = ('DRIVER={SQL Server Native Client 10.0};SERVER=' + server + ';DATABASE=' + database + ';' + 'Trusted_Connection=yes')

# Open connection to SQL Server Table
conn = p.connect(connStr)
# Get cursor
cursor = conn.cursor()

# Assign path to Excel files
folder_to_import = "\\\\Server\\HED_DATA_CSV"
l_files_to_import = os.listdir(folder_to_import)
for file_to_import in l_files_to_import:
    if file_to_import.endswith('.CSV'):
        csv_files = os.path.join(folder_to_import, file_to_import)
        csv_data = csv.reader(file(csv_files))
        for row in csv_data:
            cursor.execute("INSERT INTO HED_EMPLOYEE_DATA(Company, Contact, Email, Name, Address, City, CentralCities, EnterpriseZones, NEZ, CDBG)" "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", row)
cursor.close()
conn.commit()
conn.close()
print "Script has successfully run!"
You can skip the first line this way:
csv_data.next()  # throw away first row
for row in csv_data:
    if len(row) >= 10:
        cursor.execute("INSERT ..." ...)
Also, you should check to make sure that row contains enough elements before executing:
if len(row) >= 10:  # use first ten values in row, if there are at least ten
    cursor.execute("INSERT ...", row[:10])
You currently have your insert statement listed as two strings next to each other. This has the effect of joining them together with no space in between. You may want a space before "VALUES".
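Putting those three fixes together, the loop might look like this (a sketch against the question's table; note the truncation error itself means some value is still wider than its column, so the column widths may also need checking):

csv_data.next()   # skip the header row (Python 2; use next(csv_data) on Python 3)
for row in csv_data:
    if len(row) >= 10:
        cursor.execute(
            "INSERT INTO HED_EMPLOYEE_DATA(Company, Contact, Email, Name, Address, "
            "City, CentralCities, EnterpriseZones, NEZ, CDBG) "  # note the trailing space before VALUES
            "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
            row[:10])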
