I created a table with mysql.connector like this:
CREATE TABLE %s (ID int NOT NULL AUTO_INCREMENT,
date VARCHAR(200) NOT NULL default'{}',
s_time VARCHAR(30) NOT NULL default'{}',
shukkin VARCHAR(30) NOT NULL default'{}',
taikin VARCHAR(30) NOT NULL default'{}',
t_time VARCHAR(30) NOT NULL default'{}',
shucchou VARCHAR(30) NOT NULL default'{}',
shucchou_time VARCHAR(30) NOT NULL default'{}',
shucchou_kaeri_time VARCHAR(30) NOT NULL default'{}',
PRIMARY KEY (ID))" %val_s
And I'm trying to insert today's date into it with this code block:
now2 = datetime.datetime.now()
now = now2.strftime("%m/%d/%Y")
but when I insert into the date VARCHAR(200) column, it comes out as something like this:
0.000742942050520059
And I don't know where the problem is... I tried inserting 06/04/2019 directly, but when I SELECT * FROM the table it shows the same number as above.
Can someone please tell me where the problem is?
now2 = datetime.datetime.now()
now = now2.strftime("%m/%d/%Y")
now_t = now2.strftime("%H:%M:%S")
# For showing image of above settings --OPTION--
# show the output image
#cv2.imshow("Image", image)
#cv2.waitKey(0)
# SQL for "shukkin" (clocking in)
try:
    connection = mysql.connector.connect(host='localhost', database='ninsho', user='root', password='0308', unix_socket="/var/run/mysqld/mysqld.sock")
    cursor = connection.cursor()
    valler = name.copy()
    val_s = valler.replace(" ", "")
    stmt = "SHOW TABLES LIKE '%s'" % val_s
    cursor.execute(stmt)
    result = cursor.fetchone()
    if result:
        print("je")
        dates = now
        # print("date=", dates, "now=", now)
        # Check if there is a record from today ("shukkin")
        query = "SELECT date FROM %s WHERE date = %s AND shukkin = %s" % (val_s, dates, str("'" + name + "'"))
        try:
            # print("rorororo")
            cursor.execute(query)
            myresult = cursor.fetchall()
            # print(myresult)
            for x in myresult:
                # print("ttt")
                a = x[0]
                print(a)
                if a == now:
                    # If there is a record from today - update it
                    names.set(name + "さん" + "\n" + "エラー:もう登録済")  # "Error: already registered"
                    memo.set("今日はすでに出勤を登録しました")  # "Today's clock-in is already registered"
                # If there is no record from today - create it
                else:
                    now2 = datetime.datetime.now()
                    now = now2.strftime("%m/%d/%Y")
                    val = name
                    val_s = val.replace(" ", "")
                    sql_insert_query = "INSERT INTO `%s`(`date`, `s_time`, `shukkin`) VALUES (%s, %s, %s)" % (val_s, now, now_t, name)
                    cursor = connection.cursor()
                    result = cursor.execute(sql_insert_query)
                    connection.commit()
                    # print("Record inserted successfully into table")
        except:
            print("except")
            now2 = datetime.datetime.now()
            now3 = now2.strftime("%m/%d/%Y")
            val = name
            val_s = val.replace(" ", "")
            sql_insert_query2 = "INSERT INTO `%s`(`date`, `s_time`, `shukkin`) VALUES (%s, %s, %s)" % (val_s, now3, str("'" + now_t + "'"), str("'" + name + "'"))
            print(val_s, now3, now_t, name)
            cursor = connection.cursor()
            result = cursor.execute(sql_insert_query2)
            print("except2")
            connection.commit()
    else:
        print("nieje")
        val = name
        val_s = val.replace(" ", "")
        query = "CREATE TABLE %s (ID int NOT NULL AUTO_INCREMENT, date VARCHAR(200) NOT NULL default'{}', s_time VARCHAR(30) NOT NULL default'{}', shukkin VARCHAR(30) NOT NULL default'{}', taikin VARCHAR(30) NOT NULL default'{}', t_time VARCHAR(30) NOT NULL default'{}', shucchou VARCHAR(30) NOT NULL default'{}', shucchou_time VARCHAR(30) NOT NULL default'{}', shucchou_kaeri_time VARCHAR(30) NOT NULL default'{}', PRIMARY KEY (ID))" % val_s
        cursor.execute(query)
        myresult = cursor.fetchall()
        gettr()
except mysql.connector.Error as error:
    connection.rollback()  # rollback if any exception occurred
    # print("Failed inserting record into table {}".format(error))
finally:
    if connection.is_connected():
        cursor.close()
        connection.close()
        # print("MySQL connection is closed")
I have just created a new column, Id, in DB Browser for SQLite. I am not sure how I am supposed to code this portion in App.py. Should I use id = request.form['id']?
App.py
@app.route('/addrec', methods=['POST', 'GET'])
def addrec():
    if request.method == 'POST':
        id =
        use = session['user'].get("name")
        ema = session['user'].get("preferred_username")
        type = request.form['type']
        uploadre = request.form['uploadre']
        amt = request.form['amt']
        description = request.form['description']
        if request.form.get("price"):
            price_checked = "Yes"
        else:
            price_checked = "No"
        conn = sql.connect(db_path)
        c = conn.cursor()
        c.execute(
            "INSERT INTO SubmitClaim VALUES (?,?,?,?,?,?,?,?)",
            (id, use, ema, type, uploadre, amt, price_checked, description))
        conn.commit()
        c.execute("SELECT * FROM SubmitClaim")
        print(c.fetchall())
        conn.close()
        return render_template('base.html', user=session["user"], version=msal.__version__)
This is my table in DB Browser for SQLite:
CREATE TABLE "SubmitClaim" (
"id" INTEGER,
"Name" TEXT NOT NULL,
"Email" TEXT NOT NULL,
"ClaimType" TEXT NOT NULL,
"UploadReceipt" TEXT NOT NULL,
"ClaimAmount" INTEGER NOT NULL,
"checkbox" TEXT NOT NULL,
"ClaimDescription" TEXT NOT NULL,
PRIMARY KEY("id")
)
Here's how you set up an integer primary key column in sqlite and then insert and select from it:
import sqlite3

conn = sqlite3.connect('test.db')
conn.execute('''
    CREATE TABLE SubmitClaim (
        Id INTEGER PRIMARY KEY NOT NULL,
        Name NVARCHAR NOT NULL,
        Email NVARCHAR NOT NULL,
        ClaimType NVARCHAR NOT NULL,
        UploadReceipt NVARCHAR NOT NULL,
        ClaimAmount INTEGER NOT NULL,
        Checkbox NVARCHAR NOT NULL,
        ClaimDescription NVARCHAR NOT NULL
    )
''')
conn.commit()
conn.execute("INSERT INTO SubmitClaim (Name, Email, ClaimType, UploadReceipt, ClaimAmount, Checkbox, ClaimDescription) VALUES ('Foo Bar', 'foo@bar.com', 'A', 'Blah', 10, 'Checked', 'Description goes here')")
conn.commit()
cursor = conn.execute('SELECT * FROM SubmitClaim')
for row in cursor:
    print(row)
And here's a colab notebook demonstration: https://colab.research.google.com/drive/1OhV9lWSBxLpOv45bNKmtRx9H0j6BZ-S3?usp=sharing
So your code sample above becomes:
@app.route('/addrec', methods=['POST', 'GET'])
def addrec():
    if request.method == 'POST':
        use = session['user'].get("name")
        ema = session['user'].get("preferred_username")
        type = request.form['type']
        uploadre = request.form['uploadre']
        amt = request.form['amt']
        description = request.form['description']
        if request.form.get("price"):
            price_checked = "Yes"
        else:
            price_checked = "No"
        conn = sql.connect(db_path)
        conn.execute('''
            INSERT INTO SubmitClaim
            (Name, Email, ClaimType, UploadReceipt, ClaimAmount, Checkbox, ClaimDescription)
            VALUES (?,?,?,?,?,?,?)''',
            (use, ema, type, uploadre, amt, price_checked, description))
        conn.commit()
        c = conn.execute("SELECT * FROM SubmitClaim")
        print(c.fetchall())
        conn.close()
        return render_template('base.html', user=session["user"], version=msal.__version__)
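If the auto-assigned Id is needed after an insert (for example, to display it back to the user), sqlite3 exposes it on the cursor that execute returns; a small sketch under the same SubmitClaim schema and variables:
cur = conn.execute('''
    INSERT INTO SubmitClaim
    (Name, Email, ClaimType, UploadReceipt, ClaimAmount, Checkbox, ClaimDescription)
    VALUES (?,?,?,?,?,?,?)''',
    (use, ema, type, uploadre, amt, price_checked, description))
conn.commit()
print(cur.lastrowid)  # the Id SQLite assigned to the new row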
I have a simplified Postgres (v13) table below, with rows generated and updated in Python with psycopg2.
My question: when I update the price field in the rows, I can't complete the update because of the following ON CONFLICT DO UPDATE errors. If I don't use ON CONFLICT DO UPDATE, I can update the table, but I would like to use it because it eliminates duplicate rows.
With ON CONFLICT DO UPDATE, I only need to update the fields "price" and "last_updated", and only when the rows match on "id, item, original_price_date".
These are the errors I get with ON CONFLICT DO UPDATE:
Error: syntax error at or near "="
# update the prices within the existing data
df = pd.DataFrame(np.array([['5/3/2010', 'rock', 15],
                            ['4/15/2010', 'paper', 11],
                            ['2/3/2015', 'scissor', 13]]),
                  columns=['original_price_date', 'item', 'price'])
tuples_for_dB = [tuple(x) for x in df.to_numpy()]
sql_script = '''INSERT INTO ''' + TABLE_ + ''' (
    original_price_date, item, price, created_date, last_updated)
    VALUES (%s, %s, %s, transaction_timestamp(), transaction_timestamp())
    ON CONFLICT (id, item, original_price_date)
    DO UPDATE SET (price, last_updated = EXCLUDED.price, EXCLUDED.transaction_timestamp());'''
Error: relation "price_data" does not exist
sql_script = '''INSERT INTO ''' + TABLE_ + ''' (
    original_price_date, item, price, created_date, last_updated)
    VALUES (%s, %s, %s, transaction_timestamp(), transaction_timestamp())
    ON CONFLICT (id, item, original_price_date)
    DO UPDATE SET (price, last_updated) = (EXCLUDED.price, EXCLUDED.transaction_timestamp());'''
My original creation of the data:
import psycopg2
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
import numpy as np
import pandas as pd

# PostgreSQL connection details
DATABASE_INITIAL_ = 'postgres'
DATABASE_ = 'data'
TABLE_ = 'price_data'
USER_ = 'postgres'
SERVERNAME_ = 'localhost'
PASSWORD_ = password_
HOST_ = '127.0.0.1'
PORT_ = '5432'

# establishing the connection
conn = psycopg2.connect(database=DATABASE_,
                        user=USER_,
                        password=PASSWORD_,
                        host=HOST_,
                        port=PORT_)
conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
conn.autocommit = True

# Creating a cursor object using the cursor() method
cursor = conn.cursor()
sql = "SELECT 1 FROM pg_catalog.pg_database WHERE datname = " + "'" + DATABASE_ + "'"
cursor.execute(sql)

# If the dB does not exist, create it
exists = cursor.fetchone()
print(exists)
if not exists:
    print('does not exist')
    # Preparing query to create a database
    sql = '''CREATE database ''' + DATABASE_
    # Creating the database
    cursor.execute(sql)

# Creating the table
sql = '''CREATE TABLE IF NOT EXISTS ''' + TABLE_ + ''' (
    id SERIAL PRIMARY KEY,
    original_price_date DATE NOT NULL,
    item TEXT NOT NULL,
    price NUMERIC NULL DEFAULT NULL,
    created_date TIMESTAMPTZ NULL DEFAULT TRANSACTION_TIMESTAMP(),
    last_updated TIMESTAMPTZ NULL DEFAULT TRANSACTION_TIMESTAMP());'''
cursor.execute(sql)

# fill the table with data
df = pd.DataFrame(np.array([['5/3/2010', 'rock', 0.9],
                            ['4/15/2010', 'paper', 6.5],
                            ['2/3/2015', 'scissor', 3.9],
                            ['3/23/2017', 'ball', 1.1],
                            ['4/7/2013', 'tire', 5.4]]),
                  columns=['original_price_date', 'item', 'price'])
tuples_for_dB = [tuple(x) for x in df.to_numpy()]
sql_script = '''INSERT INTO ''' + TABLE_ + ''' (
    original_price_date, item, price, created_date, last_updated)
    VALUES (%s, %s, %s, transaction_timestamp(), transaction_timestamp());'''
try:
    cursor.executemany(sql_script, tuples_for_dB)
    success = True
except psycopg2.Error as e:
    error = e.pgcode
    print(f'Error : {e.args[0]}')
    success = False
if success:
    print('\nData inserted successfully........')
    print(f'Table INSERT sql commit comment :\n"{sql_script}"\n')
elif success == False:
    print('\nData NOT inserted successfully XXXXXX')

# Preparing query to drop the table
sql = '''DROP TABLE IF EXISTS ''' + TABLE_ + ";"
# Dropping the table
cursor.execute(sql)
conn.close()
ON CONFLICT needs a unique constraint or index that matches the conflict target, so I added a constraint (CONSTRAINT com UNIQUE (original_price_date, item)) where I created the table.
sql = '''CREATE TABLE IF NOT EXISTS ''' + TABLE_ + ''' (
    id SERIAL PRIMARY KEY,
    original_price_date DATE NOT NULL,
    item TEXT NOT NULL,
    price NUMERIC NULL DEFAULT NULL,
    created_date TIMESTAMPTZ NULL DEFAULT TRANSACTION_TIMESTAMP(),
    last_updated TIMESTAMPTZ NULL DEFAULT TRANSACTION_TIMESTAMP(),
    CONSTRAINT com UNIQUE (original_price_date, item));'''
Then I could insert the data without creating duplicate rows of (original_price_date, item) using the following statement.
sql = '''INSERT INTO ''' + TABLE_ + ''' (original_price_date, item, price)
    VALUES (%s, %s, %s)
    ON CONFLICT (original_price_date, item)
    DO UPDATE
    SET (price, last_updated) = (EXCLUDED.price, TRANSACTION_TIMESTAMP());'''
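Putting it together, a sketch of running that upsert over the updated prices, assuming the cursor, TABLE_, and the updated-price tuples_for_dB defined earlier:
sql = '''INSERT INTO ''' + TABLE_ + ''' (original_price_date, item, price)
    VALUES (%s, %s, %s)
    ON CONFLICT (original_price_date, item)
    DO UPDATE
    SET (price, last_updated) = (EXCLUDED.price, TRANSACTION_TIMESTAMP());'''
cursor.executemany(sql, tuples_for_dB)  # matched rows get the new price; new rows are inserted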
I'm receiving an "incorrect integer value" error for userID_fk and target. The error comes up for columns whose data type is an integer; if the type is changed to text or varchar, it states that a site has been created and the siteID increases, but no other data is included. I want the user to input their username so it is matched to its userID and inserted into userID_fk, through Python with Tkinter.
Below are the structures for my users and sites tables.
users:
CREATE TABLE `users` (
`userID` int(255) NOT NULL AUTO_INCREMENT,
`userName` varchar(255) CHARACTER SET latin1 COLLATE latin1_general_cs NOT NULL,
`userPassword` varchar(225) CHARACTER SET latin1 COLLATE latin1_general_cs NOT NULL,
`Name` varchar(255) NOT NULL,
`phoneNum` text NOT NULL,
`email` varchar(230) NOT NULL,
`region` text NOT NULL,
`accessLevel` int(10) NOT NULL,
PRIMARY KEY (`userID`)
) ENGINE=InnoDB AUTO_INCREMENT=10002 DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT
sites:
CREATE TABLE `sites` (
`siteID` int(225) NOT NULL AUTO_INCREMENT,
`siteName` text CHARACTER SET latin1 COLLATE latin1_general_cs NOT NULL,
`userID_fk` int(255) NOT NULL,
`region` text NOT NULL,
`risklevel` text NOT NULL,
`siteType` text NOT NULL,
`target` int(225) NOT NULL,
PRIMARY KEY (`siteID`),
KEY `userID_fk` (`userID_fk`),
CONSTRAINT `sites_ibfk_1` FOREIGN KEY (`userID_fk`) REFERENCES `users` (`userID`) ON UPDATE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8 ROW_FORMAT=COMPACT
Python code to insert a site into the sites table:
def register_site():
    sitename_info = sitename2.get()
    username2_info = username2.get()
    region_info = region.get()
    risklevel_info = risklevel.get()
    sitetype_info = sitetype.get()
    targetpercent_info = targetpercent.get()
    # SQL code for writing the data that was entered on the registering page.
    cursor = cnn.cursor()
    sitequery = "INSERT INTO `sites`(`siteID`, `siteName`, `userID_fk`, `region`, `risklevel`, `siteType`, `target`) VALUES (NULL,%s,%s,%s,%s,%s,%s)"
    sitequery_vals = (sitename_info, username2_info, region_info, risklevel_info, sitetype_info, targetpercent_info)
    cursor.execute(sitequery, sitequery_vals)
    cnn.commit()
    cursor.close()
    cnn.close()
    # removes the values in the entries once the user sees that the registration was successful
    sitename2_entry.delete(0, END)
    region_entry.delete(0, END)
    risklevel_entry.delete(0, END)
    sitetype_entry.delete(0, END)
    targetpercent_entry.delete(0, END)
    Label(screen10, text="Site Created", fg="green", font=("calibri", 11)).pack()
If username2_info is the userName, you need to get the userID from the users table:
sitequery = ("INSERT INTO `sites` (`siteName`, `userID_fk`, `region`, `risklevel`, `siteType`, `target`) "
"SELECT %s, `userID`, %s, %s, %s, %s FROM `users` WHERE `userName` = %s")
sitequery_vals = (sitename_info, region_info, risklevel_info, sitetype_info, targetpercent_info, username2_info)
cursor.execute(sitequery, sitequery_vals)
cnn.commit()
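An equivalent two-step variant, if it reads more clearly, is to look the userID up first and then insert; a sketch assuming the same cnn connection and Tkinter variables (the "Unknown username" label is illustrative, not from the original):
cursor = cnn.cursor()
cursor.execute("SELECT `userID` FROM `users` WHERE `userName` = %s", (username2_info,))
row = cursor.fetchone()
if row is None:
    # no such user, so don't insert (illustrative handling)
    Label(screen10, text="Unknown username", fg="red", font=("calibri", 11)).pack()
else:
    cursor.execute(
        "INSERT INTO `sites` (`siteName`, `userID_fk`, `region`, `risklevel`, `siteType`, `target`) "
        "VALUES (%s, %s, %s, %s, %s, %s)",
        (sitename_info, row[0], region_info, risklevel_info, sitetype_info, targetpercent_info))
    cnn.commit()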
I am trying to load data from my code in PyDev (Eclipse) into PostgreSQL. For the games table, the FK columns (HomeTeamID and RefereeID) are showing as null. What is the solution for them to actually hold the data from the other tables?
Also, how can I change the serial to numbers between 90000 and 90500 in increments of one? Many thanks!
#!/usr/bin/python
# -*- coding: utf-8 -*-
import psycopg2
import sys
import csv
from itertools import count, cycle
from _tkinter import create
from setuptools.dist import sequence
from email.policy import default

path = r'C:\Users\sammy\Downloads\E0.csv'
with open(path, "r") as csvfile:
    readCSV = csv.reader(csvfile, delimiter=",")
    firstline = 1
    con = None
    con = psycopg2.connect("host='localhost' dbname='football' user='postgres' password='XXX'")
    cur = con.cursor()
    cur.execute("DROP TABLE games")
    cur.execute("DROP TABLE teams")
    cur.execute("DROP TABLE referees")
    cur.execute("CREATE TABLE teams (HomeTeamID SERIAL PRIMARY KEY, HomeTeam VARCHAR, AwayTeamID VARCHAR, AwayTeam VARCHAR)")
    cur.execute("CREATE TABLE referees (RefereeID SERIAL PRIMARY KEY, RefereeName VARCHAR, AwayTeamID VARCHAR)")
    cur.execute("CREATE TABLE games (GAMEID SERIAL PRIMARY KEY, HomeTeamID INTEGER, FOREIGN KEY (HomeTeamID) REFERENCES teams(HomeTeamID), HomeTeam VARCHAR, AwayTeamID VARCHAR, AwayTeam VARCHAR, FTHG INTEGER, ATHG INTEGER, FTR VARCHAR, RefereeID INTEGER, FOREIGN KEY (RefereeID) REFERENCES referees(RefereeID), RefereeName VARCHAR, HY INTEGER, AY INTEGER)")
    try:
        for row in readCSV:
            if firstline:
                firstline = 0
                continue
            HomeTeamID = row[2]
            HomeTeam = row[2]
            AwayTeamID = row[3]
            AwayTeam = row[3]
            FTHG = row[4]
            ATHG = row[5]
            FTR = row[6]
            RefereeID = row[10]
            RefereeName = row[10]
            HY = row[19]
            AY = row[20]
            data1 = (HomeTeam, AwayTeamID, AwayTeam)
            data2 = (RefereeName, AwayTeamID)
            data3 = (HomeTeam, AwayTeamID, AwayTeam, FTHG, ATHG, FTR, RefereeName, HY, AY)
            query1 = "INSERT INTO teams (HomeTeam, AwayTeamID, AwayTeam) VALUES (%s, %s, %s);"
            query2 = "INSERT INTO referees (RefereeName, AwayTeamID) VALUES (%s, %s);"
            query3 = "INSERT INTO games (HomeTeam, AwayTeamID, AwayTeam, FTHG, ATHG, FTR, RefereeName, HY, AY) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s);"
            print(HomeTeam)
            print(AwayTeam)
            print(FTHG)
            print(ATHG)
            print(FTR)
            print(RefereeID)
            print(RefereeName)
            print(HY)
            print(AY)
            cursor = con.cursor()
            cursor.execute(query1, data1)
            cursor.execute(query2, data2)
            cursor.execute(query3, data3)
    except psycopg2.DatabaseError as e:
        if con:
            con.rollback()
        print("Error %s" % e)
        sys.exit(1)
    finally:
        if con:
            con.commit()
            con.close()
    print(" ".join(row))

out = open("new_data.csv", "w")
output = csv.writer(out)
for row in data1:
    output.writerow(row)
out.close()
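On the FK question: query3 never supplies HomeTeamID or RefereeID, so those columns stay NULL; the generated keys have to be captured when the parent rows are inserted and passed into games. For the 90000-90500 range, a bounded sequence can replace SERIAL. A sketch of both ideas, assuming an open connection and cursor like con/cur above; the sequence name gameid_seq and the trimmed games column list are illustrative, not from the original:
# Bounded sequence instead of SERIAL; nextval() raises an error once 90500 is exhausted.
cur.execute("CREATE SEQUENCE gameid_seq START 90000 MINVALUE 90000 MAXVALUE 90500 INCREMENT 1")
cur.execute("CREATE TABLE games (GAMEID INTEGER PRIMARY KEY DEFAULT nextval('gameid_seq'), "
            "HomeTeamID INTEGER REFERENCES teams(HomeTeamID), "
            "RefereeID INTEGER REFERENCES referees(RefereeID), "
            "HomeTeam VARCHAR, AwayTeam VARCHAR)")
# Capture each generated key with RETURNING, then reuse it for the child row.
cur.execute("INSERT INTO teams (HomeTeam, AwayTeamID, AwayTeam) VALUES (%s, %s, %s) RETURNING HomeTeamID",
            (HomeTeam, AwayTeamID, AwayTeam))
home_team_id = cur.fetchone()[0]
cur.execute("INSERT INTO referees (RefereeName, AwayTeamID) VALUES (%s, %s) RETURNING RefereeID",
            (RefereeName, AwayTeamID))
referee_id = cur.fetchone()[0]
cur.execute("INSERT INTO games (HomeTeamID, RefereeID, HomeTeam, AwayTeam) VALUES (%s, %s, %s, %s)",
            (home_team_id, referee_id, HomeTeam, AwayTeam))
con.commit()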
I'm new to Python and trying to save raw POST data into MySQL.
I want to iterate over each element in the posted JSON and save all the data to the DB.
JSON list of objects (30 objects, each with 11 columns):
[
    {
        "col1": 7878,
        "col2": "c004979d3969a86a8fdcda2f92eb39e3",
        "col3": "b000yht23",
        ...
        "col11": 2
    },
    {
        "col1": 7878,
        "col2": "c004979d3969a86a8fdcda2f92eb39e3",
        "col3": "b000yht23",
        ...
        "col11": 43
    },
    ... (up to 30 objects)
]
'json_test' table desc:
CREATE TABLE json_test (
`col1` varchar(250) NOT NULL,
`col2` varchar(250) NOT NULL,
`col3` varchar(250) NOT NULL,
`col4` varchar(250) NOT NULL,
`col5` varchar(250) NOT NULL,
`col6` varchar(250) NOT NULL,
`col7` varchar(250) NOT NULL,
`col8` varchar(250) NOT NULL,
`col9` varchar(250) NOT NULL,
`col10` varchar(250) NOT NULL,
`col11` varchar(200) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
UPDATED to save data to DB:
My Python code looks like:
from flask import Flask, abort, request
import json
import pymysql

app = Flask(__name__)

@app.route('/foo', methods=['GET', 'POST'])
def foo():
    jsonobject = request.json
    if not jsonobject:
        abort(400)
    # load - converts JSON source text to a Python value
    #readable_json = json.dumps(jsonobject)
    #UPDATED with column_names
    k = 0
    for i in jsonobject:
        # Connect to the database
        conn = pymysql.connect(host='10.20.3.4', port=3306, user='root', passwd='', db='python_db')
        try:
            with conn.cursor() as cursor:
                column_names = ['col1', 'col2', 'col3', 'col4', 'col5', 'col6',
                                'col7', 'col8', 'col9', 'col10', 'col11']
                column_names_str = ', '.join(column_names)
                binds_str = ', '.join('%s' for _ in range(len(column_names)))
                sql = ("INSERT INTO `json_test` ({column_names})"
                       " VALUES({binds})"
                       .format(column_names=column_names_str, binds=binds_str))
                for data_dict in jsonobject:
                    values = [data_dict[column_name]
                              for column_name in column_names]
                    cursor.execute(sql, values)
                    print("Insert successfull!")
            #UPDATED
            k += 1
            conn.commit()
        finally:
            conn.close()
        return "Insert successful"
    #return json.dumps(jsonobject)

if __name__ == '__main__':
    app.run(host='10.22.1.168', debug=True, port=7845)
UPDATED code result:
Only the last record seems to be inserted.
Replace this mess
#UPDATED with column_names
k = 0
for i in jsonobject:
    # Connect to the database
    conn = pymysql.connect(host='10.20.3.4', port=3306, user='root', passwd='', db='python_db')
    try:
        with conn.cursor() as cursor:
            column_names = ['col1', 'col2', 'col3', 'col4', 'col5', 'col6',
                            'col7', 'col8', 'col9', 'col10', 'col11']
            column_names_str = ', '.join(column_names)
            binds_str = ', '.join('%s' for _ in range(len(column_names)))
            sql = ("INSERT INTO `json_test` ({column_names})"
                   " VALUES({binds})"
                   .format(column_names=column_names_str, binds=binds_str))
            for data_dict in jsonobject:
                values = [data_dict[column_name]
                          for column_name in column_names]
                cursor.execute(sql, values)
                print("Insert successfull!")
        #UPDATED
        k += 1
        conn.commit()
    finally:
        conn.close()
    return "Insert successful"
with
try:
    with conn.cursor() as cursor:
        columns_names = ['col1', 'col2', 'col3', 'col4', 'col5', 'col6',
                         'col7', 'col8', 'col9', 'col10', 'col11']
        columns_names_str = ', '.join(columns_names)
        binds_str = ', '.join('%s' for _ in range(len(columns_names)))
        sql = ("INSERT INTO json_test ({columns_names}) "
               "VALUES ({binds})"
               .format(columns_names=columns_names_str,
                       binds=binds_str))
        for data_dict in jsonobject:
            values = [data_dict[column_name]
                      for column_name in columns_names]
            cursor.execute(sql, values)
            print("Insert successfull!")
    conn.commit()
finally:
    conn.close()
Summation:
- The k object is redundant.
- The name i is unclear and makes it look like an index when it is not: it is a dict object.
- We don't need to create a connection for each object from jsonobject, because that is an expensive operation.
- We don't need to rebuild the sql string on each iteration either, since it never changes: it is built once, before the loop.
- Storing the column names in a list/tuple saves us from writing them twice: once in the query and once in the values extraction.
- Building the binds string (%s, %s, ...) dynamically from the number of columns saves us from typos where we miss or add too many bind placeholders.
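As a further simplification (not part of the original answer), pymysql's cursor also provides executemany, which runs one parameterized statement over all of the rows; a sketch under the same assumptions (jsonobject, conn, columns_names, and sql as above):
rows = [[data_dict[c] for c in columns_names] for data_dict in jsonobject]
with conn.cursor() as cursor:
    cursor.executemany(sql, rows)  # one round of binds per row
conn.commit()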
json.dumps does the opposite of what you claim: it converts a Python object into a JSON string.
The result of request.json is already a Python data structure. You don't need to do anything else with it.
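A quick illustration of the direction of each call (values illustrative):
import json

s = json.dumps({"col1": 7878})  # Python dict -> JSON text: '{"col1": 7878}'
d = json.loads(s)               # JSON text -> Python dict
# request.json already gives the dict/list form, so neither call is needed here.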