This function always gives me the following errors when run:
CRITICAL >> Not all parameters were used in the SQL statement
CRITICAL >> Exception for errors programming errors
I've spent hours looking at the code but cannot find the problem. What is wrong?
#************ TestCase Table Insertion *********************
def insertUpdateTestCase(prev_max_weeknumber):
    log.Log('insertUpdateTestCase START', 'info')
    insertUpdateTestCase_start_time = datetime.now()
    testcases = """INSERT INTO prtm_testcase (testplan_identifier, testcase_name, testcase_identifier, testcase_longidentifier, testcase_uri, globalconfiguration_identifier, weeknumber, localconfiguration_identifier)
                   VALUES (%s,%s,%s,%s,%s,%s,%s,%s)
                   ON CONFLICT (testplan_identifier, testcase_identifier, globalconfiguration_identifier, localconfiguration_identifier, weeknumber)
                   DO UPDATE SET testcase_name = EXCLUDED.testcase_name,
                                 testcase_longidentifier = EXCLUDED.testcase_longidentifier,
                                 testcase_uri = EXCLUDED.testcase_uri,
                                 weeknumber = EXCLUDED.weeknumber"""
    # Define some variables for executing the SELECT query based on limits
    offset = 0
    per_query = 10000
    while True:
        # Execute query based on limits using projects
        cursor_remets.execute("select tsdata.ts_moduleid, coalesce(tsdata_extended.ts_objecttext,'Unknown') as ts_objecttext, tsdata.ts_objectidentifier, SUBSTRING_INDEX(tsdata.ts_variant, ',', -1) as after_comma_value, tsdata.weeknumber, SUBSTRING_INDEX(tsdata.ts_variant, ',', -1) as project_id, SUBSTRING_INDEX(tsdata.ts_variant, ',', 1) as gc_id from tsdata left join tsdata_extended on tsdata_extended.ts_objectidentifier = tsdata.ts_objectidentifier and tsdata.ts_moduleid = tsdata_extended.ts_moduleid and tsdata.ts_variant = tsdata_extended.ts_variant where tsdata.weeknumber=%s OFFSET %s", (prev_max_weeknumber, per_query, offset))
        rows = cursor_remets.fetchall()
        if len(rows) > 0:
            for row in rows:
                #print(row)
                testcond = (row[0] and row[1] and row[2] and row[4] and row[5] and row[6])
                #testcond = True
                if testcond:
                    cursor_prtm.execute(testcases, (row[0], row[1].replace("\x00", "\uFFFD").replace('\\', '\\\\'), row[2], None, None, row[6], row[4], row[5]))
                    conn_prtm.commit()
                    DataMigration.validateInsertedRecord('insertUpdateTestCase', row)
                else:
                    log.Log('In insertUpdateTestCase, row validation failed ' + str(row), 'info')
        else:
            break
        #print(str(len(rows)) + ' rows written successfully to prtm_testcase')
        offset += per_query
    log.Log('insertUpdateTestCase completed execution in ' + str(datetime.now() - insertUpdateTestCase_start_time), 'info')
I executed the SQL statement in the source database and rows are returned with no nulls or empty fields in the data. I also compared the list of fields against the defined statement and counted the same number of parameters in both.
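As a sanity check you can compare the placeholder count against the parameter tuple before executing. This is a hypothetical helper, not part of the original code, and the count is naive (a literal '%s' inside the SQL text would be miscounted):

def check_params(sql, params):
    # Naive count of %s placeholders in the statement
    placeholders = sql.count('%s')
    if placeholders != len(params):
        raise ValueError('%d placeholders but %d parameters supplied'
                         % (placeholders, len(params)))

# The SELECT above contains two %s placeholders but is given three values,
# which is exactly the kind of mismatch this check reports.
check_params("select ... where tsdata.weeknumber=%s OFFSET %s",
             ('week', 10000, 0))  # raises ValueError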
I am trying to use Python to run a parametrized query with a list. This is the code:
loan_records =['604150062','604150063','604150064','604150065','604150066','604150067','604150069','604150070']
borr_query = "select distinct a.nbr_aus, cast(a.nbr_trans_aus as varchar(50)) nbr_trans_aus, c.amt_finl_item, case when a.cd_idx in (-9999, 0) then null else a.cd_idx end as cd_idx, a.rate_curr_int, case when a.rate_gr_mrtg_mrgn = 0 then null else a.rate_gr_mrtg_mrgn end as rate_gr_mrtg_mrgn, a.rate_loln_max_cap, case when a.rate_perdc_cap = 0 then null else a.rate_perdc_cap end as rate_perdc_cap from db2mant.i_lp_trans a left join db2mant.i_lp_trans_borr b on a.nbr_aus = b.nbr_aus and a.nbr_trans_aus = b.nbr_trans_aus left join db2mant.i_lp_finl_item c on a.nbr_aus = c.nbr_aus and a.nbr_trans_aus = c.nbr_trans_aus where a.nbr_trans_aus in (?) and c.cd_finl_item = 189"
ODS.execute(borr_query, loan_records)
#PML.execute(PML_SUBMN_Query, (first_evnt, last_evnt, x))
ODS_records = ODS.fetchall()
ODS_records = pd.DataFrame(ODS_records, columns=['nbr_aus', 'nbr_trans_aus', 'amt_finl_item', 'cd_idx', 'rate_curr_int', 'rate_gr_mrtg_mrgn', 'rate_loln_max_cap', 'rate_perdc_cap'])
When I try to run this code, I get the following error message:
[error message screenshot]
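A likely issue, sketched below under the assumption of a qmark-style driver such as pyodbc (the driver isn't stated, and ODS is taken to be the cursor from the post), is that a single ? placeholder cannot bind a whole list; you need one placeholder per value:

loan_records = ['604150062', '604150063', '604150064', '604150065']

# Build one '?' per list element, e.g. "?,?,?,?" for four records
placeholders = ",".join("?" * len(loan_records))
query = "select * from db2mant.i_lp_trans where nbr_trans_aus in (" + placeholders + ")"

# Each list element binds to its own placeholder
ODS.execute(query, loan_records)
rows = ODS.fetchall()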
query = search_entry.get()
sql = "SELECT * FROM customers WHERE last_name = %s"
data = (query,)
result = my_cursor.execute(sql, data)
result = my_cursor.fetchall()
if not result:
    result = "record not found... "
    query_label = Label(search_customer_window, text=result)
    query_label.place(x=40, y=130)
else:
    for index, x in enumerate(result):
        num = 0
        index += 2
        for y in x:
            query_label = Label(search_customer_window, text=y)
            query_label.grid(row=index, column=num)
            num += 1
I set the value of index to 2 but nothing happens. When I run the program, the label (query_label) is shown at the top left side of the window (row 0, column 0). How can I change the location of the label? It is actually a label on which some data are shown. Thanks for your help.
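For reference, here is a minimal self-contained sketch (with made-up data, not from the original post) showing that grid(row=..., column=...) is what controls a label's cell, and that the row offset has to be applied inside the grid() call:

import tkinter as tk

root = tk.Tk()

rows = [("Alice", "Smith"), ("Bob", "Jones")]  # hypothetical query results
for index, record in enumerate(rows):
    for num, value in enumerate(record):
        # Offset every row by 2 so the data starts at grid row 2
        tk.Label(root, text=value).grid(row=index + 2, column=num)

root.mainloop()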
My problem is that when I run or debug the project, the first query runs fast, in under 1 s, but the second query takes more than 30 s. I'm confused about this. I have already run both queries in a DB editor, and both run fast with no problem. At first glance the two queries are quite similar, so I don't know what causes the difference.
By the way, sometimes when I debug, a red notice pops up in the Debug and Run tab on the left, but I cannot get a screenshot of it. It has only appeared once or twice.
Here are the two SQL queries:
query 1: rows = db.select("SELECT recruiter_id FROM linkedin.candidates WHERE recruiter_id in (" + ",".join(recruiter_ids) + ")")
query 2: rows = db.select("select c.recruiter_id, c.updated from linkedin.candidates c where c.recruiter_id in (" + ",".join(duplicates_rid) + ")")
This is my code
if recruiter_ids:
    print("Creating connection to MySQL in recruiter 12")
    rows = db.select("SELECT recruiter_id FROM linkedin.candidates WHERE recruiter_id in (" + ",".join(recruiter_ids) + ")")
    db_recruiter_ids = [r['recruiter_id'] for r in rows] + [get_recruiter_id(url) for url in duplicates]
    print("Recruiter ids in database:", len(db_recruiter_ids), db_recruiter_ids[:5])
    duplicates = [url for url in profile_urls if any(get_recruiter_id(url) == rid for rid in db_recruiter_ids)]
    duplicates_rid = [get_recruiter_id(url) for url in duplicates]
    if duplicates_rid:
        rows = db.select("select c.recruiter_id, c.updated from linkedin.candidates c where c.recruiter_id in (" + ", ".join(duplicates_rid) + ")")
        #rows = db.select("select c.recruiter_id, c.updated from linkedin.candidates c where c.recruiter_id in {}".format(tuple(duplicates_rid)))
        rows = [r['recruiter_id'] for r in rows if r['updated'] < datetime.datetime.now() - datetime.timedelta(days=90)]
        old_resumes = [url for url in profile_urls if any(get_recruiter_id(url) == r for r in rows)]
    profile_urls = [url for url in profile_urls if not any(get_recruiter_id(url) == rid for rid in db_recruiter_ids)]
    print("Found duplicates in list:", len(duplicates), duplicates[:3])
    if db_recruiter_ids:
        tag_candidate_by_recruiter_id(db, db_recruiter_ids, project_id, tracker_id)
Thank you guys so much!
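As an aside, here is a hedged sketch of building the IN clause from placeholders instead of string concatenation, which avoids quoting surprises with the joined ids. It assumes direct access to a MySQL cursor ('cursor' is hypothetical and stands in for the db.select wrapper from the post):

# One %s per id, e.g. "%s,%s,%s"
placeholders = ",".join(["%s"] * len(duplicates_rid))
query = ("select c.recruiter_id, c.updated "
         "from linkedin.candidates c "
         "where c.recruiter_id in (" + placeholders + ")")
cursor.execute(query, list(duplicates_rid))
rows = cursor.fetchall()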
I'm new to Python and SQLAlchemy.
I already have a delete method working when I construct the WHERE conditions by hand.
Now I need to read the columns and values from an incoming request in YAML format and build the WHERE conditions from them.
#enter data as yaml
items:
  - item:
      table: [MyTable, OtherTable]
      filters:
        field_id: 1234
        #other_id: null
Here is what I tried, but I can't get it to work:
for i in use_case_cfg['items']:
    item = i.get('item')
    for t in item['table']:
        if item['filters']:
            filters = item['filters']
            where_conditions = ''
            count = 0
            for column, value in filters.items():
                aux = str(getattr(t, column) == bindparam(value))
                if count == 0:
                    where_conditions += aux
                else:
                    where_conditions += ', ' + aux
                count += 1
            to_delete = inv[t].__table__.delete().where(text(where_conditions))
            #to_delete = t.__table__.delete().where(getattr(t, column) == value)
        else:
            to_delete = inv[t].__table__.delete()
        CoreData.session.execute(to_delete)
To me it looks OK, but when I run it, I get the error below:
sqlalchemy.exc.StatementError: (sqlalchemy.exc.InvalidRequestError) A value is required for bind parameter '9876'
[SQL: DELETE FROM MyTable WHERE "MyTable".field_id = %(1234)s]
[parameters: [{}]]
(Background on this error at: http://sqlalche.me/e/cd3x)
Can someone explain to me what is wrong or the proper way to do it?
Thanks.
There are two problems with the code.
Firstly,
str(getattr(t, column) == bindparam(value))
is using the value itself as the placeholder name, so you end up with
WHERE f2 = :Bob
but it should be the name that maps to the value in filters (so the column name in your case), so you end up with
WHERE f2 = :f2
Secondly, multiple WHERE conditions are being joined with a comma, but you should use AND or OR, depending on what you are trying to do.
Given a model Foo:
import sqlalchemy as sa

class Foo(Base):
    __tablename__ = 'foo'
    id = sa.Column(sa.Integer, primary_key=True)
    f1 = sa.Column(sa.Integer)
    f2 = sa.Column(sa.String)
Here's a working version of a segment of your code:
filters = {'f1': 2, 'f2': 'Bob'}
t = Foo
where_conditions = ''
count = 0
for column in filters:
    aux = str(getattr(t, column) == sa.bindparam(column))
    if count == 0:
        where_conditions += aux
    else:
        where_conditions += ' AND ' + aux
    count += 1
to_delete = t.__table__.delete().where(sa.text(where_conditions))
print(to_delete)
session.execute(to_delete, filters)
If you aren't obliged to construct the WHERE conditions as strings, you can do it like this:
where_conditions = [(getattr(t, column) == sa.bindparam(column))
                    for column in filters]
to_delete = t.__table__.delete().where(sa.and_(*where_conditions))
session.execute(to_delete, filters)
Why does the script send only 100 records and then raise this warning? I think it must continue after committing the first 100 rows, because it stays in the loop:
C:\Python36\lib\site-packages\pymysql\connections.py:756: UserWarning: Previous unbuffered result was left incomplete
warnings.warn("Previous unbuffered result was left incomplete")
import json
import time
from datetime import datetime

import pymysql
from kafka import KafkaProducer

def parse(d, param):
    r = {}
    if str(type(d)) == "<class 'dict'>":
        return d
    return -1

s_con = pymysql.connect(host="xxx", user="xxxx", password="xxxx", port=3306,
                        db="xxx", cursorclass=pymysql.cursors.SSCursor)
s_cur = s_con.cursor()
s_con.commit()
s_cur.execute("select id, name from table")
try:
    while True:
        num_rows = 0
        st = datetime.now()
        r = [dict((s_cur.description[i][0], value)
                  for i, value in enumerate(row)) for row in s_cur.fetchmany(100)]
        print(r)
        for d in r:
            j = parse(d, param)
            producer = KafkaProducer(bootstrap_servers="xxxxx",
                                     value_serializer=lambda v: json.dumps(v, default=myconverter).encode('utf-8'))
            print(producer)
            print(j)
            num_rows = num_rows + 1
            if j != -1:
                print(param["TOPIC"])
                producer.send(param["TOPIC"], value=j)
        s_con.commit()
        time.sleep(10)
        if not r:
            break
finally:
    s_cur.close()
    s_con.close()
    producer.close()
Just like the message tells you, the previous unbuffered result was left incomplete.
It's just a warning; you don't have to take it too seriously.
The root cause is that you are using
cursorclass=pymysql.cursors.SSCursor
How many records do you have? If not too many, you can use the regular cursor.
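For example (a minimal sketch using the post's placeholder credentials), simply omitting cursorclass gives PyMySQL's default buffered cursor, which reads the whole result set up front, so nothing is left incomplete when you later commit:

import pymysql

# Default (buffered) cursor: the entire result set is fetched into memory,
# so a later commit cannot interrupt an unbuffered stream.
con = pymysql.connect(host="xxx", user="xxxx", password="xxxx",
                      port=3306, db="xxx")
cur = con.cursor()
cur.execute("select id, name from table")
for row in cur.fetchall():
    print(row)
con.close()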