Python KeyError: 'destinationAccount'

I have code with the following structure from a website I'm scraping data from:

destinationAccount:
    ownerBuilding:
        label:
        _id:
    vban:
    _id:
When I try to read this key with
vban = str(transaction["destinationAccount"]["vban"])
it gives me KeyError: 'destinationAccount'.
Does anyone have an idea why this comes up? When I run my code, it copies everything I need into the MySQL database, but as I already said, the KeyError pops up and the interval isn't working
sched = BlockingScheduler()
sched.add_job(start, 'interval', seconds=5)
sched.start()
because it stops running after the error appears. When I comment out the line vban = str(transaction["destinationAccount"]["vban"]), no error comes up. I have checked more than 10 times now; the structure is there on the website, as I showed at the top. Any solution would be amazing.
def getData():
    databaseConn = dbConnect()
    cursor = databaseConn.cursor()
    for x in range(3):
        x = x * 25
        transactions = json.loads(makeRequest("URL.bla/transactions?offset=" + str(x), authToken, True).text)
        for transaction in transactions:
            person = ""
            try:
                person = transaction["destinationAccount"]["ownerCharacter"]["name"]
            except:
                try:
                    person = transaction["destinationAccount"]["ownerFactory"]["label"]
                except:
                    try:
                        person = transaction["destinationAccount"]["ownerBuilding"]["label"]
                    except:
                        person = str("unbekannt")
            reference = ""
            try:
                reference = str(translateTable[transaction["reference"]])
            except:
                reference = str(transaction["reference"])
            vban = str(transaction["destinationAccount"]["vban"])
            amount = str(transaction["amount"])
            taxAmount = str(transaction["taxAmount"])
            gesamt = (float(amount) + float(taxAmount))
            created = parse(str(transaction["created"]))
            date = str(created.date())
            time = str(created.time()).split(".")[0]
            sql = "INSERT INTO finanzen (transaktion, date, time, sendto, vban, amount, tax, gesamt, text) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)"
            val = (str(transaction["uuid"]), date, time, str(person), vban, amount, taxAmount, gesamt, reference)
            try:
                cursor.execute(sql, val)
                databaseConn.commit()
            except:
                print("Fehler Datenbank")
    dbClose(databaseConn, cursor)
Print result:
{'_id': 'CENSORED',
 'uuid': 'CENSORED',
 'amount': 11.8421,
 'taxAmount': 3.1479,
 'type': 'digital',
 'created': 'Date',
 'reference': 'CENSORED',
 'sourceAccount': {'_id': 'CENSORED',
                   'ownerCharacter': {'_id': 'CENSORED',
                                      'name': 'NAME'},
                   'vban': 'NUMBER'},
 'destinationAccount': {'_id': 'CENSORED',
                        'vban': 'NUMBER',
                        'ownerBuilding': {'_id': 'CENSORED',
                                          'label': 'Eclipse Towers'}}}

It's difficult without seeing the full list, but I suspect some of the items are missing the key. Have you tried checking whether the key exists? Using your example:
transaction = {
    "_id": "CENSORED",
    "uuid": "CENSORED",
    "amount": 11.8421,
    "taxAmount": 3.1479,
    "type": "digital",
    "created": "Date",
    "reference": "CENSORED",
    "sourceAccount": {
        "_id": "CENSORED",
        "ownerCharacter": {
            "_id": "CENSORED",
            "name": "NAME"
        },
        "vban": "NUMBER"
    },
    "destinationAccount": {
        "_id": "CENSORED",
        "ownerBuilding": {
            "_id": "CENSORED",
            "label": "Eclipse Towers"
        }
    }
}

if 'vban' in transaction['destinationAccount']:
    vban = str(transaction["destinationAccount"]["vban"])
else:
    vban = "none"

Thanks to @Johnny John Boy for the hint.
vban = ""
try:
vban = str(transaction["destinationAccount"]["vban"])
except:
try:
vban = str(transaction["sourceAccount"]["vban"])
except:
vban = str("Unbekannt")
This is the solution that fixed the KeyError: there was a second case where the vban only exists under sourceAccount. Now it works as it should, without any error.
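For anyone with more key paths to try, the nested try/except blocks can be collapsed into a small helper that walks a list of candidate paths. A minimal sketch, assuming the same transaction dicts as above:

def first_match(d, paths, default="Unbekannt"):
    # Try each key path in order; return the first value that resolves.
    for path in paths:
        value = d
        try:
            for key in path:
                value = value[key]
            return str(value)
        except (KeyError, TypeError):
            continue
    return default

vban = first_match(transaction, [("destinationAccount", "vban"),
                                 ("sourceAccount", "vban")])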

Related

INSERT Error Python MySQL Connector: Failed executing the operation

I am scraping a Shopify store using the products.json page, attempting to insert the scraped products into my MySQL DB using the Python connector, but I'm hitting the error below:
Something went wrong: Failed executing the operation; b'Name'
Code is below:
import json
import requests
import pandas as pd
import mysql.connector
import ScraperConfig as conf

class myScraper():
    def __init__(self, baseurl):
        self.baseurl = baseurl

    def downloadjson(self, page):
        r = requests.get(self.baseurl + f'products.json?limit=250&page={page}', timeout=5)
        if r.status_code != 200:
            print('Bad status code', r.status_code)
        if len(r.json()['products']) > 0:
            data = r.json()['products']
            return data
        else:
            return

    def parsejson(self, jsondata):
        products = []
        for prod in jsondata:
            vendor = prod['vendor']
            name = prod['title']
            handle = prod['handle']
            createdDateTime = prod['created_at']
            description = prod['body_html']
            productType = prod['product_type']
            for images in prod['images']:
                vendorProductId = images['product_id']
                try:
                    imageURL = images['src']
                except:
                    imageURL = 'None'
            for variant in prod['variants']:
                item = {
                    'name': name,
                    'handle': handle,
                    'description': description,
                    'productVariantId': variant['id'],
                    'createdDateTime': createdDateTime,
                    'productType': productType,
                    'vendorProductId': vendorProductId,
                    'imageURL': imageURL,
                    'price': variant['price'],
                    'salePrice': variant['compare_at_price'],
                    'available': variant['available'],
                    'updatedDateTime': variant['updated_at'],
                    'vendor': vendor
                }
                products.append(item)
        return products

def main():
    scrape = myScraper('https://www.someshopifysite.com/')
    results = []
    for page in range(1, 2):
        data = scrape.downloadjson(page)
        print('Getting page: ', page)
        try:
            results.append(scrape.parsejson(data))
        except:
            print(f'Completed, total pages = {page - 1}')
            break
    return results

if __name__ == '__main__':
    db = mysql.connector.connect(
        user=conf.user,
        host=conf.host,
        passwd=conf.passwd,
        database=conf.database)
    cursor = db.cursor()
    products = main()
    totals = [item for i in products for item in i]
    for p in totals:
        sql = """INSERT INTO `table` (`Name`, `Handle`, `Descritpion`, `VariantId`, `CreatedDateTime`, `ProductType`, `VendorProductId`, `ImageURL`, `Price`, `SalePrice`, `Available`, `UpdatedDateTime`, `Vendor`)
                 VALUES (%(`Name`)s, %(`Handle`)s, %(`Descritpion`)s, %(`VariantId`)s, %(`CreatedDateTime`)s, %(`ProductType`)s, %(`VendorProductId`)s, %(`ImageURL`)s, %(`Price`)s, %(`SalePrice`)s, %(`Available`)s, %(`UpdatedDateTime`)s, %(`Vendor`)s)"""
    try:
        cursor.executemany(sql, totals)
        print('Committed to DB')
    except mysql.connector.Error as err:
        print("Something went wrong: {}".format(err))
    db.commit()
Remove backticks from the following and all similar parts of the query:
%(`Name`)s
In general I'd remove backticks except for quoting column names that map to keywords.
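For illustration, here is the same statement with the backticks stripped from the placeholders. Note that mysql.connector resolves %(key)s against each dict passed to executemany, so the placeholder names below are also changed to match the keys of the scraped item dicts (the column names, including the original Descritpion spelling, are kept as-is):

sql = """INSERT INTO `table` (`Name`, `Handle`, `Descritpion`, `VariantId`,
                              `CreatedDateTime`, `ProductType`, `VendorProductId`,
                              `ImageURL`, `Price`, `SalePrice`, `Available`,
                              `UpdatedDateTime`, `Vendor`)
         VALUES (%(name)s, %(handle)s, %(description)s, %(productVariantId)s,
                 %(createdDateTime)s, %(productType)s, %(vendorProductId)s,
                 %(imageURL)s, %(price)s, %(salePrice)s, %(available)s,
                 %(updatedDateTime)s, %(vendor)s)"""
cursor.executemany(sql, totals)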

How to achieve rollback in pymongo?

I'm quite new to MongoDB, so this might be a rookie question.
I've written a script to insert a few records into my database, but I want it to be transactional, so that if an exception arises, all the changes made before it are rolled back.
try:
    # CONNECTIONS
    db_sql = pymysql.connect(host, user, password, name)
    mongo_client = pymongo.MongoClient(MONGO_URL, retryWrites=True)
    mongo_db = mongo_client[MONGO_DATABASE_NAME]
    mongo_collection = mongo_db[MONGO_COLLECTION]

    input_list = get_input(db_sql)
    doc_list = get_docs(mongo_collection)
    added_by = "SYSTEM"
    for doc in doc_list:
        for input in input_list:
            bot_ref = input['bot_ref']
            new_doc = {
                'customer_id': doc['customer_id'],
                'channel': doc['channel'],
                'locale': doc['locale'],
                'available_for': bot_ref,
                'display_name': doc['display_name'],
                'added_by': added_by,
                'reply_text': doc['reply_text'],
                'created_on': doc['created_on'],
                'created_by': doc['created_by'],
                'updated_on': doc['updated_on'],
                'updated_by': doc['updated_by']
            }
            mongo_collection.insert_one(new_doc)
except Exception as e:
    print(traceback.format_exc())
    raise
finally:
    db_sql.close()
get_input() and get_docs() are functions I've written to fetch data from the respective DBs.
In MongoDB version >= 4.0 (for replica sets) and >= 4.2 (for sharded clusters), multi-document transactions are supported, so you can do it the following way:
with client.start_session() as s:
    def cb(s):
        collection_one.insert_one(doc_one, session=s)
        collection_two.insert_one(doc_two, session=s)
    s.with_transaction(cb)
If something happens and an operation is not successful during the transaction commit phase, all the changes will be rolled back automatically.
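For context, a self-contained sketch of the same pattern; the connection string, database, and collection names are placeholders (transactions also require a replica set or sharded cluster, hence the replicaSet option):

import pymongo

client = pymongo.MongoClient("mongodb://localhost:27017/?replicaSet=rs0")
collection = client["mydb"]["mycollection"]

def insert_all(session):
    # Every write that passes session=... joins the transaction;
    # if any insert raises, the whole batch is rolled back.
    for doc in [{"x": 1}, {"x": 2}]:
        collection.insert_one(doc, session=session)

with client.start_session() as session:
    session.with_transaction(insert_all)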

Add name to create ec2 snapshot python script

I am a novice at Python but am learning as I go. I found this script and it works well but I wanted to make some edits to it so that it also saves the name of the instance that it created a snapshot for.
import boto3
import collections
import datetime

# boto3 allows Python developers to write software that makes use of Amazon services like S3 and EC2
ec = boto3.client('ec2')

# finds tag keys with the name "backup" or "Backup"
def lambda_handler(event, context):
    reservations = ec.describe_instances(
        Filters=[
            {'Name': 'tag-key', 'Values': ['backup', 'Backup']},
        ]
    ).get(
        'Reservations', []
    )
    instances = [
        i for r in reservations
        for i in r['Instances']
    ]
    print "Found %d instances that need backing up" % len(instances)
    to_tag = collections.defaultdict(list)
    # find tag keys with the name Retention; default value if NULL is 7 days
    for instance in instances:
        try:
            retention_days = [
                int(t.get('Value')) for t in instance['Tags']
                if t['Key'] == 'Retention'][0]
        except IndexError:
            retention_days = 7
        for dev in instance['BlockDeviceMappings']:
            if dev.get('Ebs', None) is None:
                continue
            vol_id = dev['Ebs']['VolumeId']
            print "Found EBS volume %s on instance %s" % (
                vol_id, instance['InstanceId'])
            snap = ec.create_snapshot(
                VolumeId=vol_id,
            )
            to_tag[retention_days].append(snap['SnapshotId'])
            print "Retaining snapshot %s of volume %s from instance %s for %d days" % (
                snap['SnapshotId'],
                vol_id,
                instance['InstanceId'],
                retention_days,
            )
    # set retention days according to the value int input
    for retention_days in to_tag.keys():
        delete_date = datetime.date.today() + datetime.timedelta(days=retention_days)
        delete_fmt = delete_date.strftime('%Y-%m-%d')
        print "Will delete %d snapshots on %s" % (len(to_tag[retention_days]), delete_fmt)
        ec.create_tags(
            Resources=to_tag[retention_days],
            Tags=[
                {'Key': 'DeleteOn', 'Value': delete_fmt},
            ]
        )
So far I have this, but am a little lost as to how to make it work with the current script above:
snapshot = ec(to_tag['SnapshotId'])
volumename = ''
# Add volume name to snapshot for easier identification
snapshot.create_tags(Tags=[{'Key': 'Name', 'Value': volumename}])
Any ideas welcomed! Thanks.
import boto3
import collections
import datetime

# boto3 allows Python developers to write software that makes use of Amazon services like S3 and EC2
ec = boto3.client('ec2')
sns_client = boto3.client('sns')

# finds tag keys with the name "backup" or "Backup"
def lambda_handler(event, context):
    reservations = ec.describe_instances(
        Filters=[
            {'Name': 'tag-key', 'Values': ['backup', 'Backup']},
        ]
    ).get(
        'Reservations', []
    )
    instances = [
        i for r in reservations
        for i in r['Instances']
    ]
    print "Found %d instances that need backing up" % len(instances)
    to_tag = collections.defaultdict(list)
    # find tag keys with the name Retention; default value if NULL is 7 days
    for instance in instances:
        try:
            retention_days = [
                int(t.get('Value')) for t in instance['Tags']
                if t['Key'] == 'Retention'][0]
        except IndexError:
            retention_days = 7
        for dev in instance['BlockDeviceMappings']:
            if dev.get('Ebs', None) is None:
                continue
            vol_id = dev['Ebs']['VolumeId']
            print "Found EBS volume %s on instance %s" % (
                vol_id, instance['InstanceId'])
            volumes = ec.describe_volumes()
            volumes = volumes["Volumes"]
            volumes_list = []
            for volume in volumes:
                volumes_list.append([volume["Tags"][0]["Value"], volume["VolumeId"]])
            for volume in volumes_list:
                try:
                    create_snapshot_response = ec.create_snapshot(
                        VolumeId=volume[1],
                        Description=volume[0] + " " + str(datetime.datetime.now()).split(" ")[0],
                    )
                    snapshot_id = create_snapshot_response["SnapshotId"]
                    tags = ec.create_tags(
                        Resources=[snapshot_id],
                        Tags=[{
                            "Key": "Name",
                            "Value": "{}: {}".format(volume[0], str(datetime.datetime.now()).split(" ")[0])
                        }]
                    )
                    to_tag[retention_days].append(snapshot_id)
                    print "Retaining snapshot %s of volume %s from instance %s for %d days" % (
                        snapshot_id,
                        vol_id,
                        instance['InstanceId'],
                        retention_days,
                    )
                except Exception as e:
                    print e
    # set retention days according to the value int input
    for retention_days in to_tag.keys():
        delete_date = datetime.date.today() + datetime.timedelta(days=retention_days)
        delete_fmt = delete_date.strftime('%Y-%m-%d')
        print "Will delete %d snapshots on %s" % (len(to_tag[retention_days]), delete_fmt)
        ec.create_tags(
            Resources=to_tag[retention_days],
            Tags=[
                {'Key': 'DeleteOn', 'Value': delete_fmt},
            ]
        )
import boto3

ec2_client = boto3.client('ec2')

def lambda_handler(event, context):
    instances = ec2_client.describe_instances()['Reservations']
    for i in instances:
        try:
            create_snapshot_response = ec2_client.create_snapshot(
                VolumeId=i['Instances'][0]['BlockDeviceMappings'][0]["Ebs"]["VolumeId"]
            )
            snapshot_id = create_snapshot_response["SnapshotId"]
            tags = ec2_client.create_tags(
                Resources=[snapshot_id],
                Tags=[{
                    "Key": "Name",
                    "Value": "{}".format(i['Instances'][0]["Tags"][0]['Value'])
                }]
            )
        except Exception as e:
            print(e)
    return "Success"
Krishna's answer is close to what I have in my lambda for providing instance names on my snapshots. One change I had was:
instance_name = ""
if 'Tags' in instance:
for tag in instance['Tags']:
if tag['Key'] == 'Name':
instance_name = tag['Value']
if not instance_name:
instance_name = instance['InstanceId']
snap = ec.create_snapshot(
VolumeId=vol_id,
TagSpecifications=[{
'ResourceType': 'snapshot',
'Tags': [{
'Key': 'Name',
'Value': instance_name
}]
}]
)
Update lines 43-45 to the following:
instance_name = ""
if 'Tags' in instance:
    for tag in instance['Tags']:
        if tag['Key'] == 'Name':
            instance_name = tag['Value']
if not instance_name:
    instance_name = instance['InstanceId']
snap = ec.create_snapshot(
    VolumeId=vol_id,
    TagSpecifications=[{
        'ResourceType': 'snapshot',
        'Tags': [{
            'Key': 'Name',
            'Value': instance_name
        }]
    }]
)
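As a possible simplification, and assuming the Lambda runtime's botocore is new enough to support TagSpecifications on create_snapshot (which the snippets above already rely on), the DeleteOn tag can be attached at creation time too, since retention_days is already known inside the loop:

# Sketch: tag Name and DeleteOn in one call instead of a second create_tags pass.
delete_on = (datetime.date.today() + datetime.timedelta(days=retention_days)).strftime('%Y-%m-%d')
snap = ec.create_snapshot(
    VolumeId=vol_id,
    TagSpecifications=[{
        'ResourceType': 'snapshot',
        'Tags': [
            {'Key': 'Name', 'Value': instance_name},
            {'Key': 'DeleteOn', 'Value': delete_on},
        ]
    }]
)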

Flask Python: Unable to get parameters using request.args

I want to request some parameters from an external web app. I created an API with Flask and query data from MySQL. I am able to query the data if I give a fixed input, but not when using request.args. I tried both request.args['name'] and request.args.get('name'), but both return the output of the exception path.
Below is my current code. I commented out the fixed inputs I used.
from flask import Flask, jsonify, abort, make_response, request, render_template
import MySQLdb
import MySQLdb.cursors

@app.route('/KLSE/search', methods=['GET'])
def KLSEsearch():
    db = MySQLdb.connect(host='vinus.mysql.pythonanywhere-services.com', user='vinus', passwd='Vindex2016', db='vinus$default', cursorclass=MySQLdb.cursors.DictCursor)
    curs = db.cursor()
    #name = 'P'
    #macd = 'H'
    #volumeMin = '_'
    #volumeMax = '_'
    #stoch = 'H1'
    #bollinger = 'H'
    #rsi = 'H1'
    #atr = 'LV'
    #trade = 'HOLD'
    #limit = 3
    #offSet = 1
    name = request.args.get('name')
    volumeMin = request.args['volumeMin']
    volumeMax = request.args['volumeMax']
    macd = request.args['macd']
    stoch = request.args['stoch']
    bollinger = request.args['bollinger']
    rsi = request.args['rsi']
    atr = request.args['atr']
    trade = request.args['trade']
    limit = request.args['limit']
    offSet = request.args['offSet']
    query0 = "SELECT * FROM KLSE WHERE Stock LIKE '%s' AND"
    #query1 = "(Vindex BETWEEN (IF(%s='_',-5000,%s)) AND (IF(%s='_',5000,%s))) AND "
    query2 = "(Volume_changes_pc BETWEEN (IF (%s='_',-5000,%s)) AND (IF(%s='_',5000,%s))) AND "
    query3 = "MACD LIKE %s AND "
    query4 = "STOCH LIKE %s AND "
    query5 = "BOLLINGER LIKE %s AND "
    query6 = "RSI LIKE %s AND "
    query7 = "ATR LIKE %s AND "
    query8 = "TRADE LIKE %s LIMIT %s OFFSET %s"
    query = query0 + query2 + query3 + query4 + query5 + query6 + query7 + query8
    input = name + "%", volumeMin, volumeMin, volumeMax, volumeMax, macd, stoch, bollinger, rsi, atr, trade, limit, offSet
    try:
        curs.execute(query, (input))
        g = curs.fetchall()
    except Exception:
        return 'Error: unable to fetch items'
    #return "hihi"
    return jsonify({'Stock': g})
The output with the fixed values is below. I think it shows the query to MySQL is correct.
http://vinus.pythonanywhere.com/KLSE/search1
For the user input values, which use the args:
http://vinus.pythonanywhere.com/KLSE/search?atr=%&bollinger=%&macd=%&name=M&rsi=%&stoch=%&volumeMax=&volumeMin=&trade=HOLD&limit=5&offSet=1
What is the right way to get the parameters? volumeMin, volumeMax, limit and offSet are floats and integers.
You have to serialize your data first.
def serialize():
    return {
        "id": g.id,
        "volumeMin": g.name,
        "volumeMax": g.address,
        "macd": g.city,
        "stoch": g.state,
        "zipCode": g.zipCode,
        "bollinger": g.bollinger,
    }

@app.route("/KLSE/search/.json")
def stock_json():
    query = your.db.query()
    return jsonify(Stock=[i.serialize for i in query])
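On the numeric parameters from the question: request.args values always arrive as strings, and request.args.get accepts a type argument that converts the value (falling back to the default if conversion fails). A minimal sketch; the default values here are placeholders:

# Inside the view function: coerce query-string values to the right types.
limit = request.args.get('limit', default=10, type=int)
offSet = request.args.get('offSet', default=0, type=int)
volumeMin = request.args.get('volumeMin', default=-5000.0, type=float)
volumeMax = request.args.get('volumeMax', default=5000.0, type=float)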

Insert into Odoo db with a specific id using cursor.commit and psycopg2

I'm trying to migrate some models from OpenERP 7 to Odoo 8 by code. I want to insert objects into the new table keeping their original id numbers, but it doesn't do it.
I want to insert each new object including its id number.
My code:
import openerp
from openerp import api, modules
from openerp.cli import Command
import psycopg2

class ImportCategory(Command):
    """Import categories from source DB"""

    def process_item(self, model, data):
        if not data:
            return
        # Model structure
        model.create({
            'id': data['id'],
            'parent_id': None,
            'type': data['type'],
            'name': data['name']
        })

    def run(self, cmdargs):
        # Connection to the source database
        src_db = psycopg2.connect(
            host="127.0.0.1", port="5432",
            database="db_name", user="db_user", password="db_password")
        src_cr = src_db.cursor()
        try:
            # Query to retrieve source model data
            src_cr.execute("""
                SELECT c.id, c.parent_id, c.name, c.type
                FROM product_category c
                ORDER BY c.id;
            """)
        except psycopg2.Error as e:
            print e.pgerror
        openerp.tools.config.parse_config(cmdargs)
        dbname = openerp.tools.config['db_name']
        r = modules.registry.RegistryManager.get(dbname)
        cr = r.cursor()
        with api.Environment.manage():
            env = api.Environment(cr, 1, {})
            # Define target model
            product_category = env['product.category']
            id_ptr = None
            c_data = {}
            while True:
                r = src_cr.fetchone()
                if not r:
                    self.process_item(product_category, c_data)
                    break
                if id_ptr != r[0]:
                    self.process_item(product_category, c_data)
                    id_ptr = r[0]
                    c_data = {
                        'id': r[0],
                        'parent_id': r[1],
                        'name': r[2],
                        'type': r[3]
                    }
            cr.commit()
How do I do that?
The only way I could find was to use reference attributes on other objects to relate them in the new database. I mean, create relations over location code, client code, order number... and once they are created in the target database, look them up and use the new ID.
def run(self, cmdargs):
    # Connection to the source database
    src_db = psycopg2.connect(
        host="localhost", port="5433",
        database="bitnami_openerp", user="bn_openerp", password="bffbcc4a")
    src_cr = src_db.cursor()
    try:
        # Query to retrieve source model data
        src_cr.execute("""
            SELECT fy.id, fy.company_id, fy.create_date, fy.name,
                   p.id, p.code, p.company_id, p.create_date, p.date_start, p.date_stop, p.special, p.state,
                   c.id, c.name
            FROM res_company c, account_fiscalyear fy, account_period p
            WHERE p.fiscalyear_id = fy.id AND c.id = fy.company_id AND p.company_id = fy.company_id
            ORDER BY fy.id;
        """)
    except psycopg2.Error as e:
        print e.pgerror
    openerp.tools.config.parse_config(cmdargs)
    dbname = openerp.tools.config['db_name']
    r = modules.registry.RegistryManager.get(dbname)
    cr = r.cursor()
    with api.Environment.manage():
        env = api.Environment(cr, 1, {})
        # Define target model
        account_fiscalyear = env['account.fiscalyear']
        id_fy_ptr = None
        fy_data = {}
        res_company = env['res.company']
        while True:
            r = src_cr.fetchone()
            if not r:
                self.process_fiscalyear(account_fiscalyear, fy_data)
                break
            company = res_company.search([('name', 'like', r[13])])
            print "Company id: {} | Company name: {}".format(company.id, company.name)
The previous code is only an extract from the whole source code.
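If keeping the original ids really is a requirement, one workaround (an assumption on my side; the ORM's create() does not let you force the id) is to insert the rows with plain SQL on the target cursor and then bump the table's id sequence so future ORM-created records don't collide:

# Hypothetical sketch: insert with an explicit id via SQL, bypassing the ORM.
cr.execute("""
    INSERT INTO product_category (id, parent_id, name, type)
    VALUES (%s, %s, %s, %s)
""", (data['id'], data['parent_id'], data['name'], data['type']))

# Move the sequence past the highest imported id (assumes the default
# product_category_id_seq naming for the serial column).
cr.execute("""
    SELECT setval('product_category_id_seq',
                  (SELECT MAX(id) FROM product_category))
""")
cr.commit()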
