Summary problem: Building an API endpoint and trying to push new key:values to the existing API. Not knowing if I am correctly adding key value pairs or not. Familiar with Ruby but first time Python user!
Context:
I currently have a method that will format given information into a Python Dictionary to be used as a JSON for my API. I have one method that pushes information to this method but another that is not functioning. Can anybody spot why?
Things I've tried:
Feature test using command line environment
Getting visibility - Printing
Environment:
MacOS 11.1, python 3.9.1, VSCode
Code:
METHOD THAT IS FORMATTING
class Database(object):
    """Minimal in-memory store shaped as ``{kind: {entity_id: entity_dict}}``."""

    def __init__(self):
        # Top-level mapping from entity kind (e.g. 'User') to its entities.
        self.data = {}

    def insert_entity(self, kind, entity):
        """Store *entity* under *kind*, assigning a UUID string id if absent.

        Returns the stored entity dict (with its 'id' key filled in).
        Raises Exception if a caller-supplied 'id' is not a string.
        """
        # setdefault both reads and registers the per-kind dict in one step,
        # replacing the original get-then-reassign pair.
        kind_dict = self.data.setdefault(kind, {})
        entity_id = entity.get('id', str(uuid4()))
        if not isinstance(entity_id, str):
            raise Exception('Entity `id` must be a string')
        entity['id'] = entity_id
        kind_dict[entity_id] = entity
        return entity

    def get_entity(self, kind, entity_id):
        """Return the entity of *kind* with *entity_id*, or None if missing."""
        return self.data.get(kind, {}).get(entity_id)

    def get_all_entities(self, kind):
        """Return a list of every entity stored under *kind* (may be empty)."""
        return list(self.data.get(kind, {}).values())
METHOD THAT IS WORKING:
def initialise_user_data():
    """Seed the module-level ``database`` with one 'User' entity per name pair."""
    first_names = ['Ron', 'Paul', 'Simon', 'David', 'Phil', 'Ada', 'Julia']
    last_names = [
        'Legend', 'Mac', 'Stuartson', 'Sili', 'Word', 'Nine',
        'Smith'
    ]
    # zip pairs the two lists positionally — clearer and safer than indexing
    # with range(len(first_names)).
    for first_name, last_name in zip(first_names, last_names):
        # NOTE(review): '#' in the address looks like a scraping artifact for
        # '@' — kept byte-identical; confirm against the original source.
        email = str(random.randint(0, 9999)) + "#email.com"
        database.insert_entity('User', {
            'firstName': first_name,
            'lastName': last_name,
            'email': email,
        })
METHOD THAT IS NOT WORKING:
def initialise_event_data():
    """Attach between 0 and 10 'Event' entities to every stored 'User'."""
    all_users = database.get_all_entities('User')
    for user in all_users:
        event_count = random.randint(0, 10)
        for _ in range(event_count):
            event = {
                'userId': user['id'],
                'points': 100,
                'eventName': 'levels_completed',
            }
            database.insert_entity('Event', event)
ALL METHODS ARE INVOKED AS SUCH:
# Module initialisation. BUG FIX: the original invoked initialise_data()
# *before* its def statement ran, which raises NameError at import time —
# the definition must come first and the call last.
# NOTE(review): the class above is named `Database`, but this instantiates
# `InMemoryDatabase` — confirm which name is correct in the real module.
database = InMemoryDatabase()


def initialise_data():
    """Populate users, then events, then follow relations (order matters:
    events and follows reference previously inserted users)."""
    initialise_user_data()
    initialise_event_data()
    initialise_follow_data()


initialise_data()
Related
Im fairly new to working with flask and want to know how to insert a dictionary into a column.
I am making a shopping website and the dictionary I want to insert into the order table is shown below:
# Cart payload: a single entry keyed by product_id describing the selection.
# NOTE(review): `product`, `product_id`, `product_quantity`, `product_size`
# and `product_color` must already be in scope here (presumably the ORM row
# and the user's form choices) — confirm against the surrounding view code.
product_features = {
product_id: {'name': product.product_name,
'price': product.product_price,
'quantity': product_quantity,
'discount': product.product_discount,
'information': product.product_information,
'size': product_size,
'color': product_color,
'image': product.product_image_1,
'all_colors': product.product_color,
'all_sizes': product.product_size,
'max_quantity': product.product_quantity}
}
# Persist the cart in the Flask session so it survives across requests.
session['cart'] = product_features
I then store this in a session called cart.
This represents the goods that the user is ordering. Once the user makes an order I then want to add this session data into a column called orders in a table called CustomerOrder.
In a previous thread I saw the code below being used:
# Example SQLAlchemy model whose `fancy_name` column stores a whole dict,
# JSON-encoded via the custom JsonEncodedDict type declared below.
class Person(db.Model):
__tablename__ = 'persons'
id = db.Column(db.Integer, primary_key=True)
# Stored as TEXT in the database; encoded/decoded transparently on access.
fancy_name = db.Column(JsonEncodedDict)
import json
from sqlalchemy.ext import mutable
# Flask-SQLAlchemy handle; the models above/below use db.Model, db.Column
# and db.TypeDecorator from this object.
db = SQLAlchemy()
class JsonEncodedDict(db.TypeDecorator):
    """Enables JSON storage by encoding and decoding on the fly."""

    # The underlying column type is plain TEXT.
    impl = db.Text

    def process_bind_param(self, value, dialect):
        # Serialise the Python dict to JSON text before it is written.
        return '{}' if value is None else json.dumps(value)

    def process_result_value(self, value, dialect):
        # Parse the stored JSON text back into a Python dict when read.
        return {} if value is None else json.loads(value)


# Track in-place mutations of the stored dict so changes get flushed.
mutable.MutableDict.associate_with(JsonEncodedDict)
I am unsure of how this exactly works and do not want to just copy code that I do not understand. Any advice on this would be very much appreciated, as I have looked on YouTube and online and there is not much information on this.
I'm trying to import 5000+ rows in Odoo 12 it's basically a mapping from a CSV developed in a custom method in a module, the problem I'm getting timeout in the request, that's happening when writing to the database, I'm using the standard ERP methods create and write.
How can I work around a solution to this? I know bulk insert is not possible to this, any other solution to this?
is a SQL command for insertion OK to use?
class file_reader(models.TransientModel):
    """Transient wizard that imports timesheet rows from an uploaded CSV."""
    _name = "rw.file.reader"

    csv_file = fields.Binary(string='CSV File', required=True)

    @api.multi
    def import_csv(self):
        """Create or update account.analytic.line records from the CSV rows.

        Rows whose project, user or employee cannot be matched are collected
        in `ignored` and reported back through the message.wizard popup.
        """
        file = base64.b64decode(self.csv_file).decode().split('\n')
        reader = csv.DictReader(file)
        ignored = []
        time1 = datetime.now()
        # Preload the lookup tables once, instead of a search() per CSV row.
        self._cr.execute('select id, name from project_project where active = true')
        projects = self._cr.fetchall()
        self._cr.execute('select id, login from res_users')
        users = self._cr.fetchall()
        self._cr.execute('select id, work_email from hr_employee')
        employees = self._cr.fetchall()
        LOG_EVERY_N = 100
        for row in reader:
            project_name = row['Project - Name']
            email = row['User - Email Address']
            project = [item for item in projects if item[1] == project_name]
            if len(project) > 0:
                user = [item for item in users if item[1] == email]
                employee = [item for item in employees if item[1] == email]
                if len(user) > 0 and len(employee) > 0:
                    task = self.env['project.task'].search(
                        [['user_id', '=', user[0][0]],
                         ['project_id', '=', project[0][0]]], limit=1)
                    if task:
                        y = row['Duration'].split(':')
                        i, j = y[0], y[1]
                        model = {
                            'project_id': project[0][0],
                            'task_id': task['id'],
                            'employee_id': employee[0][0],
                            'user_id': user[0][0],
                            'date': row['Date'],
                            # Convert "HH:MM" duration to float hours.
                            'unit_amount': int(i) + (float(j) / 60),
                            'is_timesheet': True,
                            'billable': True if row['Billable'] == 'Yes' else False,
                            'nexonia_id': row['ID']
                        }
                        time_sheet = self.env['account.analytic.line'].search(
                            [['nexonia_id', '=', row['ID']]], limit=1)
                        if time_sheet:
                            # BUG FIX: the original called .write() on a fresh,
                            # empty recordset, which updates nothing. Write on
                            # the record that was actually found instead.
                            time_sheet.sudo().write(model)
                        else:
                            self.env['account.analytic.line'].sudo().create(model)
                else:
                    if email not in ignored:
                        ignored.append(email)
            else:
                if project_name not in ignored:
                    ignored.append(project_name)
        all_text = 'Nothing ignored'
        # BUG FIX: a list is never None, so the original test was always true
        # and an empty `ignored` produced an empty report. Test truthiness.
        if ignored:
            all_text = "\n".join(filter(None, ignored))
        message_id = self.env['message.wizard'].create({
            'message': "Import data completed",
            'ignored': all_text
        })
        time2 = datetime.now()
        logging.info('total time ------------------------------------------ %s', time2 - time1)
        return {
            'name': 'Successfull',
            'type': 'ir.actions.act_window',
            'view_mode': 'form',
            'res_model': 'message.wizard',
            # pass the id
            'res_id': message_id.id,
            'target': 'new'
        }
I enhanced your code a little, because you were searching for each project, user and employee with a loop
for every row — and for 5000+ rows.
Using ORM method is always good because, they handle the stored compute fields and python constrains, but this will take time too
if you don't have any complex compute you can use INSERT or UPDATE query this will speed up the importion 100 times.
@api.multi
def import_csv(self):
    """Import CSV timesheet rows using dict lookups instead of per-row scans.

    Optimised variant of the question's importer: the project / user /
    employee tables are loaded once into dictionaries, so each row costs an
    O(1) lookup instead of a list comprehension over thousands of tuples.
    """
    # When a model is used more than once, bind it to a variable; the
    # `_sudo` suffix makes the elevated privileges visible at call sites.
    AccountAnalyticLine_sudo = self.env['account.analytic.line'].sudo()
    # CSV importer handler.
    file = base64.b64decode(self.csv_file).decode().split('\n')
    reader = csv.DictReader(file)
    ignored = []
    time1 = datetime.now()
    # Convert each result set to a dictionary for O(1) access later.
    self._cr.execute('select id, name from project_project where active = true order by name')
    projects = {p[1]: p for p in self._cr.fetchall()}
    self._cr.execute('select id, login from res_users order by login')
    users = {u[1]: u for u in self._cr.fetchall()}
    self._cr.execute('select id, work_email from hr_employee order by work_email')
    employees = {emp[1]: emp for emp in self._cr.fetchall()}
    LOG_EVERY_N = 100
    for row in reader:
        project_name = row['Project - Name']
        email = row['User - Email Address']
        # No loop needed — dictionary lookup is effectively constant time.
        project = projects.get(project_name)
        if project:
            # BUG FIX: the original read `user.get(email)` before any `user`
            # existed (NameError); the lookup table is named `users`.
            user = users.get(email)
            employee = employees.get(email)
            if user and employee:
                task = self.env['project.task'].search(
                    [('user_id', '=', user[0]),
                     ('project_id', '=', project[0])],
                    limit=1)
                if task:
                    y = row['Duration'].split(':')
                    i, j = y[0], y[1]
                    # By convention, dicts passed to create()/write() are
                    # named vals or values.
                    vals = {
                        'project_id': project[0],
                        'task_id': task['id'],
                        'employee_id': employee[0],
                        'user_id': user[0],
                        'date': row['Date'],
                        # Convert "HH:MM" duration to float hours.
                        'unit_amount': int(i) + (float(j) / 60),
                        'is_timesheet': True,
                        'billable': True if row['Billable'] == 'Yes' else False,
                        'nexonia_id': row['ID']
                    }
                    time_sheet = AccountAnalyticLine_sudo.search(
                        [('nexonia_id', '=', row['ID'])], limit=1)
                    if time_sheet:
                        # Update the record that was found; writing on an
                        # empty recordset would silently do nothing.
                        time_sheet.write(vals)
                    else:
                        # BUG FIX: the original passed the undefined name
                        # `model` here; the values dict is `vals`.
                        AccountAnalyticLine_sudo.create(vals)
            else:
                if email not in ignored:
                    ignored.append(email)
        else:
            if project_name not in ignored:
                ignored.append(project_name)
    all_text = 'Nothing ignored'
    # `ignored is not None` is always True for a list; test truthiness.
    if ignored:
        all_text = "\n".join(filter(None, ignored))
    message_id = self.env['message.wizard'].create({
        'message': "Import data completed",
        'ignored': all_text
    })
    time2 = datetime.now()
    logging.info('total time ------------------------------------------ %s', time2 - time1)
    return {
        'name': 'Successfull',
        'type': 'ir.actions.act_window',
        'view_mode': 'form',
        'res_model': 'message.wizard',
        # pass the id
        'res_id': message_id.id,
        'target': 'new'
    }
I hope this helps a little, even though the question was really about something else. I'm also somewhat confused, because Odoo usually allows a request to run
for up to 60 minutes.
While you are importing records through a script, code optimization is very important. Try to reduce the number of search/read calls by using a dictionary to cache each result, or use raw SQL, which I don't recommend.
I need to override the create method in my model on odoo 10 :
in my module i have Three Models :
Asset With
validated = fields.Boolean("Is validated")
survey2_ids = fields.One2many('mymodule.survey2', 'asset_id', string='Survey2')
Survey2 with :
name = fields.Char()
asset_id = fields.Many2one('asset.asset', description='Asset')
survey1_id = fields.Many2one('mymodule.survey1', description="Survey1")
description = fields.Text(description="description")
Survey1 with :
name = fields.Char(description="Name")
ok = fields.Boolean("Is ok")
description = fields.Text()
The goal in here is when creating a new asset, and if validated = True: all records in mymodule.survey1 with ok==True should be copied in survey2_ids, i tried this function but it doesn't seem to be working:
@api.model
def create(self, vals):
    """Copy every 'ok' survey1 record into the new asset's survey2_ids.

    Fixes versus the original attempt:
    - `self.validated` is always falsy in @api.model create (`self` is the
      model, not the record being created) — check `vals.get('validated')`.
    - the list of one2many commands was overwritten on every iteration, so
      only the last survey survived — build the whole list first.
    - the dangling `'description':` had no value (SyntaxError) — use
      `rec.description`.
    NOTE(review): per the Survey2 model above, the link field is
    `survey1_id` (there is no `survey2_id`), and `asset_id` is filled in
    automatically by the one2many command — confirm field names.
    """
    if vals.get('validated'):
        survey1_ids = self.env['mymodule.survey1'].search([('ok', '=', True)])
        vals['survey2_ids'] = [
            (0, False, {
                'survey1_id': rec.id,
                'name': rec.name,
                'description': rec.description,
            })
            for rec in survey1_ids
        ]
    return super(asset_asset, self).create(vals)
Any help will be appreciated.
There are two problems in your code :
Create is kind of a "class method" (it is tied to the model, no to the record). So when you ask for the value of self.validated, this will always be false because self is not the record you're creating, it's the model. You should check vals.get('validated') instead. Or create the record before-hand and use it instead of self (in my example, res in the newly created record).
You're not really copying survey 1 into survey 2. You just have to create survey 2 using the data in survey 1.
The solution that I think is best :
@api.model
def create(self, vals):
    """Create the asset, then mirror every 'ok' survey1 record into survey2."""
    # Create first so the new record's id is available for the copies.
    record = super(asset_asset, self).create(vals)
    # `vals` (not `self.validated`) carries the values being created.
    if vals.get('validated'):
        Survey2 = self.env['mymodule.survey2']
        for survey in self.env['mymodule.survey1'].search([('ok', '=', True)]):
            Survey2.create({
                'name': survey.name,
                'description': survey.description,
                'survey1_id': survey.id,
                'asset_id': record.id,
            })
    return record
Assuming that there are no errors in the logs, you are not getting what you intended to do. Once the code has executed, you are only getting 1 survey attached to the asset.
This is because inside the create function you wrote:
vals['survey2_ids'] = [(0, False, {'asset_id': self.id, 'survey2_id': rec.id,'name':rec.name,'description':})]
This will override the survey2_id in the vals each and every time in the for loop.
What you should do here is:
# Accumulate one (0, False, {...}) create-command per survey1 record, then
# assign the finished list once — assigning inside the loop (as the question
# did) keeps only the last element.
survey_2_list = []
for rec in survey1_ids:
survey_2_list.append((0, False, {'asset_id': self.id, 'survey2_id': rec.id,'name':rec.name,'description':rec.description}))
vals['survey2_ids'] = survey_2_list
Try the following:
@api.model
def create(self, vals):
    """Attach copies of all 'ok' survey1 records when creating a validated asset."""
    survey_2_list = []
    # BUG FIX: in @api.model create, `self` is the model (an empty recordset),
    # so `self.validated` is always falsy — test the incoming vals instead
    # (as the other answer on this page points out).
    if vals.get('validated'):
        survey1_ids = self.env['mymodule.survey1'].search([('ok', '=', True)])
        if survey1_ids:
            for rec in survey1_ids:
                # NOTE(review): per the Survey2 model, the link field is
                # `survey1_id`; `asset_id` is filled automatically by the
                # one2many (0, False, values) command — confirm field names.
                values = {
                    'survey1_id': rec.id,
                    'name': rec.name,
                    'description': rec.description,
                }
                survey_2_list.append((0, False, values))
            vals['survey2_ids'] = survey_2_list
    return super(asset_asset, self).create(vals)
I have some basic data that I want to store and I'm looking for a better solution then what I've come up with.
I have multiple entries of data with 4 fields per entry, name, url, currYear, availYears
I can solve this with a simple array of arrays like so:
# Two records shaped [name, [url, current_year, available_years]].
# BUG FIX: the original was missing the comma between the two inner lists,
# which made the second bracket pair a (failing) subscript on the first list.
data = [
    ['test-name', ['http://example.com', '2015', '2015,2014']],
    ['next-name', ['http://example.org', '1999', '1999']]
]
But this gets messy when trying to access data in each array. I end up with a for loop like this
# Unpack each [name, details] record; details is [url, currYear, availYears].
for name, details in data:
    url = details[0]
    currYear = details[1]
I'd prefer to do something similar to a dict where I can reference what I want by a key name. This isn't valid syntax, but hopefully it gets the point across.
data = {'entry1': {'name': 'test-name'}, {'url': 'http://example.com'}, {'currYear': '2015'}, {'availYears': '2015,2014'}}
Then I could pull the url data for entryX.
EDIT:
Several good responses. I decided to go with creating a class since 1) it satisfies my need 2) helps clean up the code by segregating functionality and 3) learn how packages, modules and classes work compared to Java (which I'm more familiar with).
In addition to creating the class, I also created getters and setters.
class SchoolSiteData(object):
    """One school site's data: name, url, current year, available years.

    Java-style accessors are kept for API compatibility, though plain
    attribute access (or @property) would be the usual Python approach.
    """

    def __init__(self, name, url, currYear, availYears):
        self.name = name
        self.url = url
        self.currYear = currYear
        self.availYears = availYears

    # --- getters ---------------------------------------------------------
    def getName(self):
        return self.name

    def getURL(self):
        return self.url

    def getCurrYear(self):
        return self.currYear

    def getAvailYears(self):
        return self.availYears

    # --- setters ---------------------------------------------------------
    def setName(self, name):
        self.name = name

    def setURL(self, url):
        self.url = url

    def setCurrYear(self, currYear):
        self.currYear = currYear

    def setAvailYears(self, availYears):
        self.availYears = availYears
A class may make this easier to use: eg:
class Entry(object):
    """Simple record: a site's name, url, current year and available years."""

    def __init__(self, name, url, currYear, availYears):
        # Plain public attributes — callers read entry.name, entry.url, etc.
        self.name = name
        self.url = url
        self.currYear = currYear
        self.availYears = availYears
entry1 = Entry('test-name', 'http://example.com', '2015', '2015,2014')
entry2 = Entry('next-name', 'http://example.org', '1999', '1999')
data = [entry1, entry2]
# BUG FIX: the original used Python 2 `print x` statements, which are a
# SyntaxError under Python 3 — print is a function now.
for entry in data:
    print(entry.name)
    print(entry.url)
    print(entry.currYear)
    print(entry.availYears)
    print()
Use the names as the keys in a dictionary:
# Keyed by the entry name, so lookups need no loop: data['test-name']['url'].
data = {'test-name':
{'url': 'http://example.com',
'currYear': '2015',
'availYears': '2015,2014'
}
}
Access like so:
data['test-name']['url']
You seem to have needlessly complicated things with the list-in-list solution. If you keep it a little flatter, you can just unpack the rows into variables:
# Flat rows: each record's fields unpack directly into named variables in
# the for statement, with no nested indexing.
data = [
['test-name', 'http://example.com', '2015', '2015,2014'],
['next-name', 'http://example.org', '1999', '1999']
]
for name, url, currYear, availYears in data:
....
The most light-weight solution for what you want is probably a namedtuple.
>>> from collections import namedtuple
>>> mytuple = namedtuple("mytuple", field_names="url currYear availYears")
>>> data = {'test-name': mytuple('http://example.com', '2015', '2015,2014'),
...         }
>>> print(data['test-name'])
mytuple(url='http://example.com', currYear='2015', availYears='2015,2014')
You can access members by numerical index or by name:
>>> x = data['test-name']
>>> print(x.currYear)
2015
>>> print(x[1])
2015
# One flat dict per record: every field is read by name, with no positional
# indexing and no nesting.
data = [
{'name': 'test-name', 'url': 'http://example.com', 'currYear': '2015', 'availYears': '2015,2014'},
{'name': 'next-name', 'url': 'http://example.org', 'currYear': '1999', 'availYears': '1999'}
]
for each in data:
name = each['name']
url = each['url']
currYear = each['currYear']
I am trying to create a new dictionary out of html form data that was submitted by the user. I end up writing repetitive if statements, checking if xyz key is in the dictionary in the form data. I know this is a quite suboptimal approach though I am not quite sure how to implement this using python.
This is the form data dictionary:
# Raw HTML form submission: every value arrives as a list of strings.
# '<xyz>field' flags a field as selected; '<xyz>field_info' and
# '<xyz>field_instruction' are its optional companions.
form_data = {
'urls': ['www.google.com', 'www.bing.com'],
'useremail': ['my#email.com'],
'emailfield': ['1'],
'addressfield': ['1'],
'addressfield_info':['Company'],
'addressfield_instruction': ['Please only if the company is a LLC'],
'phonefield': ['1'],
'phonefield_instruction': ['please include area code']
}
and I want to create a dictionary that looks like this:
# Desired output shape: pass-through keys plus one 'infofield' list that
# folds each per-field flag and its optional companions into a small dict.
new_dic = {
'urls': ['www.google.com', 'www.bing.com'],
'useremail': ['my#email.com'],
'infofield': [
{'field': 'email'},
{'field': 'address', 'info':'Company', 'instruction': 'Please only if the company is a LLC'},
{'field':'phone', 'instruction': 'please include area code'}
]
}
Important note: The 'xyzfield' is mandatory and the 'xyzfield_info' and 'xyzfield_instruction' are both optional. Also: the user can add more fields and create for instance an 'agefield', 'agefield_info' and 'agefield_instruction'.
The problem I have is about how to efficiently check if xyzfield (email, phone, etc) is in the dictionary. If it is in there, check also if any of the optional fields are in there as well. This looks currently something like this:
# The repetitive per-field pattern the question wants to avoid: check the
# flag key, then copy each optional companion only if it was submitted.
# NOTE(review): `cleaned_data` must already exist with an 'infofields' list.
if 'emailfield' in form_data:
infofield = {'field': 'email'}
if 'emailfield_info' in form_data:
infofield['info'] = form_data['emailfield_info']
if 'emailfield_instruction' in form_data:
infofield['instruction'] = form_data['emailfield_instruction']
cleaned_data['infofields'].append(infofield)
...
and I do this for every field, hence I have 4-5 of this. Additional, I will not be able to process any of the fields that the user has created himself since I don't know the name upfront.
Long story short: How can I make this more efficient and dynamic?
The standard answer to how to avoid repeated code applies here --- extract the repeated code to a function:
def extract_field(form_data, clean, fieldname, optional=('info', 'instruction')):
    """Append {'field': fieldname, ...} to *clean* if '<fieldname>field' was submitted.

    Each optional companion key ('<fieldname>field_info',
    '<fieldname>field_instruction') is copied in under its short name when
    present in *form_data*. Nothing happens when the flag key is absent.
    """
    if fieldname + 'field' not in form_data:
        return
    entry = {'field': fieldname}
    for suffix in optional:
        key = '{}field_{}'.format(fieldname, suffix)
        if key in form_data:
            entry[suffix] = form_data[key]
    clean.append(entry)
# Run the extraction once per known field type. NOTE(review): user-defined
# fields (e.g. 'agefield') would need a dynamic scan of form_data keys.
extract_field(form_data, cleaned_data['infofields'], 'email')
extract_field(form_data, cleaned_data['infofields'], 'address')
extract_field(form_data, cleaned_data['infofields'], 'phone')
This assumes you just want to clean whatever is actually submitted. If you are looking for specific things to be there, I suggest making a list of things to look for, and iterating over the list and checking to see if the things are there.
# Sample submission used by the second answer's solution below; list-of-
# strings values, one '<xyz>field' flag per selected field type.
form_data = {
'urls': ['www.google.com', 'www.bing.com'],
'useremail': ['my#email.com'],
'emailfield': ['1'],
'addressfield': ['1'],
'addressfield_info':['Company'],
'addressfield_instruction': ['Please only if the company is a LLC'],
'phonefield': ['1'],
'phonefield_instruction': ['please include area code']
}
def make_field_dict(form_data, base):
    """Build {'field': base[, 'info': ..., 'instruction': ...]} from form_data.

    Raises KeyError if '<base>field' is missing, and ValueError if the flag
    value is not ['1'] or a companion value is not a one-element list.
    """
    # BUG FIX: the original used the Python 2 `raise Exc, "msg"` form, which
    # is a SyntaxError in Python 3 — call the exception class instead.
    field_dict = {}
    name_field = base + "field"
    name_info = base + "field_info"
    name_inst = base + "field_instruction"
    if name_field not in form_data:
        raise KeyError("%s not found in form_data" % name_field)
    if form_data[name_field] != ['1']:
        raise ValueError("%s not valid in form_data" % name_field)
    field_dict["field"] = base
    if name_info in form_data:
        lst = form_data[name_info]
        if len(lst) != 1:
            raise ValueError("%s not valid in form_data" % name_info)
        field_dict["info"] = lst[0]
    if name_inst in form_data:
        lst = form_data[name_inst]
        if len(lst) != 1:
            raise ValueError("%s not valid in form_data" % name_inst)
        field_dict["instruction"] = lst[0]
    return field_dict
def parse_form_data(form_data):
    """Split *form_data* into pass-through keys plus an 'infofield' list.

    Keys whose name contains 'field' are grouped by their base name (the
    part before 'field') and turned into entries via make_field_dict; all
    other keys are copied through unchanged.
    """
    cleaned_data = {"infofield": []}
    handled = set()
    for key, value in form_data.items():
        if "field" not in key:
            # Ordinary key (e.g. 'urls', 'useremail'): copy as-is.
            cleaned_data[key] = value
            continue
        base = key.partition("field")[0]
        if base in handled:
            continue
        cleaned_data["infofield"].append(make_field_dict(form_data, base))
        handled.add(base)
    return cleaned_data
# Expected result, then a round-trip check against parse_form_data.
new_dic = {
    'urls': ['www.google.com', 'www.bing.com'],
    'useremail': ['my#email.com'],
    'infofield': [
        {'field': 'email'},
        {'field': 'address', 'info': 'Company', 'instruction': 'Please only if the company is a LLC'},
        {'field': 'phone', 'instruction': 'please include area code'}
    ]
}
clean_data = parse_form_data(form_data)
# BUG FIX: list.sort() with no key raises TypeError on a list of dicts in
# Python 3 (dicts are not orderable); sort on the unique 'field' value so
# both lists end up in the same order before comparing.
new_dic['infofield'].sort(key=lambda entry: entry['field'])
clean_data['infofield'].sort(key=lambda entry: entry['field'])
assert (new_dic == clean_data)