I have an app that contains these models:
class Transaction(models.Model):
    chp_reference = models.CharField(max_length=50, unique=True)
    rent_effective_date = ..
    income_period = ..
    property_market_rent = ..
    number_of_family_group = ..
    cruser = ..
    prop_id = ..
    state = ..
    group = ..

class FamilyGroup(models.Model):
    name = models.CharField(..
    transaction = models.ForeignKey(Transaction, ..
    ...

class FamilyMember(models.Model):
    transaction = models.ForeignKey(Transaction, ..
    family_group = models.ForeignKey(FamilyGroup, ..
    name = models.CharField..
    date_of_birth = models.DateField..
    ....
I'm trying to make an Imports app that will accept xlsx files in a certain format.
After importing the models from the other apps, I created a model that has a field for each field in the models above (I removed a lot so it stays readable).
I'm trying to use update_or_create, since I think it's the best approach: maybe in the future I will want to update some fields. I have written the first update_or_create for Transaction, but since FamilyGroup and FamilyMember are children of Transaction (and inlines), I can't figure out how to apply it to them. The main idea is that a transaction contains family groups and family members inside it.
class Batch(models.Model):
    batch = models.CharField(max_length=50)
    transaction_chp_reference = models.CharField(unique=True)
    transaction_rent_effective_date = models.DateField(..
    transaction_property_market_rent = models.DecimalField(..
    transaction_number_of_family_group = models.PositiveSmallIntegerField(..
    family_group_name = models.CharField(..
    family_group_family_type = models.CharField(..
    family_group_alloc_id = models.PositiveIntegerField(..
    family_group_last_rent = models.DecimalField(..
    family_member_name = models.CharField(..
    family_member_contact_id = models.PositiveIntegerField(..
    family_member_surname = models.CharField(..
    family_member_partnered = models.BooleanField(..

    def __str__(self):
        return str(self.batch)

    def save(self, *args, **kwargs):
        self.message = ''
        if self.transaction_chp_reference:
            trans, t = Transaction.objects.update_or_create(
                # filter on the unique value of `chp_reference`
                chp_reference=self.transaction_chp_reference,
                # update these fields, or create a new object with these values
                defaults={
                    'income_period': self.transaction_income_period,
                    'property_market_rent': self.transaction_property_market_rent,
                    'number_of_family_group': self.transaction_number_of_family_group,
                    'rent_effective_date': self.transaction_rent_effective_date,
                    'cruser': self.transaction_cruser,
                    'prop_id': self.transaction_prop_id,
                    'state': self.transaction_state,
                }
            )
            self.message += 'Transaction "' + str(trans.chp_reference) + '" Created\n'
            obj, mt = MaintenanceType.objects.update_or_create(
                name=self.family_group_maintenance_type,
            )
            obj, ft = FamilySituation.objects.update_or_create(
                name=self.family_group_family_type,
            )
            obj, fg = FamilyGroup.objects.update_or_create(
                transaction=t,
                name=self.family_group_name,
                defaults={
                    'alloc_id': self.family_group_alloc_id,
                    'any_income_support_payment': self.family_group_any_income_support_payment,
                    'cra_amount': self.family_group_cra_amount,
                    'cra_eligibilty': self.family_group_cra_eligibilty,
                    'family_type': ft,
                    'ftb_a': self.family_group_ftb_a,
                    'ftb_b': self.family_group_ftb_b,
                    'last_rent': self.family_group_last_rent,
                    'maintenance_amount': self.family_group_maintenance_amount,
                    'maintenance_type': mt,
                    'name': self.family_group_name,
                    'number_of_additional_children': self.family_group_number_of_additional_children,
                }
            )
            self.message += 'Family Group "' + str(obj.name) + '" Created\n'
Now I'm getting an error when I try to import an xlsx file:
Cannot assign "False": "FamilyGroup.transaction" must be a "Transaction" instance.
Traceback:
Traceback (most recent call last):
  File "E:\15-12\venv\lib\site-packages\django\db\models\query.py", line 575, in update_or_create
    obj = self.select_for_update().get(**kwargs)
  File "E:\15-12\venv\lib\site-packages\django\db\models\query.py", line 417, in get
    self.model._meta.object_name
calculator.models.FamilyGroup.DoesNotExist: FamilyGroup matching query does not exist.
UPDATE
I have replaced the save() method with this code.
@receiver(post_save, sender=Batch)
def post_save_transaction(sender, instance, created, **kwargs):
    message = ''
    if created:
        Transaction.objects.update_or_create(
            chp_reference=instance.transaction_chp_reference,
            defaults={'rent_effective_date': instance.rent_effective_date, ..., ...})
        ## now since FamilyGroup is a child (ForeignKey) of Transaction
        ## I'm not sure how to make it an instance of Transaction
        ## FamilyMember is also a child of FamilyGroup and Transaction - same issue
        ## I tried this --->
        transactions = []
        transaction = Transaction.objects.all()
        for i in transaction:
            transactions.append(i.pk)
        FamilyGroup.objects.update_or_create(
            name=instance.family_group_name,
            transaction__in=transactions
        )
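For reference, here is a minimal sketch of one way the signal could chain the three update_or_create calls (untested, assuming the Batch field names shown above). The key point is that update_or_create returns (object, created), so it is the first element of the tuple, not the boolean, that must be passed to the child lookups:

# assumes the models above are imported
from django.db.models.signals import post_save
from django.dispatch import receiver

@receiver(post_save, sender=Batch)
def post_save_transaction(sender, instance, created, **kwargs):
    if not created:
        return
    # first element of the returned tuple is the Transaction instance
    trans, _ = Transaction.objects.update_or_create(
        chp_reference=instance.transaction_chp_reference,
        defaults={'rent_effective_date': instance.transaction_rent_effective_date},
    )
    # look up the child by its parent instance plus its own identifying field
    group, _ = FamilyGroup.objects.update_or_create(
        transaction=trans,
        name=instance.family_group_name,
        defaults={'last_rent': instance.family_group_last_rent},
    )
    # FamilyMember hangs off both the Transaction and the FamilyGroup
    FamilyMember.objects.update_or_create(
        transaction=trans,
        family_group=group,
        name=instance.family_member_name,
        defaults={},  # remaining family_member_* columns would go here
    )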
customer_count = fields.Char(string='Customer Count', compute='cust_count')
class ResBuilding(models.Model):
    _name = "res.partner.building"
    _description = "Customer Building"
    _order = "id desc"

    region_id = fields.Many2one('res.state.city', string='Region', required=True, ondelete='cascade')
    city_id = fields.Many2one('city.covered.area', string='Area', required=True, ondelete='cascade')
    name = fields.Char(string='Name', required=True, translate=True, copy=False)
    image = fields.Binary(string="Building image")
    latitude = fields.Char(string='Latitude')
    customer_count = fields.Char(string='Customer Count', compute='cust_count', store=True)
    longitude = fields.Char(string='Longitude')
    active = fields.Boolean(string='Active', default=True, track_visibility='onchange')
    partner_ids = fields.One2many('res.partner', 'building_id', string='Customer List', readonly=True)

    @api.multi
    @api.depends('partner_ids')
    def cust_count(self):
        for record in self:
            count = self.env['res.partner'].search_count([('building_id', '=', record.id)])
            record.customer_count = count

    @api.multi
    def name_get(self):
        result = []
        for route in self:
            name = route.city_id.name + '-' + route.name
            result.append((route.id, name))
        return result

    @api.multi
    def write(self, vals):
        res = super(ResBuilding, self).write(vals)
        print(vals, self.id)
        if vals.get('city_id'):
            customers = self.env['res.partner'].search([('building_id', '=', self.id)])
            for c in customers:
                c.living_area = vals.get('city_id')
        return res

    @api.multi
    @api.depends('latitude', 'longitude')
    def on_change_location(self):
        for li in self:
            if li.latitude and li.longitude:
                self.env.cr.execute("""update res_partner set location_latitude=""" + str(li.latitude) + """,
                    location_longitude=""" + str(li.longitude) + """ where building_id=""" + str(li.id))
        return True

    @api.multi
    def write(self, vals):
        res = super(ResBuilding, self).write(vals)
        self.on_change_region_id()
        return res

    @api.multi
    @api.depends('region_id')
    def on_change_region_id(self):
        for li in self:
            if li.region_id:
                self.env.cr.execute(
                    """update res_partner set city_id=""" + str(li.region_id.id) + """ where building_id=""" + str(li.id))
        return True
I want to find the customer count in a specific building based on the building id, and I want the customer_count field to be available in the "Add Custom Filter" list.
With this code I am getting the count correctly, but the field customer_count does not appear in the custom filter.
When I add store=True, the field shows up in the custom filter, but the count comes out as 0.
Your code is not correct, even with storing the field.
customer_count = fields.Char(
    string='Customer Count', compute='cust_count', store=True)

@api.multi
@api.depends()
def cust_count(self):
    for record in self:
        count = self.env['res.partner'].search_count([('building_id', '=', record.id)])
        record.customer_count = count
Always loop over the records in compute methods, because relational fields that point at your building model, or simply displaying this computed field in a list view of buildings, will put a multi-record recordset behind self.
But that's not all. There should be a way to trigger the recomputation of the field, ideally via depends. Right now I don't see an easy one, because I don't know all your relations and workflows. Without storing the field you probably don't need that, but it would work there too.
So what can you do to trigger a recomputation? Work up from the other side of the relation, res.partner: override its write, create and unlink methods to trigger the recomputation "manually".
class ResPartner(models.Model):
    _inherit = "res.partner"

    @api.multi
    def write(self, values):
        old_buildings = self.mapped('building_id')
        res = super(ResPartner, self).write(values)
        if 'building_id' in values:
            new_buildings = self.mapped('building_id')
            trigger_buildings = old_buildings | new_buildings
            trigger_buildings.cust_count()
        return res

    @api.model
    def create(self, values):
        partner = super(ResPartner, self).create(values)
        partner.building_id.cust_count()
        return partner

    @api.multi
    def unlink(self):
        buildings = self.mapped('building_id')
        res = super(ResPartner, self).unlink()
        buildings.cust_count()
        return res
Another way is to use a one2many field on your building model in relation to res.partner and put it in depends, like depends('partner_ids'). But in my experience one2many fields in this and a lot of other situations tend to lead to bad performance.
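As a rough sketch of that alternative (untested; it assumes the partner_ids one2many already defined on the building model, and deliberately uses an Integer field and a renamed compute method, which differ from the question's code):

customer_count = fields.Integer(string='Customer Count', compute='_compute_customer_count', store=True)

@api.depends('partner_ids')
def _compute_customer_count(self):
    # recomputed automatically whenever partner_ids changes, thanks to the depends
    for record in self:
        record.customer_count = len(record.partner_ids)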
I've created a function in my serializers.py that calls an external API and gives me back a dict.
How can I use the output of return downloads in get_all_files as a field in class Meta?
After the first answer, I got the following error message:
Exception Value: 'NoneType' object is not subscriptable
Exception Location: /app/api/serializers.py, line 68, in get_all_files
Line 68 is the following: return get_all_files(instance.bands)
serializers.py
from rest_framework import serializers
from . import views
from api.models import Application, Indice, Satellite, Band
from satsearch import Search


class IndiceSerializer(serializers.ModelSerializer):
    class Meta:
        model = Indice
        fields = ['name', 'accr', 'description', 'is_NormalizedDifference', 'calc', ]


class SatelliteSerializer(serializers.ModelSerializer):
    class Meta:
        model = Satellite
        fields = ['name', 'accr', 'operator', ]


class BandSerializer(serializers.ModelSerializer):
    class Meta:
        model = Band
        fields = ['band', 'description', 'wavelength', 'resolution', ]


class OsdSerializer(serializers.ModelSerializer):
    bands = BandSerializer(source='indice_to_use.needed_bands', many=True)
    satellite = SatelliteSerializer(source='indice_to_use.satellite_to_use')
    indice = IndiceSerializer(source='indice_to_use')
    files = serializers.SerializerMethodField()

    def get_files(self, instance):
        def get_all_files(bands):
            # configuration
            url = 'https://earth-search.aws.element84.com/v0'  # URL to Sentinel 2 AWS catalog
            collection = 'sentinel-s2-l2a-cogs'

            # search parameters
            startDate = '2021-04-10'
            endDate = '2021-04-12'
            location = [13.6677, 43.7232, 16.2605, 45.4522]

            bbox_search = Search(
                bbox=location,
                datetime=startDate + "/" + endDate,
                query={'eo:cloud_cover': {'lt': 50}},
                collections=[collection],
                url=url,
                sort={'field': 'eo:cloud_cover', 'direction': 'desc'},
            )

            items = bbox_search.items()
            downloads = {}
            for i, item in enumerate(items):
                data = {}
                data['Product ID'] = item.properties["sentinel:product_id"]
                data['Preview'] = item.asset("thumbnail")["href"]
                data['Date'] = item.properties["datetime"]
                data['Cloud cover'] = item.properties["eo:cloud_cover"]
                for band in bands.split(','):
                    data[band] = item.asset(band)["href"]
                downloads[i] = data
            return downloads

        return get_all_files(instance.bands)

    class Meta:
        model = Application
        fields = ['machine_name', 'name', 'description', 'indice', 'satellite', 'bands', 'files', ]
In order to do this, you need to do the following.
Alongside the other serializer fields (bands, satellite and indice), you should put:
files = serializers.SerializerMethodField()
A SerializerMethodField gets its value by calling the method get_<field_name>, which in this case is get_files. More on this in the DRF documentation on SerializerMethodField.
So you can move all the logic you have up there into this method:
def get_files(self, instance):
    return get_all_files(instance.bands)
And now you only have to put files in the fields list, like this:
fields = ['machine_name', 'name', 'description', 'indice', 'satellite', 'bands', 'files']
Your nested function is throwing off the interpreter, which results in an incomplete stack trace. I'd start by converting your get_files method to a more typical function:
class OsdSerializer(serializers.ModelSerializer):
    bands = BandSerializer(source='indice_to_use.needed_bands', many=True)
    satellite = SatelliteSerializer(source='indice_to_use.satellite_to_use')
    indice = IndiceSerializer(source='indice_to_use')
    files = serializers.SerializerMethodField()

    def get_files(self, instance):
        bands = instance.bands

        # configuration
        url = 'https://earth-search.aws.element84.com/v0'  # URL to Sentinel 2 AWS catalog
        collection = 'sentinel-s2-l2a-cogs'

        # search parameters
        startDate = '2021-04-10'
        endDate = '2021-04-12'
        location = [13.6677, 43.7232, 16.2605, 45.4522]

        bbox_search = Search(
            bbox=location,
            datetime=startDate + "/" + endDate,
            query={'eo:cloud_cover': {'lt': 50}},
            collections=[collection],
            url=url,
            sort={'field': 'eo:cloud_cover', 'direction': 'desc'},
        )

        items = bbox_search.items()
        downloads = {}
        for i, item in enumerate(items):
            data = {}
            data['Product ID'] = item.properties["sentinel:product_id"]
            data['Preview'] = item.asset("thumbnail")["href"]
            data['Date'] = item.properties["datetime"]
            data['Cloud cover'] = item.properties["eo:cloud_cover"]
            for band in bands.split(','):
                data[band] = item.asset(band)["href"]
            downloads[i] = data
        return downloads
Now when you run it, you will still get the TypeError, but it will point to the line where the error is actually occurring, which I assume is in the downloads loop.
If my assumption is correct, then you will need to make sure you are parsing the data correctly, because you are trying to access a key on an object that doesn't exist. For example, in the line:
data['Preview'] = item.asset("thumbnail")["href"]
if item.asset('thumbnail') returns None, then you would get this error.
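A defensive version of that line might look something like this (just a sketch, assuming a missing asset should simply yield an empty value):

thumbnail = item.asset("thumbnail")
data['Preview'] = thumbnail["href"] if thumbnail else None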
If you still can't figure it out, please provide a link to the satsearch package, and/or show example data that is returned from Search(...)
The code works fine. The error was caused by a mistake in my backend: the band keys in the sat-search result use three characters, e.g. B03, while the values in my field had two, e.g. B3. That's why the matching didn't work.
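For what it's worth, a small sketch of how the two-character band names could be normalized to match the three-character sat-search keys (a hypothetical helper, assuming names of the form "B" plus a number):

def normalize_band(band):
    # 'B3' -> 'B03'; 'B11' stays 'B11'; non-numeric suffixes are left alone
    suffix = band[1:]
    return 'B' + suffix.zfill(2) if suffix.isdigit() else band

normalize_band('B3')   # 'B03'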
I have a function that searches for JSON files in a directory, parses each file and writes the data to the database. My problem is writing to the database, because it takes around 30 minutes. Any idea how I can speed up the writes? I have a few quite big files to parse, but parsing is not the problem; it takes around 3 minutes. Currently I am using SQLite, but in the future I will change it to PostgreSQL.
Here is my function:
def create_database():
    with transaction.atomic():
        directory = os.fsencode('data/web_files/unzip')
        for file in os.listdir(directory):
            filename = os.fsdecode(file)
            with open('data/web_files/unzip/{}'.format(filename.strip()), encoding="utf8") as f:
                data = json.load(f)
                cve_items = data['CVE_Items']
                for i in range(len(cve_items)):
                    database_object = DataNist()
                    try:
                        impact = cve_items[i]['impact']['baseMetricV2']
                        database_object.severity = impact['severity']
                        database_object.exp_score = impact['exploitabilityScore']
                        database_object.impact_score = impact['impactScore']
                        database_object.cvss_score = impact['cvssV2']['baseScore']
                    except KeyError:
                        database_object.severity = ''
                        database_object.exp_score = ''
                        database_object.impact_score = ''
                        database_object.cvss_score = ''
                    for vendor_data in cve_items[i]['cve']['affects']['vendor']['vendor_data']:
                        database_object.vendor_name = vendor_data['vendor_name']
                        for description_data in cve_items[i]['cve']['description']['description_data']:
                            database_object.description = description_data['value']
                        for product_data in vendor_data['product']['product_data']:
                            database_object.product_name = product_data['product_name']
                            database_object.save()
                            for version_data in product_data['version']['version_data']:
                                if version_data['version_value'] != '-':
                                    database_object.versions_set.create(version=version_data['version_value'])
My models.py:
class DataNist(models.Model):
    vendor_name = models.CharField(max_length=100)
    product_name = models.CharField(max_length=100)
    description = models.TextField()
    date = models.DateTimeField(default=timezone.now)
    severity = models.CharField(max_length=10)
    exp_score = models.IntegerField()
    impact_score = models.IntegerField()
    cvss_score = models.IntegerField()

    def __str__(self):
        return self.vendor_name + "-" + self.product_name


class Versions(models.Model):
    data = models.ForeignKey(DataNist, on_delete=models.CASCADE)
    version = models.CharField(max_length=50)

    def __str__(self):
        return self.version
I would appreciate any advice on how I can improve my code.
Okay, given the structure of the data, something like this might work for you.
This is standalone code aside from that .objects.bulk_create() call; as commented in the code, the two classes defined would actually be models within your Django app.
(By the way, you probably want to save the CVE ID as an unique field too.)
Your original code had the misassumption that every "leaf entry" in the affected version data would have the same vendor, which may not be true. That's why the model structure here has a separate product-version model that has vendor, product and version fields. (If you wanted to optimize things a little, you might deduplicate the AffectedProductVersions even across DataNists (which, as an aside, is not a perfect name for a model)).
And of course, as you had already done in your original code, the importing should be run within a transaction (transaction.atomic()).
Hope this helps.
import json
import os
import types


class DataNist(types.SimpleNamespace):  # this would actually be a model
    severity = ""
    exp_score = ""
    impact_score = ""
    cvss_score = ""

    def save(self):
        pass


class AffectedProductVersion(types.SimpleNamespace):  # this too
    # (foreign key to DataNist here)
    vendor_name = ""
    product_name = ""
    version_value = ""


def import_item(item):
    database_object = DataNist()
    try:
        impact = item["impact"]["baseMetricV2"]
    except KeyError:  # no impact object available
        pass
    else:
        database_object.severity = impact.get("severity", "")
        database_object.exp_score = impact.get("exploitabilityScore", "")
        database_object.impact_score = impact.get("impactScore", "")
        if "cvssV2" in impact:
            database_object.cvss_score = impact["cvssV2"]["baseScore"]

    for description_data in item["cve"]["description"]["description_data"]:
        database_object.description = description_data["value"]
        break  # only grab the first description

    database_object.save()  # save the base object

    affected_versions = []
    for vendor_data in item["cve"]["affects"]["vendor"]["vendor_data"]:
        for product_data in vendor_data["product"]["product_data"]:
            for version_data in product_data["version"]["version_data"]:
                affected_versions.append(
                    AffectedProductVersion(
                        data_nist=database_object,
                        vendor_name=vendor_data["vendor_name"],
                        product_name=product_data["product_name"],
                        version_value=version_data["version_value"],
                    )
                )

    AffectedProductVersion.objects.bulk_create(
        affected_versions
    )  # save all the version information

    return database_object  # in case the caller needs it


with open("nvdcve-1.0-2019.json") as infp:
    data = json.load(infp)

for item in data["CVE_Items"]:
    import_item(item)
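As noted above, when run against the real models the driver loop at the bottom should be wrapped in a transaction; a minimal sketch:

from django.db import transaction

with transaction.atomic():
    with open("nvdcve-1.0-2019.json") as infp:
        data = json.load(infp)
    for item in data["CVE_Items"]:
        import_item(item)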
This code only creates one record. What is wrong?
class PartnerTagCreate(models.TransientModel):
    """ Choose tags to be added to partner."""
    _name = 'partner.tags.create'
    _description = __doc__

    market_id = fields.Many2one('partner.tags', string='Market Tag')
    application_id = fields.Many2one('partner.tags', string='Application Tag')
    partner_id = fields.Integer()

    @api.multi
    def create_contact_tag(self):
        for record in self.env['sale.order.line'].browse(self._context.get('active_ids', [])):
            vals = {}
            vals['partner_id'] = record.order_partner_id
            self.write(vals)
        return True
I need this function to create one record for each order_partner_id I selected before opening the wizard...
How to achieve that?
Here is my new code (function):
def create_contact_tag(self):
    sale_order_line_ids = self.env['sale.order.line'].browse(self._context.get('active_ids', []))
    for partner in sale_order_line_ids:
        values = {}
        values['partner_id'] = partner.order_partner_id
        self.create(values)
    return {}
This creates one record for market_id and/or application_id, and dedicated records for each partner_id in the selection.
You use the 'create' method to create new records; this is the same for TransientModel as for the persistent Model.
So, replace
self.write(vals)
by
self.create(vals)
and you should be fine.
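Put together, the wizard method could look roughly like this (a sketch, not tested; note that because partner_id is an Integer field, the numeric id is assigned rather than the record itself):

@api.multi
def create_contact_tag(self):
    lines = self.env['sale.order.line'].browse(self._context.get('active_ids', []))
    for line in lines:
        self.create({
            'partner_id': line.order_partner_id.id,   # Integer field, so store the id
            'market_id': self.market_id.id,           # copy the tags chosen in the wizard
            'application_id': self.application_id.id,
        })
    return True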
Is it possible to call
tasks = models.Conference.objects.filter(location_id=key)
data = serializers.serialize("json", tasks)
and have it return the verbose field names rather than the variable names?
One way to accomplish this is by monkey patching the methods within the django.core.serializers.python.Serializer class to return each field's verbose_name as opposed to the standard name attribute.
Take for example the following code...
models.py
from django.db import models


class RelatedNode(models.Model):
    name = models.CharField(max_length=100, verbose_name="related node")


class Node(models.Model):
    name = models.CharField(max_length=100, verbose_name="verbose name")
    related_node = models.ForeignKey(RelatedNode, verbose_name="verbose fk related node", related_name="related_node")
    related_nodes = models.ManyToManyField(RelatedNode, verbose_name="verbose related m2m nodes", related_name="related_nodes")
I create these model objects within the database...
RelatedNode.objects.create(name='related_node_1')
RelatedNode.objects.create(name='related_node_2')
RelatedNode.objects.create(name='related_node_fk')
Node.objects.create(name='node_1', related_node=RelatedNode.objects.get(name='related_node_fk'))
Node.objects.all()[0].related_nodes.add(RelatedNode.objects.get(name='related_node_1'))
Node.objects.all()[0].related_nodes.add(RelatedNode.objects.get(name='related_node_2'))
views.py
from testing.models import Node
from django.utils.encoding import smart_text, is_protected_type
from django.core.serializers.python import Serializer
from django.core import serializers


def monkey_patch_handle_field(self, obj, field):
    value = field._get_val_from_obj(obj)
    # Protected types (i.e., primitives like None, numbers, dates,
    # and Decimals) are passed through as is. All other values are
    # converted to string first.
    if is_protected_type(value):
        self._current[field.verbose_name] = value
    else:
        self._current[field.verbose_name] = field.value_to_string(obj)


def monkey_patch_handle_fk_field(self, obj, field):
    if self.use_natural_foreign_keys and hasattr(field.rel.to, 'natural_key'):
        related = getattr(obj, field.name)
        if related:
            value = related.natural_key()
        else:
            value = None
    else:
        value = getattr(obj, field.get_attname())
    self._current[field.verbose_name] = value


def monkey_patch_handle_m2m_field(self, obj, field):
    if field.rel.through._meta.auto_created:
        if self.use_natural_foreign_keys and hasattr(field.rel.to, 'natural_key'):
            m2m_value = lambda value: value.natural_key()
        else:
            m2m_value = lambda value: smart_text(value._get_pk_val(), strings_only=True)
        self._current[field.verbose_name] = [m2m_value(related)
                                             for related in getattr(obj, field.name).iterator()]


Serializer.handle_field = monkey_patch_handle_field
Serializer.handle_fk_field = monkey_patch_handle_fk_field
Serializer.handle_m2m_field = monkey_patch_handle_m2m_field

serializers.serialize('json', Node.objects.all())
This outputs for me...
u'[{"fields": {"verbose fk related node": 3, "verbose related m2m nodes": [1, 2], "verbose name": "node_1"}, "model": "testing.node", "pk": 1}]'
As we can see, this gives us back the verbose_name of each field as the keys in the returned dictionaries.