I have a Django model with start date/time and end date/time where all four components may (independently) be a null value (and there is a semantic difference between a null/unknown value and a known value). I am trying to implement a database constraint [1, 2] to check that if they are non-null that the start date/time is before the end date/time.
I have implemented the constraint in two different ways (commented as Option 1, a single constraint, and Option 2, as two constraints) below:
from django.db import models


class Event(models.Model):
    """Event whose start/end date and time may each independently be NULL.

    NULL means "unknown", which is semantically different from a known
    value, so every constraint below only enforces ordering when the
    compared components are all non-NULL.
    """

    start_date = models.DateField(blank=True, null=True)
    start_time = models.TimeField(blank=True, null=True)
    end_date = models.DateField(blank=True, null=True)
    end_time = models.TimeField(blank=True, null=True)

    class Meta:
        constraints = [
            # Option 1: single constraint. Passes when either date is
            # unknown, the dates are strictly ordered, or the dates are
            # equal and the times are unknown or ordered.
            models.CheckConstraint(
                check=(
                    models.Q(start_date__isnull=True)
                    | models.Q(end_date__isnull=True)
                    | models.Q(start_date__lt=models.F('end_date'))
                    | (
                        (
                            models.Q(start_time__isnull=True)
                            | models.Q(end_time__isnull=True)
                            | models.Q(start_time__lte=models.F('end_time'))
                        )
                        & models.Q(start_date=models.F('end_date'))  # This line
                    )
                ),
                name='start_date_and_time_lte_end_date_and_time',
            ),
            # Option 2: split into two simpler constraints.
            models.CheckConstraint(
                check=(
                    models.Q(start_date__isnull=True)
                    | models.Q(end_date__isnull=True)
                    | models.Q(start_date__lte=models.F('end_date'))
                ),
                name='start_date_lte_end_date',
            ),
            models.CheckConstraint(
                # BUG FIX: the original wrote 'start_time_gt' (single
                # underscore), which Django parses as a field named
                # 'start_time_gt' rather than a __gt lookup on
                # start_time, and would fail at schema/query time.
                check=~(
                    models.Q(start_date=models.F('end_date'))
                    & models.Q(start_time__gt=models.F('end_time'))
                ),
                name='not_start_date_eq_end_date_and_start_time_gt_end_time',
            ),
        ]
When I run makemigrations both options succeed.
With Option 1, when I try to use the model in a test:
class EventModelTest(TestCase):
    """Smoke test: an Event with every field NULL must satisfy the constraints."""

    def test_simple(self):
        created = Event.objects.create()
        self.assertTrue(isinstance(created, Event))
I get the error:
django.db.utils.DatabaseError: malformed database schema (packagename_event) - no such column: new_packagename_event.start_time
This error goes away if I comment out the line marked # this line (but doing that would make the constraint function incorrectly).
Option 2 appears to work perfectly, but it is less obvious that it handles null values correctly.
Are the check constraints in option 1 and option 2 equivalent?
Why does option 1 fail and what can be done to fix it? Is it because I am trying to compare the value of the same column(s) in two places in the same constraint or is there another reason?
Found a 3rd way to implement the constraints so that it is more obvious that NULL values are being considered correctly using two constraints:
from django.db import models


class Event(models.Model):
    """Event with independently-nullable start/end date and time fields."""

    start_date = models.DateField(blank=True, null=True)
    start_time = models.TimeField(blank=True, null=True)
    end_date = models.DateField(blank=True, null=True)
    end_time = models.TimeField(blank=True, null=True)

    class Meta:
        constraints = [
            # Option 3: two constraints whose NULL handling is explicit.
            # First: dates must be ordered whenever both are known.
            models.CheckConstraint(
                check=(
                    models.Q(start_date__isnull=True)
                    | models.Q(end_date__isnull=True)
                    | models.Q(start_date__lte=models.F('end_date'))
                ),
                name='start_date_lte_end_date',
            ),
            # Second: times must be ordered only in the remaining case,
            # i.e. when both dates are known and equal.
            models.CheckConstraint(
                check=(
                    models.Q(start_date__isnull=True)
                    | models.Q(end_date__isnull=True)
                    | models.Q(start_date__lt=models.F('end_date'))
                    | models.Q(start_time__isnull=True)
                    | models.Q(end_time__isnull=True)
                    | models.Q(start_time__lte=models.F('end_time'))
                ),
                name='not_start_date_eq_end_date_and_start_time_gt_end_time',
            ),
        ]
The two constraints will overlap exactly in the cases when:
start_date is null;
end_date is null; or
start_date < end_date
The remaining ways the checks can pass are when the first constraint is start_date = end_date and the second constraint is:
start_time is null;
end_time is null; or
start_time <= end_time
Which matches all the possible cases in Option 1.
On further testing, the model with constraints below demonstrates the same issue:
class SimpleModel( models.Model ):
    # Minimal repro for the "malformed database schema" error: only the
    # first constraint (an AND whose right operand is an OR group)
    # triggers it; the deliberately redundant Q objects isolate the
    # operator combination as the cause.
    value = models.IntegerField()

    class Meta:
        constraints = [
            # Q AND (Q OR Q): the failing shape.
            models.CheckConstraint(
                check = ( models.Q( value__gte = 0 )
                          & ( models.Q( value__gte = 0 )
                              | models.Q( value__gte = 0 ) # this line
                            )
                        ),
                name = "simplemodel_check1"
            ),
            # Q AND (Q AND Q): works.
            models.CheckConstraint(
                check = ( models.Q( value__gte = 0 )
                          & ( models.Q( value__gte = 0 )
                              & models.Q( value__gte = 0 )
                            )
                        ),
                name = "simplemodel_check2"
            ),
            # Plain OR: works.
            models.CheckConstraint(
                check = ( models.Q( value__gte = 0 ) | models.Q( value__gte = 0 ) ),
                name = "simplemodel_check3"
            ),
            # Plain AND: works.
            models.CheckConstraint(
                check = ( models.Q( value__gte = 0 ) & models.Q( value__gte = 0 ) ),
                name = "simplemodel_check4"
            ),
        ]
The 2nd, 3rd and 4th constraints work without issue but the 1st constraint causes an exception when trying to create an instance of the model with an error:
django.db.utils.DatabaseError: malformed database schema (packagename_event) - no such column: new_packagename_simplemodel.value
It appears to be an issue with the combination of & and |.
Related
I am using peewee as my ORM and my goal is to serialize the result of a complex query which also contains subqueries:
# Subquery: one row per machine holding its most recent usage date.
machine_usage_alias = RecordDailyMachineUsage.alias()
subquery = (
    machine_usage_alias.select(
        machine_usage_alias.machine_id,
        fn.MAX(machine_usage_alias.date).alias('max_date'),
    )
    .group_by(machine_usage_alias.machine_id)
    .alias('machine_usage_subquery')
)
# Join back to the usage table to fetch the usage value recorded on that
# latest date for each machine.
record_subquery = RecordDailyMachineUsage.select(
    RecordDailyMachineUsage.machine_id, RecordDailyMachineUsage.usage
).join(
    subquery,
    on=(
        (RecordDailyMachineUsage.machine_id == subquery.c.machine_id)
        & (RecordDailyMachineUsage.date == subquery.c.max_date)
    ),
)
# Main query: machine columns plus latest usage and GROUP_CONCAT'ed
# distinct tag/project names, grouped per machine.
query = (
    Machine.select(
        Machine.id, # 0
        Machine.name,
        Machine.location,
        Machine.arch,
        Machine.platform,
        Machine.machine_version,
        Machine.status,
        record_subquery.c.usage.alias('usage'),
        fn.GROUP_CONCAT(Tag.name.distinct()).alias('tags_list'),
        fn.GROUP_CONCAT(Project.full_name.distinct()).alias('projects_list'),
    ) # 10
    .join(MachineTag)
    .join(Tag)
    .switch(Machine)
    .join(MachineProject)
    .join(Project)
    # LEFT OUTER so machines without any usage record still appear.
    .join(
        record_subquery,
        JOIN.LEFT_OUTER,
        on=(Machine.id == record_subquery.c.machine_id),
    )
    .where((Machine.id != 0) & (Machine.is_alive == 1))
    .group_by(Machine.id)
)
I've tried to use the method model_to_dict:
jsonify({'rows': [model_to_dict(c) for c in query]})
But this way gives me the columns and values from the Machine model only. My aim is to include all the columns from the select query.
It turned out that I had to use the dicts method of the query and jsonify the result.
# Subquery: most recent usage date per machine.
machine_usage_alias = RecordDailyMachineUsage.alias()
subquery = (
    machine_usage_alias.select(
        machine_usage_alias.machine_id,
        fn.MAX(machine_usage_alias.date).alias('max_date'),
    )
    .group_by(machine_usage_alias.machine_id)
    .alias('machine_usage_subquery')
)
# Usage value recorded on that latest date for each machine.
record_subquery = RecordDailyMachineUsage.select(
    RecordDailyMachineUsage.machine_id, RecordDailyMachineUsage.usage
).join(
    subquery,
    on=(
        (RecordDailyMachineUsage.machine_id == subquery.c.machine_id)
        & (RecordDailyMachineUsage.date == subquery.c.max_date)
    ),
)
# Same query as before, but terminated with .dicts() so iteration yields
# plain dicts containing every selected column (not just Machine fields,
# which is all model_to_dict() would serialize).
query = (
    Machine.select(
        Machine.id, # 0
        Machine.name,
        Machine.location,
        Machine.arch,
        Machine.platform,
        Machine.machine_version,
        Machine.status,
        record_subquery.c.usage.alias('usage'),
        fn.GROUP_CONCAT(Tag.name.distinct()).alias('tags_list'),
        fn.GROUP_CONCAT(Project.full_name.distinct()).alias('projects_list'),
    ) # 10
    .join(MachineTag)
    .join(Tag)
    .switch(Machine)
    .join(MachineProject)
    .join(Project)
    .join(
        record_subquery,
        JOIN.LEFT_OUTER,
        on=(Machine.id == record_subquery.c.machine_id),
    )
    .where((Machine.id != 0) & (Machine.is_alive == 1))
    .group_by(Machine.id)
).dicts()
# The dicts are directly JSON-serializable.
return jsonify({'rows': [c for c in query]})
I'm trying to iterate through 4 columns in a CSV that each contain a different number of sale ids.
I make a pandas dataframe and convert each row to a list.
If a column has a greater amount of sale ids than the following column it gives me an error:
Message: no such element: Unable to locate element: {"method":"xpath","selector":"/html/body/form[1]/div/select/option[#value=nan]"}
however, if all columns have the same number of ids each, the code works fine.
def get_report_data(self):
    # For each campaign type, iterate its sale ids from the CSV and click
    # the matching <option> element in the corresponding form via Selenium.
    current_date = helpers.currentDate
    data = pd.read_csv(r'C:\Users\rford\Desktop\sale_ids.csv')
    # NOTE(review): pandas pads columns shorter than the longest one with
    # NaN floats; those NaN values reach the XPath below and produce the
    # "no such element ... option[#value=nan]" failure described above.
    everyone_ids = data['Everyone'].tolist()
    dd_ids = data['Daily Deal'].tolist()
    targeted_ids = data['Targeted'].tolist()
    push_ids = data['Push Notification'].tolist()
    acq_ids = data['Acquisition'].tolist()
    # (form_code, human-readable type, id list) per campaign.
    for form_code, sales_type, idlist in (
        ( 1, "Everyone", everyone_ids ),
        ( 1, "Daily Deal", dd_ids ),
        ( 2, "Targeted", targeted_ids ),
        ( 2, "Push Notification", push_ids ),
        ( 2, "Acquisition", acq_ids ) ):
        print('Gathering {} Sale Information'.format(sales_type))
        for sale_id in idlist:
            results = []
            helpers.WebDriverWait(helpers.driver, 10)
            helpers.driver.find_element_by_xpath('/html/body/form[{}]/div/select/option[#value={}]'.format(form_code, sale_id)).click()
The built-in function any might be useful in conjunction with each list's pop method:
def get_report_data(self):
    # Same flow as before, but consumes each id list destructively with
    # pop(0) while any(idlist) remains truthy.
    current_date = helpers.currentDate
    data = pd.read_csv(r'C:\Users\rford\Desktop\sale_ids.csv')
    everyone_ids = data['Everyone'].tolist()
    dd_ids = data['Daily Deal'].tolist()
    targeted_ids = data['Targeted'].tolist()
    push_ids = data['Push Notification'].tolist()
    acq_ids = data['Acquisition'].tolist()
    for form_code, sales_type, idlist in (
        ( 1, "Everyone", everyone_ids ),
        ( 1, "Daily Deal", dd_ids ),
        ( 2, "Targeted", targeted_ids ),
        ( 2, "Push Notification", push_ids ),
        ( 2, "Acquisition", acq_ids ) ):
        print('Gathering {} Sale Information'.format(sales_type))
        # NOTE(review): any() only stops once every remaining entry is
        # falsy; NaN padding from pandas is truthy, so this alone does not
        # filter NaN cells — TODO confirm against the fillna/astype fix.
        while any(idlist):
            results = []
            helpers.WebDriverWait(helpers.driver, 10)
            helpers.driver.find_element_by_xpath(
                '/html/body/form[{}]/div/select/option[#value={}]'.format(
                    form_code, idlist.pop(0)
                )
            ).click()
Turns out pandas was reading some cells of the csv as float.
The fix ended up being to use .fillna(0) on my dataframe and then turn each column to a list and make them integers with .astype(int)
# Read the sheet once, replace the NaN padding that pandas inserts for
# short columns with 0, then coerce every id column to plain ints.
df = pd.read_csv(r'C:\Users\rford\Desktop\sale_ids.csv')
data = df.fillna(0)
everyone_ids, dd_ids, targeted_ids, push_ids, acq_ids = [
    data[column].astype(int).tolist()
    for column in ('Everyone', 'Daily Deal', 'Targeted',
                   'Push Notification', 'Acquisition')
]
I am trying to build a complex filter with the Django ORM and am running into an issue where objects without a foreign key are not being included. This is due to an inner join that is being generated and I believe it should be a left outer join. I have two models Report and Message. Report has a reference to a particular Message but that could also be null.
class Report(BaseModel):
    # Message this report refers to; nullable, and cleared (SET_NULL)
    # when the referenced message is deleted.
    message = models.ForeignKey(
        Message,
        on_delete=models.SET_NULL,
        related_name="message_reports",
        null=True,
    )
    created_at = models.DateTimeField()
    # other fields


class Message(BaseModel):
    # Whether sending is enabled for this message.
    should_send_messages = models.BooleanField(
        default=True
    )
    # Hold window in hours; NULL apparently means "fall back to the
    # global default" — NOTE(review): inferred from the filter that uses
    # it, confirm with the callers.
    num_hold_hours = models.IntegerField(
        default=None,
        null=True,
    )
    # other fields
Here is the filter that I am trying to use.
# Reports whose message is absent or allows sending, older than a hold
# window: a global default (NUM_HOURS_TO_NOTIFY) when the message or its
# num_hold_hours is NULL, otherwise the message's own num_hold_hours.
# NOTE(review): the generated SQL uses an INNER JOIN to message, which
# drops message=NULL reports before the Q(message__isnull=True) branches
# can match — this is the left-join problem being asked about.
Report.objects.filter(
    Q(message__isnull=True) | Q(message__should_send_messages=True),
    created_at__lte=
    Case(
        When(
            Q(message__isnull=True) | Q(message__num_hold_hours__isnull=True),
            # Default window: now minus NUM_HOURS_TO_NOTIFY hours.
            then=ExpressionWrapper(
                timezone.now() - timedelta(hours=1) * Cast(
                    settings.NUM_HOURS_TO_NOTIFY
                    , output_field=IntegerField())
                , output_field=DateTimeField())),
        # Per-message window from num_hold_hours.
        default=ExpressionWrapper(
            timezone.now() - timedelta(hours=1) * Cast(
                F('message__num_hold_hours')
                , output_field=IntegerField())
            , output_field=DateTimeField()),
        output_field=DateTimeField()),
)
Here is the sql that is generated as a result of that filter block. (I'm not sure why the datetimes look like that ugly)
-- SQL actually generated by the filter above. Note the INNER JOIN: it
-- removes report rows whose message_id is NULL before the WHERE/CASE
-- logic written to handle them can ever apply.
SELECT "report"."message_id"
FROM "report"
INNER JOIN "message" ON (
    "report"."message_id" = "message"."id"
)
WHERE (
    (
        "report"."message_id" IS NULL
        OR "message"."should_send_messages" = True
    )
    AND "report"."created_at" <= (
        CASE
            WHEN (
                "report"."message_id" IS NULL
                OR "message"."num_hold_hours" IS NULL
            ) THEN (
                2020 -11 -24 07 :09 :22.401276 + 00 :00 - (1 :00 :00 * (48)::integer)
            )
            ELSE (
                2020 -11 -24 07 :09 :22.401833 + 00 :00 - (
                    1 :00 :00 * ("message"."num_hold_hours")::integer
                )
            )
        END
    )
)
ORDER BY "report"."created_at" DESC
I believe the output of that sql query should be.
-- Desired SQL: identical except for LEFT OUTER JOIN, which preserves
-- reports that have no related message row (message_id IS NULL).
SELECT "report"."message_id"
FROM "report"
LEFT OUTER JOIN "message" ON (
    "report"."message_id" = "message"."id"
)
WHERE (
    (
        "report"."message_id" IS NULL
        OR "message"."should_send_messages" = True
    )
    AND "report"."created_at" <= (
        CASE
            WHEN (
                "report"."message_id" IS NULL
                OR "message"."num_hold_hours" IS NULL
            ) THEN (
                2020 -11 -24 07 :09 :22.401276 + 00 :00 - (1 :00 :00 * (48)::integer)
            )
            ELSE (
                2020 -11 -24 07 :09 :22.401833 + 00 :00 - (
                    1 :00 :00 * ("message"."num_hold_hours")::integer
                )
            )
        END
    )
)
ORDER BY "report"."created_at" DESC
Any thoughts?
I am trying to get this renaming working as the locators are being duplicated and moved to the position of the joints.
For example, if I have a thigh_jnt, knee_jnt, ankle_jnt, the created locators will be named loc_thigh_jnt, loc_knee_jnt etc
However it is not working for me, as I am getting errors such as # ValueError: No object matches name: loc_0 #
Needless to say, the locator may be created but it is not at the position of the joint.
Also, can I ask if it is possible to create the locator for all the joints? Currently it is only creating one for the thigh and knee but not the ankle.
import maya.cmds as cmds

def createLoc():
    # Build a group of locators, one per selected joint, each placed at
    # its joint's world-space pivot and scale-driven by a group attribute.
    # NOTE(review): cmds.select() replaces the current selection by
    # default, so after these two calls the originally selected root
    # joint is no longer in the selection.
    cmds.select( cmds.listRelatives( type = 'joint', fullPath = True, allDescendents = True ) )
    cmds.select( cmds.listRelatives( parent = True, fullPath = True ) )
    sel = cmds.ls ( selection = True, type = 'joint' )
    if not sel :
        cmds.warning( "Please select a joint / No joints in selection " )
        return
    # Empty group exposing one 'locScale' attr that drives every locator.
    locGrp = cmds.group(n="loc_Grp_#", em=True)
    cmds.addAttr ( locGrp, attributeType = 'double' , longName = 'locScale' , defaultValue = 1.0 , keyable = 1 )
    masterLoc = cmds.spaceLocator(n="loc_0")[0]
    cmds.parent( masterLoc, locGrp )
    for attr in ["scaleZ", "scaleY", "scaleX"]:
        cmds.connectAttr ( locGrp + ".locScale" , "%s.%s" % ( masterLoc, attr ) )
    for jnt in sel:
        print jnt
        # World-space pivot of the joint (x, y, z).
        coords = cmds.xform ( jnt, query = True, worldSpace = True, pivots = True )[0:3]
        cmds.select( masterLoc, replace = True )
        cmds.duplicate( returnRootsOnly = True , inputConnections = True )
        # This is where the errors starts
        # NOTE(review): the commented call renames masterLoc (not the
        # fresh duplicate) and builds the new name from str(sel) — the
        # whole list — which is what raises the ValueError.
        #cmds.rename(str(masterLoc), ("loc_" + str(sel)))
        cmds.move( coords[0], coords[1], coords[2], rotatePivotRelative = True )
Here is your code snippet with some modifications and corrections to make it work.
import maya.cmds as cmds

def createLoc():
    # Corrected version: creates one locator per joint, named
    # "loc_<joint>", positioned at the joint's world-space pivot.
    # add=True keeps the originally selected root joint in the selection
    # (plain select() would replace it).
    cmds.select( cmds.listRelatives( type='joint', fullPath=True, allDescendents=True ), add=True )
    cmds.select( cmds.listRelatives( parent=True, fullPath=True ), add=True )
    sel = cmds.ls ( selection = True, type = 'joint' )
    if not sel :
        cmds.warning( "Please select a joint / No joints in selection " )
        return
    # Group exposing a single 'locScale' attr that drives every locator.
    locGrp = cmds.group(n="loc_Grp_#", em=True)
    cmds.addAttr ( locGrp, attributeType='double' , longName='locScale' , defaultValue=1.0 , keyable=1 )
    masterLoc = cmds.spaceLocator(n="loc_0")[0]
    cmds.parent( masterLoc, locGrp )
    for attr in ["scaleZ", "scaleY", "scaleX"]:
        cmds.connectAttr ( locGrp + ".locScale" , "%s.%s" % ( masterLoc, attr ) )
    # First iteration reuses masterLoc itself (no duplicate), so the
    # result is exactly one locator per joint instead of N+1.
    is_root_loop = True
    loc_to_rename = masterLoc
    for jnt in sel:
        print jnt
        # World-space pivot of the joint (x, y, z).
        coords = cmds.xform ( jnt, query=True, worldSpace=True, pivots=True )[0:3]
        cmds.select( masterLoc, replace=True )
        if not is_root_loop:
            # Capture the duplicate's actual name — Maya may adjust it.
            loc_to_rename = cmds.duplicate( returnRootsOnly=True , inputConnections=True )[0]
        # No more errors!
        renamed_loc = cmds.rename(str(loc_to_rename), ("loc_" + str(jnt)))
        if is_root_loop:
            # Track masterLoc under its new name for later duplications.
            masterLoc = renamed_loc
        cmds.move( coords[0], coords[1], coords[2], rotatePivotRelative=True )
        is_root_loop = False
In the first two cmds.select() calls, I added add=True flag. Without that flag, cmds.select() will assume replace=True by default. That is why your root joint was being ignored after this call.
In the for loop, the masterLoc was being duplicated N times, where N is the number of joints, thus resulting in N+1 locators (including the masterLoc). So I added the is_root_loop flag to check if the loop is running for the first time. During this run, we manipulate the masterLoc itself (without duplicating it), rename the masterLoc and store the name. From the second iteration of this loop, we use this masterLoc to duplicate and rename the duplicated locators as you had previously written.
Another change I did was storing the name of the duplicated locator
loc_to_rename = cmds.duplicate( returnRootsOnly=True , inputConnections=True )[0]
And used this to rename. That is where you were getting the errors because you were trying to rename masterLoc in every iteration.
Also, it is always important to catch the return results of commands like cmds.duplicate and cmds.rename, as the name they assign may not always be as expected, as Maya will append a number or increment the number at the end of the new name if a name clash occurs with something else in the scene.
I hope this helped!
I want to add an xml to a QTextEdit, this is my code
# Read-only text box showing the raw XML document source.
self.XMLField = QtGui.QTextEdit()  # Alternative: QTextEdit
self.XMLField.setReadOnly( True )
self.XMLField.setAcceptRichText( True )
# BUG FIX: QTextEdit.append() auto-detects HTML-looking input and renders
# it as rich text, so the XML tags were parsed away and only node values
# and attributes were displayed. setPlainText() shows the markup verbatim.
self.XMLField.setPlainText( data.toxml() )
print( data.toxml() )
The print is working, so I get the whole XML, but in the text box I only get the nodeValues and attributes.
ADDED
This is the full code:
# Load the per-client settings XML and show its raw source in the dialog.
# NOTE(review): original indentation was lost; nesting reconstructed from
# the guard conditions — confirm the XML-display section belongs at the
# outer level.
path = "settings/%s.xml" % str( self.clientName )
print( path )
data = xml.dom.minidom.parse( path )
lidar = data.getElementsByTagName( 'lidar' )
if( lidar.length > 0 ):
    positive_towards_LOS = lidar[0].getAttribute( 'positive_towards_LOS' )
    scanner_3D = lidar[0].getAttribute( 'scanner_3D' )
    name = ( lidar[0].getElementsByTagName( 'name' ) )
    if( name.length > 0 ):
        title = 'Windscanner - %s Lidar Properties' % name[0].firstChild.nodeValue
        self.setWindowTitle( title )
""" XML Showup """
self.XMLField = QtGui.QTextEdit()  # Alternative: QTextEdit
self.XMLField.setReadOnly( True )
self.XMLField.setAcceptRichText( True )
# BUG FIX: append() treats HTML-looking input as rich text, stripping the
# XML tags; setPlainText() shows the document source verbatim.
self.XMLField.setPlainText( data.toxml() )