Using data class with strings - python

I'm simplifying a project, and it's my first time splitting an entire script into modules/packages, so I have the following class:
class Decorators:
    """Namespace for reusable ASCII-art banner strings.

    The banners are *class* attributes, so they can be read without
    instantiating the class (e.g. ``Decorators.special_decorator``).
    Raw strings (``r'''...'''``) keep every backslash in the art literal.

    NOTE(review): the paste had lost all indentation; structure restored.
    """

    def __init__(self):
        # No per-instance state is needed; kept only for backward
        # compatibility with callers that instantiate the class.
        pass

    # A small horizontal divider banner.
    decorator = r'''
/>
( //-------------------------------------(
(*)OXOXOXOXO(*>======================================\
( \\---------------------------------------)
\>
'''

    # A larger decorative banner.
    special_decorator = r'''
.
/ )) |\ ) ).
c--. (\ ( `. / ) (\ ( `. ). ( (
| | )) ) ) ( ( `.`. ) ) ( ( ) )
| | ( ( / _..----.._ ) | ( ( _..----.._ ( (
,-. | |---) V.'-------.. `-. )-/.-' ..------ `--) \._
| /===========| | ( | ) ( ``-.`\/'.-'' ( ) ``-._
| | / / / / / | |---------------------> <-------------------------_>=-
| \===========| | ..-'./\.`-.. _,,-'
`-' | |-------._------''_.-'----`-._``------_.-----'
| | ``----'' ``----''
| |
c--`
'''
I want to know how I can access the strings from this class to print them out in the main function, for example:
import decorators
def main():
print(decorators.special_decorator)
I want it to print the string that I'm calling from the module

Related

"sh: no closing quote" When trying to make an ASCII echo convert script

I'm trying to convert an ASCII art file to an echo command which can run it without any errors, I'm getting an error when it tries to run the script at the last line, the error is:
sh: no closing quote
I probably made my script the wrong way, how would I fix it?
The script:
import sys, os


def build_echo_command(text):
    """Return an ``echo -e`` shell command line that prints *text*.

    Each shell metacharacter ``c`` is emitted as ``"\c"``: the first quote
    closes the current double-quoted section, the backslash escapes the
    character, and the next quote reopens the string.

    Bug fixes vs. the original:
    * the command string is now terminated with a closing double quote —
      the missing quote was the cause of ``sh: no closing quote``;
    * the ``chars_to_escape`` literal is closed (the original paste was
      truncated mid-list; the tail is reconstructed — TODO confirm the
      exact set against the author's intent).
    """
    # Characters the shell would otherwise interpret inside "...".
    chars_to_escape = set('!"#$&\'()*;<>?[\\]^`{|}~')
    parts = ['echo -e "']
    for char in text:
        if char in chars_to_escape:
            parts.append('"\\' + char + '"')
        else:
            parts.append(char)
    parts.append('"')  # terminate the opening double quote
    return ''.join(parts)


def main():
    """Read ASCII art from stdin, print the echo command, then run it."""
    full = build_echo_command(sys.stdin.read())
    print(full)
    os.system(full)


if __name__ == '__main__':
    main()
The ASCII art:
,--.
{ }
K, }
/ `Y`
_ / /
{_'-K.__/
`/-.__L._
/ ' /`\_}
/ ' /
____ / ' /
,-'~~~~ ~~/ ' /_
,' ``~~~%%',
( % Y
{ %% I
{ - % `.
| ', % )
| | ,..__ __. Y
| .,_./ Y ' / ^Y J )|
\ |' / | | ||
\ L_/ . _ (_,.'(
\, , ^^""' / | )
\_ \ /,L] /
'-_`-, ` ` ./`
`-(_ )
^^\..___,.--`
I forgot to add the last quote to the full variable.

Apache Beam : WriteToBigQuery does not work if preceded by a stateful Transform unless re-windowing is applied

I have a simple pipeline that does not work as expected, and any documentation I find does not explain its behaviour. In short, WriteToBigQuery in streaming mode fails if it is preceded by a stateful Transform, like GroupIntoBatches, unless global windows are re-applied before streaming it into BQ. Does anyone have a meaningful explanation?
This does work:
result = (
p
| "Read Data"
>> beam.io.ReadFromPubSub(
subscription="projects/myproject/subscriptions/mysubscription",
with_attributes=False,
).with_output_types(bytes)
| "Decompose Data" >> beam.ParDo(DecomposeData())
| "Window into Fixed Intervals"
>> beam.WindowInto(window.FixedWindows(self.window_size))
| "Transform Data" >> beam.ParDo(TransformData())
| "Write to BigQuery Table" >> beam.io.WriteToBigQuery(
table=_choose_table,
schema=_choose_schema,
create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND))
This fails:
result = (
p
| "Read Data"
>> beam.io.ReadFromPubSub(
subscription="projects/myproject/subscriptions/mysubscription",
with_attributes=False,
).with_output_types(bytes)
| "Decompose Data" >> beam.ParDo(DecomposeData())
| "Window into Fixed Intervals"
>> beam.WindowInto(window.FixedWindows(self.window_size))
| "Add Dummy Key" >> beam.Map(lambda elem: (None, elem))
| "Group into Batches"
>> beam.GroupIntoBatches(
max_buffering_duration_secs=self.window_size,
batch_size=self.batch_size,
)
| "Discard Dummy Key" >> beam.MapTuple(lambda _, val: val)
| "Transform Data" >> beam.ParDo(TransformData())
| "Write to BigQuery Table" >> beam.io.WriteToBigQuery(
table=_choose_table,
schema=_choose_schema,
create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND))
This works again:
result = (
p
| "Read Data"
>> beam.io.ReadFromPubSub(
subscription="projects/myproject/subscriptions/mysubscription",
with_attributes=False,
).with_output_types(bytes)
| "Decompose Data" >> beam.ParDo(DecomposeData())
| "Window into Fixed Intervals"
>> beam.WindowInto(window.FixedWindows(self.window_size))
| "Add Dummy Key" >> beam.Map(lambda elem: (None, elem))
| "Group into Batches"
>> beam.GroupIntoBatches(
max_buffering_duration_secs=self.window_size,
batch_size=self.batch_size,
)
| "Discard Dummy Key" >> beam.MapTuple(lambda _, val: val)
| "Transform Data" >> beam.ParDo(TransformData())
| "Re-window" >> beam.WindowInto(window.GlobalWindows())
| "Write to BigQuery Table" >> beam.io.WriteToBigQuery(
table=_choose_table,
schema=_choose_schema,
create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND))
This is very weird as I am using the same approach, before WriteToBigQuery I am batching elements. This is what I have:
tagged_events = (
pcoll
| "Key events" >> WithKeys(add_keys)
| "With timestamps" >> ParDo(AddTimestamp())
| "Window"
>> WindowInto(
windowfn=FixedWindows(self.window_interval_seconds),
allowed_lateness=self.window_lateness_seconds,
)
| "Get latest elements per key" >> combiners.Latest.PerKey()
| "Reshuffle elements" >> Reshuffle()
| "Prepare for insertion" >> FlatMapTuple(prepare_for_insert)
| "Tag Value" >> ParDo(TagTableValue()).with_outputs(*BQ_TABLES)
)
for table in BQ_TABLES:
errors = (
tagged_events[table]
| f"Reshuffle to {table}" >> Reshuffle()
| f"Batch {table} elements"
>> BatchElements(
min_batch_size=500, max_batch_size=500
)
| f"Flatten batch of {table}" >> FlatMap(flatten_list)
| f"Write to {table}" >> WriteToBigQuery(
table=f"{self.project}:{self.dataset}.{table}",
create_disposition=BigQueryDisposition.CREATE_NEVER,
write_disposition=BigQueryDisposition.WRITE_APPEND,
insert_retry_strategy=RetryStrategy.RETRY_NEVER,
ignore_insert_ids=True,
batch_size=BIGQUERY_INSERT_MAX_BATCH_SIZE,
)
)
One thing to note is that batching yields a batch, meaning a list of elements to be inserted, but the BigQuery sink expects single elements. That's why I have that Flatten step before insert.

Implementing Django 2.2 database constraint across multiple columns

I have a Django model with start date/time and end date/time where all four components may (independently) be a null value (and there is a semantic difference between a null/unknown value and a known value). I am trying to implement a database constraint [1, 2] to check that if they are non-null that the start date/time is before the end date/time.
I have implemented the constraint in two different ways (commented as Option 1, a single constraint, and Option 2, as two constraints) below:
from django.db import models


class Event( models.Model ):
    """Event with independently nullable start/end date and time.

    The check constraints enforce start <= end whenever the compared
    components are non-null; any NULL component lets the row pass
    (Options 1 and 2 are intended to be equivalent formulations).

    NOTE(review): the paste had lost all indentation; structure restored.
    """
    start_date = models.DateField( blank = True, null = True )
    start_time = models.TimeField( blank = True, null = True )
    end_date = models.DateField( blank = True, null = True )
    end_time = models.TimeField( blank = True, null = True )

    class Meta:
        constraints = [
            # Option 1: a single constraint covering both date and time.
            models.CheckConstraint(
                check = ( models.Q( start_date__isnull = True )
                          | models.Q( end_date__isnull = True )
                          | models.Q( start_date__lt = models.F( 'end_date' ) )
                          | ( ( models.Q( start_time__isnull = True )
                                | models.Q( end_time__isnull = True )
                                | models.Q( start_time__lte = models.F( 'end_time' ) )
                              )
                              & models.Q( start_date = models.F( 'end_date' ) )  # This line
                            )
                        ),
                name = 'start_date_and_time_lte_end_date_and_time'
            ),
            # Option 2: split into two simpler constraints.
            models.CheckConstraint(
                check = ( models.Q( start_date__isnull = True )
                          | models.Q( end_date__isnull = True )
                          | models.Q( start_date__lte = models.F( 'end_date' ) )
                        ),
                name = 'start_date_lte_end_date'
            ),
            models.CheckConstraint(
                # Bug fix: the field lookup needs a double underscore
                # (start_time__gt); the original 'start_time_gt' would be
                # treated as an unknown field and raise FieldError.
                check = ~( models.Q( start_date = models.F( 'end_date' ) )
                           & models.Q( start_time__gt = models.F( 'end_time' ) )
                         ),
                name = 'not_start_date_eq_end_date_and_start_time_gt_end_time'
            ),
        ]
When I run makemigrations both options succeed.
With Option 1, when I try to use the model in a test:
class EventModelTest( TestCase ):
def test_simple(self):
obj = Event.objects.create()
self.assertTrue( isinstance( obj, Event ) )
I get the error:
django.db.utils.DatabaseError: malformed database schema (packagename_event) - no such column: new_packagename_event.start_time
This error goes away if I comment out the line marked # this line (but doing that would make the constraint function incorrectly).
Option 2 appears to work perfectly but is less obvious that it is going to consider null values correctly.
Are the check constraints in option 1 and option 2 equivalent?
Why does option 1 fail and what can be done to fix it? Is it because I am trying to compare the value of the same column(s) in two places in the same constraint or is there another reason?
Found a 3rd way to implement the constraints so that it is more obvious that NULL values are being considered correctly using two constraints:
from django.db import models


class Event( models.Model ):
    """Option 3: two constraints that make the NULL handling explicit.

    Together the two constraints accept a row when either date is NULL,
    when start_date < end_date, or when the dates are equal and the time
    comparison (with NULL times allowed) passes.

    NOTE(review): the paste had lost all indentation; structure restored.
    """
    start_date = models.DateField( blank = True, null = True )
    start_time = models.TimeField( blank = True, null = True )
    end_date = models.DateField( blank = True, null = True )
    end_time = models.TimeField( blank = True, null = True )

    class Meta:
        constraints = [
            # Option 3
            models.CheckConstraint(
                check = ( models.Q( start_date__isnull = True )
                          | models.Q( end_date__isnull = True )
                          | models.Q( start_date__lte = models.F( 'end_date' ) )
                        ),
                name = 'start_date_lte_end_date'
            ),
            models.CheckConstraint(
                check = ( models.Q( start_date__isnull = True )
                          | models.Q( end_date__isnull = True )
                          | models.Q( start_date__lt = models.F( 'end_date' ) )
                          | models.Q( start_time__isnull = True )
                          | models.Q( end_time__isnull = True )
                          | models.Q( start_time__lte = models.F( 'end_time' ) )
                        ),
                name = 'not_start_date_eq_end_date_and_start_time_gt_end_time'
            ),
        ]
The two constraints will overlap exactly in the cases when:
start_date is null;
end_date is null; or
start_date < end_date
The remaining ways the checks can pass are when the first constraint is start_date = end_date and the second constraint is:
start_time is null;
end_time is null; or
start_time <= end_time
Which matches all the possible cases in Option 1.
On further testing, the model with constraints below demonstrates the same issue:
class SimpleModel( models.Model ):
    """Minimal model reproducing the malformed-schema error.

    Per the surrounding report, constraints 2-4 work, but the first —
    which combines & and | of the same predicate — triggers
    ``DatabaseError: malformed database schema ... no such column`` when
    SQLite rebuilds the table; presumably a quirk of how the mixed
    AND/OR expression is compiled into the CHECK clause — TODO confirm.

    NOTE(review): the paste had lost all indentation; structure restored.
    """
    value = models.IntegerField()

    class Meta:
        constraints = [
            # Combination of & and | — the failing case.
            models.CheckConstraint(
                check = ( models.Q( value__gte = 0 )
                          & ( models.Q( value__gte = 0 )
                              | models.Q( value__gte = 0 )  # this line
                            )
                        ),
                name = "simplemodel_check1"
            ),
            # Pure & nesting — works.
            models.CheckConstraint(
                check = ( models.Q( value__gte = 0 )
                          & ( models.Q( value__gte = 0 )
                              & models.Q( value__gte = 0 )
                            )
                        ),
                name = "simplemodel_check2"
            ),
            # Flat | — works.
            models.CheckConstraint(
                check = ( models.Q( value__gte = 0 ) | models.Q( value__gte = 0 ) ),
                name = "simplemodel_check3"
            ),
            # Flat & — works.
            models.CheckConstraint(
                check = ( models.Q( value__gte = 0 ) & models.Q( value__gte = 0 ) ),
                name = "simplemodel_check4"
            ),
        ]
The 2nd, 3rd and 4th constraints work without issue but the 1st constraint causes an exception when trying to create an instance of the model with an error:
django.db.utils.DatabaseError: malformed database schema (packagename_event) - no such column: new_packagename_simplemodel.value
It appears to be an issue with the combination of & and |.

Cannot maintain focus on element in Selenium Python driver

I am trying to control the web by python to run a script and download the corresponding csv file.
Here is what the web page looks like, with a dashboard menu to click the "Search" button. Once the Search button is clicked, it shows a Search text box where one can enter a code and press enter to run.
Now I need to find the element of this Search box. From Inspect in Chrome, looks like below:
So I used the following code. I also used Actions to keep the focus on search box before I copy the code from a text file and send it to that search box.
def run_code():
    """Open the Search box, send it the query stored in
    data_download_code.txt, and press ENTER to run it.

    Fixes vs. the original (per the accepted answer below):
    * keystrokes go to the Ace editor's hidden text input
      (class ``ace_text-input``), not to the ``ace_content`` <div> —
      sending keys to a div raises "cannot focus element";
    * the ActionChains sequence is dropped — it was never executed
      anyway (``.perform()`` was never called) and is not needed.

    NOTE(review): relies on module-level ``driver``, ``time`` and
    ``Keys`` defined elsewhere in this module.
    """
    search_button = driver.find_element_by_link_text("Search")
    search_button.click()
    time.sleep(2)

    with open('data_download_code.txt', 'r') as f:
        code_file = f.read()

    # The Ace editor receives keystrokes through this hidden input.
    content_box = driver.find_element_by_class_name("ace_text-input")
    content_box.send_keys(code_file, Keys.ENTER)
However it throws an error of focus not on element.
I am not sure if I got the right element selector for Search from the attached html file, or it is just some focus issue. I did use Actions class there to get the focus.
I want the code to read the text in the txt file and send it to the search box and press enter to run it.
WebDriverException: Message: unknown error: cannot focus element
(Session info: chrome=71.0.3578.98)
EDIT: Extra html details for selector
Edit 2:
Edit 3:
So I am able to get the element for Search, and it is able to copy the code from a txt file and enter it in the search box, but I see it is not able to copy the whole code correctly, hence it gives an error. Please see the attached full code and how much got copied.
sourcetype=perf_log_bizx
(host=pc*bcf* OR host=pc*bsfapi* OR servername=pc*bcf* OR servername=pc*bsfapi*) OR
(host=sc*bcf* OR host=sc*bsfapi* OR servername=sc*bcf* OR servername=sc*bsfapi*) OR
(host=ac*bcf* OR host=ac*bsfapi* OR servername=ac*bcf* OR servername=ac*bsfapi*) OR
NOT "/perfLogServlet" NOT "REQ-\[*" earliest="12/18/2018:08:00:00" latest="12/18/2018:12:00:00"
| rex field=_raw "\[(?<client_ip>[\d\.]+)\]\s+\[(?<company_id>[^,]+),(?<company_name>[^,]+),(?<company_schema>[\w\.]+),(?<dbpool>[^,]+),(?<user_id>[^,]+),\S+\]\s+\S+\s+\S+\s+(?<render_time>\d+)\s(?<server_time>\d+)\s(?<end2end_time>\d+)\s+\S+\s\S+\s\[.*\]\s+\d+\-(?<call_id>\d+)\s+(?<module_id>(-|\w+))\s(?<page_name>(-|\w+))\s(?<page_qualifier>(-|\w+))"
| rex field=_raw "\[\[(?<MemoryAllocated>\d+)\w+\s+(?<CPUTimeTotal>\d+)\w+\s+(?<CPUTimeUser>\d+)\w+\s+(?<CPUTimeSystem>\d+)\w+\s+(?<FileRead>\d+)\w+\s+(?<FileWrite>\d+)\w+\s+(?<NetworkRead>\d+)\w+\s+(?<NetworkWrite>\d+)\w+\s+(?<NotClosedFiles>(\d+|-))\s+(?<NotClosedSockets>(\d+|-))\s+\]\]\s+(?<SQLInvokes>\d+)\s+(?<SQLTimeTotal>\d+)"
| eval company_id = ifnull(CMID, company_id)
| eval dbpool = ifnull(DPN, dbpool)
| eval company_schema =ifnull(SN, company_schema)
| eval user_id = ifnull(UID, user_id)
| eval module_id = ifnull(MID, module_id)
| eval page_name = ifnull(PID, page_name)
| eval page_qualifier = ifnull(PQ, page_qualifier)
| rex field=CAID "\d+\-(?<CAID>\d+)"
| eval call_id = ifnull(CAID, call_id)
| eval render_time = ifnull(RDT, render_time)
| eval server_time = ifnull(SVT, server_time)
| eval end2end_time = ifnull(EET, end2end_time)
| eval MemoryAllocated = ifnull(MEM, MemoryAllocated)
| eval CPUTimeTotal = ifnull(CPU, CPUTimeTotal)
| eval CPUTimeUser = ifnull(UCPU, CPUTimeUser)
| eval CPUTimeSystem = ifnull(SCPU, CPUTimeSystem)
| eval FileRead = ifnull(FRE, FileRead)
| eval FileWrite = ifnull(FWR, FileWrite)
| eval NetworkRead = ifnull(NRE, NetworkRead)
| eval NetworkWrite = ifnull(NWR, NetworkWrite)
| eval NotClosedFiles = ifnull(0, NotClosedFiles)
| eval NotClosedSockets = ifnull(0, NotClosedSockets)
| eval SQLInvokes = ifnull(SQLC, SQLInvokes)
| eval SQLTimeTotal = ifnull(SQLT, SQLTimeTotal)
| eval request_type = if(call_id=0,"Root", "Subaction")
| search call_id = 0 AND page_name!="NONE"
| eval full_page_name = module_id + "-" + page_name + "-" + page_qualifier + " [" + request_type + "]"
| eval has_open_sockets = if ( ifnull(NotClosedSockets,0) > 0, 1, 0)
| eval has_open_files = if ( ifnull(NotClosedFiles,0) > 0, 1, 0)
| eval time = strftime( _time, "%Y-%m-%d %H:%M:%S" )
| eval server = ifnull(servername, host)
| rex field=server "\w(?<dc>\d+)\w"
| eval dc_name = "DC" + tostring(dc)
| eval server_type = if (substr(server, 1, 2) = "pc", "PROD", if (substr(server, 1, 2) = "sc", "PREVIEW", if (substr(server, 1, 2) = "ac", "QA", "OTHER") ) )
| eval dc_company_user = dc + "|" + company_id + "|" + sha256( user_id )
| table
time,
dc_name,
server_type,
dbpool,
company_id,
full_page_name,
dc_company_user,
server_time,
end2end_time,
SQLInvokes,
SQLTimeTotal,
MemoryAllocated[![enter image description here][6]][6]
Edit4:
The code read from the txt file also includes \n characters. So the string has \n in it, and I guess that might be causing issues when it is sent to the WebDriver to run in the search box. Is it possible to read the code as it appears in the edit above?
'sourcetype=perf_log_bizx\n(host=pc*bcf* OR host=pc*bsfapi* OR servername=pc*bcf* OR servername=pc*bsfapi*) OR\n(host=sc*bcf* OR host=sc*bsfapi* OR servername=sc*bcf* OR servername=sc*bsfapi*) OR\n(host=ac*bcf* OR host=ac*bsfapi* OR servername=ac*bcf* OR servername=ac*bsfapi*) OR\nNOT "/perfLogServlet" NOT "REQ-\\[*" earliest="12/18/2018:08:00:00" latest="12/18/2018:12:00:00" \n \n | rex field=_raw "\\[(?<client_ip>[\\d\\.]+)\\]\\s+\\[(?<company_id>[^,]+),(?<company_name>[^,]+),(?<company_schema>[\\w\\.]+),(?<dbpool>[^,]+),(?<user_id>[^,]+),\\S+\\]\\s+\\S+\\s+\\S+\\s+(?<render_time>\\d+)\\s(?<server_time>\\d+)\\s(?<end2end_time>\\d+)\\s+\\S+\\s\\S+\\s\\[.*\\]\\s+\\d+\\-(?<call_id>\\d+)\\s+(?<module_id>(-|\\w+))\\s(?<page_name>(-|\\w+))\\s(?<page_qualifier>(-|\\w+))"\n | rex field=_raw "\\[\\[(?<MemoryAllocated>\\d+)\\w+\\s+(?<CPUTimeTotal>\\d+)\\w+\\s+(?<CPUTimeUser>\\d+)\\w+\\s+(?<CPUTimeSystem>\\d+)\\w+\\s+(?<FileRead>\\d+)\\w+\\s+(?<FileWrite>\\d+)\\w+\\s+(?<NetworkRead>\\d+)\\w+\\s+(?<NetworkWrite>\\d+)\\w+\\s+(?<NotClosedFiles>(\\d+|-))\\s+(?<NotClosedSockets>(\\d+|-))\\s+\\]\\]\\s+(?<SQLInvokes>\\d+)\\s+(?<SQLTimeTotal>\\d+)"\n \n | eval company_id = ifnull(CMID, company_id)\n | eval dbpool = ifnull(DPN, dbpool)\n | eval company_schema =ifnull(SN, company_schema)\n | eval user_id = ifnull(UID, user_id)\n \n | eval module_id = ifnull(MID, module_id)\n | eval page_name = ifnull(PID, page_name)\n | eval page_qualifier = ifnull(PQ, page_qualifier)\n \n | rex field=CAID "\\d+\\-(?<CAID>\\d+)"\n | eval call_id = ifnull(CAID, call_id)\n \n | eval render_time = ifnull(RDT, render_time)\n | eval server_time = ifnull(SVT, server_time)\n | eval end2end_time = ifnull(EET, end2end_time)\n | eval MemoryAllocated = ifnull(MEM, MemoryAllocated)\n | eval CPUTimeTotal = ifnull(CPU, CPUTimeTotal)\n | eval CPUTimeUser = ifnull(UCPU, CPUTimeUser)\n | eval CPUTimeSystem = ifnull(SCPU, CPUTimeSystem)\n | eval FileRead = ifnull(FRE, FileRead)\n | eval FileWrite = ifnull(FWR, 
FileWrite)\n | eval NetworkRead = ifnull(NRE, NetworkRead)\n | eval NetworkWrite = ifnull(NWR, NetworkWrite)\n | eval NotClosedFiles = ifnull(0, NotClosedFiles)\n | eval NotClosedSockets = ifnull(0, NotClosedSockets)\n | eval SQLInvokes = ifnull(SQLC, SQLInvokes)\n | eval SQLTimeTotal = ifnull(SQLT, SQLTimeTotal)\n \n | eval request_type = if(call_id=0,"Root", "Subaction")\n \n| search call_id = 0 AND page_name!="NONE"\n \n | eval full_page_name = module_id + "-" + page_name + "-" + page_qualifier + " [" + request_type + "]"\n | eval has_open_sockets = if ( ifnull(NotClosedSockets,0) > 0, 1, 0)\n | eval has_open_files = if ( ifnull(NotClosedFiles,0) > 0, 1, 0)\n | eval time = strftime( _time, "%Y-%m-%d %H:%M:%S" )\n | eval server = ifnull(servername, host)\n | rex field=server "\\w(?<dc>\\d+)\\w"\n | eval dc_name = "DC" + tostring(dc)\n | eval server_type = if (substr(server, 1, 2) = "pc", "PROD", if (substr(server, 1, 2) = "sc", "PREVIEW", if (substr(server, 1, 2) = "ac", "QA", "OTHER") ) )\n | eval dc_company_user = dc + "|" + company_id + "|" + sha256( user_id )\n \n| table\n time,\n dc_name,\n server_type,\n dbpool,\n company_id,\n full_page_name,\n dc_company_user,\n server_time,\n end2end_time,\n SQLInvokes,\n SQLTimeTotal,\n MemoryAllocated'
You should send keys to input field, but not to parent div. Try below instead:
content_box = driver.find_element_by_css_selector("div.ace_content input")
content_box.send_keys(code_file, Keys.ENTER)
or
content_box = driver.find_element_by_class_name('ace_text-input')
content_box.send_keys(code_file, Keys.ENTER)
Also note that most likely you won't need to use Actions
content_box=driver.find_element_by_class_name("ace_content")
this code will result in content_box being a "div" element. you can't send keys to a div element. inspect that div to find a "textarea" or "input" element, and set that to your content_box.
On top of #Andersson's answer (which you should accept btw, he did solve your problem ;) let me help you with stripping the \n from the source text. This code:
with open('data_download_code.txt', 'r') as f:
code_file= f.read()
, the read() method, returns the raw value of the file, with the EOL (end-of-line) characters intact. This though:
code_file = f.read().splitlines()
, will return it (in code_file) as a list of strings, each list member a line in the file. Now the question is - what to replace the EOL chars with? I'm not familiar with the language that's in it, so it's up to you to decide.
Say it is a semicolon, ;, this is how to transform the list back into a string:
code_file = ';'.join(code_file)
This will concatenate all list members in a single string, using that character as delimiter. Naturally, you just replace the char with whatever is applicable:
code_file = ' '.join(code_file) # a whitespace character
code_file = '\t'.join(code_file) # a tab
code_file = '\\n'.join(code_file) # a literal newline
code_file = 'whatever?'.join(code_file) # you name it
So the final form is:
with open('data_download_code.txt', 'r') as f:
    # read().splitlines() drops the EOL characters; the original
    # readlines() keeps the trailing '\n' on every member, so joining
    # would not actually remove the newlines.
    code_file = f.read().splitlines()
code_file = ';'.join(code_file)

Generated code is not indented

I am modifying an OIL file using a Python script. I have written an EBNF grammar to convert the OIL file to an AST using Grako, and I generate the OIL file back from the AST using codegen, but the OIL file is not indented (it is generated on one line).
Sample Oil file:
CPU dummy
{
OS StdOS
{
SCALABILITYCLASS = SC1;
STATUS = EXTENDED;
};
};
Generated Oil:
CPUdummy{OSStdOS{SCALABILITYCLASS=SC1;STATUS=EXTENDED;};};
EBNF grammar:
file = [{Comments_option}] OIL_version Includes [implementation_definition] application_definition {object_definition_list};
Includes
= "#include" include_name ;
include_name
= ?/[!-_A-Za-z0-9]+/? ;
OIL_version
= "OIL_VERSION" "=" version description ";" ;
version = '"' ver '"';
implementation_definition
= "IMPLEMENTATION" name "{" implementation_spec_list "}" description ";";
implementation_spec_list
= [implementation_spec] ;
implementation_spec
= object "{" implementation_def "}" description ";";
object = "OS"
"TASK"
"COUNTER"
"ALARM"
"RESOURCE"
"EVENT"
"ISR"
"MESSAGE"
"COM"
"NM"
"APPMODE"
"IPDU"
"APPLICATION";
implementation_list
= [implementation_def]
| [implementation_list implementation_def] ;
implementation_def
= impl_attr_def
| impl_ref_def;
impl_attr_def
= "UINT32" auto_specifier number_range attribute_name multiple_specifier default_number description ";"
| ( "INT32" | "UINT64" | "INT64" ) auto_specifier number_range attribute_name multiple_specifier default_number description ";"
| "FLOAT" auto_specifier float_range attribute_name multiple_specifier default_float description ";"
| "ENUM" auto_specifier enumeration attribute_name multiple_specifier default_name description ";"
| "STRING" auto_specifier attribute_name multiple_specifier default_string description ";"
| "BOOLEAN" auto_specifier bool_values attribute_name multiple_specifier default_bool description ";" ;
impl_parameter_list
= [( "{" {implementation_def} [implementation_def] "}" )] ;
auto_specifier
= ["WITH_AUTO"];
number_range
= [( "[" ( number ".." | ( number ) ) number "]" )];
number_list
= number
| number_list "," number ;
default_number
= [( "=" ( number | "NO_DEFAULT" | "AUTO" ) )];
description
= [( ":" '"' comments '"' )] ;
float_range
= [( "[" float ".." float "]" )] ;
default_float
= [( "=" ( float | "NO_DEFAULT" | "AUTO" ) )] ;
enumeration
= "[" enumerator_list "]";
enumerator_list
= enumerator
| enumerator_list "," enumerator ;
enumerator
= name [impl_parameter_list] description;
bool_values
= [( "[" "TRUE" impl_parameter_list description "," "FALSE" impl_parameter_list description "]" )] ;
default_name
= [( "=" ( name | "NO_DEFAULT" | "AUTO" ) )] ;
default_string
= [( "=" ( string | "NO_DEFAULT" | "AUTO" ) )] ;
default_bool
= [( "=" ( boolean | "NO_DEFAULT" | "AUTO" ) )] ;
impl_ref_def
= object_ref_type reference_name multiple_specifier description ";";
object_ref_type
= "OS_TYPE"
| "TASK_TYPE"
| "COUNTER_TYPE"
| "ALARM_TYPE"
| "RESOURCE_TYPE"
| "EVENT_TYPE"
| "ISR_TYPE"
| "MESSAGE_TYPE"
| "COM_TYPE"
| "NM_TYPE"
| "APPMODE_TYPE"
| "IPDU_TYPE";
reference_name
= name
| object;
multiple_specifier
= [( "[" "]" )] ;
application_definition
= "CPU" name "{" [Includes] { ( parameter_list Comments_option ) } "}" description ";" ;
object_definition_list
= [object_definition];
Comment_list
= object_definition | parameter comments ;
object_definition
= object_name "{" { parameter_list Comments_option } "}" description ";" ;
object_name
= object name;
parameter_list
= [parameter];
parameter
= attribute_name "=" attribute_value [ "{" { ( parameter [Comments_option] ) } "}" ] description ";" ;
attribute_name
= name
| object;
attribute_value
= boolean
| float
| number
| string
| "AUTO"
| '"' string '"';
Comments_option
= ( Single_line Multi_line );
Single_line = {"//" comments};
Multi_line = {"/*#*" Comment_list "*#*/"};
name = ?/[-_A-Za-z0-9]+/?;
string = ?/[-_A-Za-z0-9_*, ]+/?;
ver = ?/[0-9.0-9]+/?;
comments = ?/[-_A-Za-z0-9 *#]+/? ;
boolean = "FALSE"
| "TRUE";
number = dec_number
| hex_number;
dec_number
= sign int_digits;
sign = [( "+" | "-" )] ;
int_digits
= zero_digit
| pos_digit
| pos_digit dec_digits ;
dec_digits
= {dec_digit} [dec_digit] ;
float = ver;
exponent = [( ( "e" | "E" ) sign dec_digits )] ;
zero_digit
= "0";
pos_digit
= "1"
| "2"
| "3"
| "4"
| "5"
| "6"
| "7"
| "8"
| "9";
dec_digit
= zero_digit
| pos_digit;
hex_number
= "0x" {hex_digit};
hex_digit
= "A"
| "B"
| "C"
| "D"
| "E"
| "F"
| "a"
| "b"
| "c"
| "d"
| "e"
| "f"
| "0"
| "1"
| "2"
| "3"
| "4"
| "5"
| "6"
| "7"
| "8"
| "9";
Should indentation be handled by Grako or by codegen? How do I indent the generated code? Thanks.
import json
from grako.util import asjson
print(json.dumps(asjson(myast), indent=4))

Categories