RecursionError: maximum recursion depth exceeded while using lark in python

I've written the Decaf grammar specified in the CS143 course.
Here is my code.
import sys
from lark import Lark, Transformer, v_args
decaf_grammar = r"""
start : PROGRAM
PROGRAM : DECL+
DECL : VARIABLEDECL | FUNCTIONDECL | CLASSDECL | INTERFACEDECL
VARIABLEDECL : VARIABLE ";"
VARIABLE : TYPE "ident"
TYPE : "int" | "double" | "bool" | "string" | "ident" | TYPE "[]"
FUNCTIONDECL : ( TYPE "ident" "(" FORMALS ")" STMTBLOCK ) | ( "void" "ident" "(" FORMALS ")" STMTBLOCK )
FORMALS : VARIABLE ("," VARIABLE)*
CLASSDECL : "class" "ident" ["extends" "ident"] ["implements" "ident" ("," "ident")*] "{" FIELD* "}"
FIELD : VARIABLEDECL | FUNCTIONDECL
INTERFACEDECL : "interface" "ident" "{" PROTOTYPE* "}"
PROTOTYPE : (TYPE "ident" "(" FORMALS ")" ";") | ("void" "ident" "(" FORMALS ")" ";")
STMTBLOCK : "{" VARIABLEDECL* STMT* "}"
STMT : ( EXPR? ";") | IFSTMT | WHILESTMT | FORSTMT | BREAKSTMT | RETURNSTMT | RETURNSTMT | PRINTSTMT | STMTBLOCK
IFSTMT : "if" "(" EXPR ")" STMT ["else" STMT]
WHILESTMT : "while" "(" EXPR ")" STMT
FORSTMT : "for" "(" EXPR? ";" EXPR ";" EXPR? ")" STMT
RETURNSTMT : "return" EXPR? ";"
BREAKSTMT : "break" ";"
PRINTSTMT : "print" "(" EXPR ("," EXPR)* ")" ";"
EXPR : (LVALUE "=" EXPR) | CONSTANT | LVALUE | "this" | CALL | "(" EXPR ")" | (EXPR "+" EXPR) | (EXPR "-" EXPR) | (EXPR "*" EXPR) | (EXPR "/" EXPR) | (EXPR "%" EXPR) | ("-" EXPR) | (EXPR "<" EXPR) | (EXPR "<=" EXPR) | (EXPR ">" EXPR) | (EXPR ">=" EXPR) | (EXPR "==" EXPR) | (EXPR "!=" EXPR) | (EXPR "&&" EXPR) | (EXPR "||" EXPR) | ("!" EXPR) | ("ReadInteger" "(" ")") | ("ReadLine" "(" ")") | ("new" "ident") | ("NewArray" "(" EXPR "," TYPE ")")
LVALUE : "ident" | (EXPR "." "ident") | (EXPR "[" EXPR "]")
CALL : ("ident" "(" ACTUALS ")") | (EXPR "." "ident" "(" ACTUALS ")")
ACTUALS : EXPR ("," EXPR)* | ""
CONSTANT : "intConstant" | "doubleConstant" | "boolConstant" | "stringConstant" | "null"
"""
class TreeToJson(Transformer):
    # @v_args(inline=True)
    def string(self, s):
        return s[1:-1].replace('\\"', '"')

json_parser = Lark(decaf_grammar, parser='lalr', lexer='standard', transformer=TreeToJson())
parse = json_parser.parse

def test():
    test_json = '''
    {
    }
    '''
    j = parse(test_json)
    print(j)
    import json
    assert j == json.loads(test_json)

if __name__ == '__main__':
    test()
    # with open(sys.argv[1]) as f:
    #     print(parse(f.read()))
It throws
RecursionError: maximum recursion depth exceeded.
I'm using lark for the first time

The problem is that you are not distinguishing between Lark's rules and terminals. Terminals (which are the only names that should be written in capitals) should match strings, not the structure of your grammar.
The key property of terminals is that, unlike rules, they cannot be recursive. Because of that, Lark struggles to build your grammar and runs into infinite recursion and a stack overflow.
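For example, here is a minimal sketch of the idea (only a tiny fragment of the grammar, keeping the question's literal "ident" token; the names decaf_fragment and IDENT are chosen here just for illustration): the recursive constructs become lowercase rules, and only flat tokens stay as uppercase terminals:
from lark import Lark

# A sketch of a small fragment only, not the full CS143 grammar:
# recursive structure lives in lowercase rules; IDENT is a terminal
# that matches a flat token (here the literal "ident", as in the question).
decaf_fragment = r"""
    start: decl+
    decl: variabledecl
    variabledecl: variable ";"
    variable: type IDENT
    type: "int" | "double" | "bool" | "string" | IDENT | type "[]"

    IDENT: "ident"
    %import common.WS
    %ignore WS
"""

parser = Lark(decaf_fragment)  # the default Earley parser handles the recursive `type` rule
print(parser.parse("int[] ident;").pretty())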

Try using sys.setrecursionlimit(xxxx), where xxxx is the maximum recursion depth you want.
To learn more, visit docs.python.org/3 .
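For example, a minimal sketch of that workaround (the limit value 5000 is an arbitrary choice):
import sys

# Raise the interpreter's recursion limit (the default is usually 1000).
# This only postpones the error here; the grammar itself still needs fixing.
sys.setrecursionlimit(5000)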

Related

python: Lark-parser

I use the Lark library to parse boolean expressions like
(A=(value1) OR B>(value2)) AND C<=(value3)
and I also use it to parse keyless expressions like
(A OR B) AND C
The parser works correctly until I try to parse a keyless expression containing not only letters and numbers but other characters as well, for example
valu* OR text
In this case the parser crashes with an exception saying it does not recognise the character.
However, if I use a keyed expression, it works correctly:
A=(valu* OR text) AND B=(value2)
Please tell me where the bug is in my code and how it can be fixed.
Code:
from lark import Lark, Tree, Token
rules = """
?start: expr
?expr: link_or
?link_or: (link_or "or"i)? link_and
?link_and: (link_and "and"i)? ( NAME | cond_eq | cond_gt | cond_ge | cond_lt | cond_le )
?cond_eq: KEY "=" const | "(" expr ")"
?cond_gt: KEY ">" const | "(" expr ")"
?cond_ge: KEY ">=" const | "(" expr ")"
?cond_lt: KEY "<" const | "(" expr ")"
?cond_le: KEY "<=" const | "(" expr ")"
KEY: NAME
?const: INT -> int
| string_raw -> string
?string_raw: /\((?:[^)(]+|\((?:[^)(]+|\([^)(]*\))*\))*\)/
%import common.CNAME -> NAME
%import common.WS_INLINE
%import common.INT
%ignore WS_INLINE
"""
parser = Lark(rules)
for text in ("key1=(value1*) OR key2=(value2)", "key1 OR key2", "key1* OR key2"):
    print("text:", text)
    try:
        tree = parser.parse(text)
        print("parsed tree:", tree)
    except BaseException as e:
        print("Exception:", e)
    print()
Output
example #1: "key1=(value1*) OR key2=(value2)": OK
text: key1=(value1*) OR key2=(value2)
parsed tree: Tree(Token('RULE', 'link_or'), [Tree(Token('RULE', 'cond_eq'), [Token('KEY', 'key1'), Tree('string', [Token('__ANON_2', '(value1*)')])]), Tree(Token('RULE', 'cond_eq'), [Token('KEY', 'key2'), Tree('string', [Token('__ANON_2', '(value2)')])])])
example #2: "key1 OR key2": OK
text: key1 OR key2
parsed tree: Tree(Token('RULE', 'link_or'), [Token('NAME', 'key1'), Token('NAME', 'key2')])
example #3: "key1* OR key2": FAILED
text: key1* OR key2
Exception: No terminal matches '*' in the current parser context, at line 1 col 5
key1* OR key2
^
Expected one of:
* __ANON_1
* OR
* LESSTHAN
* AND
* MORETHAN
* __ANON_0
* EQUAL
example #3 without try except block:
Traceback (most recent call last):
File "D:\codes\python\test.py", line 34, in <module>
tree = parser.parse(text)
File "C:\Program Files\Python\lib\site-packages\lark\lark.py", line 645, in parse
return self.parser.parse(text, start=start, on_error=on_error)
File "C:\Program Files\Python\lib\site-packages\lark\parser_frontends.py", line 96, in parse
return self.parser.parse(stream, chosen_start, **kw)
File "C:\Program Files\Python\lib\site-packages\lark\parsers\earley.py", line 266, in parse
to_scan = self._parse(lexer, columns, to_scan, start_symbol)
File "C:\Program Files\Python\lib\site-packages\lark\parsers\xearley.py", line 146, in _parse
to_scan = scan(i, to_scan)
File "C:\Program Files\Python\lib\site-packages\lark\parsers\xearley.py", line 119, in scan
raise UnexpectedCharacters(stream, i, text_line, text_column, {item.expect.name for item in to_scan},
lark.exceptions.UnexpectedCharacters: No terminal matches '*' in the current parser context, at line 1 col 5
key1* OR key2
^
Expected one of:
* __ANON_1
* __ANON_0
* LESSTHAN
* MORETHAN
* AND
* OR
* EQUAL

network device command parsing with pyparsing

I'm developing a network device command parser using pyparsing.
I analysed the commands and defined the command format as below:
cli ::= string + (next)*
next ::= string|range|group|simple_recursive|selective_recursive|infinite_recursive|keywords
keywords ::= "WORD"
| "LINE"
| "A.B.C.D"
| "A.B.C.D/M"
| "X:X::X:X"
| "X:X::X:X/M"
| "HH:MM:SS"
| "AA:NN"
| "XX:XX:XX:XX:XX:XX"
| "MULTILINE"
inner_recur ::= next + (next)* + ("|")* | ("|" + next + (next)*)*
string ::= alphanums + "_" + "-"
range ::= "<" + nums + "-" nums + ">"
group ::= "(" + inner_recur + ")"
simple_recursive ::= "." + range
selective_recursive ::= "{" + inner_recur + "}"
infinite_recursive ::= "[" + inner_recur + "]"
and implemented it as follows:
from pyparsing import (Word, Keyword, Literal, Combine, Forward,
                       ZeroOrMore, OneOrMore, alphanums, nums)

# string ::= alphanums + "_" + "-"
string_ = Word(alphanums + "_" + "-").setResultsName("string")
#print(string_.parseString("option82"))
# range ::= "<" + nums + "-" nums + ">"
range_ = Combine(Literal("<") + Word(nums) + Literal("-") + Word(nums) + Literal(">")).setResultsName("range")
#print(range_.parseString("<24-1004>"))
# simple_recursive ::= "." + range
simple_recursive_ = Combine(Literal(".") + range_).setResultsName("simple_recursive")
#print(simple_recursive_.parseString(".<1-60045>"))
# keywords ::= "WORD" | "LINE" | "A.B.C.D" | "A.B.C.D/M" | "X:X::X:X" | "X:X::X:X/M" | "HH:MM:SS" | "AA:NN" | "XX:XX:XX:XX:XX:XX" | "MULTILINE"
keywords_ = Keyword("X:X::X:X/M").setResultsName("X:X::X:/M") | Keyword("A.B.C.D/M").setResultsName("A.B.C.D/M") | Keyword("A.B.C.D").setResultsName("A.B.C.D") | Keyword("X:X::X:X").setResultsName("X:X::X:X") | Keyword("HH:MM:SS").setResultsName("HH:MM:SS") | Keyword("AA:NN").setResultsName("AA:NN") | Keyword("XX:XX:XX:XX:XX:XX").setResultsName("XX:XX:XX:XX:XX:XX") | Keyword("MULTILINE").setResultsName("MULTILINE") | Keyword("WORD").setResultsName("WORD") | Keyword("LINE").setResultsName("LINE")
#print(keywords_.parseString("A.B.C.D").asXML())
#next_ = Forward()
inner_recur = Forward()
# group ::= "(" + inner_recur + ")"
group_ = Combine(Literal("(") + inner_recur + Literal(")"))
# selective_recursive ::= "{" + inner_recur + "}"
selective_recursive_ = Combine(Literal("{") + inner_recur + Literal("}"))
# infinite_recursive ::= "[" + inner_recur + "]"
infinite_recursive_ = Combine(Literal("[") + inner_recur + Literal("]"))
# next ::= string|range|group|simple_recursive|selective_recursive|infinite_recursive|keywords
next_ = keywords_ | string_ | simple_recursive_ | range_ | group_ | selective_recursive_ | infinite_recursive_
# inner_recur ::= next + (next)* + ("|")* | ("|" + next + (next)*)*
inner_recur << next_ + ZeroOrMore(next_) + ZeroOrMore(Literal("|") | ZeroOrMore(Literal("|") + next_ + OneOrMore(next_)))
# cli ::= string + (next)*
cli_ = string_ + ZeroOrMore(next_)
To test my parser, I tried these inputs:
>>> test = cli_.parseString("bgp as .<1-200>")
>>> print(test)
>>> ['bgp', 'as', ['.<1-200>']]
test = cli_.parseString("bgp as <1-200> <1-255> <1-255> WORD A.B.C.D A.B.C.D/M (A|(B|C))")
print(test)
>>>
test = cli_.parseString("test (A|<1-200>|(B|{a|b|c} aaa)")
test = cli_.parseString("test (A|<1-200>|(B|{a|b|c|})|)")
When the second input was parsed, infinite recursion was raised. I don't understand this situation and don't have a solution...
I expect the result:
['bgp', 'as', ['<1-200>'], ['<1-255>'], ['<1-255>'], 'WORD',
'A.B.C.D', 'A.B.C.D/M', ['A', ['B', 'C']]]
What is the problem with my format or code, and what should be modified?
While you have made a good first step in defining your grammar in conceptual BNF terms before writing code, I'm struggling a bit with making sense of your grammar. The culprit to me seems to be this part:
inner_recur ::= next + (next)* + ("|")* | ("|" + next + (next)*)*
From your posted examples, this looks like you are trying to define some sort of infix notation, using '|' as an operator.
From your tests, it also looks like you need to support multiple inner_recur terms within any grouping ()'s, []'s, or {}'s.
Also, please read the docs (https://pyparsing-docs.readthedocs.io/en/latest/pyparsing.html) to get a clearer picture of the difference between setResultsName and setName. I'm pretty sure in your parser throughout, you are using setResultsName but really want setName. Similarly with using Combine when you really want Group.
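As a quick standalone illustration of the Combine vs Group difference (the rng expression below is just for demonstration, not part of the full parser): Combine merges the matched pieces into one token, while Group keeps them as a nested list:
from pyparsing import Word, Literal, Combine, Group, nums

rng = Literal("<") + Word(nums) + Literal("-") + Word(nums) + Literal(">")
print(Combine(rng).parseString("<1-200>"))  # ['<1-200>']  -- one merged token
print(Group(rng).parseString("<1-200>"))    # [['<', '1', '-', '200', '>']]  -- nested tokens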
Lastly, I rewrote your test code using runTests, and saw that you had mismatched ()'s on the third test.
Here is your parser with these changes:
from pyparsing import (Word, Keyword, Literal, Group, Forward, ZeroOrMore, OneOrMore,
                       Optional, alphanums, nums, infixNotation, opAssoc)

# string ::= alphanums + "_" + "-"
string_ = Word(alphanums + "_" + "-").setResultsName("string")
#print(string_.parseString("option82"))
# range ::= "<" + nums + "-" nums + ">"
range_ = Group(Literal("<") + Word(nums) + Literal("-") + Word(nums) + Literal(">")).setResultsName("range")
#print(range_.parseString("<24-1004>"))
# simple_recursive ::= "." + range
simple_recursive_ = Group(Literal(".") + range_).setResultsName("simple_recursive")
#print(simple_recursive_.parseString(".<1-60045>"))
# keywords ::= "WORD" | "LINE" | "A.B.C.D" | "A.B.C.D/M" | "X:X::X:X" | "X:X::X:X/M" | "HH:MM:SS" | "AA:NN" | "XX:XX:XX:XX:XX:XX" | "MULTILINE"
keywords_ = Keyword("X:X::X:X/M").setResultsName("X:X::X:/M") | Keyword("A.B.C.D/M").setResultsName("A.B.C.D/M") | Keyword("A.B.C.D").setResultsName("A.B.C.D") | Keyword("X:X::X:X").setResultsName("X:X::X:X") | Keyword("HH:MM:SS").setResultsName("HH:MM:SS") | Keyword("AA:NN").setResultsName("AA:NN") | Keyword("XX:XX:XX:XX:XX:XX").setResultsName("XX:XX:XX:XX:XX:XX") | Keyword("MULTILINE").setResultsName("MULTILINE") | Keyword("WORD").setResultsName("WORD") | Keyword("LINE").setResultsName("LINE")
#print(keywords_.parseString("A.B.C.D").asXML())
#next_ = Forward()
inner_recur = Forward()
# group ::= "(" + inner_recur + ")"
group_ = Group(Literal("(") + OneOrMore(inner_recur) + Literal(")"))
# selective_recursive ::= "{" + inner_recur + "}"
selective_recursive_ = Group(Literal("{") + OneOrMore(inner_recur) + Literal("}"))
# infinite_recursive ::= "[" + inner_recur + "]"
infinite_recursive_ = Group(Literal("[") + OneOrMore(inner_recur) + Literal("]"))
# next ::= string|range|group|simple_recursive|selective_recursive|infinite_recursive|keywords
next_ = keywords_ | string_ | simple_recursive_ | range_ | group_ | selective_recursive_ | infinite_recursive_
#~ next_.setName("next_").setDebug()
# inner_recur ::= next + (next)* + ("|")* | ("|" + next + (next)*)*
#~ inner_recur <<= OneOrMore(next_) + ZeroOrMore(Literal("|")) | ZeroOrMore(Literal("|") + OneOrMore(next_))
inner_recur <<= Group(infixNotation(next_,
    [
        (None, 2, opAssoc.LEFT),
        ('|', 2, opAssoc.LEFT),
    ]) + Optional('|'))
# cli ::= string + (next)*
cli_ = string_ + ZeroOrMore(next_)
tests = """\
bgp as .<1-200>
bgp as <1-200> <1-255> <1-255> WORD A.B.C.D A.B.C.D/M (A|(B|C))
test (A|<1-200>|(B|{a|b|c} aaa))
test (A|<1-200>|(B|{a|b|c|})|)
"""
cli_.runTests(tests)
Which gives:
bgp as .<1-200>
['bgp', 'as', ['.', ['<', '1', '-', '200', '>']]]
- simple_recursive: ['.', ['<', '1', '-', '200', '>']]
- range: ['<', '1', '-', '200', '>']
- string: 'as'
bgp as <1-200> <1-255> <1-255> WORD A.B.C.D A.B.C.D/M (A|(B|C))
['bgp', 'as', ['<', '1', '-', '200', '>'], ['<', '1', '-', '255', '>'], ['<', '1', '-', '255', '>'], 'WORD', 'A.B.C.D', 'A.B.C.D/M', ['(', [['A', '|', ['(', [['B', '|', 'C']], ')']]], ')']]
- A.B.C.D: 'A.B.C.D'
- A.B.C.D/M: 'A.B.C.D/M'
- WORD: 'WORD'
- range: ['<', '1', '-', '255', '>']
- string: 'as'
test (A|<1-200>|(B|{a|b|c} aaa))
['test', ['(', [['A', '|', ['<', '1', '-', '200', '>'], '|', ['(', [['B', '|', [['{', [['a', '|', 'b', '|', 'c']], '}'], 'aaa']]], ')']]], ')']]
- string: 'test'
test (A|<1-200>|(B|{a|b|c|})|)
['test', ['(', [['A', '|', ['<', '1', '-', '200', '>'], '|', ['(', [['B', '|', ['{', [['a', '|', 'b', '|', 'c'], '|'], '}']]], ')']], '|'], ')']]
- string: 'test'
This may be off the mark in some places, but I hope it gives you some ideas to move forward with your project.

How to find and replace case sensitive whole words in python

Consider the MCVE below:
import re
import textwrap
import traceback
import unittest
def replace_words(content, replacements):
    rc = re.compile(r"[A-Za-z_]\w*")

    def translate(match):
        word = match.group(0)
        return replacements.get(word, word)

    return rc.sub(translate, content, re.IGNORECASE | re.MULTILINE)
class class_name(unittest.TestCase):

    def setUp(self):
        self.replacements = [
            {
                'PLUS': '"+"',
                'DASH': '"-"',
                'BANG': '"!"',
                'TILDE': '"~"',
                'STAR': '"*"',
                'SLASH': '"/"',
                'PERCENT': '"%"',
                'LEFT_PAREN': '"("',
                'RIGHT_PAREN': '")"'
            }, {
                "IF": "fi",
                "FOO": "oof",
                "BAR": "rab",
                "OP_FOO": "oof_op"
            }
        ]
        self.texts = [
            textwrap.dedent("""\
variable_identifier :
IDENTIFIER
primary_expression :
foo1
foo2
foo3
LEFT_PAREN expression RIGHT_PAREN
unary_operator :
PLUS
DASH
BANG
TILDE
multiplicative_expression :
unary_expression
multiplicative_expression STAR unary_expression
multiplicative_expression SLASH unary_expression
multiplicative_expression PERCENT unary_expression\
"""),
            textwrap.dedent("""\
IF identifier IDENTIFIER FOO BAR BARycentric
OP_FOO
""")
        ]
        self.expected_results = [
            textwrap.dedent("""\
variable_identifier :
IDENTIFIER
primary_expression :
foo1
foo2
foo3
"(" expression ")"
unary_operator :
"+"
"-"
"!"
"~"
multiplicative_expression :
unary_expression
multiplicative_expression "*" unary_expression
multiplicative_expression "/" unary_expression
multiplicative_expression "%" unary_expression\
"""),
            textwrap.dedent("""\
fi identifier IDENTIFIER oof rab BARycentric
oof_op
""")
        ]

    def _tester(self, f):
        replacements = self.replacements
        expected_results = self.expected_results
        texts = self.texts
        self.assertEqual(f(texts[0], replacements[0]), expected_results[0])
        self.assertEqual(f(texts[1], replacements[1]), expected_results[1])

    def test_replace_words(self):
        self._tester(replace_words)

if __name__ == "__main__":
    unittest.main()
The replace_words function attempts to search for and replace case-sensitive whole words in a given text using a dictionary of replacements, but the code above fails at the line self.assertEqual(f(texts[0], replacements[0]), expected_results[0]) and I don't know why.
So the question would be, how do you find and replace case sensitive whole words using a replacements dictionary in python?
You can use re.sub and re.findall:
import re

def regex_string(d, to_lower=False):
    if not to_lower:
        return '|'.join(r'\b{}\b'.format(i) for i in d.keys())
    return '|'.join([c for b in [[r'\b{}\b'.format(i.lower()), r'\b{}\b'.format(i)] for i in d.keys()] for c in b])

replacements = {
    'PLUS': '"+"',
    'DASH': '"-"',
    'BANG': '"!"',
    'TILDE': '"~"',
    'STAR': '"*"',
    'SLASH': '"/"',
    'PERCENT': '"%"',
    'LEFT_PAREN': '"("',
    'RIGHT_PAREN': '")"'
}

# content is one of the input texts from the question
replaced = re.sub(regex_string(replacements, True), '{}', content)
final_result = replaced.format(*[replacements.get(i, i) for i in re.findall(regex_string(replacements, True), content)])
Output (case 1):
variable_identifier :
IDENTIFIER
primary_expression :
foo1
foo2
foo3
"(" expression ")"
unary_operator :
"+"
"-"
"!"
"~"
multiplicative_expression :
unary_expression
multiplicative_expression "*" unary_expression
multiplicative_expression "/" unary_expression
multiplicative_expression "%" unary_expression
Output (case 2):
fi identifier IDENTIFIER oof rab BARycentric
oof_op
Or, even shorter:
replaced = re.sub(regex_string(replacements, True), lambda x:replacements.get(x.group(), x.group()), content)
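For instance, applying that shorter form to the second text from the question (a small sketch reusing the regex_string helper defined above; content and replacements here are just the second test case spelled out) reproduces the expected output:
content = "IF identifier IDENTIFIER FOO BAR BARycentric\nOP_FOO\n"
replacements = {"IF": "fi", "FOO": "oof", "BAR": "rab", "OP_FOO": "oof_op"}

replaced = re.sub(regex_string(replacements, True),
                  lambda x: replacements.get(x.group(), x.group()),
                  content)
print(replaced)
# fi identifier IDENTIFIER oof rab BARycentric
# oof_op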

EOL whilst scanning string literal - Python

I'm new to Python. I'm trying to write code that will print out this ASCII art traffic light. Here is the actual ASCII:
##
_[]_
[____]
.----' '----.
.===| .==. |===.
\ | /####\ | /
/ | \####/ | \
'===| `""` |==='
.===| .==. |===.
\ | /::::\ | /
/ | \::::/ | \
'===| `""` |==='
.===| .==. |===.
\ | /&&&&\ | /
/ | \&&&&/ | \
'===| `""` |==='
jgs '--.______.--'
And the code I'm trying to use is this:
print ("##"),
print (" _[]_"),
print (".----' '----."),
print (" .===| .==. |===."),
print (" \ | /####\ | /"),
print (" / | \####/ | \\"),
print ("'===| `""` |==='"),
print (" .===| .==. |===."),
print ("\ | /::::\ | /"),
print (" / | \::::/ | \"),
print ("'===| `""` |==='"),
print (".===| .==. |===."),
print (" \ | /&&&&\ | /"),
print (" / | \&&&&/ | \"),
print (" '===| `""` |==='"),
print ("'--.______.--'")
You need to escape the \ characters, double them:
print (" / | \::::/ | \"),
should be:
print(" / | \\::::/ | \\")
You want to get rid of all the trailing commas too; with print() as a function, a trailing comma no longer suppresses the newline, it just turns the statement into a throwaway tuple.
Note that you can create a multiline string using triple quotes; make it a raw string (using r'') and you don't have to escape anything either:
print(r''' _[]_
[____]
.----' '----.
.===| .==. |===.
\ | /####\ | /
/ | \####/ | \
'===| `""` |==='
.===| .==. |===.
\ | /::::\ | /
/ | \::::/ | \
'===| `""` |==='
.===| .==. |===.
\ | /&&&&\ | /
/ | \&&&&/ | \
'===| `""` |==='
jgs '--.______.--'
''')

Pyparsing error when evaluating WFF logic expressions?

I'm new to Python and pyparsing, and I'm making a logic expression evaluator.
The formula must be a WFF. The BNF of WFF is:
<alpha set> ::= p | q | r | s | t | u | ...
(the arbitrary finite set of propositional variables)
<form> ::= <alpha set> | ¬<form> | (<form>V<form>) | (<form>^<form>)
| (<form> -> <form>) | (<form> <-> <form>)
My code is:
'''
Created on 17/02/2012
@author: Juanjo
'''
from pyparsing import *
from string import lowercase

def fbf():
    atom = Word(lowercase, max=1)  # alphabet
    op = oneOf('^ V => <=>')  # operators
    identOp = oneOf('( [ {')
    identCl = oneOf(') ] }')
    form = Forward()  # defined recursively
    # Grammar:
    form << ( (Group(Literal('~') + form)) | ( Group(identOp + form + op + form + identCl) ) | ( Group(identOp + form + identCl) ) | (atom) )
    return form

entrada = raw_input("Input please: ")  # user input
print fbf().parseString(entrada)
The problem is when I use expressions like a^b and aVb.
The parser should return an error, but there is no error; instead it returns a. Actually, any symbol after a is ignored.
The WFF versions of those forms are (a^b) and (aVb), and both work correctly.
I think the problem is in the atom definition.
What am I doing wrong?
By default parseString will just parse the beginning of the string.
You can force it to parse the entire string by changing the code to:
print fbf().parseString(entrada, parseAll=True)
Alternatively, you can end the grammar with the StringEnd() token - see the documentation under parseString in http://packages.python.org/pyparsing/ for more details.
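For example, a minimal sketch of that alternative, reusing the fbf() function from the question (kept in the same Python 2 style as the code above):
from pyparsing import StringEnd

# Appending StringEnd() forces the whole input to be consumed, so trailing
# unparsed text such as '^b' raises a ParseException instead of being ignored.
entrada = raw_input("Input please: ")
print (fbf() + StringEnd()).parseString(entrada)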
