Django: 3.0.5
Python: 3.7.3
I am rather new to Django's framework and I have been trying to create a custom user model so that the default username is an email. However, I cannot seem to create a superuser. I keep getting this error after calling py manage.py createsuperuser:
Traceback (most recent call last):
File "manage.py", line 21, in <module>
main()
File "manage.py", line 17, in main
execute_from_command_line(sys.argv)
File "C:\Users\User\Dev\tryDjango\lib\site-packages\django\core\management\__init__.py", line 401, in execute_from_command_line
utility.execute()
File "C:\Users\User\Dev\tryDjango\lib\site-packages\django\core\management\__init__.py", line 395, in execute
self.fetch_command(subcommand).run_from_argv(self.argv)
File "C:\Users\User\Dev\tryDjango\lib\site-packages\django\core\management\base.py", line 320, in run_from_argv
parser = self.create_parser(argv[0], argv[1])
File "C:\Users\User\Dev\tryDjango\lib\site-packages\django\core\management\base.py", line 294, in create_parser
self.add_arguments(parser)
File "C:\Users\User\Dev\tryDjango\lib\site-packages\django\contrib\auth\management\commands\createsuperuser.py", line 55, in add_arguments
field = self.UserModel._meta.get_field(field_name)
File "C:\Users\User\Dev\tryDjango\lib\site-packages\django\db\models\options.py", line 583, in get_field
raise FieldDoesNotExist("%s has no field named '%s'" % (self.object_name, field_name))
django.core.exceptions.FieldDoesNotExist: User has no field named ''
Below is the code for my custom user model; I have also placed AUTH_USER_MODEL = 'accounts.User' in my settings.py.
class UserManager(BaseUserManager):
def create_user(self, email, password=None):
if not email:
raise ValueError("Users must have an Email address")
if not password:
raise ValueError("Users must have a Password")
user = self.model(
email=self.normalize_email(email)
)
user.set_password(password)
user.save(using=self._db)
return user
def create_staffuser(self, email, password=None):
user = self.create_user(
email,
password=password
)
user.staff = True
user.save(using=self._db)
return user
def create_superuser(self, email, password=None):
user = self.create_user(
email,
password=password
)
user.staff = True
user.admin = True
user.save(using=self._db)
return user
class User(AbstractBaseUser):
email = models.EmailField(max_length=255, unique=True)
# full_name = models.CharField(max_length=255, blank=False)
active = models.BooleanField(default=True)
staff = models.BooleanField(default=False)
admin = models.BooleanField(default=False)
timestamp = models.DateTimeField(auto_now_add=True)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['']
objects = UserManager()
def get_full_name(self):
return self.email
def get_short_name(self):
# The user is identified by their email address
return self.email
def __str__(self):
return self.email
def has_perm(self, perm, obj=None):
return True
def has_module_perms(self, app_label):
return True
@property
def is_staff(self):
return self.staff
@property
def is_admin(self):
return self.admin
@property
def is_active(self):
return self.active
I'd appreciate any help! Also, can anyone explain why I need to write the methods below under the User class? I don't see why I'd need them, and I also don't see why there is a need for a getter @property. Sorry if these are stupid questions.
def get_full_name(self):
return self.email
def get_short_name(self):
# The user is identified by their email address
return self.email
def __str__(self):
return self.email
def has_perm(self, perm, obj=None):
return True
def has_module_perms(self, app_label):
return True
@property
def is_staff(self):
return self.staff
@property
def is_admin(self):
return self.admin
@property
def is_active(self):
return self.active
EDIT: Serializer.py
from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
import re
from codecs import register_error, xmlcharrefreplace_errors
from .constants import voidElements, booleanAttributes, spaceCharacters
from .constants import rcdataElements, entities, xmlEntities
from . import treewalkers, _utils
from xml.sax.saxutils import escape
_quoteAttributeSpecChars = "".join(spaceCharacters) + "\"'=<>`"
_quoteAttributeSpec = re.compile("[" + _quoteAttributeSpecChars + "]")
_quoteAttributeLegacy = re.compile("[" + _quoteAttributeSpecChars +
"\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n"
"\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15"
"\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
"\x20\x2f\x60\xa0\u1680\u180e\u180f\u2000"
"\u2001\u2002\u2003\u2004\u2005\u2006\u2007"
"\u2008\u2009\u200a\u2028\u2029\u202f\u205f"
"\u3000]")
_encode_entity_map = {}
_is_ucs4 = len("\U0010FFFF") == 1
for k, v in list(entities.items()):
# skip multi-character entities
if ((_is_ucs4 and len(v) > 1) or
(not _is_ucs4 and len(v) > 2)):
continue
if v != "&":
if len(v) == 2:
v = _utils.surrogatePairToCodepoint(v)
else:
v = ord(v)
if v not in _encode_entity_map or k.islower():
# prefer &lt; over &LT; and similarly for &amp;, &gt;, etc.
_encode_entity_map[v] = k
def htmlentityreplace_errors(exc):
if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)):
res = []
codepoints = []
skip = False
for i, c in enumerate(exc.object[exc.start:exc.end]):
if skip:
skip = False
continue
index = i + exc.start
if _utils.isSurrogatePair(exc.object[index:min([exc.end, index + 2])]):
codepoint = _utils.surrogatePairToCodepoint(exc.object[index:index + 2])
skip = True
else:
codepoint = ord(c)
codepoints.append(codepoint)
for cp in codepoints:
e = _encode_entity_map.get(cp)
if e:
res.append("&")
res.append(e)
if not e.endswith(";"):
res.append(";")
else:
res.append("&#x%s;" % (hex(cp)[2:]))
return ("".join(res), exc.end)
else:
return xmlcharrefreplace_errors(exc)
register_error("htmlentityreplace", htmlentityreplace_errors)
def serialize(input, tree="etree", encoding=None, **serializer_opts):
"""Serializes the input token stream using the specified treewalker
:arg input: the token stream to serialize
:arg tree: the treewalker to use
:arg encoding: the encoding to use
:arg serializer_opts: any options to pass to the
:py:class:`html5lib.serializer.HTMLSerializer` that gets created
:returns: the tree serialized as a string
Example:
>>> from html5lib.html5parser import parse
>>> from html5lib.serializer import serialize
>>> token_stream = parse('<html><body><p>Hi!</p></body></html>')
>>> serialize(token_stream, omit_optional_tags=False)
'<html><head></head><body><p>Hi!</p></body></html>'
"""
# XXX: Should we cache this?
walker = treewalkers.getTreeWalker(tree)
s = HTMLSerializer(**serializer_opts)
return s.render(walker(input), encoding)
class HTMLSerializer(object):
# attribute quoting options
quote_attr_values = "legacy" # be secure by default
quote_char = '"'
use_best_quote_char = True
# tag syntax options
omit_optional_tags = True
minimize_boolean_attributes = True
use_trailing_solidus = False
space_before_trailing_solidus = True
# escaping options
escape_lt_in_attrs = False
escape_rcdata = False
resolve_entities = True
# miscellaneous options
alphabetical_attributes = False
inject_meta_charset = True
strip_whitespace = False
sanitize = False
options = ("quote_attr_values", "quote_char", "use_best_quote_char",
"omit_optional_tags", "minimize_boolean_attributes",
"use_trailing_solidus", "space_before_trailing_solidus",
"escape_lt_in_attrs", "escape_rcdata", "resolve_entities",
"alphabetical_attributes", "inject_meta_charset",
"strip_whitespace", "sanitize")
def __init__(self, **kwargs):
"""Initialize HTMLSerializer
:arg inject_meta_charset: Whether or not to inject the meta charset.
Defaults to ``True``.
:arg quote_attr_values: Whether to quote attribute values that don't
require quoting per legacy browser behavior (``"legacy"``), when
required by the standard (``"spec"``), or always (``"always"``).
Defaults to ``"legacy"``.
:arg quote_char: Use given quote character for attribute quoting.
Defaults to ``"`` which will use double quotes unless attribute
value contains a double quote, in which case single quotes are
used.
:arg escape_lt_in_attrs: Whether or not to escape ``<`` in attribute
values.
Defaults to ``False``.
:arg escape_rcdata: Whether to escape characters that need to be
escaped within normal elements within rcdata elements such as
style.
Defaults to ``False``.
:arg resolve_entities: Whether to resolve named character entities that
appear in the source tree. The XML predefined entities &lt; &gt;
&amp; &quot; &apos; are unaffected by this setting.
Defaults to ``True``.
:arg strip_whitespace: Whether to remove semantically meaningless
whitespace. (This compresses all whitespace to a single space
except within ``pre``.)
Defaults to ``False``.
:arg minimize_boolean_attributes: Shortens boolean attributes to give
just the attribute value, for example::
<input disabled="disabled">
becomes::
<input disabled>
Defaults to ``True``.
:arg use_trailing_solidus: Includes a close-tag slash at the end of the
start tag of void elements (empty elements whose end tag is
forbidden). E.g. ``<hr/>``.
Defaults to ``False``.
:arg space_before_trailing_solidus: Places a space immediately before
the closing slash in a tag using a trailing solidus. E.g.
``<hr />``. Requires ``use_trailing_solidus=True``.
Defaults to ``True``.
:arg sanitize: Strip all unsafe or unknown constructs from output.
See :py:class:`html5lib.filters.sanitizer.Filter`.
Defaults to ``False``.
:arg omit_optional_tags: Omit start/end tags that are optional.
Defaults to ``True``.
:arg alphabetical_attributes: Reorder attributes to be in alphabetical order.
Defaults to ``False``.
"""
unexpected_args = frozenset(kwargs) - frozenset(self.options)
if len(unexpected_args) > 0:
raise TypeError("__init__() got an unexpected keyword argument '%s'" % next(iter(unexpected_args)))
if 'quote_char' in kwargs:
self.use_best_quote_char = False
for attr in self.options:
setattr(self, attr, kwargs.get(attr, getattr(self, attr)))
self.errors = []
self.strict = False
def encode(self, string):
assert(isinstance(string, text_type))
if self.encoding:
return string.encode(self.encoding, "htmlentityreplace")
else:
return string
def encodeStrict(self, string):
assert(isinstance(string, text_type))
if self.encoding:
return string.encode(self.encoding, "strict")
else:
return string
def serialize(self, treewalker, encoding=None):
# pylint:disable=too-many-nested-blocks
self.encoding = encoding
in_cdata = False
self.errors = []
if encoding and self.inject_meta_charset:
from .filters.inject_meta_charset import Filter
treewalker = Filter(treewalker, encoding)
# Alphabetical attributes is here under the assumption that none of
# the later filters add or change order of attributes; it needs to be
# before the sanitizer so escaped elements come out correctly
if self.alphabetical_attributes:
from .filters.alphabeticalattributes import Filter
treewalker = Filter(treewalker)
# WhitespaceFilter should be used before OptionalTagFilter
# for maximum efficiently of this latter filter
if self.strip_whitespace:
from .filters.whitespace import Filter
treewalker = Filter(treewalker)
if self.sanitize:
from .filters.sanitizer import Filter
treewalker = Filter(treewalker)
if self.omit_optional_tags:
from .filters.optionaltags import Filter
treewalker = Filter(treewalker)
for token in treewalker:
type = token["type"]
if type == "Doctype":
doctype = "<!DOCTYPE %s" % token["name"]
if token["publicId"]:
doctype += ' PUBLIC "%s"' % token["publicId"]
elif token["systemId"]:
doctype += " SYSTEM"
if token["systemId"]:
if token["systemId"].find('"') >= 0:
if token["systemId"].find("'") >= 0:
self.serializeError("System identifer contains both single and double quote characters")
quote_char = "'"
else:
quote_char = '"'
doctype += " %s%s%s" % (quote_char, token["systemId"], quote_char)
doctype += ">"
yield self.encodeStrict(doctype)
elif type in ("Characters", "SpaceCharacters"):
if type == "SpaceCharacters" or in_cdata:
if in_cdata and token["data"].find("</") >= 0:
self.serializeError("Unexpected </ in CDATA")
yield self.encode(token["data"])
else:
yield self.encode(escape(token["data"]))
elif type in ("StartTag", "EmptyTag"):
name = token["name"]
yield self.encodeStrict("<%s" % name)
if name in rcdataElements and not self.escape_rcdata:
in_cdata = True
elif in_cdata:
self.serializeError("Unexpected child element of a CDATA element")
for (_, attr_name), attr_value in token["data"].items():
# TODO: Add namespace support here
k = attr_name
v = attr_value
yield self.encodeStrict(' ')
yield self.encodeStrict(k)
if not self.minimize_boolean_attributes or \
(k not in booleanAttributes.get(name, tuple()) and
k not in booleanAttributes.get("", tuple())):
yield self.encodeStrict("=")
if self.quote_attr_values == "always" or len(v) == 0:
quote_attr = True
elif self.quote_attr_values == "spec":
quote_attr = _quoteAttributeSpec.search(v) is not None
elif self.quote_attr_values == "legacy":
quote_attr = _quoteAttributeLegacy.search(v) is not None
else:
raise ValueError("quote_attr_values must be one of: "
"'always', 'spec', or 'legacy'")
v = v.replace("&", "&")
if self.escape_lt_in_attrs:
v = v.replace("<", "<")
if quote_attr:
quote_char = self.quote_char
if self.use_best_quote_char:
if "'" in v and '"' not in v:
quote_char = '"'
elif '"' in v and "'" not in v:
quote_char = "'"
if quote_char == "'":
v = v.replace("'", "'")
else:
v = v.replace('"', """)
yield self.encodeStrict(quote_char)
yield self.encode(v)
yield self.encodeStrict(quote_char)
else:
yield self.encode(v)
if name in voidElements and self.use_trailing_solidus:
if self.space_before_trailing_solidus:
yield self.encodeStrict(" /")
else:
yield self.encodeStrict("/")
yield self.encode(">")
elif type == "EndTag":
name = token["name"]
if name in rcdataElements:
in_cdata = False
elif in_cdata:
self.serializeError("Unexpected child element of a CDATA element")
yield self.encodeStrict("</%s>" % name)
elif type == "Comment":
data = token["data"]
if data.find("--") >= 0:
self.serializeError("Comment contains --")
yield self.encodeStrict("<!--%s-->" % token["data"])
elif type == "Entity":
name = token["name"]
key = name + ";"
if key not in entities:
self.serializeError("Entity %s not recognized" % name)
if self.resolve_entities and key not in xmlEntities:
data = entities[key]
else:
data = "&%s;" % name
yield self.encodeStrict(data)
else:
self.serializeError(token["data"])
def render(self, treewalker, encoding=None):
"""Serializes the stream from the treewalker into a string
:arg treewalker: the treewalker to serialize
:arg encoding: the string encoding to use
:returns: the serialized tree
Example:
>>> from html5lib import parse, getTreeWalker
>>> from html5lib.serializer import HTMLSerializer
>>> token_stream = parse('<html><body>Hi!</body></html>')
>>> walker = getTreeWalker('etree')
>>> serializer = HTMLSerializer(omit_optional_tags=False)
>>> serializer.render(walker(token_stream))
'<html><head></head><body>Hi!</body></html>'
"""
if encoding:
return b"".join(list(self.serialize(treewalker, encoding)))
else:
return "".join(list(self.serialize(treewalker)))
def serializeError(self, data="XXX ERROR MESSAGE NEEDED"):
# XXX The idea is to make data mandatory.
self.errors.append(data)
if self.strict:
raise SerializeError
class SerializeError(Exception):
"""Error in serialized tree"""
pass
You don't need to write this code under your model! Can you show your serializers.py and your views.py, please?
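(For context: if the project uses Django REST Framework, an app-level serializers.py is normally a short module like the sketch below, rather than the html5lib serializer module pasted in the edit. The field names here are guesses taken from the User model above, not your actual code.)
# serializers.py, a minimal sketch assuming Django REST Framework and the custom User model above
from rest_framework import serializers
from .models import User

class UserSerializer(serializers.ModelSerializer):
    class Meta:
        model = User
        # Field names guessed from the model above; adjust to your project.
        fields = ['id', 'email', 'active', 'staff', 'admin', 'timestamp']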
My code raises an exception on some test cases and returns TRUE on other test cases.
CASE 1:
sample input:
source = 'Delhi'
destination = 'Agra'
Type = 'one-way'
Expected output:
If it exists -> return 'already exist'; else insert into the DB.
My output:
working correctly
CASE 2:
Sample Input:
source = 'Delhi'
destination = 'A gra'
Type = 'one-way'
Expected output:
If it exists -> return 'already exist'; else insert into the DB.
My output:
Error: local variable 'source' referenced before assignment
If I initialize the variable to None, it shows success, which is incorrect.
res = db.test("select source,destination,type from cities where source = '"+str(self.source)+"' and destination='"+str(self.destination)+"' and type = '"+str(self.type)+"'")
for row in res:
source = row['source']
destination = row['destination']
types = row['type']
src = self.source
dst = self.destination
typ = self.type
if str(src).replace(' ','').lower() == str(source).replace(' ','').lower() and str(dst).replace(' ','').lower() == str(destination).replace(' ','').lower() and str(typ).replace(' ','').lower() == str(types).replace(' ','').lower():
return "already exist"
Initialize the source, destination and types variables outside the loop to empty strings.
You are trying to use the variable "source", which is only assigned inside the for loop; if the query returns no rows, it is never assigned. You must initialize it outside the loop.
If you initialize only "source" outside, you would get the next exception: "local variable 'destination' referenced before assignment."
source = ""
destination = ""
types = ""
res = db.test("select source,destination,type from cities where source = '"+str(self.source)+"' and destination='"+str(self.destination)+"' and type = '"+str(self.type)+"'")
for row in res:
source = row['source']
destination = row['destination']
types = row['type']
src = self.source
dst = self.destination
typ = self.type
if str(src).replace(' ','').lower() == str(source).replace(' ','').lower() and str(dst).replace(' ','').lower() == str(destination).replace(' ','').lower() and str(typ).replace(' ','').lower() == str(types).replace(' ','').lower():
return "already exist"
As others suggested, please update the post with the stack trace, or try to read the error carefully and you will understand it yourself. I was not able to spot the error in the code, but I have two observations:
You are using user input as-is while generating a query (read about SQL injection); use query parameters instead.
Why are you converting each variable to "str"? By default it should already be a "str".
I wanted to add this as a comment but don't have enough reputation to comment.
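For illustration, here is roughly what a parameterized query looks like with a standard DB-API cursor. This is only a sketch: your db.test() helper is custom, so the exact call and placeholder style (? for sqlite3, %s for most other drivers) will differ.
import sqlite3

# Hypothetical standalone values; inside your class these would be
# self.source, self.destination and self.type.
source, destination, trip_type = 'Delhi', 'Agra', 'one-way'

conn = sqlite3.connect('cities.db')   # stand-in for whatever db.test() wraps
cur = conn.cursor()
# The driver substitutes and escapes the placeholders itself,
# so the values never get concatenated into the SQL string.
cur.execute("select source, destination, type from cities "
            "where source = ? and destination = ? and type = ?",
            (source, destination, trip_type))
res = cur.fetchall()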
Here's what I am trying to do:
I've built a mini-system that allows for user registration and so on, but the system depends heavily on db_parse() and user_exists(), because those are the two main conditions for the whole script to run.
Basically I am testing whether a user exists with user_exists('username'), which should return a dict whose 'exists' value is either True or False.
So, here's the whole code (please excuse the indentation):
class __system():
def __init__(self):
self.usernames = []
self.passwords = []
self.dbname = 'database.txt'
self.privilege = [1,2,3]
self.backupdb = 'backup.txt'
def db_parse(self):
d = {'username':[],
'uid':[],
'password':[],
'pwdid':[]
}
with open(self.dbname,'r') as f:
lines = ([line.rstrip() for line in f])
f.flush()
for x in xrange(0,len(lines)):
if x%2==0:
d['username'].append(lines[x])
d['uid'].append(x) #-> number of line in the file
if x%2==1:
d['password'].append(lines[x])
d['pwdid'].append(x)
print lines
f.close()
return d
def user_exists(self, username=''):
d = {'exists': None,
'uid': None
}
db = self.db_parse()
ylen = len(db['username'])
for y in range(0,ylen):
if username == db['username'][y]:
d['exists'] = True
d['uid'] = db['uid'][y]
else:
d['exists'] = False
d['uid'] = None
return d
def main():
obj = __system()
print obj.user_exists('user1')
if __name__ == "__main__":
main()
The 'database.txt' file looks like this:
user1
203ad5ffa1d7c650ad681fdff3965cd2
user2
6e809cbda0732ac4845916a59016f954
How can I put this... sometimes this works and sometimes it doesn't, and I've been debugging for 10 hours straight (yes, that's right).
I can't seem to work out why it returns "False" and "uid: 0" when the user clearly exists, and then, five minutes later, after only re-pasting the code, it works.
You're going to kick yourself for this, but the issue is here:
for y in range(0,ylen):
if username == db['username'][y]:
d['exists'] = True
d['uid'] = db['uid'][y]
else:
d['exists'] = False
d['uid'] = None
return d
If username matches the first user in the file, your for loop continues on to the second user in the file, which, of course, won't match. So it ends up returning False/None. You just need to add a break if a match is found:
for y in range(0,ylen):
if username == db['username'][y]:
d['exists'] = True
d['uid'] = db['uid'][y]
break # Add this
else:
d['exists'] = False
d['uid'] = None
return d
As an aside, you don't need to call f.close() if you're opening the file using with open(...) as f. The file will automatically close when you leave the with block. You should also use for x, line in enumerate(lines): instead of for x in xrange(0, len(lines)):
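Here is a sketch of db_parse() with those two suggestions applied (same parsing behaviour, just tidier; untested, so treat it as a starting point):
def db_parse(self):
    d = {'username': [], 'uid': [], 'password': [], 'pwdid': []}
    with open(self.dbname, 'r') as f:      # the file closes itself on exit
        lines = [line.rstrip() for line in f]
    for x, line in enumerate(lines):       # index and value in one go
        if x % 2 == 0:                     # even lines hold usernames
            d['username'].append(line)
            d['uid'].append(x)
        else:                              # odd lines hold password hashes
            d['password'].append(line)
            d['pwdid'].append(x)
    return d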
Apologies for the really long, drawn-out question.
I am trying to read in a config file and get a list of rules out.
I have tried to use ConfigParser to do this but it is not a standard config file.
The file contains no section headers and no key/value delimiter tokens.
i.e.
config section a
set something to something else
config subsection a
set this to that
next
end
config firewall policy
edit 76
set srcintf "There"
set dstintf "Here"
set srcaddr "all"
set dstaddr "all"
set action accept
set schedule "always"
set service "TCP_5600"
next
edit 77
set srcintf "here"
set dstintf "there"
set srcaddr "all"
set dstaddr "all"
set action accept
set schedule "always"
set service "PING"
next
end
As I couldn't work out how to get ConfigParser to work, I thought I would try to iterate through the file myself; unfortunately I don't have much programming experience, so I have got stuck.
I really think I am making this more complicated than it should be.
Here's the code I have written:
class Parser(object):
def __init__(self):
self.config_section = ""
self.config_header = ""
self.section_list = []
self.header_list = []
def parse_config(self, fields): # Create a new section
new_list = []
self.config_section = " ".join(fields)
new_list.append(self.config_section)
if self.section_list: # Create a sub section
self.section_list[-1].append(new_list)
else: self.section_list.append(new_list)
def parse_edit(self, line): # Create a new header
self.config_header = line[0]
self.header_list.append(self.config_header)
self.section_list[-1].append(self.header_list)
def parse_set(self, line): # Key and values
key_value = {}
key = line[0]
values = line[1:]
key_value[key] = values
if self.header_list:
self.header_list.append(key_value)
else: self.section_list[-1].append(key_value)
def parse_next(self, line): # Close the header
self.config_header = []
def parse_end(self, line): # Close the section
self.config_section = []
def parse_file(self, path):
with open(path) as f:
for line in f:
# Clean up the fields and remove unused lines.
fields = line.replace('"', '').strip().split(" ")
if fields[0] == "set":
pass
elif fields[0] == "end":
pass
elif fields[0] == "edit":
pass
elif fields[0] == "config":
pass
elif fields[0] == "next":
pass
else: continue
# fetch and call method.
method = fields[0]
parse_method = "parse_" + method
getattr(Parser, parse_method)(self, fields[1:])
return self.section_list
config = Parser().parse_file('test_config.txt')
print config
The output I am looking for is something like the following:
[['section a', {'something': 'to something else'}, ['subsection a', {'this': 'to that'}]], ['firewall policy', ['76', {'srcintf': 'There'}, {'dstintf': 'Here'}, {etc.}, {etc.}]]]
and this is what I get
[['section a']]
EDIT
I have changed the above to reflect where I am currently at.
I am still having issues getting the output I expect. I just can't seem to get the list right.
class Parser(object):
def __init__(self):
self.my_section = 0
self.flag_section = False
# ...
def parse_config(self, fields):
self.my_section += 1
# go on with fields
# ...
self.flag_section = True
def parse_edit(self, line):
...
def parse_set(self, line):
...
def parse_end(self, line):
...
def parse_file(self, path):
with open(path) as f:
for line in f:
fields = line.strip().split(" ")
method = fields[0]
# fetch and call method
getattr(Parser, "parse_" + method)(self, fields[1:])
I am posting my answer for people who first come here from Google when trying to parse a Fortigate configuration file!
I rewrote what I found here based on my own needs and it works great.
from collections import defaultdict
from pprint import pprint
import sys
f = lambda: defaultdict(f)
def getFromDict(dataDict, mapList):
return reduce(lambda d, k: d[k], mapList, dataDict)
def setInDict(dataDict, mapList, value):
getFromDict(dataDict, mapList[:-1])[mapList[-1]] = value
class Parser(object):
def __init__(self):
self.config_header = []
self.section_dict = defaultdict(f)
def parse_config(self, fields): # Create a new section
self.config_header.append(" ".join(fields))
def parse_edit(self, line): # Create a new header
self.config_header.append(line[0])
def parse_set(self, line): # Key and values
key = line[0]
values = " ".join(line[1:])
headers= self.config_header+[key]
setInDict(self.section_dict,headers,values)
def parse_next(self, line): # Close the header
self.config_header.pop()
def parse_end(self, line): # Close the section
self.config_header.pop()
def parse_file(self, path):
with open(path) as f:
gen_lines = (line.rstrip() for line in f if line.strip())
for line in gen_lines:
# pprint(dict(self.section_dict))
# Clean up the fields and remove unused lines.
fields = line.replace('"', '').strip().split(" ")
valid_fields= ["set","end","edit","config","next"]
if fields[0] in valid_fields:
method = fields[0]
# fetch and call method
getattr(Parser, "parse_" + method)(self, fields[1:])
return self.section_dict
config = Parser().parse_file('FGT02_20130308.conf')
print config["system admin"]["admin"]["dashboard-tabs"]["1"]["name"]
print config["firewall address"]["ftp.fr.debian.org"]["type"]
I do not know if this can help you too, but it did for me: http://wiki.python.org/moin/ConfigParserExamples
Have fun!
I would do it in a simpler way:
flagSection = False
flagSub = False
mySection = 0
mySubsection = 0
myItem = 0
with open('d:/config.txt', 'r') as f:
gen_lines = (line.rstrip() for line in f if line.strip())
for line in gen_lines:
if line[0:7]=='config ':
mySection = mySection + 1
newLine = line[7:]
# Create a new section
# Mark section as open
flagSection = True
elif line[0:5]=='edit ':
mySubsection = mySubsection + 1
newLine = line[5:]
# Create a new sub-section
# Mark subsection as open
flagSub = True
elif line[0:4]=='set ':
myItem = myItem + 1
name, value = line.split(' ', 2)[1:]
# Add to whatever is open
elif line=='end':
    # If a subsection is open, close it first
    if flagSub:
        flagSub = False
    # Otherwise, if a section is open, close it
    elif flagSection:
        flagSection = False
    continue
The instruction gen_lines = (line.rstrip() for line in f if line.strip()) creates a generator of the non-empty lines (thanks to the test if line.strip()), without the trailing newline and without blanks at the right (thanks to line.rstrip()).
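For example, with a made-up list standing in for the file object:
lines = ['config system\n', '   \n', '  set hostname "FG01"  \n']
gen_lines = (line.rstrip() for line in lines if line.strip())
print list(gen_lines)   # ['config system', '  set hostname "FG01"']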
If I knew more about the operations you want to perform with name and value, and in the section opened by if line=='end', I could propose code using regexes.
Edit
from time import clock
n = 1000000
print 'Measuring times with clock()'
te = clock()
for i in xrange(n):
x = ('abcdfafdf'[:3] == 'end')
print clock()-te,
print "\tx = ('abcdfafdf'[:3] == 'end')"
te = clock()
for i in xrange(n):
x = 'abcdfafdf'.startswith('end')
print clock()-te,
print "\tx = 'abcdfafdf'.startswith('end')"
print '\nMeasuring times with timeit module'
import timeit
ti = timeit.repeat("x = ('abcdfafdf'[:3] == 'end')",repeat=10,number = n)
print min(ti),
print "\tx = ('abcdfafdf'[:3] == 'end')"
to = timeit.repeat("x = 'abcdfafdf'.startswith('end')",repeat=10,number = n)
print min(to),
print "\tx = 'abcdfafdf'.startswith('end')"
result:
Measuring times with clock()
0.543445605517 x = ('abcdfafdf'[:3] == 'end')
1.08590449345 x = 'abcdfafdf'.startswith('end')
Measuring times with timeit module
0.294152748464 x = ('abcdfafdf'[:3] == 'end')
0.901923289133 x = 'abcdfafdf'.startswith('end')
Is the fact that the times are smaller with timeit than with clock() due to the garbage collector being disabled while the program runs? Anyway, with either clock() or the timeit module, executing startswith() takes more time than slicing.
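One way to check this (timeit turns garbage collection off during timing by default, so re-enabling it in the setup puts the two measurements on the same footing; a quick sketch):
import timeit

n = 1000000
# Re-enable the garbage collector for the duration of the measurement.
ti_gc = timeit.repeat("x = ('abcdfafdf'[:3] == 'end')",
                      setup="gc.enable()", repeat=10, number=n)
print min(ti_gc), "\tx = ('abcdfafdf'[:3] == 'end') with GC enabled"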
Note: (I've updated this since the first two suggestions... you can view the old post in txt form here: http://bennyland.com/old-2554127.txt). The update I made was to better understand what was going wrong - and now I at least sort of know what's happening but I have no clue how to fix it.
Anyway, using Django and Piston, I've set up a new BaseHandler class named BaseApiHandler which does most of the work I was doing across all of my handlers. This worked great until I added the ability to limit the filters being applied to my results (for instance 'give me the first result only').
Examples (I had to remove the ":" because I can't submit more URLs):
- http//localhost/api/hours_detail/empid/22 gives me all hours_detail rows from employee # 22
- http//localhost/api/hours_detail/empid/22/limit/first gives me the first hours_detail row from employee # 22
What's happening is that when I run /limit/first several times in succession, the first example then breaks, behaving as if it were a /limit/ URL when it isn't.
Right now I'm storing whether or not it's a limit and what the limit is in a new class - prior to this stackoverflow edit, I was just using a list with two entries (limit = [] when initialized, limit = [0,1] when set). Prior to this stackoverflow edit, once you spammed /limit/first, when going to the first example 'limit' would be pre-set to [0,1] and the handler would then limit the query because of this. With the debug data I've added, I can say for certain that the list was pre-set and not getting set during the execution of the code.
I'm adding debug info into my response so I can see what's happening. Right now when you first ask for Example 1's url, you get this CORRECT statusmsg response:
"statusmsg": "2 hours_detail found with query: {'empid':'22','datestamp':'2009-03-02',}",
When you ask for Example 2's url, you get this CORRECT statusmsg response:
"statusmsg": "1 hours_detail found with query: {'empid':'22','datestamp':'2009-03-02','limit','first',with limit[0,1](limit,None... limit set 1 times),}",
However, if you refresh a bunch of times, the limit set value starts increasing (incrementing this value was something a friend of mine suggested to see if this variable was somehow getting kept around)
"statusmsg": "1 hours_detail found with query: {'empid':'22','datestamp':'2009-03-02','limit','first',with limit[0,1](limit,None... limit set 10 times),}",
Once that number goes above '1 times', you can start trying to get Example 1's url. Each time I now refresh example 1, I get odd results. Here are 3 different status messages from different refreshes (notice that in each one, 'limit':'first' is CORRECTLY missing from the kwarg's debug output while the actual value of islimit is hovering between 8 and 10):
"statusmsg": "1 hours_detail found with query: {'empid':'22','datestamp':'2009-03-02',with limit[0,1](limit,None... limit set 10 times),}",
"statusmsg": "1 hours_detail found with query: {'empid':'22','datestamp':'2009-03-02',with limit[0,1](limit,None... limit set 8 times),}",
"statusmsg": "1 hours_detail found with query: {'empid':'22','datestamp':'2009-03-02',with limit[0,1](limit,None... limit set 9 times),}",
So it would appear that this object is getting cached. Prior to changing 'limit' from a list to a class, it also appeared that the list version of 'limit' was getting cached, as after going to Example 2's url, I would sometimes have [0,1] as the limit.
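(As a side note on the "caching": in Python, anything assigned directly in a class body is a class attribute that is shared by every instance and, in a long-lived server process, persists between requests. A tiny standalone sketch of that effect, not specific to Piston:)
class FakeHandler(object):
    shared = []            # class attribute: one list shared by all instances

    def __init__(self):
        self.own = []      # instance attribute: a fresh list per instance

a = FakeHandler()
b = FakeHandler()
a.shared.append(1)
a.own.append(1)
print b.shared             # [1] -> looks "cached" across instances
print b.own                # []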
Here are the updated snippets of the code (remember, you can view the first post here: bennyland.com/old-2554127.txt)
URLS.PY - inside 'urlpatterns = patterns('
#hours_detail/id/{id}/empid/{empid}/projid/{projid}/datestamp/{datestamp}/daterange/{fromdate}to{todate}
#empid is required
url(r'^api/hours_detail/(?:' + \
r'(?:[/]?id/(?P<id>\d+))?' + \
r'(?:[/]?empid/(?P<empid>\d+))?' + \
r'(?:[/]?projid/(?P<projid>\d+))?' + \
r'(?:[/]?datestamp/(?P<datestamp>\d{4,}[-/\.]\d{2,}[-/\.]\d{2,}))?' + \
r'(?:[/]?daterange/(?P<daterange>(?:\d{4,}[-/\.]\d{2,}[-/\.]\d{2,})(?:to|/-)(?:\d{4,}[-/\.]\d{2,}[-/\.]\d{2,})))?' + \
r')+' + \
r'(?:/limit/(?P<limit>(?:first|last)))?' + \
r'(?:/(?P<exact>exact))?$', hours_detail_resource),
HANDLERS.PY
class ResponseLimit(object):
def __init__(self):
self._min = 0
self._max = 0
self._islimit = 0
@property
def min(self):
if self.islimit == 0:
raise LookupError("trying to access min when no limit has been set")
return self._min
@property
def max(self):
if self.islimit == 0:
raise LookupError("trying to access max when no limit has been set")
return self._max
@property
def islimit(self):
return self._islimit
def setlimit(self, min, max):
self._min = min
self._max = max
# incrementing _islimit instead of using a bool so I can try and see why it's broken
self._islimit += 1
class BaseApiHandler(BaseHandler):
limit = ResponseLimit()
def __init__(self):
self._post_name = 'base'
@property
def post_name(self):
return self._post_name
@post_name.setter
def post_name(self, value):
self._post_name = value
def process_kwarg_read(self, key, value, d_post, b_exact):
"""
this should be overridden in the derived classes to process kwargs
"""
pass
# override 'read' so we can better handle our api's searching capabilities
def read(self, request, *args, **kwargs):
d_post = {'status':0,'statusmsg':'Nothing Happened'}
try:
# setup the named response object
# select all employees then filter - querysets are lazy in django
# the actual query is only done once data is needed, so this may
# seem like some memory hog slow beast, but it's actually not.
d_post[self.post_name] = self.queryset(request)
s_query = ''
b_exact = False
if 'exact' in kwargs and kwargs['exact'] <> None:
b_exact = True
s_query = '\'exact\':True,'
for key,value in kwargs.iteritems():
# the regex url possibilities will push None into the kwargs dictionary
# if not specified, so just continue looping through if that's the case
if value is None or key == 'exact':
continue
# write to the s_query string so we have a nice error message
s_query = '%s\'%s\':\'%s\',' % (s_query, key, value)
# now process this key/value kwarg
self.process_kwarg_read(key=key, value=value, d_post=d_post, b_exact=b_exact)
# end of the kwargs for loop
else:
if self.limit.islimit > 0:
s_query = '%swith limit[%s,%s](limit,%s... limit set %s times),' % (s_query, self.limit.min, self.limit.max, kwargs['limit'],self.limit.islimit)
d_post[self.post_name] = d_post[self.post_name][self.limit.min:self.limit.max]
if d_post[self.post_name].count() == 0:
d_post['status'] = 0
d_post['statusmsg'] = '%s not found with query: {%s}' % (self.post_name, s_query)
else:
d_post['status'] = 1
d_post['statusmsg'] = '%s %s found with query: {%s}' % (d_post[self.post_name].count(), self.post_name, s_query)
except:
e = sys.exc_info()[1]
d_post['status'] = 0
d_post['statusmsg'] = 'error: %s %s' % (e, traceback.format_exc())
d_post[self.post_name] = []
return d_post
class HoursDetailHandler(BaseApiHandler):
#allowed_methods = ('GET', 'PUT', 'POST', 'DELETE',)
model = HoursDetail
exclude = ()
def __init__(self):
BaseApiHandler.__init__(self)
self._post_name = 'hours_detail'
def process_kwarg_read(self, key, value, d_post, b_exact):
# each query is handled slightly differently... when keys are added
# handle them in here. python doesn't have switch statements, this
# could theoretically be performed using a dictionary with lambda
# expressions, however I was afraid it would mess with the way the
# filters on the queryset work so I went for the less exciting
# if/elif block instead
# querying on a specific row
if key == 'id':
d_post[self.post_name] = d_post[self.post_name].filter(pk=value)
# filter based on employee id - this is guaranteed to happen once
# per query (see read(...))
elif key == 'empid':
d_post[self.post_name] = d_post[self.post_name].filter(emp__id__exact=value)
# look for a specific project by id
elif key == 'projid':
d_post[self.post_name] = d_post[self.post_name].filter(proj__id__exact=value)
elif key == 'datestamp' or key == 'daterange':
d_from = None
d_to = None
# first, regex out the times in the case of range vs stamp
if key == 'daterange':
m = re.match('(?P<daterangefrom>\d{4,}[-/\.]\d{2,}[-/\.]\d{2,})(?:to|/-)(?P<daterangeto>\d{4,}[-/\.]\d{2,}[-/\.]\d{2,})', \
value)
d_from = datetime.strptime(m.group('daterangefrom'), '%Y-%m-%d')
d_to = datetime.strptime(m.group('daterangeto'), '%Y-%m-%d')
else:
d_from = datetime.strptime(value, '%Y-%m-%d')
d_to = datetime.strptime(value, '%Y-%m-%d')
# now min/max to get midnight on day1 through just before midnight on day2
# note: this is a hack because as of the writing of this app,
# __date doesn't yet exist as a queryable field thus any
# timestamps not at midnight were incorrectly left out
d_from = datetime.combine(d_from, time.min)
d_to = datetime.combine(d_to, time.max)
d_post[self.post_name] = d_post[self.post_name].filter(clock_time__gte=d_from)
d_post[self.post_name] = d_post[self.post_name].filter(clock_time__lte=d_to)
elif key == 'limit':
order_by = 'clock_time'
if value == 'last':
order_by = '-clock_time'
d_post[self.post_name] = d_post[self.post_name].order_by(order_by)
self.limit.setlimit(0, 1)
else:
raise NameError
def read(self, request, *args, **kwargs):
# empid is required, so make sure it exists before running BaseApiHandler's read method
if not('empid' in kwargs and kwargs['empid'] <> None and kwargs['empid'] >= 0):
return {'status':0,'statusmsg':'empid cannot be empty'}
else:
return BaseApiHandler.read(self, request, *args, **kwargs)
I would say that there is a basic flaw in your code if has_limit() can return True when limit is a list of length 2, because this line will fail whenever limit is shorter than 3 elements:
s_query = '%swith limit[%s,%s](limit,%s > traceback:%s),' % \
    (s_query, self.limit[0], self.limit[1], kwargs['limit'], self.limit[2])
Why are you initializing self.limit to an invalid length list? You could also make this code a little more defensive:
if self.has_limit():
    s_query += 'with limit[%s,%s]' % tuple(self.limit[0:2])
    if 'limit' in kwargs and len(self.limit) > 2:
        s_query += '(limit,%s > traceback:%s),' % \
            (kwargs['limit'], self.limit[2])
I think you may be creating an alias to your internal limit list via the get_limit property accessor. Try removing it (or at least adding a print statement inside the accessor). If you have external code that binds a local list to get_limit, then that code can update the contents using append, del, or assignment to slices such as [:]. Or try this:
def get_limit(self):
return self._limit[:]
Instead of handing out a reference to your internal list, this returns a copy of it.
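A quick standalone illustration of the aliasing problem described above (a hypothetical class, not the actual handler code):
class Holder(object):
    def __init__(self):
        self._limit = []

    def get_limit_alias(self):
        return self._limit        # hands back the real internal list

    def get_limit_copy(self):
        return self._limit[:]     # hands back an independent copy

h = Holder()
outside = h.get_limit_alias()
outside.extend([0, 1])            # mutates h._limit as well
print h._limit                    # [0, 1] -> internal state changed from outside

h2 = Holder()
outside2 = h2.get_limit_copy()
outside2.extend([0, 1])           # only the copy changes
print h2._limit                   # []     -> internal state untouched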