I'm trying to load data from a list in Python, where I save the data in a list:
class Region(object):
    def __init__(self, cities: list[CitiySalah], label: str):
        self.cities = cities
        self.label = label

    def toMap(self) -> dict:
        map = self.__dict__
        r = [{}]
        for x in self.cities:
            r.append(x.toMap())
        map["cities"] = r
        return map
and the one that loads the data is:
def startLoading() -> list[Region]:
    .......
    for select in selects:
        if select.has_attr("name") and select['name'] == "ville":
            groups = select.find_all('optgroup')
            for group in groups:
                lable = group['label']
                allR = {"lable": lable}
                cities = [CitiySalah]
                for option in group:
                    try:
                        # the city
                        if option.has_attr('value'):
                            value = option['value']
                            city = str(option).split('<')[1].split('>')[1]
                            id = str(option).split('<')[1].split(
                                '?ville=')[1].split('"')[0]
                            dataUrl = url + "?ville=" + id
                            data = MySalah(getSalahHour(dataUrl))
                            R = CitiySalah(argu={"value": value,
                                                 "city": city,
                                                 "dataUrl": dataUrl,
                                                 "id": id, }, data=data)
                            # print(R.toMap())
                            cities.append(R)
                    except:
                        pass
                # allR['cities'] = cities
                res.append(Region(label=lable, cities=cities))
    return res
and when I try to call the function with:
def getDataForDatabase():
    listR = [{}]
    data = startLoading()
    for x in data:
        listR.append(x.toMap())
    return listR
I get this error:
Traceback (most recent call last):
File "/home/nimr/ServerApps/ScrappingProject/Salah/Functions.py", line 108, in <module>
print(getDataForDatabase())
File "/home/nimr/ServerApps/ScrappingProject/Salah/Functions.py", line 104, in getDataForDatabase
listR.append(x.toMap())
TypeError: toMap() missing 1 required positional argument: 'self'
I have tried to debug this, but I'm still new to Python (I come from a C++ background) and I'm stuck here.
I need to save the data in a list and convert the items into maps, so I can save them in a NoSQL database.
The rest of the models work correctly, so I don't know why I'm getting this error.
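For what it's worth, cities = [CitiySalah] does not declare an empty list of CitiySalah; it creates a one-element list whose first item is the class object itself. If res (hidden behind the ....... in startLoading) is initialized the same way, then the first element of the list returned by startLoading is the class Region, and calling toMap() on the class rather than on an instance raises exactly this TypeError. A minimal sketch of the suspected problem, using a hypothetical Demo class as a stand-in:

class Demo(object):
    def toMap(self) -> dict:
        return self.__dict__

res = [Demo]        # a list containing the class itself, not an empty list
res[0].toMap()      # TypeError: toMap() missing 1 required positional argument: 'self'

# What was probably intended:
res = []                 # a plain empty list
cities: list[Demo] = []  # or an empty list with a type annotation

Initializing listR and r as [{}] has a similar effect: each result list starts with a spurious empty dict.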
I would like to create a program that converts money from one type of currency to another. So far this is my code:
def read_exchange_rates(exchange_file_name):
    # Reads a file with exchange rates formatted like "USD,1". Each line is a
    # 3-letter currency code and a float to convert it to USD.
    f = open(exchange_file_name, "r")
    answer = {}
    for line in f:
        k, v = line.split(",")
        answer[k] = float(v)
    return answer
    f.close()
    pass
class Money:
    exchange_rates = read_exchange_rates(rate_file)
    # calls the previously defined function to read the exchange rate file

    def __init__(self, monamount, code):
        self.monamount = monamount
        self.code = code

    def to(self, othercode):
        i = self.monamount/self.exchange_rates[self.code]
        j = i*self.exchange_rates[self.othercode]
        return othercode + str(j)
It should return the converted amount along with its currency code (othercode), but instead it raises a KeyError. If I type
a=Money(650,'USD')
b=a.to('GBP')
it should return GBP followed by a number. This is the error. Thank you!
Traceback (most recent call last):
File "<pyshell#126>", line 1, in <module>
b=a.to('GBP')
File "<pyshell#124>", line 9, in to
i = self.monamount/self.exchange_rates[self.code]
KeyError: 'USD'
Are you sure your file contains a 'USD' key?
Here is slightly modified and simplified code (so I didn't have to create an exchange rate file). Note that your to method also referred to self.othercode, which doesn't exist; it should use the othercode parameter directly:
def read_exchange_rates():
    answer = {}
    answer['USD'] = 1
    answer['GBP'] = 0.76
    return answer

class Money:
    exchange_rates = read_exchange_rates()

    def __init__(self, monamount, code):
        self.monamount = monamount
        self.code = code

    def to(self, othercode):
        i = self.monamount/self.exchange_rates[self.code]
        j = i*self.exchange_rates[othercode]
        return othercode + str(j)

a = Money(650, 'USD')
b = a.to('GBP')
print(b)
This prints out GBP494.0.
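If the file is the culprit, a stricter reader (a sketch, keeping the CODE,rate format from the original question) avoids KeyErrors caused by blank lines or stray whitespace around the currency codes:

def read_exchange_rates(exchange_file_name):
    answer = {}
    with open(exchange_file_name) as f:  # also closes the file; the original
                                         # f.close() after return never runs
        for line in f:
            line = line.strip()          # drop the trailing newline
            if not line:
                continue                 # skip blank lines
            k, v = line.split(",")
            answer[k.strip().upper()] = float(v)
    return answer

Printing Money.exchange_rates right after the class definition is a quick way to check which keys were actually loaded.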
I am building an algorithm for sentiment analysis that segments a .txt corpus, but there is a problem in the code that I don't know how to resolve.
class Splitter(object):
    def _init_(self):
        self.nltk_splitter = nltk.data.load('tokenizers/punkt/english/pickle')
        self.nltk_tokenizer = nltk.tokenize.TreebankWordTokenizer()

    def split(self, text):
        """input format: the text of a .txt file
        output format: a list of lists of words,
        e.g. [['this', 'is'], ['life', 'worth', 'living']]"""
        sentences = self.nltk_splitter.tokenize(text)
        tokenized_sentences = [self.nltk_tokenizer.tokenize(sent) for sent in sentences]
        return tokenized_sentences
and then I did the following:
>>> f = open('amazonshoes.txt')
>>> raw = f.read()
>>> text = nltk.Text(raw)
>>> splitter = Splitter()
>>> splitted_sentences = splitter.split(text)
and the error is:
Traceback (most recent call last):
File "<pyshell#21>", line 1, in <module>
splitted_sentences = splitter.split(text)
File "<pyshell#14>", line 9, in split
sentences = self.nltk_splitter.tokenize(text)
AttributeError: 'Splitter' object has no attribute 'nltk_splitter'
The constructor of the class Splitter should be called __init__, with two leading and two trailing underscores.
Currently the _init_ method (single underscores) is never executed, so the Splitter object you create (by calling Splitter()) never acquires the attribute/field nltk_splitter.
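A corrected sketch (with two further assumptions: the punkt model is normally loaded as 'tokenizers/punkt/english.pickle', with a dot rather than a slash, and the tokenizers expect a plain string, so pass raw from your session rather than nltk.Text(raw)):

import nltk

class Splitter(object):
    def __init__(self):  # double underscores: now it runs when Splitter() is called
        self.nltk_splitter = nltk.data.load('tokenizers/punkt/english.pickle')
        self.nltk_tokenizer = nltk.tokenize.TreebankWordTokenizer()

    def split(self, text):
        """Input: a string. Output: a list of lists of words."""
        sentences = self.nltk_splitter.tokenize(text)
        return [self.nltk_tokenizer.tokenize(sent) for sent in sentences]

splitter = Splitter()
splitted_sentences = splitter.split(raw)  # the raw string read from the file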
I am trying to read key-value pairs from an already existing shelf to create a new class object with an updated field, and write that class object to a new shelf.
My class: SongDetails
This is the procedure which fails:
def updateShelfWithTabBody(shelfFileName, newShelfFileName):
    """This function updates SongDetails with the HTML body,
    i.e. just the part that contains the lyrics and
    chords in the tab."""
    # read all SongDetails
    shelf = shelve.open(shelfFileName)
    listOfKeys = shelf.keys()
    # create a new SongDetails object
    temporaryShelfObject = SongDetails.SongDetails()
    # iterate over the list of keys
    for key in listOfKeys:
        #print "name:" + shelf[key].name
        # fill temporaryShelfObject from the existing entry
        temporaryShelfObject.name = shelf[key].name
        temporaryShelfObject.tabHtmlPageContent = shelf[key].tabHtmlPageContent
        # add the new detail information
        htmlPageContent = shelf[key].tabHtmlPageContent
        temporaryShelfObject.htmlBodyContent = extractDataFromDocument.fetchTabBody(htmlPageContent)
        # write SongDetails back to the shelf
        writeSongDetails.writeSongDetails(temporaryShelfObject, newShelfFileName)
Definitions for functions used in above code:
def fetchTabBody(page_contents):
    soup = BeautifulSoup(page_contents)
    HtmlBody = ""
    try:
        # The lyrics and chords of the song are contained in the div with id="cont".
        # Note: this assumption is specific to ultimate-guitar.com.
        HtmlBody = soup.html.body.find("div", {"id": "cont"})
    except:
        print "Error: ", sys.exc_info()[0]
    return HtmlBody
def writeSongDetails(songDetails, shelfFileName):
    shelf = shelve.open(shelfFileName)
    songDetails.name = str(songDetails.name).strip(' ')
    shelf[songDetails.name] = songDetails
    shelf.close()
SongDetails class:
class SongDetails:
    name = ""
    tabHtmlPageContent = ""
    genre = ""
    year = ""
    artist = ""
    chordsAndLyrics = ""
    htmlBodyContent = ""
    scale = ""
    chordsUsed = []
This is the error that I get:
Traceback (most recent call last):
File "/l/nx/user/ndhande/Independent_Study_Project_Git/Crawler/updateSongDetailsShelfWithNewAttributes.py", line 69, in <module>
updateShelfWithTabBody(shelfFileName, newShelfFileName)
File "/l/nx/user/ndhande/Independent_Study_Project_Git/Crawler/updateSongDetailsShelfWithNewAttributes.py", line 38, in updateShelfWithTabBody
writeSongDetails.writeSongDetails(temporaryShelfObject, newShelfFileName)
File "/home/nx/user/ndhande/Independent_Study_Project_Git/Crawler/writeSongDetails.py", line 7, in writeSongDetails
shelf[songDetails.name] = songDetails
File "/usr/lib64/python2.6/shelve.py", line 132, in __setitem__
p.dump(value)
File "/usr/lib64/python2.6/copy_reg.py", line 71, in _reduce_ex
state = base(self)
File "/u/ndhande/.local/lib/python2.6/site-packages/BeautifulSoup.py", line 476, in __unicode__
return str(self).decode(DEFAULT_OUTPUT_ENCODING)
RuntimeError: maximum recursion depth exceeded
I couldn't find any reason why I'm getting this error, since there is no explicit recursive call in my code. I have seen this error in other Stack Overflow posts, but those cases did have recursive calls.
str(self) calls __str__, which falls back to __unicode__, which in turn calls str(self) again, so the calls recurse until Python hits its recursion limit. The culprit is the BeautifulSoup object stored in htmlBodyContent: pickling it (via shelve) triggers this __unicode__/str loop, as the last frames of your traceback show.
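One way out (a sketch reusing the names from the question) is to store plain markup strings on the shelf instead of live BeautifulSoup objects:

# In updateShelfWithTabBody: convert the tag to a string before shelving,
# so pickle never has to serialize a BeautifulSoup object.
tabBody = extractDataFromDocument.fetchTabBody(htmlPageContent)
temporaryShelfObject.htmlBodyContent = str(tabBody)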
Here's my problem: I'm trying to parse a big text file (about 15,000 KB) and write it to a MySQL database. I'm using Python 2.6, and the script parses about half the file and adds it to the database before freezing up. Sometimes it displays the text:
MemoryError.
Other times it simply freezes. I figured I could avoid this problem by using generators wherever possible, but I was apparently wrong.
What am I doing wrong?
When I press Ctrl+C to interrupt it, it shows this error message:
...
successfully added vote # 2281
successfully added vote # 2282
successfully added vote # 2283
successfully added vote # 2284
floorvotes_db.py:35: Warning: Data truncated for column 'vote_value' at row 1
r['bill ID'] , r['last name'], r['vote'])
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "floorvotes_db.py", line 67, in addAllFiles
addFile(file)
File "floorvotes_db.py", line 61, in addFile
add(record)
File "floorvotes_db.py", line 35, in add
r['bill ID'] , r['last name'], r['vote'])
File "build/bdist.linux-i686/egg/MySQLdb/cursors.py", line 166, in execute
File "build/bdist.linux-i686/egg/MySQLdb/connections.py", line 35, in defaulte rrorhandler
KeyboardInterrupt
import os, re, datetime, string

# Data
DIR = '/mydir'
tfn = r'C:\Documents and Settings\Owner\Desktop\data.txt'
rgxs = {
    'bill number': {
        'rgx': r'(A|S)[0-9]+-?[A-Za-z]* {50}'}
}

# Compile rgxs for speediness
for rgx in rgxs: rgxs[rgx]['rgx'] = re.compile(rgxs[rgx]['rgx'])
splitter = rgxs['bill number']['rgx']

# Guts
class floor_vote_file:
    def __init__(self, fn):
        self.iterdata = (str for str in
                         splitter.split(open(fn).read())
                         if str and str <> 'A' and str <> 'S')
    def iterVotes(self):
        for record in self.iterdata:  # was self.data, which is never set
            if record: yield billvote(record)
class billvote(object):
    def __init__(self, section):
        self.data = [line.strip() for line
                     in section.splitlines()]
        self.summary = self.data[1].split()
        self.vtlines = self.data[2:]
        self.date = self.date()
        self.year = self.year()
        self.votes = self.parse_votes()
        self.record = self.record()

    # Parse summary date
    def date(self):
        d = [int(str) for str in self.summary[0].split('/')]
        return datetime.date(d[2], d[0], d[1]).toordinal()

    def year(self):
        return datetime.date.fromordinal(self.date).year

    def session(self):
        """
        arg: 2-digit year int
        returns: 4-digit session
        """
        def odd():
            return divmod(self.year, 2)[1] == 1
        if odd():
            return str(string.zfill(self.year, 2)) + \
                   str(string.zfill(self.year + 1, 2))
        else:
            return str(string.zfill(self.year - 1, 2)) + \
                   str(string.zfill(self.year, 2))

    def house(self):
        if self.summary[2] == 'Assembly': return 1
        if self.summary[2] == 'Senate':   return 2

    def splt_v_line(self, line):
        return [string for string in line.split(' ')
                if string <> '']

    def splt_v(self, line):
        return line.split()

    def prse_v(self, item):
        """takes split_vote item"""
        return {
            'vote':      unicode(item[0]),
            'last name': unicode(' '.join(item[1:]))
        }

    # Parse votes - main
    def parse_votes(self):
        nested = [[self.prse_v(self.splt_v(vote))
                   for vote in self.splt_v_line(line)]
                  for line in self.vtlines]
        flattened = []
        for lst in nested:
            for dct in lst:
                flattened.append(dct)
        return flattened

    # Useful data objects
    def record(self):
        return {
            'date':    unicode(self.date),
            'year':    unicode(self.year),
            'session': unicode(self.session()),
            'house':   unicode(self.house()),
            'bill ID': unicode(self.summary[1]),
            'ayes':    unicode(self.summary[5]),
            'nays':    unicode(self.summary[7]),
        }

    def iterRecords(self):
        for vote in self.votes:
            r = self.record.copy()
            r['vote'] = vote['vote']
            r['last name'] = vote['last name']
            yield r

test = floor_vote_file(tfn)
import MySQLdb as dbapi2
import floorvotes_parse as v
import os

# Initial database crap
db = dbapi2.connect(db=r"db",
                    user="user",
                    passwd="XXXXX")
cur = db.cursor()
if db and cur: print "\nConnected to db.\n"

def commit(): db.commit()

def ext():
    cur.close()
    db.close()
    print "\nConnection closed.\n"

# DATA
DIR = '/mydir'
files = [DIR+fn for fn in os.listdir(DIR)
         if fn.startswith('fvote')]

# Add stuff
def add(r):
    """add a record"""
    cur.execute(
        u'''INSERT INTO ny_votes (vote_house, vote_date, vote_year, bill_id,
            member_lastname, vote_value) VALUES
            (%s, %s, %s,
             %s, %s, %s)''',
        (r['house'],   r['date'],      r['year'],
         r['bill ID'], r['last name'], r['vote'])
    )
    #print "added", r['year'], r['bill ID']
def crt():
    """create table"""
    SQL = """
    CREATE TABLE ny_votes (openleg_id INT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY,
        vote_house int(1), vote_date int(5), vote_year int(2), bill_id varchar(8),
        member_lastname varchar(50), vote_value varchar(10));
    """
    cur.execute(SQL)
    print "\nCreated ny_votes.\n"

def rst():
    SQL = """DROP TABLE ny_votes"""
    cur.execute(SQL)
    print "\nDropped ny_votes.\n"
    crt()
def addFile(fn):
    """parse and add all records in a file"""
    n = 0
    for votes in v.floor_vote_file(fn).iterVotes():
        for record in votes.iterRecords():
            add(record)
            n += 1
            print 'successfully added vote # ' + str(n)

def addAllFiles():
    for file in files:
        addFile(file)

if __name__ == '__main__':
    rst()
    addAllFiles()
Generators are a good idea, but you seem to have missed the biggest problem:
(str for str in splitter.split(open(fn).read()) if str and str <> 'A' and str <> 'S')
You're reading the whole file in at once even though you only need to work with bits at a time. Your code is too complicated for me to fix, but you should be able to use the file's iterator for your task:
(line for line in open(fn))
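For example, something along these lines (a sketch, assuming each record begins on a line matching the question's bill-number pattern, which may not hold for this exact file layout) keeps only one record in memory at a time:

import re

bill_rgx = re.compile(r'(A|S)[0-9]+-?[A-Za-z]*')

def iter_sections(fn):
    """Yield one record's worth of text at a time."""
    buf = []
    for line in open(fn):
        if bill_rgx.match(line) and buf:  # a new record starts here
            yield ''.join(buf)
            buf = []
        buf.append(line)
    if buf:
        yield ''.join(buf)                # the final record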
I noticed that you use a lot of split() calls. These can be memory-consuming, according to http://mail.python.org/pipermail/python-bugs-list/2006-January/031571.html . You can start investigating there.
Try commenting out add(record) to see whether the problem is in your code or on the database side. All the records are added in one transaction (if supported), and maybe that leads to a problem when it gets too many records. If commenting out add(record) helps, you could try calling commit() from time to time.
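For instance, reusing the add and commit helpers from the question (the batch size of 500 is arbitrary):

def addFile(fn, batch=500):
    """Parse and add all records in a file, committing every `batch` rows."""
    n = 0
    for votes in v.floor_vote_file(fn).iterVotes():
        for record in votes.iterRecords():
            add(record)
            n += 1
            if n % batch == 0:
                commit()
    commit()  # flush whatever is left over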
This isn't a Python memory issue, but perhaps it's worth thinking about. The previous answers make me think you'll sort that issue out quickly.
I wonder about the rollback logs in MySQL. If a single transaction is too large, perhaps you can checkpoint chunks: commit each chunk separately instead of trying to roll back a 15 MB file's worth.