I have a simple xmlrpc server which is written along the line of
server = SimpleXMLRPCServer(('127.0.0.1', 8000), allow_none=True)
server.register_function(self.fetch_buyer_data, "fetch_buyer_data")
...
...
server.serve_forever()
This is not the complete code, but you get the idea (hopefully)!
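For illustration, here is a minimal, self-contained sketch of that pattern; the class name and overall structure are assumptions for the example, not my actual code:

from SimpleXMLRPCServer import SimpleXMLRPCServer

class DataServer(object):
    def fetch_buyer_data(self, projectname):
        # Placeholder; the real implementation reads from sqlite3 (see below).
        return []

    def serve(self):
        server = SimpleXMLRPCServer(('127.0.0.1', 8000), allow_none=True)
        server.register_function(self.fetch_buyer_data, "fetch_buyer_data")
        server.serve_forever()

if __name__ == '__main__':
    DataServer().serve()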
In the same server script, I have a function that reads off the contents of an sqlite3 database and returns all the data. Something like this:
def fetch_buyer_data(self, projectname):
    conn = sqlite3.connect(...)
    # read data from sqlite3 database and save it into list
    conn.close()
    return datalist
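For illustration, a runnable sketch of what that method body might look like; the database file, table and column names below are hypothetical, not my actual schema:

import sqlite3

def fetch_buyer_data(self, projectname):
    # Hypothetical sketch: 'buyers.db', the 'buyers' table and the 'project'
    # column are assumptions for illustration only.
    conn = sqlite3.connect('buyers.db')
    try:
        cur = conn.cursor()
        cur.execute("SELECT * FROM buyers WHERE project = ?", (projectname,))
        datalist = cur.fetchall()  # list of row tuples, marshallable by xmlrpclib
    finally:
        conn.close()
    return datalist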
And I use the following code from the client to access the above function.
proxy = xmlrpclib.ServerProxy("http://%s:%s/" % (hostip, hostport), allow_none=True)
data = proxy.fetch_buyer_data(SELECTED_PROJECT)
It's all good until the data in the sqlite3 database gets larger (not very large, just a few megabytes); then I keep getting the following error message:
Traceback (most recent call last):
File "C:\Custom\src\Client\client.py", line 178, in show_user_page
userpage = UserPage()
File "C:\Custom\src\Client\client.py", line 2371, in __init__
self.update_buyer_table()
File "C:\Custom\src\Client\client.py", line 6106, in update_buyer_table
data = proxy.fetch_buyer_data(SELECTED_PROJECT)
File "C:\Python27\Lib\xmlrpclib.py", line 1224, in __call__
return self.__send(self.__name, args)
File "C:\Python27\Lib\xmlrpclib.py", line 1578, in __request
verbose=self.__verbose
File "C:\Python27\Lib\xmlrpclib.py", line 1264, in request
return self.single_request(host, handler, request_body, verbose)
File "C:\Python27\Lib\xmlrpclib.py", line 1297, in single_request
return self.parse_response(response)
File "C:\Python27\Lib\xmlrpclib.py", line 1453, in parse_response
stream = GzipDecodedResponse(response)
File "C:\Python27\Lib\xmlrpclib.py", line 1204, in __init__
self.stringio = StringIO.StringIO(response.read())
File "C:\Python27\Lib\httplib.py", line 548, in read
s = self._safe_read(self.length)
File "C:\Python27\Lib\httplib.py", line 649, in _safe_read
raise IncompleteRead(''.join(s), amt)
httplib.IncompleteRead: IncompleteRead(8031 bytes read, 1732 more expected)
NOTE: I have checked that the rest of the registered functions on the server are working, so I can rule out connection problems (IP, port, etc.).
What is causing this error message? How do I overcome the problem?
I am using Python 2.7 on Windows XP SP3.
UPDATE 1:
I found out that this doesn't entirely depend on the size of the database. Sometimes it gives me the error message, sometimes it doesn't. Could anyone tell me what is causing this IncompleteRead problem?
Related
I am trying to run very straightforward code to figure out how to use pymongo with the MongoDB Atlas Cloud.
Here is the example code
import pymongo
client = pymongo.MongoClient("mongodb+srv://{myusername}:{mypassword}#cluster0-uywu8.mongodb.net/test?retryWrites=true&w=majority")
db = client.BroadwayMatch
print(db)
collection = db.Artists
print(collection)
print(collection.insert_one({'x': 1}))
BroadwayMatch and Artists are an existing database and collection that I was able to insert into last week; I am not sure what changed. It seems to connect to the database and collection successfully, but for some reason it is unable to read or write to them. All attributes of collections can be accessed, but all methods result in a ServerSelectionTimeoutError. Here is the output from this snippet:
Database(MongoClient(host=['cluster0-shard-00-01-uywu8.mongodb.net:27017', 'cluster0-shard-00-00-uywu8.mongodb.net:27017', 'cluster0-shard-00-02-uywu8.mongodb.net:27017'], document_class=dict, tz_aware=False, connect=True, authsource='admin', replicaset='Cluster0-shard-0', ssl=True, retrywrites=True, w='majority'), 'BroadwayMatch')
Collection(Database(MongoClient(host=['cluster0-shard-00-01-uywu8.mongodb.net:27017', 'cluster0-shard-00-00-uywu8.mongodb.net:27017', 'cluster0-shard-00-02-uywu8.mongodb.net:27017'], document_class=dict, tz_aware=False, connect=True, authsource='admin', replicaset='Cluster0-shard-0', ssl=True, retrywrites=True, w='majority'), 'BroadwayMatch'), 'Artists')
Traceback (most recent call last):
File "C:\Python37\Spotify-Match\mongotest.py", line 10, in <module>
print(collection.insert_one({'x': 1}))
File "C:\Python37\lib\site-packages\pymongo\collection.py", line 700, in insert_one
session=session),
File "C:\Python37\lib\site-packages\pymongo\collection.py", line 614, in _insert
bypass_doc_val, session)
File "C:\Python37\lib\site-packages\pymongo\collection.py", line 602, in _insert_one
acknowledged, _insert_command, session)
File "C:\Python37\lib\site-packages\pymongo\mongo_client.py", line 1279, in _retryable_write
with self._tmp_session(session) as s:
File "C:\Python37\lib\contextlib.py", line 112, in __enter__
return next(self.gen)
File "C:\Python37\lib\site-packages\pymongo\mongo_client.py", line 1611, in _tmp_session
s = self._ensure_session(session)
File "C:\Python37\lib\site-packages\pymongo\mongo_client.py", line 1598, in _ensure_session
return self.__start_session(True, causal_consistency=False)
File "C:\Python37\lib\site-packages\pymongo\mongo_client.py", line 1551, in __start_session
server_session = self._get_server_session()
File "C:\Python37\lib\site-packages\pymongo\mongo_client.py", line 1584, in _get_server_session
return self._topology.get_server_session()
File "C:\Python37\lib\site-packages\pymongo\topology.py", line 434, in get_server_session
None)
File "C:\Python37\lib\site-packages\pymongo\topology.py", line 200, in _select_servers_loop
self._error_message(selector))
pymongo.errors.ServerSelectionTimeoutError: connection closed,connection closed,connection closed
I'm not sure what I am doing wrong; can anyone help?
Reasons you might not be able to connect to an Atlas server:
Your IP whitelist does not include your current IP address.
You are using the wrong username and/or password. In your case it looks like your f-string is missing the f at the start.
When diagnosing these conditions, cutting and pasting the MongoDB Atlas connection string (see below) into the MongoDB shell or MongoDB Compass can often expose username and/or password errors.
Either your mongo server is not exposed, or it is not on the default port. Try the following:
import pymongo
client = pymongo.MongoClient("mongodb://uname:pass#ip:port/")
db = client['BroadwayMatch']
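Tying the f-string point back to the question's code, here is a sketch of what the corrected connection string might look like (credentials are placeholders, and I am assuming the # in the posted string was meant to be @):

import pymongo

# Sketch of the corrected call from the question: note the leading f so the
# {myusername}/{mypassword} placeholders are actually interpolated, and the
# @ separating the credentials from the cluster host.
myusername = "your_atlas_user"      # placeholder
mypassword = "your_atlas_password"  # placeholder

client = pymongo.MongoClient(
    f"mongodb+srv://{myusername}:{mypassword}@cluster0-uywu8.mongodb.net/"
    "test?retryWrites=true&w=majority"
)
db = client.BroadwayMatch
collection = db.Artists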
I'm trying to figure out what causes this error when I run my app using the basic Flask server during development. I start it with this:
from myapp import app
app.run(debug=True, port=5001)
All is well and I'll continue to code and refresh etc, but then after a while I get the recursion error and have to Ctrl-C the server and restart it. Not a big deal, just a little annoying to have to deal with every now and then.
Here's the full traceback, which I tried to use to determine the cause but can't see anything that stands out (possibly something to do with how werkzeug uses Cookie.py?):
Traceback (most recent call last):
File "/Users/jeff/.virtualenvs/fmll/lib/python2.7/site-packages/flask/app.py", line 1701, in __call__
return self.wsgi_app(environ, start_response)
File "/Users/jeff/.virtualenvs/fmll/lib/python2.7/site-packages/werkzeug/wsgi.py", line 411, in __call__
return self.app(environ, start_response)
(last bit repeated a bunch - trimmed to fit in posting size requirements)
File "/Users/jeff/.virtualenvs/fmll/lib/python2.7/site-packages/flask/app.py", line 1685, in wsgi_app
with self.request_context(environ):
File "/Users/jeff/.virtualenvs/fmll/lib/python2.7/site-packages/flask/ctx.py", line 274, in __enter__
self.push()
File "/Users/jeff/.virtualenvs/fmll/lib/python2.7/site-packages/flask/ctx.py", line 238, in push
self.session = self.app.open_session(self.request)
File "/Users/jeff/.virtualenvs/fmll/lib/python2.7/site-packages/flask/app.py", line 792, in open_session
return self.session_interface.open_session(self, request)
File "/Users/jeff/.virtualenvs/fmll/lib/python2.7/site-packages/flask/sessions.py", line 191, in open_session
secret_key=key)
File "/Users/jeff/.virtualenvs/fmll/lib/python2.7/site-packages/werkzeug/contrib/securecookie.py", line 309, in load_cookie
data = request.cookies.get(key)
File "/Users/jeff/.virtualenvs/fmll/lib/python2.7/site-packages/werkzeug/utils.py", line 77, in __get__
value = self.func(obj)
File "/Users/jeff/.virtualenvs/fmll/lib/python2.7/site-packages/werkzeug/wrappers.py", line 418, in cookies
cls=self.dict_storage_class)
File "/Users/jeff/.virtualenvs/fmll/lib/python2.7/site-packages/werkzeug/http.py", line 741, in parse_cookie
cookie.load(header)
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/Cookie.py", line 632, in load
self.__ParseString(rawdata)
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/Cookie.py", line 665, in __ParseString
self.__set(K, rval, cval)
File "/Users/jeff/.virtualenvs/fmll/lib/python2.7/site-packages/werkzeug/_internal.py", line 290, in _BaseCookie__set
morsel = self.get(key, _ExtendedMorsel())
File "/Users/jeff/.virtualenvs/fmll/lib/python2.7/site-packages/werkzeug/_internal.py", line 271, in __init__
Morsel.__init__(self)
File "/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/Cookie.py", line 438, in __init__
dict.__setitem__(self, K, "")
RuntimeError: maximum recursion depth exceeded while calling a Python object
Since it occurs during your development process, you could increase the recursion limit before starting your server, using:
import sys

sys.setrecursionlimit(2000) # Choose the right figure for you here
# the value on my system is 1000 but this is platform-dependent
However, you should use it very carefully, and probably not in production unless you have a good knowledge of its impact.
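In the context of the question's startup snippet, that would look something like this (a sketch; 2000 is an arbitrary example value):

import sys

from myapp import app

# Raise the recursion limit before starting the development server.
# 2000 is an arbitrary example; tune it for your own environment.
sys.setrecursionlimit(2000)

app.run(debug=True, port=5001)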
Ref : http://docs.python.org/2/library/sys.html#sys.setrecursionlimit
I use:
MongoDB 1.6.5
Pymongo 1.9
Python 2.6.6
I have 3 types of daemons. The 1st loads data from the web, the 2nd analyzes it and saves the result, and the 3rd groups the results. All of them work with MongoDB.
At some point the 3rd daemon throws many exceptions like this (mostly when there is a large amount of data in the DB):
Traceback (most recent call last):
File "/usr/local/lib/python2.6/dist-packages/gevent-0.13.1-py2.6-linux-x86_64.egg/gevent/greenlet.py", line 405, in run
result = self._run(*self.args, **self.kwargs)
File "/data/www/spider/daemon/scripts/mainconverter.py", line 72, in work
for item in res:
File "/usr/local/lib/python2.6/dist-packages/pymongo-1.9_-py2.6-linux-x86_64.egg/pymongo/cursor.py", line 601, in next
if len(self.__data) or self._refresh():
File "/usr/local/lib/python2.6/dist-packages/pymongo-1.9_-py2.6-linux-x86_64.egg/pymongo/cursor.py", line 564, in _refresh
self.__query_spec(), self.__fields))
File "/usr/local/lib/python2.6/dist-packages/pymongo-1.9_-py2.6-linux-x86_64.egg/pymongo/cursor.py", line 521, in __send_message
**kwargs)
File "/usr/local/lib/python2.6/dist-packages/pymongo-1.9_-py2.6-linux-x86_64.egg/pymongo/connection.py", line 743, in _send_message_with_response
return self.__send_and_receive(message, sock)
File "/usr/local/lib/python2.6/dist-packages/pymongo-1.9_-py2.6-linux-x86_64.egg/pymongo/connection.py", line 724, in __send_and_receive
return self.__receive_message_on_socket(1, request_id, sock)
File "/usr/local/lib/python2.6/dist-packages/pymongo-1.9_-py2.6-linux-x86_64.egg/pymongo/connection.py", line 714, in __receive_message_on_socket
struct.unpack("<i", header[8:12])[0])
AssertionError: ids don't match -561338340 0
<Greenlet at 0x2baa628: <bound method Worker.work of <scripts.mainconverter.Worker object at 0x2ba8450>>> failed with AssertionError
Can anyone tell me what causes this exception and how to fix it?
Thanks.
This is likely a threading problem related to how you are using worker threads with gevent coroutines. It seems like the pymongo connection object is reading a response for a request it didn't make.
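If the connection is indeed being shared across greenlets, one way to avoid that is to give each greenlet its own connection. A rough sketch, assuming pymongo 1.9's Connection API and illustrative host/database/collection names:

import gevent
import pymongo

def work(job_id):
    # Each greenlet opens its own connection, so concurrent requests and
    # responses cannot get interleaved on a shared socket.
    # Host, port, database and collection names here are illustrative.
    conn = pymongo.Connection('localhost', 27017)
    try:
        for item in conn.spider.results.find({'job': job_id}):
            pass  # process item
    finally:
        conn.disconnect()

jobs = [gevent.spawn(work, i) for i in range(3)]
gevent.joinall(jobs)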
I'm trying out Dowser; it's really cool, but I'm stuck on a little problem and I couldn't find anything helpful on Google, so here I am.. ^^;
I'm running a CherryPy + SQLAlchemy application. It works normally, except that when I enable Dowser (that is, after I call dowser.Root()), now and then I get exceptions like:
SystemError: ../Objects/tupleobject.c:809: bad argument to internal function
on innocent-looking instructions, such as accessing an SQLA-mapped field. Relevant part of the traceback:
File "/usr/local/lib/python2.6/dist-packages/SQLAlchemy-0.5.8-py2.6.egg/sqlalchemy/orm/attributes.py", line 158, in __get__
return self.impl.get(instance_state(instance), instance_dict(instance))
File "/usr/local/lib/python2.6/dist-packages/SQLAlchemy-0.5.8-py2.6.egg/sqlalchemy/orm/attributes.py", line 377, in get
value = callable_()
File "/usr/local/lib/python2.6/dist-packages/SQLAlchemy-0.5.8-py2.6.egg/sqlalchemy/orm/strategies.py", line 586, in __call__
result = q.all()
File "/usr/local/lib/python2.6/dist-packages/SQLAlchemy-0.5.8-py2.6.egg/sqlalchemy/orm/query.py", line 1267, in all
return list(self)
File "/usr/local/lib/python2.6/dist-packages/SQLAlchemy-0.5.8-py2.6.egg/sqlalchemy/orm/query.py", line 1422, in instances
rows = [process[0](context, row) for row in fetch]
File "/usr/local/lib/python2.6/dist-packages/SQLAlchemy-0.5.8-py2.6.egg/sqlalchemy/orm/query.py", line 2032, in main
return _instance(row, None)
File "/usr/local/lib/python2.6/dist-packages/SQLAlchemy-0.5.8-py2.6.egg/sqlalchemy/orm/mapper.py", line 1653, in _instance
identitykey = identity_key(row)
File "/usr/local/lib/python2.6/dist-packages/SQLAlchemy-0.5.8-py2.6.egg/sqlalchemy/orm/mapper.py", line 1594, in identity_key
return (identity_class, tuple(row[column] for column in pk_cols))
Could this be related to the Dowser thread, accessing the garbage collector? Any hint on what I could check out?
I'm running Python 2.6.2 on Xubuntu Jaunty.
Thanks for your attention!
Hey, any idea what this timeout error I am getting here is about:
Error trace:
File "/array/purato/python2.6/lib/python2.6/site-packages/cherrypy/_cprequest.py", line 606, in respond
cherrypy.response.body = self.handler()
File "/array/purato/python2.6/lib/python2.6/site-packages/cherrypy/_cpdispatch.py", line 25, in __call__
return self.callable(*self.args, **self.kwargs)
File "sync_server.py", line 853, in put_file
return RequestController_v1_0.put_file(self, *args, **kw)
File "sync_server.py", line 409, in put_file
saved_path, tgt_path, root_folder = self._save_file(client_id, theFile)
File "sync_server.py", line 404, in _save_file
saved_path, tgt_path, root_folder = get_posted_file(cherrypy.request, 'theFile', staging_path)
File "sync_server.py", line 1031, in get_posted_file
, keep_blank_values=True)
File "/array/purato/python2.6/lib/python2.6/cgi.py", line 496, in __init__
self.read_multi(environ, keep_blank_values, strict_parsing)
File "/array/purato/python2.6/lib/python2.6/cgi.py", line 620, in read_multi
environ, keep_blank_values, strict_parsing)
File "/array/purato/python2.6/lib/python2.6/cgi.py", line 498, in __init__
self.read_single()
File "/array/purato/python2.6/lib/python2.6/cgi.py", line 635, in read_single
self.read_lines()
File "/array/purato/python2.6/lib/python2.6/cgi.py", line 657, in read_lines
self.read_lines_to_outerboundary()
File "/array/purato/python2.6/lib/python2.6/cgi.py", line 685, in read_lines_to_outerboundary
line = self.fp.readline(1<<16)
File "/array/purato/python2.6/lib/python2.6/site-packages/cherrypy/wsgiserver/__init__.py", line 206, in readline
data = self.rfile.readline(size)
File "/array/purato/python2.6/lib/python2.6/site-packages/cherrypy/wsgiserver/__init__.py", line 868, in readline
data = self.recv(self._rbufsize)
File "/array/purato/python2.6/lib/python2.6/site-packages/cherrypy/wsgiserver/__init__.py", line 747, in recv
return self._sock.recv(size)
timeout: timed out
Here is the code which is getting called:
def get_posted_file(request, form_field_name, tgt_folder, tgt_fname=None):
    logger.debug('get_posted_file: %s' % request.headers['Last-Modified'])
    lowerHeaderMap = {}
    for key, value in request.headers.items():
        lowerHeaderMap[key.lower()] = value
--->    dataDict = TmpFieldStorage(fp=request.rfile, headers=lowerHeaderMap, environ={'REQUEST_METHOD':'POST'}
                                   , keep_blank_values=True)
and:
class TmpFieldStorage(cgi.FieldStorage):
    """
    Use a named temporary file to allow creation of hard link to final destination
    """
    def make_file(self, binary=None):
        tmp_folder = os.path.join(get_filer_root(cherrypy.request.login), 'sync_tmp')
        if not os.path.exists(tmp_folder):
            os.makedirs(tmp_folder)
        return tempfile.NamedTemporaryFile(dir=tmp_folder)
environ={'REQUEST_METHOD':'POST'}
That seems a rather deficient environ. The CGI spec requires many more environment variables to be in there, some of which the cgi module is going to need.
In particular there is no CONTENT_LENGTH header. Without it, cgi is defaulting to reading the entire contents of the stream up until EOF. But since it is (probably) a network stream rather than a file there will be no EOF (or at least not one directly at the end of the submission), so the form reader will be sitting there waiting for more input that will never come. Timeout.
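A sketch of what forwarding those values might look like, reusing the names from the question's get_posted_file (illustrative only, not a tested drop-in fix):

# Sketch: forward Content-Length (and Content-Type) from the incoming request
# into the environ passed to FieldStorage, so the cgi module knows how much of
# the body to read instead of waiting for EOF.
environ = {'REQUEST_METHOD': 'POST'}
if 'content-length' in lowerHeaderMap:
    environ['CONTENT_LENGTH'] = lowerHeaderMap['content-length']
if 'content-type' in lowerHeaderMap:
    environ['CONTENT_TYPE'] = lowerHeaderMap['content-type']

dataDict = TmpFieldStorage(fp=request.rfile, headers=lowerHeaderMap,
                           environ=environ, keep_blank_values=True)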