I need my bot to monitor my Raspberry Pi's CPU temperature. It checks the temperature every minute and sends an alert if it exceeds a threshold. Once a message is sent, it must not be sent again for 10 minutes. I've done this, but then I get a timeout error when sending the same message 10 minutes later. Can anybody help me? I did not find any help on the telepot GitHub page.
This is my code:
bot = telepot.Bot(TOKEN)
bot.message_loop(handle)

while 1:
    if get_cpu_temperature() > 30.0 and alarm():
        data = "Temperature: " + str(get_cpu_temperature()) + " 'C"
        bot.sendMessage(users[0], data)
    time.sleep(60)
The alarm function just checks whether 10 minutes have passed since the last alert.
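For reference, a minimal sketch of what such an alarm() throttle could look like (my own reconstruction, since the original function isn't shown):

import time

_last_alert = 0.0

def alarm():
    # Return True (and rearm) only if at least 10 minutes have passed
    # since the last alert was sent.
    global _last_alert
    if time.time() - _last_alert >= 600:
        _last_alert = time.time()
        return True
    return False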
This is the error:
Traceback (most recent call last):
File "temp_disk_check_live.py", line 74, in <module>
bot.sendMessage(users[0],data)
File "/usr/local/lib/python2.7/dist-packages/telepot/__init__.py", line 456, in sendMessage
return self._api_request('sendMessage', _rectify(p))
File "/usr/local/lib/python2.7/dist-packages/telepot/__init__.py", line 434, in _api_request
return api.request((self._token, method, params, files), **kwargs)
File "/usr/local/lib/python2.7/dist-packages/telepot/api.py", line 130, in request
r = fn(*args, **kwargs) # `fn` must be thread-safe
File "/home/pi/.local/lib/python2.7/site-packages/urllib3/request.py", line 148, in request_encode_body
return self.urlopen(method, url, **extra_kw)
File "/home/pi/.local/lib/python2.7/site-packages/urllib3/poolmanager.py", line 321, in urlopen
response = conn.urlopen(method, u.request_uri, **kw)
File "/home/pi/.local/lib/python2.7/site-packages/urllib3/connectionpool.py", line 639, in urlopen
_stacktrace=sys.exc_info()[2])
File "/home/pi/.local/lib/python2.7/site-packages/urllib3/util/retry.py", line 357, in increment
raise six.reraise(type(error), error, _stacktrace)
File "/home/pi/.local/lib/python2.7/site-packages/urllib3/connectionpool.py", line 601, in urlopen
chunked=chunked)
File "/home/pi/.local/lib/python2.7/site-packages/urllib3/connectionpool.py", line 389, in _make_request
self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
File "/home/pi/.local/lib/python2.7/site-packages/urllib3/connectionpool.py", line 320, in _raise_timeout
raise ReadTimeoutError(self, url, "Read timed out. (read timeout=%s)" % timeout_value)
urllib3.exceptions.ReadTimeoutError: HTTPSConnectionPool(host='api.telegram.org', port=443): Read timed out. (read timeout=30)
Exception in thread Thread-1 (most likely raised during interpreter shutdown):
Traceback (most recent call last):
File "/usr/lib/python2.7/threading.py", line 801, in __bootstrap_inner
File "/usr/local/lib/python2.7/dist-packages/telepot/__init__.py", line 391, in run
File "/usr/local/lib/python2.7/dist-packages/telepot/__init__.py", line 310, in k
File "/usr/lib/python2.7/threading.py", line 168, in acquire
<type 'exceptions.TypeError'>: 'NoneType' object is not callable
The handle function is the standard one from the telepot examples.
Thanks a lot
You could create a new thread and start a timer, like this:
from threading import Timer

def hello():
    print("hello, world")

t = Timer(30.0, hello)
t.start()  # after 30 seconds, "hello, world" will be printed
So in your code:

def send_message(user, message):
    bot.sendMessage(user, message)

t = Timer(600, send_message, args=(users[0], "Temperature..."))
if cpu_temp > 30:
    t.start()
How about initializing the Bot whenever you really need to send a message?
while 1:
    if get_cpu_temperature() > 30.0 and alarm():
        data = "Temperature: " + str(get_cpu_temperature()) + " 'C"
        telepot.Bot(TOKEN).sendMessage(users[0], data)
    time.sleep(60 * 10)  # 10 min
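Constructing a fresh Bot for each alert also means each send opens a new HTTPS connection instead of reusing one that may have gone stale during the 10-minute quiet period, which is plausibly what triggered the original 30-second read timeout.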
I am trying to write a streaming implementation that dumps a table from psql to a pre-signed URL on S3. Unfortunately, it seems to error out at a seemingly random point in the upload. I have tried many combinations of opening and closing the file descriptors at different times, but for the life of me I cannot figure out why this is occurring.
The strangest thing is that when I mock the requests library and analyze the sent data, it works as intended. The socket raises an EPIPE error partway through the stream:
import os
import threading

import requests
from boto3 import session
from psycopg2 import connect

r_fd, w_fd = os.pipe()

connection = connect(host='host', database='db',
                     user='user', password='pw')
cursor = connection.cursor()

b3_session = session.Session(profile_name='profile', region_name='us-east-1')
url = b3_session.client('s3').generate_presigned_url(
    ClientMethod='put_object',
    Params={'Bucket': 'bucket', 'Key': 'test_streaming_upload.txt'},
    ExpiresIn=3600)

def stream_data():
    # reader side of the pipe: stream whatever the COPY writes up to S3
    print('Starting stream')
    with os.fdopen(r_fd, 'rb') as rd:
        requests.put(url, data=rd, headers={'Content-type': 'application/octet-stream'})
    print('Ending stream')

to_thread = threading.Thread(target=stream_data)
to_thread.start()

# writer side of the pipe: COPY the table into it, then close so the
# reader sees EOF
print('Starting copy')
with os.fdopen(w_fd, 'wb') as wd:
    cursor.copy_expert('COPY table TO STDOUT WITH CSV HEADER', wd)
print('Ending copy')

to_thread.join()
The output is always the same:
Starting stream
Starting copy
Exception in thread Thread-1:
Traceback (most recent call last):
File "/venv/lib/python3.9/site-packages/urllib3/contrib/pyopenssl.py", line 342, in _send_until_done
return self.connection.send(data)
File "/venv/lib/python3.9/site-packages/OpenSSL/SSL.py", line 1718, in send
self._raise_ssl_error(self._ssl, result)
File "/venv/lib/python3.9/site-packages/OpenSSL/SSL.py", line 1624, in _raise_ssl_error
raise SysCallError(errno, errorcode.get(errno))
OpenSSL.SSL.SysCallError: (32, 'EPIPE')
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/venv/lib/python3.9/site-packages/requests/adapters.py", line 473, in send
low_conn.send(b'\r\n')
File "/Users/me/.pyenv/versions/3.9.7/lib/python3.9/http/client.py", line 995, in send
self.sock.sendall(data)
File "/venv/lib/python3.9/site-packages/urllib3/contrib/pyopenssl.py", line 354, in sendall
sent = self._send_until_done(
File "/venv/lib/python3.9/site-packages/urllib3/contrib/pyopenssl.py", line 349, in _send_until_done
raise SocketError(str(e))
OSError: (32, 'EPIPE')
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/Users/me/.pyenv/versions/3.9.7/lib/python3.9/threading.py", line 973, in _bootstrap_inner
self.run()
File "/Users/me/.pyenv/versions/3.9.7/lib/python3.9/threading.py", line 910, in run
self._target(*self._args, **self._kwargs)
File "/Users/me/Library/Application Support/JetBrains/PyCharm2021.2/scratches/scratch_60.py", line 37, in stream_data
requests.put(url, data=rd, headers={'Content-type': 'application/octet-stream'})
File "/venv/lib/python3.9/site-packages/requests/api.py", line 131, in put
return request('put', url, data=data, **kwargs)
File "/venv/lib/python3.9/site-packages/requests/api.py", line 60, in request
return session.request(method=method, url=url, **kwargs)
File "/venv/lib/python3.9/site-packages/requests/sessions.py", line 533, in request
resp = self.send(prep, **send_kwargs)
File "/venv/lib/python3.9/site-packages/requests/sessions.py", line 646, in send
r = adapter.send(request, **kwargs)
File "/venv/lib/python3.9/site-packages/requests/adapters.py", line 498, in send
raise ConnectionError(err, request=request)
requests.exceptions.ConnectionError: (32, 'EPIPE')
Am I missing something obvious? Is this a memory error? I appreciate any insight I can get, because this is killing me. I can verify that the socket is written to anywhere from 1.5k to 2.5k times before the error occurs.
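One thing worth ruling out (an assumption on my part, not something the traceback proves): when requests is given a file-like body with no knowable length, it falls back to Transfer-Encoding: chunked, and S3 presigned PUT URLs reject plain chunked uploads and close the connection mid-stream, which surfaces on the client as EPIPE. A minimal sketch that tests this theory by spooling the pipe to a temp file so requests can send a Content-Length instead (put_with_length is a hypothetical helper; the buffer sizes are arbitrary):

import os
import tempfile

import requests

def put_with_length(url, r_fd):
    # Spool the pipe's contents to a memory-then-disk temp file first, so
    # requests can compute a Content-Length for the PUT instead of
    # falling back to chunked transfer encoding.
    with os.fdopen(r_fd, 'rb') as rd, \
         tempfile.SpooledTemporaryFile(max_size=64 * 1024 * 1024) as buf:
        for chunk in iter(lambda: rd.read(64 * 1024), b''):
            buf.write(chunk)
        buf.seek(0)
        resp = requests.put(url, data=buf,
                            headers={'Content-type': 'application/octet-stream'})
        resp.raise_for_status()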
I am using Python 3.6.2, on Fedora 26 Workstation.
Below is some scrapbook code which demonstrates my issue:
EDIT: added Sam Hartman's suggestion to the code.
import asyncio, json

from autobahn.asyncio.websocket import WebSocketClientProtocol, WebSocketClientFactory

class MyClientProtocol(WebSocketClientProtocol):

    def onConnect(self, response):
        print(response.peer)

    def onOpen(self):
        print("open")
        self.sendMessage(json.dumps({'command': 'subscribe', 'channel': "1010"}).encode("utf8"))

    def onMessage(self, payload, isBinary):
        print("message")
        print(json.loads(payload))

factory1 = WebSocketClientFactory("wss://api2.poloniex.com:443")
factory1.protocol = MyClientProtocol

loop1 = asyncio.get_event_loop()
loop1.run_until_complete(loop1.create_connection(factory1, "api2.poloniex.com", 443, ssl=True))
try:
    loop1.run_forever()
except KeyboardInterrupt:
    pass
loop1.close()

asyncio.set_event_loop(asyncio.new_event_loop())

factory2 = WebSocketClientFactory("wss://api2.poloniex.com:443")
factory2.protocol = MyClientProtocol

loop2 = asyncio.get_event_loop()
loop2.run_until_complete(loop2.create_connection(factory2, "api2.poloniex.com", 443, ssl=True))
try:
    loop2.run_forever()
except KeyboardInterrupt:
    pass
loop2.close()
After closing the initial asyncio event loop, then creating another and setting it as the global event loop, attempting to use the new loop yields the following errors:
Fatal write error on socket transport
protocol: <asyncio.sslproto.SSLProtocol object at 0x7f8a84ed4748>
transport: <_SelectorSocketTransport fd=6>
Traceback (most recent call last):
File "/usr/lib64/python3.6/asyncio/selector_events.py", line 762, in write
n = self._sock.send(data)
OSError: [Errno 9] Bad file descriptor
Fatal error on SSL transport
protocol: <asyncio.sslproto.SSLProtocol object at 0x7f8a84ed4748>
transport: <_SelectorSocketTransport closing fd=6>
Traceback (most recent call last):
File "/usr/lib64/python3.6/asyncio/selector_events.py", line 762, in write
n = self._sock.send(data)
OSError: [Errno 9] Bad file descriptor
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib64/python3.6/asyncio/sslproto.py", line 648, in _process_write_backlog
self._transport.write(chunk)
File "/usr/lib64/python3.6/asyncio/selector_events.py", line 766, in write
self._fatal_error(exc, 'Fatal write error on socket transport')
File "/usr/lib64/python3.6/asyncio/selector_events.py", line 646, in _fatal_error
self._force_close(exc)
File "/usr/lib64/python3.6/asyncio/selector_events.py", line 658, in _force_close
self._loop.call_soon(self._call_connection_lost, exc)
File "/usr/lib64/python3.6/asyncio/base_events.py", line 574, in call_soon
self._check_closed()
File "/usr/lib64/python3.6/asyncio/base_events.py", line 357, in _check_closed
raise RuntimeError('Event loop is closed')
RuntimeError: Event loop is closed
Exception in callback _SelectorSocketTransport._read_ready()
handle: <Handle _SelectorSocketTransport._read_ready()>
Traceback (most recent call last):
File "/usr/lib/python3.6/site-packages/txaio/_common.py", line 63, in call_later
self._buckets[real_time][1].append(call)
KeyError: 412835000
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/lib64/python3.6/asyncio/events.py", line 127, in _run
self._callback(*self._args)
File "/usr/lib64/python3.6/asyncio/selector_events.py", line 731, in _read_ready
self._protocol.data_received(data)
File "/usr/lib64/python3.6/asyncio/sslproto.py", line 503, in data_received
ssldata, appdata = self._sslpipe.feed_ssldata(data)
File "/usr/lib64/python3.6/asyncio/sslproto.py", line 204, in feed_ssldata
self._handshake_cb(None)
File "/usr/lib64/python3.6/asyncio/sslproto.py", line 619, in _on_handshake_complete
self._app_protocol.connection_made(self._app_transport)
File "/usr/lib/python3.6/site-packages/autobahn/asyncio/websocket.py", line 97, in connection_made
self._connectionMade()
File "/usr/lib/python3.6/site-packages/autobahn/websocket/protocol.py", line 3340, in _connectionMade
WebSocketProtocol._connectionMade(self)
File "/usr/lib/python3.6/site-packages/autobahn/websocket/protocol.py", line 1055, in _connectionMade
self.onOpenHandshakeTimeout,
File "/usr/lib/python3.6/site-packages/txaio/_common.py", line 72, in call_later
self._notify_bucket, real_time,
File "/usr/lib/python3.6/site-packages/txaio/aio.py", line 382, in call_later
return self._config.loop.call_later(delay, real_call)
File "/usr/lib64/python3.6/asyncio/base_events.py", line 543, in call_later
timer = self.call_at(self.time() + delay, callback, *args)
File "/usr/lib64/python3.6/asyncio/base_events.py", line 553, in call_at
self._check_closed()
File "/usr/lib64/python3.6/asyncio/base_events.py", line 357, in _check_closed
raise RuntimeError('Event loop is closed')
RuntimeError: Event loop is closed
It seems reasonable that one might need to reopen an event loop after having closed an earlier one. Indeed this question even shows how: Asyncio Event Loop is Closed
The code below should achieve this:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
so I am clearly doing something wrong. Can somebody see what I am missing?
I have fairly high confidence that your factory object is maintaining a reference to the old event loop, presumably one it gets from asyncio.get_event_loop(). Asyncio consumers are bad about holding hidden references to loops.
My recommendation is to reconstruct the WebSocket factory after closing the loop.
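If rebuilding the factory still leaves something holding the old loop, a variant that sidesteps the problem entirely is to keep a single loop alive for both connections and only reconstruct the factory between them. This is my own sketch (reusing MyClientProtocol from the question), not the answerer's exact code:

import asyncio
from autobahn.asyncio.websocket import WebSocketClientFactory

loop = asyncio.get_event_loop()

for _ in range(2):
    # build a fresh factory while the loop we intend to run on is current,
    # so neither autobahn nor txaio captures a closed loop
    factory = WebSocketClientFactory("wss://api2.poloniex.com:443")
    factory.protocol = MyClientProtocol
    loop.run_until_complete(
        loop.create_connection(factory, "api2.poloniex.com", 443, ssl=True))
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass

loop.close()  # close only once we are completely done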
I have a bit of code that uses newspaper to look at various media outlets and download articles from them. This has been working fine for a long time but has recently started acting up. I can see what the problem is, but as I'm new to Python I'm not sure about the best way to address it. Basically (I think) I need a modification that keeps the occasional malformed web address from crashing the script entirely and instead lets it dispense with that address and move on to the others.
The error originates when I attempt to download an article using:
article.download()
Some articles (they change every day, obviously) will throw the following error, but the script continues to run:
Traceback (most recent call last):
File "C:\Anaconda3\lib\encodings\idna.py", line 167, in encode
raise UnicodeError("label too long")
UnicodeError: label too long
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "C:\Anaconda3\lib\site-packages\newspaper\mthreading.py", line 38, in run
func(*args, **kargs)
File "C:\Anaconda3\lib\site-packages\newspaper\source.py", line 350, in download_articles
html = network.get_html(url, config=self.config)
File "C:\Anaconda3\lib\site-packages\newspaper\network.py", line 39, in get_html return get_html_2XX_only(url, config, response)
File "C:\Anaconda3\lib\site-packages\newspaper\network.py", line 60, in get_html_2XX_only url=url, **get_request_kwargs(timeout, useragent))
File "C:\Anaconda3\lib\site-packages\requests\api.py", line 72, in get return request('get', url, params=params, **kwargs)
File "C:\Anaconda3\lib\site-packages\requests\api.py", line 58, in request return session.request(method=method, url=url, **kwargs)
File "C:\Anaconda3\lib\site-packages\requests\sessions.py", line 502, in request resp = self.send(prep, **send_kwargs)
File "C:\Anaconda3\lib\site-packages\requests\sessions.py", line 612, in send r = adapter.send(request, **kwargs)
File "C:\Anaconda3\lib\site-packages\requests\adapters.py", line 440, in send timeout=timeout
File "C:\Anaconda3\lib\site-packages\urllib3\connectionpool.py", line 600, in urlopen chunked=chunked)
File "C:\Anaconda3\lib\site-packages\urllib3\connectionpool.py", line 356, in _make_request conn.request(method, url, **httplib_request_kw)
File "C:\Anaconda3\lib\http\client.py", line 1107, in request self._send_request(method, url, body, headers)
File "C:\Anaconda3\lib\http\client.py", line 1152, in _send_request self.endheaders(body)
File "C:\Anaconda3\lib\http\client.py", line 1103, in endheaders self._send_output(message_body)
File "C:\Anaconda3\lib\http\client.py", line 934, in _send_output self.send(msg)
File "C:\Anaconda3\lib\http\client.py", line 877, in send self.connect()
File "C:\Anaconda3\lib\site-packages\urllib3\connection.py", line 166, in connect conn = self._new_conn()
File "C:\Anaconda3\lib\site-packages\urllib3\connection.py", line 141, in _new_conn (self.host, self.port), self.timeout, **extra_kw)
File "C:\Anaconda3\lib\site-packages\urllib3\util\connection.py", line 60, in create_connection for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):
File "C:\Anaconda3\lib\socket.py", line 733, in getaddrinfo for res in _socket.getaddrinfo(host, port, family, type, proto, flags):
UnicodeError: encoding with 'idna' codec failed (UnicodeError: label too long)
The next bit is supposed to parse each article, run natural language processing on it, and write certain elements to a dataframe, so I then have:
for paper in papers:
    for article in paper.articles:
        article.parse()
        print(article.title)
        article.nlp()
        if article.publish_date is None:
            d = datetime.now().date()
        else:
            d = article.publish_date.date()
        stories.loc[i] = [paper.brand, d, datetime.now().date(), article.title,
                          article.summary, article.keywords, article.url]
        i += 1
(This might be a little sloppy too but that's a problem for another day)
This runs fine until it gets to one of those URLs with the error, at which point it throws an ArticleException and the script crashes:
C:\Anaconda3\lib\site-packages\PIL\TiffImagePlugin.py:709: UserWarning: Corrupt EXIF data. Expecting to read 2 bytes but only got 0.
warnings.warn(str(msg))
ArticleException Traceback (most recent call last) <ipython-input-17-2106485c4bbb> in <module>()
4 for paper in papers:
5 for article in paper.articles:
----> 6 article.parse()
7 print(article.title)
8 article.nlp()
C:\Anaconda3\lib\site-packages\newspaper\article.py in parse(self)
183
184 def parse(self):
--> 185 self.throw_if_not_downloaded_verbose()
186
187 self.doc = self.config.get_parser().fromstring(self.html)
C:\Anaconda3\lib\site-packages\newspaper\article.py in throw_if_not_downloaded_verbose(self)
519 if self.download_state == ArticleDownloadState.NOT_STARTED:
520 print('You must `download()` an article first!')
--> 521 raise ArticleException()
522 elif self.download_state == ArticleDownloadState.FAILED_RESPONSE:
523 print('Article `download()` failed with %s on URL %s' %
ArticleException:
So what's the best way to keep this from terminating my script? Should I address it at the download stage, where I'm getting the Unicode error, or at the parse stage, by telling it to overlook those bad addresses? And how would I go about implementing that correction?
Really appreciate any advice.
I had the same issue, and although using a bare except: pass is generally not recommended, the following worked for me:
try:
    a.parse()
    file.write(a.title + '\n')
except:
    pass
What I've found is that Navid is correct for this exact problem.
However, .parse() is only one of the functions that can trip you up, so I wrap all the calls inside a try/except structure like this:
word_list = []
for words in google_news.articles:
    try:
        words.download()
        words.parse()
        words.nlp()
    except:
        pass
    word_list.append(words.keywords)
You can try catching the ArticleException. Don't forget to import the newspaper module.
try:
    article.download()
    article.parse()
except newspaper.article.ArticleException:
    pass  # do something
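Putting those pieces together with the loop from the question, here is a sketch of the parse stage that skips failed downloads instead of crashing (papers, stories, and i are the question's own variables):

import newspaper
from datetime import datetime

for paper in papers:
    for article in paper.articles:
        try:
            article.parse()
            article.nlp()
        except newspaper.article.ArticleException:
            # the download for this URL failed (e.g. the idna error above),
            # so skip it and move on to the next article
            continue
        if article.publish_date is None:
            d = datetime.now().date()
        else:
            d = article.publish_date.date()
        stories.loc[i] = [paper.brand, d, datetime.now().date(), article.title,
                          article.summary, article.keywords, article.url]
        i += 1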
I tried to do a simple query to my lab instrument:
>>> import visa
>>> rm = visa.ResourceManager()
>>> viavi = rm.open_resource("TCPIP0::10.0.2.76::5001::SOCKET")
>>> print(viavi.query("*IDN?"))
The result was:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Program Files\Python35\lib\site-packages\pyvisa\resources\messagebase
d.py", line 407, in query
return self.read()
File "C:\Program Files\Python35\lib\site-packages\pyvisa\resources\messagebase
d.py", line 332, in read
message = self.read_raw().decode(enco)
File "C:\Program Files\Python35\lib\site-packages\pyvisa\resources\messagebase
d.py", line 306, in read_raw
chunk, status = self.visalib.read(self.session, size)
File "C:\Program Files\Python35\lib\site-packages\pyvisa\ctwrapper\functions.p
y", line 1582, in read
ret = library.viRead(session, buffer, count, byref(return_count))
File "C:\Program Files\Python35\lib\site-packages\pyvisa\ctwrapper\highlevel.p
y", line 188, in _return_handler
raise errors.VisaIOError(ret_value)
pyvisa.errors.VisaIOError: VI_ERROR_TMO (-1073807339): Timeout expired before op
eration completed.
According to what I have learned so far (from the experience of others), this timeout error is somehow related to line termination ("\n"). How can I solve this problem?
I found out that it was all related to the read_termination. My lab instrument simply terminated its response with a '\n', while my script was looking for a '\r' all that time.
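For completeness, a sketch of the fix using the resource string from the question: pyvisa lets you pass the terminators straight to open_resource.

import visa

rm = visa.ResourceManager()
# match the instrument's actual terminator ('\n' here) so read() stops at
# the end of the response instead of waiting for a '\r' that never arrives
viavi = rm.open_resource("TCPIP0::10.0.2.76::5001::SOCKET",
                         read_termination='\n',
                         write_termination='\n')
print(viavi.query("*IDN?"))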
I wrote a script which fetches publicly available data and puts it into an SQLite DB using peewee. The data is available like this: site.com/data/1, site.com/data/2 ... site.com/data/N. So I use a for loop and change the value of N each time.
The problem is that after a while the script stops working. I mean, it just stops responding (it does not exit). I have a print statement in the loop, and it stops printing the next numbers.
For a smaller loop range it works perfectly, and for a larger one it stops working. I have found that up to a range of 80 it works just fine.
When I force-close the script, I get the following on the terminal. It is basically something related to requests and HTTP connections.
Traceback (most recent call last):
File "/Users/avi/Documents/code/my-app/venv/lib/python3.4/site-packages/requests/packages/urllib3/connectionpool.py", line 372, in _make_request
httplib_response = conn.getresponse(buffering=True)
TypeError: getresponse() got an unexpected keyword argument 'buffering'
Full traceback:
Traceback (most recent call last):
File "/Users/avi/Documents/code/my-app/venv/lib/python3.4/site-packages/requests/packages/urllib3/connectionpool.py", line 372, in _make_request
httplib_response = conn.getresponse(buffering=True)
TypeError: getresponse() got an unexpected keyword argument 'buffering'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "script.py", line 71, in <module>
main()
File "script.py", line 56, in main
user_data = get_script_user_data(i)
File "script.py", line 42, in get_script_user_data
r = requests.get(users_data_public_api.format(user_id))
File "/Users/avi/Documents/code/my-app/venv/lib/python3.4/site-packages/requests/api.py", line 69, in get
return request('get', url, params=params, **kwargs)
File "/Users/avi/Documents/code/my-app/venv/lib/python3.4/site-packages/requests/api.py", line 50, in request
response = session.request(method=method, url=url, **kwargs)
File "/Users/avi/Documents/code/my-app/venv/lib/python3.4/site-packages/requests/sessions.py", line 465, in request
resp = self.send(prep, **send_kwargs)
File "/Users/avi/Documents/code/my-app/venv/lib/python3.4/site-packages/requests/sessions.py", line 573, in send
r = adapter.send(request, **kwargs)
File "/Users/avi/Documents/code/my-app/venv/lib/python3.4/site-packages/requests/adapters.py", line 370, in send
timeout=timeout
File "/Users/avi/Documents/code/my-app/venv/lib/python3.4/site-packages/requests/packages/urllib3/connectionpool.py", line 544, in urlopen
body=body, headers=headers)
File "/Users/avi/Documents/code/my-app/venv/lib/python3.4/site-packages/requests/packages/urllib3/connectionpool.py", line 374, in _make_request
httplib_response = conn.getresponse()
File "/usr/local/Cellar/python3/3.4.3/Frameworks/Python.framework/Versions/3.4/lib/python3.4/http/client.py", line 1171, in getresponse
response.begin()
File "/usr/local/Cellar/python3/3.4.3/Frameworks/Python.framework/Versions/3.4/lib/python3.4/http/client.py", line 351, in begin
version, status, reason = self._read_status()
File "/usr/local/Cellar/python3/3.4.3/Frameworks/Python.framework/Versions/3.4/lib/python3.4/http/client.py", line 313, in _read_status
line = str(self.fp.readline(_MAXLINE + 1), "iso-8859-1")
File "/usr/local/Cellar/python3/3.4.3/Frameworks/Python.framework/Versions/3.4/lib/python3.4/socket.py", line 374, in readinto
return self._sock.recv_into(b)
KeyboardInterrupt
I thought I was making requests too fast and requests was not able to keep up (just my assumption), so I added a sleep statement of half a second. The problem still existed. Then I changed the sleep to 1.5 seconds; that worked until the loop range reached 400, but on the next run it stopped within a range of 50-60.
Here's the full code:
import sqlite3
import datetime
import time
from datetime import date

import requests
from peewee import *

users_data_public_api = "http://api.some-site.com/user/{0}"

db = SqliteDatabase('opendata_users.db')

class OpendataUser(Model):
    user_id = IntegerField()
    fullname = CharField()
    email = CharField()
    sex = CharField(null=True)
    dob = DateField(null=True)

    class Meta:
        database = db

def initialize_db():
    db.connect()
    #db.create_tables([OpendataUser])

def deinit():
    db.close()

def get_opendata_user_data(user_id):
    """ Returns Opendata user data only if he has all the fields required """
    r = requests.get(users_data_public_api.format(user_id))
    if r.status_code == requests.codes.ok:
        user_data = r.json()['users'][0]
        if user_data['email'] is not None and 'deleted' not in user_data['email']:
            if user_data['fullname'] is not None and user_data['dob'] is not None:
                try:
                    user_data['dob'] = datetime.datetime.strptime(user_data['dob'], "%d/%m/%Y").date()
                except ValueError:
                    user_data['dob'] = None
                return user_data

def main():
    for i in range(450, 600):
        user_data = get_opendata_user_data(i)
        if user_data:
            print(i)
            od_user = OpendataUser(user_id=user_data['user_id'],
                                   fullname=user_data['fullname'],
                                   email=user_data['email'],
                                   sex=user_data['sex'],
                                   dob=user_data['dob'])
            od_user.save()
        time.sleep(1.5)

if __name__ == '__main__':
    initialize_db()
    main()
    deinit()
So what's the reason? I appreciate any help. (The code is not open source, so I have modified it here and there.)
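One observation, offered as a guess rather than a diagnosis: the KeyboardInterrupt traceback ends in self._sock.recv_into(b), i.e. the script is blocked waiting for a response, and requests has no default timeout, so a single stalled response will hang the loop forever. A minimal sketch of a guard (get_with_timeout is a hypothetical helper; the timeout and retry values are arbitrary):

import requests

def get_with_timeout(url, retries=3, timeout=10):
    # Fail fast instead of blocking forever on a stalled response,
    # retrying a few times before giving up on this URL.
    for _ in range(retries):
        try:
            return requests.get(url, timeout=timeout)
        except requests.exceptions.Timeout:
            continue
    return None

In get_opendata_user_data you would then call r = get_with_timeout(users_data_public_api.format(user_id)) and simply return None when r is None.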