This script crashes with a serial timeout exception after an extended period of time. It typically makes it through 5000 loops or so before failing with the following error:
Traceback (most recent call last):
File "C:\Users\3d Exposure\Desktop\venderFix.py", line 69, in <module>
A.setHigh(12)
File "C:\Python27\lib\arduino\arduino.py", line 30, in setHigh
self.__sendData('1')
File "C:\Python27\lib\arduino\arduino.py", line 58, in __sendData
self.serial.write(str(serial_data))
File "C:\Python27\lib\site-packages\serial\serialwin32.py", line 270, in write
raise writeTimeoutError
SerialTimeoutException: Write timeout
Why would it time out after working perfectly for so long? How do I stop it doing so?
import json
import urllib
from pprint import pprint
import time
from arduino import Arduino
##############################################################
#Change to suit the Mars Bars, currently at 0.2 of a second #
vendtime = 0.2 #
#
#Delay Time Between each Search (never below 15 seconds) #
delayTime = 15 #
#This is the search term for the URL. (%23 = #) #
searchTerm = '%23happy' #
#
A = Arduino('COM3') #
A.output([13]) #Output on pin 13                             #
##############################################################
#to collect the first tweet without vending
countTweet = 0
#To test Twitter for consistency
tweet = 0
noTweet = 0
#defaults so the debug prints below cannot fail if the very first response is broken
text = None
id = 0
#the infinite loop
while True:
#j contains the JSON we load from the URL
j =json.loads(urllib.urlopen('http://search.twitter.com/search.json?q='+searchTerm+'&result_type=recent&rpp=1&filter:retweets').read())
#Debug JSON from twitter (for faults on the Twitter end or possible GET limit id below 15 seconds per request)
#pprint(j) #needed for debugging only
#find the text and the tweet id
if 'results' in j and j['results']:
text = j['results'][0]['text']
id = j['results'][0]['id']
#how many times the Json is complete
tweet+= 1
else:
#How many times the Json is incomplete (sometimes twitter malfunctions. About 0.1 in 100 are broken)
noTweet += 1
#print the text and id to the screen
pprint(text) #needed for debugging only
pprint(id) #needed for debugging only
#to get the existing tweet from before we power on, if the first ID has been stored already (count == 1)
if countTweet != 0: #if countTweet is not equal to 0 then it's not the first tweet
#pprint ("new loop") #needed for debugging only
#if lastID is not equal to ID
if lastID != id:
#Tell Arduino to Vend
            #pin 13 HIGH
A.setHigh(13)
#Sleep for the time specified in vendtime
time.sleep(vendtime)
            #pin 13 LOW
A.setLow(13)
#Display the tweet that triggered the vend
#pprint(text) #needed for debugging only
#pprint(id) #needed for debugging only
#Make lastID equal to ID so that next time we can compare it
lastID = id
#pprint ('lastID updated') #needed for debugging only
#if no new tweets, print
else: #needed for debugging only
pprint ('no new tweets') #needed for debugging only
#If it's the first loop, confirm by printing to the screen
else:
pprint("First loop complete")
pprint(text)
pprint(id)
lastID = id
pprint(lastID)
countTweet += 1 #Add 1 to countTweet
pprint ('Number of Tweets')
pprint (countTweet)
pprint('Working JSON')
pprint(tweet)
pprint('Broken JSON')
pprint(noTweet)
pprint('waiting')
time.sleep(delayTime)
For communication with the Arduino I am using the Python Arduino Prototyping API v2. It works well; this is the first and only serial issue I have had with this script.
I imagine I need to set a write timeout myself, although I am not 100% sure how to do that in a way that would not run into the same or a similar problem.
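For reference, here is one direction I am considering, as a minimal sketch rather than a confirmed fix: give the port a finite write timeout and reopen it when a write does time out. It assumes the wrapper exposes its pyserial port as A.serial (the traceback suggests it does); set_pin is my own helper, not part of the Arduino API.
import time
import serial
#give writes a finite timeout instead of blocking until pyserial gives up
#(writeTimeout is the pyserial 2.x spelling; pyserial 3.x calls it write_timeout)
A.serial.writeTimeout = 2
def set_pin(board, pin, high, retries=1):
    """setHigh/setLow with one close-reopen-retry on a write timeout."""
    for attempt in range(retries + 1):
        try:
            if high:
                board.setHigh(pin)
            else:
                board.setLow(pin)
            return
        except serial.SerialTimeoutException:
            #the port may be wedged (full buffer, device reset): reopen it
            board.serial.close()
            time.sleep(1)
            board.serial.open()
    raise serial.SerialTimeoutException('write still timing out after retry')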
I'm using this Python script:
LINK
It's working great so far.
But now I would like to optimize it, because sometimes the script gets executed 2-3 times within 10-20 minutes: it runs whenever there are 3 streams or more (e.g. a 4th stream is started, so the notification is sent again; or a user cancels a stream and starts another movie, so the script runs again).
I have tried to use time.sleep, but that is not working. I would like to have it like this:
Once the program has been executed, it shouldn't run again within the next 60 minutes.
What do I need to use / code here?
Thanks for your help!
Thank you for the tip; my code looks like this now (can you maybe check?):
** code section ** = my code, which I have merged into the existing script.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Description: Send a PlexPy notification when the total
# number of streams exceeds a threshold.
# Author: /u/SwiftPanda16
# Requires: requests
# PlexPy script trigger: Playback start
# PlexPy script arguments: {streams}
import requests
import sys
**import os
from datetime import datetime, timedelta**
### EDIT SETTINGS ###
PLEXPY_URL = 'xx.xxx.xx:8181'
PLEXPY_APIKEY = 'xxxxxxxxxxxxxxxxxx'
AGENT_ID = 14 # The PlexPy notifier agent id found here: https://github.com/JonnyWong16/plexpy/blob/master/API.md#notify
NOTIFY_SUBJECT = 'test' # The notification subject
NOTIFY_BODY = 'Test'
STREAM_THRESHOLD = 3
**### time management ###
one_hour_ago = datetime.now() - timedelta(minutes=60)
filetime = datetime.fromtimestamp(os.path.getctime("timestamp.txt"))
if filetime < one_hour_ago:**
### CODE BELOW ###
def main():
try:
streams = int(sys.argv[1])
except:
print("Invalid PlexPy script argument passed.")
return
if streams >= STREAM_THRESHOLD:
print("Number of streams exceeds {threshold}.".format(threshold=STREAM_THRESHOLD))
print("Sending PlexPy notification to agent ID: {agent_id}.".format(agent_id=AGENT_ID))
params = {'apikey': PLEXPY_APIKEY,
'cmd': 'notify',
'agent_id': AGENT_ID,
'subject': NOTIFY_SUBJECT,
'body': NOTIFY_BODY}
r = requests.post(PLEXPY_URL.rstrip('/') + '/api/v2', params=params)
**os.getcwd()
open ('timestamp.txt', 'w')**
else:
print("Number of streams below {threshold}.".format(threshold=STREAM_THRESHOLD))
print("No notification sent.")
return
if __name__ == "__main__":
main()
**else:
pass**
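For clarity, here is a minimal sketch of what I think the merge should look like, with the check moved into the entry point (it assumes the script's existing main()). My mid-file block above never rewrites timestamp.txt, so the hour would never reset, and os.path.getctime would fail on the first run, before the file exists:
import os
from datetime import datetime, timedelta
STAMP = 'timestamp.txt'
def ran_within_last_hour():
    #a missing stamp file simply means we have never run yet
    if not os.path.exists(STAMP):
        return False
    filetime = datetime.fromtimestamp(os.path.getmtime(STAMP))
    return filetime >= datetime.now() - timedelta(minutes=60)
def touch_stamp():
    with open(STAMP, 'w') as f:
        f.write(datetime.now().isoformat())
if __name__ == "__main__":
    if not ran_within_last_hour():
        touch_stamp()
        main()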
Have the script write a timestamp to an external file and check that file at startup.
Here is an example:
import time
def script_has_run_recently(seconds):
filename = 'last-run-time.txt'
current_time = int(time.time())
try:
with open(filename, 'rt') as f:
last_run = int(f.read().strip())
except (IOError, ValueError) as e:
last_run = 0
if last_run + seconds > current_time:
return True
else:
with open(filename, 'wt') as f:
f.write(str(current_time))
return False
def main():
print('running the main function.')
if __name__ == "__main__":
seconds = 3600 # one hour in seconds
if script_has_run_recently(seconds):
print('you need to wait before you can run this again')
else:
main()
I am adapting the Python script in this project (expanded below) so that it updates a JSON file's elements instead of the InitialState streamer. However, with the multiple threads the script opens, it is impossible to write the data from each thread back to the file cleanly: the file would be read, changed, and written back by all threads at the same time. Since there can only be one file, no version would ever be accurate, because the last thread to write would overwrite all the others.
Question: How can I update the states in the JSON from each thread (simultaneously) without affecting the other threads' writes or locking up the file?
JSON file contains the occupant's status that I would like to manipulate with the python script:
{
"janeHome": "false",
"johnHome": "false",
"jennyHome": "false",
"jamesHome": "false"
}
This is the Python script:
import subprocess
import json
from time import sleep
from threading import Thread
# Edit these for how many people/devices you want to track
occupant = ["Jane","John","Jenny","James"]
# MAC addresses for our phones
address = ["11:22:33:44:55:66","77:88:99:00:11:22","33:44:55:66:77:88","99:00:11:22:33:44"]
# Sleep once right when this script is called to give the Pi enough time
# to connect to the network
sleep(60)
# Some arrays to help minimize streaming and account for devices
# disappearing from the network when asleep
firstRun = [1] * len(occupant)
presentSent = [0] * len(occupant)
notPresentSent = [0] * len(occupant)
counter = [0] * len(occupant)
# Function that checks for device presence
def whosHere(i):
# 30 second pause to allow main thread to finish arp-scan and populate output
sleep(30)
# Loop through checking for devices and counting if they're not present
while True:
# Exits thread if Keyboard Interrupt occurs
if stop == True:
print ("Exiting Thread")
exit()
else:
pass
# If a listed device address is present print
if address[i] in output:
print(occupant[i] + "'s device is connected")
if presentSent[i] == 0:
# TODO: UPDATE THIS OCCUPANT'S STATUS TO TRUE
# Reset counters so another stream isn't sent if the device
# is still present
firstRun[i] = 0
presentSent[i] = 1
notPresentSent[i] = 0
counter[i] = 0
sleep(900)
else:
# If a stream's already been sent, just wait for 15 minutes
counter[i] = 0
sleep(900)
# If a listed device address is not present, print and stream
else:
print(occupant[i] + "'s device is not connected")
# Only consider a device offline if it's counter has reached 30
# This is the same as 15 minutes passing
if counter[i] == 30 or firstRun[i] == 1:
firstRun[i] = 0
if notPresentSent[i] == 0:
# TODO: UPDATE THIS OCCUPANT'S STATUS TO FALSE
# Reset counters so another stream isn't sent if the device
# is still present
notPresentSent[i] = 1
presentSent[i] = 0
counter[i] = 0
else:
# If a stream's already been sent, wait 30 seconds
counter[i] = 0
sleep(30)
# Count how many 30 second intervals have happened since the device
# disappeared from the network
else:
counter[i] = counter[i] + 1
print(occupant[i] + "'s counter at " + str(counter[i]))
sleep(30)
# Main thread
try:
# Initialize a variable to trigger threads to exit when True
global stop
stop = False
# Start the thread(s)
# It will start as many threads as there are values in the occupant array
for i in range(len(occupant)):
t = Thread(target=whosHere, args=(i,))
t.start()
while True:
# Make output global so the threads can see it
global output
# Reads existing JSON file into buffer
with open("data.json", "r") as jsonFile:
data = json.load(jsonFile)
jsonFile.close()
# Assign list of devices on the network to "output"
output = subprocess.check_output("arp-scan -interface en1 --localnet -l", shell=True)
temp = data["janeHome"]
data["janeHome"] = # RETURNED STATE
data["johnHome"] = # RETURNED STATE
data["jennyHome"] = # RETURNED STATE
data["jamesHome"] = # RETURNED STATE
with open("data.json", "w") as jsonFile:
json.dump(data, jsonFile)
jsonFile.close()
# Wait 30 seconds between scans
sleep(30)
except KeyboardInterrupt:
# On a keyboard interrupt signal threads to exit
stop = True
exit()
I think we can all agree that the best approach would be to return the data from each thread to the main thread and write it to the file in one place. But here is where it gets confusing: with each thread checking for a different person, how can the state be passed back to main for writing?
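For illustration, here is the pattern I have in mind as a minimal sketch (check_device() is hypothetical, standing in for the real arp-scan logic): each thread reports (name, state) into a queue, and only the main thread ever touches the file, so writers cannot collide.
import json
from queue import Queue        #on Python 2: from Queue import Queue
from threading import Thread
from time import sleep
occupant = ["Jane", "John", "Jenny", "James"]
status_queue = Queue()         #thread-safe, so no explicit lock is needed
def whosHere(i, queue):
    while True:
        present = check_device(i)          #hypothetical presence check
        queue.put((occupant[i], present))  #hand the result to the main thread
        sleep(30)
for i in range(len(occupant)):
    t = Thread(target=whosHere, args=(i, status_queue))
    t.daemon = True
    t.start()
while True:
    while not status_queue.empty():        #drain all pending reports
        name, present = status_queue.get()
        with open("data.json", "r") as jsonFile:
            data = json.load(jsonFile)
        data[name.lower() + "Home"] = "true" if present else "false"
        with open("data.json", "w") as jsonFile:
            json.dump(data, jsonFile)
    sleep(30)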
I'm trying to download Twitter followers from a list of accounts. My function (which uses Twython) works pretty well for short account lists but raises an error for longer lists. It is not a rate-limit problem, since my function sleeps until the next time window whenever the rate limit is hit.
The error is this:
twythonerror: ('Connection aborted.', error(10054, ''))
Others seem to have the same problem, and the proposed solution is to make the function sleep between different REST API calls, so I implemented the following code:
del twapi
sleep(nap[afternoon])
afternoon = afternoon + 1
twapi = Twython(app_key=app_key, app_secret=app_secret,
oauth_token=oauth_token, oauth_token_secret=oauth_token_secret)
nap is a list of intervals in seconds and afternoon is an index.
Despite this suggestion I still have the exact same problem; it seems that the sleep doesn't resolve it.
Can anyone help me?
Here is the whole function:
#imports used below
import random
import twython
from twython import Twython
from time import localtime, sleep

def download_follower(serie_lst):
"""Creates account named txt files containing followers ids. Uses for loop on accounts names list."""
nap = [1, 2, 4, 8, 16, 32, 64, 128]
afternoon = 0
for exemplar in serie_lst:
#username from serie_lst entries
account_name = exemplar
twapi = Twython(app_key=app_key, app_secret=app_secret,
oauth_token=oauth_token, oauth_token_secret=oauth_token_secret)
try:
#initializations
del twapi
if afternoon >= 7:
afternoon =0
sleep(nap[afternoon])
afternoon = afternoon + 1
twapi = Twython(app_key=app_key, app_secret=app_secret,
oauth_token=oauth_token, oauth_token_secret=oauth_token_secret)
next_cursor = -1
result = {}
result["screen_name"] = ""
result["followers"] = []
iteration = 0
file_name = ""
#user info
user = twapi.lookup_user(screen_name = account_name)
#store user name
result['screen_name'] = account_name
#loop until all cursored results are stored
while (next_cursor != 0):
sleep(random.randrange(start = 1, stop = 15, step = 1))
call_result = twapi.get_followers_ids(screen_name = account_name, cursor = next_cursor)
#loop over each entry of followers id and append each entry to results_follower
for i in call_result["ids"]:
result["followers"].append(i)
next_cursor = call_result["next_cursor"] #new next_cursor
iteration = iteration + 1
if (iteration > 13): #skip sleep if all cursored pages are processed
error_msg = localtime()
error_msg = "".join([str(error_msg.tm_mon), "/", str(error_msg.tm_mday), "/", str(error_msg.tm_year), " at ", str(error_msg.tm_hour), ":", str(error_msg.tm_min)])
error_msg ="".join(["Twitter API Request Rate Limit hit on ", error_msg, ", wait..."])
print(error_msg)
del error_msg
sleep(901) #15min + 1sec
iteration = 0
#output file
file_name = "".join([account_name, ".txt"])
#print output
out_file = open(file_name, "w") #open file "account_name.txt"
#out_file.write(str(result["followers"])) #standard format
for i in result["followers"]: #R friendly table format
out_file.write(str(i))
out_file.write("\n")
out_file.close()
except twython.TwythonRateLimitError:
#wait
error_msg = localtime()
error_msg = "".join([str(error_msg.tm_mon), "/", str(error_msg.tm_mday), "/", str(error_msg.tm_year), " at ", str(error_msg.tm_hour), ":", str(error_msg.tm_min)])
error_msg ="".join(["Twitter API Request Rate Limit hit on ", error_msg, ", wait..."])
print(error_msg)
del error_msg
del twapi
sleep(901) #15min + 1sec
#initializations
if afternoon >= 7:
afternoon =0
sleep(nap[afternoon])
afternoon = afternoon + 1
twapi = Twython(app_key=app_key, app_secret=app_secret,
oauth_token=oauth_token, oauth_token_secret=oauth_token_secret)
next_cursor = -1
result = {}
result["screen_name"] = ""
result["followers"] = []
iteration = 0
file_name = ""
#user info
user = twapi.lookup_user(screen_name = account_name)
#store user name
result['screen_name'] = account_name
#loop until all cursored results are stored
while (next_cursor != 0):
sleep(random.randrange(start = 1, stop = 15, step = 1))
call_result = twapi.get_followers_ids(screen_name = account_name, cursor = next_cursor)
#loop over each entry of followers id and append each entry to results_follower
for i in call_result["ids"]:
result["followers"].append(i)
next_cursor = call_result["next_cursor"] #new next_cursor
iteration = iteration + 1
if (iteration > 13): #skip sleep if all cursored pages are processed
error_msg = localtime()
error_msg = "".join([str(error_msg.tm_mon), "/", str(error_msg.tm_mday), "/", str(error_msg.tm_year), " at ", str(error_msg.tm_hour), ":", str(error_msg.tm_min)])
error_msg = "".join(["Twitter API Request Rate Limit hit on ", error_msg, ", wait..."])
print(error_msg)
del error_msg
sleep(901) #15min + 1sec
iteration = 0
#output file
file_name = "".join([account_name, ".txt"])
#print output
out_file = open(file_name, "w") #open file "account_name.txt"
#out_file.write(str(result["followers"])) #standard format
for i in result["followers"]: #R friendly table format
out_file.write(str(i))
out_file.write("\n")
out_file.close()
As discussed in the comments, there are a few issues with your code at present. You shouldn't need to delete your connection for it to function properly, and I think the issue arises because you initialise a second time without any catches for hitting your rate limit. Here is an example using Tweepy of how you can get the information you require:
import tweepy
from datetime import datetime
def download_followers(user, api):
all_followers = []
try:
for page in tweepy.Cursor(api.followers_ids, screen_name=user).pages():
all_followers.extend(map(str, page))
return all_followers
except tweepy.TweepError:
print('Could not access user {}. Skipping...'.format(user))
# Include your keys below:
consumer_key = 'YOUR_KEY'
consumer_secret = 'YOUR_KEY'
access_token = 'YOUR_KEY'
access_token_secret = 'YOUR_KEY'
# Set up tweepy API, with handling of rate limits
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
main_api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
# List of usernames to get followers for
lookup_users = ['asongtoruin', 'mbiella']
for username in lookup_users:
user_followers = download_followers(username, main_api)
if user_followers:
with open(username + '.txt', 'w') as outfile:
outfile.write('\n'.join(user_followers))
print('Finished outputting: {} at {}'.format(username, datetime.now().strftime('%Y/%m/%d %H:%M:%S')))
Tweepy is clever enough to know when it has hit its rate limit when we use wait_on_rate_limit=True, and it checks how long it needs to sleep for before it can start again. By using wait_on_rate_limit_notify=True, we allow it to print out how long it will be waiting until it can next get a page of followers (through this ID-based method, it seems as though there are 5000 IDs per page).
We additionally catch a TweepError exception - this can occur if the username provided relates to a protected account that our authenticated user does not have permission to view. In this case, we simply skip the user so that other information can still be downloaded, but print a warning that the user could not be accessed.
Running this saves a text file of follower IDs for any user it can access. For me it prints the following:
Rate limit reached. Sleeping for: 593
Finished outputting: asongtoruin at 2017/02/22 11:43:12
Could not access user mbiella. Skipping...
With the follower IDs of asongtoruin (aka me) saved as asongtoruin.txt.
There is one possible issue, in that our pages of followers start from the newest first. This could (though I don't understand the API well enough to say with certainty) cause issues with our output dataset if new users are added between our calls: we may both miss those users and end up with duplicates in our dataset. If duplicates become an issue, you could change return all_followers to return set(all_followers).
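If the lost ordering matters, a small variant of that change drops duplicates while keeping first-seen order (OrderedDict.fromkeys preserves insertion order, whereas a plain set does not):
from collections import OrderedDict
def dedupe_keep_order(ids):
    #drop duplicate follower IDs without disturbing collection order
    return list(OrderedDict.fromkeys(ids))
#then, inside download_followers: return dedupe_keep_order(all_followers)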
I have the following Python script:
#!/usr/bin/env python
# coding: utf-8
import time
import serial
import datetime
from datetime import timedelta
import os.path
PATH = '/home/pi/test/'
Y = datetime.datetime.now().strftime('%Y')[3]
def get_current_time():
return datetime.datetime.now()
def get_current_time_f1():
return datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')
def get_current_time_f2():
return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
def compare_time():
current_time = get_current_time()
if current_time.minute == 59 and current_time.second >= 45:
return "new"
def file_date():
if compare_time() == "new":
plusonehour = datetime.datetime.now() + timedelta(hours=1)
return plusonehour.strftime('Z'+Y+'%m%d%H')
else:
return datetime.datetime.now().strftime('Z'+Y+'%m%d%H')
def createCeilFile():
filename = os.path.join(PATH, file_date()+".dat")
fid = open(filename, "w")
fid.writelines(["-Ceilometer Logfile","\n","-File created: "+get_current_time_f1(),"\n"])
return fid
# open the first file at program start
fid = createCeilFile()
# serial port settings
ser=serial.Serial(
port='/dev/ttyUSB0',
baudrate = 19200,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
)
counter=0
# read first byte, grab date string, read rest of string, print both in file
while 1:
tdata = ser.read()
time.sleep(3)
data_left = ser.inWaiting()
tdata += ser.read(data_left)
fid.writelines(["-"+get_current_time_f2(),"\n",tdata,"\n"])
#should have ~10 secs before next message needs to come in
#if next string will go into the next hour
if compare_time() == "new":
# close old file
fid.writelines(["File closed: "+get_current_time_f2(),"\n"])
fid.close()
# open new file
fid = createCeilFile()
# then it goes back to 'tdata = ser.read()' and waits again.
It works fine and stores all the data I need in the correct format and so on.
A data message from the device comes through every 15 seconds. The Python script runs indefinitely and reads those messages. At the beginning of each message the script writes the time at which the message was received and written to the file. And that time is the problem with this script: I have a time drift of about 3 to 4 seconds in 24 hours. The weird part is that the time drifts backwards. So if I start with data messages coming in at 11, 26, 41 and 56 seconds within the minute, after 24 hours the messages seem to come in at 8, 23, 38 and 53 seconds.
Does anyone have an explanation for this, or maybe a way to compensate for it? I thought about restarting the program every hour, after it saves the hourly file. Maybe that would help reset the weird time drift?
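To narrow it down, here is a small diagnostic sketch (arrival_note is my own helper, not part of the script above): log the gap between consecutive messages next to each timestamp. If the gaps sit consistently just under 15 seconds, the drift comes from the device's clock running slightly fast rather than from the Pi or the script.
import time
last_arrival = None
def arrival_note():
    """Return e.g. ' (interval: 14.997 s)' since the previous message."""
    global last_arrival
    now = time.time()
    note = ''
    if last_arrival is not None:
        note = ' (interval: %.3f s)' % (now - last_arrival)
    last_arrival = now
    return note
#usage inside the read loop:
#fid.writelines(["-" + get_current_time_f2() + arrival_note(), "\n", tdata, "\n"])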
The following code communicates with multiple pH modules via the serial port. The pH modules are selected through a multiplexer on the i2c bus, using an 8574N chip. When the Raspberry Pi has booted up, the program works correctly in the terminal; however, if the program is stopped and restarted, it fails to initialize correctly. The program also misbehaves in the Python IDLE, selecting random devices on the multiplexer. It seems as though the i2c or serial comms on the Pi are not initializing correctly. For example, when selecting a module in IDLE, it brings up a different module from the one chosen and spits out incorrect data. In the terminal, however, this all works fine.
Does anyone have any ideas?
Any help would be much appreciated!
from smbus import SMBus
# from itertools import cycle
import time
import serial
usbport = '/dev/ttyAMA0'
ser = serial.Serial(usbport, 9600, timeout = 2)#
line = ""
data = ""
bus = SMBus(1) # Port 1 used on REV2
print "Electronics Workshop PH Reader."
print "=========================="
global count
count = 0 #init count to 0
probevalue = ""
def inputsel(count):
bus.write_byte(0x38, 0x00)
# count = input(" Select ph unit '0 to 23'") # count now incremented during program
count = input(" Select ph unit '0 to 23'")
if (count< 16):
data_sel = count
data_sel = data_sel | 32 # or the 2 numbers together
else:
data_sel = count - 16
data_sel = data_sel | 16 # or the 2 numbers together
bus.write_byte(0x38, data_sel) # send "count"Varable out to I2c Bus
print str(data_sel)
time.sleep (1)
data_sel = 0
print "Reading Channel:" + str(count)
def write_serial(serial_data):
global ser
data = ""
line = ""
status = ""
ser.write(serial_data) # set ph unit to take one reading at a time
ser.write("\r") # set ph unit to take one reading at a time
time.sleep (1)
ser.write(serial_data) # set ph unit to take one reading at a time
ser.write("\r")
time.sleep (1)
while status != "done":
data = ser.read(1) # gets data from PH module '
if(data == "\r"): # check each bite of data to see if its a carriage return expecting "XX.XXr"
#carriage return sent diplay message and data
print "Received from sensor:" + line
status = "done"
probevalue = line
line =" "
ser.flushInput()
ser.flushOutput()
else:
# all 5 bytes of data have not been received
line = line + data # add one to the varable line
#if(data == " "):
# write_serial()
return probevalue
def main():
global serial_data
serial_data = " "
global count
probevalue = " "
count = 0
bus.write_byte(0x38, 0)
while 1:
inputsel(count) #select the input based off count variable
loop = range(0,7)
for loopcount in loop:
if ((loopcount == 0) | (loopcount == 1)): #set command to #?
serial_data = "#?\r" #set command to request id
if (loopcount == 0): #if buffer needs clearing
print "Clearing Buffer....."
else: print "Requesting ID....."
probevalue = write_serial(serial_data) #call write_serial with #? command put value into probevalue
elif (loopcount >= 2): #set r command once buffer clear and ID obtained
serial_data = "r\r" #set command to read value
if (loopcount == 2): print "Reading pH:" #output reaidng pH when loopcounter at 2
probevalue = write_serial(serial_data) #call write_serial with r command put value into probevalue
print "=========================="