I am getting a 'TwythonRateLimitError' and want to be sure that I don't screw up my account. I am new to working with the Twitter API. How can I check that I am not going over my query limit? I read that it is 150 queries/hour... What happens if I go over? Am I at risk of this in my code, or does it only apply to particular commands?
I am not building an app; I am just trying to get a specific sample of Twitter users (a random set of users with similar follower bases, 7500 to 10000 followers). My code so far is below. I will be saving the successful hits to a file, but I am waiting to be sure that is necessary.
from twython import Twython, TwythonError, TwythonRateLimitError
from random import randint
APP_KEY = 'redacted'
APP_SECRET = 'redacted'
ACCESS_TOKEN = 'redacted'
twitter = Twython(APP_KEY, APP_SECRET, oauth_version=2)
ACCESS_TOKEN = twitter.obtain_access_token()
twitter = Twython(APP_KEY,access_token=ACCESS_TOKEN)
print "hello twitterQuery\n"
count = 0
step = 0
isError = 0
try:
    # new account I made today to set an upper bound on user IDs
    maxID = twitter.show_user(screen_name="query_test")['id']
except TwythonRateLimitError:
    isError = 1

ids = [0,0,0,0,0,0,0,0,0,0]

if isError == 0 and step <= 150:
    while count < 10:
        step = step + 1
        randomID = randint(1, maxID)
        isMissing = 0
        print str(step) + " " + str(randomID)
        try:
            randomUserData = twitter.show_user(user_id=randomID)
        except TwythonError:
            isMissing = 1
        if isMissing == 0:
            followers = randomUserData['followers_count']
            if followers >= 7500 and followers <= 10000:
                print "ID: " + str(randomID) + ", followers: " + str(followers)
                ids[count] = randomID
                count = count + 1

print "\ndone"
for id in ids:
    print id
To see your current rate limit status, pass in your app token and send a GET request to
https://api.twitter.com/1.1/account/rate_limit_status.json
and inspect the response.
See this page for further context
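For example, here is a minimal sketch using the requests library and the bearer token your code already obtains. Note that the current v1.1 documentation lists the endpoint path as application/rate_limit_status.json, and the optional resources parameter narrows the response to the API families you care about:

import requests

# Minimal sketch: check how many calls remain before the window resets.
# Assumes ACCESS_TOKEN is the OAuth 2 bearer token from obtain_access_token().
resp = requests.get(
    'https://api.twitter.com/1.1/application/rate_limit_status.json',
    params={'resources': 'users'},
    headers={'Authorization': 'Bearer ' + ACCESS_TOKEN},
)
status = resp.json()['resources']['users']['/users/show/:id']
print("%s calls remaining; window resets at %s" % (status['remaining'], status['reset']))

If 'remaining' reaches 0, sleep until the reset time rather than continuing to send requests.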
I'm trying to get all the tracks from 2 playlists into a CSV file. However, even though I increase the offset parameter by 100 on each query, only the first 100 songs of each playlist are returned, so the page never changes. What could be the problem?
import spotipy, json, csv
from spotipy.oauth2 import SpotifyClientCredentials
client_credentials_manager = SpotifyClientCredentials(client_id=client_id, client_secret=client_secret)
sp = spotipy.Spotify(client_credentials_manager=client_credentials_manager)
data_file = open('data.csv', 'w')
writer = csv.writer(data_file)
writer.writerow(['track_num', 'track_id', 'track_name', 'first_artist'] + ['liked'])
playlist_ids = [
    'xxxxxxxxxxxxxxxxxxxxxxx',  # playlist 1
    'yyyyyyyyyyyyyyyyyyyyyyy'   # playlist 2
]

for playlist_id in playlist_ids:
    offset_n = 0
    total = 100
    while offset_n < total:
        tracks_response = sp.playlist_tracks(playlist_id, offset=offset_n)
        tracks_json = json.dumps(tracks_response)
        tracks_data = json.loads(tracks_json)
        if offset_n == 0:
            total = tracks_data['tracks']['total']
        for track in tracks_data['tracks']['items']:
            track_id = track['track']['id']
            track_name = track['track']['name']
            first_artist = track['track']['artists'][0]['name']
            if playlist_id == playlist_ids[0]:
                writer.writerow([row_num, track_id, track_name, first_artist] + [1])
            else:
                writer.writerow([row_num, track_id, track_name, first_artist] + [0])
        offset_n += 100

data_file.close()
The playlist_tracks method returns a paginated result with details of the tracks of a playlist.
So you need to iterate over all pages to get the full data.
You can use this example as a reference:
def get_all_tracks_from_playlist(playlist_id):
    tracks_response = sp.playlist_tracks(playlist_id)
    tracks = tracks_response["items"]
    while tracks_response["next"]:
        tracks_response = sp.next(tracks_response)
        tracks.extend(tracks_response["items"])
    return tracks
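For example (just a sketch reusing the writer, playlist_ids, and liked flag from your code; row_num here simply restarts from 0 for each playlist), the CSV writing could then look like:

# Sketch: one CSV row per track, using the helper above to fetch every page.
for playlist_id in playlist_ids:
    liked = 1 if playlist_id == playlist_ids[0] else 0
    for row_num, item in enumerate(get_all_tracks_from_playlist(playlist_id)):
        track = item['track']
        writer.writerow([row_num, track['id'], track['name'],
                         track['artists'][0]['name'], liked])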
Regarding the ReadTimeout exception you have mentioned in the comments:
The Spotify client accepts requests_timeout and retries as arguments; according to the documentation, the defaults are requests_timeout=5 and retries=3.
You can increase them to reduce the chance of hitting the ReadTimeout exception.
As a start, you could double the request timeout to 10 seconds and raise retries to 5:
sp = spotipy.Spotify(client_credentials_manager=client_credentials_manager, requests_timeout=10, retries=5)
I am trying to create a Twitter user graph, and for that I have written the following code:
import operator
import sys
import time
from urllib.error import URLError
from http.client import BadStatusLine
import json
import twitter
from functools import partial
from sys import maxsize as maxint
import itertools
import networkx
import matplotlib.pyplot as plt
G = networkx.Graph()
# Code and function taken from the twitter cookbook
def oauth_login():
    CONSUMER_KEY = 'xxxx'
    CONSUMER_SECRET = 'xxZD6r'
    OAUTH_TOKEN = 'xxNRYl'
    OAUTH_TOKEN_SECRET = 'xxHYJl'
    auth = twitter.oauth.OAuth(OAUTH_TOKEN, OAUTH_TOKEN_SECRET, CONSUMER_KEY, CONSUMER_SECRET)
    twitter_api = twitter.Twitter(auth=auth)
    return twitter_api
# Code and function taken from the twitter cookbook
def make_twitter_request(twitter_api_func, max_errors=10, *args, **kw):
    # A nested helper function that handles common HTTPErrors. Return an updated
    # value for wait_period if the problem is a 500 level error. Block until the
    # rate limit is reset if it's a rate limiting issue (429 error). Returns None
    # for 401 and 404 errors, which requires special handling by the caller.
    def handle_twitter_http_error(e, wait_period=2, sleep_when_rate_limited=True):
        if wait_period > 3600:  # Seconds
            print('Too many retries. Quitting.', file=sys.stderr)
            raise e
        if e.e.code == 401:
            print('Encountered 401 Error (Not Authorized)', file=sys.stderr)
            return None
        elif e.e.code == 404:
            print('Encountered 404 Error (Not Found)', file=sys.stderr)
            return None
        elif e.e.code == 429:
            print('Encountered 429 Error (Rate Limit Exceeded)', file=sys.stderr)
            if sleep_when_rate_limited:
                print("Retrying in 15 minutes...ZzZ...", file=sys.stderr)
                sys.stderr.flush()
                time.sleep(60 * 15 + 5)
                print('...ZzZ...Awake now and trying again.', file=sys.stderr)
                return 2
            else:
                raise e  # Caller must handle the rate limiting issue
        elif e.e.code in (500, 502, 503, 504):
            print('Encountered {0} Error. Retrying in {1} seconds'.format(e.e.code, wait_period), file=sys.stderr)
            time.sleep(wait_period)
            wait_period *= 1.5
            return wait_period
        else:
            raise e

    wait_period = 2
    error_count = 0
    while True:
        try:
            return twitter_api_func(*args, **kw)
        except twitter.api.TwitterHTTPError as e:
            error_count = 0
            wait_period = handle_twitter_http_error(e, wait_period)
            if wait_period is None:
                return
        except URLError as e:
            error_count += 1
            time.sleep(wait_period)
            wait_period *= 1.5
            print("URLError encountered. Continuing.", file=sys.stderr)
            if error_count > max_errors:
                print("Too many consecutive errors...bailing out.", file=sys.stderr)
                raise
        except BadStatusLine as e:
            error_count += 1
            time.sleep(wait_period)
            wait_period *= 1.5
            print("BadStatusLine encountered. Continuing.", file=sys.stderr)
            if error_count > max_errors:
                print("Too many consecutive errors...bailing out.", file=sys.stderr)
                raise
# Code and function taken from the twitter cookbook
def get_friends_followers_ids(twitter_api, screen_name=None, user_id=None,
                              friends_limit=maxint, followers_limit=maxint):
    # Must have either screen_name or user_id (logical xor)
    assert (screen_name is not None) != (user_id is not None), "Must have screen_name or user_id, but not both"
    # See https://developer.twitter.com/en/docs/twitter-api/v1/accounts-and-users/follow-search-get-users/api-reference/get-friends-ids
    # for details on API parameters
    get_friends_ids = partial(make_twitter_request, twitter_api.friends.ids, count=5000)
    get_followers_ids = partial(make_twitter_request, twitter_api.followers.ids, count=5000)
    friends_ids, followers_ids = [], []
    for twitter_api_func, limit, ids, label in [
        [get_friends_ids, friends_limit, friends_ids, "friends"],
        [get_followers_ids, followers_limit, followers_ids, "followers"]
    ]:
        if limit == 0:
            continue
        cursor = -1
        while cursor != 0:
            # Use make_twitter_request via the partially bound callable...
            if screen_name:
                response = twitter_api_func(screen_name=screen_name, cursor=cursor)
            else:  # user_id
                response = twitter_api_func(user_id=user_id, cursor=cursor)
            if response is not None:
                ids += response['ids']
                cursor = response['next_cursor']
            print('Fetched {0} total {1} ids for {2}'.format(len(ids), label, (user_id or screen_name)),
                  file=sys.stderr)
            # XXX: You may want to store data during each iteration to provide an
            # additional layer of protection from exceptional circumstances
            if len(ids) >= limit or response is None:
                break
    # Do something useful with the IDs, like store them to disk...
    return friends_ids[:friends_limit], followers_ids[:followers_limit]
# Code and function taken from the twitter cookbook
def get_user_profile(twitter_api, screen_names=None, user_ids=None):
    # Must have either screen_name or user_id (logical xor)
    assert (screen_names is not None) != (user_ids is not None)
    items_to_info = {}
    items = screen_names or user_ids
    while len(items) > 0:
        items_str = ','.join([str(item) for item in items[:100]])
        items = items[100:]
        if screen_names:
            response = make_twitter_request(twitter_api.users.lookup, screen_name=items_str)
        else:  # user_ids
            response = make_twitter_request(twitter_api.users.lookup, user_id=items_str)
        for user_info in response:
            if screen_names:
                items_to_info[user_info['screen_name']] = user_info
            else:  # user_ids
                items_to_info[user_info['id']] = user_info
    return items_to_info
# Function to find reciprocal friends and sort them such that we get the top 5 friends
def reciprocal_friends(twitter_api, screen_name=None, user_id=None):
    friends_list_ids, followers_list_ids = get_friends_followers_ids(twitter_api, screen_name=screen_name,
                                                                     user_id=user_id,
                                                                     friends_limit=5000, followers_limit=5000)
    friends_reciprocal = list(set(friends_list_ids) & set(followers_list_ids))
    list_followers_count = []
    user_profiles = {}
    for each in friends_reciprocal:
        user_profiles[each] = get_user_profile(twitter_api, user_ids=[each])[each]
        list_followers_count.append(user_profiles[each]['followers_count'])
    res = sorted(list_followers_count, reverse=True)
    friends_count = {user_profiles[fr]['followers_count']: fr for fr in friends_reciprocal}
    list_resciprocal = []
    if len(res) < 6:
        list_resciprocal = friends_reciprocal
    else:
        for i in range(5):
            list_resciprocal.append(friends_count[res[i]])
    return list_resciprocal
# This function finds reciprocal friends again and again till we achieve at least 100 nodes
def crawler(twitter_api, screen_name=None, user_id=None):
    rec_friends = reciprocal_friends(twitter_api, screen_name=screen_name, user_id=user_id)
    edges = [(screen_name, x) for x in rec_friends]
    G.add_edges_from(edges)
    nodes = nxt_qu = rec_friends
    if len(nodes) == 0:
        print("No reciprocal friends")
        return rec_friends
    while G.number_of_nodes() < 101:
        print("Queue Items : ", nxt_qu)
        (queue, nxt_qu) = (nxt_qu, [])
        for q in queue:
            if G.number_of_nodes() >= 101:
                break
            print("ID Entered:", q)
            res = reciprocal_friends(twitter_api, screen_name=None, user_id=q)
            edges = [(q, z) for z in res]
            G.add_edges_from(edges)
            nxt_qu += res
            nodes += res
    print(nodes)
    # To Plot the graph
    networkx.draw(G)
    plt.savefig("graphresult.png")
    plt.show()
    # Printing the Output
    print("No. of Edges: ", G.number_of_edges())
    print("No. of Nodes: ", G.number_of_nodes())
    print("Diameter : ", networkx.diameter(G))
    print("Average Distance: ", networkx.average_shortest_path_length(G))
    # To write the output into a file
    f = open("output.txt", "w")
    f.write("No. of Nodes: " + str(G.number_of_nodes()))
    f.write("\nNo. of Edges: " + str(G.number_of_edges()))
    f.write("\nDiameter: " + str(networkx.diameter(G)))
    f.write("\nAverage Distance: " + str(networkx.average_shortest_path_length(G)))

twitter_api = oauth_login()
crawler(twitter_api, screen_name="POTUS")
However, I am getting this error often, and it is making my program run very slowly:
ID Entered: 60784269
Fetched 5000 total friends ids for 60784269
Fetched 5000 total followers ids for 60784269
Encountered 429 Error (Rate Limit Exceeded)
Retrying in 15 minutes...ZzZ...
Is there a way to get around this and make the code run faster?
I have read a few documents but I still don't have a clear picture. Any help is appreciated.
There is no way to get around the rate limit restrictions of the public API.
However, there is now an API v2, which also allows you to look up users and does not count against the same rate limits.
https://developer.twitter.com/en/docs/twitter-api/users/lookup/introduction
Note that this solution would only be temporary, as Twitter will at some point remove access to API v1.
You can also request access to the premium/enterprise level of the API, but you will have to pay for it.
You can see the rate limit documentation here:
API v1
API v2
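For example, here is a minimal sketch of a v2 user lookup with the requests library (it assumes you have a v2 bearer token; the endpoint and field names follow the documentation linked above):

import requests

BEARER_TOKEN = "..."  # your API v2 bearer token

def lookup_users_v2(user_ids):
    # Up to 100 IDs per request; public_metrics includes followers_count.
    params = {
        "ids": ",".join(str(i) for i in user_ids),
        "user.fields": "public_metrics",
    }
    headers = {"Authorization": "Bearer " + BEARER_TOKEN}
    response = requests.get("https://api.twitter.com/2/users", params=params, headers=headers)
    response.raise_for_status()
    return response.json().get("data", [])

Keep in mind that v2 has its own rate limits, so you will still want retry/back-off logic like your make_twitter_request wrapper.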
I have a while loop nested within a for loop that runs over a JSON array collected from Firestore. It collects stock symbols to pass to another API, which gathers minute-by-minute trading data to put back into the Firestore DB.
While I'm running the loop, it stops unexpectedly, without any error, around the fourth or sixth (never more) pass through the 389-entry while loop.
Any idea why this is? Is it something with my code? I noticed that if I changed the limit in the while loop from 389 down to 100, it worked through all the companies in the JSON array, but it won't get through more than four companies down the list if it's the full 389 entries.
Anyway, thanks for the help!
import requests
import json
import firebase_admin
from firebase_admin import credentials
from firebase_admin import firestore
import datetime
cred = credentials.Certificate("./serviceAccountKey.json")
firebase_admin.initialize_app(cred)
db = firestore.client()
doc_ref1 = db.collection(u'Quiver').stream()
for doc in doc_ref1:
    symbol = doc.id
    api_url = "https://api.iextrading.com/1.0/stock/{}/chart/1d".format(symbol)
    query_url = api_url
    r = requests.get(query_url)
    if r.status_code != 200:
        print("Error:", r.status_code)
        continue
    if r.status_code == 404:
        print("Error:", r.status_code, symbol)
        continue
    json_stock = r.json()
    b = 0
    while b <= 100:
        try:
            date = json_stock[b]['date']
            minute = json_stock[b]['minute']
            label = json_stock[b]['label']
            high = json_stock[b]['high']
            low = json_stock[b]['low']
            average = json_stock[b]['average']
            volume = json_stock[b]['volume']
            notional = json_stock[b]['notional']
            numberOfTrades = json_stock[b]['numberOfTrades']
            marketHigh = json_stock[b]['marketHigh']
            marketLow = json_stock[b]['marketLow']
            marketAverage = json_stock[b]['marketAverage']
            marketVolume = json_stock[b]['marketVolume']
            marketNotional = json_stock[b]['marketNotional']
            marketNumberOfTrades = json_stock[b]['marketNumberOfTrades']
            open = json_stock[b]['open']
            close = json_stock[b]['close']
            marketOpen = json_stock[b]['marketOpen']
            marketClose = json_stock[b]['marketClose']
            changeOverTime = json_stock[b]['changeOverTime']
            marketChangeOverTime = json_stock[b]['marketChangeOverTime']
            doc_ref = db.collection(u'dailies').document(u'{}-{}'.format(minute, symbol))
            doc_ref.set({
                u'date': u'{}'.format(date),
                u'minute': u'{}'.format(minute),
                u'label': u'{}'.format(label),
                u'high': u'{}'.format(high),
                u'average': u'{}'.format(average),
                u'notional': u'{}'.format(notional),
                u'number of trades': u'{}'.format(numberOfTrades),
                u'market high': u'{}'.format(marketHigh),
                u'market low': u'{}'.format(marketLow),
                u'market average': u'{}'.format(marketAverage),
                u'market volume': u'{}'.format(marketVolume),
                u'market notional': u'{}'.format(marketNotional),
                u'market number of trades': u'{}'.format(marketNumberOfTrades),
                u'open': u'{}'.format(open),
                u'close': u'{}'.format(close),
                u'market open': u'{}'.format(marketOpen),
                u'market close': u'{}'.format(marketClose),
                u'change over time': u'{}'.format(changeOverTime),
                u'market change over time': u'{}'.format(marketChangeOverTime)
            })
            print("{} {}: {}".format(symbol, minute, b))
            b += 1
        except IndexError:
            print("Index Error")
            break
You can use:
    except Exception as errmsg:
        print(errmsg)
to print more information about what is actually failing.
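For example, here is a minimal sketch (process_symbol is a hypothetical helper standing in for the request and while-loop body you already have) that logs a full traceback and moves on, instead of letting the run end silently:

import traceback

for doc in doc_ref1:
    symbol = doc.id
    try:
        process_symbol(symbol)  # hypothetical helper wrapping your request + while loop
    except Exception as err:
        # Print the error and full traceback, then continue with the next symbol.
        print("Failed on {}: {}".format(symbol, err))
        traceback.print_exc()
        continue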
I'm trying to collect every public tweet for a hashtag, but my code does not go further than 299 tweets.
I am also trying to collect tweets from a specific time range, for example only tweets between May 2015 and July 2016. Is there any way to do this in the main process, or should I write a little extra code for it?
Here is my code:
# if this is the first time, creates a new array which
# will store max id of the tweets for each keyword
if not os.path.isfile("max_ids.npy"):
    max_ids = np.empty(len(keywords))
    # every value is initialized as -1 in order to start from the beginning the first time program run
    max_ids.fill(-1)
else:
    max_ids = np.load("max_ids.npy")  # loads the previous max ids

# if there is any new keywords added, extends the max_ids array in order to correspond every keyword
if len(keywords) > len(max_ids):
    new_indexes = np.empty(len(keywords) - len(max_ids))
    new_indexes.fill(-1)
    max_ids = np.append(arr=max_ids, values=new_indexes)

count = 0
for i in range(len(keywords)):
    since_date = "2015-01-01"
    sinceId = None
    tweetCount = 0
    maxTweets = 5000000000000000000000  # maximum tweets to find per keyword
    tweetsPerQry = 100
    searchQuery = "#{0}".format(keywords[i])
    while tweetCount < maxTweets:
        if max_ids[i] < 0:
            if (not sinceId):
                new_tweets = api.search(q=searchQuery, count=tweetsPerQry)
            else:
                new_tweets = api.search(q=searchQuery, count=tweetsPerQry,
                                        since_id=sinceId)
        else:
            if (not sinceId):
                new_tweets = api.search(q=searchQuery, count=tweetsPerQry,
                                        max_id=str(max_ids - 1))
            else:
                new_tweets = api.search(q=searchQuery, count=tweetsPerQry,
                                        max_id=str(max_ids - 1),
                                        since_id=sinceId)
        if not new_tweets:
            print("Keyword: {0} No more tweets found".format(searchQuery))
            break
        for tweet in new_tweets:
            count += 1
            print(count)
            file_write.write(
                .
                .
                .
            )
            item = {
                .
                .
                .
                .
                .
            }
            # instead of using mongo's id for _id, using tweet's id
            raw_data = tweet._json
            raw_data["_id"] = tweet.id
            raw_data.pop("id", None)
            try:
                db["Tweets"].insert_one(item)
            except pymongo.errors.DuplicateKeyError as e:
                print("Already exists in 'Tweets' collection.")
            try:
                db["RawTweets"].insert_one(raw_data)
            except pymongo.errors.DuplicateKeyError as e:
                print("Already exists in 'RawTweets' collection.")
        tweetCount += len(new_tweets)
        print("Downloaded {0} tweets".format(tweetCount))
        max_ids[i] = new_tweets[-1].id
        np.save(arr=max_ids, file="max_ids.npy")  # saving in order to continue mining from where left next time program run
Have a look at this: https://tweepy.readthedocs.io/en/v3.5.0/cursor_tutorial.html
And try this:
import tweepy
auth = tweepy.OAuthHandler(CONSUMER_TOKEN, CONSUMER_SECRET)
api = tweepy.API(auth)
for tweet in tweepy.Cursor(api.search, q='#python', rpp=100).items():
    # Do something
    pass
In your case you have a max number of tweets to get, so as per the linked tutorial you could do:
import tweepy
MAX_TWEETS = 5000000000000000000000
auth = tweepy.OAuthHandler(CONSUMER_TOKEN, CONSUMER_SECRET)
api = tweepy.API(auth)
for tweet in tweepy.Cursor(api.search, q='#python', rpp=100).items(MAX_TWEETS):
    # Do something
    pass
If you want tweets after a given ID, you can also pass that argument.
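For example, a small sketch that restricts the search to tweets newer than a known ID (SINCE_ID and MAX_TWEETS are placeholders); Cursor passes since_id, and likewise max_id, straight through to api.search:

import tweepy

MAX_TWEETS = 1000      # stop after this many
SINCE_ID = 123456789   # placeholder: the newest tweet ID you have already processed

auth = tweepy.OAuthHandler(CONSUMER_TOKEN, CONSUMER_SECRET)
api = tweepy.API(auth)

for tweet in tweepy.Cursor(api.search, q='#python', rpp=100, since_id=SINCE_ID).items(MAX_TWEETS):
    # Do something
    pass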
Sorry, I can't answer in comment, too long. :)
Sure :) Check this example:
An advanced search for the #data keyword, May 2015 - July 2016.
Got this url: https://twitter.com/search?l=&q=%23data%20since%3A2015-05-01%20until%3A2016-07-31&src=typd
session = requests.session()
keyword = 'data'
date1 = '2015-05-01'
date2 = '2016-07-31'
response = session.get(
    'https://twitter.com/search?l=&q=%23{}%20since%3A{}%20until%3A{}&src=typd'.format(keyword, date1, date2),
    stream=True)
Now we have all the requested tweets.
You will probably run into problems with 'pagination', though.
Pagination url ->
https://twitter.com/i/search/timeline?vertical=news&q=%23data%20since%3A2015-05-01%20until%3A2016-07-31&src=typd&include_available_features=1&include_entities=1&max_position=TWEET-759522481271078912-759538448860581892-BD1UO2FFu9QAAAAAAAAETAAAAAcAAAASAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA&reset_error_state=false
You could probably put in a random tweet ID, or parse the first page, or request some data from Twitter first; it can be done.
Use Chrome's network tab to find all the requested information :)
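If you do need to follow that pagination by hand, here is a rough sketch. It relies on Twitter's internal (undocumented) search timeline endpoint shown above, so the response keys used here ('items_html', 'min_position') are assumptions based on what that endpoint returned at the time and may break at any point:

import requests

# Assumed shape of the internal endpoint: JSON with rendered tweet HTML in
# 'items_html' and the next cursor in 'min_position'.
base_url = ('https://twitter.com/i/search/timeline?vertical=news'
            '&q=%23data%20since%3A2015-05-01%20until%3A2016-07-31&src=typd'
            '&include_available_features=1&include_entities=1&reset_error_state=false')

session = requests.session()
max_position = ''
while True:
    data = session.get(base_url + '&max_position=' + max_position).json()
    html = data.get('items_html', '')
    if not html.strip():
        break
    # ... parse the tweets out of `html` here, e.g. with BeautifulSoup ...
    max_position = data.get('min_position', '')
    if not max_position:
        break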
This code worked for me.
import tweepy
import pandas as pd
import os
#Twitter Access
auth = tweepy.OAuthHandler( 'xxx','xxx')
auth.set_access_token('xxx-xxx','xxx')
api = tweepy.API(auth,wait_on_rate_limit = True)
df = pd.DataFrame(columns=['text', 'source', 'url'])
msgs = []
msg =[]
for tweet in tweepy.Cursor(api.search, q='#bmw', rpp=100).items(10):
    msg = [tweet.text, tweet.source, tweet.source_url]
    msg = tuple(msg)
    msgs.append(msg)

df = pd.DataFrame(msgs)
Check the Twitter API documentation; it probably only allows around 300 tweets to be parsed.
I would recommend forgetting the API and doing it with requests and streaming instead; the API is essentially an implementation of requests with limitations.
Since Twitter changed their API, the script I use to control a prototype vending unit doesn't work anymore, and the developer who wrote the script has moved on to greener pastures.
The script scans Twitter once every 15 seconds searching for the most recent tweet that contains a specified hashtag (currently set to #sunshine) and it also filters out any retweets.
When it has identified a new tweet, it sends a signal to an Arduino, which triggers a solenoid to dispense a free product sample (currently sunscreen).
This line of code appears to be the problem (it is now obsolete):
j =json.loads(urllib.urlopen('http://search.twitter.com/search.json?q='+searchTerm+'&result_type=recent&rpp=1&filter:retweets').read())
I've registered a Developer account with Twitter, so I have the consumer secret and token codes etc. but I still don't know how to modify the old code with these OAuth codes to get it working again. I have reproduced the code in full below. Can anyone please help me and show me the way to get this script working again.
import twitter
import json
import urllib
from pprint import pprint
import time
from arduino import Arduino
##################SETUP AS REQUIRED###########################
##############################################################
#Change to suit the sample, currently at 0.2 of a second #
vendtime = 0.2 #
#
#Delay Time Between each Search (never below 15 seconds) #
delayTime = 15 #
#This is the search term for the URL. (%23 = #) #
searchTerm = '%23sunshine' #
#
A = Arduino('COM3') #This will need to be COM3 #
A.output([12]) #Output on pin 12 #
A.output([13]) #to keep serial in use #
##############################################################
#to collect the first tweet without vending
countTweet = 0
#To test Twitter for consistancy
tweet= 0
noTweet= 0
#the infinite loop
while True:
    #j contains the JSON we load from the URL
    j = json.loads(urllib.urlopen('http://search.twitter.com/search.json?q='+searchTerm+'&result_type=recent&rpp=1&filter:retweets').read())
    #Debug JSON from twitter (for faults on the Twitter end or possible GET limit id below 15 seconds per request)
    #pprint(j) #needed for debugging only

    #find the text and the tweet id
    if 'results' in j and j['results']:
        text = j['results'][0]['text']
        id = j['results'][0]['id']
        #how many times the Json is complete
        tweet += 1
    else:
        #How many times the Json is incomplete (sometimes twitter malfunctions. About 0.1 in 100 are broken)
        noTweet += 1

    #print the text and id to the screen
    pprint(text) #needed for debugging only
    pprint(id) #needed for debugging only

    #to get the existing tweet from before we power on, if the first ID has been stored already (count == 1)
    if countTweet != 0: #if countTweet is not equal to 0 then it's not the first tweet
        #pprint ("new loop") #needed for debugging only

        #if lastID is not equal to ID
        if lastID != id:
            #Tell Arduino to Vend
            #pin 12 HIGH
            A.setHigh(12)
            #Sleep for the time specified in vendtime
            time.sleep(vendtime)
            #pin 12 LOW
            A.setLow(12)
            #Display the tweet that triggered the vend
            #pprint(text) #needed for debugging only
            #pprint(id) #needed for debugging only
            #Make lastID equal to ID so that next time we can compare it
            lastID = id
            #pprint ('lastID updated') #needed for debugging only
        #if no new tweets, print
        else: #needed for debugging only
            pprint('no new tweets') #needed for debugging only
    #If it's the first loop, confirm by printing to the screen
    else:
        pprint("First loop complete")
        pprint(text)
        pprint(id)
        lastID = id
        pprint(lastID)
        countTweet += 1 #Add 1 to countTweet

    pprint('Number of Tweets')
    pprint(countTweet)
    pprint('Working JSON')
    pprint(tweet)
    pprint('Broken JSON')
    pprint(noTweet)
    pprint('waiting')
    A.setHigh(13)
    time.sleep(delayTime)
    A.setLow(13)
The code you posted didn't even use the twitter library. The code below has been reworked and actually uses the twitter library but you still need to put the twitter keys into the code.
from twitter import *
import time
from arduino import Arduino
##################SETUP AS REQUIRED###########################
##############################################################
#Change to suit the sample, currently at 0.2 of a second #
vendtime = 0.2 #
#
#Delay Time Between each Search (never below 15 seconds) #
delayTime = 15 #
#This is the search term #
searchTerm = "#sunshine" #
#
A = Arduino("COM3") #This will need to be COM3 #
A.output([12]) #Output on pin 12 #
A.output([13]) #to keep serial in use #
##############################################################
# Twitter keys
OAUTH_TOKEN = "" # Access token
OAUTH_SECRET = "" # Access token secret
CONSUMER_KEY = "" # Consumer key
CONSUMER_SECRET = "" # Consumer secret
#to collect the first tweet without vending
first_tweet = True
#To test Twitter for consistancy
tweet= 0
notweet= 0
# Start Twitter session
t = Twitter(
    auth=OAuth(OAUTH_TOKEN, OAUTH_SECRET, CONSUMER_KEY, CONSUMER_SECRET)
)
#the infinite loop
while True:
    # Print stats
    print("Number of Tweets: %d" % (tweet + notweet))
    print("Working JSON: %d" % tweet)
    print("Broken JSON: %d" % notweet)

    # Perform search
    search_results = t.search.tweets(q=searchTerm, _timeout=60)

    #find the text and the tweet id
    tweet_failed = True
    if search_results:
        if "statuses" in search_results:
            statuses = search_results["statuses"]
            if statuses:
                # Select first result
                status = statuses[0]
                if not bool(set(["id", "text"]) - set(status.keys())):
                    tweet_failed = False
                    tweet_text = status["text"]
                    tweet_id = status["id"]
                    #how many times the Json is complete
                    tweet += 1
    if tweet_failed:
        #How many times the Json is incomplete (sometimes twitter malfunctions. About 0.1 in 100 are broken)
        notweet += 1
        continue
    else:
        if first_tweet:
            first_tweet = False
            print("First loop complete")
        else:
            #if last_id is not equal to tweet_id
            if last_id != tweet_id:
                #Tell Arduino to Vend
                #pin 12 HIGH
                A.setHigh(12)
                #Sleep for the time specified in vendtime
                time.sleep(vendtime)
                #pin 12 LOW
                A.setLow(12)
        #Make last_id equal to ID so that next time we can compare it
        last_id = tweet_id
        #Display the tweet that triggered the vend
        print("Tweet: %s" % tweet_text)
        print("Id: %d" % tweet_id)

    print("waiting")
    A.setHigh(13)
    time.sleep(delayTime)
    A.setLow(13)