Make tweepy search for the newest mentions instead of the oldest - python

Today I wrote a twitter bot that replies anybody who mentions it with a random image from a folder.
The problem here is that I'm a newbie in python and I don't know how to make it funcitonal at all. When I started running it, the bot started replying all the mentions from other users (I'm using an old account a friend gave to me), and that's not precisely what I want, even if it's working, but not as I desire.
The bot is replying all the mentions from the very beggining and it won't stop until all these are replied (the bot is now turned off, I don't want to annoy anybody)
How can I make it to only reply to latest mentions instead of the first ones?
here's the code:
import tweepy
import logging
from config import create_api
import time
import os
import random
from datetime import datetime
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
api = create_api()
imagePath = random.choice(os.listdir("images/"))
while True:
for tweet in tweepy.Cursor(api.mentions_timeline).items():
try:
imagePath = random.choice(os.listdir("images/"))
tweetId = tweet.user.id
username = tweet.user.screen_name
api.update_with_media('images/' + imagePath, "#" + username + " ", in_reply_to_status_id=tweet.id)
print('Replying to ' + username + 'with ' + imagePath)
except tweepy.TweepError as e:
print(e.reason)
except StopIteration:
break
time.sleep(12)
Thanks in advance.

I don't have the ability to test this code currently but this should work.
Instead of iterating over every tweet, it turns the iterator that tweepy.Cursor returns into a list and then just gets the last item in that list.
api = create_api()
imagePath = random.choice(os.listdir("images/"))
while True:
tweet_iterator = tweepy.Cursor(api.mentions_timeline).items()
latest_tweet = list(tweet_iterator)[-1]
try:
imagePath = random.choice(os.listdir("images/"))
tweetId = latest_tweet.user.id
username = latest_tweet.user.screen_name
api.update_with_media('images/' + imagePath, "#" + username + " ", in_reply_to_status_id=latest_tweet.id)
print('Replying to ' + username + 'with ' + imagePath)
except tweepy.TweepError as e:
print(e.reason)
except StopIteration:
break
time.sleep(12)
You will also want to keep track of what user you last replied to, so you don't just keep spamming the same person over and over.
This isn't the most efficient way of doing it but should be easy enough to understand:
latest_user_id = None
while True:
# Rest of the code
try:
if latest_user_id == latest_tweet.user.id:
# don't do anything
else:
latest_user_id = latest_tweet.user.id
# the rest of your code

Related

Spotipy Accessing Track Data No Longer Works

I have been working on an AI project using Spotipy and the Spotify Web API. I have been getting a list of preview_url's to do some analysis on and I have successfully gotten many, but I ran into issues lately. Whenever I try to use .track(track_id) it gets stuck on the line and doesn't continue past the line. I was thinking it could be an issue with the API, but other commands work fine, it's only track that is giving me issues. I cannot figure out the issue because it doesn't give me any errors, it just gets stuck trying to execute that line and never finishes.
Refreshing the client secret does nothing now. This is the code I have so far.
from spotipy.oauth2 import SpotifyClientCredentials
cid = '121e03d3acd1440188ae4c0f58b844d4'
secret = '431a5e56bcd544c3aefce8166a9c3703'
client_credentials_manager = SpotifyClientCredentials(client_id=cid, client_secret=secret)
sp = spotipy.Spotify(client_credentials_manager=client_credentials_manager)
number = 2
output_file = open('data\\25k_data_preview\\track_url_preview_' + str(number) + '.txt', 'a')
for l in open('data\\25k_data\\track_url_' + str(number) + '.txt'):
line = l.replace('\n','')
print(line)
try:
track = sp.track(line)
try:
testing = track['preview_url']
if testing != None:
output_file.write(line + " " + testing + "\n")
except:
x = 0
except:
x = 0
output_file.close()

Why does my Python bot sometimes posts one time too many?

I wrote a bot that fetches posts from Reddit and posts them on a Twitter Account. But sometimes - i dont know why - it posts twice in a row, and not once every 3 hours. I am suspecting that it is because i did something like this:
do stuff:
if stuff doesnt already exist:
do other stuff
else:
do stuff
And i really think its bad practice, but i cant figure out how else i can let it run in an infinite loop, but still try to get a post that hasnt been posted before.
I have two points in my code where i "re-run" the entire code upon a check. One is when the post that is fetched from reddit is not an image, the other when the post that was fetched was already posted before (and stored in a json file for this exact check).
I hope someone understands what i mean, thanks in advance.
import time
import tweepy
import datetime
import praw
import urllib.request
import os
import json
def Mainbot():
reddit = praw.Reddit(client_id='X',
client_secret='X',
user_agent='RedditFetchBot by FlyingThunder')
def Tweet(postinfo):
auth = tweepy.OAuthHandler("X", "X")
auth.set_access_token("X", "X")
api = tweepy.API(auth)
try:
api.update_with_media("local-filename.jpg", postinfo)
except:
print("not a file post")
Mainbot() #check 1
post = reddit.subreddit('okbrudimongo').random()
x = post.id
with open('data.json', 'r') as e:
eread = e.read()
if x not in eread:
with open('data.json', 'a') as f:
json.dump(x, f)
f.close()
e.close()
else:
e.close()
print("already posted")
Mainbot() #check 2
print(post.url + " " + post.title)
urllib.request.urlretrieve(post.url, "local-filename.jpg")
Tweet(postinfo=post.title+" (https://www.reddit.com" + post.permalink+")")
try:
time.sleep(5)
os.remove("local-filename.jpg")
except:
print("Datei nicht vorhanden")
def loop():
time.sleep(1800)
print("still running")
print(datetime.datetime.now())
while True:
Mainbot()
loop()
loop()
loop()
loop()
loop()
loop()
By the way, here is what it gives back - i made print checks to see what goes wrong, here you can see what it says when it posts twice
still running
2019-09-24 13:27:23.437152
still running
2019-09-24 13:57:23.437595
already posted
https://i.redd.it/xw38s1qrmlh31.jpg Führ Samstag bai ihm
https://i.redd.it/nnaxll9gjwf31.jpg Sorri Mamer
still running
2019-09-24 14:27:39.913651
still running
2019-09-24 14:57:39.913949
still running
2019-09-24 15:27:39.914013
There's quite a bit to unpack here.
if x not in eread:
...
else:
...
Mainbot() # <--- this line
in the above snippet, you check if the post.id is already in your file. and if it is, you call the function Mainbot() again which means it has another chance to post a tweet.
However, this line
Tweet(postinfo=post.title+" (https://www.reddit.com" + post.permalink+")")
Occurs outside if your if-else check, which means it will post a tweet regardless of whether or not the post.id was in your file.
I also want to address your method of looping the bot. Your use of recursion is causing your double-posting issue and could technically recursively loop a post many tweets at once if multiple posts in a row end up in the "else" branch listed above.
Also, if you are using python with open(...) as f: you don't need to call python f.close()
Here is a solution I came up with that should solve your problem and doesn't use recursion:
import time
import tweepy
import datetime
import praw
import urllib.request
import os
import json
def initBot():
# this function logs into your reddit and twitter accounts
# and returns their instances
reddit = praw.Reddit(client_id='XXXX',
client_secret='XXXX',
user_agent='RedditFetchBot by FlyingThunder')
auth = tweepy.OAuthHandler("XXXX", "XXXX")
auth.set_access_token("XXXX",
"XXXX")
twitter = tweepy.API(auth)
return reddit, twitter
def Tweet(post):
# this function simply tries to post a tweet
postinfo = post.title + " (https://www.reddit.com" + post.permalink + ")"
try:
twitter.update_with_media("local-filename.jpg", postinfo)
except:
print("not a file post"+post.permalink)
def Mainbot():
while True:
with open('data.json', 'r+') as e: # 'r+' let's you read and write to a file
eread = e.read()
# This section loops until it finds a reddit submission
# that's not in your file
post = reddit.subreddit('okbrudimongo').random()
x = post.id
while x in eread:
post = reddit.subreddit('okbrudimongo').random()
x = post.id
# add the post.id to the file
json.dump(x, e)
print(post.url + " " + post.title)
# Get and tweet image
urllib.request.urlretrieve(post.url, "local-filename.jpg")
Tweet(post)
# Remove image file
try:
time.sleep(5)
os.remove("local-filename.jpg")
except:
print("Datei nicht vorhanden")
# sleep for a total of three hours, but report status every 30 minutes
for i in range(6):
time.sleep(1800)
print("still running")
print(datetime.datetime.now())
if __name__ == "__main__":
reddit, twitter = initBot()
Mainbot()
I haven't tested this because I don't have twitter keys.
Solution i found (i still dont fully understand what mechanic caused the bug):
import time
import tweepy
import datetime
import praw
import urllib.request
import os
import json
def Mainbot():
reddit = praw.Reddit(client_id='XXXX',
client_secret='XXXX',
user_agent='RedditFetchBot by FlyingThunder')
def Tweet(postinfo):
auth = tweepy.OAuthHandler("XXXX", "XXXX")
auth.set_access_token("XXXX",
"XXXX")
api = tweepy.API(auth)
try:
api.update_with_media("local-filename.jpg", postinfo)
except:
print("not a file post"+post.permalink)
Mainbot()
post = reddit.subreddit('okbrudimongo').random()
x = post.id
with open('data.json', 'r') as e:
eread = e.read()
if x not in eread:
with open('data.json', 'a') as f:
json.dump(x, f)
f.close()
e.close()
print(post.url + " " + post.title)
urllib.request.urlretrieve(post.url, "local-filename.jpg")
Tweet(postinfo=post.title + " (https://www.reddit.com" + post.permalink + ")")
try:
time.sleep(5)
os.remove("local-filename.jpg")
except:
print("Datei nicht vorhanden")
else:
e.close()
print("already posted")
Mainbot()
def loop():
time.sleep(1800)
print("still running")
print(datetime.datetime.now())
while True:
Mainbot()
loop()
loop()
loop()
loop()
loop()
loop()

Making a bot with PRAW. It seems to ignore some people?

my first post here. If you check out /r/EDH right now you'll see my bot on the front page. The idea is that when you type ?edhbot [name of a card]? it gives you links to relevant pages on edhrec.com.
For some reason however it seems to ignore some people and my bot isn't reporting any rate limit issues. They're not getting the wakeword wrong, so I really can't work out what they're doing wrong? Here's my code for reference:
#imported modules
import praw
import string
#functions
def main():
#API login
#I have no idea what I can't show so I redacted most lol
reddit = praw.Reddit(client_id="REDACTED",
client_secret = "REDACTED",
user_agent = "a bot for my epq, by aggressivechairs",
username = "EPQ_MTG_BOT",
password = "REDACTED")
subreddit = reddit.subreddit("all")
wakeword = "?edhbot "
for comment in subreddit.stream.comments():
if wakewordCheck(comment, wakeword): reply = generateComment(comment)
#Checks comment to see if it contains the wake word for the bot
def wakewordCheck(comment, wakeword):
if wakeword in comment.body: return(True)
else: return(False)
#Generates the link extension
def linkGenerator(inString):
startPoint = inString.index("?edhbot ")
inString = inString.lower()
outString = ""
for i in range(startPoint+8, len(inString)):
if inString[i] == "?":
break
elif inString[i] in string.punctuation:
continue
elif inString[i] in string.whitespace:
outString += "-"
continue
outString += inString[i]
return(outString)
#Generates the text that will be used in the bot's reply
def generateComment(comment):
try:
normalisedLink = linkGenerator(comment.body)
commentReply = "[EDHRec page for the commander](https://edhrec.com/commanders/"+normalisedLink+") \n\n [EDHREC page for that as a card](https://edhrec.com/cards/"+normalisedLink+")"
comment.reply(commentReply)
print("I did it with " + comment.body)
except Exception as e: print(e)
if __name__ == "__main__":
main()
So yeah, when you use type ?edhbot it either reads until the end of the comment or until it encounters a new question mark. If you check out this thread though you can see that it just elects to ignore some people. https://www.reddit.com/r/EDH/comments/9ec677/im_a_new_bot_made_just_for_this_sub/?sort=new
What do you suggest? I can't work out what's going wrong :/ Oh and if you spot any bad coding practice feel free to tell me! I want to improve haha

Indentation Error Python Not Working

Im trying to run my code and there is an
File "C:/trcrt/trcrt.py", line 42
def checkInternet():
^
IndentationError: unexpected unindent
The code supposed to check for the traceroute to a website... i know... its not very smart but its what i was told to do
Ive checked the code using pep8 and eveything is seems to be fine...
'''
Developer: Roei Edri
File name: trcrt.py
Date: 24.11.17
Version: 1.1.0
Description: Get an url as an input and prints the traceroute to it.
'''
import sys
import urllib2
i, o, e = sys.stdin, sys.stdout, sys.stderr
from scapy.all import *
from scapy.layers.inet import *
sys.stdin, sys.stdout, sys.stderr = i, o, e
def trcrt(dst):
"""
Check for the route for the given destination
:param dst: Final destination, in a form of a website.
:type dst: str
"""
try:
pckt = IP(dst=dst)/ICMP() # Creates the
# packet
ip = [p for p in pckt.dst] # Gets the ip
print "Tracerouting for {0} : {1}".format(dst, ip[0])
for ttl in range(1, 40):
pckt = IP(ttl=ttl, dst=dst)/ICMP()
timeBefore = time.time()
reply = sr1(pckt, verbose=0, timeout=5)
timeAfter = time.time()
timeForReply = (timeAfter - timeBefore)*1000
if reply is not None:
print "{0} : {1} ; Time for reply: {2}".format(ttl,
reply.src, timeForReply)
if reply.type == 0:
print "Tracerout Completed"
break
else:
print "{0} ... Request Time Out".format(ttl)
def checkInternet():
"""
Checks if there is an internet connection
:return: True if there is an internet connection
"""
try:
urllib2.urlopen('http://45.33.21.159', timeout=1)
return True
except urllib2.URLError as IntError:
return False
Thanks for any help...
Btw pep8 says
"module level import not at top of file"
for lines 12,13
The try block is missing its except clause.
try:
pckt = IP(dst=dst)/ICMP() # Creates the
# packet
ip = [p for p in pckt.dst] # Gets the ip
print "Tracerouting for {0} : {1}".format(dst, ip[0])
for ttl in range(1, 40):
pckt = IP(ttl=ttl, dst=dst)/ICMP()
timeBefore = time.time()
reply = sr1(pckt, verbose=0, timeout=5)
timeAfter = time.time()
timeForReply = (timeAfter - timeBefore)*1000
if reply is not None:
print "{0} : {1} ; Time for reply: {2}".format(ttl,
reply.src, timeForReply)
if reply.type == 0:
print "Tracerout Completed"
break
else:
print "{0} ... Request Time Out".format(ttl)
except: # Here : Add the exception you wish to catch
pass # handle this exception appropriately
As a general rule, do not use catch all except clauses, and do not pass on a caught exception, it lets it fail silently.
If this is your full code, there are two things to check:
1) Have you mixed tabs and spaces? Make sure that all tabs are converted to spaces (I recommend 4 spaces per tab) for indentation. A good IDE will do this for you.
2) The try: in trcrt(dst) does not hava a matching except block.
PEP8 will by the way also tell you, that function names should be lowercase:
check_internet instead of checkInternet, ...
I will give you the same recommendation, that I give to everyone working with me: Start using an IDE that marks PEP8 and other errors for you, there is multiple around. It helps spotting those errors a lot and trains you to write clean Python code that is easily readable and (if you put comments in it) also reausable and understandable a few years later.

Reddit bot that changes windows background with downloaded images

As of right now I have a majority of the code done for browsing a subreddit, and downloading the top images at the time of the request. I was able to do this using PRAW and urllib to download the images once i get their link. The final part that i am stuck on is putting the images files in an array and actually setting them as my background. Here is what i have
import praw
import time
import os
import urllib as ul
import os
def backGroundChanger(sub):
USER_AGENT='wall paper changer for linux/windows by /u/**********' #specifies what my bot does and by who
REDDIT_ID= #reddit id
REDDIT_PASS= #reddit password
reddit=praw.Reddit(USER_AGENT) #creates bot
reddit.login(REDDIT_ID,REDDIT_PASS) #logsin
print reddit.is_logged_in()
images=reddit.get_subreddit(sub)
while True:
count=0
for sub in images.get_hot(limit=10):
imageLink=sub.url
print imageLink
n=str(count)
ul.urlretrieve(imageLink, "i" + n )
count+=1
file=[]
dir=os.getcwd()
for files in os.listdir("."):
if(files.endswith(".jpg|| .png"): # not sure if this will work
file.append(files)
changeBackGround(file,dir)
def changeBackGround(file, dir):
#Do back ground changing stuff here
def main():
subreddit=input("What subreddit would you like me to pull images from? ")
print "You chose " + subreddit
backGroundChanger(subreddit)
main()
This might work, maybe not; its untested.
Read up on the os.system function for a means to use system programs to set the background, like xsetbg in linux. Look here for setting the windows background (it only involves hacking the registry).
import os
import glob
import random
import sys
import time
import urllib
import praw
def backGroundChanger(sub):
USER_AGENT = 'wall paper changer for linux/windows by /u/**********' #specifies what my bot does and by who
REDDIT_ID = #reddit id
REDDIT_PASS = #reddit password
reddit = praw.Reddit(USER_AGENT) #creates bot
reddit.login(REDDIT_ID, REDDIT_PASS) #logsin
print reddit.is_logged_in()
images = reddit.get_subreddit(sub)
while True:
count = 0
for sub in images.get_hot(limit = 10):
imageLink = sub.url
print imageLink
n = str(count)
urllib.urlretrieve(imageLink, "i" + n )
count += 1
files = glob.glob("*.jpg") + glob.glob("*.png")
changeBackGround(files)
def changeBackGround(ifiles):
#Do back ground changing stuff here
the_file = ifiles[random.randint(0, len(ifiles) - 1)]
if(sys.platform.startswith("win")): # Windows
# Do this yourself
pass
elif(sys.platform.startswith("linux")): # Linux
os.system("xsetbg -center %s" % the_file)
def main():
subreddit = input("What subreddit would you like me to pull images from? ")
print "You chose " + subreddit
backGroundChanger(subreddit)
main()

Categories