my first post here. If you check out /r/EDH right now you'll see my bot on the front page. The idea is that when you type ?edhbot [name of a card]? it gives you links to relevant pages on edhrec.com.
For some reason however it seems to ignore some people and my bot isn't reporting any rate limit issues. They're not getting the wakeword wrong, so I really can't work out what they're doing wrong? Here's my code for reference:
#imported modules
import praw
import string
#functions
def main():
    """Log in to Reddit and reply to every r/all comment containing the wake word.

    Streams comments indefinitely; generateComment() posts the reply itself.
    """
    #API login
    #I have no idea what I can't show so I redacted most lol
    reddit = praw.Reddit(
        client_id="REDACTED",
        client_secret="REDACTED",
        user_agent="a bot for my epq, by aggressivechairs",
        username="EPQ_MTG_BOT",
        password="REDACTED",
    )
    wakeword = "?edhbot "
    for comment in reddit.subreddit("all").stream.comments():
        if wakewordCheck(comment, wakeword):
            generateComment(comment)
#Checks comment to see if it contains the wake word for the bot
def wakewordCheck(comment, wakeword):
    """Return True when the comment body contains the wake word, else False."""
    return wakeword in comment.body
#Generates the link extension
def linkGenerator(inString):
    """Turn the text after the wake word into an EDHREC URL slug.

    Reads from just past "?edhbot " up to the next "?" (or end of string),
    lowercases it, keeps hyphens, drops other punctuation, and collapses
    runs of whitespace into single "-" separators.

    Fixes vs the original: string.punctuation includes "-", so hyphenated
    card names (e.g. "Niv-Mizzet") lost their hyphens and produced slugs
    EDHREC doesn't recognise; double/trailing spaces also produced
    malformed "--"/trailing-dash slugs.
    """
    inString = inString.lower()
    wakeword = "?edhbot "
    # len(wakeword) replaces the old magic constant 8.
    start = inString.index(wakeword) + len(wakeword)
    outParts = []
    for ch in inString[start:]:
        if ch == "?":
            # A second "?" marks the end of the card name.
            break
        if ch in string.whitespace:
            # Collapse whitespace runs into one dash; never start with one.
            if outParts and outParts[-1] != "-":
                outParts.append("-")
        elif ch == "-" or ch not in string.punctuation:
            # Keep hyphens; drop all other punctuation.
            outParts.append(ch)
    # Trim a dash left by trailing whitespace.
    if outParts and outParts[-1] == "-":
        outParts.pop()
    return "".join(outParts)
#Generates the text that will be used in the bot's reply
def generateComment(comment):
    """Build the two EDHREC links for the card named in the comment and reply.

    Best-effort: any failure (API error, missing wake word, reply refused)
    is printed and swallowed so the stream loop keeps running.
    """
    try:
        slug = linkGenerator(comment.body)
        body = (
            "[EDHRec page for the commander](https://edhrec.com/commanders/"
            + slug
            + ") \n\n [EDHREC page for that as a card](https://edhrec.com/cards/"
            + slug
            + ")"
        )
        comment.reply(body)
        print("I did it with " + comment.body)
    except Exception as e:
        print(e)
# Run the bot only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
So yeah, when you use type ?edhbot it either reads until the end of the comment or until it encounters a new question mark. If you check out this thread though you can see that it just elects to ignore some people. https://www.reddit.com/r/EDH/comments/9ec677/im_a_new_bot_made_just_for_this_sub/?sort=new
What do you suggest? I can't work out what's going wrong :/ Oh and if you spot any bad coding practice feel free to tell me! I want to improve haha
Related
I am creating a Discord bot that sends me a GIF when I say !gif in a particular channel. The problem I am facing is that when I type # it is not converted to %23, which is how it must appear in links. I just want a way to change # in the string to %23. Please help me with that. The whole code is given below. I am very new to Python, so if you spot any other errors please fix them and also clean up the code. I am using the Tenor API. Thank you
Code :
import discord
import os
import json
import requests
import random
# Discord client instance used by the event handlers below.
client = discord.Client()
# NOTE(review): `global` at module level is a no-op — these two statements
# have no effect; `global` only matters inside a function body.
global search_term_public
global url
# Default search term used when "!gif" is sent with no arguments.
search_term_public = "Rick Roll"
def tenor():
    """Return the URL of a random GIF matching ``search_term_public``.

    Queries the Tenor v1 search API, picks one result uniformly at random,
    stores its URL in the module-level ``url`` (as the original did) and
    returns it.  Returns None when the request fails or yields no results
    (the original raised NameError / IndexError in those cases).
    """
    global url
    # set the apikey
    apikey = os.getenv("TENORAPIKEY")
    search_term = search_term_public
    # get the GIFs for the search term
    r = requests.get("https://g.tenor.com/v1/search?q=%s&key=%s&contentfilter=high" % (search_term, apikey))
    if r.status_code != 200:
        # Request failed: nothing to pick from.
        return None
    results = json.loads(r.content)['results']
    if not results:
        # Empty result list: the original crashed indexing results[-1].
        return None
    # randrange(len(results)) is uniform; the old randint(0, g) + clamp
    # made the last result twice as likely as the others.
    i = random.randrange(len(results))
    url = results[i]['media'][0]['gif']['url']
    print("The number picked is " + str(i) + " out of " + str(len(results))
          + ". Search Term : " + search_term + ". Url : " + url)
    return url
# NOTE(review): the pasted "#client.event" is a Python comment, so this
# handler was never registered — it must be the @client.event decorator.
@client.event
async def on_ready():
    """Log a confirmation once the Discord gateway connection is ready."""
    print("Bot has successfully logged in as {0.user}".format(client))
# NOTE(review): "#client.event" is a comment; without @client.event the
# handler is never registered and the bot ignores every message.
@client.event
async def on_message(message):
    """Reply with a Tenor GIF when "!gif <search term>" is posted in the
    designated channel.
    """
    global search_term_public
    # Ignore our own messages.  The original compared against `client`
    # (the Client object), which is never equal to a message author, so
    # the guard never fired.
    if message.author == client.user:
        return
    if message.content.startswith("!gif") and message.channel.id == 831789159587774504:
        # Everything after the "!gif" command word is the search term;
        # drop empty tokens produced by repeated spaces (the original's
        # elif chain handled only one of these cases at a time).
        tokens = [t for t in message.content.split(' ')[1:] if t != ""]
        # '#' starts a URL fragment, so percent-encode it for the API call
        # (this is the fix the author asked for).
        tokens = [t.replace("#", "%23") for t in tokens]
        search_term_public = "".join(tokens)
        if search_term_public == "":
            search_term_public = "Rick Roll"
            await message.channel.send("You got rick rolled!")
        url = tenor()
        await message.channel.send(url)
client.run(os.getenv("DISCORDTOKEN"))
Try this, it might work
You can just use the "replace" as follows
elif tokens.__contains__("#"):
    tokens = [token.replace("#", "%23") for token in tokens]
You want to look at
url encoding probably.
However, to directly answer the question as a hotfix, I think you can do this directly after the .split() line
tokens = [token.replace('#', '%23') for token in tokens]
I am wondering how do u make a loop for an api call that will keep calling that API, but when I tried making one it didn't work here is the code:
# Poll the server API every 5 seconds, then pull each player's name out of
# the "included" section of the JSON response into the module-level `names`.
while True:
    api_requesting = requests.get("https://api.battlemetrics.com/servers/3411152?include=player", headers=headers)
    time.sleep(5)
    jsoned_api = api_requesting.json()
    included = jsoned_api["included"]
    names = [person['attributes']['name'] for person in included]
And this is for e to call upon the request, and parsed it to give me the names of each player etc
# NOTE(review): the pasted "#client.command(...)" line is a comment; the
# command is only registered with the @client.command decorator.
@client.command(name='players')
async def createEmbed(ctx):
    """Send the online player count followed by a code-block list of names."""
    await ctx.send(f"{len(names)} players are online currently")
    # One "> name" line per player, wrapped in a Discord code block.
    urString = "".join("> " + name + "\n" for name in names)
    urString = "```" + urString + "```"
    await ctx.send(urString)
So I am wondering how will I make a loop for my request it's all the way at the beginning where it says while true: but when I run it the bot doesn't respond, and doesn't do anything.
If you want your code to stop when the bot does not respond:
# Keep requesting until the first failed response.
success = True
while success:
    api_requesting = requests.get("https://api.battlemetrics.com/servers/3411152?include=player", headers=headers)
    # Success is True when the response status code is 200
    success = api_requesting.status_code == 200
But if you want to keep making requests, you can try:
# Keep polling forever, backing off 5 s after each failed request.
# Fix vs the original sketch: branches whose bodies were only comments are
# a SyntaxError in Python — they need at least a `pass` statement.
while True:
    api_requesting = requests.get("https://api.battlemetrics.com/servers/3411152?include=player", headers=headers)
    if api_requesting.status_code == 200:
        # Do something when the bot responds
        pass
    else:
        # Do something else when the bot does not respond
        time.sleep(5)
Today I wrote a twitter bot that replies anybody who mentions it with a random image from a folder.
The problem here is that I'm a newbie in Python and I don't know how to make it functional at all. When I started running it, the bot started replying to all the mentions from other users (I'm using an old account a friend gave to me), and that's not precisely what I want — it's working, but not as I desire.
The bot replies to all the mentions from the very beginning and it won't stop until all of them have been replied to (the bot is now turned off — I don't want to annoy anybody).
How can I make it to only reply to latest mentions instead of the first ones?
here's the code:
import tweepy
import logging
from config import create_api
import time
import os
import random
from datetime import datetime
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()

api = create_api()
imagePath = random.choice(os.listdir("images/"))
# Walk every mention on each pass, then sleep 12 s and repeat.
while True:
    for tweet in tweepy.Cursor(api.mentions_timeline).items():
        try:
            # Pick a fresh random image for every reply.
            imagePath = random.choice(os.listdir("images/"))
            tweetId = tweet.user.id
            username = tweet.user.screen_name
            api.update_with_media('images/' + imagePath, "#" + username + " ", in_reply_to_status_id=tweet.id)
            print('Replying to ' + username + 'with ' + imagePath)
        except tweepy.TweepError as e:
            print(e.reason)
        except StopIteration:
            break
    time.sleep(12)
Thanks in advance.
I don't have the ability to test this code currently but this should work.
Instead of iterating over every tweet, it turns the iterator that tweepy.Cursor returns into a list and then just gets the last item in that list.
api = create_api()
imagePath = random.choice(os.listdir("images/"))
while True:
    # Materialise the cursor and keep only the newest mention instead of
    # iterating over every tweet.
    tweet_iterator = tweepy.Cursor(api.mentions_timeline).items()
    latest_tweet = list(tweet_iterator)[-1]
    try:
        imagePath = random.choice(os.listdir("images/"))
        tweetId = latest_tweet.user.id
        username = latest_tweet.user.screen_name
        api.update_with_media('images/' + imagePath, "#" + username + " ", in_reply_to_status_id=latest_tweet.id)
        print('Replying to ' + username + 'with ' + imagePath)
    except tweepy.TweepError as e:
        print(e.reason)
    except StopIteration:
        break
    time.sleep(12)
You will also want to keep track of what user you last replied to, so you don't just keep spamming the same person over and over.
This isn't the most efficient way of doing it but should be easy enough to understand:
# Sketch: remember the last user replied to so the bot doesn't spam one
# person.  Fixes vs the original sketch: a bare `try:` with no except/finally
# and an if-branch containing only a comment are both SyntaxErrors — the
# try here was only marking "inside the existing try block", so it is
# replaced by a comment and the empty branch gets `pass`.
latest_user_id = None
while True:
    # Rest of the code (fetch latest_tweet as above); inside the try block:
    if latest_user_id == latest_tweet.user.id:
        # don't do anything
        pass
    else:
        latest_user_id = latest_tweet.user.id
        # the rest of your code
I'm writing a simple little script to send me a text message when the Ultra Music Festival early bird tickets go on sale so I can snatch them up. When I came to writing this I figured python would be a quick way to achieve my goal. What I do is collect the links and then count them and determine if there is a change and send a google voice text message to a couple numbers. Here is my code ran against stackoverflow.
from googlevoice import Voice
from googlevoice.util import input
from bs4 import BeautifulSoup, SoupStrainer
from time import sleep
import urllib2
from array import *
#define login details
# Google account credentials used for the Voice login in send_message().
email = 'example#gmail.com'
password = 'password'
# Page whose outbound link count is monitored for changes.
url = 'http://stackoverflow.com/questions'
def send_message(var_text):
    """Log in to Google Voice and text var_text to each configured number.

    Fixes vs the original: phone numbers are strings, not array('L') ints —
    the GV endpoint rejects numeric payloads (code 58), and a 10-digit
    number overflows a 32-bit unsigned long on some platforms.  A short
    sleep between sends avoids GV's back-to-back SMS throttling.
    """
    voice = Voice()
    voice.login(email, password)
    phone_numbers = ['9998675309', '9998675309']
    for number in phone_numbers:
        voice.send_sms(number, var_text)
        sleep(5)  # pause so GV doesn't throttle consecutive texts
#init
soup = BeautifulSoup(urllib2.urlopen(url).read(), parse_only=SoupStrainer('a'))
link_count = len(soup)
#start the loop
var = 1
while var == 1 : # This constructs an infinite loop
soup = BeautifulSoup(urllib2.urlopen(url).read(), parse_only=SoupStrainer('a'))
if link_count != len(soup):
string = str('Link Count Changed\n\nSite:\n' + url + '\nPrev:\n' + str(link_count) + '\nNew:\n' + str(len(soup)))
send_message(string)
print (string)
link_count = len(soup)
sleep(10)
pass
else:
print('Number of links ('+ str(link_count) + ') has not changed, going to sleep now.')
sleep(10)
pass
print "Good bye!"
Here is the error I keep getting (it only seems to happen when sending to more than one number)
doesn't work array('L',[9998675309, 9998675309])
works array('L',[9998675309])
ERROR:
bash-3.2# python gvsendalert.py
Number of links (195) has not changed, going to sleep now.
Traceback (most recent call last):
File "gvsendalert.py", line 32, in <module>
send_message(string)
File "gvsendalert.py", line 19, in send_message
voice.send_sms(i, var_text)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/googlevoice/voice.py", line 151, in send_sms
self.__validate_special_page('sms', {'phoneNumber': phoneNumber, 'text': text})
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/googlevoice/voice.py", line 225, in __validate_special_page
load_and_validate(self.__do_special_page(page, data))
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/googlevoice/util.py", line 65, in load_and_validate
validate_response(loads(response.read()))
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/googlevoice/util.py", line 59, in validate_response
raise ValidationError('There was a problem with GV: %s' % response)
googlevoice.util.ValidationError: There was a problem with GV: {u'data': {u'code': 58}, u'ok': False}
Ok I've taken into consideration what some of you have posted and come out with this. For the number array sending my google voice number twice it will send 2 messages. If I put my friends number as the second it breaks it. Could this be because my friends number is not a google voice number? I have been able to send messages to this number using Google Voice and some other 3rd party iPhone applications so I would think the python module would work the same way.
Here is my 2nd Revision Code:
def send_message(var_text):
    """Log in to Google Voice and text var_text to every number in the list."""
    voice = Voice()
    voice.login(email, password)
    recipients = ['myrealgooglenumber', 'myfriendsactualphonenumber']
    for number in recipients:
        print('sending to: ' + str(number))
        voice.send_sms(str(number), str(var_text))
        sleep(5)
#init
soup = BeautifulSoup(urllib2.urlopen(url).read(), parse_only=SoupStrainer('a'))
link_count = len(soup)
#start the loop
var = 1
while var == 1 : # This constructs an infinite loop
soup = BeautifulSoup(urllib2.urlopen(url).read(), parse_only=SoupStrainer('a'))
if link_count != len(soup):
string = ('Link Count Changed\n\nSite:\n{0}\nPrev:\n{1}\nNew:\n{2}').format(url, link_count, len(soup))
send_message(string)
link_count = len(soup)
print (string)
sleep(10)
pass
else:
string = ('Number of links ({0}) has not changed, going to sleep now.').format(str(link_count))
print(string)
sleep(10)
pass
print "Good bye!"
Have tested with 2 google voice numbers and it works. Still doesn't work with non google voice numbers.
It looks like you're using ints for the phone numbers.
Phone numbers are not true numbers.
Try strings instead:
phoneNumber = ['9998675309', '9998675309']
Also, on a style note, have a look at string formatting:
string = 'Link Count Changed\n\nSite:\n{0}\nPrev:\n{1}\nNew:\n{2}'.format(url, link_count, len(soup))
Google may have a timer to prevent you sending too many SMS messages back to back.
Perhaps you could try changing your loop to something like:
for i in phoneNumber:
voice.send_sms(i, var_text)
sleep(5)
One other thought, does it work better if you use 2 different phone numbers?
I would like to check an infinite number of self generated URLs for validity, and if valid safe body of response in a file. URLs look like this: https://mydomain.com/ + random string (e.g. https://mydomain.com/ake3t) and I want to generate them using the alphabet "abcdefghijklmnopqrstuvwxyz0123456789_-" and just brute force try out all possibilities.
I wrote a script in python but as I am an absolute beginner here it was very slow! As I need something very very fast I tried to use scrapy as I thought it was meant for exactly this kind of job.
The problem now is I cannot find out how to dynamically generate URLs on the fly, I cannot generate them beforehand as it is not a fixed number of them.
Could somebody please show me how to achieve this or recommend me another tool or library even better suited for this job?
UPDATE:
This is the script I used, but I think it is slow. What worries me the most is that it gets slower if I use more than one Thread (specified in threadsNr)
import threading, os
import urllib.request, urllib.parse, urllib.error
# Number of worker threads to start (the author reports it gets slower
# with more than one — likely GIL + shared-global contention).
threadsNr = 1
# Directory where found parts and their per-part dump folders are written.
dumpFolder = '/tmp/urls/'
# Alphabet used to enumerate candidate URL suffixes.
charSet = 'abcdefghijklmnopqrstuvwxyz0123456789_-'
# Each probed URL is Url_pre + candidate + Url_post.
Url_pre = 'http://vorratsraum.com/'
Url_post = 'alwaysTheSameTail'
# Odometer-style generator over a fixed character set.
class wordGenerator:
    """Enumerates words over charSet: '', 'a', 'b', ..., 'aa', 'ab', ...

    Each call to nextWord() advances currentWord to its successor, like
    incrementing an odometer whose digits are the characters of charSet.
    """

    def __init__(self, word, charSet):
        self.currentWord = word
        self.charSet = charSet

    # Advance currentWord to its successor and return it.
    def nextWord(self):
        self.currentWord = self._incWord(self.currentWord)
        return self.currentWord

    # Compute the successor of `word` in odometer order.
    def _incWord(self, word):
        word = str(word)
        if not word:
            # Empty word rolls over to the first character of the set.
            return self.charSet[0]
        head, last = word[:-1], word[-1]
        pos = self.charSet.find(last)
        if pos + 1 < len(self.charSet):
            # Just bump the final character.
            return head + self.charSet[pos + 1]
        # Final character wraps around: carry into the left side.
        return self._incWord(head) + self.charSet[0]
class newThread(threading.Thread):
    """Worker thread: pull candidate suffixes from the shared generator and
    probe Url_pre + part + Url_post, recording hits via found().

    Fixes vs the original:
    - `req.addheaders = [...]` set a nonexistent attribute (a typo for the
      `add_header()` method), so the User-agent header was never sent.
    - The first handler caught `urllib.error.URLError` but read `.code`,
      which only exists on `HTTPError`; the second, identical `except
      URLError` clause was unreachable.  Catch HTTPError first.
    """

    def run(self):
        global exitThread
        global wordsTried
        global newWord
        global hashList  # NOTE(review): never assigned anywhere visible
        while exitThread == False:
            part = newWord.nextWord()  # generate the next word to try
            url = Url_pre + part + Url_post
            # NOTE(review): this counter is shared across threads without a
            # lock; increments can be lost with threadsNr > 1.
            wordsTried = wordsTried + 1
            if wordsTried == 1000:  # just for testing how fast it is
                exitThread = True
            print('trying ' + part)  # display the word
            print('At URL ' + url)
            try:
                req = urllib.request.Request(url)
                req.add_header('User-agent', 'Mozilla/5.0')
                resp = urllib.request.urlopen(req)
                result = resp.read()
                found(part, result)
            except urllib.error.HTTPError as err:
                if err.code == 404:
                    print('Page not found!')
                elif err.code == 403:
                    print('Access denied!')
                else:
                    print('Something happened! Error code', err.code)
            except urllib.error.URLError as err:
                print('Some other error happened:', err.reason)
        resultFile.close()
def found(part, result):
    """Record a hit: append the suffix to parts.txt and create its dump dir.

    `result` (the page body) is accepted but not used here — kept for
    interface compatibility with the caller in newThread.run().
    """
    global exitThread
    global resultFile
    resultFile.write(part + "\n")
    target = dumpFolder + part
    if not os.path.isdir(target):
        os.makedirs(target)
    print('Found Part = ' + part)
# Shared state for the worker threads.
wordsTried = 0
exitThread = False  # flag to kill all threads
newWord = wordGenerator('', charSet)  # word generator shared by all workers

# Make sure the dump directory exists, then open the results file for append.
if not os.path.isdir(dumpFolder):
    os.makedirs(dumpFolder)
resultFile = open(dumpFolder + 'parts.txt', 'a')

# Spin up the worker pool.
for i in range(threadsNr):
    newThread().start()
You cannot check "an infinite number of URLs" without it being "very slow", beginner or no.
The time your scraper is taking is almost certainly dominated by the response time of the server you're accessing, not by the efficiency of your script.
What are you trying to do, exactly?
Do you want brute force or random? Below is a sequential brute force method with repeating characters. The speed of this is going to be largely determined by your server response. Also note that this would likely generate a denial of service condition very quickly.
import itertools
# Fix vs the original: `import url` names a nonexistent module — the code
# below uses urllib2 (Python 2).
import urllib2

pageChars = 5
alphabet = "abcdefghijklmnopqrstuvwxyz0123456789_-"

#iterate over the product of alphabet with <pageChar> elements
#this assumes repeating characters are allowed
# Beware this generates len(alphabet)**pageChars possible strings
for chars in itertools.product(alphabet, repeat=pageChars):
    pageString = ''.join(chars)
    urlString = 'https://mydomain.com/' + pageString
    try:
        # Open the URL we just built; the original passed the undefined
        # name `url` here instead of urlString.
        page = urllib2.urlopen(urlString)
    except urllib2.HTTPError:
        print('No page at: %s' % urlString)
        continue
    pageData = page.read()
    #do something with page data