How do I infinitely loop this function without an error? - Python

I am trying to loop this function to retrieve vaccine alerts via WhatsApp so it constantly monitors the server when I start the script. I tried using a while True: loop, but this error kept occurring - how could I fix it?
Traceback (most recent call last):
File "/Users/ragz/cowin.py", line 70, in <module>
vaccine_check()
File "/Users/ragz/cowin.py", line 35, in vaccine_check
json_output = json.dumps(available_centers, indent=4)
File "/usr/local/Cellar/python#3.9/3.9.4/Frameworks/Python.framework/Versions/3.9/lib/python3.9/json/__init__.py", line 234, in dumps
return cls(
File "/usr/local/Cellar/python#3.9/3.9.4/Frameworks/Python.framework/Versions/3.9/lib/python3.9/json/encoder.py", line 201, in encode
chunks = list(chunks)
File "/usr/local/Cellar/python#3.9/3.9.4/Frameworks/Python.framework/Versions/3.9/lib/python3.9/json/encoder.py", line 438, in _iterencode
o = _default(o)
File "/usr/local/Cellar/python#3.9/3.9.4/Frameworks/Python.framework/Versions/3.9/lib/python3.9/json/encoder.py", line 179, in default
raise TypeError(f'Object of type {o.__class__.__name__} '
TypeError: Object of type HTTPError is not JSON serializable
If anyone needs it, here's the code:
from cowin_api import CoWinAPI
import json
import datetime
import numpy as np
import os
from twilio.rest import Client
import selenium
from selenium import webdriver
import time
import io
import requests
from selenium.common.exceptions import ElementClickInterceptedException
from selenium.webdriver.common.keys import Keys
from threading import Thread

state_id = '21'
district_id = '395'
min_age_limit = 18

time = datetime.datetime.now()

cowin = CoWinAPI()

# here I'm getting the centers and the vaccines
def vaccine_check():
    try:
        available_centers = cowin.get_availability_by_district(district_id)

        # outputting it to a JSON file and bringing it back
        json_output = json.dumps(available_centers, indent=4)
        f = open(f'tests/vaccinecheck[{time.strftime("%b %d %Y %H|%M")}].json', 'w')
        f.write(json_output)
        f.close()

        with open(f.name) as file:
            data = json.load(file)

        n = np.arange(100)
        for x in np.nditer(n):
            if data["centers"][x]["sessions"][0]["min_age_limit"] == 45:
                print('')
            else:
                print(f'[{time.strftime("%b %d %Y %H:%M")}]', data["centers"][x]["name"], '-- vaccines:', data["centers"][x]["sessions"][0]['available_capacity'], '-- age-limit:', data["centers"][x]["sessions"][0]["min_age_limit"])
                if data["centers"][x]["sessions"][0]["available_capacity"] >= 1:
                    twilio_send()

    except IndexError:  # catch the error
        pass            # pass will basically ignore it


def twilio_send():
    client = Client()
    from_whatsapp_number = 'whatsapp:twilio api demo num'
    to_whatsapp_number = 'whatsapp:my phone num'

    client.messages.create(body='vaccine available - book now!',
                           from_=from_whatsapp_number,
                           to=to_whatsapp_number)


while True:
    vaccine_check()
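The TypeError means one poll came back as an HTTPError object instead of a plain dict, so json.dumps fails and the while True: loop dies with it. A minimal sketch of one way to keep the loop alive (assuming vaccine_check() exactly as defined above, and aliasing the time module because the script rebinds the name time to a datetime):

```python
import time as t       # `time` is shadowed above, so alias the module
import requests

# Hedged sketch: catch the failure per iteration so one bad response
# doesn't kill the monitor, and pause between polls.
while True:
    try:
        vaccine_check()
    except (TypeError, requests.RequestException) as err:
        print("check failed, will retry:", err)
    t.sleep(60)        # poll once a minute instead of hammering the API
```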

Related

Bot for Comments on Instagram Giving Error

The bot enters the profile and accesses the post, but when it pulls the comments from the txt file it closes without making any comment.
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from time import sleep
import instaloader
import random
import os.path

driver = webdriver.Chrome(r"C:\Users\Windows\Downloads\chromedriver.exe")

def sorteioInstagram():
    abrindo_instagram("inspiracaobiel", "biel123", "https://www.instagram.com/p/CoI52RJOqnT/")
    if os.path.isfile('seguidores.txt'):
        print("Arquivo de Seguidores já carregado...")
    else:
        pegar_seguidores('bielbibiel')
    comentandoPost()

def abrindo_instagram(username, password, url):
    print("Abrindo Instagram...")
    driver.get("https://www.instagram.com/")
    sleep(2)
    print("Fazendo Login no Instagram...")
    driver.find_element(By.XPATH, "//input[@name='username']").send_keys(username)
    driver.find_element(By.XPATH, "//input[@name='password']").send_keys(password)
    driver.find_element(By.XPATH, "//button[@type='submit']").click()
    sleep(10)
    print("Negando Solicitação de Segurança Instagram...")
    driver.find_element(By.XPATH, "//button[text()='Agora não']").click()
    sleep(5)
    driver.find_element(By.XPATH, "//button[text()='Agora não']").click()
    sleep(4)
    print("Acessando o Post do sorteio...")
    driver.get(url)

def pegar_seguidores(usuario):
    L = instaloader.Instaloader()
    L.login('inspiracaobiel', 'biel123')
    profile = instaloader.Profile.from_username(L.context, usuario)
    print(f"Salvando Seguidores de {usuario}...")
    # saving followers to a .txt file
    file = open("seguidores.txt", "a+")
    for followee in profile.get_followers():
        username = "@" + followee.username
        file.write(username + "\n")
    file.close()

def comentandoPost():
    z = 0
    while 1 == 1:
        cmt = driver.find_element(By.XPATH, '//*[@id="react-root"]/section/main/div/div[1]/article/div[3]/section[3]/div/form/textarea')
        cmt.click()
        comment = lendo_arquivo()
        driver.find_element(By.XPATH, '//*[@id="react-root"]/section/main/div/div[1]/article/div[3]/section[3]/div/form/textarea').send_keys(comment)
        driver.find_element(By.XPATH, '//*[@id="react-root"]/section/main/div/div[1]/article/div[3]/section[3]/div/form/textarea').send_keys(' ')
        sleep(10)
        driver.find_element(By.XPATH, '//*[@id="react-root"]/section/main/div/div[1]/article/div[3]/section[3]/div/form/textarea').send_keys(Keys.ENTER)
        sleep(10)
        driver.find_element(By.XPATH, '//*[@id="react-root"]/section/main/div/div[1]/article/div[3]/section[3]/div/form/textarea').send_keys(Keys.ENTER)
        z += 1
        print(f"{z}")
        sleep(60)

def lendo_arquivo():
    with open("seguidores.txt", "r") as file:
        allText = file.read()
        words = list(map(str, allText.split()))
        return random.choice(words)

sorteioInstagram()
ERROR:
Traceback (most recent call last):
File "c:\\Users\\gabri\\Downloads\\Codigo Comentarios\\venv\\Scripts\\main.py", line 76, in \<module\>
sorteioInstagram()
File "c:\\Users\\gabri\\Downloads\\Codigo Comentarios\\venv\\Scripts\\main.py", line 17, in sorteioInstagram
pegar_seguidores('bielbibiel')
File "c:\\Users\\gabri\\Downloads\\Codigo Comentarios\\venv\\Scripts\\main.py", line 41, in pegar_seguidores
L.login('inspiracaobiel', 'biel123')
File "C:\\Users\\gabri\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\instaloader\\instaloader.py", line 634, in login
self.context.login(user, passwd)
File "C:\\Users\\gabri\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\site-packages\\instaloader\\instaloadercontext.py", line 239, in login
raise ConnectionException("Login: Checkpoint required. Point your browser to "
instaloader.exceptions.ConnectionException: Login: Checkpoint required. Point your browser to https://www.instagram.com/challenge/action/AXG6u8rHRprRccgzcgLKpQNJzv0wH2s1vajDDBvJi2xj-ypRREOtDgZK3e5Ee8HekhTocA0/AfxgQISXQXjHI8kasJEBP020fZ1GKKqFXXJGeaFzdDei1KKj2Pc3OVuZl5K_J3Og2Mxa0Yx64gubOg/ffc_XxXKP7ukaNEpowIhzsecXIF8lbX5oShdPs03HikkaCikTbJ50ZWn38x98bzTC5ZI/ - follow the instructions, then retry.
I tried to fix the comment part, but I couldn't resolve the error; the script keeps closing without making any comment.
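The traceback shows the script never gets past L.login(...): Instagram is demanding a checkpoint challenge, so Instaloader raises and the program exits before any comment is posted. One common workaround (a minimal sketch, not a guaranteed fix, and the session file name is an assumption) is to complete the challenge once in a browser at the URL shown in the error, then save and reuse the Instaloader session so later runs don't repeat the password login:

```python
import os.path
import instaloader

USERNAME = "inspiracaobiel"     # account name taken from the question
SESSION_FILE = "insta_session"  # hypothetical file name for the saved session

L = instaloader.Instaloader()
if os.path.isfile(SESSION_FILE):
    # Reuse the session saved on a previous run instead of logging in again.
    L.load_session_from_file(USERNAME, SESSION_FILE)
else:
    # First run: if this still raises the checkpoint error, resolve the
    # challenge in a browser, then run the script again.
    L.login(USERNAME, "biel123")
    L.save_session_to_file(SESSION_FILE)
```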

My googlesearch is not working in a multiprocessing Pool, it gives a MaybeEncodingError

Currently I'm trying to use multiprocessing to search book titles and find their first 30 summary links with googlesearch in Python.
from googlesearch import search
from bs4 import BeautifulSoup
import os
from multiprocessing import Pool
import json
import requests
from urllib.parse import urlparse

SAVE_PDF_PATH = "../books_pdf"
SAVE_SUM_PATH = "../summary_txt"

def perform_search(title):
    """Perform search for the title summaries and save its texts into files.
    Return quadruples of (url, domain, starting depth = 0, maxdepth) used for crawl."""
    dir_path = f"{SAVE_SUM_PATH}/{title}"
    # print(f"dir_path: {dir_path}")
    try:
        os.mkdir(dir_path)  # create a directory if needed
    except:
        pass
    query = title + " summary"
    print(f"query: {query}")
    results = list()
    for url in search(query, num=30, stop=30, verify_ssl=False):
        results.append(url)
    return results

def main():
    titles = list()
    for title in os.listdir(SAVE_PDF_PATH):
        titles.append(title[:-4])
    search_stuff = titles[0:2]
    print(f"search_stuff: {search_stuff}")

    p = Pool()
    results = p.map(perform_search, titles[0:2])
    p.close()
    p.join()

    # p2 = Pool()
    # results = p2.map(crawl, quads)
    # p2.close()
    # p2.join()

    print(f"results: {results}")

if __name__ == "__main__":
    main()
The thing is, it worked at one point and then stopped working; if I comment out the search, the multiprocessing works again.
Otherwise it gives me this error:
File "/Users/yao/Desktop/dkp/School/projects/Stuff_with_Books/summary_scraper/sum_scrape.py", line 73, in <module>
main()
File "/Users/yao/Desktop/dkp/School/projects/Stuff_with_Books/summary_scraper/sum_scrape.py", line 62, in main
results = p.map(perform_search, titles[0:2])
File "/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/multiprocessing/pool.py", line 364, in map
return self._map_async(func, iterable, mapstar, chunksize).get()
File "/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/multiprocessing/pool.py", line 771, in get
raise self._value
multiprocessing.pool.MaybeEncodingError: Error sending result: '<multiprocessing.pool.ExceptionWithTraceback object at 0x7fa4750c4490>'. Reason: 'TypeError("cannot pickle '_io.BufferedReader' object")'
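MaybeEncodingError usually means a worker raised an exception that can't be pickled back to the parent process - here the exception object holds an open _io.BufferedReader (likely the HTTP response from a failed or rate-limited Google query inside search). A minimal sketch of one way around it, assuming the same search(query, num=30, stop=30, verify_ssl=False) call as above: catch errors inside the worker and return only plain, picklable values.

```python
from multiprocessing import Pool
from googlesearch import search

def perform_search_safe(title):
    query = title + " summary"
    try:
        # return a plain list of URL strings - always picklable
        return list(search(query, num=30, stop=30, verify_ssl=False))
    except Exception as exc:
        # the original exception object may not be picklable, so ship a string instead
        return f"search failed for {title!r}: {exc}"

if __name__ == "__main__":
    with Pool() as p:
        print(p.map(perform_search_safe, ["Moby Dick", "Dune"]))  # hypothetical titles
```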

Dump and load pickle file in Python

For a student project I have to record a sample from a 433MHz receiver and send it back, to replace the initial button that sent it.
I succeeded in receiving the sample and saving it in a pickle file.
But now I don't know how to send it back...
I've tried different things but nothing's working.
Could you please help me do it?
Here is my code:
Receive part:
from __future__ import with_statement
from datetime import datetime
import matplotlib.pyplot as pyplot
import RPi.GPIO as GPIO
import pickle

RECEIVED_SIGNAL = [[], []]  # [[time of reading], [signal reading]]
MAX_DURATION = 5
RECEIVE_PIN = 23

outputb = open('button_on.pkl', 'w')
outputa = open('time_on.pkl', 'w')

if __name__ == '__main__':
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(RECEIVE_PIN, GPIO.IN)
    cumulative_time = 0
    beginning_time = datetime.now()
    print '**Started recording**'
    while cumulative_time < MAX_DURATION:
        time_delta = datetime.now() - beginning_time
        RECEIVED_SIGNAL[0].append(time_delta)
        RECEIVED_SIGNAL[1].append(GPIO.input(RECEIVE_PIN))
        cumulative_time = time_delta.seconds
    print '**Ended recording**'
    print len(RECEIVED_SIGNAL[0]), 'samples recorded'
    GPIO.cleanup()

    print '**Processing results**'
    for i in range(len(RECEIVED_SIGNAL[0])):
        RECEIVED_SIGNAL[0][i] = RECEIVED_SIGNAL[0][i].seconds + RECEIVED_SIGNAL[0][i].microseconds/1000000.0
        print RECEIVED_SIGNAL[0][i], RECEIVED_SIGNAL[1][i]
        A = RECEIVED_SIGNAL[0][i]
        B = RECEIVED_SIGNAL[1][i]
        outputa.write(str(A))
        outputb.write(str(B))

    print '**Plotting results**'
    pyplot.plot(RECEIVED_SIGNAL[0], RECEIVED_SIGNAL[1])
    pyplot.axis([0, MAX_DURATION, -1, 2])
    pyplot.show()

    outputa.close()
    outputb.close()
Transmit part:
import time
import sys
import RPi.GPIO as GPIO
import pprint
import pickle

button_on = "button_on.pkl"
with open(button_on, "rb") as f:
    print pickle.load(f)
button_on.close()
Here I have an error:
Traceback (most recent call last):
print pickle.load(f)
File "/usr/lib/python2.7/pickle.py", line 1378, in load
return Unpickler(file).load()
File "/usr/lib/python2.7/pickle.py", line 858, in load
dispatch[key](self)
File "/usr/lib/python2.7/pickle.py", line 1138, in load_pop
del self.stack[-1]
IndexError: list assignment index out of range
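The receive script writes str(value) to a text-mode file that happens to be named .pkl, so there is no actual pickle data for pickle.load() to parse, which is why the unpickler fails. A minimal sketch (not your exact code) of the usual dump/load pair, in binary mode:

```python
import pickle

signal = [[0.001, 0.002, 0.003], [1, 0, 1]]  # placeholder for RECEIVED_SIGNAL

# dump: write the Python object itself, in binary mode
with open('button_on.pkl', 'wb') as f:
    pickle.dump(signal, f)

# load: read the same object back later, e.g. in the transmit script
with open('button_on.pkl', 'rb') as f:
    restored = pickle.load(f)
print(restored)
```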

Tweepy Stream returning ImportError: cannot import name RE_TYPE

I was using tweepy to pull tweets and then display them in MongoDB. When I went to rerun it a few days later, I got an error.
MongoDB is working fine, and RE_TYPE isn't even in my code.
Here is my code:
import json
import pymongo
from tweepy import Stream
from tweepy import OAuthHandler
from tweepy.streaming import StreamListener

CONSUMER_KEY = 'xxx'  # hiding keys
CONSUMER_SECRET = 'xxx'
OAUTH_TOKEN = 'xxx'
OAUTH_TOKEN_SECRET = 'xxx'

keyword_list = ['christmas']

class MyStreamListener(StreamListener):

    def __init__(self):
        self.num_tweets = 0     # counter starting at 0
        self.tweet_coll = None  # signal that something will go into tweet_coll at some stage

    def mongo_connect(self):
        try:
            client = pymongo.MongoClient()
            print "Mongo is connected!"
            db = client.tech_tweetsDB
            self.tweet_coll = db.tweets
        except pymongo.errors.ConnectionFailure, e:
            print "Could not connect to MongoDB: %s" % e

    def on_data(self, data):
        try:
            # read in a tweet
            status = json.loads(data)
            print json.dumps(status, indent=4)
            tweet = {}
            tweet["text"] = status['text'].encode('utf8')  # utf8 so it can handle other languages
            tweet['screen_name'] = status['user']['screen_name']
            tweet['followers_count'] = status['user']['followers_count']
            tweet['friends_count'] = status['user']['friends_count']
            tweet['favorite_count'] = status['favorite_count']
            tweet['retweet_count'] = status['retweet_count']
            tweet['created at'] = status['created_at']
            print status.get('entities').get("media")
            if status.get('entities').get("media"):
                print status.get('entities').get("media")
                media = status['entities']["media"]
                tweet['media'] = media[0]["display_url"]
            else:
                tweet['media'] = None
            tweet['lang'] = status['user']['lang']
            tweet['location'] = status['user']['location']
            self.num_tweets += 1
            print self.num_tweets
            if self.num_tweets < 50:
                # Insert tweet into the collection
                self.tweet_coll.insert(tweet)
                return True
            else:
                return False
            return True
        except BaseException as e:
            print('Failed on_data: %s' % str(e))
            return True

    def on_error(self, status):
        print(status)
        return True

auth = OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
# api = tweepy.API(auth)

stream = MyStreamListener()
stream.mongo_connect()
twitter_stream = Stream(auth, stream)
twitter_stream.filter(track=keyword_list)
My error is:
Traceback (most recent call last):
File "C:\Program Files (x86)\JetBrains\PyCharm 4.5.4\helpers\profiler\run_profiler.py", line 146, in <module>
profiler.run(file)
File "C:\Program Files (x86)\JetBrains\PyCharm 4.5.4\helpers\profiler\run_profiler.py", line 85, in run
pydev_imports.execfile(file, globals, globals) # execute the script
File "C:/Users/Andrew/PycharmProjects/mongoDB/firstMongo.py", line 64, in <module>
import pymongo
File "C:\Users\Andrew\flask_test\lib\site-packages\pymongo\__init__.py", line 83, in <module>
from pymongo.collection import ReturnDocument
File "C:\Users\Andrew\flask_test\lib\site-packages\pymongo\collection.py", line 34, in <module>
from pymongo.cursor import Cursor
File "C:\Users\Andrew\flask_test\lib\site-packages\pymongo\cursor.py", line 22, in <module>
from bson import RE_TYPE
ImportError: cannot import name RE_TYPE
Process finished with exit code 1
This issue has been fixed since PyMongo 3.2; simply upgrade the package through pip:
pip install pymongo --upgrade
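To confirm which version ended up installed after the upgrade (a quick check, not part of the original answer):

```python
import pymongo
print(pymongo.version)  # should report 3.2 or newer
```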

How do I debug this error with Python?

My code that I will post below gives me this error and I can't figure out why or how to fix it. If anyone could help I would greatly appreciate it. Thanks!
Traceback (most recent call last):
File "C:\Users\Robert\Documents\j-a-c-o-b\newlc.py", line 99, in <module>
main()
File "C:\Users\Robert\Documents\j-a-c-o-b\newlc.py", line 76, in main
for final_url in pool.imap(handle_listing, listings):
File "C:\Python27\lib\site-packages\eventlet-0.9.16-py2.7.egg\eventlet\greenpool.py", line 232, in next
val = self.waiters.get().wait()
File "C:\Python27\lib\site-packages\eventlet-0.9.16-py2.7.egg\eventlet\greenthread.py", line 166, in wait
return self._exit_event.wait()
File "C:\Python27\lib\site-packages\eventlet-0.9.16-py2.7.egg\eventlet\event.py", line 120, in wait
current.throw(*self._exc)
File "C:\Python27\lib\site-packages\eventlet-0.9.16-py2.7.egg\eventlet\greenthread.py", line 192, in main
result = function(*args, **kwargs)
File "C:\Users\Robert\Documents\j-a-c-o-b\newlc.py", line 48, in handle_listing
yellow_page = BeautifulSoup(download(yellow_page_url))
File "build\bdist.win32\egg\BeautifulSoup.py", line 1519, in __init__
BeautifulStoneSoup.__init__(self, *args, **kwargs)
File "build\bdist.win32\egg\BeautifulSoup.py", line 1144, in __init__
self._feed(isHTML=isHTML)
File "build\bdist.win32\egg\BeautifulSoup.py", line 1168, in _feed
smartQuotesTo=self.smartQuotesTo, isHTML=isHTML)
File "build\bdist.win32\egg\BeautifulSoup.py", line 1770, in __init__
self._detectEncoding(markup, isHTML)
File "build\bdist.win32\egg\BeautifulSoup.py", line 1915, in _detectEncoding
'^<\?.*encoding=[\'"](.*?)[\'"].*\?>').match(xml_data)
TypeError: expected string or buffer
I don't know what it wants or what it means...
This is my code:
from gzip import GzipFile
from cStringIO import StringIO
import re
import webbrowser
import time
from difflib import SequenceMatcher
import os
import sys
from BeautifulSoup import BeautifulSoup
import eventlet
from eventlet.green import urllib2
import urllib2
import urllib

def download(url):
    print "Downloading:", url
    s = urllib2.urlopen(url).read()
    if s[:2] == '\x1f\x8b':
        ifh = GzipFile(mode='rb', fileobj=StringIO(s))
        s = ifh.read()
    print "Downloaded: ", url
    return s

def replace_chars(text, replacements):
    return ''.join(replacements.get(x, x) for x in text)

def handle_listing(listing_url):
    listing_document = BeautifulSoup(download(listing_url))
    # ignore pages that link to yellowpages
    if not listing_document.find("a", href=re.compile(re.escape("http://www.yellowpages.com/") + ".*")):
        listing_title = listing_document.title.text
        # define an alphabet
        alfa = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
        reps = {' ':'-', ',':'', '\'':'', '[':'', ']':'', '-Suite-' + alfa[1-26] : ''}
        if TITLE_MATCH.match(listing_title) is not None:
            title, = TITLE_MATCH.match(listing_title).groups()
            if ADDRESS_MATCH.match(listing_title) is not None:
                address, = ADDRESS_MATCH.match(listing_title).groups()
                yellow_page_url = "http://www.yellowpages.com/%s/%s?order=distance" % (
                    replace_chars(address, reps),
                    replace_chars(title, reps),
                )
                yellow_page = BeautifulSoup(download(yellow_page_url))
                page_url = yellow_page.find("h3", {"class" : "business-name fn org"})
                if page_url:
                    page_url = page_url.a["href"]
                business_name = title[:title.index(",")]
                page = BeautifulSoup(download(page_url))
                yellow_page_address = page.find("span", {"class" : "street-address"})
                if yellow_page_address:
                    if SequenceMatcher(None, address, yellow_page_address.text).ratio() >= 0.5:
                        pid, = re.search(r'p(\d{5,20})\.jsp', listing_url).groups(0)
                        page_escaped = replace_chars(page_url, {':':'%3A', '/':'%2F', '?':'%3F', '=':'%3D'})
                        final_url = "http://www.locationary.com/access/proxy.jsp?ACTION_TOKEN=proxy_jsp$JspView$SaveAction&inPlaceID=%s&xxx_c_1_f_987=%s" % (
                            pid, page_escaped)
                        return final_url

def main():
    pool = eventlet.GreenPool()
    listings_document = BeautifulSoup(download(START_URL))
    listings = listings_document.findAll("a", href=LOCATION_LISTING)
    listings = [listing['href'] for listing in listings]

    for final_url in pool.imap(handle_listing, listings):
        print final_url
        """
        if str(final_url) is not None:
            url = str(final_url)
            req = urllib2.Request(url)
            response = urllib2.urlopen(req)
            page = response.read()
            time.sleep(2)
        """

for a in range(0, 1):
    START_URL = 'http://www.locationary.com/place/en/US/Arkansas/Fayetteville-page2/?ACTION_TOKEN=NumericAction'
    TITLE_MATCH = re.compile(r'(.*) \(\d{1,10}.{1,100}\)$')
    ADDRESS_MATCH = re.compile(r'.{1,100}\((.*), .{4,14}, United States\)$')
    LOCATION_LISTING = re.compile(r'http://www\.locationary\.com/place/en/US/.{1,50}/.{1,50}/.{1,100}\.jsp')

if __name__ == '__main__':
    main()
A very common mistake made by novices using any language that supports exceptions is that they catch exceptions that they do not actually handle. This leads to hard-to-debug errors since it disrupts the normal flow of the program.
Specifically, catching urllib2.HTTPError in download() is preventing actual problems from being propagated to the rest of the program. Either remove the exception handler altogether, or re-raise at the end of the handler so the error still propagates.
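The posted download() doesn't show the handler, but the symptom fits: if an except block around urllib2.urlopen swallows the error, download() returns None, and BeautifulSoup's encoding detection then fails with "expected string or buffer". A minimal sketch of the re-raise option described above (the handler itself is assumed, not copied from the question):

```python
import urllib2

def download(url):
    try:
        return urllib2.urlopen(url).read()
    except urllib2.HTTPError as e:
        print "Download failed:", url, e.code   # log it for debugging
        raise                                   # re-raise so callers see the real failure
```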
