I have made a simple chat system with python-requests. There are two files: one is the sender and the other is the receiver. The main idea of the two files is:
1. The sender file contains a while loop that keeps taking a message as input. After
the message is entered, it sends the message to a website.
2. The receiver file also contains a while loop that requests new messages from the website every
5 seconds.
Now I want to run these two jobs in the same window with Tkinter. How can I do that? Thanks in advance.
Sender.py code is here:
import configme as con
import requests
import datetime
from cryptography.fernet import Fernet

nam = con.my_name
cookies_dict = con.cookie
key = con.crypto_key
url = con.base_url + '/config.php'

def makeID():
    return datetime.datetime.now().timestamp()

# encryption stuff
fernet = Fernet(key)

# member joining message
if len(nam) != 0:
    requests.get(url + f"?iD={makeID()}&name=<<<>>>&msg={nam} join the room.", cookies=cookies_dict)

with requests.Session() as r:
    while True:
        msg = input("Enter your Message: ")
        if msg == ".exit":
            # r.get(url + f"?iD={makeID()}&name=<<<>>>&msg={nam} has left the room.", cookies=cookies_dict)
            break
        else:
            encMessage = fernet.encrypt(msg.encode())
            messenger = {'iD': makeID(), 'name': nam, 'msg': encMessage}
            if msg != "":
                r.get(url, params=messenger, cookies=cookies_dict)
Receiver.py code is here:
import configme as con
import requests
import json
from cryptography.fernet import Fernet
from time import sleep
from datetime import datetime
from pytz import timezone
import pytz

cookies_dict = con.cookie
ozone = con.my_timezone
key = con.crypto_key
time_format = con.date_time_format
url = con.base_url + '/log.json'
t = con.receive_time

# encryption stuff
fernet = Fernet(key)
timezone = timezone(ozone)

def setTime(t):
    stamptime = int(float(t))
    GMT0 = pytz.utc.localize(datetime.utcfromtimestamp(stamptime))
    return GMT0.astimezone(timezone).strftime(time_format)

j = 0
while True:
    r = requests.get(url, cookies=cookies_dict).text
    message = json.loads(r)
    message_sz = len(message)
    if message_sz == 0:
        print("Looks like there are no messages")
        break
    for msg in message[j:]:
        local_time = setTime(msg['id'])
        if msg['nam'] == '<<<>>>':
            print(f"{local_time} :: {msg['nam']} :: {msg['msg']}")
        else:
            decMessage = fernet.decrypt(bytes(msg['msg'], "utf-8")).decode()
            print(f"{local_time} :: {msg['nam']} :: {decMessage}")
    j = message_sz
    sleep(t)
I would not suggest this approach of repeatedly polling the website, but you could run the two while loops in threads so they execute at the same time, and update the Tk window whenever you want with tk.update().
You could read the data from variables that the threaded loops set and display it in your single Tk window.
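A minimal sketch of that idea, assuming hypothetical send_message() and fetch_messages() helpers that wrap the requests calls from Sender.py and Receiver.py; instead of calling tk.update() from the worker thread, this version hands incoming messages back to the Tk main loop through a queue and root.after():

import threading
import queue
import time
import tkinter as tk

incoming = queue.Queue()  # the receiver thread puts new messages here

def receiver_loop():
    # background thread: poll the site every 5 seconds, as Receiver.py does
    while True:
        for line in fetch_messages():      # hypothetical wrapper around the log.json request
            incoming.put(line)
        time.sleep(5)

def poll_queue():
    # runs on the Tk main loop, so it is safe to touch widgets here
    while not incoming.empty():
        chat_box.insert(tk.END, incoming.get() + "\n")
    root.after(200, poll_queue)            # check the queue again in 200 ms

def on_send(event=None):
    send_message(entry.get())              # hypothetical wrapper around the sender's r.get(...)
    entry.delete(0, tk.END)

root = tk.Tk()
chat_box = tk.Text(root)
chat_box.pack()
entry = tk.Entry(root)
entry.pack()
entry.bind("<Return>", on_send)

threading.Thread(target=receiver_loop, daemon=True).start()
poll_queue()
root.mainloop()

This keeps all widget access on the main thread; the worker thread only fills the queue, which avoids the problems Tkinter has when widgets are touched from a second thread.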
Use multithreading, or else load the data separately.
I did my best writing this API call, but I'm now stuck on the while loop.
My main goal is to scan the API results batch by batch and write them to the database.
The way the code is written, it only brings me the results for offset=4000.
I don't know what I'm doing wrong.
Here is my code:
#!/bin/env python
from asyncio.windows_events import NULL
from re import X
from typing import ItemsView
import requests
import json
import hashlib
import base64
import time
import hmac
import pandas as pd
import datetime
import pyodbc

# Account Info
AccessId = ''
AccessKey = ''
Company = ''

# Request Info
httpVerb = 'GET'
resourcePath = '/alert/alerts'
offset = 0

while offset < 5000:
    # Query parameters
    queryParams = '?size=1000&offset=' + str(offset) + '&sort=-startEpoch&filter=cleared:*,rule:critical'
    offset += 1000

data = ''

# Construct URL
url = 'https://' + Company + '.logicmonitor.com/santaba/rest' + resourcePath + queryParams
print(url)

# Get current time in milliseconds
epoch = str(int(time.time() * 1000))

# Concatenate Request details
requestVars = httpVerb + epoch + data + resourcePath

# Construct signature
hmac1 = hmac.new(AccessKey.encode(), msg=requestVars.encode(), digestmod=hashlib.sha256).hexdigest()
signature = base64.b64encode(hmac1.encode())

# Construct headers
auth = 'LMv1 ' + AccessId + ':' + signature.decode() + ':' + epoch
headers = {'Content-Type': 'application/json', 'Authorization': auth}

# Make request
response = requests.get(url, data=data, headers=headers)
data = response.json()

alerts_df = pd.DataFrame(data['data']['items'])
alerts_df = alerts_df[['id', 'internalId', 'rule', 'monitorObjectName', 'startEpoch', 'endEpoch', 'cleared', 'resourceTemplateName']]
alerts_df['startEpoch'] = pd.to_datetime(alerts_df['startEpoch'], unit='s')
alerts_df['endEpoch'] = alerts_df['endEpoch'].apply(lambda x: pd.to_datetime(x, unit='s') if x != 0 else x)
print(alerts_df)
You are overwriting the variable constantly before ever making a request.
Also, the while condition should be offset <= 5000 if you want to include 5000 in the loop.
There are two ways to fix it, as shown in the sketch below:
1. append the results of each batch to a list for later use, and
2. make the request inside the loop, right after you generate the query string.
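A minimal sketch combining both points, reusing the names from the question (AccessId, AccessKey, Company, resourcePath, httpVerb and the imports above are assumed to be set exactly as in your script); the signing and the request now run once per offset, and every batch is collected in a list so the DataFrame is built once at the end:

offset = 0
all_items = []                            # option 1: keep every batch for later use
while offset <= 5000:
    queryParams = '?size=1000&offset=' + str(offset) + '&sort=-startEpoch&filter=cleared:*,rule:critical'
    url = 'https://' + Company + '.logicmonitor.com/santaba/rest' + resourcePath + queryParams

    # sign and send this batch (option 2: request inside the loop)
    data = ''
    epoch = str(int(time.time() * 1000))
    requestVars = httpVerb + epoch + data + resourcePath
    hmac1 = hmac.new(AccessKey.encode(), msg=requestVars.encode(), digestmod=hashlib.sha256).hexdigest()
    signature = base64.b64encode(hmac1.encode())
    headers = {'Content-Type': 'application/json',
               'Authorization': 'LMv1 ' + AccessId + ':' + signature.decode() + ':' + epoch}
    response = requests.get(url, headers=headers)

    all_items.extend(response.json()['data']['items'])
    offset += 1000

alerts_df = pd.DataFrame(all_items)       # one DataFrame built from all the batches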
So I have been using the YouTube API to scrape a channel. Everything was working fine until three days ago (03/15/2019), when the results stopped being sorted. It seems that no matter what I put in the order parameter, the results are all the same. Can anyone tell me why it isn't working? Here's the code snippet:
import re
import os
import json
import MySQLdb
from pytube import YouTube
import urllib
import isodate
import sys

def get_all_video_in_channel(channel_id):
    api_key = '<MY KEY>'
    video_url = 'https://www.googleapis.com/youtube/v3/videos?part=snippet,contentDetails&id={}&key={}'
    first_url = 'https://www.googleapis.com/youtube/v3/search?key={}&channelId={}&part=snippet,id&order=date&maxResults=50'.format(api_key, channel_id)  # order by date but won't work
    res = []
    url = first_url
    while True:
        inp = urllib.urlopen(url)
        resp = json.load(inp)
        vidIds = []
        for jobject in resp['items']:
            if jobject['id']['kind'] == "youtube#video":
                vidIds.append(jobject['id']['videoId'])
        vidreq = urllib.urlopen(video_url.format(",".join(vidIds), api_key))
        vidres = json.load(vidreq)
        for vidjson in vidres['items']:
            res.append(vidjson)
        if (len(res) >= 50):
            break
        try:
            next_page_token = resp['nextPageToken']
            url = first_url + '&pageToken={}'.format(next_page_token)
        except:
            break
    return res

c_id = 'UCycyxZMoPwg9cuRDMyQE7PQ'
episodes = get_all_video_in_channel(c_id)
Edit: I did some more research, and people say that the API is indeed not working properly because of something YouTube is doing related to removing the New Zealand shooting videos, and that it should be working properly again soon.
I recommend looking at this answer: https://stackoverflow.com/a/55220182/8327971. This is a known issue acknowledged by Google: https://issuetracker.google.com/issues/128673552.
I'm trying to download Twitter followers from a list of accounts. My function (which uses Twython) works pretty well for short account lists but raises an error for longer ones. It is not a rate-limit problem, since my function sleeps until the next time bin whenever the rate limit is hit.
The error is this:
twythonerror: ('Connection aborted.', error(10054, ''))
Others seem to have the same problem, and the proposed solution is to make the function sleep between different REST API calls, so I implemented the following code:
del twapi
sleep(nap[afternoon])
afternoon = afternoon + 1
twapi = Twython(app_key=app_key, app_secret=app_secret,
                oauth_token=oauth_token, oauth_token_secret=oauth_token_secret)
nap is a list of intervals in seconds and afternoon is an index.
Despite this suggestion, I still have exactly the same problem. It seems that the sleep doesn't resolve it.
Can anyone help me?
Here is the whole function:
def download_follower(serie_lst):
    """Creates account named txt files containing followers ids. Uses for loop on accounts names list."""
    nap = [1, 2, 4, 8, 16, 32, 64, 128]
    afternoon = 0
    for exemplar in serie_lst:
        # username from serie_lst entries
        account_name = exemplar
        twapi = Twython(app_key=app_key, app_secret=app_secret,
                        oauth_token=oauth_token, oauth_token_secret=oauth_token_secret)
        try:
            # initializations
            del twapi
            if afternoon >= 7:
                afternoon = 0
            sleep(nap[afternoon])
            afternoon = afternoon + 1
            twapi = Twython(app_key=app_key, app_secret=app_secret,
                            oauth_token=oauth_token, oauth_token_secret=oauth_token_secret)
            next_cursor = -1
            result = {}
            result["screen_name"] = ""
            result["followers"] = []
            iteration = 0
            file_name = ""
            # user info
            user = twapi.lookup_user(screen_name=account_name)
            # store user name
            result['screen_name'] = account_name
            # loop until all cursored results are stored
            while (next_cursor != 0):
                sleep(random.randrange(start=1, stop=15, step=1))
                call_result = twapi.get_followers_ids(screen_name=account_name, cursor=next_cursor)
                # loop over each entry of followers id and append each entry to results_follower
                for i in call_result["ids"]:
                    result["followers"].append(i)
                next_cursor = call_result["next_cursor"]  # new next_cursor
                iteration = iteration + 1
                if (iteration > 13):  # skip sleep if all cursored pages are processed
                    error_msg = localtime()
                    error_msg = "".join([str(error_msg.tm_mon), "/", str(error_msg.tm_mday), "/", str(error_msg.tm_year), " at ", str(error_msg.tm_hour), ":", str(error_msg.tm_min)])
                    error_msg = "".join(["Twitter API Request Rate Limit hit on ", error_msg, ", wait..."])
                    print(error_msg)
                    del error_msg
                    sleep(901)  # 15 min + 1 sec
                    iteration = 0
            # output file
            file_name = "".join([account_name, ".txt"])
            # print output
            out_file = open(file_name, "w")  # open file "account_name.txt"
            # out_file.write(str(result["followers"]))  # standard format
            for i in result["followers"]:  # R friendly table format
                out_file.write(str(i))
                out_file.write("\n")
            out_file.close()
        except twython.TwythonRateLimitError:
            # wait
            error_msg = localtime()
            error_msg = "".join([str(error_msg.tm_mon), "/", str(error_msg.tm_mday), "/", str(error_msg.tm_year), " at ", str(error_msg.tm_hour), ":", str(error_msg.tm_min)])
            error_msg = "".join(["Twitter API Request Rate Limit hit on ", error_msg, ", wait..."])
            print(error_msg)
            del error_msg
            del twapi
            sleep(901)  # 15 min + 1 sec
            # initializations
            if afternoon >= 7:
                afternoon = 0
            sleep(nap[afternoon])
            afternoon = afternoon + 1
            twapi = Twython(app_key=app_key, app_secret=app_secret,
                            oauth_token=oauth_token, oauth_token_secret=oauth_token_secret)
            next_cursor = -1
            result = {}
            result["screen_name"] = ""
            result["followers"] = []
            iteration = 0
            file_name = ""
            # user info
            user = twapi.lookup_user(screen_name=account_name)
            # store user name
            result['screen_name'] = account_name
            # loop until all cursored results are stored
            while (next_cursor != 0):
                sleep(random.randrange(start=1, stop=15, step=1))
                call_result = twapi.get_followers_ids(screen_name=account_name, cursor=next_cursor)
                # loop over each entry of followers id and append each entry to results_follower
                for i in call_result["ids"]:
                    result["followers"].append(i)
                next_cursor = call_result["next_cursor"]  # new next_cursor
                iteration = iteration + 1
                if (iteration > 13):  # skip sleep if all cursored pages are processed
                    error_msg = localtime()
                    error_msg = "".join([str(error_msg.tm_mon), "/", str(error_msg.tm_mday), "/", str(error_msg.tm_year), " at ", str(error_msg.tm_hour), ":", str(error_msg.tm_min)])
                    error_msg = "".join(["Twitter API Request Rate Limit hit on ", error_msg, ", wait..."])
                    print(error_msg)
                    del error_msg
                    sleep(901)  # 15 min + 1 sec
                    iteration = 0
            # output file
            file_name = "".join([account_name, ".txt"])
            # print output
            out_file = open(file_name, "w")  # open file "account_name.txt"
            # out_file.write(str(result["followers"]))  # standard format
            for i in result["followers"]:  # R friendly table format
                out_file.write(str(i))
                out_file.write("\n")
            out_file.close()
As discussed in the comments, there are a few issues with your code at present. You shouldn't need to delete your connection for it to work properly, and I think the issue arises because you initialise a second time without any handling for hitting your rate limit. Here is an example, using Tweepy, of how you can get the information you require:
import tweepy
from datetime import datetime

def download_followers(user, api):
    all_followers = []
    try:
        for page in tweepy.Cursor(api.followers_ids, screen_name=user).pages():
            all_followers.extend(map(str, page))
        return all_followers
    except tweepy.TweepError:
        print('Could not access user {}. Skipping...'.format(user))

# Include your keys below:
consumer_key = 'YOUR_KEY'
consumer_secret = 'YOUR_KEY'
access_token = 'YOUR_KEY'
access_token_secret = 'YOUR_KEY'

# Set up tweepy API, with handling of rate limits
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
main_api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)

# List of usernames to get followers for
lookup_users = ['asongtoruin', 'mbiella']

for username in lookup_users:
    user_followers = download_followers(username, main_api)
    if user_followers:
        with open(username + '.txt', 'w') as outfile:
            outfile.write('\n'.join(user_followers))
        print('Finished outputting: {} at {}'.format(username, datetime.now().strftime('%Y/%m/%d %H:%M:%S')))
Tweepy is clever enough to know when it has hit its rate limit when we use wait_on_rate_limit=True, and it checks how long it needs to sleep before it can start again. By using wait_on_rate_limit_notify=True, we allow it to print out how long it will be waiting until it can fetch the next page of followers (through this ID-based method, there appear to be 5000 IDs per page).
We additionally catch a TweepError exception - this can occur if the username provided relates to a protected account for which our authenticated user does not have permission to view. In this case, we simply skip the user to allow other information to be downloaded, but print out a warning that the user could not be accessed.
Running this saves a text file of follower ids for any user it can access. For me this prints the following:
Rate limit reached. Sleeping for: 593
Finished outputting: asongtoruin at 2017/02/22 11:43:12
Could not access user mbiella. Skipping...
The follower IDs of asongtoruin (aka me) are saved as asongtoruin.txt.
There is one possible issue: our pages of followers start from the newest first. This could (though I don't understand the API well enough to say with certainty) cause problems with our output dataset if new users are added between our calls, as we might both miss those users and end up with duplicates. If duplicates become an issue, you could change return all_followers to return set(all_followers).
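For example, a small variation of my own (not tested against the API) that drops duplicates while keeping the newest-first order would be to replace the return line with:

return list(dict.fromkeys(all_followers))  # de-duplicates while preserving order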
import time
from TwitterAPI import TwitterAPI
import requests
from requests_oauthlib import OAuth1
from urlparse import parse_qs
# application's key and key secret
ck = ''
cs = ''
# obtain request token
oauth = OAuth1(ck, cs)
r = requests.post(url='https://api.twitter.com/oauth/request_token', auth=oauth)
credentials = parse_qs(r.content)
request_key = credentials.get('oauth_token')[0]
request_secret = credentials.get('oauth_token_secret')[0]
#obtain authorization from twitter user
print('Visit this link to authorize the TweetBot:\n https://api.twitter.com/oauth/authorize?oauth_token=%s' % request_key)
verifier = raw_input('Enter your verification code: ')
# obtain access token
oauth = OAuth1(ck, cs, request_key, request_secret, verifier=verifier)
r = requests.get(url='https://api.twitter.com/oauth/access_token', auth=oauth)
credentials = parse_qs(r.content)
tk = credentials.get('oauth_token')[0]
ts = credentials.get('oauth_token_secret')[0]
# access TwitterAPI with the obtained access
api = TwitterAPI(ck, cs, tk, ts)
f = open('tweetbot.txt', 'rU')
for line in f:
    r = api.request('statuses/update', {'status': line})
    print line,
    print r.status_code
    time.sleep(600)
I have this code, but I want it to handle multiple Twitter accounts. I guess I'd need to loop the input and store the credentials in different variables? Please give me the best solution; I'm not that advanced with Python.
Alright #user3392493, what you need is a while loop with several if statements inside and a counter variable, so that the variables are set based on the number the counter is at:
enoughaccounts = False
counter = 0

while not enoughaccounts:
    counter += 1
    if counter == 1:
        # Do Everything
        account1 = .....
    if counter == 2:
        # Do Everything
        account2 = .....
    if counter == 3:
        # Do Everything
        account3 = .....
    if counter == 4:
        # Do Everything
        account4 = .....
    moreaccounts = input('Add another account? (type yes or no)')
    if moreaccounts == 'no' or moreaccounts == 'No' or moreaccounts == 'NO':
        enoughaccounts = True
This should do what you want it to, if you need anything else just ask :)
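As a rough follow-up sketch (my assumption, not part of the answer above), if each accountN ends up holding an authorised TwitterAPI object built with the OAuth flow from the question, the posting loop from the question could then run once per account:

accounts = [account1, account2]            # list whichever accounts were actually created

with open('tweetbot.txt', 'rU') as f:
    for line in f:
        for api in accounts:               # send the status from every account
            r = api.request('statuses/update', {'status': line})
            print(line, r.status_code)
        time.sleep(600)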