How to use NordVPN servers as a proxy for Python requests?

I am trying to create a list of proxies using NordVPN for Python requests, but I can't seem to figure out the correct format to write the server as a proxy.
From what I understood, the format is as such, with this server as an example:
proxy = {
    'http': "username:password@us6181.nordvpn.com",
    'https': "username:password@us6181.nordvpn.com"
}
I have tried various combinations:

- my login email and password
- my NordVPN account username and password
- I realize not all servers can be used as a proxy, so I made sure mine can
- I tried using udp/tcp instead of http/https

None of these attempts worked, and I really hope someone can tell me the proper way to do it.
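For reference, here is a minimal sketch of the proxy URL format that requests expects. The separator before the host is @, and the port number below is only a placeholder; you need to check which port the provider's HTTP(S) proxy actually listens on:

import requests

# scheme://user:password@host:port is the format requests understands.
# The port (8888) is a placeholder assumption, not a verified NordVPN value.
proxy_url = "http://username:password@us6181.nordvpn.com:8888"
proxies = {"http": proxy_url, "https": proxy_url}

r = requests.get("http://ipv4.icanhazip.com", proxies=proxies, timeout=10)
print(r.text)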

Here is a simple script that I made:
import requests
from requests.auth import HTTPProxyAuth
import random

# Some NordVPN server IPs to try as proxies, e.g.:
# 196.240.57.107
# 37.120.217.219

# Credential pairs in 'username:password' form; add more to rotate
up_list = ['username:password']
user, password = random.choice(up_list).split(':', 1)

proxies = {"http": "http://217.138.202.147"}
auth = HTTPProxyAuth(user, password)

x = requests.get("http://ifconfig.me/ip")
print('Real ip: ' + x.text)

try:
    r = requests.get("http://ipv4.icanhazip.com", proxies=proxies, auth=auth)
    print(r.text)
except requests.exceptions.ProxyError:
    # Retry with the proxy registered for https traffic instead
    proxies = {"https": "http://217.138.202.147"}
    r = requests.get("http://ipv4.icanhazip.com/", proxies=proxies, auth=auth)
    print(r.text)
Some proxies won't work, so you have to test them. Here is a tester:
import requests
from requests.auth import HTTPProxyAuth
import re
import random

list1 = []

def main():
    # Ask NordVPN's recommendation endpoint for servers in a random country
    country_id = str(random.randint(0, 230))
    url = ('https://nordvpn.com/wp-admin/admin-ajax.php'
           '?action=servers_recommendations&filters={"country_id":' + country_id + '}')
    headers = {
        'accept': "application/json",
        'content-type': "application/json"
    }
    response = requests.get(url, headers=headers)
    ips = re.findall(r'"ip":"([\d.]+)"', response.text)
    if ips:
        list1.extend(ips)
    else:
        # No servers for that country id; try another random one
        main()

main()

for prox in list1:
    try:
        # Credential pairs in 'username:password' form; add more to rotate
        user, password = random.choice(['username:password']).split(':', 1)
        proxies = {"http": "http://" + prox}
        auth = HTTPProxyAuth(user, password)
        r = requests.get("http://ipv4.icanhazip.com", proxies=proxies, auth=auth)
        if '<p>The following error was encountered while trying to retrieve the URL: <a' in r.text:
            print(prox + ' Auth failed')
        else:
            print(prox + ' Good')
            print('Your Ip: ' + r.text)
    except requests.exceptions.ProxyError:
        print(prox + ' Failed')
'Auth failed' means you have to try more username and password combinations.
Hope you enjoy it.

Related

Using python to parse the getEmailActivityUserDetail report

I'm using a Python script to pull data from https://graph.microsoft.com. The output that is delivered duplicates itself ten times for each parsed user. What step is missing so that each requested user is captured only once?
import requests
import urllib.parse
import json
import csv
import os

client_id = urllib.parse.quote_plus("#######################")
client_secret = urllib.parse.quote_plus("######################")
tenant = urllib.parse.quote_plus("#########################")
auth_uri = "https://login.microsoftonline.com/" + tenant + "/oauth2/v2.0/token"
auth_body = "grant_type=client_credentials&client_id=" + client_id + "&client_secret=" + client_secret + "&scope=https%3A%2F%2Fgraph.microsoft.com%2F.default"
authorization = requests.post(auth_uri, data=auth_body, headers={"Content-Type": "application/x-www-form-urlencoded"})
token = json.loads(authorization.content)['access_token']
graph_uri = "https://graph.microsoft.com/v1.0/reports/getEmailActivityUserDetail(period=%27D30%27)"
response = requests.get(graph_uri, data=auth_body, headers={'Content-Type': "application/json", 'Authorization': 'Bearer ' + token})
print(response.text)
temp_usr_list = [
    'User.One@domain.com',
    'User.Two@domain.com'
]
report_user_list = []
for line in response.iter_lines():
    line_fields = line.decode("utf-8").split(',')
    for entry in line_fields:
        if len(entry) < 1:
            continue
        if line_fields[1] in temp_usr_list:
            d = dict(
                user_principle_name = line_fields[1],
                send_count = line_fields[6],
                recv_count = line_fields[7],
                read_count = line_fields[8],
                assigned_products = line_fields[9]
            )
            report_user_list.append(d)
print(report_user_list)
OUTPUT:
{'user_principle_name': 'User.One@domain.com', 'send_count': '0', 'recv_count': '0', 'read_count': '0', 'assigned_products': 'MICROSOFT'},...
{'user_principle_name': 'User.Two@domain.com', 'send_count': '0', 'recv_count': '0', 'read_count': '0', 'assigned_products': 'MICROSOFT'},...
Try running it once in this format (note that the membership check is moved out of the inner for entry loop, which was appending one dict per field) and let me know if you get the same output.
import requests
import urllib.parse
import json
import csv
import os

# Parms
client_id = urllib.parse.quote_plus('#######################')
client_secret = urllib.parse.quote_plus('######################')
tenant = urllib.parse.quote_plus('#########################')
auth_uri = 'https://login.microsoftonline.com/' + tenant \
    + '/oauth2/v2.0/token'
auth_body = 'grant_type=client_credentials&client_id=' + client_id \
    + '&client_secret=' + client_secret \
    + '&scope=https%3A%2F%2Fgraph.microsoft.com%2F.default'
authorization = requests.post(auth_uri, data=auth_body,
                              headers={'Content-Type': 'application/x-www-form-urlencoded'})
token = json.loads(authorization.content)['access_token']
graph_uri = \
    'https://graph.microsoft.com/v1.0/reports/getEmailActivityUserDetail(period=%27D30%27)'
response = requests.get(graph_uri, data=auth_body,
                        headers={'Content-Type': 'application/json',
                                 'Authorization': 'Bearer ' + token})
print(response.text)
temp_usr_list = ['User.One@domain.com', 'User.Two@domain.com']
report_user_list = []
for line in response.iter_lines():
    line_fields = line.decode('utf-8').split(',')
    for entry in line_fields:
        if len(entry) < 1:
            continue
    # This check now runs once per line, not once per field
    if line_fields[1] in temp_usr_list:
        d = dict(user_principle_name=line_fields[1],
                 send_count=line_fields[6],
                 recv_count=line_fields[7],
                 read_count=line_fields[8],
                 assigned_products=line_fields[9])
        report_user_list.append(d)
print(report_user_list)
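As an aside, since the report comes back as CSV text, the csv module handles the parsing more safely than a bare split(',') because quoted fields can themselves contain commas. A sketch, reusing the response object and the column positions assumed in the code above:

import csv
import io

reader = csv.reader(io.StringIO(response.text))
header = next(reader)  # first row holds the column names
for row in reader:
    if len(row) > 1 and row[1] in temp_usr_list:
        print(row[1], row[6], row[7], row[8], row[9])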

pandas .loc stops working when embedded in loop

import requests
import json
import pandas as pd

CSV_output_df = pd.read_csv('output/DEMO_CSV.csv', index_col=None)
payload = {}
headers = {
    'Authorization': 'Basic ***********************************************************'
}
for index, row in CSV_output_df.iterrows():
    Package_label = CSV_output_df.loc[index, "Package Id"]
    licenseNumber = CSV_output_df.loc[index, "licenseNumber"]
    Package_label = str(Package_label)
    licenseNumber = str(licenseNumber)
    url = ("https://api-mi.metrc.com/packages/v1/" + Package_label + "?licenseNumber=" + licenseNumber)
    response = requests.request("GET", url, headers=headers, data=payload)
    json_data = (response.text.encode('utf8'))
    json_data = str(json_data)
    json_data = (json_data.strip('b'))
    json_data = (json_data.strip("'"))
    json_data = (json_data.strip('{'))
    json_data = (json_data.strip('}'))
    json_data = (json_data.replace('"Item":{', ''))
    json_data = (json_data.split(','))
    json_data_df = pd.DataFrame(json_data)
    Id = json_data_df.loc[0, 0]
    Id = Id.replace('"Id":', '')
    CSV_output_df.loc[index, "api_id"] = Id
for index, row in CSV_output_df.iterrows():
    api_id = CSV_output_df.loc[index, "api_id"]
    licenseNumber = CSV_output_df.loc[index, "licenseNumber"]
    api_id = str(api_id)
    licenseNumber = str(licenseNumber)
    url0 = ("https://api-mi.metrc.com/labtests/v1/results?packageId=" + api_id + "&licenseNumber=" + licenseNumber)
    response0 = requests.request("GET", url0, headers=headers, data=payload)
    json_data0 = (response0.text.encode('utf8'))
    json_data0 = str(json_data0)
    json_data0 = (json_data0.strip('b'))
    json_data0 = (json_data0.strip("'"))
    json_data0 = (json_data0.strip('{'))
    json_data0 = (json_data0.strip('}'))
    json_data0 = (json_data0.strip('['))
    json_data0 = (json_data0.strip(']'))
    json_data0 = (json_data0.split(','))
    json_data_df0 = pd.DataFrame(json_data0)
    data_point = (json_data_df0.loc[1187, 0])
Python noobie here and first-time poster. The issue is that the command below is not working in my for loop, but it does work as a standalone command:
data_point = (json_data_df0.loc[1187, 0])
The traceback log is telling me:
ValueError: 1187 is not in range
but there are 1326 rows in json_data_df0, and inside the loop every index except 0, 0 fails.
I think you are supposed to use .iloc if you want to access rows/columns by integer position; .loc is for accessing rows/columns by label.
For your reference: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html
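A small sketch of the difference, on a hypothetical frame (not the question's data). Note that in the loop, json_data_df0 is rebuilt from each API response, so on iterations where the response has fewer than 1188 fields the label 1187 likely just does not exist:

import pandas as pd

# After filtering or custom indexing, rows keep their labels,
# so label-based lookup can fail even when positions exist.
df = pd.DataFrame({'value': [10, 20, 30, 40]}, index=[0, 5, 7, 9])

print(df.loc[5, 'value'])   # label-based: the row labelled 5 -> 20
print(df.iloc[1, 0])        # position-based: the second row -> 20
# df.loc[1, 'value'] would raise KeyError: 1 is not a row label here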

Search Splunk API using python

What I am trying to do is perform a search on Splunk's API using Python. I am able to get a session key, but that's it. I'm new to both Python and Splunk, so I'm a bit out of my depth, and any help would be really appreciated.
The error:
Traceback (most recent call last):
  File "splunkAPI.py", line 31, in <module>
    sid = minidom.parseString(r.text).getElementsByTagName('sid')[0].firstChild.nodeValue
IndexError: list index out of range
python:
import time  # need for sleep
from xml.dom import minidom
import json, pprint
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

base_url = 'https://___________:8089'
username = '______'
password = '______'
search_query = "____________"

#-------------------------get session token------------------------
r = requests.get(base_url + "/servicesNS/admin/search/auth/login",
                 data={'username': username, 'password': password}, verify=False)
session_key = minidom.parseString(r.text).getElementsByTagName('sessionKey')[0].firstChild.nodeValue
print("Session Key:", session_key)

#-------------------- perform search -------------------------
r = requests.post(base_url + '/services/search/jobs/', data=search_query,
                  headers={'Authorization': ('Splunk %s' % session_key)},
                  verify=False)
sid = minidom.parseString(r.text).getElementsByTagName('sid')[0].firstChild.nodeValue

done = False
while not done:
    r = requests.get(base_url + '/services/search/jobs/' + sid,
                     headers={'Authorization': ('Splunk %s' % session_key)},
                     verify=False)
    response = minidom.parseString(r.text)
    for node in response.getElementsByTagName("s:key"):
        if node.hasAttribute("name") and node.getAttribute("name") == "dispatchState":
            dispatchState = node.firstChild.nodeValue
            print("Search Status: ", dispatchState)
            if dispatchState == "DONE":
                done = True
            else:
                time.sleep(1)

r = requests.get(base_url + '/services/search/jobs/' + sid + '/results/',
                 headers={'Authorization': ('Splunk %s' % session_key)},
                 data={'output_mode': 'json'},
                 verify=False)
pprint.pprint(json.loads(r.text))
Hmm... that code looks awfully familiar :P Unfortunately, error checking wasn't that important when I wrote it.
The issue you see occurs if the search_query is not defined properly. It must start with search=. Also note that you need to include an initial search command if doing a standard Splunk search.
For example, search=search index=* will work, while search=index=* will not.
If you need to include quotes in your search string, I suggest you use something like the following format:
search_query = """search=search index=* "a search expression" | stats count"""
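For instance, letting requests do the form-encoding avoids hand-building the search= string entirely (a small sketch, reusing base_url and session_key from the snippet above):

r = requests.post(base_url + '/services/search/jobs/',
                  data={'search': 'search index=* | stats count'},
                  headers={'Authorization': 'Splunk %s' % session_key},
                  verify=False)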
Tried this, but it did not give the needed result; not sure what is missing:
import urllib.parse
import httplib2  # import library
import json
import pprint
import time
import re
from xml.dom import minidom

searchquery = 'search index="movable_in" sourcetype="movable:in:assets" | stats avg(exposure_score)'
myhttp = httplib2.Http()
baseurl = 'https://xxxx.splunkxxx.com:8089'
usernamesp = 'xxxx'
passwordsp = 'xxxx'

def get_splunk_result(searchquery):
    # Step 1: Get a session key
    servercontent = myhttp.request(f'{baseurl}/services/auth/login', 'POST', headers={},
                                   body=urllib.parse.urlencode({'username': usernamesp, 'password': passwordsp}))[1]
    sessionkey = minidom.parseString(servercontent).getElementsByTagName('sessionKey')[0].childNodes[0].nodeValue
    # print("====>sessionkey: %s <====" % sessionkey)
    sid = ''
    # ------------------
    if not searchquery.startswith('search'):
        searchquery = f'search {searchquery}'
    # Step 2: Get a sid with the search query
    i = 0
    while True:
        time.sleep(1)
        try:
            searchjob = myhttp.request(f'{baseurl}/services/search/jobs', 'POST',
                                       headers={'Authorization': f'Splunk {sessionkey}'},
                                       body=urllib.parse.urlencode({'search': searchquery}))[1]
            sid = minidom.parseString(searchjob).getElementsByTagName('sid')[0].childNodes[0].nodeValue
            break
        except Exception:
            i = i + 1
            # print(i)
            if i > 30:
                break
    # print("====>SID: %s <====" % sid)
    # Step 3: Get search status
    myhttp.add_credentials(usernamesp, passwordsp)
    servicessearchstatusstr = '/services/search/jobs/%s/' % sid
    isnotdone = True
    while isnotdone:
        searchstatus = myhttp.request(f'{baseurl}{servicessearchstatusstr}', 'GET')[1]
        isdonestatus = re.compile('isDone">(0|1)')
        strstatus = str(searchstatus)
        isdonestatus = isdonestatus.search(strstatus).groups()[0]
        if isdonestatus == '1':
            isnotdone = False
    # Step 4: Get the search result
    services_search_results_str = '/services/search/jobs/%s/results?output_mode=json_rows&count=0' % sid
    searchresults = myhttp.request(f'{baseurl}{services_search_results_str}', 'GET')[1]
    searchresults = json.loads(searchresults)
    # searchresults = splunk_result(searchresults)
    return searchresults

output = get_splunk_result(searchquery)
print(output)
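If the search itself succeeds, the remaining surprise is usually the result shape: with output_mode=json_rows, Splunk returns the column names under a fields key and the data under rows, so the average has to be dug out of those. A sketch, assuming that response shape:

results = get_splunk_result(searchquery)
fields = results.get('fields', [])
rows = results.get('rows', [])
print(fields)     # e.g. ['avg(exposure_score)']
print(rows[:1])   # e.g. [['42.5']] for a single stats avg(...) value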

How to get real estate data with Idealista API?

I've been trying to use the API of the website Idealista (https://www.idealista.com/) to retrieve real estate data.
Since I'm not familiar with OAuth2, I haven't been able to obtain the token so far. I have just been provided with the API key, the secret, and some basic info on how to mount the HTTP request.
I would appreciate an example (preferably in Python) of how this API works, or else some more generic info about dealing with OAuth2 and Python.
After some days of research, I came up with a basic Python code to retrieve real estate data from the Idealista API.
import base64
import urllib.parse
from httplib2 import Http

def get_oauth_token():
    http_obj = Http()
    url = "https://api.idealista.com/oauth/token"
    apikey = urllib.parse.quote_plus('Provided_API_key')
    secret = urllib.parse.quote_plus('Provided_API_secret')
    auth = base64.encode(apikey + ':' + secret)
    body = {'grant_type': 'client_credentials'}
    headers = {'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8', 'Authorization': 'Basic ' + auth}
    resp, content = http_obj.request(url, method='POST', headers=headers, body=urllib.parse.urlencode(body))
    return content
This function would return a JSON with the OAuth2 token and the session time in seconds. Afterwards, querying the API is as simple as:
def search_api(token):
    http_obj = Http()
    url = "http://api.idealista.com/3.5/es/search?center=40.42938099999995,-3.7097526269835726&country=es&maxItems=50&numPage=1&distance=452&propertyType=bedrooms&operation=rent"
    headers = {'Authorization': 'Bearer ' + token}
    resp, content = http_obj.request(url, method='POST', headers=headers)
    return content
This time we would find the data we were looking for in the content var, again as JSON.
That can't be marked as the correct answer, since
auth = base64.encode(apikey + ':' + secret)
body = {'grant_type':'client_credentials'}
headers = {'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8','Authorization' : 'Basic ' + auth}
will give you a TypeError:
can only concatenate str (not "bytes") to str
since base64.b64encode returns a bytes object...
It's true the Idealista API is very limited on documentation, but I think this is a better approach, since it doesn't use unnecessary libs (only requests and the standard library):
import base64
import requests

# first request
message = API_KEY + ":" + SECRET
auth = "Basic " + base64.b64encode(message.encode("ascii")).decode("ascii")
headers_dic = {"Authorization": auth,
               "Content-Type": "application/x-www-form-urlencoded;charset=UTF-8"}
params_dic = {"grant_type": "client_credentials",
              "scope": "read"}
r = requests.post("https://api.idealista.com/oauth/token",
                  headers=headers_dic,
                  params=params_dic)
This works flawlessly with only the Python requests and base64 modules...
regards
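To actually pull the token out of that response and use it (a short follow-up sketch):

token = r.json()['access_token']
search_headers = {'Authorization': 'Bearer ' + token}
# search_headers can now be sent with requests against the /3.5/ search endpoints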
This is my code, improving on answer #3... this runs OK for me!
Just put in your API key and your password (secret)...
import pandas as pd
import json
import urllib
import requests as rq
import base64

def get_oauth_token():
    url = "https://api.idealista.com/oauth/token"
    apikey = 'your_api_key'  # sent by idealista
    secret = 'your_password'  # sent by idealista
    auth = base64.b64encode(apikey + ':' + secret)
    headers = {'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8', 'Authorization': 'Basic ' + auth}
    params = urllib.urlencode({'grant_type': 'client_credentials'})
    content = rq.post(url, headers=headers, params=params)
    bearer_token = json.loads(content.text)['access_token']
    return bearer_token

def search_api(token, url):
    headers = {'Content-Type': 'Content-Type: multipart/form-data;', 'Authorization': 'Bearer ' + token}
    content = rq.post(url, headers=headers)
    result = json.loads(content.text)['access_token']
    return result

country = 'es'  # values: es, it, pt
locale = 'es'  # values: es, it, pt, en, ca
language = 'es'
max_items = '50'
operation = 'sale'
property_type = 'homes'
order = 'priceDown'
center = '40.4167,-3.70325'
distance = '60000'
sort = 'desc'
bankOffer = 'false'

df_tot = pd.DataFrame()
limit = 10
for i in range(1, limit):
    url = ('https://api.idealista.com/3.5/'+country+'/search?operation='+operation+  # "&locale="+locale+
           '&maxItems='+max_items+
           '&order='+order+
           '&center='+center+
           '&distance='+distance+
           '&propertyType='+property_type+
           '&sort='+sort+
           '&numPage=%s'+
           '&language='+language) % (i)
    a = search_api(get_oauth_token(), url)
    df = pd.DataFrame.from_dict(a['elementList'])
    df_tot = pd.concat([df_tot, df])

df_tot = df_tot.reset_index()
I found some mistakes; at least, I cannot run it as posted.
I believe I improved it with this:
import pandas as pd
import json
import urllib.parse
import requests as rq
import base64

def get_oauth_token():
    url = "https://api.idealista.com/oauth/token"
    apikey = 'your_api_key'  # sent by idealista
    secret = 'your_password'  # sent by idealista
    apikey_secret = apikey + ':' + secret
    auth = str(base64.b64encode(bytes(apikey_secret, 'utf-8')))[2:][:-1]
    headers = {'Authorization': 'Basic ' + auth,
               'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8'}
    params = urllib.parse.urlencode({'grant_type': 'client_credentials'})  # ,'scope':'read'
    content = rq.post(url, headers=headers, params=params)
    bearer_token = json.loads(content.text)['access_token']
    return bearer_token

def search_api(token, url):
    headers = {'Content-Type': 'Content-Type: multipart/form-data;', 'Authorization': 'Bearer ' + token}
    content = rq.post(url, headers=headers)
    result = json.loads(content.text)
    return result
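A usage sketch for these two helpers, with the search URL assembled from the parameters used in the previous answer (the elementList key comes from that answer as well):

token = get_oauth_token()
url = ('https://api.idealista.com/3.5/es/search?operation=sale'
       '&maxItems=50&numPage=1&center=40.4167,-3.70325'
       '&distance=60000&propertyType=homes')
result = search_api(token, url)
print(len(result['elementList']), 'listings on this page')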

Twitter Search API using urllib2

I am a beginner at API calls using Python (or even just API calls). I am trying a basic call with the Twitter API.
My code for generating the oauth_signature is as follows:
def getSignature(query):
    key_dict['q'] = urllib.quote(query, '')
    finKey = ""
    for key in sorted(key_dict.keys()):
        finKey += key + "=" + key_dict[key] + "&"
    finKey = finKey[:-1]
    finKey = HTTP_METHOD + "&" + urllib.quote(BASE_URL, '') + "&" + urllib.quote(finKey, '')
    key = urllib.quote(CONSUMER_SECRET_KEY, '') + "&" + urllib.quote(ACCESS_TOKEN_SECRET, '')
    hashed = hmac.new(key, finKey, sha1)
    finKey = binascii.b2a_base64(hashed.digest())
    key_dict['oauth_signature'] = urllib.quote(finKey, '')
where key_dict stores all the keys:
key_dict = dict()
key_dict['oauth_consumer_key'] = urllib.quote(CONSUMER_KEY, '')
key_dict['oauth_nonce'] = urllib.quote("9ab59691142584g739134971f75aa986", '')
key_dict['oauth_signature_method'] = urllib.quote("HMAC-SHA1", '')
key_dict['oauth_timestamp'] = urllib.quote(str(int(time.time())), '')
key_dict['oauth_token'] = urllib.quote(ACCESS_TOKEN, '')
key_dict['oauth_version'] = urllib.quote(OAUTH_VERSION, '')
BASE_URL = "https://api.twitter.com/1.1/search/tweets.json?" + urllib.quote("q=delhi+elections", '')
I generate the base header string using the following:
def getHeaderString():
    ret = "OAuth "
    key_list = ['oauth_consumer_key', 'oauth_nonce', 'oauth_signature', 'oauth_signature_method', 'oauth_timestamp', 'oauth_token', 'oauth_version']
    for key in key_list:
        ret = ret + key + "=\"" + key_dict[key] + "\", "
    ret = ret[:-2]
    return ret
Although when I make the call, I get:
urllib2.HTTPError: HTTP Error 401: Unauthorized
or
urllib2.URLError: <urlopen error [Errno 60] Operation timed out>
My final request is made using the following:
getSignature("delhi+elections")
headers = {'Authorization': getHeaderString()}
req = urllib2.Request(BASE_URL, headers=headers)
response = urllib2.urlopen(req)
Where am I going wrong?
A few points that should have been mentioned somewhere:

- The method binascii.b2a_base64(hashed.digest()) appends a newline at the end of the string. This causes the oauth_signature to fail authentication.
- The query delhi+elections is actually supposed to be delhi elections. This mismatch again made the SHA1 hash comparison fail.

Fixing both of them solved the problem.
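To see the first point concretely (a quick sketch; repr() makes the trailing newline visible):

import binascii, base64

digest = b'\x01\x02\x03'
print(repr(binascii.b2a_base64(digest)))       # 'AQID\n' -- note the trailing newline
print(repr(binascii.b2a_base64(digest)[:-1]))  # 'AQID'   -- what the signature needs
print(repr(base64.b64encode(digest)))          # 'AQID'   -- no newline in the first place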
The final Code :
key_dict = dict()
key_dict['oauth_consumer_key'] = urllib.quote(CONSUMER_KEY, '')
key_dict['oauth_nonce'] = urllib.quote("9aa39691142584s7df134971375aa986", '')
key_dict['oauth_signature_method'] = urllib.quote("HMAC-SHA1", '')
key_dict['oauth_timestamp'] = urllib.quote(str(int(time.time())), '')
key_dict['oauth_token'] = urllib.quote(ACCESS_TOKEN, '')
key_dict['oauth_version'] = urllib.quote(OAUTH_VERSION, '')
BASE_URL = "https://api.twitter.com/1.1/search/tweets.json"
def getSignature(query):
key_dict['q'] = urllib.quote(query, '')
finKey = ""
for key in sorted(key_dict.keys()):
finKey += key + "="+key_dict[key]+"&"
finKey = finKey[:-1]
finKey = HTTP_METHOD + "&" + urllib.quote(BASE_URL, '') + "&" + urllib.quote(finKey, '')
key = urllib.quote(CONSUMER_SECRET_KEY, '')+"&"+urllib.quote(ACCESS_TOKEN_SECRET, '')
hashed = hmac.new(key, finKey, sha1)
finKey = binascii.b2a_base64(hashed.digest())[:-1]
key_dict['oauth_signature'] = urllib.quote(finKey, '')
def getHeaderString():
ret = "OAuth "
key_list =['oauth_consumer_key', 'oauth_nonce', 'oauth_signature', 'oauth_signature_method', 'oauth_timestamp', 'oauth_token', 'oauth_version']
for key in key_list:
ret = ret+key+"=\""+key_dict[key]+"\", "
ret = ret[:-2]
return ret
url = BASE_URL
getSignature("delhi elections")
headers = { 'Authorization' : getHeaderString()}
values = {'q':'delhi elections'}
data = urllib.urlencode(values)
req = urllib2.Request(url+"?"+data, headers= headers)
response = urllib2.urlopen(req)
the_page = response.read()
print the_page
Instead of coding your own client, have you tried using tweepy? For a reference implementation using this library, you can check the twitCheck client.
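For what it's worth, a minimal search with tweepy 3.x looked roughly like this; note that method names changed in later major versions (api.search became api.search_tweets in tweepy 4), so treat this as a sketch using the credential names from the question:

import tweepy

# Same placeholder credentials as in the question
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET_KEY)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api = tweepy.API(auth)

for tweet in api.search(q='delhi elections'):
    print(tweet.text)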
