I'm trying to call the xMatters API to pull information using HTTP basic auth.
It works fine with my VPN turned off, but when I'm connected to the company VPN I get the error below:
ConnectionError: ('Connection aborted.', ConnectionResetError(10054, 'An existing connection was forcibly closed by the remote host', None, 10054, None))
I've also tried retrying in a loop and running again after waiting a while, but no luck.
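To show what I mean by retrying in a loop, the attempt looked roughly like this (a minimal sketch; the retry counts, backoff, timeout, and placeholder URL/credentials are only illustrative):
import requests
from requests.adapters import HTTPAdapter
from requests.auth import HTTPBasicAuth
from urllib3.util.retry import Retry

# Retry transient failures a few times with exponential backoff.
session = requests.Session()
retries = Retry(total=5, backoff_factor=2, status_forcelist=[429, 500, 502, 503, 504])
session.mount("https://", HTTPAdapter(max_retries=retries))

# Placeholder URL and credentials; the real ones are in the script below.
response = session.get("https://acmeco.xmatters.com/api/xm/1/on-call",
                       auth=HTTPBasicAuth("username", "password"),
                       timeout=30)
print(response.status_code)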
import requests
from requests.auth import HTTPBasicAuth
import json
base_URL = "https://acmeco.xmatters.com/api/xm/1"
groupId = "954ada78-7b89-4356-b02c-df85ff30dfd2"
limit = "2"
membersPerShift = "5"
endpoint_URL = (
    "/on-call?groups=" + groupId
    + "&offset=0&limit=" + limit
    + "&membersPerShift=" + membersPerShift
    + "&embed=shift,members.owner"
)
url = base_URL + endpoint_URL
print("Sending request to url: " + url)
auth = HTTPBasicAuth("username", "password")
response = requests.get(url, auth=auth)
responseCode = response.status_code
if responseCode == 200:
    rjson = response.json()
    for d in rjson.get("data"):
        print(
            "Found shift with name: " + d["shift"]["name"]
            + " and id: " + d["shift"]["id"]
        )
        for md in d["members"]["data"]:
            print(
                "\tFound member with targetName: " + md["member"]["targetName"]
                + ", id: " + md["member"]["id"]
                + ", and recipientType: " + md["member"]["recipientType"]
            )
else:
    rjson = response.json()
    print(
        "The request did not succeed. Response code is: " + str(responseCode)
        + "\n" + json.dumps(rjson, indent=4, sort_keys=False)
    )
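As an aside, the same query string can also be built with requests' params argument instead of string concatenation; a sketch equivalent to the URL built above, reusing the variables defined in the script:
import requests
from requests.auth import HTTPBasicAuth

params = {
    "groups": groupId,
    "offset": 0,
    "limit": limit,
    "membersPerShift": membersPerShift,
    "embed": "shift,members.owner",
}
response = requests.get(base_URL + "/on-call", params=params,
                        auth=HTTPBasicAuth("username", "password"))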
I need to validate services and their dependencies against around 500+ URLs, and I already have Python code that does it.
The problem is that some of the URLs take a minute each to respond (because some known dependencies are down).
Since each URL is hosted on a different server, is there a way to access multiple URLs at once using the requests module?
Below is the code I run in PyCharm:
import requests
import json
import pandas
import datetime
from requests.auth import HTTPBasicAuth
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
def validate():
    line = "---------------------------------------------------------------------------------------------------"
    print("Validation started for:", datetime.datetime.now().strftime("%d-%B-%Y at %H:%M:%S"), "\n" + line)
    username = 'username'
    password = 'password'
    mydata = pandas.read_excel(r'C:\mydata.xlsx', sheet_name='Sheet1')
    for i in mydata.index:
        srno = str(mydata['Sr No'][i])
        service = mydata['Service Name'][i]
        machine = mydata['Machine Name'][i]
        url = mydata['Node'][i]
        alwaysdownservice = ['service1', 'service2']
        paydown = ['dependency1', 'dependency2', 'dependency3']
        otherdown = ['dependency3']

        def get():
            response = requests.get(url, verify=False, auth=HTTPBasicAuth(username, password))
            data = json.loads(response.text)
            status = data['Success']
            if not status:
                response = requests.get(url, verify=False, auth=HTTPBasicAuth(username, password))
                data = json.loads(response.text)
                status = data['Success']
                if not status:
                    for j in list(data['Dependencies']):
                        dependency = j['DependencyName']
                        d_status = j['Success']
                        if not d_status:
                            if service in alwaysdownservice:
                                if dependency not in paydown:
                                    print(dependency, "down on", machine, "for", service)
                            else:
                                if dependency not in otherdown:
                                    print(dependency, "down on", machine, "for", service)

        try:
            get()
        except Exception as e:
            print(line, "\n", e, "\n", srno, "| Below URL is not accessible: \n", url, "\n" + line)

validate()
You can use threads (via Python's threading library) to call multiple URLs at once. To do that, you can use the following code:
import requests
import json
import pandas
import datetime
from requests.auth import HTTPBasicAuth
from requests.packages.urllib3.exceptions import InsecureRequestWarning
import threading
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
threads = []
def validate():
    line = "---------------------------------------------------------------------------------------------------"
    print("Validation started for:", datetime.datetime.now().strftime("%d-%B-%Y at %H:%M:%S"), "\n" + line)
    username = 'username'
    password = 'password'
    mydata = pandas.read_excel(r'C:\mydata.xlsx', sheet_name='Sheet1')
    for i in mydata.index:
        srno = str(mydata['Sr No'][i])
        service = mydata['Service Name'][i]
        machine = mydata['Machine Name'][i]
        url = mydata['Node'][i]
        alwaysdownservice = ['service1', 'service2']
        paydown = ['dependency1', 'dependency2', 'dependency3']
        otherdown = ['dependency3']

        def get():
            response = requests.get(url, verify=False, auth=HTTPBasicAuth(username, password))
            data = json.loads(response.text)
            status = data['Success']
            if not status:
                response = requests.get(url, verify=False, auth=HTTPBasicAuth(username, password))
                data = json.loads(response.text)
                status = data['Success']
                if not status:
                    for j in list(data['Dependencies']):
                        dependency = j['DependencyName']
                        d_status = j['Success']
                        if not d_status:
                            if service in alwaysdownservice:
                                if dependency not in paydown:
                                    print(dependency, "down on", machine, "for", service)
                            else:
                                if dependency not in otherdown:
                                    print(dependency, "down on", machine, "for", service)

        try:
            t = threading.Thread(target=get)  # Using threading over here
            t.start()
            threads.append(t)
        except Exception as e:
            print(line, "\n", e, "\n", srno, "| Below URL is not accessible: \n", url, "\n" + line)

validate()

for thread in threads:
    thread.join()
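An alternative to managing Thread objects by hand is concurrent.futures.ThreadPoolExecutor, which caps the number of simultaneous requests and collects results or exceptions for you. A minimal sketch under the same assumptions as the script above (same spreadsheet column, same placeholder credentials, and only the top-level 'Success' flag is checked):
from concurrent.futures import ThreadPoolExecutor, as_completed

import pandas
import requests
from requests.auth import HTTPBasicAuth

mydata = pandas.read_excel(r'C:\mydata.xlsx', sheet_name='Sheet1')

def check(url):
    # One GET per URL; verify=False mirrors the original script.
    response = requests.get(url, verify=False,
                            auth=HTTPBasicAuth('username', 'password'), timeout=90)
    return url, response.json().get('Success')

with ThreadPoolExecutor(max_workers=20) as pool:
    futures = [pool.submit(check, url) for url in mydata['Node']]
    for future in as_completed(futures):
        try:
            url, ok = future.result()
            if not ok:
                print(url, "reported Success = False")
        except Exception as e:
            print("Request failed:", e)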
For people who need the solution: I based my fix on an answer from @Yurii Kramarenko, which worked perfectly; my script now finishes in about 30 seconds instead of 10-11 minutes.
My script:
import asyncio
import datetime
import json

import aiohttp
import pandas

def validate():
    alwaysdownservice = ['service1', 'service2']
    paydown = ['dependency1', 'dependency2', 'dependency3']
    otherdown = ['dependency3']
    username = 'username'
    password = 'password'
    mydata = pandas.read_excel(r'C:\mydata.xlsx', sheet_name='Sheet1')
    urls = mydata['urls']
    line = "---------------------------------------------------------------------------------------------------"
    print("Validation started for:", datetime.datetime.now().strftime("%d-%B-%Y at %H:%M:%S"), "\n" + line)

    async def fetch(session, url):
        async with session.get(url, auth=aiohttp.BasicAuth(username, password), ssl=False) as response:
            data = await response.text()
            data = json.loads(data)
            status = data['Success']
            if not status:
                for j in list(data['Dependencies']):
                    dependency = j['DependencyName']
                    d_status = j['Success']
                    if not d_status:
                        if service in alwaysdownservice:
                            if dependency not in paydown:
                                print("Dependency -",
                                      "\'" + dependency + "\'", "down on", "\nURL -", url, "\n" + line)
                        else:
                            if dependency not in otherdown:
                                print("Dependency -",
                                      "\'" + dependency + "\'", "down on", "\nURL -", url, "\n" + line)
            print(url, "validated at:", datetime.datetime.now().strftime("%H:%M:%S"))

    async def fetch_all(urls, loop):
        async with aiohttp.ClientSession(loop=loop) as session:
            results = await asyncio.gather(*[fetch(session, url) for url in urls], return_exceptions=True)

    if __name__ == '__main__':
        loop = asyncio.get_event_loop()
        htmls = loop.run_until_complete(fetch_all(urls, loop))
        print("Validation completed for:",
              datetime.datetime.now().strftime("%d-%B-%Y at %H:%M:%S"), "\n" + line, "\n" + line)

validate()
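One refinement worth mentioning: with 500+ URLs, gather fires every request at once. If that overwhelms the servers, the number of simultaneous connections can be capped through aiohttp's connector. A sketch of a bounded fetch_all, reusing the fetch coroutine above (the limit of 50 is arbitrary):
import asyncio
import aiohttp

async def fetch_all(urls, loop):
    # limit=50 caps how many TCP connections are open at the same time.
    connector = aiohttp.TCPConnector(limit=50, ssl=False)
    async with aiohttp.ClientSession(loop=loop, connector=connector) as session:
        return await asyncio.gather(*[fetch(session, url) for url in urls],
                                    return_exceptions=True)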
I found the problem! In the SendMessage function I was using UserID (with capital letters) instead of userid (the actual parameter passed to each thread), so Python printed the UserID from the for loop instead of the individual userid passed to each thread. It was only a logging problem; the program sent the messages correctly.
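A minimal illustration of the mistake (names are hypothetical): inside the thread target you have to log the parameter that was passed in, not the outer loop variable, because the outer name is looked up at log time and holds whatever value the main loop has reached by then.
import threading

def send(userid):
    # Correct: use the parameter bound when the thread was created.
    print("sending to", userid)
    # Wrong (my original bug): print("sending to", UserID) would look up the
    # module-level loop variable and log whatever value it holds right now.

for UserID in ["user-1", "user-2", "user-3"]:
    threading.Thread(target=send, args=[UserID]).start()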
I have a for loop that iterates over the elements of a user list. On each iteration I would like to start a separate background thread to send a message to that user. By "send a message" I mean a simple POST request made with the requests library. At the end of the thread, a line is written to the console. After every 24 requests (so every 24 threads) the app needs to pause for about a second.
Success = 0
Bounces = 0

def SendMessage(botid, token, userid, messageid, tag):
    global Success
    global Bounces
    try:
        payload = {...}
        r = requests.post("...", params=payload, headers=head, timeout=2)
        # problem with request?
        pjson = json.loads(r.text)
        if r.status_code != 200:
            log(str(r.status_code) + " " + pjson["result"] + " UserID: " + UserID + "; URL: " + "..." + BotID + "/users/" + UserID + "/send; Params: " + str(payload))
            Bounces += 1
            return
        Success += 1
        return
    except requests.exceptions.Timeout:
        # wait for connection to be available again!
        while not conn_available():
            print("... Waiting for a new connection...")
            time.sleep(10)
        log("Request timed out. UserID: " + UserID + "; URL: " + "..." + BotID + "/users/" + UserID + "/send; Params: " + str(payload))
        Bounces += 1
    except requests.exceptions.ConnectionError:
        log("Unable to connect. UserID: " + UserID + "; URL: " + "..." + BotID + "/users/" + UserID + "/send; Params: " + str(payload))
        Bounces += 1
    except requests.exceptions.HTTPError:
        log("Invalid request. UserID: " + UserID + "; URL: " + "..." + BotID + "/users/" + UserID + "/send; Params: " + str(payload))
        Bounces += 1
    except requests.exceptions.RequestException:
        log("Invalid request. UserID: " + UserID + "; URL: " + "..." + BotID + "/users/" + UserID + "/send; Params: " + str(payload))
        Bounces += 1

while True:
    newMsgsReq = ""
    try:
        # Check for new messages
        newMsgsReq = requests.get("...", timeout=2)
        if newMsgsReq.text == "false":
            # exit sub
            time.sleep(2)
            continue
    except requests.exceptions.HTTPError as errh:
        log("Request has failed: There was an error in the request: [" + str(errh) + "]")
        time.sleep(2)
        continue
    except requests.exceptions.ConnectionError as errc:
        log("Request has failed: check internet connection & retry: [" + str(errc) + "]")
        time.sleep(2)
        continue
    except requests.exceptions.Timeout as errt:
        log("Request has failed: check internet connection & retry: [" + str(errt) + "]")
        time.sleep(2)
        continue
    except requests.exceptions.RequestException as err:
        log("Request has failed: There was an error in the request: [" + str(err) + "]")
        time.sleep(2)
        continue

    # we have a message!!!
    # Extract BotID, Token, MessageID
    msgInf = newMsgsReq.text.split("|")
    MessageID = msgInf[0]
    BotID = msgInf[1]
    Token = msgInf[2]
    Tag = msgInf[3]
    del msgInf[0:4]
    suc("New message found: " + str(MessageID))
    suc("Total recipients: " + str(len(msgInf)))

    # Begin send!
    Cycles = 0
    TotCycles = 0
    # Loop through msgInf
    for UserID in msgInf:
        # Create the thread.
        process = threading.Thread(target=SendMessage, args=[BotID, Token, UserID, MessageID, Tag])
        process.start()
        TotCycles += 1
        pb.print_progress_bar(TotCycles)
        Cycles += 1
        if Cycles == 24:
            time.sleep(1)
            Cycles = 0
    suc("Message " + str(MessageID) + " sent successfully (" + str(Success) + " success, " + str(Bounces) + " bounces")
    Success = 0
    Bounces = 0
    time.sleep(3)
Let's say my user list is:
{1, 2, 3, 4, ..., 24, 25, ...}. I expect my application to output:
1. Message 1 sent successfully...
2. Message 2 sent successfully...
...
24. Message 24 sent successfully.
Instead, I am getting this output:
1. Message 1 sent successfully.
2. Message 1 sent successfully.
...
24. Message 1 sent successfully.
So all the 24 outputs are related to the first of the 24 ids. It seems like the for loop does not proceed...
This prints the incremented counter without any trouble, so I think you may need to provide all of your code and some sample input.
import threading
import time

def SendMessage(userid):
    print(userid)

while True:
    cycles = 1
    for user_id in [1, 2, 3]:
        process = threading.Thread(target=SendMessage, args=[user_id])
        process.start()
        cycles += 1
        if cycles == 24:
            time.sleep(1)
            cycles = 0
    time.sleep(3)
Run it on repl.it
I am trying to request a definition from the Oxford Dictionaries API so that when a user types !define (user's word) the bot returns the definition. I am having a few issues; see the code below.
@client.command()
async def define(word_id):
    app_id = '************'
    app_key = '***********'
    language = 'en'
    url = 'https://od-api.oxforddictionaries.com:443/api/v1/entries/' + language + '/' + word_id.lower()
    r = requests.get(url, headers={'app_id': app_id, 'app_key': app_key})
    await client.say("The definition is " + ("text \n" + r.text))
The error I'm getting is as follows:
discord.errors.HTTPException: BAD REQUEST (status code: 400) + discord.ext.commands.errors.CommandInvokeError: Command raised an exception: HTTPException: BAD REQUEST (status code: 400)
Here is what they expect me to use:
import requests
import json

# TODO: replace with your own app_id and app_key
app_id = '****'
app_key = '******'
language = 'en'
word_id = 'Ace'
url = 'https://od-api.oxforddictionaries.com:443/api/v1/entries/' + language + '/' + word_id.lower()
r = requests.get(url, headers={'app_id': app_id, 'app_key': app_key})
print("code {}\n".format(r.status_code))
print("text \n" + r.text)
print("json \n" + json.dumps(r.json()))
I am trying to remove a host from Cloudera Manager 5.3.2 using cm_api 9.0.0.
I tried
api.get_cloudera_manager().hosts_decommission([host])
api.get_cluster("cluster").remove_host(host)
but got an error for remove_host():
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib/python2.6/site-packages/cm_api/endpoints/clusters.py", line 218, in remove_host
return self._delete("hosts/" + hostId, ApiHostRef, api_version=3)
File "/usr/lib/python2.6/site-packages/cm_api/endpoints/types.py", line 352, in _delete
api_version)
File "/usr/lib/python2.6/site-packages/cm_api/endpoints/types.py", line 380, in _call
api_version)
File "/usr/lib/python2.6/site-packages/cm_api/endpoints/types.py", line 139, in call
ret = method(path, params=params)
File "/usr/lib/python2.6/site-packages/cm_api/resource.py", line 124, in delete
return self.invoke("DELETE", relpath, params)
File "/usr/lib/python2.6/site-packages/cm_api/resource.py", line 63, in invoke
headers=headers)
File "/usr/lib/python2.6/site-packages/cm_api/http_client.py", line 161, in execute
raise self._exc_class(ex)
cm_api.api_client.ApiException: ip-10-0-8-187.ec2.internal still has roles assigned to it. (error 400)
What is the right sequence to remove a host from the cluster, or is there a single cm_api command that does this?
Thanks
This Python script removes hosts from the cluster. The steps are:
stop and decommission all roles on the host
remove the roles from the host
identify and delete the roles one by one
remove the host from the cluster
remove the host from Cloudera Manager
The script removes hosts from a Cloudera-managed cluster running in AWS. It is intended to scale down worker nodes (NodeManager role) and gateway roles once there is no resource demand.
You can adapt the script to your environment.
#!/bin/python
import httplib2
import os
import requests
import json
import boto3
import time
from requests.auth import HTTPBasicAuth
os.environ["AWS_ACCESS_KEY_ID"] = "ACCESS_KEY"
os.environ["AWS_SECRET_ACCESS_KEY"] = "SECRET_ACCESS_KEY"
os.environ["AWS_DEFAULT_REGION"] = "us-east-1"
region='us-east-1'
metadata = requests.get(url='http://169.254.169.254/latest/meta-data/instance-id')
instance_id = metadata.text
host = requests.get(url='http://169.254.169.254/latest/meta-data/hostname')
host_id = host.text
username='admin'
password='admin'
cluster_name='cluster001'
scm_protocol='http'
scm_host='host.compute-1.amazonaws.com'
scm_port='7180'
scm_api='v17'
client = boto3.client('autoscaling')
ec2 = boto3.client('autoscaling', region_name=region)
response = client.describe_auto_scaling_instances(InstanceIds=[instance_id,])
state = response['AutoScalingInstances'][0]['LifecycleState']
print "vm is in " + state
if state == 'Terminating:Wait':
    print "host decommission started"

    ## decommission host
    service_url = scm_protocol + '://' + scm_host + ':' + scm_port + '/api/' + scm_api + '/cm/commands/hostsDecommission'
    #service_url = scm_protocol + '://' + scm_host + ':' + scm_port + '/api/' + scm_api + '/cm/hostsRecommission'
    #service_url = scm_protocol + '://' + scm_host + ':' + scm_port + '/api/' + scm_api + '/cm/commands/hostsStartRoles'
    print service_url
    headers = {'content-type': 'application/json'}
    req_body = { "items":[ host_id ]}
    print req_body
    req = requests.post(url=service_url, auth=HTTPBasicAuth(username, password), data=json.dumps(req_body), headers=headers)
    print req.text
    time.sleep(120)

    ## delete roles in a host
    api_url = scm_protocol + '://' + scm_host + ':' + scm_port + '/api/' + scm_api + '/hosts/' + host_id
    req = requests.get(api_url, auth=HTTPBasicAuth(username, password))
    a = json.loads(req.content)
    for i in a['roleRefs']:
        scm_uri = '/api/' + scm_api + '/clusters/' + cluster_name + '/services/' + i['serviceName'] + '/roles/' + i['roleName']
        scm_url = scm_protocol + '://' + scm_host + ':' + scm_port + scm_uri
        print scm_url
        req = requests.delete(scm_url, auth=HTTPBasicAuth(username, password))
        print req.text
        time.sleep(10)

    ## remove host from cluster
    service_url = scm_protocol + '://' + scm_host + ':' + scm_port + '/api/' + scm_api + '/clusters/' + cluster_name + '/hosts/' + host_id
    print service_url
    req = requests.delete(service_url, auth=HTTPBasicAuth(username, password))
    time.sleep(10)

    ## remove host from cloudera manager
    os.system("/etc/init.d/cloudera-scm-agent stop")
    service_url = scm_protocol + '://' + scm_host + ':' + scm_port + '/api/' + scm_api + '/hosts/' + host_id
    print service_url
    req = requests.delete(service_url, auth=HTTPBasicAuth(username, password))
    print req.text
    time.sleep(10)

    ## refresh cluster configuration
    service_url = scm_protocol + '://' + scm_host + ':' + scm_port + '/api/' + scm_api + '/clusters/' + cluster_name + '/commands/refresh'
    print service_url
    req = requests.post(service_url, auth=HTTPBasicAuth(username, password))
    print req.text
    time.sleep(10)

    ## deploy client configuration
    service_url = scm_protocol + '://' + scm_host + ':' + scm_port + '/api/' + scm_api + '/clusters/' + cluster_name + '/commands/deployClientConfig'
    print service_url
    req = requests.post(service_url, auth=HTTPBasicAuth(username, password))
    print req.text
    time.sleep(10)
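For completeness, roughly the same sequence can be expressed with the cm_api Python bindings the question used. This is an untested sketch; the CM host, cluster name, credentials, and host id are placeholders:
from cm_api.api_client import ApiResource

api = ApiResource('scm-host', username='admin', password='admin', version=9)
cluster = api.get_cluster('cluster001')
host_id = 'worker.compute-1.amazonaws.com'  # placeholder

# 1. Decommission all roles on the host and wait for the command to finish.
api.get_cloudera_manager().hosts_decommission([host_id]).wait()

# 2. Delete every role still assigned to the host, service by service.
for ref in api.get_host(host_id).roleRefs:
    cluster.get_service(ref.serviceName).delete_role(ref.roleName)

# 3. Remove the host from the cluster, then from Cloudera Manager itself.
cluster.remove_host(host_id)
api.delete_host(host_id)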
I am trying to build a PoC with Copy's REST API, but I run into a problem when I try to get the access token:
Message: oauth_problem=signature_invalid&debug_sbs=GET&https%3A%2F%2Fapi.copy.com%...
@app.route('/get_access_token')
def get_access_token():
    print "Get Access Token"
    oauth_verifier = request.args['oauth_verifier']
    oauth_token = request.args['oauth_token']
    print oauth_token + " & " + oauth_verifier

    # Create your consumer with the proper key/secret.
    consumer = oauth.Consumer(key=CONSUMER_KEY, secret=CONSUMER_SECRET)
    print "Consumer: ", consumer
    client = oauth.Client(consumer)

    url = access_url + "?oauth_verifier=%s&oauth_token=%s" % (oauth_verifier, oauth_token)
    print url

    resp, content = client.request(url, "GET")
    print "Resp: ", resp
    print "Content: ", content
    return content
I would appreciate any help.
I was able to solve my own issue. The problem was that I was creating a new consumer (instead of reusing the one from the first step of the OAuth handshake) and not using the oauth.Token provided by the library (I had passed oauth_verifier and oauth_token through a workaround).
The solution:
@app.route('/get_access_token')
def get_access_token():
    print "Get Access Token"
    try:
        oauth_verifier = request.args['oauth_verifier']
        oauth_token = request.args['oauth_token']
        print oauth_token + " & " + oauth_verifier

        token = oauth.Token(oauth_token, request_token_secret)  # request_token_secret is global
        token.set_verifier(oauth_verifier)
        client = oauth.Client(consumer, token)  # consumer is global

        url = "https://api.copy.com/oauth/access"
        resp, content = client.request(url, "GET")
        print "Resp: ", resp
        print "Content: ", content
        return content
    except Exception as e:
        return str(e)