I have a problem with a simple Python script (run as CGI) that triggers a Jenkins job and returns intermediate results while the Jenkins job is running. The script sleeps, polls Jenkins again, and prints progress information to the browser.
This generally works fine, but on Firefox 90.0.2 I get nothing but a blank page until the job is done (the job does get triggered).
On Edge (version 91.0.864.70) everything works as expected.
Here are the relevant parts of my Python script:
#!path/python.exe
import functools
import time

import requests

# Make print flush immediately so output streams to the browser as it is produced
print = functools.partial(print, flush=True)

jenkins_url = "https://xxxx"
auth = ("user", "password")
job_name = "job"
request_url = "{0:s}/job/{1:s}/build?token=tokenName".format(
    jenkins_url,
    job_name,
)

print("Content-Type: text/html\n")
print("Determining next build number<br>")
job = requests.get(
    "{0:s}/job/{1:s}/api/json".format(
        jenkins_url,
        job_name,
    ),
    auth=auth,
).json()
next_build_number = job['nextBuildNumber']
next_build_url = "{0:s}/job/{1:s}/{2:d}/api/json".format(
    jenkins_url,
    job_name,
    next_build_number,
)

print("Triggering build: {0:s} #{1:d}<br>".format(job_name, next_build_number))
response = requests.post(request_url, auth=auth)
print("Job triggered successfully<br>")

while True:
    print("Querying Job current status...<br>")
    try:
        build_data = requests.get(next_build_url, auth=auth).json()
    except ValueError:
        # The build page has no JSON yet while the build sits in the queue
        print("No data, build still in queue<br>")
        print("Sleep for 20 sec<br>")
        time.sleep(20)
        continue
    print("Building: {0}<br>".format(build_data['building']))
    building = build_data['building']
    if building is False:
        break
    else:
        print("Sleep for 60 sec<br>")
        time.sleep(60)

print("Job finished with status: {0:s}<br>".format(build_data['result']))
Any suggestions or hints are greatly appreciated!
Thanks
I found a lot of "How to start Python at Windows startup" posts, but maybe I have something different here...
I created a scheduled task that runs at Windows startup (I want it to run before any user logs in) using a .vbs file.
The VBS runs correctly, but the .py file doesn't run.
The VBS logs to FILE.log correctly, but the Python script doesn't log anything (I have since removed its logging).
It isn't called...
I tried modifying the VBS to use the full path to Python and to my script, but nothing changed... the Python script still does not appear to be executed.
Why?
VBS file:
MyLog = "C:\Users\myUser\Desktop\FILE.log"
Set fso = CreateObject("Scripting.FileSystemObject")
If fso.FileExists(MyLog) = False Then
    Set MyOutputLog = fso.CreateTextFile(MyLog)
Else
    Set MyOutputLog = fso.OpenTextFile(MyLog, 8, True)
End If
MyOutputLog.WriteLine CStr(Now()) & " - VBS is ok"
MyOutputLog.Close()
Set oShell = CreateObject("WScript.Shell")
oShell.Run "python.exe ./myScript.py"
Alternative .bat file:
cd C:\Users\myUser\Desktop\
C:\Users\myUser\AppData\Local\Microsoft\WindowsApps\PythonSoftwareFoundation.Python.3.9_qbxxxxxx8p0\python.exe "C:/Users/myUser/Desktop/myscript.py"
Python file:
import json
import socket
import time

import requests

TOKEN = "<my-bot-token>"
URL = "https://api.telegram.org/bot{}/".format(TOKEN)
CHAT = 111111111  # my chat ID
TEXT = "my message"

def get_IPpubl():
    # Ask ipinfo.io for the current public IP
    endpoint = 'https://ipinfo.io/json'
    response = requests.get(endpoint, verify=True)
    if response.status_code != 200:
        print('Status:', response.status_code, 'Problem with the request. Exiting.')
        exit()
    data = response.json()
    return data['ip']

# Check whether the DDNS entry matches the current public IP
def check_ddns():
    IPddns = socket.gethostbyname('my.dynamic.dns')
    IPpubl = get_IPpubl()
    if IPddns == IPpubl:
        return True
    return False

# Check whether I'm online
def test_connection():
    try:
        socket.create_connection(('google.com', 80))
        return True
    except OSError:
        return False

# Helper functions for the Telegram Bot API
def get_url(url):
    response = requests.get(url)
    content = response.content.decode("utf8")
    return content

def get_json_from_url(url):
    content = get_url(url)
    js = json.loads(content)
    return js

def get_updates():
    url = URL + "getUpdates"
    js = get_json_from_url(url)
    return js

def get_last_chat_id_and_text(updates):
    num_updates = len(updates["result"])
    last_update = num_updates - 1
    text = updates["result"][last_update]["message"]["text"]
    chat_id = updates["result"][last_update]["message"]["chat"]["id"]
    return (text, chat_id)

def send_message(text, chat_id):
    url = URL + "sendMessage?text={}&chat_id={}&parse_mode={}".format(text, chat_id, 'html')
    get_url(url)

# Try the connection up to 300 times, then check the DDNS and send a Telegram message
for _ in range(300):
    if test_connection():
        if check_ddns():
            TEXT = TEXT + " DDNS OK"
        else:
            TEXT = TEXT + " DDNS needs an update"
        send_message(TEXT, CHAT)
        break
    time.sleep(5)
Task Scheduler itself is working, because the VBS does run...
Here is what I selected in the task:
[General]
Account: myuser
Run whether user is logged on or not
Run with highest privileges
[Triggers]
At Windows Startup
[Actions]
Start a program: myVBS.vbs (also tried with /c start "C:\path_of_vbs_and_py")
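One more idea I have for debugging: make myScript.py write to an absolute log path as its very first statement, so I can tell whether the interpreter starts the script at boot at all (the log path here is just an example):

import datetime

# Very first lines of myScript.py: append to an absolute path so we can
# tell whether the script is started at boot, before anything else runs.
with open(r"C:\Users\myUser\Desktop\PYTHON.log", "a") as f:
    f.write("{} - Python script started\n".format(datetime.datetime.now()))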
My program essentially makes 10 Python requests simultaneously and processes their output in parallel. It was working for a while, but I changed something and can't work out what broke it.
The following is the calling code. It appears to freeze between lines 3 and 4 of the snippet, i.e. while performing the multi-process requests.
The line print("failed to close") never prints, which seems to indicate that the program does not reach the pool.close() instruction.
listoftensites = listoftensites
pool = Pool(processes=10)  # Initialize a pool of 10 worker processes
listoftextis, listofonline = zip(*pool.map(onionrequestthreaded, listoftensites))  # Run the function on each site in the iterable
print("failed to close ")
pool.close()  # No more tasks will be added to the pool
pool.join()
The function that is called, and in which it hangs, is below. The hang occurs immediately after the line print("failed in return"), which would appear to indicate that the requests do not terminate properly and return the expected values.
def onionrequestthreaded(onionurl):
    session = requests.session()
    session.proxies = {}
    session.proxies['http'] = 'socks5h://localhost:9050'
    session.proxies['https'] = 'socks5h://localhost:9050'
    onionurlforrequest = "http://" + onionurl
    #print(onionurlforrequest)
    print("failed with proxy session")  # debug marker: session set up
    try:
        print("failed in request")  # debug marker: about to issue the request
        r = session.get(onionurlforrequest, timeout=15, allow_redirects=True)
        online = 2
        print("failed in text extraction")  # debug marker: request returned
        textis = r.text
    except Exception:
        print("failed in except")
        #print("failed")
        online = 1
        textis = ""
    print("failed in return")  # debug marker: about to return
    return textis, online
Very confusing, but I'm probably missing something simple. Please let me know if there's a solution to this, as I'm pulling my hair out.
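One thing I have not ruled out: multiprocessing can hang when the Pool is created at import time, because worker processes re-import the main module on platforms that spawn rather than fork. A guarded version of my calling code (just a sketch, reusing onionrequestthreaded and listoftensites from above):

from multiprocessing import Pool

# Sketch: identical logic, but created under the main-module guard so
# spawned workers re-importing this file don't re-create the pool.
if __name__ == '__main__':
    pool = Pool(processes=10)
    try:
        listoftextis, listofonline = zip(*pool.map(onionrequestthreaded, listoftensites))
    finally:
        pool.close()
        pool.join()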
I am trying to do a stress test on a server using Python 3. The idea is to send an HTTP request to the API server every second for 30 minutes. I tried using requests and apscheduler to do this, but I kept getting:
Execution of job "send_request (trigger: interval[0:00:01], next run at: 2017-05-23 11:05:46 EDT)"
skipped: maximum number of running instances reached (1)
How can I make this work? Below is my code so far:
import requests, json, time, os
from apscheduler.schedulers.blocking import BlockingScheduler as scheduler

def send_request():
    url = 'http://api/url/'
    # Username and password
    credentials = {'username': 'username', 'password': 'password'}
    # Header
    headers = {'Content-Type': 'application/json', 'Client-Id': 'some string'}
    # Defining payloads
    payload = dict()
    payload['item1'] = 1234
    payload['item2'] = 'some string'
    data_array = [{"id": "id1", "data": "some value"}]
    payload['json_data_array'] = [{"time": int(time.time()), "data": data_array}]
    # Posting data
    try:
        request = requests.post(url, headers=headers, data=json.dumps(payload))
    except (requests.Timeout, requests.ConnectionError, requests.HTTPError) as err:
        print("Error while trying to POST pid data")
        print(err)
        return None
    request.close()
    print(request.content)
    return request.content

if __name__ == '__main__':
    sched = scheduler()
    print(time.time())
    sched.add_job(send_request, 'interval', seconds=1)
    sched.start()
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))
    try:
        # This is here to simulate application activity (which keeps the main thread alive).
        while True:
            pass
    except (KeyboardInterrupt, SystemExit):
        # Not strictly necessary if daemonic mode is enabled but should be done if possible
        sched.shutdown()
I tried searching on Stack Overflow, but none of the other questions do what I want so far, or maybe I missed something. I would appreciate it if someone could point me to the right thread if that is the case. Thank you very much!
I think your error is described well by the duplicate that I marked, as well as by the answer from @jeff.
Edit: apparently not... so here I'll describe how to fix the maximum instances problem:
Maximum instances problem
When you're adding jobs to the scheduler, there is an argument you can set for the maximum number of concurrent instances of the job allowed. You can read about this here:
BaseScheduler.add_job()
So, fixing your problem is just a matter of setting this to something higher:
sch.add_job(myfn, 'interval', seconds=1, max_instances=10)
But, how many concurrent requests do you want? If they take more than one second to respond, and you request one per second, you will always eventually get an error if you let it run long enough...
Schedulers
There are several scheduler options available; here are two:
BackgroundScheduler
You're importing the blocking scheduler - which blocks when started. So, the rest of your code is not being executed until after the scheduler stops. If you need other code to be executed after starting the scheduler, I would use the background scheduler like this:
from apscheduler.schedulers.background import BackgroundScheduler as scheduler

def myfn():
    # Insert your requests code here
    print('Hello')

sch = scheduler()
sch.add_job(myfn, 'interval', seconds=5)
sch.start()

# This code will be executed after the scheduler has started
try:
    print('Scheduler started, ctrl-c to exit!')
    while 1:
        # Note: a bare "pass" here creates an unthrottled loop; try
        # swapping "pass" in for "input()" and watch your CPU usage.
        # Another alternative is a short sleep: time.sleep(.1)
        input()
except KeyboardInterrupt:
    if sch.state:
        sch.shutdown()
BlockingScheduler
If you don't need other code to be executed after starting the scheduler, you can use the blocking scheduler and it's even easier:
from apscheduler.schedulers.blocking import BlockingScheduler as scheduler

def myfn():
    # Insert your requests code here
    print('Hello')

# Execute your code before starting the scheduler
print('Starting scheduler, ctrl-c to exit!')
sch = scheduler()
sch.add_job(myfn, 'interval', seconds=5)
sch.start()
I have never used the scheduler in Python before; however, this other Stack Overflow question seems to deal with that.
It means that the task is taking longer than one second and by default only one concurrent execution is allowed for a given job... -Alex Grönholm
In your case I imagine using threading would meet your needs.
If you create a class that inherits from threading.Thread in Python, something like:
import threading

class Requester(threading.Thread):
    def __init__(self, url, credentials, payload):
        threading.Thread.__init__(self)
        self.url = url
        self.credentials = credentials
        self.payload = payload

    def run(self):
        # Do the POST request here.
        # You may want to write output (errors and content) to a file
        # rather than just printing it; when using threads it gets really
        # messy if you print everything out.
        pass
Then drive it just like you already do, with a slight change:
import time

if __name__ == '__main__':
    url = 'http://api/url/'
    # Username and password
    credentials = {'username': 'username', 'password': 'password'}
    # Defining payloads
    payload = dict()
    payload['item1'] = 1234
    payload['item2'] = 'some string'
    data_array = [{"id": "id1", "data": "some value"}]
    payload['json_data_array'] = [{"time": int(time.time()), "data": data_array}]

    counter = 0
    while counter < 1800:  # 1800 requests, one per second = 30 minutes
        req = Requester(url, credentials, payload)
        req.start()
        counter += 1
        time.sleep(1)
And of course, finish the rest however you like; for example, you could make a KeyboardInterrupt be what actually ends the script, as sketched below.
This of course is a way to get around the scheduler, if that is what the issue is.
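For completeness, a minimal sketch of that interrupt-driven variant (my own illustration, assuming url, credentials, and payload are set up as above):

import time

# Sketch: fire one request per second until the user presses Ctrl+C;
# non-daemon threads are then left to finish on their own.
if __name__ == '__main__':
    try:
        while True:
            req = Requester(url, credentials, payload)
            req.start()
            time.sleep(1)
    except KeyboardInterrupt:
        print('Interrupted, letting running requests finish...')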
I'm trying to take a list of items and check for their status change based on certain processing by the API. The list will be manually populated and can vary in size up to several thousand items.
I'm trying to write a script that makes multiple simultaneous connections to the API to keep checking for the status change. For each item, once the status changes, the attempts to check must stop. Based on reading other posts on Stack Overflow (specifically, What is the fastest way to send 100,000 HTTP requests in Python?), I've come up with the following code, but the script always stops after processing the list once. What am I doing wrong?
One additional issue I'm facing is that the keyboard interrupt never fires (I'm trying Ctrl+C, but it does not kill the script).
from urlparse import urlparse
from threading import Thread
import httplib, sys
from Queue import Queue

requestURLBase = "https://example.com/api"
apiKey = "123456"

concurrent = 200
keepTrying = 1

def doWork():
    while keepTrying == 1:
        url = q.get()
        status, body, url = checkStatus(url)
        checkResult(status, body, url)
        q.task_done()

def checkStatus(ourl):
    try:
        url = urlparse(ourl)
        conn = httplib.HTTPConnection(requestURLBase)
        conn.request("GET", url.path)
        res = conn.getresponse()
        respBody = res.read()
        conn.close()
        return res.status, respBody, ourl  # Status can be 210 for error or 300 for successful API response
    except:
        print "ErrorBlock"
        print res.read()
        conn.close()
        return "error", "error", ourl

def checkResult(status, body, url):
    if "unavailable" not in body:
        print status, body, url
        keepTrying = 1
    else:
        keepTrying = 0

q = Queue(concurrent * 2)
for i in range(concurrent):
    t = Thread(target=doWork)
    t.daemon = True
    t.start()

try:
    for value in open('valuelist.txt'):
        fullUrl = requestURLBase + "?key=" + apiKey + "&value=" + value.strip() + "&years="
        print fullUrl
        q.put(fullUrl)
    q.join()
except KeyboardInterrupt:
    sys.exit(1)
I'm new to Python, so there could be syntax errors as well... and I'm definitely not familiar with multi-threading, so perhaps I'm doing something else wrong too.
In your code, the list is only read once. It should be something like:
try:
    while True:
        for value in open('valuelist.txt'):
            fullUrl = requestURLBase + "?key=" + apiKey + "&value=" + value.strip() + "&years="
            print fullUrl
            q.put(fullUrl)
        q.join()
For the interrupt issue, remove the bare except line in checkStatus or make it except Exception. A bare except catches all exceptions, including the SystemExit that sys.exit raises, and so prevents the Python process from terminating.
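A tiny illustration of the difference (my own example, in Python 2 to match the code above):

import sys

def swallowed():
    try:
        sys.exit(1)
    except:                # bare except also catches SystemExit
        print "sys.exit swallowed, process keeps running"

def propagated():
    try:
        sys.exit(1)
    except Exception:      # SystemExit does not derive from Exception
        print "never reached: the exit propagates"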
If I may make a couple of general comments, though:
- Threading is not a good fit for such large concurrency
- Creating a new connection every time is not efficient
What I would suggest is:
- Use gevent for asynchronous network I/O
- Pre-allocate a queue of connections, the same size as the concurrency number, and have checkStatus grab a connection object when it needs to make a call. That way the connections stay alive and get reused, and there is no overhead in creating and destroying them, nor the increased memory use that goes with it. A sketch of this pattern follows below.
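A rough sketch of what I mean (my own illustration, not drop-in code; the host, paths, and sizes are placeholder assumptions):

import gevent
from gevent import monkey
monkey.patch_all()  # make socket/httplib cooperative

import httplib
from gevent.queue import Queue

CONCURRENCY = 200
HOST = "example.com"

# Pre-allocate one keep-alive connection per concurrent worker; they get
# borrowed and returned instead of created and destroyed per request.
connections = Queue()
for _ in range(CONCURRENCY):
    connections.put(httplib.HTTPConnection(HOST))

def checkStatus(path):
    conn = connections.get()       # borrow a connection
    try:
        conn.request("GET", path)
        res = conn.getresponse()
        return res.status, res.read()
    finally:
        connections.put(conn)      # return it for reuse

jobs = [gevent.spawn(checkStatus, "/api?value=%d" % i) for i in range(1000)]
gevent.joinall(jobs)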
I have a working Python/Tk program that runs a CGI script based on a user selection. I'm working to cut this down to a small script that focuses on one particular CGI script. It appears to get the session id correctly, but when I launch the browser I keep getting "access denied". Since the other program works, I'm not expecting any issues from the website. Any help will be appreciated.
UPDATE:
If I use a debugger and set a breakpoint on the line print url, the URL printed in the console (as seen below) does work, so I now know the session id token is good.
Also, if I step into the webbrowser function and then step over from there, the script works as well.
Here is my code.
import json
import webbrowser
from struct import *

import tornado.ioloop
import tornado.web
import tornado.websocket
from tornado import gen

request_id = 71
ip_address = "10.22.4.14"

# ************************************************
# Coroutine to open the websocket and get a session id
# ************************************************
@gen.coroutine
def open_ws(ip, username, password):
    global client
    global request_id
    global session_id
    ws_url = "ws://" + ip + ":7011/"
    try:
        client = yield tornado.websocket.websocket_connect(ws_url, None, None, 5, None, None)
        # print("websocket %s open" % ws_url)
    except Exception:
        exit()
    # Send Mercury login request
    JSON = '{"requests":[{"request_id": %s, "login":{"username": "%s","password": "%s"}}]}' % (str(request_id), username, password)
    client.write_message(JSON)
    results = yield client.read_message()
    # print("msg is %s" % results)
    # Parse the response of the login request to get the error code
    parsed_json = json.loads(results)
    err_code = parsed_json['responses'][0]['request_response']['result']['err_code']
    if 0 == err_code:
        # Parse the response of get_command_result to get the session id
        session_id = parsed_json['responses'][0]['request_response']['login']['session_id']
        # print("login succeeded - session id: %s" % session_id)
    else:
        print("login failed")
        # error_exit(err_code)

def get_token():
    tornado.ioloop.IOLoop.instance().run_sync(lambda: open_ws(ip_address, 'admin', 'admin'))
    return session_id

session_id = get_token()
print "Token is " + session_id
url = "http://" + ip_address + "/scripts/dostuff.cgi?session=" + session_id
print url  # add breakpoint here

# launch browser
webbrowser.open(url)
Console output:
Token is 7zNSZX9liaUDFFN0ijn-LWQ8
http://10.222.4.14/scripts/dostuff.cgi?session=7zNSZX9liaUDFFN0ijn-LWQ8
Resolved: the script was exiting, and therefore closing the socket, before the browser had a chance to complete the request.
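Given that, a minimal sketch of the workaround (the delay length is an arbitrary assumption, not a measured value):

import time

# After launching the browser, keep the script (and therefore its
# socket/session) alive long enough for the CGI request to complete;
# 10 seconds is an illustrative value.
webbrowser.open(url)
time.sleep(10)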