I have a script that basically uses pssh to connect to multiple hosts, collects some information, and checks a file to see whether the information matches. The script runs fine when run directly on the Jenkins node, but when I try to run it via Jenkins, it hangs. It does create the output file; I added debug statements and can see the hosts from file.txt being printed on the Jenkins console, but otherwise it just hangs.
I'm passing the file name as one of the arguments. On Jenkins, I used a string parameter to pass the file. What am I doing wrong?
Thanks in advance.
import sys
import os
import subprocess
import re
import datetime

try:
    hosts_file = sys.argv[1]
    with open(hosts_file, 'r') as f:
        hosts = []
        for line in f:
            hosts.append(line.split()[0])
except IOError:
    print 'Error opening hosts_file'
    sys.exit(1)

for host in hosts:
    id_list = []      # reset per host (initialization was missing from the original paste)
    model_dict = {}
    cmd = ('sshpass -f password pssh -A -O UserKnownHostsFile=/dev/null -O StrictHostKeyChecking=no -H ' + host + ' -l root -i "<cmd>"')
    with open('file.txt', 'w') as fw:
        subprocess.call(cmd, shell=True, stdout=fw)
    with open('file.txt', 'r') as fr:
        for line in fr:
            match = re.search(r'some_word', line, re.I)
            if match:
                id_list.append(line.split()[-1])
                model_dict[id_list[-1]] = line.strip()
    if len(id_list) == 0:
        print "something not found for hostname => %s" % host
    with open('file.txt', 'r') as fr, open('output', 'a') as fw:
        fw.write("something ==> %s" % host + "\n")
        fw.write("*****************************************************************************************************************************************************************" + "\n")
        for line in fr:
            for item in id_list:
                match = re.search(r'%s' % item, line, re.I)
                if match:
                    my_line = model_dict[item] + ' => ' + line
                    fw.write(my_line)
                    id_list.remove(item)
    if len(id_list) > 0:
        with open('output', 'a') as fw:
            for item in id_list:
                my_line = model_dict[item] + ' => ' + 'No match found'
                fw.write(my_line + "\n")
    with open('models.txt', 'r') as fr, open('output', 'a') as fw:
        fw.write("#### Printing Controller information ####" + "\n")
        for line in fr:
            match = re.search(r'vmhba', line, re.I)
            if match:
                fw.write(line.strip() + "\n")
    os.remove('models.txt')

# I'm assuming this will print on the Jenkins console
with open('output', 'r') as fr:
    for line in fr:
        print line
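One thing worth checking, since the hang only shows up under Jenkins: the pssh call uses -A (prompt for a password), and a Jenkins job has no terminal to answer a prompt with, so anything interactive will block forever. Below is a hedged sketch of making the call non-interactive and time-bounded; the timeout value and the stdin redirection are assumptions, not part of the original script.

import os
import subprocess

host = 'example-host'   # illustrative; in the script above this comes from hosts_file

# Same pssh invocation as above, plus:
#  * -t 60  : pssh's per-host timeout (seconds), so a stuck host can't hang the build
#  * stdin from /dev/null, so any unexpected interactive prompt fails fast
#    instead of waiting for input that a Jenkins job can never provide.
# The 60-second value is an illustrative assumption.
cmd = ('sshpass -f password pssh -t 60 -A -O UserKnownHostsFile=/dev/null '
       '-O StrictHostKeyChecking=no -H ' + host + ' -l root -i "<cmd>"')
with open('file.txt', 'w') as fw, open(os.devnull, 'r') as devnull:
    subprocess.call(cmd, shell=True, stdin=devnull, stdout=fw)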
Related
I have a log file of about 1 TB. I am not sure how to run this Python script from the command line. I use the sys library, but my CSV data is still not being added.
Below is my Python code.
import re
import sys
from csv import writer
import datetime

log_file = '/Users/kiya/Desktop/mysql/ipscan/ip.txt'
output_file = '/Users/kiya/Desktop/mysql/ipscan/output.csv'

try:
    ip_file = sys.argv[1]
except Exception:
    print("usage: python3 {} [ip file]".format(sys.argv[0]))
    sys.exit()

name_to_check = 'MBX_AUTHENTICATION_FAILED'

with open(log_file, encoding="utf-8") as infile:
    for line in infile:
        if name_to_check in line:
            username = re.search(r'(?<=userName=)(.*)(?=,)', line)
            username = username.group()
            ip = re.search(r'(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])', line)
            ip = ip.group()
            with open(output_file, 'a') as outfile:
                outfile.write('{username},{ip}\n'.format(username=username, ip=ip))
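Note that the snippet above always reads the hardcoded log_file, while the path taken from sys.argv[1] (ip_file) is never used. Below is a minimal, hedged sketch of wiring the command-line argument through; the script name in the comment is illustrative.

import sys

# Invoke the script from a shell with the log file as the first argument, e.g.
#   python3 parse_log.py /Users/kiya/Desktop/mysql/ipscan/ip.txt
if len(sys.argv) > 1:
    log_file = sys.argv[1]   # use the path supplied on the command line
else:
    sys.exit("usage: python3 {} [log file]".format(sys.argv[0]))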
Try this; it works well. If the problem persists, check your search regex:
import re
import datetime
from sys import argv

log_file = ""
if len(argv) > 1:   # argv[0] is the script name, so check for a second element
    log_file = argv[1]
else:
    quit("No log_file specified, exiting script.")

# name_to_check and output_file are as defined in the question above
with open(log_file, encoding="utf-8") as infile:
    for line in infile:
        if name_to_check in line:
            username = re.search(r'(?<=userName=)(.*)(?=,)', line)
            username = username.group()
            date = re.search(r'(?P<date>\d{8})\s+(?P<time>\d{9})\+(?P<zone>\d{4})', line)
            date = datetime.datetime.strptime(date.group('date'), "%Y%m%d").strftime("%Y-%m-%d")
            print(date)
            time = re.search(r'(?P<date>\d{8})\s+(?P<time>\d{9})\+(?P<zone>\d{4})', line)
            time = datetime.datetime.strptime(time.group('time'), "%H%M%S%f").strftime("%H:%M:%S")
            print(time)
            ip = re.search(r'(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])', line)
            with open(output_file, "ab", buffering=0) as outfile:
                outfile.write(("{},{},{},{}\n".format(username, date, time, ip)).encode())
I have a text file of key-value pairs separated by '='. I would like to replace the line if the key matches; if not, I would like to append it at the bottom. I've tried several ways, including:
def split_command_key_and_value(command):
    if '=' in command:
        command2 = command.split('=')
        return command2

def test(command, path):
    command2 = split_command_key_and_value(command)
    pattern = command2[0]
    myfile = open(path, 'r')   # open file handle for read
    # use r'', you don't need to replace '\' with '/'
    result = open(path, 'w')   # open file handle for write
    for line in myfile:
        line = line.strip()    # it's always good practice to strip what you read from files
        if pattern in line:
            line = command     # if match, replace line
        result.write(line)     # write every line
    myfile.close()             # don't forget to close the file handle
    result.close()
I know the above is just to replace text, but it deletes the text in the file, and I can't see why. Could someone point me in the right direction?
Thanks
Update:
I'm almost there, but some of my lines have similar keys, so multiple lines match when only one should. I've tried to incorporate a regex word boundary in my loop with no luck. My code is below. Does anyone have a suggestion?
There is some text in the file that isn't key-value, so I would like to skip that.
def modify(self, name, value):
    comb = name + ' ' + '=' + ' ' + value + '\n'
    with open('/file/', 'w') as tmpstream:
        with open('/file/', 'r') as stream:
            for line in stream:
                if setting_name in line:
                    tmpstream.write(comb)
                else:
                    tmpstream.write(line)
I think I got it. See code below.
def modify(self, name, value):
    comb = name + ' ' + '=' + ' ' + value + '\n'
    mylist = []
    with open('/file/', 'w') as tmpstream:
        with open('/file/', 'r') as stream:
            for line in stream:
                a = line.split()
                b = re.compile('\\b' + name + '\\b')
                if len(a) > 0:
                    if b.search(a[0]):
                        tmpstream.write(comb)
                    else:
                        tmpstream.write(line)
I spoke too soon. It stops at the key-value I provide. So, it only writes one line, and doesn't write the lines that don't match.
def modify(name, value):
    comb = name + ' ' + '=' + ' ' + value + '\n'
    mylist = []
    with open('/file1', 'w') as tmpstream:
        with open('/file2', 'r') as stream:
            for line in stream:
                a = line.split()
                b = re.compile('\\b' + name + '\\b')
                if len(a) > 0:
                    if b.search(a[0]):
                        tmpstream.write(comb)
                    else:
                        tmpstream.write(line)
Can anyone see the issue?
Because when you open the file for writing
result = open(path, 'w') # open file handle for write
you erase its contents. Try writing to a different file and, after all the work is done, replace the old file with the new one. Or read all the data into memory, then process it and write it back to the file:
with open(path) as f:
    data = f.read()
with open(path, 'w') as f:
    for line in data.splitlines():   # iterate over lines, not characters
        # make the changes here, then write the line back
        f.write(line + '\n')
First of all, you are reading and writing the same file. You could first read it all and then write it back line by line:
with open(path, 'r') as f:
    myfile = f.read()            # read everything in the variable "myfile"

result = open(path, 'w')         # open file handle for write
for line in myfile.splitlines(): # process the original file content 1 line at a time
    # as before
I strongly recommend reading Python's documentation on how to read and write files.
If you open an existing file in write mode, open(path, 'w'), its content will be erased:
mode can be (...) 'w' for only writing (an existing file with the same name will be erased)
To replace a line in Python you can have a look at this: Search and replace a line in a file in Python
Here is one of the solutions provided there, adapted to your context (tested with Python 3):
from tempfile import mkstemp
from shutil import move
from os import close

def test(filepath, command):
    # Split command into key/value
    key, _ = command.split('=')
    matched_key = False
    # Create a temporary file
    fh, tmp_absolute_path = mkstemp()
    with open(tmp_absolute_path, 'w') as tmp_stream:
        with open(filepath, 'r') as stream:
            for line in stream:
                if key in line:
                    matched_key = True
                    tmp_stream.write(command + '\n')
                else:
                    tmp_stream.write(line)
        if not matched_key:
            tmp_stream.write(command + '\n')
    close(fh)
    move(tmp_absolute_path, filepath)
Note that with the code above every line that matches key (key=blob or blob=key) will be replaced.
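If only lines whose key sits on the left of the '=' should be replaced (the "similar keys" problem from the update above), one option is to anchor the match to the start of the line and escape the key. Here is a hedged sketch under that assumption; the function name replace_or_append is illustrative, not from the original answer.

import re

def replace_or_append(filepath, command):
    """Replace the line whose key matches the key in `command`, else append it."""
    key, _, _ = command.partition('=')
    key = key.strip()
    # Anchoring at the start of the line (plus the '=') avoids matching keys
    # that merely contain `key` as a substring or values that mention it.
    key_re = re.compile(r'^\s*' + re.escape(key) + r'\s*=')
    lines = []
    matched = False
    with open(filepath, 'r') as stream:
        for line in stream:
            if key_re.match(line):
                lines.append(command + '\n')
                matched = True
            else:
                lines.append(line)
    if not matched:
        lines.append(command + '\n')
    with open(filepath, 'w') as stream:
        stream.writelines(lines)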
I am using the python nmap module to do certificate discovery and monitoring.
import nmap
import time
import datetime
from contextlib import redirect_stdout
from datetime import date
import itertools
This is the function that manages the nmap scan.
SSLmonitor = nmap.PortScanner()

def SSLmon(IPaddress):
    now = datetime.datetime.now()
    # strftime takes only the format string (the stray "r" argument has been dropped)
    filename = now.strftime("/results/%Y-%m-%dSSLmonWeekly.txt")
    filename2 = now.strftime("/results/%Y-%m-%dSSLmonWeeklyExpiring.txt")
    results = SSLmonitor.scan(hosts=IPaddress, arguments='--script=ssl-cert -p 443')
    # If statement checks to see if last scanned address has ['scan'].
    #print(results.keys())
    if 'scan' in results:
        hosts = results['scan']
        #print(hosts)
        # If host had cert on 443 try, else do pass.
        try:
            for host in hosts:  # Chunk through the tuple.
                try:
                    # Get the information for each host in the tuple
                    cert = hosts[host]['tcp'][443]['script']['ssl-cert']
                    try:
                        for host in hosts:  # Chunk through the dictionary to get the key value pairs we want.
                            try:
                                # Get the information for each host in the hecka-dictionary.
                                cert = hosts[host]['tcp'][443]['script']['ssl-cert']
                                cert2 = cert.replace('Not valid after: ', '~')
                                indexed = cert2.index('~')
                                # The comparison was truncated in the original post; "> 30 days"
                                # is assumed so that long-lived certs go to the weekly file and
                                # soon-to-expire ones to the "Expiring" file.
                                if datetime.date(int(cert2[indexed+1:indexed+5]), int(cert2[indexed+6:indexed+8]), int(cert2[indexed+9:indexed+11])) - datetime.date.today() > datetime.timedelta(days=30):
                                    with open(filename, 'a') as f:
                                        with redirect_stdout(f):
                                            print("\n", IPaddress, cert.replace(':', '=').replace('commonName=', '\ncommonName=').replace('/', '\n'))
                                else:
                                    with open(filename2, 'a') as e:
                                        with redirect_stdout(e):
                                            print("\n", IPaddress, cert.replace(':', '=').replace('commonName=', '\ncommonName=').replace('/', '\n'))
                            except Exception:
                                pass
                    except Exception:
                        pass
                except Exception:
                    pass
        except Exception:
            pass
I loop through a list of IP addresses that I know have certs on port 443 and run them through the scanner.
#--------------------------------------------------------------
# Iterate through list of hosts with discovered certs
#--------------------------------------------------------------
with open("/weeklyscanlist/DiscoveredCertsByIP.txt", "r") as text_file:
for line in itertools.islice(text_file, 1, 4250):
SSLmon(str(line))
When I was handling output like this:
if datetime.date(int(expDate[0]), int(expDate[1]), int(expDate[2])) - datetime.date.today() < datetime.timedelta(days = 30):
    print("\n", IPaddress, cert.replace(':', '=').replace('commonName=', '\ncommonName=').replace('/', '\n'), "this cert is expiring soon")
else:
    print("\n", IPaddress, cert.replace(':', '=').replace('commonName=', '\ncommonName=').replace('/', '\n'), "this cert is good for a while")
it was working fine, so I know it's the way I am handling writing the output to a file that is the problem, but I can't find a way to handle it.
I've also tried
if datetime.date(int(expDate[0]), int(expDate[1]), int(expDate[2])) - datetime.date.today() < datetime.timedelta(days = 30):
    fn = open(filename2, 'a')
    fn.write("\n", IPaddress, cert.replace(':', '=').replace('commonName=', '\ncommonName=').replace('/', '\n'))
    fn.close()
else:
    f = open(filename, 'a')
    f.write("\n", IPaddress, cert.replace(':', '=').replace('commonName=', '\ncommonName=').replace('/', '\n'))
    f.close()
without success.
Here is what you should do, following up on deweyredman’s sage advice to use string formatting:
Use string formatting to generate the output line of the file
Use an if-then to pick the filename
Only use one file write block to stay DRY
dt_diff = datetime.date(int(expDate[0]), int(expDate[1]), int(expDate[2])) - datetime.date.today()
which_filename = filename if dt_diff < datetime.timedelta(days = 30) else filename2

with open(which_filename, 'a') as f:
    line = '\n%s %s' % (
        IPaddress,
        cert.replace(':', '=').replace('commonName=', '\ncommonName=').replace('/', '\n'),
    )
    f.write(line)
It seems your args to f.write are incorrect...try:
if datetime.date(int(expDate[0]), int(expDate[1]), int(expDate[2])) - datetime.date.today() < datetime.timedelta(days = 30):
    fn = open(filename2, 'a')
    fn.write("{} {} {}".format("\n", IPaddress, cert.replace(':', '=').replace('commonName=', '\ncommonName=').replace('/', '\n')))
    fn.close()
else:
    f = open(filename, 'a')
    f.write("{} {} {}".format("\n", IPaddress, cert.replace(':', '=').replace('commonName=', '\ncommonName=').replace('/', '\n')))
    f.close()
f.write takes one argument...you're passing it three.
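An alternative worth noting, as a hedged sketch reusing the variables from the question rather than the answer's exact approach: in Python 3, print() accepts multiple arguments and a file= keyword, so the multi-argument call can be kept if it goes through print instead of f.write.

# print() joins its arguments with spaces and writes them to the given file,
# which is what the original multi-argument f.write() call was trying to do.
with open(filename2, 'a') as fn:
    print("\n", IPaddress,
          cert.replace(':', '=').replace('commonName=', '\ncommonName=').replace('/', '\n'),
          file=fn)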
I'll preface this by saying I'm new to threading. I have tried a couple of different ways and come sort of close to success, but not really.
I have a list that is created from a file as such:
with open(server_file, 'rU') as fi:
    servers = fi.read().split('\n')
I have two functions, grabdata() and runcheck(), that I need to run for each item in the list, with each pair (grabdata and then runcheck) in its own thread.
The functions:
import os
import re
import subprocess
from subprocess import Popen

def grabdata():
    for server in servers:
        print "Checking: " + str(server)
        devnull = open(os.devnull, 'wb')  # hide unnecessary output
        command = "wget -O tmp/servers/" + server + " " + server + "/avicapture.html#status"
        result = Popen(command, shell=True, stdout=subprocess.PIPE, stderr=devnull).communicate()[0]

def runcheck():
    for server in servers:
        file_path = current_dir + "/tmp/servers/" + server
        with open(file_path, 'r') as file:
            file_content = file.readlines()
        for x in file_content:
            match = re.search(r'In Progress \((.*)%\)', x)
            if match:
                temp = match.group(1)
                results = temp
                if int(temp) >= 90:
                    # closing quote was cut off in the original post; .format(results) is assumed
                    message = "\nServer: " + server + "... {0}%".format(results)
                    print(message)
How would I properly do this?
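No accepted answer is quoted here, but a minimal, hedged sketch of one way to structure it: rework the two functions to take a single server, then start one thread per server that runs the grab step followed by the check step. The placeholder function bodies and server names below are illustrative, not the poster's code.

import threading

def grabdata_one(server):
    # placeholder: the body of grabdata() above, for a single server
    pass

def runcheck_one(server):
    # placeholder: the body of runcheck() above, for a single server
    pass

def process_server(server):
    grabdata_one(server)   # fetch first...
    runcheck_one(server)   # ...then check, so the pair stays ordered per server

servers = ['server1.example.com', 'server2.example.com']   # illustrative

threads = [threading.Thread(target=process_server, args=(s,)) for s in servers]
for t in threads:
    t.start()
for t in threads:
    t.join()   # wait for every grab-then-check pair to finish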
I am using this code to search for emails in a particular file and write them into another file. I have used the 'in' operator to make sure that the emails are not duplicated.
But this code does not get past the "for line in f:" line.
Can anyone point out the mistake I have made here?
import re

tempPath = input("Please Enter the Path of the File\n")
temp_file = open(tempPath, "r")
fileContent = temp_file.read()
temp_file.close()

pattern_normal = re.compile("[-a-zA-Z0-9._]+#[-a-zA-Z0-9_]+.[a-zA-Z0-9_.]+")
pattern_normal_list = pattern_normal.findall(str(fileContent))

with open('emails_file.txt', 'a+') as f:
    for item in pattern_normal_list:
        for line in f:
            if line in item:
                print("duplicate")
            else:
                print("%s" % item)
                f.write("%s" % item)
                f.write('\n')
New solution:
tempPath = input("Please Enter the Path of the File\n")
temp_file = open(tempPath, "r")
fileContent = temp_file.read()
temp_file.close()

pattern_normal = re.compile("[-a-zA-Z0-9._]+#[-a-zA-Z0-9_]+.[a-zA-Z0-9_.]+")
addresses = list(set(pattern_normal.findall(str(fileContent))))

with open('new_emails.txt', 'a+') as f:
    f.write('\n'.join(addresses))
I think your logic was wrong; this works:
addresses = ['test#wham.com', 'heffa#wham.com']

with open('emails_file.txt', 'a+') as f:
    fdata = f.read()
    for mail in addresses:
        if not mail in fdata:
            f.write(mail + '\n')
Without reading too much into your code, it looks like you're looping line by line, checking whether the address you're also looping through exists in the line, and appending your e-mail if it doesn't. But in 99 of 100 lines the address will not be in the line, hence you'll get unwanted additions.
Output of my code snippet:
[Torxed#faparch ~]$ cat emails_file.txt
test#wham.com
Torxed#whoever.com
[Torxed#faparch ~]$ python test.py
[Torxed#faparch ~]$ cat emails_file.txt
test#wham.com
Torxed#whoever.com
heffa#wham.com
[Torxed#faparch ~]$
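One caveat on the snippet above, added as an assumption rather than part of the original answer: in Python 3, opening a file with mode 'a+' positions the stream at the end, so f.read() returns an empty string and every address looks new. Seeking to the start before reading avoids that:

addresses = ['test#wham.com', 'heffa#wham.com']

with open('emails_file.txt', 'a+') as f:
    f.seek(0)            # 'a+' starts at end-of-file in Python 3; rewind before reading
    fdata = f.read()     # existing contents, used for the duplicate check
    for mail in addresses:
        if mail not in fdata:
            f.write(mail + '\n')   # writes still go to the end in append mode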
for line in f:
Shouldn't you first call f.readlines()?
lines = f.readlines()
for line in lines:
Check this.