Is there a way to output a PowerShell log through Python?

I'm running a PowerShell script through Python that sends an email if a condition is met.
However, I'm running into a logical error when scheduling this program to run with Windows Task Scheduler. The program appears to run with no errors, and the scheduler reports that the task ran successfully, but I never receive an email.
However, if I run it manually through the Python IDE, it runs fine and I get an email. The Python script itself doesn't seem to be the problem; it seems to be something going on with the PowerShell portion.
Is there a way to log the PowerShell output or error code to a text file (or anywhere) through Python so I can see what is happening in the background?
Below is my full code:
import glob, os, time, subprocess, sys
from datetime import datetime
import datetime
from plyer import notification

print("Program has started.")
print("Analyzing contents of folder. This will take some time")
os.system('mode con: cols=40 lines=10')

path_to_watch = '//...Alert/Testing Folder/'
print("Right before we make the list of files")
list_of_files = glob.glob('//...Alert/Testing Folder/*')
textfile = '...Alert/Logs/NoNewFiles_Log.txt'
textNewFiles = '//...Alert/Logs/NewFilesAdded_Log.txt'

latest_file = max(list_of_files, key=os.path.getctime)
m_time = os.path.getmtime(latest_file)
dt_m = datetime.datetime.fromtimestamp(m_time).strftime('%m-%d-%Y')
dt_t = datetime.datetime.fromtimestamp(m_time).strftime('%m-%d-%Y %H:%M:%S')
print(latest_file, dt_m)

today = datetime.date.today().strftime('%m-%d-%Y')

#todays date and time
now = datetime.datetime.now()
dt_string = now.strftime("%m/%d/%Y %H:%M:%S")

#this is where you send the emails and create the alert
if dt_m != today:
    print("No new files were added today")
    notification.notify(
        title = 'Alert! No New CDR Files',
        message = 'No new files were added today. Check text file log \\...Alert\Logs',
        app_icon = None,
        timeout = 20,
    )
    #writes to a log file
    f = open(textfile, "a")
    f.write("Program ran on "+dt_string+" and it was found that no new files were added. Last file update was on "+dt_t+"\n--------\n")
    f.close()
    #powershell path
    #This grabs a powershell script I made and sends an email out.
    ps = '//...Alert/Emailing_Users_Scripts/No_New_File_Email_Alert.ps1'
    p = subprocess.Popen(["powershell", ps], stdout=subprocess.PIPE)
    p_out, p_err = p.communicate()
    print(p_out)
elif dt_m == today:
    print("New files added today. Ending with: ", latest_file)
    #writes to a log file
    f = open(textNewFiles, "a")
    f.write("Program ran on "+dt_string+" and new files were added today. Ending with: "+latest_file+". Last file update was on "+dt_t+"\n--------\n")
    f.close()
    #powershell path
    #This grabs a powershell script I made and sends an email out.
    ps = '//...Alert/Emailing_Users_Scripts/Email_Alert.ps1'
    #p = subprocess.Popen(["powershell","-ExecutionPolicy","Unrestricted",ps], stdout=subprocess.PIPE)
    p = subprocess.Popen(["powershell", ps], stdout=subprocess.PIPE)
    p_out, p_err = p.communicate()
    print(p_out)
    print(p_err)
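One way to see what the PowerShell call produced when the task runs unattended is to capture both stdout and stderr and append them to a text file. Below is a minimal sketch, assuming the same ps variable as above; the log file name is hypothetical:

import subprocess

# Run the script and capture both its output and its errors as text.
result = subprocess.run(
    ["powershell", "-ExecutionPolicy", "Bypass", "-File", ps],
    capture_output=True, text=True,
)

# Append everything to a log file (hypothetical name) so the scheduled run leaves a trace.
with open("//...Alert/Logs/PowerShell_Log.txt", "a") as log:
    log.write("Return code: {}\n".format(result.returncode))
    log.write(result.stdout)
    log.write(result.stderr)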

Related

How can I update a Python file by comparing it to a file hosted on my Raspberry Pi?

I am attempting to make a program update itself to the newest version that I have made, e.g. when I add new functionality to it. It would be useful for me to be able to upload the updated file to a central location like my Raspberry Pi and have the program update itself across all of my computers without updating each one individually.
I have written the code below, but it does not fully work. It can recognize when the file is up to date, but running the new program it downloads fails: it successfully downloads the update and deletes itself, but the new program is never run, and no error messages are shown.
Update test.py:
#updaterV1.py
import time
import requests
import os
import hashlib

time.sleep(5)
cwd = os.getcwd()
URL = r"http://[rasberry pi's ip]/update%20files/dev/hash.txt"
hash_path = os.path.join(cwd, "remote hash.txt")
with open(hash_path, "wb") as f:
    f.write(requests.get(URL).content)
with open(hash_path, "r") as hash_file:
    remotehash = (hash_file.readline()).strip()
os.remove(hash_path)

hasher = hashlib.sha256()
with open(__file__, 'rb') as self_file:
    selfunhashed = self_file.read()
    hasher.update(selfunhashed)
    selfhash = hasher.hexdigest()
print(selfhash)
print(remotehash)

if (selfhash == remotehash):
    print("program is up to date")
    input()
else:
    update_path = os.path.join(cwd, "temp name update.py")
    URL = r"http://[rasberry pi's ip]/update%20files/dev/update.py"
    with open(update_path, "wb") as f:
        f.write(requests.get(URL).content)
    with open(update_path, "r") as f:
        name = f.readline().strip()
        name = name[1:]  #use the 1st line as "#name.py" not "# name"
    update_path = os.path.join(cwd, name)
    try:
        os.remove(update_path)
    except:
        pass
    os.rename(os.path.join(cwd, "temp name update.py"), update_path)
    os.system("python \"" + update_path + "\"")
    print("removing self file now")
    os.remove(__file__)
It uses a separate TXT file, stored in the same folder, containing the hash of the program, so it can check the remote file's hash without downloading the actual file to hash it locally.
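One way to at least surface errors from the newly downloaded program is to launch it with subprocess instead of os.system, so a non-zero exit status or traceback becomes visible. This is only a sketch of that idea, not necessarily the fix, and it assumes the same update_path variable as above:

import subprocess
import sys

# Run the downloaded script with the current interpreter and wait for it.
# check=True raises CalledProcessError if it exits with a non-zero status,
# and any traceback the script prints will appear on this console.
subprocess.run([sys.executable, update_path], check=True)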

Run commands in CMD via Python and extract the data

I am trying to use the code below to run commands and extract the data from the cmd.
The file with the commands and data is a TXT file (let me know if I should change it, or whether an Excel file would be better).
The commands look something like this: ping "host name", which produces some data in the cmd. There is a list of these in the file, so it would run ping "hostname1", then on line two ping "hostname2", etc.
THE QUESTION: I want it to run every line individually, extract the results from the cmd, and store them in a TXT or Excel file. Ideally I want all the results in the same file. Is this possible, and how?
here is the code so far:
import pathlib
import subprocess

root_dir = pathlib.Path(r"path to file here")
cmds_file = root_dir.joinpath('actual file here with commands and data')
#fail = []
cmds = cmds_file.read_text().splitlines()
try:
    for cmd in cmds:
        args = cmd.split()
        print(f"\nRunning: {args[0]}")
        output = subprocess.check_output(args)
        print(output.decode("utf-8"))
        out_file = root_dir.joinpath(f"Name of file where I want results printed in")
        out_file.write_text(output.decode("utf-8"))
except:
    pass
You can use the subprocess module:
import subprocess
Then you can run a command and capture its output like this:
run = subprocess.run(command_to_execute, capture_output=True, text=True)
After that you can do print(run.stdout) to print the command output.
If you want to write it to a file, you can do this after running the code above:
with open("PATH TO YOUR FILE", "w") as file:
    file.write(run.stdout)
This writes a file that contains the output of your command.
The with statement closes the file for you; to add the output of later commands, reopen the file in "a" (append) mode:
with open("PATH TO YOUR FILE", "a") as file:
    file.write("\n" + run.stdout)
This appends the data to your file.
Remember to close files when you are done with them (or let the with statement do it); I have some bad memories about not closing a file after opening it :D
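Putting those pieces together for the original question, here is a minimal sketch that runs each command from the input file and appends all results to one output file (the file names are placeholders):

import subprocess

# Read one command per line from the input file (placeholder name).
with open("commands.txt", encoding="utf-8") as f:
    commands = [line.split() for line in f if line.strip()]

# Append every command's output to a single results file (placeholder name).
with open("results.txt", "a", encoding="utf-8") as out:
    for args in commands:
        run = subprocess.run(args, capture_output=True, text=True)
        out.write("# " + " ".join(args) + "\n")   # header line so results are easy to match to commands
        out.write(run.stdout)
        if run.stderr:
            out.write(run.stderr)
        out.write("\n")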
My plan is simple:
Open input, output file
Read input file line by line
Execute the command and direct the output to the output file
#!/usr/bin/env python3
import pathlib
import shlex
import subprocess
cmds_file = pathlib.Path(__file__).with_name("cmds.txt")
output_file = pathlib.Path(__file__).with_name("out.txt")
with open(cmds_file, encoding="utf-8") as commands, open(output_file, "w", encoding="utf-8") as output:
    for command in commands:
        command = shlex.split(command)
        output.write(f"\n# {shlex.join(command)}\n")
        output.flush()
        subprocess.run(command, stdout=output, stderr=subprocess.STDOUT, encoding="utf-8")
Notes
Use shlex.split() to simulate the bash shell's command split
The line output.write(...) is optional. You can remove it
With subprocess.run(...), stdout=output redirects the command's output to the file, so you don't have to do anything else.
Update
I updated the subprocess.run line to redirect stderr to stdout, so errors will show up in the output file as well.
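For example, if cmds.txt contains lines such as:

ping hostname1
ping hostname2

then out.txt will contain a "# ping hostname1" header line followed by that command's output, then the next header, and so on, all in one file.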

Watch logs in a folder in real time with Python

I'm trying to make a custom log watcher for a log folder using Python. The objective is simple: find a regex in the logs and write a line to a text file whenever it matches.
The problem is that the script must run constantly against a folder that may contain multiple log files with unknown names, not a single one, and it should detect the creation of new log files inside the folder on the fly.
I made a kind of tail -f in Python (copying part of the code) which constantly reads a specific log file and writes a line to a txt file when the regex is found, but I don't know how I could do this with a folder instead of a single log file, nor how the script could detect the creation of new log files inside the folder so it can read them on the fly.
#!/usr/bin/env python
import time, os, re
from datetime import datetime

# Regex used to match relevant loglines
error_regex = re.compile(r"ERROR:")
start_regex = re.compile(r"INFO: Service started:")

# Output file, where the matched loglines will be copied to
output_filename = os.path.normpath("log/script-log.txt")

# Function that will work as tail -f for python
def follow(thefile):
    thefile.seek(0, 2)
    while True:
        line = thefile.readline()
        if not line:
            time.sleep(0.1)
            continue
        yield line

logfile = open("log/service.log")
loglines = follow(logfile)
counter = 0
for line in loglines:
    if error_regex.search(line):
        counter += 1
        sttime = datetime.now().strftime('%Y%m%d_%H:%M:%S - ')
        out_file = open(output_filename, "a")
        out_file.write(sttime + line)
        out_file.close()
    if start_regex.search(line):
        sttime = datetime.now().strftime('%Y%m%d_%H:%M:%S - ')
        out_file = open(output_filename, "a")
        out_file.write(sttime + "SERVICE STARTED\n" + sttime + "Number of errors detected during the startup = {}\n".format(counter))
        counter = 0
        out_file.close()
You can use watchgod for this purpose. This may be a comment too; not sure if it deserves to be an answer.
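As a minimal sketch of that idea, watchgod can report files that are added or modified in the folder, and the script can then read only the new part of each file and apply the same regex. This assumes the "log/" directory and error_regex from the question:

import re
from watchgod import watch, Change

error_regex = re.compile(r"ERROR:")
positions = {}  # remembers how far each file has been read

for changes in watch("log/"):               # blocks until something changes in the folder
    for change, path in changes:
        if change not in (Change.added, Change.modified):
            continue
        with open(path) as f:
            f.seek(positions.get(path, 0))  # skip what was already processed
            for line in f:
                if error_regex.search(line):
                    print("match in", path, ":", line, end="")
            positions[path] = f.tell()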

Python subprocess display logs on terminal and save in file

I am running a Python script using subprocess and want to save the output to a file as well as show the live logs on the terminal.
I have written the code below and it saves the logs to a file, but it does not show the live script execution logs on the terminal.
import subprocess
import sys

TCID = sys.argv[1]
if TCID == "5_2_5_3":
    output = subprocess.check_output([sys.executable, './script.py'])
    with open('scriptout.log', 'wb') as outfile:
        outfile.write(output)
I think this will fix your issue
import subprocess

outputfile = open('scriptout.log', 'a')
process = subprocess.Popen(["ping", "127.0.0.1"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
while True:
    output = process.stdout.readline()
    if output == b'' and process.poll() is not None:
        break
    if output:
        out = output.decode()
        outputfile.write(out)
        print(out, end="")
outputfile.close()
I also tried:
import subprocess

output = subprocess.check_output(["ping", "127.0.0.1"])
with open('scriptout.log', 'wb') as outfile:
    print(output)
    outfile.write(output)
but this only outputs after the command execution ends. I also want to try the logging module, but I don't know how to use it, sorry :(
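For the logging module approach mentioned above, a minimal sketch is one logger with two handlers, so every line goes to both the terminal and a file. It reuses ./script.py and scriptout.log from the question:

import logging
import subprocess
import sys

logger = logging.getLogger("script_runner")
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())               # live output on the terminal
logger.addHandler(logging.FileHandler("scriptout.log"))  # same lines saved to a file

process = subprocess.Popen([sys.executable, "./script.py"],
                           stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)
for line in process.stdout:   # read line by line while the script is still running
    logger.info(line.rstrip())
process.wait()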

Python: Reading and processing Multiple gzip files in remote server

Problem Statement:
I have multiple (1000+) *.gz files on a remote server. I have to read these files and check for certain strings. If a string matches, I have to return the file name. I have tried the following code. The program works, but it does not seem efficient, as there is a huge amount of IO involved. Can you please suggest a more efficient way to do this?
My Code:
import gzip
import os
import paramiko
import multiprocessing
from bisect import insort

synchObj = multiprocessing.Manager()
hostname = '192.168.1.2'
port = 22
username = 'may'
password = 'Apa$sW0rd'

def miniAnalyze():
    ifile_list = synchObj.list([])  # A synchronized list to store the file names containing the matched string.

    def analyze_the_file(file_single):
        strings = ("error 72", "error 81",)  # Hard coded the strings that need to be searched.
        try:
            ssh = paramiko.SSHClient()
            # Code to FTP the file to the local system from the remote machine.
            .....
            ........
            path_f = '/home/user/may/' + filename
            # Read the gzip file on the local system after the FTP is done
            with gzip.open(path_f, 'rb') as f:
                contents = f.read()
                if any(s in contents for s in strings):
                    print "File " + str(path_f) + " is a hit."
                    insort(ifile_list, filename)  # Push the file into the list if there is a match.
                    os.remove(path_f)
                else:
                    os.remove(path_f)
        except Exception, ae:
            print "Error while analyzing file " + str(ae)
        finally:
            if ifile_list:
                print "The error is at " + ifile_list
            ftp.close()
            ssh.close()

    def assign_to_proc():
        # Code to glob files matching a pattern and pass to another function via multiprocessing.
        apath = '/home/remotemachine/log/'
        apattern = '"*.gz"'
        first_command = 'find {path} -name {pattern}'
        command = first_command.format(path=apath, pattern=apattern)
        try:
            ssh = paramiko.SSHClient()
            ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            ssh.connect(hostname, username=username, password=password)
            stdin, stdout, stderr = ssh.exec_command(command)
            while not stdout.channel.exit_status_ready():
                time.sleep(2)
            filelist = stdout.read().splitlines()
            jobs = []
            for ifle in filelist:
                p = multiprocessing.Process(target=analyze_the_file, args=(ifle,))
                jobs.append(p)
                p.start()
            for job in jobs:
                job.join()
        except Exception, fe:
            print "Error while getting file names " + str(fe)
        finally:
            ssh.close()

if __name__ == '__main__':
    miniAnalyze()
The above code is slow; there is a lot of IO involved in copying each GZ file to the local system. Kindly help me find a better way to do it.
Execute a remote OS command such as zgrep, and process the command results locally. This way, you won't have to transfer the whole file contents to your local machine.
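A minimal sketch of that idea, reusing the connection details and search strings from the question: run zgrep on the remote host and only transfer the names of the matching files back.

import paramiko

hostname = '192.168.1.2'
username = 'may'
password = 'Apa$sW0rd'
strings = ("error 72", "error 81")

ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(hostname, username=username, password=password)

# -l prints only the names of files that contain a match; -e adds each pattern.
patterns = " ".join('-e "{}"'.format(s) for s in strings)
command = 'zgrep -l {} /home/remotemachine/log/*.gz'.format(patterns)

stdin, stdout, stderr = ssh.exec_command(command)
matching_files = stdout.read().splitlines()
ssh.close()

for name in matching_files:
    print(name)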
