I want to assign one custom grain ("__hw_raid_active") that stores whether hardware (1) or software (0) RAID is used on a minion. Minions with software RAID have the path /proc/mdstat (for mdadm). So I would use the salt module function file.directory_exists, which returns a boolean and can be used in an if-statement.
This is the Python script I am trying to make work, placed in /srv/salt/_grains:
#!/usr/bin/env python
import salt.modules.file
# this is to make the module available
__salt__ = {
    'dir_exists': salt.modules.file.directory_exists
}
# Since I had errors saying module '__salt__' does not exist
# Now errors are gone but no effect on the grains list
# function:
def raiddevcheck():
# Instantiate grains dictionary
grains = {}
    # check if software RAID is on the minion (we use mdadm)
if __salt__['dir_exists']('/proc/mdstat'):
grains["__hw_raid_active"] = 0
else:
grains["__hw_raid_active"] = 1
return grains
if __name__ == '__main__':
raiddevcheck()
salt 'minion' grains.ls
No grain named: __hw_raid_active
No errors, neither from the master (in debug mode, -l debug) nor the minion, nor in /var/log/salt/master.
All I see is an almost empty file "grains" just containing {} on the minion I tested it on (it has hardware RAID).
I very much appreciate any helpful idea. Or am I totally on the wrong track with the whole "custom grain" idea managed in one central spot (_grains on the master)? Do I have to copy the Python script to the minions, and if so, where? I am still pretty new to SaltStack.
Cheers
Marco
I had errors saying module '__salt__' does not exist
Because you were running it directly instead of via the salt loader?
If you want it to work in both, then this should work:
def raiddevcheck():
# Instantiate grains dictionary
grains = {}
    # check if software RAID is on the minion (we use mdadm)
if __salt__["file.directory_exists"]("/proc/mdstat"):
grains["__hw_raid_active"] = 0
else:
grains["__hw_raid_active"] = 1
return grains
if __name__ == "__main__":
import salt.modules.file
__salt__ = {
"file.directory_exists": salt.modules.file.directory_exists
}
raiddevcheck()
Or use the os module instead:
import os.path
def raiddevcheck():
# Instantiate grains dictionary
grains = {}
    # check if software RAID is on the minion (we use mdadm)
    if os.path.isdir("/proc/mdstat"):
grains["__hw_raid_active"] = 0
else:
grains["__hw_raid_active"] = 1
return grains
if __name__ == "__main__":
raiddevcheck()
Then make sure you've synced and refreshed the grains:
salt '*' saltutil.sync_grains refresh=true
This is the solution:
get the right python link in the shebang (same as the salt-master) - not sure, but it can't hurt (did not test it)
just simple references to the method (no fancy __salt__ applied)
#!/usr/bin/python3
import salt.modules.file
def raiddevcheck():
    # Instantiate grains dictionary
grains = {}
    # check if software RAID is on the minion (we use mdadm)
if salt.modules.file.directory_exists('/proc/mdstat'):
grains["__hw_raid_active"] = 0
else:
grains["__hw_raid_active"] = 1
# time stamp container
grains["__raid_last_notify"] = 0
grains["__smart_last_notification"] = 0
return grains
if __name__ == '__main__':
raiddevcheck()
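Once the script is synced and the grains refreshed (see the sync_grains command above), the new grain can be used for targeting. A small illustrative example, assuming the values defined in the script (1 = hardware RAID):

salt -G '__hw_raid_active:1' test.ping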
Related
For my research project, I am running a set of scripts on many different patients. In every new script that I write, I find myself repeating the same lines of code to create variables for the different directory paths associated with each subject. First, I create global variables for the primary locations of each type of data. Then, I create subject-specific paths to reference the given patient's directories. Here is my code:
import argparse
from os import path
from sys import exit, platform
# check OS
LINUX = False
if platform == "darwin":
NEU_DIR = "/Volumes/Shares/NEU"
elif platform == "linux":
NEU_DIR = "/shares/NEU"
LINUX = True
else:
    print(f"++ Unrecognized OS '{platform}'; please run on ",
          "either linux or Mac OS ++")
exit()
# global paths
PROJECTS_DIR = path.join(NEU_DIR, "Projects")
USERS_DIR = path.join(NEU_DIR, "Users")
IED_ANALYSIS_DIR = path.join(USERS_DIR, "ied_analysis")
MRI_DIR = path.join(PROJECTS_DIR, "MRI")
IED_DIR = path.join(PROJECTS_DIR, "iEEG", "IED_data")
FIGURES_DIR = path.join(USERS_DIR, "figures")
SCRIPTS_DIR = path.join(NEU_DIR, "Scripts_and_Parameters", "scripts")
PYTHON_DIR = path.join(SCRIPTS_DIR, "IED_scripts", "__files", "__python")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("subj", help="subject code")
parser.add_argument("-p", "--num_parc", default=600)
parser.add_argument("-n", "--num_network", default=17)
args = parser.parse_args()
subj = args.subj
n_parcs = int(args.num_parc); n_network = int(args.num_network)
# subject-specific paths and directories
subj_ied_dir = path.join(IED_DIR, subj)
subj_mri_dir = path.join(MRI_DIR, subj)
std141_surf_dir = path.join(subj_mri_dir, "surf", "xhemi",
"std141", "orig")
general_dir = path.join(std141_surf_dir, "general")
seq_dir = path.join(subj_ied_dir, "sequence_classification")
sc_dir = path.join(subj_ied_dir,"sc", f"Schaefer_{n_parcs}P_{n_network}N")
subj_raw_fc_dir = path.join(subj_ied_dir, "raw_fc")
subj_figures_dir = path.join(FIGURES_DIR, subj,
f"Schaefer_{n_parcs}P_{n_network}N")
subj_analysis_dir = path.join(IED_ANALYSIS_DIR,
f"Schaefer_{n_parcs}P_{n_network}N")
It feels sloppy and inefficient to repeat these lines of code for every new processing script that I write. As a workaround, I tried writing a helper script with a function that creates all of these paths.
def make_subj_paths(subj, n_parcs=600, n_networks=17):
from sys import platform
from colors import Colors
from os import path
# check OS
LINUX = False
if platform == "darwin":
NEU_DIR = "/Volumes/Shares/NEU"
elif platform == "linux":
NEU_DIR = "/shares/NEU"
LINUX = True
else:
print(f"++ Unrecognized OS '{platform}'; please run on ",
"either linux or Mac OS ++", Colors.END)
exit()
# general paths
PROJECTS_DIR = path.join(NEU_DIR, "Projects")
MRI_DIR = path.join(PROJECTS_DIR, "MRI")
IED_DIR = path.join(PROJECTS_DIR, "iEEG", "IED_data")
USERS_DIR = path.join(NEU_DIR, "Users")
FIGURES_DIR = path.join(USERS_DIR, "figures")
# create name based on parcels and networks
dir_interest = f"Schaefer_{n_parcs}P_{n_networks}N"
# create global directories for subject
global subj_ied_dir
subj_ied_dir = path.join(IED_DIR,subj)
global sc_dir; global raw_fc_dir; global seq_dir; global source_loc_dir
sc_dir = path.join(subj_ied_dir,"sc",dir_interest)
raw_fc_dir = path.join(subj_ied_dir, "raw_fc")
seq_dir = path.join(subj_ied_dir,"sequence_classification")
source_loc_dir = path.join(subj_ied_dir,"source_localization",dir_interest)
global subj_fig_dir; global subj_fig_parc_dir
subj_fig_dir = path.join(FIGURES_DIR,subj)
subj_fig_parc_dir = path.join(subj_fig_dir, dir_interest)
global docs_dir; global align_dir; global std141_dir
docs_dir = path.join(MRI_DIR, subj, "icEEG","__docs")
align_dir = path.join(MRI_DIR, subj, "icEEG", "align_elec_alt")
std141_dir = path.join(MRI_DIR, subj, "surf", "xhemi", "std141", "orig")
The intended effect of this code would be to create (as global variables) all of the directories I need for a given subject when I run make_subj_paths(subj, n_parcs=600, n_networks=17). This method still seems inefficient/sloppy due to creating global variables within a function. It also does not work. My other idea is to create a Subject Class that creates all of the paths for a given subject. For every new script I write, I can first create an instance of that class for the subject input. I am not sure if this idea makes sense and I don't know the best way to execute it. Any help would be appreciated! Thank you.
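A minimal sketch of the class-based idea, assuming the same NEU_DIR layout as above; the class name and attribute names are illustrative, not a fixed design:

from os import path
from sys import platform

class Subject:
    """Collects the subject-specific paths in one place (sketch)."""
    def __init__(self, subj, n_parcs=600, n_networks=17):
        # assumes darwin/linux only, as in the question's OS check
        neu_dir = "/Volumes/Shares/NEU" if platform == "darwin" else "/shares/NEU"
        parc = f"Schaefer_{n_parcs}P_{n_networks}N"
        self.ied_dir = path.join(neu_dir, "Projects", "iEEG", "IED_data", subj)
        self.sc_dir = path.join(self.ied_dir, "sc", parc)
        self.raw_fc_dir = path.join(self.ied_dir, "raw_fc")
        self.seq_dir = path.join(self.ied_dir, "sequence_classification")
        self.fig_dir = path.join(neu_dir, "Users", "figures", subj, parc)

Each script would then start with something like s = Subject(args.subj) and use s.sc_dir, s.fig_dir, and so on, instead of globals.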
I am trying to create a memory scanner, similar to Cheat Engine, but only to extract information.
I know how to get the pid (in this case for "notepad.exe"), but I don't have any idea how to find out which specific addresses belong to the program that I am scanning.
Looking for examples, I saw someone scanning every address from one point to another, but it's too slow. Then I tried to use a batch size (scan a chunk of memory rather than each address one by one). The problem is that if the size is too small, it still takes a long time; and if it is too large, it's possible to lose many addresses that belong to the program, because ReadProcessMemory returns False for the first address even though the next one might be readable. Here is my example.
import ctypes as c
from ctypes import wintypes as w
import psutil
import sys
write = sys.stdout.write
import numpy as np
def get_client_pid(process_name):
pid = None
for proc in psutil.process_iter():
if proc.name() == process_name:
pid = int(proc.pid)
print(f"Found '{process_name}' PID = ", pid,f" hex_value = {hex(pid)}")
break
if pid == None:
print('Program Not found')
return pid
pid = get_client_pid("notepad.exe")
if pid == None:
sys.exit()
k32 = c.WinDLL('kernel32', use_last_error=True)
OpenProcess = k32.OpenProcess
OpenProcess.argtypes = [w.DWORD,w.BOOL,w.DWORD]
OpenProcess.restype = w.HANDLE
ReadProcessMemory = k32.ReadProcessMemory
ReadProcessMemory.argtypes = [w.HANDLE,w.LPCVOID,w.LPVOID,c.c_size_t,c.POINTER(c.c_size_t)]
ReadProcessMemory.restype = w.BOOL
GetLastError = k32.GetLastError
GetLastError.argtypes = None
GetLastError.restype = w.DWORD
CloseHandle = k32.CloseHandle
CloseHandle.argtypes = [w.HANDLE]
CloseHandle.restype = w.BOOL
processHandle = OpenProcess(0x10, False, int(pid))
# addr = 0x0FFFFFFFFFFF
data = c.c_ulonglong()
bytesRead = c.c_ulonglong()
start = 0x000000000000
end = 0x7fffffffffff
batch_size = 2**13
MemoryData = np.zeros(batch_size, 'l')
Size = MemoryData.itemsize*MemoryData.size
index = 0
Data_address = []
for c_address in range(start, end, batch_size):
    result = ReadProcessMemory(processHandle, c.c_void_p(c_address), MemoryData.ctypes.data,
                               Size, c.byref(bytesRead))
    if result:  # save the address range that was readable
        Data_address.extend(list(range(c_address, c_address + batch_size)))
e = GetLastError()
CloseHandle(processHandle)
I decided to go from 0x000000000000 to 0x7fffffffffff because Cheat Engine scans this range. I am still a beginner with this kind of memory scanning; maybe there are things I can do to improve the efficiency.
I suggest you take advantage of existing python libraries that can analyse Windows 10 memory.
I'm no specialist but I've found Volatility. Seems to be pretty useful for your problem.
For running that tool you need Python 2 (Python 3 won't work).
For running Python 2 and 3 on the same Windows 10 machine, follow this tutorial (the screenshots are in Spanish, but it can easily be followed).
Then see this cheat sheet with main commands. You can dump the memory and then operate on the file.
Perhaps this leads you to the solution :) At least the most basic command, pslist, dumps all the running processes' addresses.
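For illustration, a typical Volatility 2 invocation looks like this (the dump file name and profile below are placeholders; use your own dump and the profile matching your Windows build):

python vol.py -f memory.dmp --profile=Win10x64 pslist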
psutil has proc.memory_maps().
Pass the result as mem_maps to the function below (TargetProcess example: 'Calculator.exe'):
def get_memSize(TargetProcess, mem_maps):
    memSize = 0  # stays 0 if the target process is not found in the maps
    for m in mem_maps:
        if TargetProcess in m.path:
            memSize = m.rss
            break
    return memSize
If you use this function, it returns the memory size of your target process.
my_pid is the pid of 'Calculator.exe':
import win32api
import win32process

def getBaseAddressWmi(my_pid):
    PROCESS_ALL_ACCESS = 0x1F0FFF
    processHandle = win32api.OpenProcess(PROCESS_ALL_ACCESS, False, my_pid)
    modules = win32process.EnumProcessModules(processHandle)
    processHandle.Close()
    base_addr = modules[0]  # for me it worked to select the first item in the list...
    return base_addr
to get the base address of your program.
So your search range is from base_addr to base_addr + memSize.
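A rough usage sketch tying the two helpers together (assuming the standalone versions above, psutil installed, and a running 'Calculator.exe'):

import psutil

proc = psutil.Process(my_pid)
mem_size = get_memSize('Calculator.exe', proc.memory_maps())
base_addr = getBaseAddressWmi(my_pid)
# scan only this range instead of the whole 48-bit address space
for address in range(base_addr, base_addr + mem_size, batch_size):
    ...  # ReadProcessMemory as in the question's loop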
I have this Python function and it works well taking a regular screenshot.
I would like to be able to take the screenshot in high resolution. Could this code be modified to accomplish this, or would I need to integrate with another library?
def SaveScreen(self):
print "save screen"
# SCREENSHOT_CWDSAVE
if SCREENSHOT_CWDSAVE:
if not os.path.exists(os.getcwd()+os.sep+"screenshot"):
os.mkdir(os.getcwd()+os.sep+"screenshot")
(succeeded, name) = grp.SaveScreenShotToPath(os.getcwd()+os.sep+"screenshot"+os.sep)
elif SCREENSHOT_DIR:
(succeeded, name) = grp.SaveScreenShot(SCREENSHOT_DIR)
else:
(succeeded, name) = grp.SaveScreenShot()
# END_OF_SCREENSHOT_CWDSAVE
if succeeded:
pass
"""
chat.AppendChat(chat.CHAT_TYPE_INFO, name + localeInfo.SCREENSHOT_SAVE1)
chat.AppendChat(chat.CHAT_TYPE_INFO, localeInfo.SCREENSHOT_SAVE2)
"""
else:
chat.AppendChat(chat.CHAT_TYPE_INFO, localeInfo.SCREENSHOT_SAVE_FAILURE)
2 solutions for you: either you try another module like MSS (easy, efficient, and it creates high-quality PNG pictures), or you change the code of the function SaveJPEG:
// https://pastebin.com/xAv30gK1 at line 169
return jpeg_save(pbyBuffer, uWidth, uHeight, 85, pszFileName) != 0;
You see the 85? It is the picture quality. Just change it to 100 ;)
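If you try the MSS route, a minimal sketch looks like this (the output file name is just an example):

import mss

with mss.mss() as sct:
    # save a lossless PNG of the first monitor
    sct.shot(mon=1, output="screenshot.png")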
My head is probably in the wrong place with this, but I want to put a variable within a variable.
My goal for this script is to compare current versions of clients software with current software versions that are available from the vendor. At this stage I just want to print out what's available.
I have some defs set up with:
def v80(program_1 = '80.24', program_2 = '80.5', program_3 = '80.16'):
pass
def v81(program_1 = '81.16', program_2 = '81.7', program_3 = '81.14'):
pass
def v82(program_1 = '82.15', program_2 = '82.4', program_3 = '82.9'):
pass
def v83(program_1 = '83.01', program_2 = '83.0', program_3 = '83.1'):
pass
I'm then reading all of the clients versions from a text file and doing comparisons.
One of the vars I'm generating is "program_main"; currently I'm doing something like:
if program_main == "83":
if program_1:
if v83['program_1'] > float(program_1):
print ("Update available", program_1, "-->", v83[program_1])
if program_2:
if v83['program_2'] > float(program_2):
print ("Update available", program_2, "-->", v83[program_2])
if program_main == "82":
if program_1:
if v82['program_1'] > float(program_1):
print ("Update available", program_1, "-->", v82[program_1])
etc etc
My train of thought would be something like
if program_1:
if v[program_main] > float(program_1):
        print("Update available", program_1, "-->", v[program_main])
etc etc
I'm sure there's a much better way to do this entire setup, but this is one of my first proper Python scripts, so I'm happy to chalk it up to noobishness. I just want to know the right way of doing what I'm trying to achieve.
You can put your functions into a dictionary:
per_version = {
'83': v83,
'82': v82,
}
and simply use that to map string to function:
per_version[program_main]('program_1')
However, you may want to instead parameterise your version functions; make one function that takes the version as a parameter:
def program_check(version, program_1=None, program_2=None, program_3=None):
# ...
which then looks up default values per program_x parameter based on the version, again from a dictionary perhaps.
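A minimal sketch of that parameterised approach, using the version numbers from the question (the LATEST dictionary and the program_check body are illustrative, not a fixed design):

LATEST = {
    '82': {'program_1': 82.15, 'program_2': 82.4, 'program_3': 82.9},
    '83': {'program_1': 83.01, 'program_2': 83.0, 'program_3': 83.1},
}

def program_check(version, program_1=None, program_2=None, program_3=None):
    # compare each supplied client version against the vendor's latest
    latest = LATEST[version]
    for name, current in (('program_1', program_1),
                          ('program_2', program_2),
                          ('program_3', program_3)):
        if current is not None and latest[name] > float(current):
            print('Update available', current, '-->', latest[name])

program_check('83', program_1='82.9')  # prints: Update available 82.9 --> 83.01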
I am working on a project to check a file directory and automatically add log files as they are created. A file is being generated every five minutes, but some of the files are being created with a "0" filesize and I would like to alert when this happens.
So the sequence of steps I would like to have are essentially:
Get time (MM:DD:YY HH:MM:SS) *Not sure if I need to do this...
CD to Folder Directory /Netflow/YY/MM/DD
Search for filename "nfcapd.YYYYMMDDHHMM" where MM increments by 5.
If filesize is 0, then email Johnny, Sally and Jimmy
Wait 6 minutes and repeat
This is what I have pieced together thus far. How can I get the desired functionality?
import os
import time
def is_non_zero_file(fpath):
    return os.path.isfile(fpath) and os.path.getsize(fpath) > 0
# I need to check storage/Netflow for files named by time e.g 13_56_05.txt
while True:
time.sleep(360)
In addition to enumerating the files in a given path and filtering down to only the zero-length files, you probably want to maintain some type of state to ensure you aren't notified multiple times about the same zero-length file. That is, you probably don't want to get a notification that the same file is zero-length indefinitely (although you can modify the example below if you want that behavior).
You may optionally want to do things like verify that the file name strictly meets your naming convention. You may also want to validate that the date stamp included in the file name is a valid datetime.
The example below uses the glob module (itself leveraging os.listdir() and fnmatch.fnmatch()) to build up a set of possible files for inclusion. [1]
The example is intentionally simple and leverages a single class to store log sample 'state'. KEEP_SAMPLES samples are maintained (instances of logState() in the log_states list), achieved by using list slicing.
A single alert(msg) function is supplied as a stub for something that might send mail, etc...
References:
[1] https://docs.python.org/3.2/library/glob.html
#!/usr/bin/python3
import os
import glob
import re
from datetime import datetime, timezone
import time
from pprint import pprint
class logState():
def __init__(self, log_path, glob_patt, re_patt, dt_fmt):
self.dt = datetime.now(timezone.utc)
self.log_path = log_path
self.glob_patt = glob_patt
self.re_patt = re_patt
self.dt_fmt = dt_fmt
self.empty_logs = []
self.nonempty_logs = []
# Retrieve only files from glob
self.files = [ f for f in
glob.glob(self.log_path + self.glob_patt)
if os.path.isfile(f) ]
for f in self.files:
unq_fname = f.split('/')[-1]
if unq_fname == None:
continue
# Tighter pattern matching
if re.match(re_patt, unq_fname) == None:
continue
# Get the datetime portion of the file name
f_dtstamp = unq_fname.split('.')[-1]
            # Make sure the datetime stamp represents
            # a valid date (strptime raises ValueError if not)
            try:
                datetime.strptime(f_dtstamp, self.dt_fmt)
            except ValueError:
                continue
# Check file size, add to the appropriate
# list
if os.path.getsize(f) <= 0:
self.empty_logs.append(f)
else:
self.nonempty_logs.append(f)
def alert(msg):
print("ALERT!: {0}".format(msg))
if __name__ == "__main__":
# How long to sleep
SLEEP_SECS = 5
# How many samples to keep
KEEP_SAMPLES = 5
log_states = []
# Definition for what logs states we'll look for
log_path = './'
glob_patt = 'nfcapd.[0-9]*'
re_patt = 'nfcapd.([0-9]{12})'
dt_fmt = "%Y%m%d%H%M"
print("-- Setup --")
print("Sample files in '{0}'".format(log_path))
print("\t{0} samples kept:".format(KEEP_SAMPLES))
print("\tglob pattern: '{0}'".format(glob_patt))
print("\tregex pattern: '{0}'".format(re_patt))
print("\tdatetime string: '{0}'".format(dt_fmt))
print("")
# Collect the initial state
log_states.append(logState(log_path,
glob_patt,
re_patt, dt_fmt))
while True:
# Print state inventory and current state detail
print( "-- Log States Stored --")
for i, log_state in enumerate(log_states):
print("Log state {0} # {1}".format(i, log_state.dt))
print(" -- Logs size > 0 --")
pprint(log_states[-1].nonempty_logs)
print(" -- Logs size <= 0 --")
pprint(log_states[-1].empty_logs)
print("")
time.sleep(SLEEP_SECS)
log_states = log_states[-KEEP_SAMPLES+1:]
log_states.append(logState(log_path,
glob_patt,
re_patt,
dt_fmt))
# p = previous sample, c = current
p = set(log_states[-2].empty_logs)
c = set(log_states[-1].empty_logs)
# only report the items in the current sample
# not in the last
if len(c.difference(p)) > 0:
alert("\nNew zero length logs: " + str(c.difference(p)) + "\n")
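The question asks to email specific people when a zero-length file appears; a hedged sketch of what alert() could do with the standard library (the SMTP host and addresses are placeholders):

import smtplib
from email.message import EmailMessage

def alert(msg):
    email = EmailMessage()
    email["Subject"] = "Zero-length netflow log detected"
    email["From"] = "monitor@example.com"
    email["To"] = "johnny@example.com, sally@example.com, jimmy@example.com"
    email.set_content(msg)
    # assumes an SMTP relay listening on localhost
    with smtplib.SMTP("localhost") as smtp:
        smtp.send_message(email)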