Get process name by PID - python

This should be simple, but I'm just not seeing it.
If I have a process ID, how can I use it to get information about the process, such as the process name?

Under Linux, you can read the proc filesystem. The file /proc/<pid>/cmdline contains the command line.
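For example, a minimal sketch (1234 is a placeholder PID; the arguments in cmdline are separated by NUL bytes):
pid = 1234  # placeholder PID
with open("/proc/%d/cmdline" % pid, "rb") as f:
    # NUL-separated arguments; the first entry is the program name/path.
    args = f.read().split(b"\x00")
print(args[0].decode())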

Try psutil -> https://github.com/giampaolo/psutil
It works fine on Windows and Unix, as I recall.
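As a quick sketch (1234 stands in for your PID):
import psutil

print(psutil.Process(1234).name())  # e.g. 'bash' on Linux, 'notepad.exe' on Windows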

For Windows
A way to get all the PIDs of running programs without installing any modules:
import os

pids = []
# Parse the PID column out of the "tasklist" output.
a = os.popen("tasklist").readlines()
for x in a:
    try:
        pids.append(int(x[29:34]))
    except ValueError:
        # Header and separator lines have no PID in that column.
        pass
for each in pids:
    print(each)
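Slicing fixed columns out of the tasklist output is fragile. As a sketch (not part of the original answer), asking tasklist for CSV output and parsing it with the csv module avoids the column-offset guesswork; the image-name/PID column order is assumed from tasklist's default format:
import csv
import subprocess

# /fo csv -> CSV output, /nh -> no header row
output = subprocess.check_output("tasklist /fo csv /nh", text=True)
for row in csv.reader(output.splitlines()):
    if len(row) >= 2:
        name, pid = row[0], int(row[1])
        print(pid, name)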
If you want just one program, or all programs with the same name, and you want to kill the process or something similar:
import os, sys, win32api

tasklistrl = os.popen("tasklist").readlines()
tasklistr = os.popen("tasklist").read()
print(tasklistr)

def kill(process):
    process_exists_forsure = False
    gotpid = False
    for examine in tasklistrl:
        if process == examine[0:len(process)]:
            process_exists_forsure = True
    if process_exists_forsure:
        print("That process exists.")
    else:
        print("That process does not exist.")
        raw_input()
        sys.exit()
    for getpid in tasklistrl:
        if process == getpid[0:len(process)]:
            pid = int(getpid[29:34])
            gotpid = True
            try:
                handle = win32api.OpenProcess(1, False, pid)
                win32api.TerminateProcess(handle, 0)
                win32api.CloseHandle(handle)
                print("Successfully killed process %s on pid %d." % (getpid[0:len(process)], pid))
            except win32api.error as err:
                print(err)
                raw_input()
                sys.exit()
    if not gotpid:
        print("Could not get process pid.")
        raw_input()
        sys.exit()
    raw_input()
    sys.exit()

prompt = raw_input("Which process would you like to kill? ")
kill(prompt)
That was just a paste of my process-kill program; I could make it a whole lot better, but it does the job.
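As an aside (not part of the answer above), a sketch of the same kill step without pywin32, using Windows' built-in taskkill by PID:
import subprocess

pid = 1234  # placeholder PID
# /PID selects the process by id, /F forces termination.
subprocess.call(["taskkill", "/PID", str(pid), "/F"])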

Using psutil, here is the simplest code I can give you:
import psutil

# The PID of the process we are interested in
pid_id = 1216
# Process object for that PID
process_pid = psutil.Process(pid_id)
print(process_pid)
# Prints the PID, name and start time, e.g.:
# psutil.Process(pid=1216, name='ATKOSD2.exe', started='21:38:05')
# Name of the process
process_name = process_pid.name()
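If you want more than the name, here is a short sketch using the same psutil Process object (the attribute list is just an example selection):
import psutil

try:
    p = psutil.Process(1216)
    # as_dict() collects several attributes in one call.
    info = p.as_dict(attrs=['pid', 'name', 'exe', 'create_time'])
    print(info)
except psutil.NoSuchProcess:
    print("No process with that PID")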

Try this:
import os

def filter_non_printable(s):
    # Replace non-printable characters (except tab) with spaces.
    ret = ""
    for c in s:
        if ord(c) > 31 or ord(c) == 9:
            ret += c
        else:
            ret += " "
    return ret

#
# Get /proc/<pid>/cmdline information
#
def pid_name(pid):
    try:
        with open(os.path.join('/proc', str(pid), 'cmdline'), 'r') as pidfile:
            return filter_non_printable(pidfile.readline())
    except Exception:
        pass
    return None
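As a related sketch (assuming a reasonably recent Linux kernel), /proc/<pid>/comm holds just the short executable name, which is often all you need:
def pid_comm(pid):
    # Returns the short process name from /proc/<pid>/comm, or None on failure.
    try:
        with open("/proc/%s/comm" % pid) as f:
            return f.read().strip()
    except OSError:
        return None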

Related

How to write/read serial port with multithreading using pyserial

I'm currently running into a problem when trying to write to a serial device using pySerial. I want to continuously update my terminal by reading the port, handle writing to the serial device on a separate thread, and also be able to send a command via user input on the main thread. Everything runs as expected, except that when I send one of the commands (cmdA or cmdB), the serial output I'm reading does not change (a change is what I expect, because the commands alter the state of the device, which in turn changes the output the serial port is reading). In short, it seems the device is not receiving the command I am sending, even though the code keeps running without exceptions and all functions appear to execute as written.
Here is my current code:
A SerialMonitor class that can read the serial port and print out a specific number of bytes after finding a set of "sync bytes":
# SerialMonitorTool.py
import threading
import time
import serial


class SerialMonitor(threading.Thread):
    SYNC_BYTES = b'\x90\xeb'

    def __init__(self, device='/dev/ttyUSB0', baudrate=115200, timeout=5):
        print("Initializing Serial Monitor")
        self._running = False
        self._name = 'SerialMonitorThread-{}'.format(device)
        self._device = serial.Serial(device, baudrate=baudrate, timeout=timeout)
        self._write_lock = threading.Lock()
        super().__init__(name=self._name)

    def write(self, user_input, encode=False, terminator=None):
        print("Locking for CMD Write...")
        self._write_lock.acquire()
        tx = user_input + terminator if terminator else user_input
        print(f"Writing CMD to device: {tx}")
        self._device.write(tx.encode() if encode else tx)
        print("CMD Written...")
        self._write_lock.release()
        print("CMD Write Lock Released...")

    def stop(self):
        self._running = False
        print('stop thread: ' + threading.current_thread().getName())
        self.join()

    def run(self):
        print('starting thread: ' + threading.current_thread().getName())
        self._running = True
        try:
            while self._running:
                self._device.reset_input_buffer()
                self._device.read_until(self.SYNC_BYTES)
                ser_bytes = self._device.read(35)
                print(f'\r{ser_bytes}', end='', flush=True)
                time.sleep(0.25)
        finally:
            self._device.close()
and the main thread
# SerialMain.py
from SerialMonitorTool import *

cmdA = b'\x90\xeb\x01'
cmdB = b'\x90\xeb\x02'

monitor: SerialMonitor()


def print_help():
    print('Usage: cmd [ a | b ]')


def send_cmd(cmd):
    monitor.write(cmd)


def main():
    monitor.start()
    while True:
        try:
            user_input = input()
            if user_input == '?' or user_input == 'h' or user_input == 'help':
                print_help()
            elif user_input == 'q' or user_input == 'quit':
                break
            elif user_input.startswith('cmd '):
                cmd_type = user_input[len('cmd '):].split(' ')
                if cmd_type[0] == 'a':
                    send_cmd(cmdA)
                elif cmd_type[0] == 'b':
                    send_cmd(cmdB)
        except Exception as e:
            print(e)
    monitor.stop()


def process_args():
    # process arguments
    import argparse
    parser = argparse.ArgumentParser(description='Serial Test Tool')
    parser.add_argument(
        '-D', '--device',
        help='Use the specified serial device.',
        default='/dev/ttyUSB0',
        type=str
    )
    global monitor
    monitor = SerialMonitor()


if __name__ == "__main__":
    process_args()
    main()
It looks like there is an issue in your write method; try commenting out all the lock-related code in the write method, or reorder the lock calls as shown below.
def write(self, user_input, encode=False, terminator=None):
    tx = user_input + terminator if terminator else user_input
    print(f"Writing CMD to device: {tx}")
    self._device.write(tx.encode() if encode else tx)
    print("CMD Written...")
    print("Locking for CMD Write...")
    self._write_lock.acquire()
    self._write_lock.release()
    print("CMD Write Lock Released...")

Threading program doesn't quit

I am writing a program which constantly checks whether certain IP addresses are connected to the network. If they are, nothing happens. If they are not connected for a certain time, an action is triggered.
My script works as intended as far as I can tell; however, when I try to exit it using Ctrl+C it simply doesn't stop.
I guess it has something to do with the threading I am using, but I can't figure out what exactly it is.
This is my code so far:
import os
import time
from threading import Timer, Thread
import json

with open("ip_adresses.json", "r") as f:
    ip_adresses_dict = json.load(f)


def timeout():
    print("ACTION IS TRIGGERED")


# dummy Timer thread
print("dummy timer created")
t = Timer(999999999, timeout)
t.daemon = True

try:
    while True:
        ip_adress_reachable = []
        for key, value in ip_adresses_dict.items():
            if os.system(f"ping -c 1 -W 1 {value} > /dev/null") is 0:  # this means it's reachable
                ip_adress_reachable.append(True)
            else:
                ip_adress_reachable.append(False)
        print(ip_adress_reachable)
        # if no ip adresses are reachable and no timer running, start a timer.
        if any(ip_adress_reachable) == False and t.is_alive() == False:
            print("starting a new thread")
            t = Timer(15, timeout)
            t.daemon = True
            t.start()
        # If in the meantime ip adress gets reachable cancel the timer.
        elif any(ip_adress_reachable) == True and t.is_alive() == True:
            # cancel the timer
            print("timer was canceled")
            t.cancel()
except KeyboardInterrupt:
    print("quitting")
    t.join(1)
I am kinda lost, because I thought that daemon threads would stop after the main loop is done (i.e. after I press Ctrl+C).
If somebody could help me out, I would be very grateful.
After testing I found that the whole problem is caused by os.system(), which catches Ctrl+C to stop the process it is running (ping) and doesn't pass this information on to Python.
If you run ping longer and skip /dev/null
os.system(f"ping -c 5 -W 1 {value}")
then you will see that Ctrl+C stops ping.
If I use subprocess then I don't have this problem.
subprocess.call(f"ping -c 1 -W 1 {value} > /dev/null", shell=True)
Code which I used for testing on Linux Mint 20 (based on Ubuntu 20.04):
#import os
import time
from threading import Timer, Thread
#import json
import subprocess

#with open("ip_adresses.json", "r") as f:
#    ip_adresses_dict = json.load(f)

ip_adresses_dict = {
    'x': '192.168.0.1',
    'y': '192.168.0.2',
    'z': '192.168.0.3',
}


def timeout():
    print("ACTION IS TRIGGERED")


# dummy Timer thread
print("dummy timer created")
t = Timer(999999999, timeout)
t.daemon = True

try:
    while True:
        ip_adress_reachable = []
        for key, value in ip_adresses_dict.items():
            print('[DEBUG] start process')
            #result = os.system(f"ping -c 1 -W 1 {value} > /dev/null")
            #result = os.system(f"ping -c 5 -W 1 {value}")
            result = subprocess.call(f"ping -c 1 -W 1 {value} > /dev/null", shell=True)
            print('[DEBUG] end process')
            ip_adress_reachable.append(result == 0)
        print(ip_adress_reachable)
        # if no ip adresses are reachable and no timer running, start a timer.
        if any(ip_adress_reachable) is False and t.is_alive() is False:
            print("starting a new thread")
            t = Timer(15, timeout)
            t.daemon = True
            t.start()
        # If in the meantime ip adress gets reachable cancel the timer.
        elif any(ip_adress_reachable) is True and t.is_alive() is True:
            # cancel the timer
            print("timer was canceled")
            t.cancel()
except KeyboardInterrupt:
    print("quitting")
    if t.is_alive():
        t.join(1)
Doc: Replacing os.system()
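A further sketch along the same lines: passing the arguments as a list and redirecting output with subprocess.DEVNULL avoids shell=True and the shell redirection entirely:
import subprocess

value = "192.168.0.1"  # placeholder address, as in the example above
result = subprocess.call(["ping", "-c", "1", "-W", "1", value],
                         stdout=subprocess.DEVNULL)
print(result == 0)  # True if the host answered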

Run a command inside a running python script

I think it's a very easy thing to do, but I'm still searching for an answer.
I have a Python script running which looks like this:
Waiting for argument:_________
Is there an easy way to start the script and automatically pass some arguments to it?
I don't really understand you, but I think you want something like:
import os, sys

#def some_other_functions

def cmd():
    try:
        com = raw_input('Waiting for argument:_________ ')
        # Or you can pass values as sys.argv,
        # like com = sys.argv[1]
        while len(com) == 0:
            com = raw_input('Waiting for argument:_________ ')
        if len(com) != 0:
            print os.system(com)
        if str(com) == 'exit':
            sys.exit()
        print '\nContinue executing other commands or write exit to quit the program\n'
        while True:
            com = raw_input('Waiting for argument:_________ ')
            if len(com) == 0:
                print 'Entered nothing'
            if str(com) == 'exit':
                sys.exit()
            else:
                print os.system(com)
    except Exception, e:
        print str(e)

cmd()
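The answer above is written for Python 2 (raw_input, print statements). A rough Python 3 sketch of the same loop, assuming you still just want to hand arbitrary commands to the shell:
import os
import sys

def cmd():
    while True:
        com = input('Waiting for argument:_________ ')
        if len(com) == 0:
            print('Entered nothing')
        elif com == 'exit':
            sys.exit()
        else:
            # Run the command through the shell and show its exit status.
            print(os.system(com))

cmd()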

PyDBG process restore doesn't work

I'm using Python 2.5 (x86) on Windows 7 x64.
I wrote the code following this book.
http://nostarch.com/ghpython.htm
But it doesn't work in my environment.
PDBG_ERR> -- IGNORING ERROR --
PDBG_ERR> process_restore: [87] WriteProcessMemory
I suppose the problem comes from the Windows version, because somebody mentioned it on the page below, and I heard it works on Windows XP.
http://bbs.csdn.net/topics/380255167
PyDBG process snapshots not working
from pydbg import *
from pydbg.defines import *

import threading
import time
import sys


class snapshotter(object):

    def __init__(self, exe_path):
        self.exe_path = exe_path
        self.pid = None
        self.dbg = None
        self.running = True

        pydbg_thread = threading.Thread(target=self.start_debugger)
        pydbg_thread.setDaemon(0)
        pydbg_thread.start()

        while self.pid == None:
            time.sleep(1)

        monitor_thread = threading.Thread(target=self.monitor_debugger)
        monitor_thread.setDaemon(0)
        monitor_thread.start()

    def monitor_debugger(self):
        while self.running == True:
            input = raw_input("Enter: 'snap','restore' or 'quit'")
            input = input.lower().strip()
            if input == "quit":
                print "[*] Exiting the snapshotter."
                self.running = False
                self.dbg.terminate_process()
            elif input == "snap":
                print "[*] Suspending all threads."
                self.dbg.suspend_all_threads()
                print "[*] Obtaining snapshot."
                self.dbg.process_snapshot()
                print "[*] Resuming operation."
                self.dbg.resume_all_threads()
            elif input == "restore":
                print "[*] Suspending all threads."
                self.dbg.suspend_all_threads()
                print "[*] Restoring snapshot."
                self.dbg.process_restore()
                print "[*] Resuming operation."
                self.dbg.resume_all_threads()

    def start_debugger(self):
        self.dbg = pydbg()
        pid = self.dbg.load(self.exe_path)
        self.pid = self.dbg.pid
        self.dbg.run()


exe_path = "C:\\WINDOWS\\System32\\calc.exe"
snapshotter(exe_path)
How can I avoid this error and make it work?

Python read and write to files error

I'm trying to write a Python function that will take different strings and put them into a file so that the file becomes a Python file. It will then run this Python file using another Python instance. I want this process to time out after a specified amount of time, for example in the case of an infinite loop. Here's the code:
# runTimeout.py
input_value = "x = addTwo(1,2)\n"
expected_output = "x == 3"
solution = "def addTwo(a, b):\n\treturn a+b"
timeout = 0.1

# Create the test file by adding (1)submission.solution (2)input_value (3)if (4)expected_output (5): (6) return True (6) return False
inputFile = solution + "\n" + input_value + "\n" + "if " + expected_output + ":" + "\n" + "\t" + "print True" + "\n" + "print False"
fin = open('inputfile', 'w')
fin.write(inputFile)
fin.close()

command = "python ~/Dropbox/django/inputFile > ~/Dropbox/django/outputFile"

def runTimeout(command, timeout):
    import os, signal, time, commands
    cpid = os.fork()
    if cpid == 0:
        while True:
            commands.getstatusoutput(command)#[1].split('\n')
    else:
        time.sleep(timeout)
        os.kill(cpid, signal.SIGKILL)
        return

runTimeout(command, timeout)

fout = open('outputFile', 'r')
for line in fout:
    print line
fout.close()
It correctly generates this inputFile:
def addTwo(a, b):
    return a+b
x = addTwo(1,2)
if x == 3:
    print True
print False
and this outputFile
True
False
but when I execute the code with python runTimeout.py, nothing is printed to the console. However, when I run the last four lines of runTimeout.py in the interpreter, I get the contents of outputFile. What's going on? I can't figure out why the same code works in one place but not in the other.
I intend to put this into a django function after I get it working independently.
-- Update --
Brandon's solution helped, but for some reason it doesn't seem to work consistently from the terminal: sometimes it prints True, sometimes it prints nothing.
I wrote this new code instead, which works as a separate Python file. Inside a Django function it fails (500 internal server error) on signal.signal(signal.SIGALRM, signal_handler):
command = "python ~/Dropbox/django/testcode/inputFile > ~/Dropbox/django/testcode/outputFile"
import signal, subprocess
def signal_handler(signum, frame):
raise Exception("Timed out!")
signal.signal(signal.SIGALRM, signal_handler) #fails here
signal.alarm(timeout)
results = ""
try:
subprocess.call(command, shell=True)
fout = open('outputFile', 'r')
for line in fout:
print line
results += line
fout.close()
except Exception:
print "Failure."
signal.alarm(0)
print "results = " + str(results)
I think you should wait until your child process exits, which means your code should look like this:
def runTimeout(command, timeout):
    import os, signal, time, subprocess
    cpid = os.fork()
    if cpid == 0:
        while True:
            subprocess.call(command, shell=True)
            os._exit(0)
    else:
        time.sleep(timeout)
        os.kill(cpid, signal.SIGKILL)
        os.waitpid(cpid, 0)
        return
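On Python 3, a simpler sketch of the same "run with a time limit" idea is subprocess.run with its timeout parameter (the path here is the hypothetical one from the question); on timeout the child is killed and TimeoutExpired is raised:
import subprocess

command = "python ~/Dropbox/django/inputFile > ~/Dropbox/django/outputFile"
try:
    subprocess.run(command, shell=True, timeout=0.1)
except subprocess.TimeoutExpired:
    print("Child process timed out and was killed.")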
