Python Selenium: slow XPath 'all elements' lookup, add a timeout

I need to get all the elements on a page and iterate through them to search each element.
Currently I am using driver.find_elements_by_xpath('//*[@*]').
However, there can be a delay in completing the line of code above on larger pages. Is there a way to retrieve the results in increments of 100 elements? Or at least add a timeout?
Terminating driver.find_elements_by_xpath('//*[@*]') inside a multithread is the only way I can currently think of to solve this.
I need to find all elements on a page that contain certain strings. For example: elem.get_attribute('outerHTML').find('type="submit"') != -1 … and so on and so forth … I also need their proximity to each other so I can compare index positions.
Thanks!

import Globalz ###### globals import is an empty .py file
import threading
import time
import ctypes

def find_xpath():
    for i in range(5):
        print(i)
        time.sleep(1)
    Globalz.curr_value = 'DONE!'
    ### this is where the xpath retrieval goes (ABOVE loop is for example purposes only)

def stopwatch(info):
    curr_time = 0
    failed = False
    Globalz.curr_value = ''
    thread1 = threading.Thread(target=info['function'])
    thread1.start()
    while thread1.is_alive():
        if curr_time >= info['timeout']:
            failed = True
            ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(thread1.ident), ctypes.py_object(SystemExit))
        curr_time += 1
        time.sleep(1)
    if failed:
        return info['failed_returns']
    return Globalz.curr_value

betty = stopwatch({'function': find_xpath, 'timeout': 10, 'failed_returns': 'failed'})
print(betty)
If anyone is interested, here is a solution: I've created a wrapper called stopwatch(), shown above.
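An alternative to killing the worker thread is to fetch the elements in fixed-size slices with XPath position() predicates, so no single find_elements call has to return the whole page at once. A minimal sketch (the chunk size of 100, the Firefox driver and the example.com URL are placeholder assumptions; each slice still costs a round-trip to the browser, so this trades one long call for several shorter ones):

from selenium import webdriver

driver = webdriver.Firefox()       # any WebDriver works here
driver.get('https://example.com')  # placeholder URL

chunk = 100
start = 0
all_elems = []
while True:
    # XPath 1.0 lets us slice the node-set with position() predicates,
    # so each call returns at most `chunk` elements.
    xpath = '(//*[@*])[position() > {} and position() <= {}]'.format(start, start + chunk)
    batch = driver.find_elements_by_xpath(xpath)
    if not batch:
        break
    all_elems.extend(batch)
    start += chunk

Whether this ends up faster than one big query depends on the page; it mainly keeps each individual call short enough that a surrounding timeout (like the stopwatch() wrapper) has something reasonable to work with.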

Related

Multithread or multiprocess

So, currently, I am using multiprocessing to run these 3 functions together.
As only tokens changes, is it recommended to switch to multithreading? (If yes, will it really help performance, e.g. a speed-up? I assume memory use would certainly be lower.)
This is my code:
from database_function import *
from kiteconnect import KiteTicker
import pandas as pd
from datetime import datetime, timedelta
import schedule
import time
from multiprocessing import Process

def tick_A():
    #credentials code here
    tokens = [x[0] for x in db_fetchquery("SELECT zerodha FROM script ORDER BY id ASC LIMIT 50")] #FETCHING FIRST 50 SCRIPTS TOKEN
    #print(tokens)
    ##### TO MAKE SURE THE TASK STARTS AFTER 8:59 ONLY ###########
    t = datetime.today()
    future = datetime(t.year, t.month, t.day, 8, 59)
    if ((future - t).total_seconds()) < 0:
        future = datetime(t.year, t.month, t.day, t.hour, t.minute, (t.second + 2))
    time.sleep((future - t).total_seconds())
    ##### TO MAKE SURE THE TASK STARTS AFTER 8:59 ONLY ###########
    def on_ticks(ws, ticks):
        global ltp
        ltp = ticks[0]["last_price"]
        for tick in ticks:
            print(f"{tick['instrument_token']}A")
            db_runquery(f'UPDATE SCRIPT SET ltp = {tick["last_price"]} WHERE zerodha = {tick["instrument_token"]}') #UPDATING LTP IN DATABASE
            #print(f"{tick['last_price']}")
    def on_connect(ws, response):
        #print(f"response from connect :: {response}")
        # Subscribe to a list of instrument_tokens (TOKENS FETCHED ABOVE WILL BE SUBSCRIBED HERE).
        # logging.debug("on connect: {}".format(response))
        ws.subscribe(tokens)
        ws.set_mode(ws.MODE_LTP, tokens) # SETTING TOKEN TO TICK MODE (LTP / FULL / QUOTE)
    kws.on_ticks = on_ticks
    kws.on_connect = on_connect
    kws.connect(threaded=True)
    ##### TO STOP THE TASK AFTER 15:32 #######
    end_time = datetime(t.year, t.month, t.day, 15, 32)
    while True:
        schedule.run_pending()
        #time.sleep(1)
        if datetime.now() > end_time:
            break
    ##### TO STOP THE TASK AFTER 15:32 #######

def tick_B():
    # everything remains the same; only the tokens value changes
    tokens = [x[0] for x in db_fetchquery("SELECT zerodha FROM script ORDER BY id ASC OFFSET (50) ROWS FETCH NEXT (50) ROWS ONLY")]

def tick_C():
    # everything remains the same; only the tokens value changes
    tokens = [x[0] for x in db_fetchquery("SELECT zerodha FROM script ORDER BY id ASC OFFSET (100) ROWS FETCH NEXT (50) ROWS ONLY")]

if __name__ == '__main__':
    def runInParallel(*fns):
        proc = []
        for fn in fns:
            p = Process(target=fn)
            p.start()
            proc.append(p)
        for p in proc:
            p.join()
    runInParallel(tick_A, tick_B, tick_C)
Most Python implementations do not have true multithreading, because they use a global interpreter lock (GIL); only one thread executes Python bytecode at a time.
For I/O-heavy applications it should not make a difference. But if you need CPU-heavy operations done in parallel (and I see that you use pandas, so the answer is probably yes), you are better off staying with a multi-process app.
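To make the trade-off concrete, here is a minimal sketch, not tied to the KiteTicker code above; the sleep and the square-sum loop are stand-ins for real work. Threads overlap I/O waits nicely, while CPU-bound work only scales across processes because of the GIL:

import time
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor

def io_bound(_):
    time.sleep(1)  # stands in for waiting on the network / websocket ticks
    return "done"

def cpu_bound(n):
    return sum(i * i for i in range(n))  # stands in for heavy pandas / number crunching

if __name__ == '__main__':
    # I/O-bound: threads overlap the sleeps, so 3 tasks finish in ~1 s instead of ~3 s.
    with ThreadPoolExecutor(max_workers=3) as ex:
        print(list(ex.map(io_bound, range(3))))

    # CPU-bound: threads would take turns holding the GIL, so only processes give a real speed-up.
    with ProcessPoolExecutor(max_workers=3) as ex:
        print(list(ex.map(cpu_bound, [10_000_000] * 3)))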

Stopping a function based on a value

I am running a Python script on a Raspberry Pi.
Essentially, I would like a camera to take a picture every 5 seconds, but only if I have set a boolean to true, which gets toggled by a physical button.
Initially I set it to true, and then in my while(True) loop I check whether the variable is true, and if so start taking pictures every 5 seconds. The issue is that if I use something like time.sleep(5), it essentially freezes everything, including the check. Combine that with the fact that I am using debouncing for the button, and it becomes impossible for me to actually toggle the script, since I would have to press it exactly after the 5 s wait, right at the value check. I've been searching around and I think the likely solution involves threading, but I can't wrap my head around it. One workaround I thought of would be to look at the system time and take a picture whenever the seconds are a multiple of 5 (all within the main loop). This seems a bit sketchy.
Script below:
### Imports
from goprocam import GoProCamera, constants
import board
import digitalio
from adafruit_debouncer import Debouncer
import os
import shutil
import time

### GoPro settings
goproCamera = GoProCamera.GoPro()

### Button settings
pin = digitalio.DigitalInOut(board.D12)
pin.direction = digitalio.Direction.INPUT
pin.pull = digitalio.Pull.UP
switch = Debouncer(pin, interval=0.1)

save = False #this is the variable

while(True):
    switch.update()
    if switch.fell:
        print("Pressed, toggling value")
        save = not save
    if save:
        goproCamera.take_photo()
        goproCamera.downloadLastMedia()
        time.sleep(5)
Here's something to try:
last_pic_time = 0  # initialise before the loop so the first comparison works

while(True):
    switch.update()
    if switch.fell:
        print("Pressed, toggling value")
        save = not save
    if save:
        current_time = time.time()
        if current_time - last_pic_time >= 5:
            goproCamera.take_photo()
            goproCamera.downloadLastMedia()
            last_pic_time = current_time
Depending on exactly what sort of behavior you want, you may have to fiddle with when and how often time.time() is called.
Cheers!
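For instance, if you want a photo to be taken immediately when recording is toggled on, one variant of the loop above (the immediate-shot behaviour is an assumption about what is wanted) is to reset the timestamp on toggle:

last_pic_time = None  # None means no photo has been taken since save was last enabled

while(True):
    switch.update()
    if switch.fell:
        print("Pressed, toggling value")
        save = not save
        if not save:
            last_pic_time = None  # reset so the next enable fires straight away
    if save:
        now = time.time()
        if last_pic_time is None or now - last_pic_time >= 5:
            goproCamera.take_photo()
            goproCamera.downloadLastMedia()
            last_pic_time = now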
Maybe something like this?
import threading

def set_interval(func, sec):
    def func_wrapper():
        set_interval(func, sec)
        func()
    t = threading.Timer(sec, func_wrapper)
    t.start()
    return t
We call the function above inside the main loop.
Wrap your while-loop content in a function:
def take_photo():
    goproCamera.take_photo()
    goproCamera.downloadLastMedia()
Now we create a flag initially set to False to avoid creating multiple threads.
Notice that I did this before the while loop. We just need a starting value here.
active = False

while(True):
    switch.update()
    if switch.fell:
        print("Pressed, toggling value")
        save = not save
    if save:  # we need to start taking photos
        if not active:  # it is not active... so it is the first time it is being called, or save has been toggled back to True
            photo_thread = set_interval(take_photo, 5)  # grabbing a handle to the thread - photo_thread - so we can cancel it later when save is set to False
            active = True  # marking as active so this branch is skipped until save is False
    else:
        try:  # photo_thread may not exist yet, so it is wrapped inside a try statement here
            photo_thread.cancel()  # if we have a thread we kill it
            active = False  # setting to False so the next time the button is pressed we can create a new one
        except NameError:  # no timer has been created yet
            pass
Let me know if it works. =)
What I ended up doing:
### Imports
from goprocam import GoProCamera, constants
import board
import digitalio
from adafruit_debouncer import Debouncer
import os
import time
import threading

### GoPro settings
gopro = GoProCamera.GoPro()

### Button settings
pin = digitalio.DigitalInOut(board.D12)
pin.direction = digitalio.Direction.INPUT
pin.pull = digitalio.Pull.UP
switch = Debouncer(pin, interval=0.1)

### Picture save location
dir_path = os.path.dirname(os.path.realpath(__file__))
new_path = dir_path + "/pictures/"

save = False

### Functions
def takePhoto(e):
    while e.isSet():
        gopro.take_photo()
        gopro.downloadLastMedia()
        fname = '100GOPRO-' + gopro.getMedia().split("/")[-1]
        current_file = dir_path + '/' + fname
        if os.path.isfile(current_file):
            os.replace(current_file, new_path + fname) #move file, would be cleaner to download the file directly to the right folder, but the API doesn't work the way I thought it did
        e.wait(5)

### Initial settings
e = threading.Event()
t1 = threading.Thread(target=takePhoto, args=(e,))

print("Starting script")
while(True):
    switch.update()
    if switch.fell:
        #toggle value
        save = not save
    if save:
        e.set() #should be taking pictures
    else:
        e.clear() #not taking pictures
    if not t1.is_alive(): #start the thread if it hasn't been yet
        if e.is_set():
            t1.start()

Multiprocessing function not writing to file or printing

I'm working on a Raspberry Pi (3 B+) making a data collection device, and I'm trying to spawn a process to record the data coming in and write it to a file. I have a function for the writing that works fine when I call it directly.
When I call it using the multiprocess approach, however, nothing seems to happen. I can see in task monitors in Linux that the process does in fact get spawned, but no file gets written, and when I try to pass a flag to it to shut down it doesn't work, meaning I end up terminating the process and nothing seems to have happened.
I've been over this every which way and can't see what I'm doing wrong; does anyone else? In case it's relevant, these are functions inside a parent class, and one of the functions is meant to spawn the other as a separate process.
Code I'm using:
from datetime import datetime, timedelta
import csv
from drivers.IMU_SEN0 import IMU_SEN0
import multiprocessing, os

class IMU_data_logger:
    _output_filename = ''
    _csv_headers = []
    _accelerometer_headers = ['Accelerometer X', 'Accelerometer Y', 'Accelerometer Z']
    _gyroscope_headers = ['Gyroscope X', 'Gyroscope Y', 'Gyroscope Z']
    _magnetometer_headers = ['Bearing']
    _log_accelerometer = False
    _log_gyroscope = False
    _log_magnetometer = False
    IMU = None
    _writer = []
    _run_underway = False
    _process = []
    _stop_value = 0

    def __init__(self, output_filename='/home/pi/blah.csv', log_accelerometer=True, log_gyroscope=True, log_magnetometer=True):
        """data logging device
        NOTE! Multiple instances of this class should not use the same IMU devices simultaneously!"""
        self._output_filename = output_filename
        self._log_accelerometer = log_accelerometer
        self._log_gyroscope = log_gyroscope
        self._log_magnetometer = log_magnetometer

    def __del__(self):
        # TODO Update this
        if self._run_underway:  # If there's still a run underway, end it first
            self.end_recording()

    def _set_up(self):
        self.IMU = IMU_SEN0(self._log_accelerometer, self._log_gyroscope, self._log_magnetometer)
        self._set_up_headers()

    def _set_up_headers(self):
        """Set up the headers of the CSV file based on the header substrings at top and the input flags on what will be measured"""
        self._csv_headers = []
        if self._log_accelerometer is not None:
            self._csv_headers += self._accelerometer_headers
        if self._log_gyroscope is not None:
            self._csv_headers += self._gyroscope_headers
        if self._log_magnetometer is not None:
            self._csv_headers += self._magnetometer_headers

    def _record_data(self, frequency, stop_value):
        """Record data function, which takes a recording frequency, in hertz, as an input"""
        self._set_up()  # Run setup in the spawned process
        previous_read_time = datetime.now() - timedelta(1, 0, 0)
        self._run_underway = True  # Note that a run is now going
        Period = 1 / frequency  # Period, in seconds, of a recording based on the input frequency
        print("Writing output data to", self._output_filename)
        with open(self._output_filename, 'w', newline='') as outcsv:
            self._writer = csv.writer(outcsv)
            self._writer.writerow(self._csv_headers)  # Write headers to file
            while stop_value.value == 0:  # While a run continues
                if datetime.now() - previous_read_time >= timedelta(0, 1, 0):  # If we've waited a period, collect the data; otherwise keep looping
                    print("run underway value", self._run_underway)
                if datetime.now() - previous_read_time >= timedelta(0, Period, 0):  # If we've waited a period, collect the data; otherwise keep looping
                    previous_read_time = datetime.now()  # Update previous read time
                    next_row = []
                    if self._log_accelerometer:
                        # Get values in m/s^2
                        axes = self.IMU.read_accelerometer_values()
                        next_row += [axes['x'], axes['y'], axes['z']]
                    if self._log_gyroscope:
                        # Read gyro values
                        gyro = self.IMU.read_gyroscope_values()
                        next_row += [gyro['x'], gyro['y'], gyro['z']]
                    if self._log_magnetometer:
                        # Read magnetometer value
                        b = self.IMU.read_magnetometer_bearing()
                        next_row += b
                    self._writer.writerow(next_row)
        # The with-block closes the csv when done

    def start_recording(self, frequency_in_hz):
        # Create recording process
        self._stop_value = multiprocessing.Value('i', 0)
        self._process = multiprocessing.Process(target=self._record_data, args=(frequency_in_hz, self._stop_value))
        # Start recording process
        self._process.start()
        print(datetime.now().strftime("%H:%M:%S.%f"), "Data logging process spawned")
        print("Logging Accelerometer:", self._log_accelerometer)
        print("Logging Gyroscope:", self._log_gyroscope)
        print("Logging Magnetometer:", self._log_magnetometer)
        print("ID of data logging process: {}".format(self._process.pid))

    def end_recording(self, terminate_wait=2):
        """Function to end the recording process that's been spawned.
        Args: terminate_wait: This is the time, in seconds, to wait after attempting to shut down the process before terminating it."""
        # Get process id
        id = self._process.pid
        # Set stop event for process
        self._stop_value.value = 1
        self._process.join(terminate_wait)  # Wait two seconds for the process to terminate
        if self._process.is_alive():  # If it's still alive after waiting
            self._process.terminate()
            print(datetime.now().strftime("%H:%M:%S.%f"), "Process", id, "needed to be terminated.")
        else:
            print(datetime.now().strftime("%H:%M:%S.%f"), "Process", id, "successfully ended itself.")
====================================================================
ANSWER: For anyone following up here, it turns out the problem was my use of the VS Code debugger, which apparently doesn't work with multiprocessing and was somehow preventing the spawned process from succeeding. Many thanks to Tomasz Swider below for helping me work through the issues and, eventually, find my idiocy. The help was very deeply appreciated!!
I can see a few things wrong in your code:
First, stop_value == 0 will not work, as multiprocessing.Value('i', 0) != 0; change that line to
while stop_value.value == 0
Second, you never update previous_read_time, so it will write readings as fast as it can and you will run out of disk space quickly.
Third, try using time.sleep(); what you are doing is called busy looping, and it wastes CPU cycles needlessly.
Fourth, terminating with self._stop_value = 1 probably will not work; that value must be set another way, e.g. self._stop_value.value = 1.
Well, here is a piece of example code, based on the code you provided, that works just fine:
import csv
import multiprocessing
import time
from datetime import datetime, timedelta
from random import randint

class IMU(object):
    @staticmethod
    def read_accelerometer_values():
        return dict(x=randint(0, 100), y=randint(0, 100), z=randint(0, 10))

class Foo(object):
    def __init__(self, output_filename):
        self._output_filename = output_filename
        self._csv_headers = ['xxxx', 'y', 'z']
        self._log_accelerometer = True
        self.IMU = IMU()

    def _record_data(self, frequency, stop_value):
        """Record data function, which takes a recording frequency, in hertz, as an input"""
        # self._set_up()  # Run setup functions for the data collection device and store it in the self.IMU variable
        previous_read_time = datetime.now() - timedelta(1, 0, 0)
        self._run_underway = True  # Note that a run is now going
        Period = 1 / frequency  # Period, in seconds, of a recording based on the input frequency
        print("Writing output data to", self._output_filename)
        with open(self._output_filename, 'w', newline='') as outcsv:
            self._writer = csv.writer(outcsv)
            self._writer.writerow(self._csv_headers)  # Write headers to file
            while stop_value.value == 0:  # While a run continues
                if datetime.now() - previous_read_time >= timedelta(0, 1, 0):  # If we've waited a period, collect the data; otherwise keep looping
                    print("run underway value", self._run_underway)
                if datetime.now() - previous_read_time >= timedelta(0, Period, 0):  # If we've waited a period, collect the data; otherwise keep looping
                    next_row = []
                    if self._log_accelerometer:
                        # Get values in m/s^2
                        axes = self.IMU.read_accelerometer_values()
                        next_row += [axes['x'], axes['y'], axes['z']]
                    previous_read_time = datetime.now()
                    self._writer.writerow(next_row)
        # The with-block closes the csv when done

    def start_recording(self, frequency_in_hz):
        # Create recording process
        self._stop_value = multiprocessing.Value('i', 0)
        self._process = multiprocessing.Process(target=self._record_data, args=(frequency_in_hz, self._stop_value))
        # Start recording process
        self._process.start()
        print(datetime.now().strftime("%H:%M:%S.%f"), "Data logging process spawned")
        print("ID of data logging process: {}".format(self._process.pid))

    def end_recording(self, terminate_wait=2):
        """Function to end the recording process that's been spawned.
        Args: terminate_wait: This is the time, in seconds, to wait after attempting to shut down the process before terminating it."""
        # Get process id
        id = self._process.pid
        # Set stop event for process
        self._stop_value.value = 1
        self._process.join(terminate_wait)  # Wait two seconds for the process to terminate
        if self._process.is_alive():  # If it's still alive after waiting
            self._process.terminate()
            print(datetime.now().strftime("%H:%M:%S.%f"), "Process", id, "needed to be terminated.")
        else:
            print(datetime.now().strftime("%H:%M:%S.%f"), "Process", id, "successfully ended itself.")

if __name__ == '__main__':
    foo = Foo('/tmp/foometer.csv')
    foo.start_recording(20)
    time.sleep(5)
    print('Ending recording')
    foo.end_recording()

What's the fastest way to expand URLs in Python

I have a check-in list which contains about 600,000 check-ins, and there is a URL in each check-in. I need to expand them back to the original long ones. I do so by:
now = time.time()
files_without_url = 0
for i, checkin in enumerate(NYC_checkins):
    try:
        foursquare_url = urllib2.urlopen(re.search("(?P<url>https?://[^\s]+)", checkin[5]).group("url")).url
    except:
        files_without_url += 1
    if i % 1000 == 0:
        print("from %d to %d: %2.5f seconds" % (i - 1000, i, time.time() - now))
        now = time.time()
But this takes too long: from 0 to 1000 check-ins it takes 3241 seconds! Is this normal? What's the most efficient way to expand URLs in Python?
MODIFIED: Some URLs are from Bitly while others are not, and I am not sure where they come from. In this case, I simply want to use the urllib2 module.
For your information, here is an example of checkin[5]:
I'm at The Diner (2453 18th Street NW, Columbia Rd., Washington) w/ 4 others. http...... (this is the short url)
I thought I would expand on my comment regarding the use of multiprocessing to speed up this task.
Let's start with a simple function that will take a url and resolve it as far as possible (following redirects until it gets a 200 response code):
import requests

def resolve_url(url):
    try:
        r = requests.get(url)
    except requests.exceptions.RequestException:
        return (url, None)
    if r.status_code != 200:
        longurl = None
    else:
        longurl = r.url
    return (url, longurl)
This will either return a (shorturl, longurl) tuple, or it will return (shorturl, None) in the event of a failure.
Now, we create a pool of workers:
import multiprocessing
pool = multiprocessing.Pool(10)
And then ask our pool to resolve a list of urls:
resolved_urls = []
for shorturl, longurl in pool.map(resolve_url, urls):
    resolved_urls.append((shorturl, longurl))
Using the above code...
With a pool of 10 workers, I can resolve 500 URLs in 900 seconds.
If I increase the number of workers to 100, I can resolve 500 URLs in 30 seconds.
If I increase the number of workers to 200, I can resolve 500 URLs in 25 seconds.
This is hopefully enough to get you started.
(NB: you could write a similar solution using the threading module rather than multiprocessing. I usually just grab for multiprocessing first, but in this case either would work, and threading might even be slightly more efficient.)
Threads are most appropriate for network I/O. But you could try the following first.
import re
import urllib2
from urllib2 import URLError

pat = re.compile("(?P<url>https?://[^\s]+)")  # always compile it
missing_urls = 0
bad_urls = 0

def check(checkin):
    match = pat.search(checkin[5])
    if not match:
        global missing_urls
        missing_urls += 1
    else:
        url = match.group("url")
        try:
            urllib2.urlopen(url)  # don't look up .url if you don't need it later
        except URLError:  # or just Exception
            global bad_urls
            bad_urls += 1

for i, checkin in enumerate(NYC_checkins):
    check(checkin)
print(bad_urls, missing_urls)
If you get no improvement, then, now that we have a nice check function, create a thread pool and feed it. A speedup is guaranteed. Using processes for network I/O is pointless.
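For completeness, a minimal sketch of feeding check() to a thread pool; multiprocessing.dummy provides a thread-backed Pool with the same API as multiprocessing.Pool. The pool size of 50 is a guess to tune, and note that the global counters are incremented without a lock, so the totals can drift slightly under heavy concurrency:

from multiprocessing.dummy import Pool as ThreadPool  # thread-backed Pool, same API as multiprocessing.Pool

pool = ThreadPool(50)          # pool size is a guess; tune it against your network and the remote servers
pool.map(check, NYC_checkins)  # each check() call does one blocking urlopen, so threads overlap the waiting
pool.close()
pool.join()
print(bad_urls, missing_urls)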

How do I gather performance metrics for GDI and USER objects using Python

I think this is the first question I have asked on here; normally I find all the answers I need (so thanks in advance).
OK, my problem: I have written a Python program that monitors a process in threads and outputs the results to a CSV file for later. This code is working great; I am using win32pdhutil for the counters and WMI's Win32_PerfRawData_PerfProc_Process for the CPU % time. I have now been asked to monitor a WPF application and specifically monitor USER objects and GDI objects.
This is where I have a problem: I can't seem to find any Python support for gathering metrics on these two counters. These two counters are easily available in Task Manager, so I find it odd that there is very little information on them. I am specifically looking at gathering these to see if we have a memory leak; I don't want to install anything on the system other than the Python that is already installed. Please can you help with finding a solution?
I am using Python 3.3.1, and this will be running on a Windows platform (mainly Win7 and Win8).
This is the code I am using to gather the data:
def gatherIt(self, whoIt, whatIt, type, wiggle, process_info2):
    # this is the data gathering function
    data = 0.0
    data1 = "wobble"
    if type == "counter":
        # gather data according to the attributes
        try:
            data = win32pdhutil.FindPerformanceAttributesByName(whoIt, counter=whatIt)
        except:
            # a problem occurred, e.g. the process not being there
            data1 = "N/A"
    elif type == "cpu":
        try:
            process_info = {}  # used in the gather CPU based on service
            for x in range(2):
                for procP in wiggle.Win32_PerfRawData_PerfProc_Process(name=whoIt):
                    n1 = int(procP.PercentProcessorTime)
                    d1 = int(procP.Timestamp_Sys100NS)
                    # need to get the process id to change per cpu look...
                    n0, d0 = process_info.get(whoIt, (0, 0))
                    try:
                        percent_processor_time = (float(n1 - n0) / float(d1 - d0)) * 100.0
                        # print whoIt, percent_processor_time
                    except ZeroDivisionError:
                        percent_processor_time = 0.0
                    # pass back the n0 and d0
                    process_info[whoIt] = (n1, d1)
                # end for loop (this should take into account multiple cpu's)
            # end for range to allow for a current cpu time rather than cpu percent over sampleint
            if percent_processor_time == 0.0:
                data = 0.0
            else:
                data = percent_processor_time
        except:
            data1 = "N/A"
    else:
        # we have done something wrong so data = 0
        data1 = "N/A"
    # endif
    if data == "[]":
        data = 0.0
        data1 = "N/A"
    if data == "":
        data = 0.0
        data1 = "N/A"
    if data == " ":
        data = 0.0
        data1 = "N/A"
    if data1 != "wobble" and data == 0.0:
        # we have not got the result we were expecting so add an n/a
        data = data1
    return data
cheers
Edited to correct the CPU timings issue, in case anyone tried to run it :D
So after a long search I was able to mash something together that gets me the info needed.
import time
from ctypes import *
from ctypes.wintypes import *
import win32pdh

# with help from here http://coding.derkeiler.com/Archive/Python/comp.lang.python/2007-10/msg00717.html
# the following has been mashed together to get the info needed

def GetProcessID(name):
    object = "Process"
    items, instances = win32pdh.EnumObjectItems(None, None, object, win32pdh.PERF_DETAIL_WIZARD)
    val = None
    if name in instances:
        tenQuery = win32pdh.OpenQuery()
        tenarray = []
        item = "ID Process"
        path = win32pdh.MakeCounterPath((None, object, name, None, 0, item))
        tenarray.append(win32pdh.AddCounter(tenQuery, path))
        win32pdh.CollectQueryData(tenQuery)
        time.sleep(0.01)
        win32pdh.CollectQueryData(tenQuery)
        for tencounter in tenarray:
            type, val = win32pdh.GetFormattedCounterValue(tencounter, win32pdh.PDH_FMT_LONG)
            win32pdh.RemoveCounter(tencounter)
        win32pdh.CloseQuery(tenQuery)
    return val

processIDs = GetProcessID('OUTLOOK')  # Remember this is case sensitive

PQI = 0x400  # PROCESS_QUERY_INFORMATION access right
# open a handle on the process so that we can query it
OpenProcessHandle = windll.kernel32.OpenProcess(PQI, 0, processIDs)

# OK, so now we have opened the process; now we want to query it
GR_GDIOBJECTS, GR_USEROBJECTS = 0, 1
print(windll.user32.GetGuiResources(OpenProcessHandle, GR_GDIOBJECTS))
print(windll.user32.GetGuiResources(OpenProcessHandle, GR_USEROBJECTS))

# we have what we want, so now close the process handle
windll.kernel32.CloseHandle(OpenProcessHandle)
hope that helps
For GDI count, I think a simpler, cleaner monitoring script is as follows:
import time, psutil
from ctypes import *

def getPID(processName):
    for proc in psutil.process_iter():
        try:
            if processName.lower() in proc.name().lower():
                return proc.pid
        except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
            pass
    return None

def getGDIcount(PID):
    PH = windll.kernel32.OpenProcess(0x400, 0, PID)
    GDIcount = windll.user32.GetGuiResources(PH, 0)
    windll.kernel32.CloseHandle(PH)
    return GDIcount

PID = getPID('Outlook')
while True:
    GDIcount = getGDIcount(PID)
    print(f"{time.ctime()}, {GDIcount}")
    time.sleep(1)
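The question also asks about USER objects; GetGuiResources takes a flag of 1 for those (0 is GDI objects), so the same helper can be extended. A small sketch along the same lines, reusing the imports above (the name getUSERcount is just an illustrative addition):

def getUSERcount(PID):
    PH = windll.kernel32.OpenProcess(0x400, 0, PID)   # PROCESS_QUERY_INFORMATION
    USERcount = windll.user32.GetGuiResources(PH, 1)  # flag 1 = GR_USEROBJECTS
    windll.kernel32.CloseHandle(PH)
    return USERcount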
