Original issue is now resolved, many thanks to eryksun.
Fixed code is below, I now have a different issue that I will ask about in another thread if I cannot figure it out.
Error 6 is invalid handle, however, the handle appears to be good, I believe the error is coming from the second parameter.
status = advapi32.SetServiceStatus(g_hServiceStatus, pointer(m_oServiceStatus))
if 0 == status:
dwStatus = winKernel.GetLastError()
Note: if I make the pointer None, then it doesn't fail (but obviously doesn't do anything useful either).
python -V
Python 3.6.6
Larger snippet:
from ctypes import *
from ctypes.wintypes import *
winKernel = ctypes.WinDLL('kernel32', use_last_error=True)
advapi32 = ctypes.WinDLL('advapi32', use_last_error=True)
global g_ServiceName
g_ServiceName = "StatusMonitor"
global g_lpcstrServiceName
g_lpcstrServiceName = LPCSTR(b"StatusMonitor")
class _SERVICE_STATUS(Structure):
_pack_ = 4
_fields_ = [
("dwServiceType", DWORD),
("dwCurrentState", DWORD),
("dwControlsAccepted", DWORD),
("dwWin32ExitCode", DWORD),
("dwServiceSpecificExitCode", DWORD),
("dwCheckPoint", DWORD),
("dwWaitHint", DWORD)
]
LPSERVICE_STATUS = POINTER(_SERVICE_STATUS)
global m_oServiceStatus
m_oServiceStatus = _SERVICE_STATUS(0, 0, 0, 0, 0, 0, 0)
global g_hServiceStatus
g_hServiceStatus = SERVICE_STATUS_HANDLE(None)
<lots of code snipped>
def status_report(dwCurrentState, dwWin32ExitCode, dwWaitHint):
    """Report the service's current state to the Service Control Manager.

    Fills the module-level SERVICE_STATUS structure, then calls
    advapi32.SetServiceStatus with the handle obtained from
    RegisterServiceCtrlHandlerExA.

    Returns the Win32 last-error code from the final SetServiceStatus
    attempt, or None when it succeeded (or was skipped).
    """
    global g_dwCheckPoint
    global g_isService
    try:
        # Fill in the SERVICE_STATUS structure.
        m_oServiceStatus.dwServiceType = SERVICE_WIN32_OWN_PROCESS
        m_oServiceStatus.dwCurrentState = dwCurrentState
        m_oServiceStatus.dwWin32ExitCode = dwWin32ExitCode
        m_oServiceStatus.dwWaitHint = dwWaitHint
        # While start is pending, no controls are accepted.
        if dwCurrentState == SERVICE_START_PENDING:
            m_oServiceStatus.dwControlsAccepted = 0
        else:
            m_oServiceStatus.dwControlsAccepted = 1
        # Checkpoint is only meaningful during pending transitions.
        if (dwCurrentState == SERVICE_STOPPED) or (dwCurrentState == SERVICE_RUNNING):
            m_oServiceStatus.dwCheckPoint = 0
        else:
            g_dwCheckPoint += 1
            m_oServiceStatus.dwCheckPoint = g_dwCheckPoint
        # NOTE(review): this first call is made BEFORE restype/argtypes are
        # declared on SetServiceStatus, and it is the one failing with
        # ERROR_INVALID_HANDLE (6) — without argtypes, ctypes passes the
        # 64-bit handle as a C int, truncating it. Confirm and remove once
        # the declared-prototype call below is the only one used.
        status = advapi32.SetServiceStatus(g_hServiceStatus, pointer(m_oServiceStatus))
        if 0 == status:
            dwStatus = winKernel.GetLastError()
        #logging.info("SetServiceStatus(" + str(g_hServiceStatus) + ", status=" + str(dwStatus) + ")")
        logging.info("status_report(" + str(g_hServiceStatus) + ", " + str(dwCurrentState) + ", " + str(dwWin32ExitCode) + ", " + str(dwWaitHint) + ")")
        dwStatus = None
        if g_isService:
            # Report the status of the service to the SCM.
            ptrServiceStatus = LPSERVICE_STATUS(m_oServiceStatus)
            logging.info("m_oServiceStatus struct: " + str(m_oServiceStatus) + ", ref: " + str(byref(m_oServiceStatus)))
            logging.info(" " + "ptr: " + str(str(pointer(m_oServiceStatus))) + " PTR: " + str(ptrServiceStatus))
            # Declaring the prototype here is what makes this call succeed
            # (per the resolution described above in this thread).
            advapi32.SetServiceStatus.restype = BOOL
            advapi32.SetServiceStatus.argtypes = [SERVICE_STATUS_HANDLE, LPSERVICE_STATUS]
            status = advapi32.SetServiceStatus(g_hServiceStatus, ptrServiceStatus)
            if 0 == status:
                dwStatus = ctypes.get_last_error()
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        logging.error("status_report " + str(e) + " line: " + str(exc_tb.tb_lineno))
    # NOTE(review): if an exception fires before `dwStatus = None` executes,
    # this return raises NameError on an unbound local — worth hardening.
    return dwStatus
# Register the service control handler. Declaring restype/argtypes is
# required: without them ctypes would treat the returned 64-bit
# SERVICE_STATUS_HANDLE as a C int and truncate it.
advapi32.RegisterServiceCtrlHandlerExA.restype = SERVICE_STATUS_HANDLE
advapi32.RegisterServiceCtrlHandlerExA.argtypes = [LPCSTR, LPHANDLER_FUNCTION_EX, LPVOID]
# NOTE(review): the LPHANDLER_FUNCTION_EX callback created inline here must
# be kept referenced somewhere for the process lifetime, or it may be
# garbage-collected and crash on the next control event (see the ctypes
# callback note quoted later in this thread).
g_hServiceStatus = advapi32.RegisterServiceCtrlHandlerExA(g_lpcstrServiceName, LPHANDLER_FUNCTION_EX(svc_control_handler_ex), LPVOID(None))
logging.info("control handler " + str(g_hServiceStatus))
logging.info("control handler called count " + str(g_nServiceControlHandlerCalled))
# Static parts of the status structure, set once up front.
m_oServiceStatus.dwServiceType = SERVICE_WIN32_OWN_PROCESS;
m_oServiceStatus.dwServiceSpecificExitCode = 0;
# set the service state as pending
dwStatus = status_report(SERVICE_START_PENDING, NO_ERROR, 3000);
logging.info("service_main: status_report(" + str(g_hServiceStatus) + "), status=" + str(dwStatus))
log_service_status(m_oServiceStatus)
Updated logging result:
INFO service_start
INFO service_start: StopEventHandle 952
INFO service_main called JimsStatusMonitor control handler called count 0
INFO control handler 2787686645712
INFO control handler called count 0
INFO status_report(2787686645712, 2, 0, 3000)128
INFO m_oServiceStatus struct: <__main__._SERVICE_STATUS object at 0x000002890FC666C8>, ref: <cparam 'P' (000002890FCA8A30)>
INFO ptr: <__main__.LP__SERVICE_STATUS object at 0x000002890FC66848> PTR: <__main__.LP__SERVICE_STATUS object at 0x000002890FC66648>
INFO service_main: status_report(2787686645712), status=None
INFO 16, 2, 0, 0, 0, 1, 3000
I must be missing something obvious, but I don't see it. I tried different pack to the structure, but with no improvement.
I also tried using byref() instead of pointer() and just passing the structure, but none of those worked. I believe using pointer() is correct here because there is another API to set the dispatch table that is working using pointer().
Note that I am specifically using FFI for this as I found the existing packages lacking for what I am trying to do. This Python solution is based on a C++ solution that I wrote that works, I just need to understand whatever nuance to the FFI that is causing it to fail.
I should add that the service is actually running at this point, I just can't transition it out of Starting state with this issue.
Hopefully someone can tell me what I am doing wrong?
Thanks in advance,
-Dave
With many thanks to eryksun, I was able to resolve the original issue.
The main issue was that I was assuming the Windows APIs were fully defined, because it seemed like they were working without restype and argtypes defined.
Needed the following:
advapi32.RegisterServiceCtrlHandlerExA.restype = SERVICE_STATUS_HANDLE
advapi32.RegisterServiceCtrlHandlerExA.argtypes = [LPCSTR, LPHANDLER_FUNCTION_EX, LPVOID]
g_hServiceStatus = advapi32.RegisterServiceCtrlHandlerExA(g_lpcstrServiceName, LPHANDLER_FUNCTION_EX(svc_control_handler_ex), LPVOID(None))
advapi32.SetServiceStatus.restype = BOOL
advapi32.SetServiceStatus.argtypes = [SERVICE_STATUS_HANDLE, LPSERVICE_STATUS]
status = advapi32.SetServiceStatus(g_hServiceStatus, ptrServiceStatus)
With those defined correctly, there were still two remaining issues that I was able to figure out from the documentation.
The first was I missed that restype is the first argument to WINFUNCTYPE(), given the responses from eryksun, it was more obvious to me, and that explained why my definition for my service_main() wasn't working as expected.
The second was a bit more subtle and is found at the end of the callback documentation here:
Important note for callback functions:
Make sure you keep references to CFUNCTYPE objects as long as they are
used from C code. ctypes doesn't, and if you don't, they may be
garbage collected, crashing your program when a callback is made.
Note that the original code that was failing can be found in the Python forum here.
Related
I'm working on a bazel rule (using version 5.2.0) that uses SWIG (version 4.0.1) to make a python library from C++ code, adapted from a rule in the tensorflow library. The problem I've run into is that, depending on the contents of ctx.file.source.path, the swig invocation might produce a necessary .h file. If it does, the rule below works great. If it doesn't, I get:
ERROR: BUILD:31:11: output 'foo_swig_h.h' was not created
ERROR: BUILD:31:11: SWIGing foo.i. failed: not all outputs were created or valid
If the h_out stuff is removed from _py_swig_gen_impl, the rule below works great when swig doesn't produce the .h file. But, if swig does produce one, bazel seems to ignore it and it isn't available for native.cc_binary to compile, resulting in gcc failing with a 'no such file or directory' error on an #include <foo_swig_cc.h> line in foo_swig_cc.cc.
(The presence or absence of the .h file in the output is determined by whether the .i file at ctx.file.source.path uses SWIG's "directors" feature.)
def _include_dirs(deps):
    """Return the transitive C++ include directories of `deps` as a flat list."""
    per_dep_includes = [dep[CcInfo].compilation_context.includes for dep in deps]
    return depset(transitive = per_dep_includes).to_list()
def _headers(deps):
    """Return the transitive C++ header files of `deps` as a flat list."""
    per_dep_headers = [dep[CcInfo].compilation_context.headers for dep in deps]
    return depset(transitive = per_dep_headers).to_list()
# Bazel rules for building swig files.
def _py_swig_gen_impl(ctx):
    """Run SWIG over `ctx.file.source`, declaring .cc, .h and .py outputs.

    NOTE(review): `h_out` is declared unconditionally, but SWIG only emits
    the header when the .i file uses the "directors" feature; otherwise this
    rule fails with "output ... was not created" (this is exactly the issue
    described in the surrounding discussion — see the wrapper-script fix
    presented later in the thread).
    """
    module_name = ctx.attr.module_name
    cc_out = ctx.actions.declare_file(module_name + "_swig_cc.cc")
    h_out = ctx.actions.declare_file(module_name + "_swig_h.h")
    py_out = ctx.actions.declare_file(module_name + ".py")
    args = ["-c++", "-python", "-py3"]
    args += ["-module", module_name]
    args += ["-I" + x for x in _include_dirs(ctx.attr.deps)]
    args += ["-I" + x.dirname for x in ctx.files.swig_includes]
    args += ["-o", cc_out.path]
    args += ["-outdir", py_out.dirname]
    args += ["-oh", h_out.path]
    args.append(ctx.file.source.path)
    outputs = [cc_out, h_out, py_out]
    ctx.actions.run(
        executable = "swig",
        arguments = args,
        mnemonic = "Swig",
        inputs = [ctx.file.source] + _headers(ctx.attr.deps) + ctx.files.swig_includes,
        outputs = outputs,
        progress_message = "SWIGing %{input}.",
    )
    # Only the .cc and .py are propagated; h_out is still a declared output.
    return [DefaultInfo(files = depset(direct = [cc_out, py_out]))]
# Rule wrapper binding the attributes consumed by _py_swig_gen_impl.
_py_swig_gen = rule(
    attrs = {
        # The single .i interface file to run SWIG on.
        "source": attr.label(
            mandatory = True,
            allow_single_file = True,
        ),
        # Extra .i files placed on SWIG's include path.
        "swig_includes": attr.label_list(
            allow_files = [".i"],
        ),
        # C++ dependencies supplying headers/include dirs; must carry CcInfo.
        "deps": attr.label_list(
            allow_files = True,
            providers = [CcInfo],
        ),
        # Name of the generated Python module (drives all output file names).
        "module_name": attr.string(mandatory = True),
    },
    implementation = _py_swig_gen_impl,
)
def py_wrap_cc(name, source, module_name = None, deps = [], copts = [], **kwargs):
    """Macro: SWIG-wrap C++ `source` into an importable py_library `name`.

    Chains three targets:
      1. `_py_swig_gen` runs SWIG, producing module_name.cc/.py (and maybe .h);
      2. `cc_binary` links the shared object `_<module_name>.so`;
      3. `py_library` packages the generated .py plus the .so under `name`.

    NOTE(review): `copts` and `**kwargs` are accepted but currently unused;
    kept for interface compatibility.
    """
    if module_name == None:
        module_name = name

    # BUG FIX: external-repository labels must begin with "@"; the original
    # "#local_config_python" labels were mangled and not valid Bazel labels.
    python_deps = [
        "@local_config_python//:python_headers",
        "@local_config_python//:python_lib",
    ]

    # First, invoke the _py_wrap_cc rule, which runs swig. This outputs:
    # `module_name.cc`, `module_name.py`, and, sometimes, `module_name.h` files.
    swig_rule_name = "swig_gen_" + name
    _py_swig_gen(
        name = swig_rule_name,
        source = source,
        swig_includes = ["//third_party/swig_rules:swig_includes"],
        deps = deps + python_deps,
        module_name = module_name,
    )

    # Next, we need to compile the `module_name.cc` and `module_name.h` files
    # from the previous rule. The `module_name.py` file already generated
    # expects there to be a `_module_name.so` file, so we name the cc_binary
    # rule this way to make sure that's the resulting file name.
    cc_lib_name = "_" + module_name + ".so"
    native.cc_binary(
        name = cc_lib_name,
        srcs = [":" + swig_rule_name],
        linkopts = ["-dynamic", "-L/usr/local/lib/"],
        linkshared = True,
        deps = deps + python_deps,
    )

    # Finally, package everything up as a python library that can be depended
    # on. Note that this rule uses the user-given `name`.
    native.py_library(
        name = name,
        srcs = [":" + swig_rule_name],
        srcs_version = "PY3",
        data = [":" + cc_lib_name],
        imports = ["./"],
    )
My question, broadly, how I might best handle this with a single rule. I've tried adding a ctx.actions.write before the ctx.actions.run, thinking that I could generate a dummy '.h' file that would be overwritten if needed. That gives me:
ERROR: BUILD:41:11: for foo_swig_h.h, previous action: action 'Writing file foo_swig_h.h', attempted action: action 'SWIGing foo.i.'
My next idea is to remove the h_out stuff and then try to capture the h file for the cc_binary rule with some kind of glob invocation.
I've seen two approaches: add an attribute to indicate whether it applies, or write a wrapper script to generate it unconditionally.
Adding an attribute means something like "has_h": attr.bool(), and then use that in _py_swig_gen_impl to make the ctx.actions.declare_file(module_name + "_swig_h.h") conditional.
The wrapper script option means using something like this for the executable:
#!/bin/bash
set -e
touch the_path_of_the_header
exec swig "$@"
That will unconditionally create the output, and then swig will overwrite it if applicable. If it's not applicable, then passing around an empty header file in the Bazel rules should be harmless.
For posterity, this is what my _py_swig_gen_impl looks like after implementing #Brian's suggestion above:
def _py_swig_gen_impl(ctx):
    """Run SWIG via a wrapper script so the optional .h output always exists.

    SWIG only writes the `-oh` header when the .i file uses directors, but
    Bazel requires every declared output to exist; the wrapper `touch`es the
    header first so a missing one degrades to an empty file.
    """
    module_name = ctx.attr.module_name
    cc_out = ctx.actions.declare_file(module_name + "_swig_cc.cc")
    h_out = ctx.actions.declare_file(module_name + "_swig_h.h")
    py_out = ctx.actions.declare_file(module_name + ".py")
    include_dirs = _include_dirs(ctx.attr.deps)
    headers = _headers(ctx.attr.deps)
    args = ["-c++", "-python", "-py3"]
    args += ["-module", module_name]
    args += ["-I" + x for x in include_dirs]
    args += ["-I" + x.dirname for x in ctx.files.swig_includes]
    args += ["-o", cc_out.path]
    args += ["-outdir", py_out.dirname]
    args += ["-oh", h_out.path]
    args.append(ctx.file.source.path)
    outputs = [cc_out, h_out, py_out]
    # Depending on the contents of `ctx.file.source`, swig may or may not
    # output a .h file needed by subsequent rules. Bazel doesn't like optional
    # outputs, so instead of invoking swig directly we're going to make a
    # lightweight executable script that first `touch`es the .h file that may
    # get generated, and then execute that. This means we may be propagating
    # an empty .h file around as a "dependency" sometimes, but that's okay.
    swig_script_file = ctx.actions.declare_file("swig_exec.sh")
    # BUG FIX: forward the script's arguments with "$@" — the original "$#"
    # would pass the argument *count* to swig instead of the arguments.
    ctx.actions.write(
        output = swig_script_file,
        is_executable = True,
        content = "#!/bin/bash\n\nset -e\ntouch " + h_out.path + "\nexec swig \"$@\"",
    )
    ctx.actions.run(
        executable = swig_script_file,
        arguments = args,
        mnemonic = "Swig",
        inputs = [ctx.file.source] + headers + ctx.files.swig_includes,
        outputs = outputs,
        progress_message = "SWIGing %{input}.",
    )
    return [
        DefaultInfo(files = depset(direct = outputs)),
    ]
The ctx.actions.write generates the suggested bash script:
#!/bin/bash
set -e
touch %{h_out.path}
exec swig "$@"
Which guarantees that the expected h_out will always be output by ctx.actions.run, whether or not swig generates it.
I'm working on a project using nlohmann's json C++ implementation.
How can one easily explore nlohmann's JSON keys/vals in GDB ?
I tried to use this STL gdb wrapping since it provides helpers to explore standard C++ library structures that nlohmann's JSON lib is using.
But I don't find it convenient.
Here is a simple use case:
json foo;
foo["flex"] = 0.2;
foo["awesome_str"] = "bleh";
foo["nested"] = {{"bar", "barz"}};
What I would like to have in GDB:
(gdb) p foo
{
"flex" : 0.2,
"awesome_str": "bleh",
"nested": etc.
}
Current behavior
(gdb) p foo
$1 = {
m_type = nlohmann::detail::value_t::object,
m_value = {
object = 0x129ccdd0,
array = 0x129ccdd0,
string = 0x129ccdd0,
boolean = 208,
number_integer = 312266192,
number_unsigned = 312266192,
number_float = 1.5427999782486669e-315
}
}
(gdb) p foo.at("flex")
Cannot evaluate function -- may be inlined // I suppose it depends on my compilation process. But I guess it does not invalidate the question.
(gdb) p *foo.m_value.object
$2 = {
_M_t = {
_M_impl = {
<std::allocator<std::_Rb_tree_node<std::pair<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const, nlohmann::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >, bool, long long, unsigned long long, double, std::allocator, nlohmann::adl_serializer> > > >> = {
<__gnu_cxx::new_allocator<std::_Rb_tree_node<std::pair<std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const, nlohmann::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> >, bool, long long, unsigned long long, double, std::allocator, nlohmann::adl_serializer> > > >> = {<No data fields>}, <No data fields>},
<std::_Rb_tree_key_compare<std::less<void> >> = {
_M_key_compare = {<No data fields>}
},
<std::_Rb_tree_header> = {
_M_header = {
_M_color = std::_S_red,
_M_parent = 0x4d72d0,
_M_left = 0x4d7210,
_M_right = 0x4d7270
},
_M_node_count = 5
}, <No data fields>}
}
}
I found my own answer reading further the GDB capabilities and stack overflow questions concerning print of std::string.
The short path is the easiest.
The other path was hard, but I'm glad I managed to do this. There is lots of room for improvements.
There is an open issue for this particular matter here: https://github.com/nlohmann/json/issues/1952
Short path v3.1.2
I simply defined a gdb command as follows:
# this is a gdb script
# can be loaded from gdb using
# source my_script.txt (or. gdb or whatever you like)
define pjson
# use the lohmann's builtin dump method, ident 4 and use space separator
printf "%s\n", $arg0.dump(4, ' ', true).c_str()
end
# configure command helper (text displayed when typing 'help pjson' in gdb)
document pjson
Prints a lohmann's JSON C++ variable as a human-readable JSON string
end
Using it in gdb:
(gdb) source my_custom_script.gdb
(gdb) pjson foo
{
"flex" : 0.2,
"awesome_str": "bleh",
"nested": {
"bar": "barz"
}
}
Short path v3.7.0 [EDIT] 2019-nov-06
One may also use the new to_string() method, but I could not get it to work within GDB with a live inferior process. The method below still works.
# this is a gdb script
# can be loaded from gdb using
# source my_script.txt (or. gdb or whatever you like)
define pjson
# use the lohmann's builtin dump method, ident 4 and use space separator
printf "%s\n", $arg0.dump(4, ' ', true, json::error_handler_t::strict).c_str()
end
# configure command helper (text displayed when typing 'help pjson' in gdb)
document pjson
Prints a lohmann's JSON C++ variable as a human-readable JSON string
end
April 18th 2020: WORKING FULL PYTHON GDB (with live inferior process and debug symbols)
Edit 2020-april-26: the code (offsets) here are out of blue and NOT compatible for all platforms/JSON lib compilations. The github project is much more mature regarding this matter (3 platforms tested so far). Code is left there as is since I won't maintain 2 codebases.
versions:
https://github.com/nlohmann/json version 3.7.3
GNU gdb (GDB) 8.3 for GNAT Community 2019 [rev=gdb-8.3-ref-194-g3fc1095]
c++ project built with GPRBUILD/ GNAT Community 2019 (20190517) (x86_64-pc-mingw32)
The following python code shall be loaded within gdb. I use a .gdbinit file sourced in gdb.
Github repo: https://github.com/LoneWanderer-GH/nlohmann-json-gdb
GDB script
Feel free to adopt the loading method of your choice (auto, or not, or IDE plugin, whatever)
set print pretty
# source stl_parser.gdb # if you like the good work done with those STL containers GDB parsers
source printer.py # the python file is given below
python gdb.printing.register_pretty_printer(gdb.current_objfile(), build_pretty_printer())
Python script
import gdb
import platform
import sys
import traceback
# adapted from https://github.com/hugsy/gef/blob/dev/gef.py
# their rights are theirs
# Plain-ASCII stand-ins for the unicode glyphs gef uses.
HORIZONTAL_LINE = "_"  # u"\u2500"
LEFT_ARROW = "<-"  # "\u2190 "
RIGHT_ARROW = "->"  # " \u2192 "
DOWN_ARROW = "|"  # "\u21b3"

# Fully-expanded template name of the default nlohmann::json instantiation,
# used both for gdb type lookup and pretty-printer registration below.
nlohmann_json_type_namespace = \
    r"nlohmann::basic_json<std::map, std::vector, std::__cxx11::basic_string<char, std::char_traits<char>, " \
    r"std::allocator<char> >, bool, long long, unsigned long long, double, std::allocator, nlohmann::adl_serializer>"

# STD black magic
# Raw byte offsets into libstdc++ container internals. These values were
# observed on win 10 x64 only — NOT portable, verify on other platforms.
MAGIC_STD_VECTOR_OFFSET = 16  # win 10 x64 values, beware on your platform
MAGIC_OFFSET_STD_MAP = 32  # win 10 x64 values, beware on your platform

""""""
# GDB black magic
""""""
# Pointer types resolved once from the inferior's debug information.
# NOTE: gdb.lookup_type raises if the inferior has no matching debug symbols.
nlohmann_json_type = gdb.lookup_type(nlohmann_json_type_namespace).pointer()
# for in memory direct jumps. cast to type is still necessary yet to obtain values, but this could be changed by changing the types to simpler ones ?
std_rb_tree_node_type = gdb.lookup_type("std::_Rb_tree_node_base::_Base_ptr").pointer()
std_rb_tree_size_type = gdb.lookup_type("std::size_t").pointer()

""""""
# nlohmann_json reminder. any interface change should be reflected here
# enum class value_t : std::uint8_t
# {
#     null,             ///< null value
#     object,           ///< object (unordered set of name/value pairs)
#     array,            ///< array (ordered collection of values)
#     string,           ///< string value
#     boolean,          ///< boolean value
#     number_integer,   ///< number value (signed integer)
#     number_unsigned,  ///< number value (unsigned integer)
#     number_float,     ///< number value (floating-point)
#     discarded         ///< discarded by the parser callback function
# };
""""""
# The enum literals exactly as gdb prints them for m_type.
enum_literals_namespace = ["nlohmann::detail::value_t::null",
                           "nlohmann::detail::value_t::object",
                           "nlohmann::detail::value_t::array",
                           "nlohmann::detail::value_t::string",
                           "nlohmann::detail::value_t::boolean",
                           "nlohmann::detail::value_t::number_integer",
                           "nlohmann::detail::value_t::number_unsigned",
                           "nlohmann::detail::value_t::number_float",
                           "nlohmann::detail::value_t::discarded"]

# Map the full enum literal to its short name ("object", "array", ...).
enum_literal_namespace_to_literal = dict([(e, e.split("::")[-1]) for e in enum_literals_namespace])

INDENT = 4  # beautiful isn't it ?
def std_stl_item_to_int_address(node):
    """Interpret the textual form of *node* (typically a gdb.Value) as an int.

    Base 0 lets int() accept both decimal and 0x-prefixed hex renderings,
    which is how gdb prints pointer-like values.
    """
    rendered = str(node)
    return int(rendered, 0)
def parse_std_str_from_hexa_address(hexa_str):
    """Read the std::string stored at *hexa_str* (an address string) from the
    inferior and return it wrapped in double quotes for JSON output.

    Works by dereferencing the char** at that address inside the inferior.
    """
    # https://stackoverflow.com/questions/6776961/how-to-inspect-stdstring-in-gdb-with-no-source-code
    return '"{}"'.format(gdb.parse_and_eval("*(char**){}".format(hexa_str)).string())
class LohmannJSONPrinter(object):
    """Print a nlohmann::json in GDB python.

    BEWARE :
    - Contains crude string formatting (defining lists and playing with
      ",".join(...) could be better; indent management is stone-age style)
    - Parsing barely tested, only with a live inferior process.
    - It could possibly work with a core dump + debug symbols. TODO: read
      https://doc.ecoscentric.com/gnutools/doc/gdb/Core-File-Generation.html
    - No idea what happens with no symbols available; lots of fields are
      retrieved by name and should be changed to offsets if possible.
    - NO LIB VERSION MANAGEMENT. TODO: determine if there are serious
      variants in nlohmann data structures that would justify working with
      structures.
    - PLATFORM DEPENDANT. TODO: remove the black-magic offsets or handle
      them in a nicer way.

    BUG FIX vs. original: all `long(...)` calls were replaced with
    `int(...)` — `long` does not exist under a Python-3 GDB (NameError),
    and `int` behaves identically for these values under Python 2 as well.
    """

    def __init__(self, val, indent_level=0):
        self.val = val
        self.field_type_full_namespace = None
        self.field_type_short = None
        self.indent_level = indent_level
        # Dispatch table: full enum literal (as printed by gdb) -> handler.
        self.function_map = {"nlohmann::detail::value_t::null": self.parse_as_leaf,
                             "nlohmann::detail::value_t::object": self.parse_as_object,
                             "nlohmann::detail::value_t::array": self.parse_as_array,
                             "nlohmann::detail::value_t::string": self.parse_as_str,
                             "nlohmann::detail::value_t::boolean": self.parse_as_leaf,
                             "nlohmann::detail::value_t::number_integer": self.parse_as_leaf,
                             "nlohmann::detail::value_t::number_unsigned": self.parse_as_leaf,
                             "nlohmann::detail::value_t::number_float": self.parse_as_leaf,
                             "nlohmann::detail::value_t::discarded": self.parse_as_leaf}

    def parse_as_object(self):
        """Render a JSON object by in-order traversal of the std::map's
        red-black tree."""
        assert (self.field_type_short == "object")
        o = self.val["m_value"][self.field_type_short]
        # traversing tree is an adapted copy-pasta from STL gdb parser
        # (http://www.yolinux.com/TUTORIALS/src/dbinit_stl_views-1.03.txt and similar links)
        # Simple GDB Macros writen by Dan Marinescu (H-PhD) - License GPL
        # Inspired by intial work of Tom Malnar,
        #   Tony Novac (PhD) / Cornell / Stanford,
        #   Gilad Mishne (PhD) and Many Many Others.
        # Contact: dan_c_marinescu@yahoo.com (Subject: STL)
        #
        # Modified to work with g++ 4.3 by Anders Elton
        # Also added _member functions, that instead of printing the entire class in map, prints a member.
        node = o["_M_t"]["_M_impl"]["_M_header"]["_M_left"]
        # end = o["_M_t"]["_M_impl"]["_M_header"]
        tree_size = o["_M_t"]["_M_impl"]["_M_node_count"]
        # In-memory alternatives: NOTE that these immediately overwrite the
        # field-based values above with raw-offset reads (win 10 x64 offsets
        # only — see module header).
        _M_t = std_stl_item_to_int_address(o.referenced_value().address)
        _M_t_M_impl_M_header_M_left = _M_t + 8 + 16  # adding bits
        _M_t_M_impl_M_node_count = _M_t + 8 + 16 + 16  # adding bits
        node = gdb.Value(int(_M_t_M_impl_M_header_M_left)).cast(std_rb_tree_node_type).referenced_value()
        tree_size = gdb.Value(int(_M_t_M_impl_M_node_count)).cast(std_rb_tree_size_type).referenced_value()
        i = 0
        if tree_size == 0:
            return "{}"
        else:
            s = "{\n"
            self.indent_level += 1
            while i < tree_size:
                # STL GDB scripts write "+1" which in my w10 x64 GDB makes a +32 bits move ...
                # may be platform dependant and should be taken with caution
                key_address = std_stl_item_to_int_address(node) + MAGIC_OFFSET_STD_MAP
                # print(key_object['_M_dataplus']['_M_p'])
                k_str = parse_std_str_from_hexa_address(hex(key_address))
                # offset = MAGIC_OFFSET_STD_MAP
                value_address = key_address + MAGIC_OFFSET_STD_MAP
                value_object = gdb.Value(int(value_address)).cast(nlohmann_json_type)
                v_str = LohmannJSONPrinter(value_object, self.indent_level + 1).to_string()
                k_v_str = "{} : {}".format(k_str, v_str)
                end_of_line = "\n" if tree_size <= 1 or i == tree_size else ",\n"
                s = s + (" " * (self.indent_level * INDENT)) + k_v_str + end_of_line  # ",\n"
                # Standard red-black-tree in-order successor walk.
                if std_stl_item_to_int_address(node["_M_right"]) != 0:
                    node = node["_M_right"]
                    while std_stl_item_to_int_address(node["_M_left"]) != 0:
                        node = node["_M_left"]
                else:
                    tmp_node = node["_M_parent"]
                    while std_stl_item_to_int_address(node) == std_stl_item_to_int_address(tmp_node["_M_right"]):
                        node = tmp_node
                        tmp_node = tmp_node["_M_parent"]
                    if std_stl_item_to_int_address(node["_M_right"]) != std_stl_item_to_int_address(tmp_node):
                        node = tmp_node
                i += 1
            self.indent_level -= 2
            s = s + (" " * (self.indent_level * INDENT)) + "}"
        return s

    def parse_as_str(self):
        """Render a JSON string leaf (quoted)."""
        return parse_std_str_from_hexa_address(str(self.val["m_value"][self.field_type_short]))

    def parse_as_leaf(self):
        """Render any scalar leaf (null/bool/number/discarded/string)."""
        s = "WTFBBQ !"
        if self.field_type_short == "null" or self.field_type_short == "discarded":
            s = self.field_type_short
        elif self.field_type_short == "string":
            s = self.parse_as_str()
        else:
            # numbers / booleans: gdb's own rendering is good enough.
            s = str(self.val["m_value"][self.field_type_short])
        return s

    def parse_as_array(self):
        """Render a JSON array by stepping through the std::vector storage."""
        assert (self.field_type_short == "array")
        o = self.val["m_value"][self.field_type_short]
        start = o["_M_impl"]["_M_start"]
        size = o["_M_impl"]["_M_finish"] - start
        # capacity = o["_M_impl"]["_M_end_of_storage"] - start
        # size_max = size - 1
        i = 0
        start_address = std_stl_item_to_int_address(start)
        if size == 0:
            s = "[]"
        else:
            self.indent_level += 1
            s = "[\n"
            while i < size:
                # STL GDB scripts write "+1" which in my w10 x64 GDB makes a +16 bits move ...
                offset = i * MAGIC_STD_VECTOR_OFFSET
                i_address = start_address + offset
                value_object = gdb.Value(int(i_address)).cast(nlohmann_json_type)
                v_str = LohmannJSONPrinter(value_object, self.indent_level + 1).to_string()
                end_of_line = "\n" if size <= 1 or i == size else ",\n"
                s = s + (" " * (self.indent_level * INDENT)) + v_str + end_of_line
                i += 1
            self.indent_level -= 2
            s = s + (" " * (self.indent_level * INDENT)) + "]"
        return s

    def is_leaf(self):
        """True for everything except objects and arrays."""
        return self.field_type_short != "object" and self.field_type_short != "array"

    def parse_as_aggregate(self):
        """Render an object or array; anything else is a marker string."""
        if self.field_type_short == "object":
            s = self.parse_as_object()
        elif self.field_type_short == "array":
            s = self.parse_as_array()
        else:
            s = "WTFBBQ !"
        return s

    def parse(self):
        """Alternate entry point kept from the original (dispatch by kind)."""
        # s = "WTFBBQ !"
        if self.is_leaf():
            s = self.parse_as_leaf()
        else:
            s = self.parse_as_aggregate()
        return s

    def to_string(self):
        """GDB pretty-printer entry point: render self.val as a JSON string."""
        try:
            self.field_type_full_namespace = self.val["m_type"]
            str_val = str(self.field_type_full_namespace)
            if not str_val in enum_literal_namespace_to_literal:
                return "TIMMY !"
            self.field_type_short = enum_literal_namespace_to_literal[str_val]
            return self.function_map[str_val]()
            # return self.parse()
        except:
            show_last_exception()
            return "NOT A JSON OBJECT // CORRUPTED ?"

    def display_hint(self):
        # NOTE(review): GDB documents this as returning a hint *string*;
        # returning the gdb type is unusual but preserved from the original.
        return self.val.type
# adapted from https://github.com/hugsy/gef/blob/dev/gef.py
# inspired by https://stackoverflow.com/questions/44733195/gdb-python-api-getting-the-python-api-of-gdb-to-print-the-offending-line-numbe
def show_last_exception():
    """Display the last Python exception with full context, then abort.

    Prints the exception, a reversed stack trace, the last GDB commands and
    the runtime environment, then exits GDB with a non-zero code.
    """
    print("")
    exc_type, exc_value, exc_traceback = sys.exc_info()
    print(" Exception raised ".center(80, HORIZONTAL_LINE))
    print("{}: {}".format(exc_type.__name__, exc_value))
    print(" Detailed stacktrace ".center(80, HORIZONTAL_LINE))
    # Innermost frame first.
    for (filename, lineno, method, code) in traceback.extract_tb(exc_traceback)[::-1]:
        print("""{} File "{}", line {:d}, in {}()""".format(DOWN_ARROW, filename, lineno, method))
        print(" {} {}".format(RIGHT_ARROW, code))
    print(" Last 10 GDB commands ".center(80, HORIZONTAL_LINE))
    gdb.execute("show commands")
    print(" Runtime environment ".center(80, HORIZONTAL_LINE))
    print("* GDB: {}".format(gdb.VERSION))
    print("* Python: {:d}.{:d}.{:d} - {:s}".format(sys.version_info.major, sys.version_info.minor,
                                                   sys.version_info.micro, sys.version_info.releaselevel))
    # platform.dist() was removed in Python 3.8 — fall back gracefully.
    try:
        dist_info = " ".join(platform.dist())
    except AttributeError:
        dist_info = "unknown distribution"
    print("* OS: {:s} - {:s} ({:s}) on {:s}".format(platform.system(), platform.release(),
                                                    platform.architecture()[0],
                                                    dist_info))
    # BUG FIX: the original referenced undefined lowercase `horizontal_line`,
    # which itself raised NameError while reporting an exception.
    print(HORIZONTAL_LINE * 80)
    print("")
    exit(-6000)
def build_pretty_printer():
    """Build the printer collection to register with gdb.printing.

    Associates LohmannJSONPrinter with the fully expanded nlohmann::basic_json
    type name; the anchored regex must match that type name exactly.
    """
    pp = gdb.printing.RegexpCollectionPrettyPrinter("nlohmann_json")
    pp.add_printer(nlohmann_json_type_namespace, "^{}$".format(nlohmann_json_type_namespace), LohmannJSONPrinter)
    return pp
######
# executed at autoload (or to be executed by in GDB)
# gdb.printing.register_pretty_printer(gdb.current_objfile(),build_pretty_printer())
BEWARE :
- Contains crude string formatting (defining lists and playing with ",".join(...) could be better; indent management is stone-age style)
- Parsing barely tested only with a live inferior process.
- It could possibly work with a core dump + debug symbols. TODO: read that stuff
https://doc.ecoscentric.com/gnutools/doc/gdb/Core-File-Generation.html
- No idea what happens with no symbols available; lots of fields are retrieved by name and should be changed to offsets if possible
- NO LIB VERSION MANAGEMENT. TODO: determine if there are serious variants in nlohmann data structures that would justify working with structures
- PLATFORM DEPENDANT TODO: remove the black magic offsets or handle them in a nicer way
NB: If you are python-kaizer-style-guru, please consider helping or teaching how to improve all that mess
some (light tests):
gpr file:
project Debug_Printer is
for Source_Dirs use ("src", "include");
for Object_Dir use "obj";
for Main use ("main.cpp");
for Languages use ("C++");
package Naming is
for Spec_Suffix ("c++") use ".hpp";
end Naming;
package Compiler is
for Switches ("c++") use ("-O3", "-Wall", "-Woverloaded-virtual", "-g");
end Compiler;
package Linker is
for Switches ("c++") use ("-g");
end Linker;
end Debug_Printer;
main.cpp
#include // i am using the standalone json.hpp from the repo release
#include
using json = nlohmann::json;
int main() {
json fooz;
fooz = 0.7;
json arr = {3, "25", 0.5};
json one;
one["first"] = "second";
json foo;
foo["flex"] = 0.2;
foo["bool"] = true;
foo["int"] = 5;
foo["float"] = 5.22;
foo["trap "] = "you fell";
foo["awesome_str"] = "bleh";
foo["nested"] = {{"bar", "barz"}};
foo["array"] = { 1, 0, 2 };
std::cout << "fooz" << std::endl;
std::cout << fooz.dump(4) << std::endl << std::endl;
std::cout << "arr" << std::endl;
std::cout << arr.dump(4) << std::endl << std::endl;
std::cout << "one" << std::endl;
std::cout << one.dump(4) << std::endl << std::endl;
std::cout << "foo" << std::endl;
std::cout << foo.dump(4) << std::endl << std::endl;
json mixed_nested;
mixed_nested["Jean"] = fooz;
mixed_nested["Baptiste"] = one;
mixed_nested["Emmanuel"] = arr;
mixed_nested["Zorg"] = foo;
std::cout << "5th element" << std::endl;
std::cout << mixed_nested.dump(4) << std::endl << std::endl;
return 0;
}
outputs:
(gdb) source .gdbinit
Breakpoint 1, main () at F:\DEV\Projets\nlohmann.json\src\main.cpp:45
(gdb) p mixed_nested
$1 = {
"Baptiste" : {
"first" : "second"
},
"Emmanuel" : [
3,
"25",
0.5,
],
"Jean" : 0.69999999999999996,
"Zorg" : {
"array" : [
1,
0,
2,
],
"awesome_str" : "bleh",
"bool" : true,
"flex" : 0.20000000000000001,
"float" : 5.2199999999999998,
"int" : 5,
"nested" : {
"bar" : "barz"
},
"trap " : "you fell",
},
}
Edit 2019-march-24 : add precision given by employed russian.
Edit 2020-april-18 : after a long night of struggling with python/gdb/stl I had something working by the ways of the GDB documentation for python pretty printers. Please forgive any mistakes or misconceptions, I banged my head a whole night on this and everything is flurry-blurry now.
Edit 2020-april-18 (2): rb tree node and tree_size could be traversed in a more "in-memory" way (see above)
Edit 2020-april-26: add warning concerning the GDB python pretty printer.
My solution was to edit the ~/.gdbinit file.
define jsontostring
printf "%s\n", $arg0.dump(2, ' ', true, nlohmann::detail::error_handler_t::strict).c_str()
end
This makes the "jsontostring" command available on every gdb session without the need of sourcing any files.
(gdb) jsontostring object
I'm looking to do something in this example: Python - How to get the start/base address of a process?. I'm having the same issue as the person in that topic, in that the pointers cheat engine provides is in reference to the base address of the process itself.
I've looked around and it looks like the best solution is to use ctypes and the MODULEENTRY32 to store snapshots of processes and analyze their modBaseAddr.
Here is my current code
import os.path, ctypes, ctypes.wintypes
from ctypes import *
from ctypes.wintypes import *
# Process-access rights for OpenProcess (values from winnt.h).
PROCESS_QUERY_INFORMATION = (0x0400)
PROCESS_VM_OPERATION = (0x0008)
PROCESS_VM_READ = (0x0010)
PROCESS_VM_WRITE = (0x0020)
# CreateToolhelp32Snapshot flag: snapshot the module list of a process.
TH32CS_SNAPMODULE = (0x00000008)
# Shorthand bindings for the kernel32 Tool Help / process APIs.
CreateToolhelp32Snapshot= ctypes.windll.kernel32.CreateToolhelp32Snapshot
Process32First = ctypes.windll.kernel32.Process32First
Process32Next = ctypes.windll.kernel32.Process32Next
Module32First = ctypes.windll.kernel32.Module32First
Module32Next = ctypes.windll.kernel32.Module32Next
GetLastError = ctypes.windll.kernel32.GetLastError
OpenProcess = ctypes.windll.kernel32.OpenProcess
GetPriorityClass = ctypes.windll.kernel32.GetPriorityClass
CloseHandle = ctypes.windll.kernel32.CloseHandle
class MODULEENTRY32(Structure):
    """ctypes mirror of the Win32 MODULEENTRY32 structure (tlhelp32.h).

    dwSize must be set to sizeof(MODULEENTRY32) before the structure is
    passed to Module32First.  Field order and types must match the C
    declaration exactly, so only comments are added here.
    """
    _fields_ = [ ( 'dwSize' , DWORD ) ,      # size of this struct; set before use
                 ( 'th32ModuleID' , DWORD ),     # module identifier
                 ( 'th32ProcessID' , DWORD ),    # PID of the owning process
                 ( 'GlblcntUsage' , DWORD ),     # global usage (ref) count
                 ( 'ProccntUsage' , DWORD ) ,    # per-process usage count
                 ( 'modBaseAddr' , POINTER(BYTE)) ,  # module load address (a pointer, not an int)
                 ( 'modBaseSize' , DWORD ) ,     # size of the module, in bytes
                 ( 'hModule' , HMODULE ) ,       # module handle, valid in the owning process
                 ( 'szModule' , c_char * 256 ),  # module name (bytes under Python 3)
                 ( 'szExePath' , c_char * 260 ) ]  # full path to the module (MAX_PATH)
def GetBaseAddr(ProcId, ProcName):
    """Return the base address of module *ProcName* inside process *ProcId*.

    Walks the module list of the target process using a ToolHelp32
    snapshot and returns the integer load address.  Returns the string
    'Error' when the snapshot cannot be created (kept for backward
    compatibility with the original contract) and None when no module
    matches.

    NOTE(review): me32.szModule is bytes under Python 3, so ProcName
    should be passed as bytes (e.g. b'RocketLeague.exe') or the
    comparison never matches -- TODO confirm against callers.
    """
    me32 = MODULEENTRY32()
    me32.dwSize = sizeof(me32)
    hSnapshot = CreateToolhelp32Snapshot(TH32CS_SNAPMODULE, ProcId)
    # Test the returned handle itself (INVALID_HANDLE_VALUE maps to -1).
    # The original called GetLastError() unconditionally, which reports
    # stale errors from unrelated earlier calls.
    if hSnapshot in (0, -1):
        print('Handle Error %s' % WinError())
        return 'Error'
    try:
        ret = Module32First(hSnapshot, byref(me32))
        while ret:
            if me32.szModule == ProcName:
                # The original returned id(me32.modBaseAddr) -- the address
                # of the transient Python wrapper object, which changes on
                # every run.  Cast the pointer to get the real base address.
                return cast(me32.modBaseAddr, c_void_p).value
            ret = Module32Next(hSnapshot, byref(me32))
        print('Couldn\'t find Process with name %s' % ProcName)
    finally:
        # Always release the snapshot handle (the original leaked it on
        # some paths).
        CloseHandle(hSnapshot)
def GetProcessIdByName(pName):
    """Return the PID of the first process whose image name equals *pName*.

    Appends '.exe' when missing, then scans every PID reported by
    EnumProcesses and compares the image file name.  Returns None when
    no process matches.
    """
    if not pName.endswith('.exe'):
        pName = pName + '.exe'
    ProcessIds, BytesReturned = EnumProcesses()
    # Floor division: plain '/' yields a float on Python 3, which range()
    # rejects.
    for index in range(BytesReturned // ctypes.sizeof(ctypes.wintypes.DWORD)):
        ProcessId = ProcessIds[index]
        hProcess = ctypes.windll.kernel32.OpenProcess(PROCESS_QUERY_INFORMATION, False, ProcessId)
        if hProcess:
            try:
                ImageFileName = (ctypes.c_char * MAX_PATH)()
                if ctypes.windll.psapi.GetProcessImageFileNameA(hProcess, ImageFileName, MAX_PATH) > 0:
                    filename = os.path.basename(ImageFileName.value)
                    if filename == pName:
                        return ProcessId
            finally:
                # The original leaked the handle when it returned early on
                # a match; try/finally closes it on every path.
                CloseHandle(hProcess)
def EnumProcesses():
    """Return (DWORD array of PIDs, bytes written) from psapi.EnumProcesses.

    Starts with a 32-entry buffer and doubles it until the API reports
    fewer bytes than the buffer holds, which guarantees the complete PID
    list was captured.  Returns None when the underlying call fails.
    """
    count = 32
    while True:
        ProcessIds = (ctypes.wintypes.DWORD * count)()
        cb = ctypes.sizeof(ProcessIds)
        BytesReturned = ctypes.wintypes.DWORD()
        if not ctypes.windll.Psapi.EnumProcesses(ctypes.byref(ProcessIds), cb, ctypes.byref(BytesReturned)):
            return None
        if BytesReturned.value < cb:
            # Buffer was large enough: every PID fit.  (The original had an
            # unreachable 'break' after this return.)
            return ProcessIds, BytesReturned.value
        # Buffer completely full -- PIDs may have been truncated; retry
        # with twice the capacity.
        count *= 2
# NOTE(review): the print statements below are Python 2 syntax; this
# snippet targets Python 2.
if __name__ == '__main__':
    # Resolve the game's PID by image name, then print its module base
    # address in hex.
    ProcId = GetProcessIdByName('RocketLeague.exe')
    #print ProcId
    print hex(GetBaseAddr(ProcId, 'RocketLeague.exe'))
    #print hex(GetBaseAddr(8252,'RocketLeague.exe'))
Now my understanding of memory isn't the greatest, but I'd figure that the base address should be static while a program is running. When I do get this code to run, the ModBaseAddr I get back changes every time I run it. Another weird Issue I'm having is that without that print ProcId statement, running the program returns an ERROR_ACCESS_DENIED (error 5) from line 41 (This has something to do with the CreateToolhelp32Snapshot function I assume as I have admin rights on the computer). With the print statement, however, the program runs through giving me a different ModBaseAddr every time. If I feed the GetBaseAddr function the ProcessId manually it also works without the print statement, again however, it's giving me a random address every time.
If anyone could provide me any help or point me in the right direction I'd really appreciate it!
Clarification: MODULEENTRY32 stores information about modules, not processes. When you call CreateToolhelp32Snapshot using TH32CS_SNAPMODULE, you are getting the modules loaded by the process, not the processes themselves.
Instead of getting the MODULEENTRY32 in combination with EnumProcesses, you can instead use CreateToolHelp32Snapshot with TH32CS_SNAPPROCESS to get a list of processes in the form of PROCESSENTRY32 structs, which also contain the process identifier.
Despite being a user with administrator privileges, you must also run the process as an administrator.
You should also ensure you're initializing your MODULEENTRY32 to {0} for proper error handling and not running into an issue of the returned value being subject to undefined behavior of uninitialized memory.
I do not know the specific cause of your issue but I have used a source code for this purpose that is very robust that may be a plug and play alternative to what you're currently using, the important snippet will follow, but the full source is available here.
def ListProcessModules(ProcessID):
    """Print name, path, IDs, usage counts, base address and size of every
    module loaded by process *ProcessID*.

    Returns True on success, False when the snapshot or the first module
    enumeration fails.
    """
    me32 = MODULEENTRY32()
    me32.dwSize = sizeof(MODULEENTRY32)
    hModuleSnap = CreateToolhelp32Snapshot(TH32CS_SNAPMODULE, ProcessID)
    # The original never validated the snapshot handle; a failed call
    # returns INVALID_HANDLE_VALUE (-1).
    if hModuleSnap in (0, -1):
        print('ListProcessModules() Error on CreateToolhelp32Snapshot[%d]' % GetLastError())
        return False
    ret = Module32First(hModuleSnap, pointer(me32))
    if ret == 0:
        print('ListProcessModules() Error on Module32First[%d]' % GetLastError())
        CloseHandle(hModuleSnap)
        return False
    while ret:
        print(" MODULE NAME: %s" % me32.szModule)
        print(" executable = %s" % me32.szExePath)
        print(" process ID = 0x%08X" % me32.th32ProcessID)
        print(" ref count (g) = 0x%04X" % me32.GlblcntUsage)
        print(" ref count (p) = 0x%04X" % me32.ProccntUsage)
        # modBaseAddr is a POINTER(BYTE); '%X' cannot format the ctypes
        # wrapper object -- cast it to an integer address first.
        print(" base address = 0x%08X" % (cast(me32.modBaseAddr, c_void_p).value or 0))
        print(" base size = %d" % me32.modBaseSize)
        ret = Module32Next(hModuleSnap, pointer(me32))
    CloseHandle(hModuleSnap)
    return True
I am writing a simple python program on a raspberry pi and I am quite new to python programming. I have defined a function called GetMessage which has no parameters and returns a variable which I called data, but I am getting an error which states
File "Raspberry_pi.py", line 39
return none
^
SyntaxError: invalid syntax
import os
import glob
import time
import RPi.GPIO as GPIO
from math import *
from bluetooth import *
from RPIO import PWM
# Configure the Pi's Bluetooth adapter and advertise a friendly name.
os.system('sudo hciconfig hci0 pisca')
os.system('sudo hciconfig hci0 name "De Quadcoptur"')
servo = PWM.Servo()
# NOTE(review): StartSpin/GetMessage/DecodeInput are invoked here but are
# only defined later in the file -- at run time this raises NameError.
# These calls should move below the definitions (or into a main() guard).
StartSpin()
server_sock=BluetoothSocket( RFCOMM )
server_sock.bind(("",PORT_ANY))
server_sock.listen(1)
port = server_sock.getsockname()[1]
GetMessage()
DecodeInput()
# Service UUID advertised by GetMessage below.
uuid = "94f39d29-7d6d-437d-973b-fba39e49d4ee"
def GetMessage():
    """Accept one Bluetooth client and return the first packet received.

    Advertises the serial-port service, blocks until a client connects,
    then reads up to 1024 bytes.  Returns the received data, or None on
    an empty read (peer disconnected) or an IOError.
    """
    advertise_service(server_sock, "XT1032",  # phone bluetooth name
                      service_id=uuid,
                      service_classes=[uuid, SERIAL_PORT_CLASS],
                      profiles=[SERIAL_PORT_PROFILE],
                      #protocols = [ OBEX_UUID ]
                      )
    client_sock, client_info = server_sock.accept()
    try:
        data = client_sock.recv(1024)
        if len(data) == 0:
            # The original used 'break' here (and in the except clause),
            # which is illegal outside a loop -- the reported SyntaxError.
            return None
        print("received [%s]" % data)
        client_sock.close()
        server_sock.close()
        return data
    except IOError:
        return None
def StartSpin():
    """Pulse all four ESC/servo channels (GPIO 17-20) to spin up, pause,
    stop them, then emit a burst of raw channel pulses on GPIO 17.

    NOTE(review): hardware-dependent behavior; code left byte-identical.
    """
    # Set each channel to a 1000 us pulse.  (The original comment said
    # "1200µs (1.2ms)", but the calls below clearly pass 1000.)
    servo.set_servo(17, 1000)
    servo.set_servo(18, 1000)
    servo.set_servo(19, 1000)
    servo.set_servo(20, 1000)
    time.sleep(1)
    servo.stop_servo(17)
    servo.stop_servo(18)
    servo.stop_servo(19)
    servo.stop_servo(20)
    #Check if more pulses is faster
    # NOTE(review): time.sleep takes seconds, so 2000 blocks for ~33
    # minutes.  Presumably 2000 ms (i.e. sleep(2)) was intended -- TODO
    # confirm before flying.
    time.sleep(2000)
    # Five identical pulses on channel 0 / GPIO 17 (the repetition is in
    # the original; intent unclear -- see the comment above).
    PWM.add_channel_pulse(0, 17, start = 1000, width = 100)
    PWM.add_channel_pulse(0, 17, start = 1000, width = 100)
    PWM.add_channel_pulse(0, 17, start = 1000, width = 100)
    PWM.add_channel_pulse(0, 17, start = 1000, width = 100)
    PWM.add_channel_pulse(0, 17, start = 1000, width = 100)
    servo.stop_servo(17)
    servo.stop_servo(18)
    servo.stop_servo(19)
    servo.stop_servo(20)
    return None
def DecodeInput():
    """Read one command from the Bluetooth link and dispatch it.

    Protocol (inferred from the original code -- TODO confirm):
      'start' -> spin up the motors, return 0
      'U<n>'  -> set up/throttle power
      'P<n>'  -> set pitch power
      else    -> treat payload as roll power

    The original had missing colons and 'else if' instead of 'elif'
    (the reported SyntaxErrors); both are fixed here.
    """
    data = GetMessage()
    if data == 'start':
        StartSpin()
        return 0
    elif data[0] == 'U':
        # str.strip returns a new string; the original discarded it and
        # then called int() on the unstripped data.
        UpPower = int(data.strip('U'))
        SetUpPower(UpPower)
    elif data[0] == 'P':
        PitchPower = int(data.strip('P'))
        # The original had the bare expression 'SetPitchPower', which does
        # nothing; presumably a call was intended -- TODO confirm signature.
        SetPitchPower(PitchPower)
    else:
        RollPower = int(data.strip('P'))
        # NOTE(review): UpPower/PitchPower are undefined on this branch in
        # the original as well -- they need to be tracked across calls
        # (e.g. module globals); TODO confirm intended state handling.
        SetPower(UpPower, PitchPower, RollPower)
    return None
def SetPower(UpPower, PitchPower, RollPower):
    """Mix throttle, pitch and roll into four motor power values.

    Motors: 1 = front left, 2 = front right, 3 = back left, 4 = back right.
    Pitch and roll inputs are halved, then the deviation from the 25
    midpoint is added to the front pair (pitch below midpoint) or back
    pair (pitch above), and to the left pair (roll below) or right pair
    (roll above).

    Returns (Motor1Power, Motor2Power, Motor3Power, Motor4Power).  The
    original returned None implicitly, so returning the tuple is
    backward-compatible for existing callers that ignore the result.

    Fixes: the original was missing the colons after every condition
    (SyntaxError), and assigned 'Motor2Power = Motor1Power + ...', which
    double-counted the pitch correction on the front-right motor.
    """
    #Make Arbitrary Values
    Motor1Power = UpPower  #Front Left
    Motor2Power = UpPower  #Front Right
    Motor3Power = UpPower  #Back Left
    Motor4Power = UpPower  #Back Right
    PitchPower = PitchPower / 2
    RollPower = RollPower / 2
    if PitchPower < 25:
        Motor1Power = Motor1Power + abs(25 - PitchPower)
        Motor2Power = Motor2Power + abs(25 - PitchPower)
    else:
        Motor3Power = Motor3Power + (PitchPower - 25)
        Motor4Power = Motor4Power + (PitchPower - 25)
    if RollPower < 25:
        Motor1Power = Motor1Power + abs(25 - RollPower)
        Motor3Power = Motor3Power + abs(25 - RollPower)
    else:
        Motor2Power = Motor2Power + (RollPower - 25)
        Motor4Power = Motor4Power + (RollPower - 25)
    return (Motor1Power, Motor2Power, Motor3Power, Motor4Power)
What is causing this error and how can I fix it?
Edit: I have defined data as a global variable and the error now is
File "Raspberry_pi.py", line 39
return data
^
SyntaxError: invalid syntax
There are a number of syntax problems in your code. Because of the nature of SyntaxError exceptions (which are raised when the interpreter doesn't understand the code syntax), the error messages may not identify the right line as the source of the problem.
The first syntax error I see is that you're using break in the GetMessage function without it being in a loop. A break statement is only useful within a for or while block, and using one elsewhere (in an except block in this case) is a syntax error.
The next set of errors have to do with missing colons. Each of the conditional branches in DecodeInput and SetPower need to have a colon after the condition: if condition1:, elif condition2:, else:
It's also an error to use else if rather than elif (you could make it work if you added a colon, a newline and an extra level of indentation after else:, then used a separate if statement, but that would be wasteful of space).
There are some additional issues, but they're not syntax errors. For instance, you're calling your functions from top-level code before they've been defined, and DecodeInput has a line with the bare expression SetPower which doesn't do anything useful (you probably want to call SetPower with some argument).
Hopefully this will get you on the right track.
Once you get your colons fixed, you'll probably run into a problem with your GetMessage syntax. You cannot break unless you're inside of a loop. If you intend to return from an exception, you don't need the pass call. An example (simplified from your code) of how this method could (should?) look:
def GetMessage():
    """Fetch a payload and return it.

    Returns None when the payload is empty or when an IOError occurs;
    otherwise returns the payload itself.
    """
    try:
        payload = [1, 2]
        if not payload:
            return None
        return payload
    except IOError:
        return None
Clearly you'll want to replace the bulk of the method with your own code, and determine if you really want to return from the function at the points where you put breaks.
Think this is my first question I have asked on here normally find all the answers I need (so thanks in advance)
OK, my problem: I have written a Python program that monitors a process in threads and outputs the results to a CSV file for later analysis. This code is working great; I am using win32pdhutil for the counters and WMI's Win32_PerfRawData_PerfProc_Process for the CPU %time. I have now been asked to monitor a WPF application, and specifically to monitor User objects and GDI objects.
This is where I have a problem, it is that i can't seem to find any python support for gathering metrics on these two counters. these two counters are easily available in the task manager I find it odd that there is very little information on these two counters. I am specifically looking at gathering these to see if we have a memory leak, I don't want to install anything else on the system other than python that is already installed. Please can you peeps help with finding a solution.
I am using python 3.3.1, this will be running on a windows platform (mainly win7 and win8)
This is the code i am using to gather the data
def gatherIt(self,whoIt,whatIt,type,wiggle,process_info2):
    """Collect one metric sample for *whoIt*.

    Parameters (as used by the code below):
      whoIt  -- performance-counter instance / process name
      whatIt -- counter name, used when type == "counter"
      type   -- "counter" (win32pdhutil lookup) or "cpu" (WMI raw-data
                sampling); anything else yields "N/A"
      wiggle -- a WMI connection object exposing
                Win32_PerfRawData_PerfProc_Process
      process_info2 -- unused in this body

    Returns the sampled value, or the string "N/A" when the sample
    failed or came back empty (data1 acts as the error sentinel, with
    "wobble" meaning "no error recorded").
    """
    # this is the data gathering function thing
    data=0.0
    data1="wobble"
    if type=="counter":
        # gather data according to the attributes
        try:
            data = win32pdhutil.FindPerformanceAttributesByName(whoIt, counter=whatIt)
        except:
            # a problem occurred, e.g. the process not being there
            # NOTE(review): bare except also hides unrelated errors.
            data1="N/A"
    elif type=="cpu":
        try:
            process_info={}  # used in the gather CPU based on service
            # Sample twice so the second pass has a previous (n0, d0)
            # reading to diff against.
            for x in range(2):
                for procP in wiggle.Win32_PerfRawData_PerfProc_Process(name=whoIt):
                    n1 = int(procP.PercentProcessorTime)
                    d1 = int(procP.Timestamp_Sys100NS)
                    # need to get the process id to change per cpu look...
                    n0, d0 = process_info.get (whoIt, (0, 0))
                    try:
                        # Raw-counter formula: delta(ticks)/delta(time).
                        percent_processor_time = (float (n1 - n0) / float (d1 - d0)) *100.0
                        #print whoIt, percent_processor_time
                    except ZeroDivisionError:
                        percent_processor_time = 0.0
                    # pass back the n0 and d0
                    process_info[whoIt] = (n1, d1)
                # end for loop (this should take into account multiple cpu's)
            # end for range to allow for a current cpu time rather than cpu percent over sampleint
            if percent_processor_time==0.0:
                data=0.0
            else:
                data=percent_processor_time
        except:
            # NOTE(review): bare except again -- masks WMI failures.
            data1="N/A"
    else:
        # we have done something wrong so data = 0
        data1="N/A"
    #endif
    # Normalize empty-looking results to the "no data" state.
    if data == "[]":
        data=0.0
        data1="N/A"
    if data == "" :
        data=0.0
        data1="N/A"
    if data == " ":
        data=0.0
        data1="N/A"
    if data1!="wobble" and data==0.0:
        # we have not got the result we were expecting so return the
        # "N/A" sentinel instead of a value
        data=data1
    return data
cheers
edited for correct cpu timings issue if anyone tried to run it :D
So after a long search, I was able to mash something together that gets me the info needed.
import time
from ctypes import *
from ctypes.wintypes import *
import win32pdh
# with help from here http://coding.derkeiler.com/Archive/Python/comp.lang.python/2007-10/msg00717.html
# the following has been mashed together to get the info needed
def GetProcessID(name):
    """Return the PID of performance-counter instance *name* (e.g. 'OUTLOOK').

    Resolves the process through the 'ID Process' performance counter.
    The query is collected twice with a short pause because rate-style
    counters need two samples before a formatted value is available.
    Returns None when no counter instance with that name exists.
    *name* is case sensitive.
    """
    perf_object = "Process"  # renamed from 'object': don't shadow the builtin
    items, instances = win32pdh.EnumObjectItems(None, None, perf_object, win32pdh.PERF_DETAIL_WIZARD)
    val = None
    if name in instances:
        query = win32pdh.OpenQuery()
        counters = []
        counter_name = "ID Process"
        path = win32pdh.MakeCounterPath((None, perf_object, name, None, 0, counter_name))
        counters.append(win32pdh.AddCounter(query, path))
        win32pdh.CollectQueryData(query)
        time.sleep(0.01)
        win32pdh.CollectQueryData(query)
        for counter in counters:
            # 'value_type' renamed from 'type' (builtin shadowing).
            value_type, val = win32pdh.GetFormattedCounterValue(counter, win32pdh.PDH_FMT_LONG)
            win32pdh.RemoveCounter(counter)
        win32pdh.CloseQuery(query)
    return val
processIDs = GetProcessID('OUTLOOK') # Remember this is case sensitive
# PROCESS_QUERY_INFORMATION access right (winnt.h).
PQI = 0x400
#open a handle on to the process so that we can query it
OpenProcessHandle = windll.kernel32.OpenProcess(PQI, 0, processIDs)
# OK so now we have opened the process now we want to query it
# GetGuiResources flags: 0 = GDI objects, 1 = USER objects.
GR_GDIOBJECTS, GR_USEROBJECTS = 0, 1
print(windll.user32.GetGuiResources(OpenProcessHandle, GR_GDIOBJECTS))
print(windll.user32.GetGuiResources(OpenProcessHandle, GR_USEROBJECTS))
#so we have what we want we now close the process handle
windll.kernel32.CloseHandle(OpenProcessHandle)
hope that helps
For GDI count, I think a simpler, cleaner monitoring script is as follows:
import time, psutil
from ctypes import *
def getPID(processName):
    """Return the PID of the first process whose name contains
    *processName* (case-insensitive substring match), or None when no
    such process is running."""
    # Lower the target once instead of on every iteration.
    target = processName.lower()
    for proc in psutil.process_iter():
        try:
            if target in proc.name().lower():
                return proc.pid
        except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
            # Process exited or is inaccessible mid-scan -- skip it.
            pass
    # (Stray trailing ';' removed from the original 'return None;'.)
    return None
def getGDIcount(PID):
    """Return the current GDI-object count for process *PID*.

    Opens the process with PROCESS_QUERY_INFORMATION, queries
    GetGuiResources for GDI objects, and always closes the handle.
    """
    # Named constants instead of the original magic numbers 0x400 and 0.
    PROCESS_QUERY_INFORMATION = 0x400
    GR_GDIOBJECTS = 0
    PH = windll.kernel32.OpenProcess(PROCESS_QUERY_INFORMATION, 0, PID)
    try:
        return windll.user32.GetGuiResources(PH, GR_GDIOBJECTS)
    finally:
        # Guarantee the handle is released even if the query raises.
        windll.kernel32.CloseHandle(PH)
# Sample and print Outlook's GDI-object count once per second, forever.
# NOTE(review): if no Outlook process exists, getPID returns None and the
# loop below passes None to getGDIcount -- TODO confirm desired behavior.
PID = getPID('Outlook')
while True:
    GDIcount = getGDIcount(PID)
    print(f"{time.ctime()}, {GDIcount}")
    time.sleep(1)