Retrieve User Entry IDs from MAPI - python

I extended the win32comext MAPI with the interface IExchangeModifyTable to edit ACLs via MAPI. I can modify existing ACL entries, but I am stuck adding new ones. I need the user's entry ID to add an entry, according to this C example
(Example Source from MSDN)
STDMETHODIMP AddUserPermission(
    LPSTR szUserAlias,
    LPMAPISESSION lpSession,
    LPEXCHANGEMODIFYTABLE lpExchModTbl,
    ACLRIGHTS frights)
{
    HRESULT hr = S_OK;
    LPADRBOOK lpAdrBook;
    ULONG cbEid;
    LPENTRYID lpEid = NULL;
    SPropValue prop[2] = {0};
    ROWLIST rowList = {0};
    char szExName[MAX_PATH];
    // Replace with "/o=OrganizationName/ou=SiteName/cn=Recipients/cn="
    char* szServerDN = "/o=org/ou=site/cn=Recipients/cn=";
    strcpy(szExName, szServerDN);
    strcat(szExName, szUserAlias);
    // Open the address book.
    hr = lpSession->OpenAddressBook(0,
                                    0,
                                    MAPI_ACCESS_MODIFY,
                                    &lpAdrBook );
    if ( FAILED( hr ) ) goto cleanup;
    // Obtain the entry ID for the recipient.
    hr = HrCreateDirEntryIdEx(lpAdrBook,
                              szExName,
                              &cbEid,
                              &lpEid);
    if ( FAILED( hr ) ) goto cleanup;
    prop[0].ulPropTag = PR_MEMBER_ENTRYID;
    prop[0].Value.bin.cb = cbEid;
    prop[0].Value.bin.lpb = (BYTE*)lpEid;
    prop[1].ulPropTag = PR_MEMBER_RIGHTS;
    prop[1].Value.l = frights;
    rowList.cEntries = 1;
    rowList.aEntries->ulRowFlags = ROW_ADD;
    rowList.aEntries->cValues = 2;
    rowList.aEntries->rgPropVals = &prop[0];
    hr = lpExchModTbl->ModifyTable(0, &rowList);
    if(FAILED(hr)) goto cleanup;
    printf("Added user permission. \n");
cleanup:
    if (lpAdrBook)
        lpAdrBook->Release();
    return hr;
}
I can open the address book, but HrCreateDirEntryIdEx is not provided in the pywin32 MAPI. I found it in the Exchange extension, which does not compile on my system due to a missing library. Do you have any idea how to retrieve the user's entry ID?
Thanks,
Patrick

I got this piece of code and it works fine:
from binascii import b2a_hex, a2b_hex
import active_directory as ad

# entry_type, see http://msdn.microsoft.com/en-us/library/cc840018.aspx
# + AB_DT_CONTAINER 0x000000100
# + AB_DT_TEMPLATE  0x000000101
# + AB_DT_OOUSER    0x000000102
# + AB_DT_SEARCH    0x000000200
# ab_flags, maybe see here: https://svn.openchange.org/openchange/trunk/libmapi/mapidefs.h
def gen_exchange_entry_id(user_id, ab_flags=0, entry_type=0):
    muidEMSAB = "DCA740C8C042101AB4B908002B2FE182"
    version = 1
    # Find user and bail out if it's not there
    ad_obj = ad.find_user(user_id)
    if not ad_obj:
        return None
    return "%08X%s%08X%08X%s00" % (
        ab_flags,
        muidEMSAB,
        version,
        entry_type,
        b2a_hex(ad_obj.legacyExchangeDN.upper()).upper(),
    )

data = gen_exchange_entry_id("myusername")
print data
print len(a2b_hex(data))
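To sanity-check the layout, the generated ID can be unpacked back into the fields the function writes; a small sketch, mirroring the "%08X%s%08X%08X%s00" format above:
def parse_exchange_entry_id(eid_hex):
    ab_flags = int(eid_hex[0:8], 16)
    muid = eid_hex[8:40]           # the 16-byte muidEMSAB GUID
    version = int(eid_hex[40:48], 16)
    entry_type = int(eid_hex[48:56], 16)
    dn = a2b_hex(eid_hex[56:-2])   # hex-encoded legacyExchangeDN, minus the "00" terminator
    return ab_flags, muid, version, entry_type, dn

print parse_exchange_entry_id(data)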

Related

Why is flatbuffers output different from C++ in Python?

I use the same protocol file, but the output differs between Python and C++.
My protocol file:
namespace serial.proto.api.login;
table LoginReq {
    account:string; // account
    passwd:string;  // password
    device:string;  // device info
    token:string;
}
table LoginRsp {
    account:string; // account
    passwd:string;  // password
    device:string;  // device info
    token:string;
}
table LogoutReq {
    account:string;
}
table LogoutRsp {
    account:string;
}
My Python code:
import flatbuffers
import LoginReq  # generated by flatc; the full import path may include the serial.proto.api.login namespace

builder = flatbuffers.Builder()
account = builder.CreateString('test')
passwd = builder.CreateString('test')
device = builder.CreateString('test')
token = builder.CreateString('test')
LoginReq.LoginReqStart(builder)
LoginReq.LoginReqAddPasswd(builder, passwd)
LoginReq.LoginReqAddToken(builder, token)
LoginReq.LoginReqAddDevice(builder, device)
LoginReq.LoginReqAddAccount(builder, account)
login = LoginReq.LoginReqEnd(builder)
builder.Finish(login)
buf = builder.Output()
print(buf)
with open("layer.bin1", "wb") as f:
    f.write(buf)
My C++ code:
flatbuffers::FlatBufferBuilder builder;
auto account = builder.CreateString("test");
auto device = builder.CreateString("test");
auto passwd = builder.CreateString("test");
auto token = builder.CreateString("test");
auto l = CreateLoginReq(builder, account = account, passwd = passwd, device = device, token = token);
builder.Finish(l);
auto buf = builder.GetBufferPointer();
flatbuffers::SaveFile("layer.bin", reinterpret_cast<char *>(buf), builder.GetSize(), true);
output:
md5 layer.bin
MD5 (layer.bin) = 496e5031dda0f754fb4462fadce9e975
Flatbuffers generated by different implementations (i.e. generators) don't necessarily have the same binary layout, but can still be equivalent. It depends on how the implementation decides to write out the contents, so taking a hash of the binary is not going to tell you anything about equivalence.
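One way to actually check equivalence is to decode both files and compare field values instead of bytes; a minimal sketch, assuming the flatc-generated Python module LoginReq for the schema above is importable:
import LoginReq

def login_req_fields(buf):
    # Decode the four string fields from a serialized LoginReq.
    req = LoginReq.LoginReq.GetRootAsLoginReq(buf, 0)
    return (req.Account(), req.Passwd(), req.Device(), req.Token())

with open("layer.bin", "rb") as f:   # written by the C++ program
    cpp_buf = bytearray(f.read())
with open("layer.bin1", "rb") as f:  # written by the Python program
    py_buf = bytearray(f.read())

# The raw bytes (and therefore the MD5 sums) may differ; the decoded
# field values should match if the two buffers are equivalent.
print(login_req_fields(cpp_buf) == login_req_fields(py_buf))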

How to do cryptographic signature in Rust to avoid Python call

Why, in the bottom section of this code, do I need to use the following pattern?
let a = urlpath.to_string();
let b = nonce.to_string();
let c = ordertype.to_string();
let d = pair.to_string();
let e = price.to_string();
let f = type_.to_string();
let g = volume.to_string();
let h = api_sec.to_string();
let kwargs = vec![("cmd", "account_balance"), ("urlpath", &a), ("nonce", &b), ("ordertype", &c), ("pair", &d), ("price", &e), ("type", &f), ("volume", &g), ("secret", &h)];
If I replace the variable &a in the vec! with &urlpath.to_string(), it fails, saying a temporary value is being dropped while it is still in use.
But doesn't that expression evaluate to the same thing, regardless of whether I add the extra let statements? How can I make this more idiomatic Rust?
use std::time::{SystemTime, UNIX_EPOCH};
use pyo3::prelude::*;
use pyo3::types::IntoPyDict;

fn main() -> PyResult<()> {
    let urlpath = "/0/private/Balance";
    println!("{}", urlpath);
    let api_sec = "<REPLACE>";
    println!("{}", api_sec);
    let nonce = SystemTime::now().duration_since(UNIX_EPOCH).expect("Time went backwards").as_millis();
    println!("{}", nonce);
    let ordertype = "limit";
    println!("{}", ordertype);
    let pair = "XBTUSD";
    println!("{}", pair);
    let price: i32 = 37500;
    println!("{}", price);
    let type_ = "buy";
    println!("{}", type_);
    let volume = 1.25;
    println!("{}", volume);
    Python::with_gil(|py| {
        let fun: Py<PyAny> = PyModule::from_code(
            py,
            "
import urllib.parse
import hashlib
import hmac
import base64

def get_kraken_signature(*args, **kwargs):
    if args != ():
        print('called with args', args)
    if kwargs != {}:
        print('called with kwargs', kwargs)
    if args == () and kwargs == {}:
        print('called with no arguments')
    if kwargs[\"cmd\"] == \"account_balance\":
        urlpath = kwargs[\"urlpath\"]
        data = {
            \"nonce\": kwargs[\"nonce\"],
        }
        secret = kwargs[\"secret\"]
    elif kwargs[\"cmd\"] == \"send_order\":
        urlpath = kwargs[\"urlpath\"]
        data = {
            \"nonce\": kwargs[\"nonce\"],
            \"ordertype\": kwargs[\"ordertype\"],
            \"pair\": kwargs[\"pair\"],
            \"price\": kwargs[\"price\"],
            \"type\": kwargs[\"type\"],
            \"volume\": kwargs[\"volume\"],
        }
        secret = kwargs[\"secret\"]
    else:
        exit(0)
    postdata = urllib.parse.urlencode(data)
    encoded = (str(data['nonce']) + postdata).encode()
    message = urlpath.encode() + hashlib.sha256(encoded).digest()
    mac = hmac.new(base64.b64decode(secret), message, hashlib.sha512)
    sigdigest = base64.b64encode(mac.digest())
    print(\"API-Sign: {}\".format(sigdigest.decode()))
    return sigdigest.decode()
",
            "",
            "",
        )?.getattr("get_kraken_signature")?.into();
        let a = urlpath.to_string();
        let b = nonce.to_string();
        let c = ordertype.to_string();
        let d = pair.to_string();
        let e = price.to_string();
        let f = type_.to_string();
        let g = volume.to_string();
        let h = api_sec.to_string();
        let kwargs = vec![("cmd", "account_balance"), ("urlpath", &a), ("nonce", &b), ("ordertype", &c), ("pair", &d), ("price", &e), ("type", &f), ("volume", &g), ("secret", &h)];
        let result = fun.call(py, (), Some(kwargs.into_py_dict(py)))?;
        println!("{}", result);
        Ok(())
    })
}
BONUS: The second part of the question is: how can I rewrite the Python portion in idiomatic Rust? I have tried and failed, so it would be helpful if any crypto experts can assist.
First part (explanation):
Because a owns the value, a reference taken through it stays valid for as long as a is in scope. When you pass &urlpath.to_string() directly, the String is a temporary with no owner: it is dropped as soon as the enclosing statement ends, leaving a dangling reference inside the vec!, and that is what the error message is telling you.
Second part (Python to Rust conversion):
I am not a crypto expert, but I converted the same script you provided (without the condition part) and matched the output between Python and Rust.
extern crate url;
extern crate base64;
extern crate ring;
extern crate data_encoding;

// use std::time::{SystemTime, UNIX_EPOCH};
use url::form_urlencoded;
use sha2::{Sha256, Digest};
use ring::hmac;
use data_encoding::BASE64;
use std::collections::HashMap;

fn main() {
    let urlpath = String::from("/0/private/Balance");
    // let nonce = SystemTime::now()
    //     .duration_since(UNIX_EPOCH)
    //     .expect("Time went backwards")
    //     .as_millis();
    let nonce: &str = &(1645371362680 as i64).to_string();
    let mut data = HashMap::new();
    data.insert("nonce", nonce);
    let postdata: String = form_urlencoded::Serializer::new(String::new())
        .extend_pairs(data.iter())
        .finish();
    let encoded = format!("{}{}", nonce, postdata);
    let message: Vec<u8> = [urlpath.as_bytes(), Sha256::digest(encoded.as_bytes()).as_ref()].concat();
    // Note: the Python version base64-decodes the secret before keying the
    // HMAC; for a real (base64-encoded) Kraken secret, decode it here too.
    let secret_key = String::from("secret");
    let signed_key = hmac::Key::new(hmac::HMAC_SHA512, secret_key.as_bytes());
    let signature = hmac::sign(&signed_key, &message);
    let b64_encoded_sig = BASE64.encode(signature.as_ref());
    println!("Output: {}", b64_encoded_sig);
}
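For comparison, the question's Python signing routine as a standalone script, with the same hard-coded nonce as the Rust version above (keyed with the raw secret bytes to mirror the Rust sketch; the original base64-decodes a real Kraken secret first):
import base64
import hashlib
import hmac
import urllib.parse

urlpath = "/0/private/Balance"
nonce = "1645371362680"
data = {"nonce": nonce}
secret = b"secret"  # raw bytes, matching the Rust sketch above

postdata = urllib.parse.urlencode(data)
encoded = (nonce + postdata).encode()
message = urlpath.encode() + hashlib.sha256(encoded).digest()
mac = hmac.new(secret, message, hashlib.sha512)
print("Output:", base64.b64encode(mac.digest()).decode())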

Hashing algorithm Node.js vs Python

I was trying to convert a hashing algorithm written in Python to Node.js.
The Python code looks something like this:
import uuid
import hashlib
import struct
CLIENT_ID = uuid.UUID('c5f92e0d-e762-32cd-98cb-8c546c410dbe')
SECRET = uuid.UUID('2cf26ff5-bd06-3245-becf-4d5a3baa704f')
data = CLIENT_ID.bytes_le + SECRET.bytes_le + struct.pack("I", 2017) + struct.pack("I", 9) + struct.pack("I", 2)
token = str(uuid.UUID(bytes_le=hashlib.sha256(data).digest()[0:16]))
The token generated is 32d86f00-eb49-2739-e957-91513d2b9969
Here the struct.pack date values would normally be generated using datetime, but for convenience I have hard-coded them here.
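For reference, this is roughly how those values would come from datetime (a sketch; I am assuming the three packed values are year, month, day):
import struct
import datetime

today = datetime.date.today()  # hard-coded above as 2017, 9, 2
date_bytes = (struct.pack("I", today.year) +
              struct.pack("I", today.month) +
              struct.pack("I", today.day))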
I tried to convert this by looking at the Python docs for the respective libraries, and got as far as:
let CLIENT_ID = new Buffer('c5f92e0d-e762-32cd-98cb-8c546c410dbe');
let SECRET = new Buffer('2cf26ff5-bd06-3245-becf-4d5a3baa704f');
let d = new Buffer(2);
let m = new Buffer(9);
let y = new Buffer(2017);
let data = CLIENT_ID+SECRET+y+m+d;
const uuidv4 = require('uuid/v4');
const hash = crypto.createHash('sha256');
let token = uuidv4({random: hash.update(data, 'utf8').digest().slice(0, 16)}, 0);
And the hash it generates is b7b82474-eab4-4295-8318-cc258577ff9b
So basically I am miserably missing something in the Node.js part.
Could you please point out what went wrong? Thanks for the help.
There are a lot of missing parts, actually, as it turned out.
Node parts:
new Buffer('c5')
does not represent <Buffer c5>, but <Buffer 63 35>.
To write c5 you would need to use Buffer.from([0xc5]) or Buffer.from([197]) (dec).
new Buffer(2)
does not represent <Buffer 02>, it just allocates 2 bytes.
CLIENT_ID+SECRET+y+m+d
concatenation of buffers does not work that way.
Use an array of buffers and Buffer.concat([buffers]) to concatenate them.
uuid parts:
it turned out that uuid operates on a modified version of the buffers (the bytes_le part in the Python code)
The most interesting part:
in the Python version of uuid, if no version argument is passed to uuid.UUID(...), it generates an ID without fixing any bits.
According to RFC 4122 section 4.4, a UUID should have those bits fixed:
uuid.py skips RFC 4122 section 4.4
node-uuid/v4.js fixes the required bits
That way, even with identical sha256 results, the Python and Node implementations would still differ:
python: 32d86f00-eb49-2739-e957-91513d2b9969
node:   32d86f00-eb49-4739-a957-91513d2b9969
                      ^    ^
So I see two options here:
pass a version to the Python uuid (only for the last uuid call: uuid.UUID(bytes_le=..., version=4)); that way Python would return 32d86f00-eb49-4739-a957-91513d2b9969 (a sketch of this option follows the list)
if there's no way to change the source code of the Python project, I guess there's the option to fork uuid and remove the two bit-fixing lines in node-uuid/v4.js
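A quick sketch of the first option, reusing the question's values, to show the effect of version=4 on the final UUID:
import hashlib
import struct
import uuid

CLIENT_ID = uuid.UUID('c5f92e0d-e762-32cd-98cb-8c546c410dbe')
SECRET = uuid.UUID('2cf26ff5-bd06-3245-becf-4d5a3baa704f')
data = (CLIENT_ID.bytes_le + SECRET.bytes_le +
        struct.pack("I", 2017) + struct.pack("I", 9) + struct.pack("I", 2))
digest = hashlib.sha256(data).digest()[0:16]
print(uuid.UUID(bytes_le=digest))              # 32d86f00-eb49-2739-e957-91513d2b9969
print(uuid.UUID(bytes_le=digest, version=4))   # 32d86f00-eb49-4739-a957-91513d2b9969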
See the Node version of your code below:
const uuidv4 = require('uuid/v4');
const crypto = require('crypto');
const hash = crypto.createHash('sha256');
const client_id_hex_str = "c5f92e0d-e762-32cd-98cb-8c546c410dbe".replace(/-/g, "");
const secret_hex_str = "2cf26ff5-bd06-3245-becf-4d5a3baa704f".replace(/-/g, "");
let CLIENT_ID = Buffer.from(to_bytes_le(to_bytes(client_id_hex_str, null, 16, 'big')));
let SECRET = Buffer.from(to_bytes_le(to_bytes(secret_hex_str, null, 16, 'big')));
let d = Buffer.from(to_bytes(null, 2, 4));
let m = Buffer.from(to_bytes(null, 9, 4));
let y = Buffer.from(to_bytes(null, 2017, 4));
let data = Buffer.concat([CLIENT_ID, SECRET, y, m, d]);
let hashBytes = hash.update(data, 'utf8').digest().slice(0, 16);
hashBytes = [].slice.call(hashBytes, 0);
hashBytes = Buffer.from(to_bytes_le(hashBytes));
let token = uuidv4({random: hashBytes});
console.log(token);
// https://stackoverflow.com/questions/16022556/has-python-3-to-bytes-been-back-ported-to-python-2-7
function to_bytes(hexString, number, length, endianess) {
    if (hexString == null && number == null) {
        throw new Error("Missing hex string or number.");
    }
    if (!length || isNaN(length)) {
        throw new Error("Missing or invalid bytes array length number.");
    }
    if (hexString && typeof hexString != "string") {
        throw new Error("Invalid format for hex value.");
    }
    if (hexString == null) {
        if (isNaN(number)) {
            throw new Error("Invalid number.");
        }
        hexString = number.toString(16);
    }
    let byteArray = [];
    if (hexString.length % 2 !== 0) {
        hexString = '0' + hexString;
    }
    const bitsLength = length * 2;
    hexString = ("0".repeat(bitsLength) + hexString).slice(-1 * bitsLength);
    for (let i = 0; i < hexString.length; i += 2) {
        const byte = hexString[i] + hexString[i + 1];
        byteArray.push(parseInt(byte, 16));
    }
    if (endianess !== "big") {
        byteArray = byteArray.reverse();
    }
    return byteArray;
}

// https://github.com/python/cpython/blob/master/Lib/uuid.py#L258
function to_bytes_le(bytes) {
    const p1 = bytes.slice(0, 4).reverse();
    const p2 = bytes.slice(4, 6).reverse();
    const p3 = bytes.slice(6, 8).reverse();
    const p4 = bytes.slice(8);
    const bytes_le = [].concat.apply([], [p1, p2, p3, p4]);
    return bytes_le;
}
Do you want the hashing of the data to be the same as in the Python code above?
If not, you can take a look at the sha256 module for Node.js:
https://www.npmjs.com/package/sha256

Extending Postgresql Collector

I am getting two different results with the same query.
I am extending the Diamond PostgresqlCollector (https://github.com/python-diamond/Diamond/blob/master/src/collectors/postgres/postgres.py) in order to track a new metric.
Specifically, I am trying to implement the bloat estimate queries specified here: https://github.com/ioguix/pgsql-bloat-estimation/blob/master/table/table_bloat.sql
Where I am having trouble is that when I run the query from the psql command prompt, I get results that include the 'public' schemaname. But when the query is run by diamond, there are no results that include 'public'; entries only appear for pg_catalog and information_schema. I see this by checking the log at /var/log/upstart/diamond.log.
The only cause I can imagine is a permissions error for the 'diamond' user, but I can see at the psql command line that the user diamond exists and has the Superuser privilege. And I do get results from pg_catalog, so I can get some stats, just not from the public schema of the database I'm most interested in.
Has anyone extended the postgresql collector and seen this behavior, or have a suggestion of what to try next?
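A quick way to test the permissions theory outside of diamond is to run the same kind of query as the diamond user through psycopg2; a minimal sketch (the connection values are placeholders for whatever the collector actually uses):
import psycopg2

conn = psycopg2.connect(host="localhost", user="diamond", dbname="mydb")
cur = conn.cursor()
cur.execute("""
    SELECT schemaname, relname
    FROM pg_stat_all_tables
    WHERE schemaname NOT IN ('pg_catalog', 'information_schema')
    LIMIT 10;
""")
for row in cur.fetchall():
    print(row)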
Adding the relevant files here. The system I am testing on is a Vagrant machine, but I am using a Puppet file to replicate the production environment as closely as possible.
/etc/diamond/diamond.conf
[server]
pid_file = /var/run/diamond.pid
collectors_path = /usr/share/diamond/collectors/, /usr/local/share/diamond/collectors/
collectors_config_path = /etc/diamond/collectors/
handlers_path = /usr/share/diamond/handlers/
handlers_config_path = /etc/diamond/handlers/
handlers = diamond.handler.archive.ArchiveHandler
[handlers]
# logging handlers
keys = console
[[default]]
[[GraphitePickleHandler]]
host = graphite-01.local
port = 2014
timeout = 15
batch = 10
# ArchiveHandler writes stats to a local logfile.
# Much easier for testing and debugging.
[[ArchiveHandler]]
keys = watched_file
# File to write archive log files
log_file = /var/log/diamond/archive.log
[collectors]
[[default]]
hostname_method = fqdn_rev
interval = 60
[[CPUCollector]]
enabled = True
percore = True
[[DiskSpaceCollector]]
enabled = False
[[DiskUsageCollector]]
enabled = False
[[LoadAverageCollector]]
enabled = True
[[MemoryCollector]]
enabled = True
[[VMStatCollector]]
enabled = False
[[UserScriptsCollector]]
enabled = True
[loggers]
keys = root
[formatters]
keys = default
[logger_root]
level = INFO
handlers = console
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = default
[handler_watched_file]
class = handlers.WatchedFileHandler
level = DEBUG
formatter = default
[formatter_default]
format = [%(asctime)s] [%(levelname)s] [%(threadName)s] %(message)s
[configs]
path = "/etc/diamond/configs/"
extension = ".conf"
/etc/diamond/configs/postgres-service.conf
[collectors]
# Custom internal Postgresql collector. See diamond-service/files/collectors/custompg/custompg.py
[[CustomPostgresqlCollector]]
enabled = True
interval = 10
extended = True
metrics_blacklist = [^.]+\.indexes.*
pg_version = 9.3
user = diamond
# has_admin currently only controls if diamond should report how many WAL
# files exist on disk (although the query has a bug in it). However, as an
# unprivileged user, diamond can only see queries that are running as the same
# user. So in order to get the full picture of running queries on a multi-user
# system, diamond should have superuser privileges.
has_admin = False
/usr/local/share/diamond/collectors/custompg/custompg.py
import os
import sys

# Make sure we can import the existing postgres collector
try:
    import postgres
    from postgres import QueryStats, metrics_registry, registry
except ImportError:
    # It's likely that this is being imported in a test or script
    # outside of the normal diamond runpath.
    # In these instances, try to add COLLECTOR_PATH to path and import again.
    # i.e. export PYTHONPATH=$PYTHONPATH:/usr/share/diamond/collectors/postgres
    raise ImportError("Unable to import built-in postgres collector. "
                      "Make sure the collector path is added to PYTHONPATH.")
class CustomPostgresqlCollector(postgres.PostgresqlCollector):
    """
    Collector subclass to differentiate enabling/disabling
    company-specific Postgres metric collection.
    """
    # Even though nothing is being extended, this class is
    # still needed for the additional queries to get picked up
    # by Diamond.
    pass
class NonVacuumLongestRunningQueries(QueryStats):
    """
    Differentiate between vacuum and non-vacuum queries.
    The built-in longest running queries metric collection
    doesn't account for/filter vacuum operations.
    """
    path = "%(datname)s.non_vacuum.longest_running.%(metric)s"
    multi_db = True
    # This query is a modified version of
    # https://github.com/python-diamond/Diamond/blob/0fda1835308255e3ac4b287724340baf16b27bb1/src/collectors/postgres/postgres.py#L506-L519
    base_query = """
        SELECT 'query',
               COALESCE(max(extract(epoch FROM CURRENT_TIMESTAMP-query_start)),0)
        FROM pg_stat_activity
        WHERE %s
          AND %s
        UNION ALL
        SELECT 'transaction',
               COALESCE(max(extract(epoch FROM CURRENT_TIMESTAMP-xact_start)),0)
        FROM pg_stat_activity
        WHERE 1=1
          AND %s
    """
    exclude_vacuum_queries = "query NOT LIKE '%VACUUM%'"
    # Two query versions in case collector needs to run on Postgres < 9.2
    query = base_query % ("current_query NOT LIKE '<IDLE%'",
                          exclude_vacuum_queries,
                          exclude_vacuum_queries)
    post_92_query = base_query % ("state NOT LIKE 'idle%'",
                                  exclude_vacuum_queries,
                                  exclude_vacuum_queries)
class UserTableVacuumStats(QueryStats):
    """Additional per-table vacuuming stats."""
    path = "%(datname)s.tables.%(schemaname)s.%(relname)s.vacuum.%(metric)s"
    multi_db = True
    # http://www.postgresql.org/docs/9.3/static/monitoring-stats.html#PG-STAT-ALL-TABLES-VIEW
    # Also filter out generally non-volatile system tables.
    base_query = """
        SELECT relname, schemaname, vacuum_count, autovacuum_count
        FROM pg_stat_all_tables
        WHERE schemaname NOT IN ('pg_catalog', 'information_schema');
    """
    query = base_query
class TableBloatSize(QueryStats):
    """Track estimated table bloat size using modified query written by ioguix:
    https://github.com/ioguix/pgsql-bloat-estimation/blob/master/table/table_bloat.sql

    WARNING: executed with a non-superuser role, the query inspects only
    tables you are granted to read.
    """
    path = "%(datname)s.tables.%(schemaname)s.%(relname)s.%(metric)s"
    multi_db = True
    query = """
        SELECT schemaname, relname, (tblpages-est_tblpages_ff)*bs AS bloat_size
        FROM (
          SELECT ceil( reltuples / ( (bs-page_hdr)/tpl_size ) ) + ceil( toasttuples / 4 ) AS est_tblpages,
            ceil( reltuples / ( (bs-page_hdr)*fillfactor/(tpl_size*100) ) ) + ceil( toasttuples / 4 ) AS est_tblpages_ff,
            tblpages, fillfactor, bs, tblid, schemaname, relname, heappages, toastpages
          FROM (
            SELECT
              ( 4 + tpl_hdr_size + tpl_data_size + (2*ma)
                - CASE WHEN tpl_hdr_size%ma = 0 THEN ma ELSE tpl_hdr_size%ma END
                - CASE WHEN ceil(tpl_data_size)::int%ma = 0 THEN ma ELSE ceil(tpl_data_size)::int%ma END
              ) AS tpl_size, (heappages + toastpages) AS tblpages, heappages,
              toastpages, reltuples, toasttuples, bs, page_hdr, tblid, schemaname, relname, fillfactor
            FROM (
              SELECT
                tbl.oid AS tblid, ns.nspname AS schemaname, tbl.relname AS relname, tbl.reltuples,
                tbl.relpages AS heappages, coalesce(toast.relpages, 0) AS toastpages,
                coalesce(toast.reltuples, 0) AS toasttuples,
                coalesce(substring(
                  array_to_string(tbl.reloptions, ' ')
                  FROM '%fillfactor=#"__#"%' FOR '#')::smallint, 100) AS fillfactor,
                current_setting('block_size')::numeric AS bs,
                CASE WHEN version()~'mingw32' OR version()~'64-bit|x86_64|ppc64|ia64|amd64' THEN 8 ELSE 4 END AS ma,
                24 AS page_hdr,
                23 + CASE WHEN MAX(coalesce(null_frac,0)) > 0 THEN ( 7 + count(*) ) / 8 ELSE 0::int END
                   + CASE WHEN tbl.relhasoids THEN 4 ELSE 0 END AS tpl_hdr_size,
                sum( (1-coalesce(s.null_frac, 0)) * coalesce(s.avg_width, 1024) ) AS tpl_data_size
              FROM pg_attribute AS att
                JOIN pg_class AS tbl ON att.attrelid = tbl.oid
                JOIN pg_namespace AS ns ON ns.oid = tbl.relnamespace
                JOIN pg_stats AS s ON s.schemaname=ns.nspname
                  AND s.tablename = tbl.relname AND s.inherited=false AND s.attname=att.attname
                LEFT JOIN pg_class AS toast ON tbl.reltoastrelid = toast.oid
              WHERE att.attnum > 0 AND NOT att.attisdropped
                AND tbl.relkind = 'r'
              GROUP BY 1,2,3,4,5,6,7,8,9,10, tbl.relhasoids
              ORDER BY 2,3
            ) AS s
          ) AS s2
        ) AS s3
        WHERE schemaname='public';
    """
class BtreeBloatSize(QueryStats):
    """Track estimated index bloat size using modified query written by ioguix:
    https://github.com/ioguix/pgsql-bloat-estimation/blob/master/btree/btree_bloat.sql

    WARNING: executed with a non-superuser role, the query inspects only
    indexes on tables you are granted to read.
    WARNING: rows with is_na = 't' are known to have bad statistics ("name"
    type is not supported). Not relevant to the public schema.
    """
    path = "%(datname)s.tables.%(schemaname)s.%(relname)s.%(indexrelname)s.%(metric)s"
    multi_db = True
    query = """
        SELECT nspname AS schemaname, relname, indexrelname,
               bs*(relpages-est_pages_ff) AS bloat_size
        FROM (
          SELECT coalesce(1 +
               ceil(reltuples/floor((bs-pageopqdata-pagehdr)*fillfactor/(100*(4+nulldatahdrwidth)::float))), 0
            ) AS est_pages_ff,
            bs, nspname, relname, indexrelname, relpages, fillfactor
          FROM (
            SELECT maxalign, bs, nspname, relname, indexrelname, reltuples, relpages, relam, fillfactor,
              ( index_tuple_hdr_bm +
                maxalign - CASE -- Add padding to the index tuple header to align on MAXALIGN
                  WHEN index_tuple_hdr_bm%maxalign = 0 THEN maxalign
                  ELSE index_tuple_hdr_bm%maxalign
                END
                + nulldatawidth + maxalign - CASE -- Add padding to the data to align on MAXALIGN
                  WHEN nulldatawidth = 0 THEN 0
                  WHEN nulldatawidth::integer%maxalign = 0 THEN maxalign
                  ELSE nulldatawidth::integer%maxalign
                END
              )::numeric AS nulldatahdrwidth, pagehdr, pageopqdata
            FROM (
              SELECT
                i.nspname, i.relname, i.indexrelname, i.reltuples, i.relpages, i.relam,
                current_setting('block_size')::numeric AS bs, fillfactor,
                CASE
                  -- MAXALIGN: 4 on 32bits, 8 on 64bits (and mingw32 ?)
                  WHEN version() ~ 'mingw32' OR version() ~ '64-bit|x86_64|ppc64|ia64|amd64' THEN 8
                  ELSE 4
                END AS maxalign,
                /* per page header, fixed size: 20 for 7.X, 24 for others */
                24 AS pagehdr,
                /* per page btree opaque data */
                16 AS pageopqdata,
                /* per tuple header: add IndexAttributeBitMapData if some cols are null-able */
                CASE WHEN max(coalesce(s.null_frac,0)) = 0
                  -- IndexTupleData size
                  THEN 2
                  /* IndexTupleData size + IndexAttributeBitMapData size ( max num filed per index + 8 - 1 /8) */
                  ELSE 2 + (( 32 + 8 - 1 ) / 8)
                END AS index_tuple_hdr_bm,
                /* data len: we remove null values save space using it fractionnal part from stats */
                sum( (1-coalesce(s.null_frac, 0)) * coalesce(s.avg_width, 1024)) AS nulldatawidth
              FROM pg_attribute AS a
                JOIN (
                  SELECT nspname, tbl.relname AS relname, idx.relname AS indexrelname, idx.reltuples, idx.relpages, idx.relam,
                    indrelid, indexrelid, indkey::smallint[] AS attnum,
                    coalesce(substring(
                      array_to_string(idx.reloptions, ' ')
                      from 'fillfactor=([0-9]+)')::smallint, 90) AS fillfactor
                  FROM pg_index
                    JOIN pg_class idx ON idx.oid=pg_index.indexrelid
                    JOIN pg_class tbl ON tbl.oid=pg_index.indrelid
                    JOIN pg_namespace ON pg_namespace.oid = idx.relnamespace
                  WHERE pg_index.indisvalid AND tbl.relkind = 'r' AND idx.relpages > 0
                ) AS i ON a.attrelid = i.indexrelid
                JOIN pg_stats AS s ON s.schemaname = i.nspname
                  AND ((s.tablename = i.relname AND s.attname = pg_catalog.pg_get_indexdef(a.attrelid, a.attnum, TRUE)) -- stats from tbl
                    OR (s.tablename = i.indexrelname AND s.attname = a.attname)) -- stats from functional cols
                JOIN pg_type AS t ON a.atttypid = t.oid
              WHERE a.attnum > 0
              GROUP BY 1, 2, 3, 4, 5, 6, 7, 8, 9
            ) AS s1
          ) AS s2
          JOIN pg_am am ON s2.relam = am.oid WHERE am.amname = 'btree'
        ) AS sub
        WHERE nspname='public'
        ORDER BY 1,2,3;
    """
# Add the new metric queries to the
# registered set used by the collecting method.
metrics_registry.update({
    'NonVacuumLongestRunningQueries': NonVacuumLongestRunningQueries,
    'UserTableVacuumStats': UserTableVacuumStats,
    'TableBloatSize': TableBloatSize,
    'BtreeBloatSize': BtreeBloatSize,
})
registry['extended'] += ['NonVacuumLongestRunningQueries',
                         'UserTableVacuumStats',
                         'TableBloatSize',
                         'BtreeBloatSize']

How to access WMI in Python?

So I am trying to access the data from here in Python. As you can see, it uses WMI. I have tried to use wmi in Python before, but I am having trouble interpreting the data it gives me. Please be patient with me, as I am new to how WMI works. It says that the WMI data is stored in root/OpenHardwareMonitor and that it uses two different WMI classes (Hardware and Sensor). But all this information is going over my head.
Could someone please give me some sample code to read some data from this?
For example, the code to check CPU core 1's frequency.
EDIT: I have sort of got it working. I run this code:
for Temperature in c.sensor():
    print Temperature.identifier
    print Temperature.value
and I get this:
/hdd/0/load/0
37.6608924866
/intelcpu/0/temperature/1
53.0
/intelcpu/0/temperature/0
42.0
/ram/data/1
2.88324356079
/intelcpu/0/load/2
1.53846144676
/hdd/0/temperature/0
43.0
/intelcpu/0/load/0
2.30768918991
/intelcpu/0/clock/1
1463.29663086
/intelcpu/0/clock/0
133.02696228
/intelcpu/0/clock/2
1463.29663086
/ram/load/0
49.224521637
/ram/data/0
2.79517364502
/intelcpu/0/load/1
3.07692289352
How can I request only the value associated with the identifier /intelcpu/0/temperature/1, ignoring all other values?
The simplest example of using WMI:
import wmi

c = wmi.WMI()
wql = "Select * From Win32_SerialPort"
for item in c.query(wql):
    print item
Output Example:
instance of Win32_SerialPort
{
Availability = 2;
Binary = TRUE;
Caption = "SpectrumAnalyzer1 (COM15)";
ConfigManagerErrorCode = 0;
ConfigManagerUserConfig = FALSE;
CreationClassName = "Win32_SerialPort";
Description = "SpectrumAnalyzer1";
DeviceID = "COM15";
MaxBaudRate = 128000;
MaximumInputBufferSize = 0;
MaximumOutputBufferSize = 0;
Name = "SpectrumAnalyzer1 (COM15)";
OSAutoDiscovered = TRUE;
PNPDeviceID = "USB\\VID_10C4&PID_ED00\\1269376";
PowerManagementCapabilities = {1};
PowerManagementSupported = FALSE;
ProviderType = "RS232 Serial Port";
SettableBaudRate = TRUE;
SettableDataBits = TRUE;
SettableFlowControl = TRUE;
SettableParity = TRUE;
SettableParityCheck = TRUE;
SettableRLSD = TRUE;
SettableStopBits = TRUE;
Status = "OK";
StatusInfo = 3;
Supports16BitMode = FALSE;
SupportsDTRDSR = TRUE;
SupportsElapsedTimeouts = TRUE;
SupportsIntTimeouts = TRUE;
SupportsParityCheck = TRUE;
SupportsRLSD = TRUE;
SupportsRTSCTS = TRUE;
SupportsSpecialCharacters = TRUE;
SupportsXOnXOff = TRUE;
SupportsXOnXOffSet = TRUE;
SystemCreationClassName = "Win32_ComputerSystem";
SystemName = ".......";
};
You can access a field of each returned item, e.g.:
myQuery = c.query(wql)
print myQuery[0].Availability
Output:
2
For more information, try the WMI cookbook.
Edit #1:
Using if statements and the in operator, you can do what you want:
for Temperature in c.sensor():
    if "/intelcpu/0/temperature/1" in Temperature.identifier:
        print Temperature.identifier
        print Temperature.value
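Alternatively, a sketch (untested): the wmi module can filter on a property server-side by passing it as a keyword argument, using the root/OpenHardwareMonitor namespace mentioned in the question:
import wmi

# Connect to the OpenHardwareMonitor namespace instead of the default cimv2.
c = wmi.WMI(namespace="root\\OpenHardwareMonitor")
# Keyword arguments become a WQL WHERE clause, so only the matching sensor comes back.
for sensor in c.Sensor(Identifier="/intelcpu/0/temperature/1"):
    print sensor.Value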
