python, zenoss: build a new dictionary from an old dictionary

I have a dictionary with the following structure:
Data2 = {
    '1.1.1': {'unitReturnAirTemperature': 224, 'unitAirTemperature': 224, 'unitHumidity': 430, 'unitReturnAirHumidity': 431},
    '1.1': {'unitName': 'Unit-01'},
    '1.2': {'unitName': 'Unit-02'},
    '1.2.1': {'unitReturnAirTemperature': 215, 'unitAirTemperature': 224, 'unitHumidity': 431, 'unitReturnAirHumidity': 399}
}
And I would like to get the following:
Data3 = {
    '1.1.1': {'unitReturnAirTemperature': 224, 'unitAirTemperature': 224, 'unitHumidity': 430, 'unitReturnAirHumidity': 431, 'unitName': 'Unit-01'},
    '1.2.1': {'unitReturnAirTemperature': 215, 'unitAirTemperature': 224, 'unitHumidity': 431, 'unitReturnAirHumidity': 399, 'unitName': 'Unit-02'}
}
The new dictionary (Data3) should be built from the data in Data2: each leaf key (e.g. '1.1.1') keeps its readings and picks up the unitName of its parent key (e.g. '1.1').
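A minimal sketch of one way to build Data3 (my own addition, assuming the parent key is always the leaf key with its last dotted component removed):
Data3 = {}
for key, values in Data2.items():
    if 'unitName' in values:
        continue  # parent entries only carry the unit name
    parent = key.rsplit('.', 1)[0]        # '1.1.1' -> '1.1'
    merged = dict(values)                 # copy the sensor readings
    merged.update(Data2.get(parent, {}))  # attach unitName from the parent, if present
    Data3[key] = merged
print(Data3)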

Getting AttributeError while calling RandomForest()

I have been trying to do hyperopt tuning with the following models, but I keep getting this traceback. I have tried changing the parameters and added different code for n_estimators, but to no avail. I am not able to solve it with any of the solutions available online.
# Defining Search Space
space = hp.choice('classifiers', [
    {
        'model': LogisticRegression(),
        'params': {
            'model__penalty': hp.choice('lr.penalty', ['l2']),
            'model__C': hp.choice('lr.C', np.arange(0.005, 1.0, 0.01))
        }
    },
    {
        'model': BernoulliNB(),
        'params': {}
    },
    {
        'model': tree.DecisionTreeClassifier(),
        'params': {
            'model__max_depth': hp.choice('tree.max_depth', range(5, 30, 1)),
        }
    },
    {
        'model': xgb.XGBClassifier(),
        'params': {
            'model__max_depth': hp.choice('xgb.max_depth', range(5, 30, 1)),
            'model__learning_rate': hp.loguniform('learning_rate', 0.01, 0.5),
            'model__gamma': hp.loguniform('xbg.gamma', 0.0, 2.0),
            'model__random_state': 42
        }
    },
    # {
    #     'model': GradientBoostingClassifier(),
    #     'params': {
    #         'model__n_estimators': hp.uniformint('n_estimators', 100, 500),
    #         'model__max_depth': hp.uniformint('max_depth', 2, 20),
    #         'model__random_state': 42
    #     }
    # },
    {
        'model': RandomForestClassifier(),
        'params': {
            'model__n_estimators': hp.randint('rf.n_estimators_', [100, 200, 300, 400]),
            'model__max_depth': hp.uniformint('rf.max_depth', 2, 20),
            'model__min_samples_split': hp.uniformint('rf.min_samples_split', 2, 10),
            'model__bootstrap': hp.choice('rf.bootstrap', [True, False]),
            'model__max_features': hp.choice('rf.max_features', ['auto', 'sqrt']),
            'model__random_state': np.random.RandomState(42)
        }
    }
])
Traceback (most recent call last):
File "<input>", line 4, in <module>
File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/hyperopt/pyll_utils.py", line 18, in wrapper
return f(label, *args, **kwargs)
File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/hyperopt/pyll_utils.py", line 72, in hp_choice
return scope.switch(ch, *options)
File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/hyperopt/pyll/base.py", line 188, in __call__
return self.symbol_table._new_apply(
File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/hyperopt/pyll/base.py", line 61, in _new_apply
pos_args = [as_apply(a) for a in args]
File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/hyperopt/pyll/base.py", line 61, in <listcomp>
pos_args = [as_apply(a) for a in args]
File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/hyperopt/pyll/base.py", line 211, in as_apply
named_args = [(k, as_apply(v)) for (k, v) in items]
File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/hyperopt/pyll/base.py", line 211, in <listcomp>
named_args = [(k, as_apply(v)) for (k, v) in items]
File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/hyperopt/pyll/base.py", line 217, in as_apply
rval = Literal(obj)
File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/hyperopt/pyll/base.py", line 534, in __init__
o_len = len(obj)
File "/Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/sklearn/ensemble/_base.py", line 195, in __len__
return len(self.estimators_)
AttributeError: 'RandomForestClassifier' object has no attribute 'estimators_'
I have tried everything at this point and would appreciate any/all help. Thank you!
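Reading the traceback, it looks like the problem is that the hp.choice options contain unfitted sklearn estimator instances: hyperopt wraps each value in a Literal and calls len() on it, and the ensemble's __len__ touches .estimators_, which only exists after fitting. A hedged sketch of one common workaround (my own code, not the poster's): keep only plain labels and hyperopt expressions inside the search space, and construct the estimator inside the objective.
from hyperopt import hp
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression

# Search space holds only labels and hyperopt expressions, no estimator instances.
space = hp.choice('classifiers', [
    {
        'name': 'logistic_regression',
        'params': {
            'C': hp.uniform('lr.C', 0.005, 1.0),
        },
    },
    {
        'name': 'random_forest',
        'params': {
            'n_estimators': hp.choice('rf.n_estimators', [100, 200, 300, 400]),
            'max_depth': hp.uniformint('rf.max_depth', 2, 20),
        },
    },
])

def objective(cfg):
    # Build the estimator from plain Python values only inside the objective.
    builders = {
        'logistic_regression': LogisticRegression,
        'random_forest': RandomForestClassifier,
    }
    model = builders[cfg['name']](**cfg['params'])
    ...  # fit/score the model here and return a loss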

how to add dictionary object name to json object

I have 3 python dictionaries as below:
gender = {'Female': 241, 'Male': 240}
marital_status = {'Divorced': 245, 'Engaged': 243, 'Married': 244, 'Partnered': 246, 'Single': 242}
family_type = {'Extended': 234, 'Joint': 235, 'Nuclear': 233, 'Single Parent': 236}
I add them to a list:
lst = [gender, marital_status, family_type]
And I create a JSON string, which I need to save as a JSON file, using json.dumps:
jf = json.dumps(lst, indent = 4)
When we look at the jf object:
print(jf)
[
    {
        "Female": 241,
        "Male": 240
    },
    {
        "Divorced": 245,
        "Engaged": 243,
        "Married": 244,
        "Partnered": 246,
        "Single": 242
    },
    {
        "Extended": 234,
        "Joint": 235,
        "Nuclear": 233,
        "Single Parent": 236
    }
]
Is there a way to use each dictionary's name as the key and get output like the following:
{
    "gender": {
        "Female": 241,
        "Male": 240
    },
    "marital_status": {
        "Divorced": 245,
        "Engaged": 243,
        "Married": 244,
        "Partnered": 246,
        "Single": 242
    },
    "family_type": {
        "Extended": 234,
        "Joint": 235,
        "Nuclear": 233,
        "Single Parent": 236
    }
}
You'll have to do this manually, by creating a dictionary that maps each name to its sub-dictionary yourself.
my_data = {'gender': gender, 'marital_status':marital_status, 'family_type': family_type}
Edit: example of writing to an output file using json.dump:
with open('myfile.json', 'w') as writer:
    json.dump(my_data, writer)
As per your requirement, you can do it like this, replacing the lst line:
dict_req = {"gender":gender, "marital_status":marital_status, "family_type":family_type}
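A minimal end-to-end sketch of the whole flow (my own addition; 'myfile.json' is an assumed filename):
import json

gender = {'Female': 241, 'Male': 240}
marital_status = {'Divorced': 245, 'Engaged': 243, 'Married': 244, 'Partnered': 246, 'Single': 242}
family_type = {'Extended': 234, 'Joint': 235, 'Nuclear': 233, 'Single Parent': 236}

my_data = {'gender': gender, 'marital_status': marital_status, 'family_type': family_type}

print(json.dumps(my_data, indent=4))      # matches the desired output above
with open('myfile.json', 'w') as writer:
    json.dump(my_data, writer, indent=4)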

How to take input from dictionaries and print the desired value? I want to use the input function and print "zinger burger"

How do I take input from a nested dictionary and print the desired value? I want to use the input function and print "zinger burger".
menu = {
    "burger": {
        "zinger burger": 230,
        "zinger cheese burger": 260,
        "thames special burger": 320,
        "beef burger": 250,
        "tower burger": 320,
        "fish burger": 260,
        "fish cheese burger": 290,
        "fire stone burger": 170,
        "crispy burger": 170,
        "chicker burger": 180,
        "tikka burger": 170,
        "shami burger": 170,
    },
    "steaks": {
        "Arizon steak": 650,
        "Mushroom steak": 650,
        "Pepper steak ": 650,
        "Polo tuscany": 650,
    }
}
Using the [] operator will allow you to get data out of nested dictionaries:
menutype = input("burger or steaks? ")
foodtype = input("what specifically? ")
print(menu[menutype][foodtype])
If I read between the lines, you'd like to take e.g. s = 'zinger burger' and return something like 230. If so:
s = 'zinger burger'
sub = {k: p for _, d in menu.items() for k, p in d.items() if s.lower() in k.lower()}
sub
# out:
{'zinger burger': 230}
# and
s = 'Fish'
# out:
{'fish burger': 260, 'fish cheese burger': 290}
When there are multiple matches, you have some choices, including: take the result whose key is closest to your s input (by length, or by Levenshtein distance), or simply take the maximum. A sketch of the closest-key option follows below.
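A hedged sketch of that closest-key option (my own addition), using the standard library's difflib instead of an external Levenshtein package; menu is the dictionary from the question:
import difflib

def closest_price(menu, query):
    # Flatten all item -> price pairs across the menu categories.
    prices = {item: price for category in menu.values() for item, price in category.items()}
    match = difflib.get_close_matches(query.lower(), list(prices), n=1, cutoff=0.4)
    return prices[match[0]] if match else None

print(closest_price(menu, 'zinger burer'))   # 230, despite the typo in the query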

Error opening a dictionary from an external json file

I want Python to load my dictionary from an external JSON file.
This is with python 3.7.2 in anaconda spyder on windows.
This is the content of my file:
r{"0" : {"-25 : 144, 0 : 182, 25 : 224, 50 : 272"}}
r{"1000" : {"-25 : 157, 0 : 198, 25 : 245, 50 : 297"}}
r{"2000" : {"-25 : 172, 0 : 216, 25 : 267, 50 : 324"}}
r{"3000" : {"-25 : 188, 0 : 236, 25 : 292, 50 : 354"}}
r{"4000" : {"-25 : 205, 0 : 258, 25 : 319, 50 : 387"}}
r{"5000" : {"-25 : 224, 0 : 283, 25 : 349, 50 : 423"}}
r{"6000" : {"-25 : 246, 0 : 309, 25 : 381, 50 : 463"}}
r{"7000" : {"-25 : 269, 0 : 339, 25 : 418, 50 : 507"}}
r{"8000" : {"-25 : 295, 0 : 371, 25 : 458, 50 : 555"}}
r{"9000" : {"-25 : 323, 0 : 407, 25 : 502, 50 : 609"}}
r{"10000" : {"-25 : 354, 0 : 446, 25 : 551, 50 : 668"}}
I wanted to open my file with the following code:
togr650 = {}
with open("to_gr_650.json", "r") as config_file:
    togr650 = json.load(config_file)
print(togr650)
Unfortunately, the following error appeared:
Traceback (most recent call last):
File "<ipython-input-29-098635d60a19>", line 1, in <module>
runfile('C:/Users/***/Desktop/take_off_distance.py', wdir='C:/Users/***/Desktop')
File "C:\Users\***\Anaconda3\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 827, in runfile
execfile(filename, namespace)
File "C:\Users\***\Anaconda3\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 110, in execfile
exec(compile(f.read(), filename, 'exec'), namespace)
File "C:/Users/***/Desktop/take_off_distance.py", line 26, in <module>
togr650 = json.load(config_file)
File "C:\Users\***\Anaconda3\lib\json\__init__.py", line 296, in load
parse_constant=parse_constant, object_pairs_hook=object_pairs_hook, **kw)
File "C:\Users\***\Anaconda3\lib\json\__init__.py", line 348, in loads
return _default_decoder.decode(s)
File "C:\Users\***\Anaconda3\lib\json\decoder.py", line 337, in decode
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
File "C:\Users\***\Anaconda3\lib\json\decoder.py", line 355, in raw_decode
raise JSONDecodeError("Expecting value", s, err.value) from None
JSONDecodeError: Expecting value
How do I have to change my code to make it work properly?
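For reference, a minimal sketch based on my assumption about the intended structure: the file shown above is not valid JSON (the leading r, one standalone object per line, and the quoted "-25 : 144, ..." runs all break the parser), while a single object like the one below loads cleanly.
import json

# to_gr_650.json rewritten as one valid JSON object (first two rows shown; keys must
# be quoted strings and the inner values must be numbers, not one big string).
valid_text = '''
{
    "0":    {"-25": 144, "0": 182, "25": 224, "50": 272},
    "1000": {"-25": 157, "0": 198, "25": 245, "50": 297}
}
'''

togr650 = json.loads(valid_text)
print(togr650["1000"]["25"])   # 245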

Partial updating of an object in Elasticsearch using Python

So the puamapi/apiobjects_american/4901 object looks like this:
{
    "_id": "4701",
    "_index": "puamapi",
    "_source": {
        "CatRais": null,
        "Classification": "Photographs",
        "Constituents": [],
        "CreditLine": "Gift of H. Kelley Rollings, Class of 1948, and Mrs. Rollings",
        "CuratorApproved": 0,
        "DateBegin": 1921,
        "DateEnd": 1921,
        "Dated": "1921",
        "Department": "Photography",
        "DimensionsLabel": "image: 19.3 x 24.6 cm (7 5/8 x 9 11/16 in.)\r\nsheet: 20.2 x 25.4 cm (7 15/16 x 10 in.)",
        "Edition": null,
        "Medium": "Gelatin silver print",
        "ObjectID": 4701,
        "ObjectNumber": "1995-341",
        "ObjectStatus": "Accessioned Object",
        "Restrictions": "Restricted",
        "SortNumber": " 1995 341",
        "SysTimeStamp": "AAAAAAAAC3k="
    },
    "_type": "apiobjects_american",
    "_version": 4,
    "found": true
}
I want to do a partial update on the object that adds a constituent to the Constituents array.
The record looks like this:
{'params': {'item': [{'ConstituentID': 5}]}, 'script': 'if (ctx._source[Constituents] == null) {ctx._source.Constituents = item } else { ctx._source.Constituents+= item }'}
And then I send it with an Elasticsearch client instance in Python:
es.update(index="puamapi", doc_type="apiobjects_american", id=4901, body=record)
But I'm getting this error:
Traceback (most recent call last):
File "json_to_elasticsearch.py", line 138, in <module>
load_xrefs(api_xrefs)
File "json_to_elasticsearch.py", line 118, in load_xrefs
load_xref(table, xref_map[table][0], xref_map[table][1], json.load(file)["RECORDS"])
File "json_to_elasticsearch.py", line 109, in load_xref
es.update(index=database, doc_type=table1, id=id1, body=record)
File "/usr/local/lib/python2.7/dist-packages/elasticsearch/client/utils.py", line 69, in _wrapped
return func(*args, params=params, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/elasticsearch/client/__init__.py", line 460, in update
doc_type, id, '_update'), params=params, body=body)
File "/usr/local/lib/python2.7/dist-packages/elasticsearch/transport.py", line 329, in perform_request
status, headers, data = connection.perform_request(method, url, params, body, ignore=ignore, timeout=timeout)
File "/usr/local/lib/python2.7/dist-packages/elasticsearch/connection/http_urllib3.py", line 109, in perform_request
self._raise_error(response.status, raw_data)
File "/usr/local/lib/python2.7/dist-packages/elasticsearch/connection/base.py", line 108, in _raise_error
raise HTTP_EXCEPTIONS.get(status_code, TransportError)(status_code, error_message, additional_info)
elasticsearch.exceptions.RequestError: TransportError(400, u'illegal_argument_exception', u'[Bastion][127.0.0.1:9300][indices:data/write/update[s]]')
Any insights would be appreciated. Thanks!
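A hedged sketch of a script-free alternative (my own addition, not from the post): read the document, append to Constituents in Python, and send a plain "doc" partial update. Index, type, and id values are taken from the question.
from elasticsearch import Elasticsearch

es = Elasticsearch()

# Fetch the current document, extend the array locally, then push it back.
doc = es.get(index="puamapi", doc_type="apiobjects_american", id=4901)["_source"]
constituents = doc.get("Constituents") or []
constituents.append({"ConstituentID": 5})

es.update(
    index="puamapi",
    doc_type="apiobjects_american",
    id=4901,
    body={"doc": {"Constituents": constituents}},
)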
