I need to read bytes from a file and compare them with a dictionary (encoding from Kamenicky to CP1250). This code throws the error "TypeError: string indices must be integers, not str". Please do not mention "hardcoded" paths as they are here for testing purposes only. Can you please help me and tell me what is wrong in this Python code?
def Convert(file):
    """Convert a file from the Kamenicky encoding to CP1250.

    Reads "test.csv" as raw bytes, replaces every byte that appears in the
    Kamenicky -> CP1250 translation table with its CP1250 value, passes all
    other bytes through unchanged, and writes the result to "new.csv".
    (The paths are hardcoded deliberately, for testing.)

    Fixes over the original:
      * ``bytes({...})`` builds a bytes object from the dict KEYS only,
        discarding the mapping entirely — a plain dict must be kept.
      * the output was accumulated in a ``str`` while iterating a bytes
        object yields ``int`` in Python 3 (and a 1-char ``str`` in
        Python 2, where ``kamenicky[byte]`` then raised the reported
        "string indices must be integers" TypeError); the translation is
        now done int -> int and collected in a ``bytearray``.
      * the output file is written as bytes, matching its ``'wb'`` mode.
    """
    # Kamenicky byte -> CP1250 byte translation table.
    kamenicky = {128: 185,
                 129: 252,
                 130: 233,
                 131: 239,
                 132: 228,
                 133: 192,
                 134: 141,
                 135: 232,
                 136: 236,
                 137: 189,
                 138: 188,
                 139: 237,
                 140: 190,
                 141: 229,
                 142: 181,
                 143: 178,
                 144: 186,
                 145: 158,
                 146: 142,
                 147: 244,
                 148: 246,
                 149: 211,
                 150: 249,
                 151: 218,
                 152: 253,
                 153: 214,
                 154: 220,
                 155: 138,
                 156: 188,
                 157: 221,
                 158: 216,
                 159: 157,
                 160: 225,
                 161: 237,
                 162: 243,
                 163: 250,
                 164: 242,
                 165: 210,
                 166: 217,
                 167: 212,
                 168: 154,
                 169: 248,
                 170: 224}
    with open("test.csv", 'rb') as f:
        data = f.read()
    # dict.get(b, b) translates mapped bytes and passes the rest through.
    out = bytearray(kamenicky.get(b, b) for b in data)
    with open("new.csv", 'wb') as w:
        w.write(bytes(out))
Almost the same code works in C#:
// Translates a file from the Kamenicky (KEYBCS2) encoding to Windows CP1250,
// rewriting the file in place.
class Kamenicky
{
// Kamenicky byte -> CP1250 byte translation table; bytes not present here
// are copied through unchanged.
Dictionary<byte, byte> kamenicky = new Dictionary<byte, byte> {
{128, 185},
{129, 252},
{130, 233},
{131, 239},
{132, 228},
{133, 192},
{134, 141},
{135, 232},
{136, 236},
{137, 189},
{138, 188},
{139, 237},
{140, 190},
{141, 229},
{142, 181},
{143, 178},
{144, 186},
{145, 158},
{146, 142},
{147, 244},
{148, 246},
{149, 211},
{150, 249},
{151, 218},
{152, 253},
{153, 214},
{154, 220},
{155, 138},
{156, 188},
{157, 221},
{158, 216},
{159, 157},
{160, 225},
{161, 237},
{162, 243},
{163, 250},
{164, 242},
{165, 210},
{166, 217},
{167, 212},
{168, 154},
{169, 248},
{170, 224}
};
// Reads the whole file as bytes, maps each byte through the table
// (unmapped bytes pass through), then overwrites the file with the result.
public void KamenickyToCP1250(string file)
{
List<Byte> temp = new List<byte>();
byte[] ByteFile = File.ReadAllBytes(file);
foreach (byte BYTE in ByteFile)
{
// Translate mapped bytes; keep everything else as-is.
if (kamenicky.ContainsKey(BYTE)) { temp.Add(kamenicky[BYTE]); continue; }
temp.Add(BYTE);
}
// Replace the original file with the converted bytes.
File.Delete(file);
File.WriteAllBytes(file, temp.ToArray());
}
}
You need to convert each byte from a single-character string to an integer. One way to do it is as follows:
with open("test.csv", 'rb') as f:
for ch in f.read():
byte = ord(ch)
...
Related
I have 3 python dictionaries as below:
gender = {'Female': 241, 'Male': 240}
marital_status = {'Divorced': 245, 'Engaged': 243, 'Married': 244, 'Partnered': 246, 'Single': 242}
family_type = {'Extended': 234, 'Joint': 235, 'Nuclear': 233, 'Single Parent': 236}
I add them to a list:
lst = [gender, marital_status, family_type]
And create a JSON object which I need to save as a JSON file using pd.to_json using:
jf = json.dumps(lst, indent = 4)
When we look at jf object:
print(jf)
[
{
"Female": 241,
"Male": 240
},
{
"Divorced": 245,
"Engaged": 243,
"Married": 244,
"Partnered": 246,
"Single": 242
},
{
"Extended": 234,
"Joint": 235,
"Nuclear": 233,
"Single Parent": 236
}
]
Is there a way to make the dictionary name as key and get output as below:
{
"gender": {
"Female": 241,
"Male": 240
},
"marital_status": {
"Divorced": 245,
"Engaged": 243,
"Married": 244,
"Partnered": 246,
"Single": 242
},
"family_type": {
"Extended": 234,
"Joint": 235,
"Nuclear": 233,
"Single Parent": 236
}
}
You'll have to do this manually by creating a dictionary and mapping the name to the sub_dictionary yourself.
my_data = {'gender': gender, 'marital_status':marital_status, 'family_type': family_type}
Edit: example of adding to an outfile using json.dump
# Write my_data to myfile.json.
# Fixed: the original bound the file handle to the misspelled name
# 'wrtier' but passed the undefined name 'writer' to json.dump,
# which raised NameError; both now use the same name.
with open('myfile.json', 'w') as writer:
    json.dump(my_data, writer)
As per your requirement, you can do it like this by replacing the line that defines lst:
dict_req = {"gender":gender, "marital_status":marital_status, "family_type":family_type}
I use the Google Vision API on my project. The OCR result returns a JSON file that represents all the items the API recognized, with coordinates. I want to add a feature that runs through the whole JSON to find the item I want and then stores the coordinates and the description into an array/list.
This is the returned JSON format:
{
"textAnnotations": [
{
"description": "a",
"boundingPoly": {
"vertices": [
{
"x": 235,
"y": 409
},
{
"x": 247,
"y": 408
},
{
"x": 250,
"y": 456
},
{
"x": 238,
"y": 457
}
]
}
},
{
"description": "b",
"boundingPoly": {
"vertices": [
{
"x": 235,
"y": 409
},
{
"x": 247,
"y": 408
},
{
"x": 250,
"y": 456
},
{
"x": 238,
"y": 457
}
]
}
},{c...},{d...},{e...}
],
"fullTextAnnotation": {
"pages": "not important",
"text": "a\nb\nc\nd\ne\n"
}
}
My aim is to find 2 items and calculate whether they are parallel. For example, I want to find out b or c or d or e is parallel with a, and I have already stored the coordinate of a into a list with this method:
def getJson():
    """Read and parse 'json_file.json'.

    Returns the parsed JSON object on success.  On any failure the error
    and its traceback are printed and None is returned implicitly, so
    callers must be prepared for a None result.
    """
    try:
        # 'with' guarantees the handle is closed even when parsing fails;
        # the original leaked the handle on a json.loads error.
        with open('json_file.json', 'r', encoding="utf-8") as f:
            return json.loads(f.read())
    except Exception as e:
        print(e)
        print(traceback.format_exc())
def get_keywords_coordinates(origin_data):
    """Return the flattened vertex coordinates [x0, y0, x1, y1, x2, y2,
    x3, y3] of the annotation(s) whose description is exactly "a".

    Fixed: the original filtered the nodes into ``__nodes`` but then
    indexed ``__node`` — the comprehension variable, which does not leak
    out of a list comprehension in Python 3 — raising NameError.  The
    matching nodes are now iterated explicitly.
    """
    matches = [node for node in origin_data['textAnnotations']
               if node['description'] == "a"]
    keyword_coords = []
    for node in matches:
        # Each bounding polygon has four vertices; flatten them as x, y pairs.
        for lv in range(4):
            vertex = node['boundingPoly']['vertices'][lv]
            keyword_coords.append(vertex['x'])
            keyword_coords.append(vertex['y'])
    return keyword_coords
where keyword_coords is the list that contains the coordinates, and looks like this:
keyword_coords[235, 409, 247, 408, 250, 456, 238, 457]
I will put it and another keyword coordinate into a function to do that calculation but I have no idea how to get the coordinate of b, c, d, and e one by one (abcde is just an example, the real situation will not be able to define the item name with hard code. I may let the program finds out the keywords with some regex)
How should I deal with this?
I don't know what exactly you want to do, but it doesn't need regex — just a normal for-loop to work with the items one by one.
First I would change get_keywords_coordinates to get all items and coordinates
def get_keywords_coordinates(data):
    """Collect every annotation's description together with its flattened
    bounding-polygon coordinates, as (key, [x0, y0, x1, y1, ...]) tuples."""
    collected = []
    for annotation in data['textAnnotations']:
        # Flatten the polygon vertices into alternating x, y values.
        flat = [value
                for vertex in annotation["boundingPoly"]['vertices']
                for value in (vertex['x'], vertex['y'])]
        collected.append((annotation["description"], flat))
    return collected
results = get_keywords_coordinates(data)
print('--- coords ---')
print(results)
Result:
--- coords ---
[
('a', [235, 409, 247, 408, 250, 456, 238, 457]),
('b', [335, 409, 347, 408, 350, 456, 338, 457]),
('c', [435, 409, 447, 408, 450, 456, 438, 457])
]
And I would get some selected item (i.e. the first item, with a) and create a list without this item
selected = results[0]
#rest = results[1:]
rest = results.copy() # more useful if I would selected item with different index
rest.remove(selected) # more useful if I would selected item with different index
print('--- items ---')
print('selected:', selected)
print('rest :', rest)
print('---')
Result:
--- items ---
selected: ('a', [235, 409, 247, 408, 250, 456, 238, 457])
rest : [('b', [335, 409, 347, 408, 350, 456, 338, 457]), ('c', [435, 409, 447, 408, 450, 456, 438, 457])]
And I could use for-loop to compare selected item with other items - one by one
for item in rest:
print('compare', selected[0], 'with', item[0])
print(selected[0], selected[1])
print(item[0], item[1])
Result:
compare a with b
a [235, 409, 247, 408, 250, 456, 238, 457]
b [335, 409, 347, 408, 350, 456, 338, 457]
compare a with c
a [235, 409, 247, 408, 250, 456, 238, 457]
c [435, 409, 447, 408, 450, 456, 438, 457]
Full example:
data = {
"textAnnotations": [
{
"description": "a",
"boundingPoly": {
"vertices": [
{
"x": 235,
"y": 409
},
{
"x": 247,
"y": 408
},
{
"x": 250,
"y": 456
},
{
"x": 238,
"y": 457
}
]
}
},
{
"description": "b",
"boundingPoly": {
"vertices": [
{
"x": 335,
"y": 409
},
{
"x": 347,
"y": 408
},
{
"x": 350,
"y": 456
},
{
"x": 338,
"y": 457
}
]
}
},
{
"description": "c",
"boundingPoly": {
"vertices": [
{
"x": 435,
"y": 409
},
{
"x": 447,
"y": 408
},
{
"x": 450,
"y": 456
},
{
"x": 438,
"y": 457
}
]
}
},
],
"fullTextAnnotation": {
"pages": "not important",
"text": "a\nb\nc\nd\ne\n"
}
}
def get_keywords_coordinates(data):
    """Return a list of (description, coords) tuples, where coords is the
    annotation's bounding polygon flattened to [x0, y0, x1, y1, ...]."""
    pairs = []
    for entry in data['textAnnotations']:
        coords = []
        for corner in entry["boundingPoly"]['vertices']:
            # Append the x, y pair of each polygon corner in order.
            coords.extend((corner['x'], corner['y']))
        pairs.append((entry["description"], coords))
    return pairs
results = get_keywords_coordinates(data)
print('--- coords ---')
print(results)
selected = results[0]
#rest = results[1:]
rest = results.copy()
rest.remove(selected)
print('--- keywords ---')
print('selected:', selected)
print('rest :', rest)
print('---')
for item in rest:
print('compare', selected[0], 'with', item[0])
print(selected[0], selected[1])
print(item[0], item[1])
I wanted python to open my dictionary out of an external json file.
This is with python 3.7.2 in anaconda spyder on windows.
This is the written thing in my file:
r{"0" : {"-25 : 144, 0 : 182, 25 : 224, 50 : 272"}}
r{"1000" : {"-25 : 157, 0 : 198, 25 : 245, 50 : 297"}}
r{"2000" : {"-25 : 172, 0 : 216, 25 : 267, 50 : 324"}}
r{"3000" : {"-25 : 188, 0 : 236, 25 : 292, 50 : 354"}}
r{"4000" : {"-25 : 205, 0 : 258, 25 : 319, 50 : 387"}}
r{"5000" : {"-25 : 224, 0 : 283, 25 : 349, 50 : 423"}}
r{"6000" : {"-25 : 246, 0 : 309, 25 : 381, 50 : 463"}}
r{"7000" : {"-25 : 269, 0 : 339, 25 : 418, 50 : 507"}}
r{"8000" : {"-25 : 295, 0 : 371, 25 : 458, 50 : 555"}}
r{"9000" : {"-25 : 323, 0 : 407, 25 : 502, 50 : 609"}}
r{"10000" : {"-25 : 354, 0 : 446, 25 : 551, 50 : 668"}}
I wanted to open my file with the following code:
togr650 = {}
with open("to_gr_650.json", "r") as config_file:
togr650 = json.load(config_file)
print(togr650)
Unfortunately the following error appeared:
Traceback (most recent call last):
File "<ipython-input-29-098635d60a19>", line 1, in <module>
runfile('C:/Users/***/Desktop/take_off_distance.py', wdir='C:/Users/***/Desktop')
File "C:\Users\***\Anaconda3\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 827, in runfile
execfile(filename, namespace)
File "C:\Users\***\Anaconda3\lib\site-packages\spyder_kernels\customize\spydercustomize.py", line 110, in execfile
exec(compile(f.read(), filename, 'exec'), namespace)
File "C:/Users/***/Desktop/take_off_distance.py", line 26, in <module>
togr650 = json.load(config_file)
File "C:\Users\***\Anaconda3\lib\json\__init__.py", line 296, in load
parse_constant=parse_constant, object_pairs_hook=object_pairs_hook, **kw)
File "C:\Users\***\Anaconda3\lib\json\__init__.py", line 348, in loads
return _default_decoder.decode(s)
File "C:\Users\***\Anaconda3\lib\json\decoder.py", line 337, in decode
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
File "C:\Users\***\Anaconda3\lib\json\decoder.py", line 355, in raw_decode
raise JSONDecodeError("Expecting value", s, err.value) from None
JSONDecodeError: Expecting value
How do I have to change my code to let it work properly?
I have a dictionary with the following structure as:
Data2 = {
'1.1.1': {'unitReturnAirTemperature': 224, 'unitAirTemperature': 224, 'unitHumidity': 430, 'unitReturnAirHumidity': 431},
'1.1': {'unitName': 'Unit-01'},
'1.2': {'unitName': 'Unit-02'},
'1.2.1': {'unitReturnAirTemperature': 215, 'unitAirTemperature': 224, 'unitHumidity': 431, 'unitReturnAirHumidity': 399}
}
And I would like to get the following:
Data3 = {
'1.1.1': {'unitReturnAirTemperature': 224, 'unitAirTemperature': 224, 'unitHumidity': 430, 'unitReturnAirHumidity': 431, 'unitName': 'Unit-01'},
'1.2.1': {'unitReturnAirTemperature': 215, 'unitAirTemperature': 224, 'unitHumidity': 431, 'unitReturnAirHumidity': 399, 'unitName': 'Unit-02'}
}
The new dictionary (Data3) should be based on the data from Data2 dictionary.
So the puamapi/apiobjects_american/4901 object looks like this:
{
"_id": "4701",
"_index": "puamapi",
"_source": {
"CatRais": null,
"Classification": "Photographs",
"Constituents": [],
"CreditLine": "Gift of H. Kelley Rollings, Class of 1948, and Mrs. Rollings",
"CuratorApproved": 0,
"DateBegin": 1921,
"DateEnd": 1921,
"Dated": "1921",
"Department": "Photography",
"DimensionsLabel": "image: 19.3 x 24.6 cm (7 5/8 x 9 11/16 in.)\r\nsheet: 20.2 x 25.4 cm (7 15/16 x 10 in.)",
"Edition": null,
"Medium": "Gelatin silver print",
"ObjectID": 4701,
"ObjectNumber": "1995-341",
"ObjectStatus": "Accessioned Object",
"Restrictions": "Restricted",
"SortNumber": " 1995 341",
"SysTimeStamp": "AAAAAAAAC3k="
},
"_type": "apiobjects_american",
"_version": 4,
"found": true
}
I want to do a partial update on the object, where we add a constituent to the constituent array.
The record looks like this:
{'params': {'item': [{'ConstituentID': 5}]}, 'script': 'if (ctx._source[Constituents] == null) {ctx._source.Constituents = item } else { ctx._source.Constituents+= item }'}
And then I add with an elastic search instance in python:
es.update(index="puamapi", doc_type="apiobjects_american", id=4901, body=record)
But, I'm getting this error
Traceback (most recent call last):
File "json_to_elasticsearch.py", line 138, in <module>
load_xrefs(api_xrefs)
File "json_to_elasticsearch.py", line 118, in load_xrefs
load_xref(table, xref_map[table][0], xref_map[table][1], json.load(file)["RECORDS"])
File "json_to_elasticsearch.py", line 109, in load_xref
es.update(index=database, doc_type=table1, id=id1, body=record)
File "/usr/local/lib/python2.7/dist-packages/elasticsearch/client/utils.py", line 69, in _wrapped
return func(*args, params=params, **kwargs)
File "/usr/local/lib/python2.7/dist-packages/elasticsearch/client/__init__.py", line 460, in update
doc_type, id, '_update'), params=params, body=body)
File "/usr/local/lib/python2.7/dist-packages/elasticsearch/transport.py", line 329, in perform_request
status, headers, data = connection.perform_request(method, url, params, body, ignore=ignore, timeout=timeout)
File "/usr/local/lib/python2.7/dist-packages/elasticsearch/connection/http_urllib3.py", line 109, in perform_request
self._raise_error(response.status, raw_data)
File "/usr/local/lib/python2.7/dist-packages/elasticsearch/connection/base.py", line 108, in _raise_error
raise HTTP_EXCEPTIONS.get(status_code, TransportError)(status_code, error_message, additional_info)
elasticsearch.exceptions.RequestError: TransportError(400, u'illegal_argument_exception', u'[Bastion][127.0.0.1:9300][indices:data/write/update[s]]')
Any insights would be appreciated. Thanks!