This is my code:
prettyPicture(clf, features_test, labels_test)
output_image("F:/test.png", "png", open("F:/test.png", "rb").read())
def output_image(name, format, bytes):
    image_start = "BEGIN_IMAGE_f9825uweof8jw9fj4r8"
    image_end = "END_IMAGE_0238jfw08fjsiufhw8frs"
    data = {}
    data['name'] = name
    data['format'] = format
    data['bytes'] = base64.encodestring(bytes)
    print(image_start + json.dumps(data) + image_end)
The error is:
Traceback (most recent call last):
File "studentMain.py", line 41, in <module>
output_image("F:/test.png", "png", open("F:/test.png", "rb").read())
File "F:\Demo\class_vis.py", line 69, in output_image
print(image_start + json.dumps(data) + image_end)
File "C:\Users\Tony\AppData\Local\Programs\Python\Python36-
32\lib\json\__init__.py", line 231, in dumps
return _default_encoder.encode(obj)
File "C:\Users\Tony\AppData\Local\Programs\Python\Python36-
32\lib\json\encoder.py", line 199, in encode
chunks = self.iterencode(o, _one_shot=True)
File "C:\Users\Tony\AppData\Local\Programs\Python\Python36-
32\lib\json\encoder.py", line 257, in iterencode
return _iterencode(o, 0)
File "C:\Users\Tony\AppData\Local\Programs\Python\Python36-
32\lib\json\encoder.py", line 180, in default
o.__class__.__name__)
TypeError: Object of type 'bytes' is not JSON serializable
The issue here is that base64.encodestring() returns a bytes object, not a string.
Try:
data['bytes'] = base64.encodestring(bytes).decode('ascii')
Check out this question and answer for a good explanation of why this is:
Why does base64.b64encode() return a bytes object?
Also see: How to encode bytes in JSON? json.dumps() throwing a TypeError
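For context, here is a minimal sketch of the fixed helper, assuming only the decode step needs to change (base64.encodebytes() is the non-deprecated Python 3 spelling of encodestring(), and the bytes parameter is renamed to avoid shadowing the built-in):
import base64
import json

def output_image(name, format, image_bytes):
    # Markers used by the caller to locate the embedded image payload.
    image_start = "BEGIN_IMAGE_f9825uweof8jw9fj4r8"
    image_end = "END_IMAGE_0238jfw08fjsiufhw8frs"
    data = {
        'name': name,
        'format': format,
        # encodebytes() returns bytes; decode to str so json.dumps() accepts it.
        'bytes': base64.encodebytes(image_bytes).decode('ascii'),
    }
    print(image_start + json.dumps(data) + image_end)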
You're only missing one aspect here: when you use .encodestring, the result is a bytes object, and bytes are not JSON serializable in Python 3.
You can solve it by just encoding your data['bytes']:
data['bytes'] = base64.encodestring(bytes).decode("utf-8")
I'm assuming you'll always receive a bytes object in the bytes variable; otherwise you should add a check for the object's type and skip the encoding when it's already a string, as in the sketch below.
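A minimal sketch of such a check, assuming a str value means the caller already produced base64 text (to_b64_text is a hypothetical helper name):
import base64

def to_b64_text(value):
    # Only bytes need the base64 + decode treatment; pass str through as-is.
    if isinstance(value, bytes):
        return base64.encodebytes(value).decode("utf-8")
    return value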
Related
import argparse
from PIL import Image
import pytesseract
import numpy as np
import json

def image_to_text(image):
    pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'
    result = pytesseract.image_to_string(image, lang='eng')
    return result

if __name__ == "__main__":
    ap = argparse.ArgumentParser("Image content into text")
    ap.add_argument("--input", required=True, help='path to input image')
    args = ap.parse_args()
    img = Image.open(args.input)
    print(img.size)
    xy_coords = np.flip(np.column_stack(np.where(np.array(img) >= 0)), axis=1)
    value = np.hstack([xy_coords])
    print(value)
    list = value
    js = json.dumps(list)
    jsonfile = open("coordinates.json", "w")
    jsonfile.write(js)
    jsonfile.close()
    print(image_to_text(img))
Error:
Traceback (most recent call last):
File "C:\Users\Chaitanya\Desktop\image\code.py", line 26, in <module>
js = json.dumps(list)
File "C:\Users\Chaitanya\AppData\Local\Programs\Python\Python37\lib\json\__init__.py", line 231, in dumps
return _default_encoder.encode(obj)
File "C:\Users\Chaitanya\AppData\Local\Programs\Python\Python37\lib\json\encoder.py", line 199, in encode
chunks = self.iterencode(o, _one_shot=True)
File "C:\Users\Chaitanya\AppData\Local\Programs\Python\Python37\lib\json\encoder.py", line 257, in iterencode
return _iterencode(o, 0)
File "C:\Users\Chaitanya\AppData\Local\Programs\Python\Python37\lib\json\encoder.py", line 179, in default
raise TypeError(f'Object of type {o.__class__.__name__} '
TypeError: Object of type ndarray is not JSON serializable
First of all, never use a built-in name as a variable name. By assigning to list you have overridden the built-in list, which you'll need. So use better variable names.
The problem:
The error says it all.
TypeError: Object of type ndarray is not JSON serializable
A JSON value can be of one of the following types:
string
number
object
array
true
false
null
see Introducing JSON
In your code you are trying to dump the data as a list, as below:
value = np.hstack([xy_coords])
print(value)
list = value
js = json.dumps(list)
You are trying to dump list, which points to value, and value is a numpy.ndarray, not a list. JSON does support the list type but does not support numpy.ndarray.
So convert the variable (badly named list) to an actual Python list before dumping it; a sketch follows below.
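A minimal sketch of that conversion, with illustrative variable names (coords stands in for the badly named list):
import json
import numpy as np

xy_coords = np.array([[25, 65], [62, 5]])  # stand-in for the real coordinate array

# ndarray.tolist() turns the array (and its numpy scalars) into nested
# Python lists of plain ints, which json.dumps() can serialize.
coords = xy_coords.tolist()
js = json.dumps(coords)

with open("coordinates.json", "w") as jsonfile:
    jsonfile.write(js)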
I'm trying to decode an id_token using Python/JWT.
import urllib.request
from authlib.jose import jwt
import jwt
from jwt.algorithms import RSAAlgorithm
import json
r='eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiYWRtaW4iOnRydWUsImp0aSI6ImFkOWYwNTBmLTY3NWMtNDM1Yi04Yjg4LTQ4YmU2ZTc2ZTM1OCIsImlhdCI6MTU3MjYxNzUzNiwiZXhwIjoxNTcyNjIxMTM2fQ.wCs-4FxR6FPLRpjvbL_zkv_88yvx4PJgRnOo9PVisXs'
url = 'https://dev-297076.oktapreview.com/oauth2/aush9prq3i2F4fAwq0h7/v1/keys'
r = urllib.request.urlopen(url)
str_response = r.read().decode('utf-8')
obj = json.loads(str_response)['keys']
obj2 = json.dumps(str_response)
public_key = RSAAlgorithm.from_jwk(obj)
decoded = jwt.decode(IDjwt, public_key, algorithms='RS256')
The error is:
Traceback (most recent call last):
File "C:/Programs/Python/Python37-32/jwt_test1.py", line 18, in <module>
public_key = RSAAlgorithm.from_jwk(obj)
File "C:\Programs\Python\Python37-32\lib\site-packages\jwt\algorithms.py", line 252, in from_jwk obj = json.loads(jwk)
File "C:\Programs\Python\Python37-32\lib\json\__init__.py", line 341, in loads raise TypeError(f'the JSON object must be str, bytes or bytearray, '
TypeError: the JSON object must be str, bytes or bytearray, not list
Any help is greatly appreciated.
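For what the traceback points at: json.loads() inside RSAAlgorithm.from_jwk() is being handed the whole keys list, whereas it expects a single JWK as str/bytes. A minimal sketch of one way to select and serialize a single key (decode_with_jwks is a hypothetical helper, and matching on the token header's kid is an assumption about the intended flow):
import json
import jwt
from jwt.algorithms import RSAAlgorithm

def decode_with_jwks(id_token, jwks_keys):
    # Match the token's key id (kid) against the JWKS entries, then hand
    # from_jwk() a single JWK serialized as a JSON string, not the list.
    kid = jwt.get_unverified_header(id_token)["kid"]
    key_dict = next(k for k in jwks_keys if k["kid"] == kid)
    public_key = RSAAlgorithm.from_jwk(json.dumps(key_dict))
    return jwt.decode(id_token, public_key, algorithms=["RS256"])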
So I have been trying to use base64 to decode a value and then use the decoded fields individually, printing each one on its own.
Basically my decoded base64 is:
{
"trailerColor": "FF0017",
"complete": 59,
"bounds": [
25,
65,
62,
5
],
"Stamina": 0,
"cardId": "d4fc5458-3481-4ce6-be32-acd03c2cfd16",
}
I'm using the code below, which gets the metadata I want and then decodes it into a UTF-8 string.
resp = requests.get(url, headers=headers, json=json, timeout=6)
getmetadata = resp.json()['objects'][1]['metadata']
newdata = base64.b64decode(getmetadata).decode('UTF-8')
print(newdata)
However, I would expect newdata['trailerColor'] to print only the trailerColor value, but instead the error I get is:
TypeError: string indices must be integers
How can I solve this so I can print whichever field I want from that JSON?
EDIT:
Process Process-1:
Traceback (most recent call last):
File "C:\Users\AppData\Local\Programs\Python\Python36\lib\multiprocessing\process.py", line 249, in _bootstrap
self.run()
File "C:\Users\AppData\Local\Programs\Python\Python36\lib\multiprocessing\process.py", line 93, in run
self._target(*self._args, **self._kwargs)
File "C:\Users\TEST.py", line 194, in script
print(newdata['complete'])
TypeError: string indices must be integers
resp = requests.get(url, headers=headers, json=json, timeout=6)
getmetadata = resp.json()['objects'][1]['metadata']
newdata = base64.b64decode(getmetadata).decode('UTF-8')
data = json.loads(newdata)
print(data['complete'])
base64.b64decode(...).decode(...) returns a str. If that string contains JSON, you can use json.loads to transform it into a dict, from which you can get a single value by its key.
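A self-contained illustration of the same point, with a hypothetical payload standing in for resp.json()['objects'][1]['metadata'] (note that the sample JSON shown in the question has a trailing comma, which json.loads would reject, so it is dropped here):
import base64
import json

encoded = base64.b64encode(json.dumps({
    "trailerColor": "FF0017",
    "complete": 59,
}).encode("utf-8"))

newdata = base64.b64decode(encoded).decode("utf-8")   # still just a str
data = json.loads(newdata)                            # now a dict
print(data["complete"])                               # -> 59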
I have strings as follows in my Python list (taken from the command prompt):
>>> o['records'][5790]
(5790, 'Vlv-Gate-Assy-Mdl-\xe1M1-2-\xe19/16-10K-BB Credit Memo ', 60,
True, '40141613')
>>>
I have tried suggestions as mentioned here: Changing default encoding of Python?
I further changed the default encoding to utf-16 too, but json.dumps() still threw an exception:
>>> write(o)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "okapi_create_master.py", line 49, in write
o = json.dumps(output)
File "C:\Python27\lib\json\__init__.py", line 231, in dumps
return _default_encoder.encode(obj)
File "C:\Python27\lib\json\encoder.py", line 201, in encode
chunks = self.iterencode(o, _one_shot=True)
File "C:\Python27\lib\json\encoder.py", line 264, in iterencode
return _iterencode(o, 0)
UnicodeDecodeError: 'utf8' codec can't decode byte 0xe1 in position 25: invalid continuation byte
I can't figure out what kind of transformation is required for such strings so that json.dumps() works.
\xe1 is not decodable using the utf-8 or utf-16 encodings.
>>> '\xe1'.decode('utf-8')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Python27\lib\encodings\utf_8.py", line 16, in decode
return codecs.utf_8_decode(input, errors, True)
UnicodeDecodeError: 'utf8' codec can't decode byte 0xe1 in position 0: unexpected end of data
>>> '\xe1'.decode('utf-16')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Python27\lib\encodings\utf_16.py", line 16, in decode
return codecs.utf_16_decode(input, errors, True)
UnicodeDecodeError: 'utf16' codec can't decode byte 0xe1 in position 0: truncated data
Try latin-1 encoding:
>>> record = (5790, 'Vlv-Gate-Assy-Mdl-\xe1M1-2-\xe19/16-10K-BB Credit Memo ',
... 60, True, '40141613')
>>> json.dumps(record, encoding='latin1')
'[5790, "Vlv-Gate-Assy-Mdl-\\u00e1M1-2-\\u00e19/16-10K-BB Credit Memo ", 60, true, "40141613"]'
Or, pass ensure_ascii=False to json.dumps to make it not try to decode the string.
>>> json.dumps(record, ensure_ascii=False)
'[5790, "Vlv-Gate-Assy-Mdl-\xe1M1-2-\xe19/16-10K-BB Credit Memo ", 60, true, "40141613"]'
I had a similar problem and came up with the following approach to guarantee either unicode strings or byte strings, from either input. In short, include and use the following lambdas:
# guarantee unicode string
_u = lambda t: t.decode('UTF-8', 'replace') if isinstance(t, str) else t
_uu = lambda *tt: tuple(_u(t) for t in tt)
# guarantee byte string in UTF8 encoding
_u8 = lambda t: t.encode('UTF-8', 'replace') if isinstance(t, unicode) else t
_uu8 = lambda *tt: tuple(_u8(t) for t in tt)
Applied to your question:
import json
o = (5790, u"Vlv-Gate-Assy-Mdl-\xe1M1-2-\xe19/16-10K-BB Credit Memo ", 60,
True, '40141613')
as_json = json.dumps(_uu8(*o))
as_obj = json.loads(as_json)
print "object\n ", o
print "json (type %s)\n %s " % (type(as_json), as_json)
print "object again\n ", as_obj
=>
object
(5790, u'Vlv-Gate-Assy-Mdl-\xe1M1-2-\xe19/16-10K-BB Credit Memo ', 60, True, '40141613')
json (type <type 'str'>)
[5790, "Vlv-Gate-Assy-Mdl-\u00e1M1-2-\u00e19/16-10K-BB Credit Memo ", 60, true, "40141613"]
object again
[5790, u'Vlv-Gate-Assy-Mdl-\xe1M1-2-\xe19/16-10K-BB Credit Memo ', 60, True, u'40141613']
Here's some more reasoning about this.