Decoding a base64 string returns None - Python

I am trying to generate a uid for a user confirmation email:
'uid': urlsafe_base64_encode(force_bytes(user.pk)),
It seems to work and returns something like "Tm9uZQ".
But when I try to decode it using force_text(urlsafe_base64_decode(uidb64)),
it returns None.
The expression
urlsafe_base64_decode(uidb64)
also returns b'None'.
I googled it and saw different implementations, but the copy-pasted code does not work either.
I wrote something like
b64_string = uidb64
b64_string += "=" * ((4 - len(b64_string) % 4) % 4)
print(b64_string)
print(force_text(base64.urlsafe_b64decode(b64_string)))
and the result is still None:
Tm9uZQ==
None
I don't understand why the default decode doesn't work.

"Tm9uZQ==" is the base64 encoding of the string "None",
>>> from base64 import b64encode, b64decode
>>> s = b'None'
>>>
>>> b64encode(s)
b'Tm9uZQ=='
>>> b64decode(b64encode(s))
b'None'
>>>
It could be that some of your data is missing, e.g. user.pk is not set. I think force_bytes is turning a None user.pk into the bytestring b'None'. From the Django source:
def force_bytes(s, encoding='utf-8', strings_only=False, errors='strict'):
    """
    Similar to smart_bytes, except that lazy instances are resolved to
    strings, rather than kept as lazy objects.
    If strings_only is True, don't convert (some) non-string-like objects.
    """
    # Handle the common case first for performance reasons.
    if isinstance(s, bytes):
        if encoding == 'utf-8':
            return s
        else:
            return s.decode('utf-8', errors).encode(encoding, errors)
    if strings_only and is_protected_type(s):
        return s
    if isinstance(s, memoryview):
        return bytes(s)
    return str(s).encode(encoding, errors)
You might be able to prevent None being turned into b'None' by setting strings_only=True when calling force_bytes.
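For instance, a minimal sketch of the difference and of a guard against an unsaved user (assuming Django's force_bytes and urlsafe_base64_encode; the make_uid helper is only illustrative):
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode

force_bytes(None)                     # b'None' -- None is silently stringified
force_bytes(None, strings_only=True)  # None   -- NoneType is a protected type and is left alone

def make_uid(user):
    # user.pk is None until the user has been saved, which is the usual
    # root cause of the "Tm9uZQ" uid seen in the question.
    if user.pk is None:
        raise ValueError("user must be saved (user.pk set) before generating a uid")
    return urlsafe_base64_encode(force_bytes(user.pk))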


Verifying SendGrid's Signed Event Webhook in Django

I am trying to verify the Signed Event Webhook from SendGrid:
https://docs.sendgrid.com/for-developers/tracking-events/getting-started-event-webhook-security-features
from sendgrid.helpers.eventwebhook import EventWebhook, EventWebhookHeader

def is_valid_signature(request):
    # event_webhook_signature = request.META['HTTP_X_TWILIO_EMAIL_EVENT_WEBHOOK_SIGNATURE']
    # event_webhook_timestamp = request.META['HTTP_X_TWILIO_EMAIL_EVENT_WEBHOOK_TIMESTAMP']
    event_webhook = EventWebhook()
    key = settings.SENDGRID_HEADER
    ec_public_key = event_webhook.convert_public_key_to_ecdsa(key)
    text = json.dumps(str(request.body))
    return event_webhook.verify_signature(
        text,
        request.headers[EventWebhookHeader.SIGNATURE],
        request.headers[EventWebhookHeader.TIMESTAMP],
        ec_public_key,
    )
When I send the test example from SendGrid, it always returns False. I compared the keys and everything is correct, so I think the problem is the syntax of the payload:
"b[{\"email\":\"example#test.com\",\"timestamp\":1648560198,\"smtp-id\":\"\\\\u003c14c5d75ce93.dfd.64b469#ismtpd-555\\\\u003e\",\"event\":\"processed\",\"category\":[\"cat facts\"],\"sg_event_id\":\"G6NRn4zC5sGxoV2Hoz7gpw==\",\"sg_message_id\":\"14c5d75ce93.dfd.64b469.filter0001.16648.5515E0B88.0\"},{other tests},\\r\\n]\\r\\n"
I think the issue is that you are calling:
text = json.dumps(str(request.body))
json.dumps serializes an object to a JSON formatted string, but str(request.body) is already a string.
Try just
text = str(request.body)
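To see why the original call breaks verification, here is a tiny illustration with a hypothetical payload (the signature is computed over the exact bytes SendGrid sent, so any extra quoting or escaping makes it fail):
import json

raw = b'[{"email":"example@test.com"}]'  # stand-in for request.body
print(str(raw))              # b'[{"email":"example@test.com"}]' -- the bytes repr, with a b'...' wrapper
print(json.dumps(str(raw)))  # adds another layer of quotes and backslash escapes
print(raw.decode('utf-8'))   # [{"email":"example@test.com"}] -- the text that was actually signed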
I found the solution; my function now looks like this:
def is_valid_signature(request):
    # event_webhook_signature = request.META['HTTP_X_TWILIO_EMAIL_EVENT_WEBHOOK_SIGNATURE']
    # event_webhook_timestamp = request.META['HTTP_X_TWILIO_EMAIL_EVENT_WEBHOOK_TIMESTAMP']
    event_webhook = EventWebhook()
    key = settings.SENDGRID_HEADER
    ec_public_key = event_webhook.convert_public_key_to_ecdsa(key)
    return event_webhook.verify_signature(
        request.body.decode('latin-1'),
        request.headers[EventWebhookHeader.SIGNATURE],
        request.headers[EventWebhookHeader.TIMESTAMP],
        ec_public_key,
    )
I had to decode it in Latin-1 because of our UTF-8 encoding.
Thanks
(does not fail on missing headers, handles UTF-8 decoding, converts types to strings)
from sendgrid.helpers.eventwebhook import EventWebhook, EventWebhookHeader

def flask_verifySendgridSignedWebhook(myrequest, expectedKey):
    try:
        if myrequest.is_json:
            # print("JSON FOUND")
            sg_verify = EventWebhook()
            msgbody = ""
            if myrequest.data:
                msgbody = myrequest.get_data().decode('utf-8')
                # print(msgbody)
            return sg_verify.verify_signature(
                msgbody,
                str(myrequest.headers.get(EventWebhookHeader.SIGNATURE)),
                str(myrequest.headers.get(EventWebhookHeader.TIMESTAMP)),
                sg_verify.convert_public_key_to_ecdsa(expectedKey),
            )
        # print("NO JSON SENT")
        return False
    except Exception:
        return False
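A hypothetical Flask route using the helper above (the URL, route name, and SENDGRID_WEBHOOK_PUBLIC_KEY variable are placeholders; the key would be the Base64 verification key from the SendGrid dashboard):
from flask import Flask, request

app = Flask(__name__)
SENDGRID_WEBHOOK_PUBLIC_KEY = "..."  # placeholder for the dashboard key

@app.route("/sendgrid/events", methods=["POST"])
def sendgrid_events():
    if not flask_verifySendgridSignedWebhook(request, SENDGRID_WEBHOOK_PUBLIC_KEY):
        return "invalid signature", 403
    # signature checked; process request.get_json() here
    return "", 204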

Allowing empty dates with Marshmallow

I am trying to get data from a webpage. The page contains several pieces of release information, but allows values to be unset, i.e. the from/to dates for testing might be empty strings.
Now I am trying to deserialize all the data pulled from the page in order to insert it into a database, and I am facing problems handling empty dates.
from marshmallow import fields, Schema, ValidationError

class TestSchema(Schema):
    training_necessary = fields.Function(deserialize=lambda x: True if x == 'Yes' else False)
    test_from = fields.Date()
    test_to = fields.Date()

data = dict(training_necessary='Yes', test_from='', test_to='')

try:
    validated = TestSchema().load(data)
except ValidationError as err:
    print(f"{err}")
Result:
{'test_to': ['Not a valid date.'], 'test_from': ['Not a valid date.']}
I have already tried several combinations of allow_none=True and default='', but none of them helped me get through. So, how do I allow empty dates? Setting a default to something like 1970-01-01 won't help in this case.
Any hints?
Regards, Thomas
+++ EDIT: SOLUTION +++
Here's the working code I ended up with after Jérôme's helpful tip:
from marshmallow import fields, Schema, ValidationError, pre_load

class TestSchema(Schema):
    training_necessary = fields.Function(deserialize=lambda x: True if x == 'Yes' else False)
    test_from = fields.Date(allow_none=True)
    test_to = fields.Date(allow_none=True)

    @pre_load(pass_many=False)
    def string_to_none(self, data, many, **kwargs):
        turn_to_none = lambda x: None if x == '' else x
        for k, v in data.items():
            data[k] = turn_to_none(v)
        return data

data = dict(training_necessary='Yes', test_from='', test_to='')

try:
    validated = TestSchema().load(data)
except ValidationError as err:
    print(f"{err}")
I would pass no value at all.
data = dict(training_necessary='Yes')
Or I'd make the date fields allow_none and I'd pass None, not an empty string.
data = dict(training_necessary='Yes', test_from=None, test_to=None)
If the issue is that your input contains empty strings, I'd say this is a client issue, but you can add a pre_load method to delete empty strings from the input before deserializing. This is more or less equivalent to modifying the values you scrape from the page before feeding them to marshmallow.
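A minimal sketch of that pre_load variant, which simply drops empty-string values instead of converting them to None (marshmallow 3 syntax; the schema and data mirror the question):
from marshmallow import Schema, fields, pre_load

class TestSchema(Schema):
    training_necessary = fields.Function(deserialize=lambda x: x == 'Yes')
    test_from = fields.Date()
    test_to = fields.Date()

    @pre_load
    def drop_empty_strings(self, data, **kwargs):
        # Remove keys whose value is '', so the Date fields never see them.
        return {k: v for k, v in data.items() if v != ''}

TestSchema().load({'training_necessary': 'Yes', 'test_from': '', 'test_to': ''})
# -> {'training_necessary': True}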

Retrieving an encryption key from a database in Python PyNaCl: how do I convert it back into a PublicKey or PrivateKey object?

I'm trying to store the private/public keys as UTF-8 strings in a database. The problem is that when I bring them back into code, they are not the correct type. As bytes they print the same, as per the following code:
import nacl.utils
from nacl.public import PrivateKey, SealedBox
from nacl.encoding import Base64Encoder
import base64
prvkbob = PrivateKey.generate()
pubkbob = prvkbob.public_key
prvk_db = prvkbob.encode(Base64Encoder).decode('utf8')
pubk_db = pubkbob.encode(Base64Encoder).decode('utf8')
prvk = base64.b64decode(prvk_db.encode('utf8'))
shdk = base64.b64decode(pubk_db.encode('utf8'))
print(prvkbob)
print(prvk)
print(pubkbob)
print(shdk)
# It works with the original key
sealed_box = SealedBox(prvkbob)
# Error on key returned from database
sealed_box = SealedBox(prvk)
How do I initialize them as PublicKey or PrivateKey objects?
I might be a little late to the party, but I ran into a similar problem where it says:
nacl.exceptions.TypeError: Box must be created from a PrivateKey and a
PublicKey
This is easily fixed by instantiating a PrivateKey or PublicKey instance using the following lines:
imported_private_key = nacl.public.PrivateKey(bytes_that_are_a_key)
imported_public_key = nacl.public.PublicKey(bytes_that_are_a_key)
I hope this helps you or anyone else with the same problem.
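Applied to the question's code (prvk and shdk are the raw 32-byte values recovered with base64.b64decode), a sketch would be:
from nacl.public import PrivateKey, PublicKey, SealedBox

restored_private = PrivateKey(prvk)  # rebuild the PrivateKey object from its raw bytes
restored_public = PublicKey(shdk)    # rebuild the PublicKey object from its raw bytes

sealed_box = SealedBox(restored_private)  # now behaves like the original key object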
Since you explicitly decoded the keys as UTF-8 (.decode('utf8')), you must first encode them with the very same encoding (as you did). As @DisplayName said, you then just need to instantiate PrivateKey and PublicKey.
Since you plan to store the Base64 representations of those keys, you could do the following. Here are keys generated the way you wanted:
john_private = "OSEuOrw7BDANm2b0lwddBXUxN6OFGBLBDoFbqnkdMNU="
john_public = "bQNbTjHETLTc/RNJYa1mTDg0fQF70GsuIZFsrb43DQc="
paul_private = "ry860ekZ8T1UDTzvoPSlAVMEOjcVz3ODLYbjXfySns0="
paul_public = "G8608AL7TE2n3P10OLS8V/8wCaf/mzflCS/5qw/TzG4="
The two functions below work with keys and messages stored in Base64:
import base64
from nacl.public import PrivateKey, PublicKey, Box

def base64_to_bytes(key: str) -> bytes:
    return base64.b64decode(key.encode('utf-8'))

def encrypt_for_user(sender_private: str, receiver_public: str, message: str) -> str:
    sender_private = PrivateKey(base64_to_bytes(sender_private))
    receiver_public = PublicKey(base64_to_bytes(receiver_public))
    sender_box = Box(sender_private, receiver_public)
    return base64.b64encode(sender_box.encrypt(bytes(message, "utf-8"))).decode('utf-8')

def decrypt_for_user(receiver_private: str, sender_public: str, message: str) -> str:
    receiver_private = PrivateKey(base64_to_bytes(receiver_private))
    sender_public = PublicKey(base64_to_bytes(sender_public))
    receiver_box = Box(receiver_private, sender_public)
    return receiver_box.decrypt(base64.b64decode(message.encode('utf-8'))).decode('utf-8')
John sends a message to Paul:
message = encrypt_for_user(john_private,paul_public,"Hi Paul, 'up?")
print(message)
9BxTezSQVlxPU5evODskj4EIb5hXqIPnkQVuhpY2qoYvcnIaBgUVhkbN8baSytsmF4RSXdI=
Paul decrypts it:
decrypt_for_user(paul_private, john_public, message)
"Hi Paul, 'up?"

How do I check if a string is valid JSON in Python?

In Python, is there a way to check if a string is valid JSON before trying to parse it?
For example, when working with something like the Facebook Graph API, it sometimes returns JSON and sometimes it could return an image file.
You can try to do json.loads(), which will throw a ValueError if the string you pass can't be decoded as JSON.
In general, the "Pythonic" philosophy for this kind of situation is called EAFP, for Easier to Ask for Forgiveness than Permission.
Example Python function that returns a boolean indicating whether a string is valid JSON:
import json

def is_json(myjson):
    try:
        json.loads(myjson)
    except ValueError as e:
        return False
    return True
Examples:
print(is_json("{}"))                           # prints True
print(is_json("{asdf}"))                       # prints False
print(is_json('{ "age":100}'))                 # prints True
print(is_json("{'age':100 }"))                 # prints False
print(is_json("{\"age\":100 }"))               # prints True
print(is_json('{"age":100 }'))                 # prints True
print(is_json('{"foo":[5,6.8],"foo":"bar"}'))  # prints True
Convert a JSON string to a Python dictionary:
import json
mydict = json.loads('{"foo":"bar"}')
print(mydict['foo']) #prints bar
mylist = json.loads("[5,6,7]")
print(mylist)
[5, 6, 7]
Convert a python object to JSON string:
foo = {}
foo['gummy'] = 'bear'
print(json.dumps(foo)) #prints {"gummy": "bear"}
If you want access to low-level parsing, don't roll your own, use an existing library: http://www.json.org/
Great tutorial on python JSON module: https://pymotw.com/2/json/
To check whether a string is JSON and show syntax errors and error messages:
sudo cpan JSON::XS
echo '{"foo":[5,6.8],"foo":"bar" bar}' > myjson.json
json_xs -t none < myjson.json
Prints:
, or } expected while parsing object/hash, at character offset 28 (before "bar}
at /usr/local/bin/json_xs line 183, <STDIN> line 1.
json_xs is capable of syntax checking, parsing, pretty-printing, encoding, decoding and more:
https://metacpan.org/pod/json_xs
I would say parsing it is the only way you can really, entirely tell. An exception will be raised by Python's json.loads() function (almost certainly) if it's not in the correct format. However, for the purposes of your example you can probably just check the first couple of non-whitespace characters...
I'm not familiar with the JSON that Facebook sends back, but most JSON strings from web apps will start with an opening square bracket [ or curly brace {. No image formats I know of start with those characters.
Conversely, if you know what image formats might show up, you can check the start of the string for their signatures to identify images, and assume you have JSON if it's not an image.
Another simple hack to identify a graphic rather than a text string, in case you're looking for a graphic, is just to test for non-ASCII characters in the first couple dozen characters of the string (assuming the JSON is ASCII).
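A rough sketch of that heuristic (it is only a cheap pre-check, not a validator; the function name is made up):
def probably_json(payload):
    # JSON from web APIs almost always starts with '{' or '['; common image
    # formats start with binary signatures instead.
    head = payload.lstrip()[:1]
    if isinstance(head, bytes):
        return head in (b'{', b'[')
    return head in ('{', '[')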
I came up with a generic, interesting solution to this problem:
class SafeInvocator(object):
    def __init__(self, module):
        self._module = module

    def _safe(self, func):
        def inner(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except Exception:
                return None
        return inner

    def __getattr__(self, item):
        obj = getattr(self._module, item)
        return self._safe(obj) if hasattr(obj, '__call__') else obj
and you can use it like so:
safe_json = SafeInvocator(json)
text = "{'foo':'bar'}"
item = safe_json.loads(text)
if item:
    pass  # do something
An effective and reliable way to check for valid JSON: if the 'get' accessor doesn't throw an AttributeError, then the JSON is valid.
import json

valid_json = {'type': 'doc', 'version': 1, 'content': [{'type': 'paragraph', 'content': [{'text': 'Request for widget', 'type': 'text'}]}]}
invalid_json = 'opo'

def check_json(p, attr):
    doc = json.loads(json.dumps(p))
    try:
        doc.get(attr)  # we don't care if the value exists, only that 'get()' is accessible
        return True
    except AttributeError:
        return False
To use, we call the function and look for a key.
# Valid JSON
print(check_json(valid_json, 'type'))
Returns 'True'
# Invalid JSON / Key not found
print(check_json(invalid_json, 'type'))
Returns 'False'
Much simpler with a try block. You can then validate whether the body is valid JSON:
async def get_body(request: Request):
    try:
        body = await request.json()
    except:
        body = await request.body()
    return body

Get Python's getaddresses() to decode encoded-word encoding

msg = \
"""To: =?ISO-8859-1?Q?Caren_K=F8lter?= <ck@example.dk>, bob@example.com
Cc: "James =?ISO-8859-1?Q?K=F8lter?=" <jk@example.dk>
Subject: hello

message body blah blah blah
"""

import email.parser, email.utils
import itertools

parser = email.parser.Parser()
parsed_message = parser.parsestr(msg)
address_fields = ('to', 'cc')
addresses = itertools.chain(*(parsed_message.get_all(field) for field in address_fields if parsed_message.has_key(field)))
address_list = set(email.utils.getaddresses(addresses))
print address_list
It seems that email.utils.getaddresses() doesn't automatically handle RFC 2047 (MIME encoded-word) in address fields.
How can I get the expected result below?
actual result:
set([('', 'bob@example.com'), ('=?ISO-8859-1?Q?Caren_K=F8lter?=', 'ck@example.dk'), ('James =?ISO-8859-1?Q?K=F8lter?=', 'jk@example.dk')])
desired result:
set([('', 'bob@example.com'), (u'Caren_K\xf8lter', 'ck@example.dk'), (u'James \xf8lter', 'jk@example.dk')])
The function you want is email.header.decode_header, which returns a list of (decoded_string, charset) pairs. It's up to you to further decode them according to charset and join them back together again before passing them to email.utils.getaddresses or wherever.
You might think that this would be straightforward:
def decode_rfc2047_header(h):
    return ' '.join(s.decode(charset or 'ascii')
                    for s, charset in email.header.decode_header(h))
But since message headers typically come from untrusted sources, you have to handle (1) badly encoded data; and (2) bogus character set names. So you might do something like this:
def decode_safely(s, charset='ascii'):
    """Return s decoded according to charset, but do so safely."""
    try:
        return s.decode(charset or 'ascii', 'replace')
    except LookupError:  # bogus charset
        return s.decode('ascii', 'replace')

def decode_rfc2047_header(h):
    return ' '.join(decode_safely(s, charset)
                    for s, charset in email.header.decode_header(h))
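A sketch of wiring that into the question's code, decoding each raw header value before handing the list to getaddresses (this assumes the decoded display names don't themselves contain commas):
decoded_headers = [decode_rfc2047_header(h) for h in addresses]
address_list = set(email.utils.getaddresses(decoded_headers))
print address_list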
Yeah, the email package interface really isn't very helpful a lot of the time.
Here, you have to use email.header.decode_header manually on each address, and then, since that gives you a list of decoded tokens, you have to stitch them back together again manually:
for name, address in email.utils.getaddresses(addresses):
    name = u' '.join(
        unicode(b, e or 'ascii') for b, e in email.header.decode_header(name)
    )
    ...
Thank you, Gareth Rees. Your answer was helpful in solving a problem case:
Input: 'application/octet-stream;\r\n\tname="=?utf-8?B?KFVTTXMpX0FSTE8uanBn?="'
The absence of whitespace around the encoded-word caused email.Header.decode_header to overlook it. I'm too new to this to know if I've only made things worse, but this kludge, along with joining with a '' instead of ' ', fixed it:
if ' =?' not in h:
    h = h.replace('=?', ' =?').replace('?=', '?= ')
Output: u'application/octet-stream; name="(USMs)_ARLO.jpg"'
