Is there any way to read an image from Firebase using OpenCV? Or do I have to download the pictures first and then call cv.imread on the local files?
Is there any way that I could just use cv.imread(link_of_picture_from_firebase)?
Here's how you can:
- read a JPEG from disk,
- base64-encode it so it can be stored as JSON,
- upload it to the Firebase Realtime Database.
Then you can:
- retrieve the image back from Firebase,
- decode the JPEG data back into a Numpy array,
- save the retrieved image to disk.
#!/usr/bin/env python3
import numpy as np
import cv2
from base64 import b64encode, b64decode
import pyrebase
config = {
    "apiKey": "SECRET",
    "authDomain": "SECRET",
    "databaseURL": "SECRET",
    "storageBucket": "SECRET",
    "appId": "SECRET",
    "serviceAccount": "FirebaseCredentials.json"
}
# Initialise and connect to Firebase
firebase = pyrebase.initialize_app(config)
db = firebase.database()
# Read JPEG image from disk...
# ... convert to UTF and JSON
# ... and upload to Firebase
with open("image2.jpg", 'rb') as f:
    data = f.read()
encoded = b64encode(data).decode('UTF-8')   # avoid shadowing the built-in str
db.child("image").set({"data": encoded})
# Retrieve image from Firebase
retrieved = db.child("image").get().val()
retrData = retrieved["data"]
JPEG = b64decode(retrData)
image = cv2.imdecode(np.frombuffer(JPEG, dtype=np.uint8), cv2.IMREAD_COLOR)
cv2.imwrite('result.jpg', image)
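To answer the cv.imread(link) part of the question directly: cv2.imread only accepts a path on the local filesystem, so passing a URL to it will not work. If the picture lives in Firebase Storage and you have a download URL for it, here is a minimal sketch of decoding it straight from memory (the URL below is a placeholder, not a real endpoint):
import urllib.request
import numpy as np
import cv2

# Placeholder: substitute your own Firebase Storage download URL
url = "YOUR_FIREBASE_DOWNLOAD_URL"

# Fetch the raw JPEG bytes over HTTP and decode them without touching disk
with urllib.request.urlopen(url) as resp:
    data = resp.read()
image = cv2.imdecode(np.frombuffer(data, dtype=np.uint8), cv2.IMREAD_COLOR)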
I'm struggling to download a JPG file from Amazon S3 using Python. I want to deploy this code to Heroku, so I need the image to be loaded into memory rather than onto disk.
The code I'm using is:
import boto3
s3 = boto3.client(
    "s3",
    aws_access_key_id=access_key,
    aws_secret_access_key=access_secret
)
s3.upload_fileobj(image_conv, bucket, Key = "image_3.jpg")
new_obj = s3.get_object(Bucket=bucket, Key="image_3.jpg")
image_dl = new_obj['Body'].read()
Image.open(image_dl)
I'm getting the error message:
File ..... line 2968, in open
fp = builtins.open(filename, "rb")
ValueError: embedded null byte
Printing image_dl shows a massive string of what I assume are bytes; one small section looks like the following:
f\xbc\xdc\x8f\xfe\xb5q\xda}\xed\xcb\xdcD\xab\xe6o\x1c;\xb7\xa0\xf5\xf5\xae\xa6)\xbe\xee\xe6\xc3vn\xdfLVW:\x96\xa8\xa3}\xa4\xd8\xea\x8f*\x89\xd7\xcc\xe8\xf0\xca\xb9\x0b\xf4\x1f\xe7\x15\x93\x0f\x83ty$h\xa6\x83\xc8\x99z<K\xc3c\xd4w\xae\xa4\xc2\xfb\xcb\xee\xe0
Before I uploaded it to S3, the image returned the output below, and that's the format I'm trying to get it back into. Is anyone able to help me see where I'm going wrong?
<PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=1440x1440 at 0x7F2BB4005EB0>
Pillow's Image class needs either a filename to open, or a file-like object that it can call read on. Since you don't have a filename, you'll need to provide a stream. It's easiest to use BytesIO to turn the byte array into a stream:
import boto3
from PIL import Image
from io import BytesIO
bucket = "--example-bucket--"
s3 = boto3.client("s3")
with open("image.jpg", "rb") as image_conv:
    s3.upload_fileobj(image_conv, bucket, Key="image_3.jpg")
new_obj = s3.get_object(Bucket=bucket, Key="image_3.jpg")
image_dl = new_obj['Body'].read()
image = Image.open(BytesIO(image_dl))
print(image.width, image.height)
Try first to load raw data into a BytesIO container:
from io import BytesIO
from PIL import Image

file_stream = BytesIO()
s3.download_fileobj(bucket, "image_3.jpg", file_stream)
file_stream.seek(0)  # rewind before handing the stream to Pillow
img = Image.open(file_stream)
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.download_fileobj
Given a pandas DataFrame which contains some data, what is the best way to store this data in Firebase?
Should I convert the DataFrame to a local file (e.g. .csv, .txt) and then upload it to Firebase Storage, or is it also possible to store the pandas DataFrame directly without conversion? Or are there better practices?
Update 01/03 - So far I've come up with the solution below, which requires writing a csv file locally, reading it back in, uploading it, and then deleting the local file. I doubt, however, that this is the most efficient method, so I would like to know whether it can be done better and quicker.
import os
import firebase_admin
from firebase_admin import db, storage
cred = firebase_admin.credentials.Certificate(cert_json)
app = firebase_admin.initialize_app(cred, config)
bucket = storage.bucket(app=app)
def upload_df(df, data_id):
    """
    Upload a DataFrame as a csv to Firebase Storage
    :return: storage_ref
    """
    # Storage location + extension
    storage_ref = data_id + ".csv"
    # Store locally
    df.to_csv(data_id)
    # Upload to Firebase Storage
    blob = bucket.blob(storage_ref)
    with open(data_id, 'rb') as local_file:
        blob.upload_from_file(local_file)
    # Delete locally
    os.remove(data_id)
    return storage_ref
With python-firebase and to_dict:
postdata = my_df.to_dict()
# Assumes any auth/headers you need are already taken care of.
result = firebase.post('/my_endpoint', postdata, {'print': 'pretty'})
print(result)  # Snapshot info
You can get the data back using the snapshot info and the endpoint, and re-establish the DataFrame with from_dict(). You could adapt this approach to the SQL and JSON formats that pandas also supports.
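For completeness, a rough sketch of the read-back side (assuming the same firebase object, and that result['name'] holds the snapshot key returned by the post above):
import pandas as pd

# Fetch the stored dict back from the endpoint using the snapshot key
snapshot_key = result['name']
retrieved = firebase.get('/my_endpoint', snapshot_key)

# Rebuild the DataFrame from the dict
df_restored = pd.DataFrame.from_dict(retrieved)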
Alternatively, and depending on where your script executes from, you might consider treating Firebase as a database and using the db API from firebase_admin (check this out). A rough sketch of that route is below.
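This is only a sketch, assuming the firebase_admin app was initialised with a databaseURL option; the reference path dataframes/my_df is an arbitrary example:
from firebase_admin import db
import pandas as pd

# Write the DataFrame as a plain dict under an arbitrary reference path
ref = db.reference('dataframes/my_df')
ref.set(df.to_dict())

# Read it back and rebuild the DataFrame
df_back = pd.DataFrame.from_dict(ref.get())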
As for whether it's according to best practice, it's difficult to say without knowing anything about your use case.
If you just want to reduce code length and skip the steps of creating and deleting files, you can use upload_from_string:
import firebase_admin
from firebase_admin import db, storage
cred = firebase_admin.credentials.Certificate(cert_json)
app = firebase_admin.initialize_app(cred, config)
bucket = storage.bucket(app=app)
def upload_df(df, data_id):
    """
    Upload a DataFrame as a csv to Firebase Storage
    :return: storage_ref
    """
    storage_ref = data_id + '.csv'
    blob = bucket.blob(storage_ref)
    blob.upload_from_string(df.to_csv())
    return storage_ref
https://googleapis.github.io/google-cloud-python/latest/storage/blobs.html#google.cloud.storage.blob.Blob.upload_from_string
After hours of figuring this out, the following solution works for me. You need to convert your csv file to bytes and then upload it.
import pyrebase
import pandas as pd
firebaseConfig = {
    "apiKey": "xxxxx",
    "authDomain": "xxxxx",
    "projectId": "xxxxx",
    "storageBucket": "xxxxx",
    "messagingSenderId": "xxxxx",
    "appId": "xxxxx",
    "databaseURL": "xxxxx"
}
firebase = pyrebase.initialize_app(firebaseConfig)
storage = firebase.storage()
df = pd.read_csv("/content/Future Prices.csv")
# here is the magic. Convert your csv file to bytes and then upload it
df_string = df.to_csv(index=False)
db_bytes = bytes(df_string, 'utf8')
fileName = "Future Prices.csv"
storage.child("predictions/" + fileName).put(db_bytes)
That's all. Happy coding!
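To read the file back into pandas later, one option (a sketch, assuming the file is readable with the token you pass to get_url; None only works for publicly readable files) is to grab the download URL and let pandas read it directly:
# Get a download URL for the stored CSV and load it straight into pandas
url = storage.child("predictions/" + fileName).get_url(None)
df_restored = pd.read_csv(url)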
I found that starting from a very modest DataFrame size (below 100KB!), and certainly for bigger ones, it pays off to compress the data before storing it. It does not have to be a DataFrame; it can be any object (e.g. a dictionary). I used pickle below to serialise it compactly. Your object is still visible in the usual Firebase Storage this way, and you gain memory and speed, both when writing and when reading, compared to uncompressed storage. For big objects it's also worth adding a timeout to avoid a ConnectionError after the default timeout of 60 seconds.
import firebase_admin
from firebase_admin import credentials, initialize_app, storage
import pickle
cred = credentials.Certificate(json_cert_file)
firebase_admin.initialize_app(cred, {'storageBucket': 'YOUR_storageBucket (without gs://)'})
bucket = storage.bucket()
file_name = data_id + ".pkl"
blob = bucket.blob(file_name)
# Write the df to storage; note the timeout belongs to upload_from_string, not pickle.dumps
blob.upload_from_string(pickle.dumps(df), timeout=300)
# Read the df back from storage
df = pickle.loads(blob.download_as_string(timeout=300))
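If you want actual byte-level compression on top of the pickling, a small sketch using gzip from the standard library (same bucket as above; the .pkl.gz name is just a suggestion):
import gzip

# Compress the pickled bytes before uploading
blob = bucket.blob(data_id + ".pkl.gz")
blob.upload_from_string(gzip.compress(pickle.dumps(df)), timeout=300)

# Decompress on the way back
df = pickle.loads(gzip.decompress(blob.download_as_string(timeout=300)))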
My goal is to generate certain files (txt/pdf/excel) on my Python server and subsequently push them to Firebase Storage.
For the Firebase Storage integration I use the pyrebase package.
So far I have managed to generate the file locally and subsequently store it at the right path in Firebase Storage.
However, the files I store are always empty. What is the reason for this?
1. Generating the localFile
import os
def save_templocalfile(specs):
    # Random something
    localFileName = "test.txt"
    localFile = open(localFileName, "w+")
    for i in range(1000):
        localFile.write("This is line %d\r\n" % (i+1))
    return {
        'localFileName': localFileName,
        'localFile': localFile
    }
2. Storing the localFile
# Required Libraries
import pyrebase
import time
# Firebase Setup & Admin Auth
config = {
    "apiKey": "<PARAMETER>",
    "authDomain": "<PARAMETER>",
    "databaseURL": "<PARAMETER>",
    "projectId": "<PARAMETER>",
    "storageBucket": "<PARAMETER>",
    "messagingSenderId": "<PARAMETER>"
}
firebase = pyrebase.initialize_app(config)
storage = firebase.storage()
def fb_upload(localFile):
    # Define childref
    childRef = "/test/test.txt"
    storage.child(childRef).put(localFile)
    # Get the file url
    fbResponse = storage.child(childRef).get_url(None)
    return fbResponse
The problem was that I had opened my file in write mode only:
localFile = open(localFileName,"w+")
The solution was to close the file first (which also flushes the pending writes to disk) and then reopen it in binary read mode:
# close (Write)
localFile.close()
# Open (Read)
my_file = open(localFileName, "rb")
my_bytes = my_file.read()
# Store on FB
fbUploadObj = storage.child(storageRef).put(my_bytes)
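A slightly tidier variant is sketched below (assuming the same pyrebase storage object; pyrebase's put() also accepts a local file path): writing inside a with block guarantees the file is flushed and closed before the upload.
# Write and close the file in one step
with open(localFileName, "w") as local_file:
    for i in range(1000):
        local_file.write("This is line %d\r\n" % (i + 1))

# Upload by path instead of passing an open file object
storage.child("/test/test.txt").put(localFileName)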
I'm trying to figure out how to upload a Pillow Image instance to a Firebase storage bucket. Is this possible?
Here's some code:
from PIL import Image
image = Image.open(file)
# how to upload to a firebase storage bucket?
I know there's a gcloud-python library but does this support Image instances? Is converting the image to a string my only option?
The gcloud-python library is the correct library to use. It supports uploads from Strings, file pointers, and local files on the file system (see the docs).
from PIL import Image
from google.cloud import storage
client = storage.Client()
bucket = client.get_bucket('bucket-id-here')
blob = bucket.blob('image.png')
# use pillow to open and transform the file
image = Image.open(file)
# perform transforms
image.save(outfile)
with open(outfile, 'rb') as of:
    blob.upload_from_file(of)
# or... (no need to use pillow if you're not transforming)
blob.upload_from_filename(filename=outfile)
This is how to upload a Pillow image directly to Firebase Storage:
import io
from PIL import Image
from firebase_admin import credentials, initialize_app, storage
# Init firebase with your credentials
cred = credentials.Certificate("YOUR DOWNLOADED CREDENTIALS FILE (JSON)")
initialize_app(cred, {'storageBucket': 'YOUR FIREBASE STORAGE PATH (without gs://)'})
bucket = storage.bucket()
blob = bucket.blob('image.jpg')
bs = io.BytesIO()
im = Image.open("test_image.jpg")
im.save(bs, "jpeg")
blob.upload_from_string(bs.getvalue(), content_type="image/jpeg")
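To get the image back out of the bucket as a Pillow object later, a sketch (assuming the same blob as above and a google-cloud-storage version that provides download_as_bytes):
# Download the raw JPEG bytes and decode them in memory
img = Image.open(io.BytesIO(blob.download_as_bytes()))
print(img.size)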
I'm trying to send a user image from my iOS app to a Python script through Firebase by creating a base64 string from the image and then posting that string to Firebase and decoding it in Python. However, a corrupted image is produced. How do I fix this? Here is my Swift code:
func imagePickerController(picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [String : AnyObject]) {
    var byteArray = NSData()
    if let file = info[UIImagePickerControllerOriginalImage] as? UIImage {
        byteArray = UIImageJPEGRepresentation(file, 1.0)!
    }
    let b64 = byteArray.base64EncodedStringWithOptions(NSDataBase64EncodingOptions(rawValue: 0))
    FIRDatabase.database().reference().child("dataUploaded").setValue(b64)
    uploaded = true
    dismissViewControllerAnimated(true, completion: nil)
}
And then the Python code:
from firebase import firebase
import os
from PIL import Image
import numpy as np
import io
fb = firebase.FirebaseApplication("https://xxxxxx.firebaseio.com/", None)
a = fb.get('/dataUploaded', None)
filename = 'image.png'
with open(filename, 'wb') as f:
    f.write(a)
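For what it's worth, a likely fix on the Python side (a sketch, assuming the value stored at /dataUploaded is the base64 string produced by the Swift code above) is to decode the string with base64.b64decode before writing, and to save it with a .jpg extension, since UIImageJPEGRepresentation produces JPEG data rather than PNG:
import base64

# a is the base64 string retrieved from Firebase above
jpeg_bytes = base64.b64decode(a)

# Write the decoded JPEG bytes; the Swift side encoded a JPEG, so use .jpg
with open('image.jpg', 'wb') as f:
    f.write(jpeg_bytes)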