Uploading local images to Microsoft Cognitive Face - Python

[Error screenshot]
import sys
import os, time
import cognitive_face as CF
import global_variables as global_var
import urllib
import sqlite3
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
Key = global_var.key
CF.Key.set(Key)
BASE_URL = global_var.BASE_URL # Replace with your regional Base URL
CF.BaseUrl.set(BASE_URL)
def get_person_id():
    person_id = ''
    extractId = str(sys.argv[1])[-2:]
    connect = sqlite3.connect("Face-DataBase")
    c = connect.cursor()
    cmd = "SELECT * FROM Students WHERE ID = " + extractId
    c.execute(cmd)
    row = c.fetchone()
    person_id = row[3]
    connect.close()
    return person_id
if len(sys.argv) != 1:
    currentDir = os.path.dirname(os.path.abspath(__file__))
    imageFolder = os.path.join(currentDir, "dataset/" + str(sys.argv[1]))
    person_id = get_person_id()
    for filename in os.listdir(imageFolder):
        if filename.endswith(".jpg"):
            print(filename)
            imgurl = urllib.request.pathname2url(os.path.join(imageFolder, filename))
            imgurl = imgurl[3:]
            print("imageurl = {}".format(imgurl))
            res = CF.face.detect(imgurl)
            if len(res) != 1:
                print("No face detected in image")
            else:
                res = CF.person.add_face(imgurl, global_var.personGroupId, person_id)
                print(res)
                time.sleep(6)
else:
    print("supply attributes please from dataset folder")
I believe the images need to be converted to a byte array, but I don't know how to do that. The local images have to be uploaded to the Cognitive API. I have tried many approaches but cannot resolve the error.
imgurl = urllib.request.pathname2url(os.path.join(imageFolder, filename))
The line above is where the error occurs.

Welcome to Stack Overflow, @arun.
First of all, as per here, the API you're using is deprecated, and you should switch to this one instead.
Second, this new API provides a method called detect_with_stream (ref here) that sends the request to the Face Recognition endpoint using a byte stream instead of a URL (it uses different request headers than the URL-based method). This method accepts a stream of bytes containing your image. I've worked with another Cognitive Services API that performs text recognition, so I've faced this same choice between sending an image URL and sending the image byte stream. You can generate a byte stream from the file as follows:
image_data = open(image_path, "rb").read()
The variable image_data can be passed to the method.
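For example, a minimal detection call might look like the sketch below. Depending on the SDK version, detect_with_stream may expect a file-like object rather than raw bytes, so wrapping the bytes in io.BytesIO is a safe option (the endpoint, key, and file name here are placeholders, not values from the question):

import io
from azure.cognitiveservices.vision.face import FaceClient
from msrest.authentication import CognitiveServicesCredentials

face_client = FaceClient("https://<your-region>.api.cognitive.microsoft.com",
                         CognitiveServicesCredentials("<your-key>"))

image_data = open("photo.jpg", "rb").read()
# wrap the raw bytes in a file-like object before handing them to the SDK
detected_faces = face_client.face.detect_with_stream(io.BytesIO(image_data))
print([face.face_id for face in detected_faces])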
Edit: Instructions on how to use the new API with the image bytestream
First, install the following pip package:
pip install azure-cognitiveservices-vision-face
Then, you can try this approach.
import sys
import os, time
import global_variables as global_var
from azure.cognitiveservices.vision.face import FaceClient
from msrest.authentication import CognitiveServicesCredentials
import urllib
import sqlite3
import requests
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)

KEY = global_var.key
ENDPOINT = global_var.endpoint
face_client = FaceClient(ENDPOINT, CognitiveServicesCredentials(KEY))

def get_person_id():
    person_id = ''
    extractId = str(sys.argv[1])[-2:]
    connect = sqlite3.connect("Face-DataBase")
    c = connect.cursor()
    cmd = "SELECT * FROM Students WHERE ID = " + extractId
    c.execute(cmd)
    row = c.fetchone()
    person_id = row[3]
    connect.close()
    return person_id

if len(sys.argv) != 1:
    currentDir = os.path.dirname(os.path.abspath(__file__))
    imageFolder = os.path.join(currentDir, "dataset/" + str(sys.argv[1]))
    person_id = get_person_id()
    for filename in os.listdir(imageFolder):
        if filename.endswith(".jpg"):
            print(filename)
            img_data = open(filename, "rb").read()
            res = face_client.face.detect_with_stream(img_data)
            if not res:
                print('No face detected from image {}'.format(filename))
                continue
            res = face_client.person_group_person.add_face_from_stream(global_var.personGroupId, person_id, img_data)
            print(res)
            time.sleep(6)
else:
    print("supply attributes please from dataset folder")
Edit 2: Notes on traversing all the files in a directory
OK @arun, your current problem stems from the fact that you're using os.listdir, which only lists the filenames, so you don't have their paths. The quickest solution would be to open each image inside the loop with:
img_data = open(os.path.join(imageFolder, filename), "rb").read()
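A minimal sketch of the corrected loop, keeping the same names as in the code above:

for filename in os.listdir(imageFolder):
    if filename.endswith(".jpg"):
        # os.listdir returns bare filenames, so join each with its folder path
        image_path = os.path.join(imageFolder, filename)
        img_data = open(image_path, "rb").read()
        res = face_client.face.detect_with_stream(img_data)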

Related

While obtaining file hashes, some folders and files from the directory are not showing up

My code was working just fine before adding the hash function: I was getting the list of all folders and files in my directory in the PrettyTable. Once I added the hash function, only about 5 of the files in that directory showed up in the table with hashes. I am not sure where I have gone wrong. Please forgive me, I am new to this; we are not learning to code from scratch, but have to modify existing code to function the way we need it to.
# Python Standard Libraries
import os       # file system methods
import hashlib  # hashing functions
import sys      # system methods
import time     # time conversions

# Python 3rd Party Libraries
from prettytable import PrettyTable  # pip install prettytable

# Local Functions
def GetFileMetaData(fileName):
    # obtain file system metadata
    try:
        metaData = os.stat(fileName)   # Use the stat method to obtain meta data
        fileSize = metaData.st_size    # Extract fileSize and MAC Times
        timeLastAccess = metaData.st_atime
        timeLastModified = metaData.st_mtime
        timeCreated = metaData.st_ctime
        macTimeList = [timeLastModified, timeCreated, timeLastAccess]  # Group the MAC Times in a List
        return True, None, fileSize, macTimeList
    except Exception as err:
        return False, str(err), None, None

# Pseudo Constants

# Start of the Script
tbl = PrettyTable(['FilePath', 'FileSize', 'UTC-Modified', 'UTC-Accessed', 'UTC-Created', 'SHA-256 HASH'])
# file check
while True:
    targetFolder = input("Enter Target Folder: ")
    if os.path.isdir(targetFolder):
        break
    else:
        print("\nInvalid Folder ... Please Try Again")

print("Walking: ", targetFolder, "\n")
print()

for currentRoot, dirList, fileList in os.walk(targetFolder):
    for nextFile in fileList:
        fullPath = os.path.join(currentRoot, nextFile)
        absPath = os.path.abspath(fullPath)
        fileSize = os.path.getsize(absPath)
        success, errInfo, fileSize, macList = GetFileMetaData(absPath)
        if success:
            # convert to readable Greenwich Time
            modTime = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(macList[0]))
            accTime = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(macList[1]))
            creTime = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(macList[2]))
            # hashing function
            with open(absPath, 'rb') as target:
                fileContents = target.read()
                sha256Obj = hashlib.sha256()
                sha256Obj.update(fileContents)
                hexDigest = sha256Obj.hexdigest()
                tbl.add_row([absPath, fileSize, modTime, accTime, creTime, hexDigest])

tbl.align = "l"  # align the columns left justified
# display the table
print(tbl.get_string(sortby="FileSize", reversesort=True))
print("\nScript-End\n")
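One likely culprit, assuming some files in the target folder are locked or unreadable: a single failed open() inside the walk loop raises an unhandled exception and stops the script before the remaining rows are added. A minimal sketch of a more defensive hashing step, mirroring the try/except that GetFileMetaData already uses for os.stat, might look like this:

def HashFile(absPath):
    # return the SHA-256 hex digest, or None if the file cannot be read
    try:
        sha256Obj = hashlib.sha256()
        with open(absPath, 'rb') as target:
            # read in chunks so large files don't exhaust memory
            for chunk in iter(lambda: target.read(65536), b''):
                sha256Obj.update(chunk)
        return sha256Obj.hexdigest()
    except OSError as err:
        print("Skipping", absPath, ":", err)
        return None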

Resizing and storing an image folder with the filesystem using Python

I want to resize the images in a folder and store them in a database, using Postgres as the database and a Jupyter notebook. I am using a resize function to resize my images and then saving them into the database, but the resize function does not seem to work and I am unable to find my mistake.
subject = input("Enter Subject Name:")
cursor.execute("DROP TABLE IF EXISTS %s" % (subject))
cursor.execute("""CREATE TABLE %s (ID SERIAL PRIMARY KEY, PHOTO BYTEA NOT NULL)""" % (subject))
conn.commit()
userfilepath = input("enter file path:")
dirs = os.listdir(userfilepath)

def resize():
    for item in dirs:
        if os.path.isfile(userfilepath + item):
            im = Image.open(userfilepath + item)
            f, e = os.userfilepath.splitext(userfilepath + item)
            imResize = im.resize((200, 200), Image.ANTIALIAS)
            imResize.save(f + ' resized.jpg', 'JPEG', quality=90)

import cv2
import os, sys
from PIL import Image
import io
import glob

img_dir = userfilepath  # Enter Directory of all images
data_path = os.path.join(img_dir, '*g')
files = glob.glob(data_path)
data = []
for f1 in files:
    # img = cv2.imread(f1)
    # data.append(img)
    with open(f1, "rb") as file:
        resize()
        BinaryData = file.read()
        cursor.execute("INSERT INTO {tab} (photo) VALUES({})".format(psycopg2.Binary(BinaryData), tab=subject))
        conn.commit()
# Insert_blob(img_dir)
Feel free to change the variables at the top since you seem to want to get them from user input. I've chosen to hardcode them for the purpose of this example.
Have a look at the code below
import os

userfilepath = "files"
dirs = os.listdir("files")

def do_open(_file):
    print(f"Opening {_file}")

def do_resize(_file):
    print(f"resizing: {_file}")

def resize():
    for item in dirs:
        f = userfilepath + item
        print(f)
        if os.path.isfile(f):
            im = do_open(userfilepath + item)

resize()
# output
# filesf1.txt
# filesf2.txt

def resize_well():
    for item in dirs:
        f = os.path.join(userfilepath, item)
        print(f)
        if os.path.isfile(f):
            im = do_open(f)

resize_well()
# output
# files\f1.txt
# Opening files\f1.txt
# files\f2.txt
# Opening files\f2.txt
In resize_well(), os.path.join() creates a proper path, whereas string concatenation misses the / or \ delimiter on Linux and Windows respectively.
Your code isn't passing the if statement because userfilepath+item doesn't exist, but userfilepath/item probably does.
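Applying the same fix to the original resize() (and swapping the broken os.userfilepath.splitext for os.path.splitext, which is presumably what was intended), a corrected sketch might look like this:

import os
from PIL import Image

def resize(userfilepath):
    for item in os.listdir(userfilepath):
        path = os.path.join(userfilepath, item)
        if os.path.isfile(path):
            im = Image.open(path)
            base, ext = os.path.splitext(path)
            # resize to 200x200 and save alongside the original
            imResize = im.resize((200, 200), Image.ANTIALIAS)
            imResize.save(base + ' resized.jpg', 'JPEG', quality=90)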

How do I download videos from Pexels API?

I have this code that can pull images from Pexels, but I don't know how to change it to download videos. I haven't seen anyone do this before, and any help is greatly appreciated. I tried switching all the photo tags to video, but that didn't seem to work. I've also tried adding more libraries, but that doesn't seem to work either.
import argparse
import json
import os
import time

import requests
import tqdm
from pexels_api import API

PEXELS_API_KEY = os.environ['PEXELS_KEY']
MAX_IMAGES_PER_QUERY = 100
RESULTS_PER_PAGE = 10
PAGE_LIMIT = MAX_IMAGES_PER_QUERY / RESULTS_PER_PAGE

def get_sleep(t):
    def sleep():
        time.sleep(t)
    return sleep

def main(args):
    sleep = get_sleep(args.sleep)
    api = API(PEXELS_API_KEY)
    query = args.query
    page = 1
    counter = 0
    photos_dict = {}
    # Step 1: Getting urls and meta information
    while page <= PAGE_LIMIT:
        api.search(query, page=page, results_per_page=RESULTS_PER_PAGE)
        photos = api.get_entries()
        for photo in tqdm.tqdm(photos):
            photos_dict[photo.id] = vars(photo)['_Photo__photo']
            counter += 1
        if not api.has_next_page:
            break
        page += 1
        sleep()
    print(f"Finishing at page: {page}")
    print(f"Images were processed: {counter}")
    # Step 2: Downloading
    if photos_dict:
        os.makedirs(args.path, exist_ok=True)
        # Saving dict
        with open(os.path.join(args.path, f'{query}.json'), 'w') as fout:
            json.dump(photos_dict, fout)
        for val in tqdm.tqdm(photos_dict.values()):
            url = val['src'][args.resolution]
            fname = os.path.basename(val['src']['original'])
            image_path = os.path.join(args.path, fname)
            if not os.path.isfile(image_path):  # ignore if already downloaded
                response = requests.get(url, stream=True)
                with open(image_path, 'wb') as outfile:
                    outfile.write(response.content)
            else:
                print(f"File exists: {image_path}")

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--query', type=str, required=True)
    parser.add_argument('--path', type=str, default='./results_pexels')
    parser.add_argument('--resolution', choices=['original', 'large2x', 'large',
                                                 'medium', 'small', 'portrait',
                                                 'landscape', 'tiny'], default='original')
    parser.add_argument('--sleep', type=float, default=0.1)
    args = parser.parse_args()
    main(args)
Sorry for jumping in on this question. I just faced a similar situation when downloading videos from Pexels using the Python API pexelsPy. This may be helpful:
I retrieved the ID of each video and then built the download URL, which has the following structure: "https://www.pexels.com/video/" + ID + "/download".
See the following example:
def download_video(type_of_videos):
    video_tag = random.choice(type_of_videos)
    PEXELS_API = '-'  # please add your API Key here
    api = API(PEXELS_API)
    retrieved_videos = read_already_download_files('downloaded_files.txt')
    video_found_flag = True
    num_page = 1
    while video_found_flag:
        api.search_videos(video_tag, page=num_page, results_per_page=10)
        videos = api.get_videos()
        for data in videos:
            if data.width > data.height:  # look for horizontal orientation videos
                if data.url not in retrieved_videos:
                    # write_file('downloaded_files.txt', data.url)
                    url_video = 'https://www.pexels.com/video/' + str(data.id) + '/download'  # create the url with the video id
                    r = requests.get(url_video)
                    with open(data.url.split('/')[-2] + '.mp4', 'wb') as outfile:
                        outfile.write(r.content)
                        return data.url.split('/')[-2] + '.mp4'  # download the video
        num_page += 1
The download_video function takes an array of strings with several tags, e.g. ['happy', 'sad', 'relax'], and randomly chooses one of these tags.
PEXELS_API should contain your API key.
read_already_download_files('downloaded_files.txt') retrieves the already-downloaded files, to check whether the currently found file has been downloaded before.
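The helper read_already_download_files isn't shown in the answer; a minimal sketch of what it might look like (a hypothetical implementation, assuming the log stores one URL per line) is:

def read_already_download_files(filename):
    # return the set of already-downloaded video URLs, one per line in the log
    try:
        with open(filename) as f:
            return {line.strip() for line in f}
    except FileNotFoundError:
        return set()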
from pypexels import PyPexels
import requests

api_key = 'api id'
# instantiate PyPexels object
py_pexel = PyPexels(api_key=api_key)

search_videos_page = py_pexel.videos_search(query="love", per_page=40)
# while True:
for video in search_videos_page.entries:
    print(video.id, video.user.get('name'), video.url)
    data_url = 'https://www.pexels.com/video/' + str(video.id) + '/download'
    r = requests.get(data_url)
    print(r.headers.get('content-type'))
    with open('sample.mp4', 'wb') as outfile:
        outfile.write(r.content)
    # if not search_videos_page.has_next:
    break
    # search_videos_page = search_videos_page.get_next_page()
I just tried to do the same. When I was looking for it, I wanted a simple example; all the fancy stuff I was sure I could add myself. So I built upon inou's answer. The example shown is very basic: it requests one page with only 5 results using the 'Tiger' tag in the search query, downloads the first video using the id provided by the response, and simply writes it to the source folder. The API is provided by pexelsPy and the request is executed using the standard requests package. To get access to the API, you need to create a key on the Pexels website (see here). Once you have your own API key, you should be able to substitute the example key shown and run the code as a test.
import pexelsPy
import requests
PEXELS_API = '16gv62567257256iu78krtuzwqsddudrtjberzabzwzjsrtgswnr'
api = pexelsPy.API(PEXELS_API)
api.search_videos('Tiger', page=1, results_per_page=5)
videos = api.get_videos()
url_video = 'https://www.pexels.com/video/' + str(videos[0].id) + '/download'
r = requests.get(url_video)
with open('test.mp4', 'wb') as outfile:
outfile.write(r.content)
You can download multiple videos with this code:
import pexelsPy
import requests

PEXELS_API = '-'
api = pexelsPy.API(PEXELS_API)
api.search_videos('nature', page=2, results_per_page=100, orientation='landscape')
videos = api.get_videos()

for i, video in enumerate(videos):
    url_video = 'https://www.pexels.com/video/' + str(video.id) + '/download'
    r = requests.get(url_video)
    with open(f'test_{i}.mp4', 'wb') as outfile:
        outfile.write(r.content)
This will download 100 videos, with each video being written to a separate file named test_0.mp4, test_1.mp4, ..., test_99.mp4.
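One caveat, since the downloads can be large: requests.get without stream=True buffers the entire video in memory before writing. A hedged variant of the same loop that streams each video to disk in chunks would be:

for i, video in enumerate(videos):
    url_video = 'https://www.pexels.com/video/' + str(video.id) + '/download'
    with requests.get(url_video, stream=True) as r:
        with open(f'test_{i}.mp4', 'wb') as outfile:
            # write in 1 MiB chunks instead of holding the full video in memory
            for chunk in r.iter_content(chunk_size=1024 * 1024):
                outfile.write(chunk)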

Load LSUN dataset with TensorFlow

Recently I have been trying to find the right way to read the LSUN dataset, which comes in the form of an lmdb file, but I haven't found any useful information. I want to know how to read image data from lmdb and what the advantage of that format is. Thank you!
Finally, I used the following code to extract LSUN images from the lmdb file.
import os
import lmdb
from PIL import Image
import tempfile

def _export_mdb_images(db_path, out_dir=None, flat=True, limit=-1, size=256):
    out_dir = out_dir
    env = lmdb.open(
        db_path, map_size=1099511627776,
        max_readers=1000, readonly=True
    )
    count = 0
    with env.begin(write=False) as txn:
        cursor = txn.cursor()
        for key, val in cursor:
            key = str(key, 'utf-8')
            # decide image out directory
            if not flat:
                image_out_dir = os.path.join(out_dir, '/'.join(key[:6]))
            else:
                image_out_dir = out_dir
            # create the directory if an image out directory doesn't exist
            if not os.path.exists(image_out_dir):
                os.makedirs(image_out_dir)
            with tempfile.NamedTemporaryFile('wb') as temp:
                temp.write(val)
                temp.flush()
                temp.seek(0)
                image_out_path = os.path.join(image_out_dir, key + '.jpg')
                Image.open(temp.name).resize((size, size)).save(image_out_path)
            count += 1
            if count == limit:
                break
            if count % 1000 == 0:
                print('Finished', count, 'images')

print("start")
db_path = "path to lmdb"
out_dir = os.path.join(db_path, "data")
_export_mdb_images(db_path, out_dir)
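As for the advantage: lmdb is a memory-mapped key-value store, so reading it needs no database server, avoids per-file filesystem overhead, and keeps random access fast even with millions of records. As a quick sanity check that the database opens correctly (a minimal sketch, reusing db_path from above):

import lmdb

env = lmdb.open(db_path, readonly=True, lock=False)
with env.begin(write=False) as txn:
    # 'entries' is the number of key/value records stored in the database
    print("records:", txn.stat()['entries'])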

Why can't I upload a glitched image to Tumblr with Python?

My goal is to have a program that downloads an image, glitches it and then uploads the glitched image. So far my code looks like this:
import urllib
import random
import pytumblr
from tumblr_keys import *
from BeautifulSoup import BeautifulStoneSoup

# Authenticate via OAuth
client = pytumblr.TumblrRestClient(
    consumer_key,
    consumer_secret,
    token_key,
    token_secret
)
def download_an_image(image_url):
    filename = image_url.split('/')[-1]
    # filefinal = filename[:-4] + '.png'
    urllib.urlretrieve(image_url, filename)
    return filename

def get_random_start_and_end_points_in_file(file_data):
    start_point = random.randint(2600, len(file_data))
    end_point = start_point + random.randint(0, len(file_data) - start_point)
    return start_point, end_point

def splice_a_chunk_in_a_file(file_data):
    start_point, end_point = get_random_start_and_end_points_in_file(file_data)
    section = file_data[start_point:end_point]
    repeated = ''
    for i in range(1, random.randint(1, 2)):
        repeated += section
    new_start_point, new_end_point = get_random_start_and_end_points_in_file(file_data)
    file_data = file_data[:new_start_point] + repeated + file_data[new_end_point:]
    return file_data

def glitch_an_image(local_image):
    file_handler = open(local_image, 'r')
    file_data = file_handler.read()
    file_handler.close()
    for i in range(1, random.randint(0, 2)):
        file_data = splice_a_chunk_in_a_file(file_data)
    file_handler = open(local_image, 'w')
    file_handler.write(file_data)
    file_handler.close
    return local_image

if __name__ == '__main__':
    image_url = "https://41.media.tumblr.com/179e82abf626f870cb0b8fe93919eb67/tumblr_o4t9wtxwO31vq0p00o1_1280.png"
    local_image = download_an_image(image_url)
    image_glitch_file = glitch_an_image(local_image)
    client.create_photo('glitchingimages', state="published", tags=["glitch"], data=image_glitch_file)
To make sure the downloaded picture is always saved as a .png file, I tried enabling the second (commented-out) line in the download_an_image(image_url) function. For some reason, Tumblr still would not let me upload the glitched image; when I tried, it gave me an error. I was, however, able to upload it to Flickr. Only if I export the .png file as .png again can I upload it to Tumblr.
Do you know a way to avoid exporting the image manually? Is there maybe a better way to make sure the downloaded image is saved as a .png file?
Thank you!
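One likely cause, keeping in mind this is Python 2 style code (urllib.urlretrieve, the old BeautifulSoup import): glitch_an_image opens the PNG in text mode ('r'/'w'), which can silently corrupt binary data on some platforms, and file_handler.close is missing its parentheses, so the file may never be flushed. A hedged sketch of the same function using binary mode and context managers would be:

def glitch_an_image(local_image):
    # read and write the image in binary mode so the PNG bytes survive intact
    with open(local_image, 'rb') as file_handler:
        file_data = file_handler.read()
    for i in range(1, random.randint(0, 2)):
        file_data = splice_a_chunk_in_a_file(file_data)
    with open(local_image, 'wb') as file_handler:
        file_handler.write(file_data)
    return local_image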
