Eigenfaces training image pixel size error - python

Hello to all senior programmers! I am getting an error in the Eigenfaces image training part.
The error is : OpenCV Error: Unsupported format or combination of formats (In the Eigenfaces method all input samples (training images) must be of equal size! Expected 27889 pixels, but was 27556 pixels.) in cv::face::Eigenfaces::train, file C:\projects\opencv-python\opencv_contrib\modules\face\src\eigen_faces.cpp, line 68
This means my pictures are not all the same size. I tried cv2.resize() when capturing pictures from the camera, but it still doesn't work.
Here is my capture code:
import cv2

cam = cv2.VideoCapture(0)
detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
Id = input('enter your id: ')
sampleNum = 0
while True:
    ret, img = cam.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = detector.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
        sampleNum = sampleNum + 1
        cv2.imwrite("dataSet/user." + Id + '.' + str(sampleNum) + ".jpg",
                    cv2.resize(gray[y:y+h, x:x+w], (70, 70)))
    cv2.imshow('frame', img)
    if cv2.waitKey(100) & 0xFF == ord('q'):  # waitKey is for the delay in video capture
        break
    elif sampleNum >= 50:  # how many pictures to capture
        break
cam.release()
cv2.destroyAllWindows()
And here is the training part:
import cv2, os
import numpy as np
from PIL import Image  # needed for Image.open below

recognizer = cv2.face.EigenFaceRecognizer_create()
detector = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")

def getImagesAndLabels(path):
    imagePaths = [os.path.join(path, f) for f in os.listdir(path)]
    faceSamples = []
    Ids = []
    for imagePath in imagePaths:
        pilImage = Image.open(imagePath).convert('L')
        imageNp = np.array(pilImage, 'uint8')
        Id = int(os.path.split(imagePath)[-1].split(".")[1])
        faces = detector.detectMultiScale(imageNp)
        for (x, y, w, h) in faces:
            faceSamples.append(imageNp[y:y+h, x:x+w])
            Ids.append(Id)
    return faceSamples, Ids

faces, Ids = getImagesAndLabels('dataSet')
recognizer.train(faces, np.array(Ids))
recognizer.write('trainner/trainnerEi.yml')
PS: I adapted this code from an LBPHFaceRecognizer example.
Thank you!

Okay, so Eigenfaces only works if all the images have the same dimensions in pixel space.
That means if one image used in training is 28x28, then every other image used in training as well as in testing has to be 28x28.
If the image sizes are not the same, OpenCV will throw that error.
The error simply says that one of the images had 27889 pixels (167x167) while another had 27556 pixels (166x166).
I would recommend using the cv2.resize() function to make all images the same size.
Use the code below as a reference for your training part:
import cv2, os
import numpy as np
from PIL import Image  # needed for Image.open below

recognizer = cv2.face.EigenFaceRecognizer_create()
detector = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")

def getImagesAndLabels(path):
    width_d, height_d = 280, 280  # Declare your own width and height
    imagePaths = [os.path.join(path, f) for f in os.listdir(path)]
    faceSamples = []
    Ids = []
    for imagePath in imagePaths:
        pilImage = Image.open(imagePath).convert('L')
        imageNp = np.array(pilImage, 'uint8')
        Id = int(os.path.split(imagePath)[-1].split(".")[1])
        faces = detector.detectMultiScale(imageNp)
        for (x, y, w, h) in faces:
            ########################################
            # The line changed to use cv2.resize()
            ########################################
            faceSamples.append(cv2.resize(imageNp[y:y+h, x:x+w], (width_d, height_d)))
            Ids.append(Id)
    return faceSamples, Ids

faces, Ids = getImagesAndLabels('dataSet')
recognizer.train(faces, np.array(Ids))
recognizer.write('trainner/trainnerEi.yml')
Keep in mind that even the test images have to be of the same size.
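For example, at prediction time the detected face has to be resized to the same dimensions before calling predict. A minimal sketch, assuming the 280x280 training size and the trainner/trainnerEi.yml model from above (test.jpg is a hypothetical file name):

import cv2

width_d, height_d = 280, 280  # must match the size used during training
recognizer = cv2.face.EigenFaceRecognizer_create()
recognizer.read('trainner/trainnerEi.yml')
detector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

test = cv2.imread('test.jpg', cv2.IMREAD_GRAYSCALE)  # hypothetical test image
for (x, y, w, h) in detector.detectMultiScale(test, 1.3, 5):
    # resize the face crop to the training size before predicting
    face = cv2.resize(test[y:y+h, x:x+w], (width_d, height_d))
    label, confidence = recognizer.predict(face)
    print(label, confidence)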

Related

Increase brightness of tiff file with python

I am working on a Python script to get images from a microscope camera and then combine them into an avi file (15 fps for 3 seconds).
I wrote a program, but the final result is too dark.
I am looking for a solution to make it brighter.
When I add an enhancer I get this error:
ValueError: image has wrong mode
The camera gives 45 dat files. I converted them to tiff and then concatenated them to make an avi file.
The camera is an Andor Neo 5.5 sCMOS.
Here is part of my code:
with tifffile.TiffWriter('datfiles.tif', bigtiff=False) as tif:
    for datfile in datfiles:
        data = numpy.fromfile(
            datfile,
            count=width * height * 3 // 2,  # 12 bit packed
            offset=4,  # 4 byte integer header
            dtype=numpy.uint8,
        ).astype(numpy.uint16)
        image = numpy.zeros(width * height, numpy.uint16)
        image[0::2] = (data[1::3] & 15) | (data[0::3] << 4)
        image[1::2] = (data[1::3] >> 4) | (data[2::3] << 4)
        image.shape = height, width
        # img = increase_brightness(image)
        tif.write(
            image, photometric='minisblack', compression=None,
            metadata=None)

img = Image.open('datfiles.tif')
enhancer = ImageEnhance.Brightness(img)
img = enhancer.enhance(4)
for i in range(45):
    try:
        img.seek(i)
        img.save('page_%s.tif' % (i,))
    except EOFError:
        break
os.remove("datfiles.tif")

'''
------------------------------------------
Concatenate the tif files ----> .avi of 3 sec
'''
image_folder = os.getcwd()  # to be modified
print(image_folder)  # testing
video_name = "ConcatenatedVideo.avi"
images = [img for img in os.listdir(image_folder) if img.endswith(".tif")]
frame = cv2.imread(os.path.join(image_folder, images[0]))
height, width, layers = frame.shape
video = cv2.VideoWriter(video_name, 0, 15, (width, height))
for image in images:
    video.write(cv2.imread(os.path.join(image_folder, image)))
cv2.destroyAllWindows()
video.release()
for f in os.listdir(image_folder):  # delete all the tiff files
    if f.endswith(".tif"):
        os.remove(os.path.join(image_folder, f))
print('finished')
os.chdir(path)
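The ValueError most likely comes from PIL's ImageEnhance not supporting the 16-bit grayscale mode these tiff pages load as. One possible workaround (a sketch, not a tested fix for this exact pipeline) is to brighten the raw 16-bit array with NumPy before writing the tiff; the gain of 16 below is a hypothetical choice that maps 12-bit data onto the full 16-bit range and should be adjusted for the actual exposure:

import numpy

def brighten(frame, gain=16):
    # frame is a 2-D numpy.uint16 array as built in the loop above;
    # scale it up and clip to the uint16 range to avoid wrap-around.
    out = frame.astype(numpy.uint32) * gain
    return numpy.clip(out, 0, 65535).astype(numpy.uint16)

# inside the loop, before tif.write(...):
# image = brighten(image)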

is there a frame size limitation in opencv VideoWriter?

When using OpenCV to save a bunch of pictures to a video file, the file won't open unless I resize the images
(Windows error: "This item is in a format we don't support. 0xc00d36b4").
My code looks like this:
import cv2
import os

input_path = "input_images_folder_path"
imgs_lst = os.listdir(r"C:\Users\...\input_path")
outvid_path = r"C:\Users\...\output.avi"

image0 = input_path + "\\" + imgs_lst[0]
img0 = cv2.imread(image0)
size = (int(img0.shape[1]), int(img0.shape[0]))
fps = 12.0
is_color = True
fourcc = cv2.VideoWriter_fourcc(*"XVID")
vid = cv2.VideoWriter(outvid_path, fourcc, 10.0, size, True)
for i in range(0, 50):  # int(len(imgs_lst))):
    image = input_path + "\\" + imgs_lst[i]
    img = cv2.imread(image, 1)
    img = cv2.resize(img, size)  # tried to comment this out... won't work either
    cv2.imshow("img", img)
    cv2.waitKey(1)
    vid.write(img)
vid.release()
If I resize with
size = (int(img0.shape[1]/2), int(img0.shape[0]/2))
everything works well.
My question is whether or not there is a limit on the frame size. I found some other answers regarding the output file size limitation, but nothing about the single frame shape.
(changing fps, format didn't work either)
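Two things are worth checking with VideoWriter in a case like this: it fails silently (the output file is simply unplayable) when the frames passed to write() don't exactly match the size declared in the constructor, and whether the writer opened at all can be verified with isOpened(). A minimal diagnostic sketch (the size, fps, codec, and file name here are hypothetical choices, not taken from the question):

import cv2

size = (1920, 1080)  # hypothetical frame size; use img0's shape in practice
fourcc = cv2.VideoWriter_fourcc(*"XVID")
vid = cv2.VideoWriter("output.avi", fourcc, 12.0, size, True)

# VideoWriter does not raise on failure; check explicitly.
if not vid.isOpened():
    raise RuntimeError("VideoWriter failed to open - try another codec/container")

def write_frame(writer, frame, size):
    # Force every frame to the declared size so the container stays valid.
    if (frame.shape[1], frame.shape[0]) != size:
        frame = cv2.resize(frame, size)
    writer.write(frame)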

OpenCV Panorama: TypeError: cannot unpack non-iterable NoneType object

I'm trying to make a panorama from an image set. I'm using Spyder, OpenCV 3.4 and Python 3.7. Here's the code:
The main script:
from stitches import Stitcher
# from PIL import Image
import os
import glob
import numpy as np
import imutils
import cv2

cap = cv2.VideoCapture('C:/Users/VID_20181208_111037881.mp4')
img_dir = "C:/Users/user/images"
data_path = os.path.join(img_dir, '*g')
files = glob.glob(data_path)
args = []
for f1 in files:
    img = cv2.imread(f1)
    args.append(img)

def retImg(img):
    return img

for i in args:
    j = i + 1
    frame = retImg(i)
    frame2 = retImg(j)
    # imageA = cv2.imread(frame)
    # imageB = cv2.imread(frame2)
    imageA = imutils.resize(frame, width=400)
    imageB = imutils.resize(frame2, width=400)
    # stitch the images together to create a panorama
    stitcher = Stitcher()
    (result, vis) = stitcher.stitch([imageA, imageB], showMatches=False)
    frame = result
    # show the images
    cv2.imshow("Keypoint Matches", vis)
    cv2.imshow("Result", result)
    cv2.waitKey(0)
The stitch method inside stitches.py:
def stitch(self, images, ratio=0.75, reprojThresh=4.0, showMatches=False):
    # unpack the images, then detect keypoints and extract
    # local invariant descriptors from them
    (imageB, imageA) = images
    (kpsA, featuresA) = self.detectAndDescribe(imageA)
    (kpsB, featuresB) = self.detectAndDescribe(imageB)
    # match features between the two images
    M = self.matchKeypoints(kpsA, kpsB, featuresA, featuresB, ratio, reprojThresh)
    # if the match is None, then there aren't enough matched
    # keypoints to create a panorama
    if M is None:
        return None
    # otherwise, apply a perspective warp to stitch the images together
    (matches, H, status) = M
    result = cv2.warpPerspective(imageA, H, (imageA.shape[1] + imageB.shape[1], imageA.shape[0]))
    result[0:imageB.shape[0], 0:imageB.shape[1]] = imageB
    # check to see if the keypoint matches should be visualized
    if showMatches:
        vis = self.drawMatches(imageA, imageB, kpsA, kpsB, matches, status)
        # return a tuple of the stitched image and the
        # visualization
        return (result, vis)
    # return the stitched image
    return result
When I run it, the following error is shown:
File "C:/Users/user/panorama.py", line 44, in <module>
    (result, vis) = stitcher.stitch([imageA, imageB], showMatches=False)
TypeError: cannot unpack non-iterable NoneType object
And I cannot understand why. I'm new to Python, so maybe this mistake is really simple, but I don't know what to do here. Thanks in advance!
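Worth noting from the stitch code above: it returns None when matchKeypoints does not find enough matches, and with showMatches=False it returns just the result image rather than a (result, vis) tuple. A sketch of a defensive call site (variable names follow the question's code, and imageA/imageB are assumed to be loaded as in the main script):

# check the return value of stitch() before unpacking it
stitcher = Stitcher()
ret = stitcher.stitch([imageA, imageB], showMatches=True)
if ret is None:
    print("Not enough keypoint matches between these two images - skipping")
else:
    (result, vis) = ret
    cv2.imshow("Keypoint Matches", vis)
    cv2.imshow("Result", result)
    cv2.waitKey(0)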

Passing an image as an argument to a function in python

How can I create a function that takes an image (not an image filename) in Python? Simply, like the following:
FaceController.py
import cv2
from Computer_Vision import Face_Detector as FD
def detectface():
    img = cv2.imread('DSC_1902.JPG')
    FD.detect(img)

detectface()
Face_Detector.py
import cv2
def detect(img):
    face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        img = cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
    cv2.namedWindow('img', cv2.WINDOW_NORMAL)
    cv2.imshow('img', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    cv2.imwrite('messigray.png', img)
    return img
Error:
OpenCV Error: Assertion failed (!empty()) in cv::CascadeClassifier::detectMultiScale, file C:\projects\opencv-python\opencv\modules\objdetect\src\cascadedetect.cpp, line 1698
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
cv2.error: C:\projects\opencv-python\opencv\modules\objdetect\src\cascadedetect.cpp:1698: error: (-215) !empty() in function cv::CascadeClassifier::detectMultiScale
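(As an aside, the (-215) !empty() assertion usually means the cascade XML could not be loaded from the given path, so detectMultiScale is running on an empty classifier. A small check, using the path from the question, makes that failure explicit:)

import cv2

face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# CascadeClassifier does not raise if the file is missing - it just stays empty,
# and detectMultiScale then fails with the (-215) !empty() assertion.
if face_cascade.empty():
    raise IOError("Could not load haarcascade_frontalface_default.xml - check the path")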
You can pass a reference to the image object instead of the filename of the image.
EDIT
def image_function(imagePointer):
    # DO SOMETHING WITH THE IMAGE
    # HERE IS THE IMAGE POINTER
    pass

image = open('your_image.png')
# CALLING THE FUNCTION
image_function(image)
Sorry, I don't know OpenCV, so I cannot help with your code :(
You can actually pass the image as a tensor, using cv2.imread() and torch, which is easy and useful.
Short answer:
load with cv2.imread(), then
transform to a tensor with
img = torch.Tensor(img)/255.
That works for my application.
Yours might be a little different.
Code answer:
from Computer_Vision import Face_Detector as FD

def detectface():
    import cv2
    import torch

    folder = r"This Folder/"
    image_file = folder + "image.png"
    # or
    # file = r"image.png"
    # image_file = os.path.join(folder, file)
    img = cv2.imread(image_file)
    img = torch.Tensor(img) / 255.  # THE KEY LINE HERE.
    result = FD.detect(img)  # do stuff with object detection...
    return result

AttributeError: 'module' object has no attribute 'io' in caffe

I am trying to write a gender recognition program; below is the code.
import caffe
import os
import numpy as np
import sys
import cv2
import time

# Models root folder
models_path = "./models"

# Loading the mean image
mean_filename = os.path.join(models_path, './mean.binaryproto')
proto_data = open(mean_filename, "rb").read()
a = caffe.io.caffe_pb2.BlobProto.FromString(proto_data)
mean_image = caffe.io.blobproto_to_array(a)[0]

# Loading the gender network
gender_net_pretrained = os.path.join(models_path, './gender_net.caffemodel')
gender_net_model_file = os.path.join(models_path, './deploy_gender.prototxt')
gender_net = caffe.Classifier(gender_net_model_file, gender_net_pretrained)

# Reshaping mean input image
mean_image = np.transpose(mean_image, (2, 1, 0))

# Gender labels
gender_list = ['Male', 'Female']

# cv2 Haar Face detector
face_cascade = cv2.CascadeClassifier(os.path.join(models_path, 'haarcascade_frontalface_default.xml'))

# Getting prediction from live camera
cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    if ret is True:
        start_time = time.time()
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        rects = face_cascade.detectMultiScale(frame_gray, 1.3, 5)
        # Finding the largest face
        if len(rects) >= 1:
            rect_area = [rects[i][2]*rects[i][3] for i in xrange(len(rects))]
            rect = rects[np.argmax(rect_area)]
            x, y, w, h = rect
            cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 2)
            roi_color = frame[y:y+h, x:x+w]
            # Resizing the face image
            crop = cv2.resize(roi_color, (256, 256))
            # Subtraction from mean file
            # input_image = crop - mean_image
            input_image = rect
            # Getting the prediction
            start_prediction = time.time()
            prediction = gender_net.predict([input_image])
            gender = gender_list[prediction[0].argmax()]
            print("Time taken by DeepNet model: {}".format(time.time()-start_prediction))
            print prediction, gender
            cv2.putText(frame, gender, (x, y), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
            print("Total Time taken to process: {}".format(time.time()-start_time))
        # Showing output
        cv2.imshow("Gender Detection", frame)
        cv2.waitKey(1)

# Delete objects
cap.release()
cv2.killAllWindows()
When I run it, I get this error:
a = caffe.io.caffe_pb2.BlobProto.FromString(proto_data)
AttributeError: 'module' object has no attribute 'io'
How can I solve it? I am using the cnn_gender_age_prediction model. I want to make a real-time gender recognition program using Python and the cnn_gender_age model.
io is a module in the caffe package. Basically, when you type import caffe, it will not automatically import all modules in the caffe package, including io. There are two solutions.
First one: import caffe.io manually:
import caffe
import caffe.io
Second one: update to the latest caffe version, in which you should find this line in the __init__.py under the python/caffe directory:
from . import io
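A quick way to check which case applies to your installed build (a diagnostic sketch, not part of the original answer):

import caffe

# On builds where io is re-exported from __init__.py this prints True;
# on older builds it prints False until the submodule is imported explicitly.
print(hasattr(caffe, 'io'))

import caffe.io  # works in both cases
print(caffe.io)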
