Applying multiple effects to an OpenCV live stream - Python

I want to write a script on a Raspberry Pi with OpenCV and Python that can apply effects to a live video stream.
Everything is working, but the problem is that I can't apply CLAHE to the stream when it is in grayscale mode. When the stream is not grayscale I can apply CLAHE, but as soon as I switch to grayscale, CLAHE stops working.
Here is my code:
import cv2

cap = cv2.VideoCapture(0)
gray_flag = True
cl_flag = False

def clahe():
    global frame
    clahe = cv2.createCLAHE(20, tileGridSize=(8, 8))
    lab = cv2.cvtColor(frame, cv2.COLOR_BGR2LAB)
    l, a, b = cv2.split(lab)
    l2 = clahe.apply(l)
    lab = cv2.merge((l2, a, b))
    frame = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)

while True:
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    c = cv2.waitKey(1)  # read a key press; 'h' toggles grayscale, 'd' toggles CLAHE
    if c & 0xFF == ord('h'):
        gray_flag = not gray_flag
    elif c & 0xFF == ord('d'):
        cl_flag = not cl_flag
    if gray_flag:
        if cl_flag:
            clahe()
        cv2.imshow('a', gray)
    else:
        if cl_flag:
            clahe()
        cv2.imshow('a', frame)

cap.release()
cv2.destroyAllWindows()
Does anybody know how I can apply CLAHE in both grayscale and color?

I think that is because the images don't have the same number of channels: cv2.COLOR_BGR2LAB expects a 3-channel BGR image, so the LAB round trip in clahe() fails on a single-channel grayscale frame. For a grayscale image you can apply CLAHE directly, without converting to LAB at all.
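As a minimal sketch, the helper could branch on the number of channels. The frame handling and the flags come from the question; the function name and the rest of the arrangement are just one possible way to do it, not the only fix:

import cv2

clahe_op = cv2.createCLAHE(clipLimit=20, tileGridSize=(8, 8))

def apply_clahe(img):
    # Grayscale frames have 2 dimensions, BGR frames have 3.
    if img.ndim == 2:
        # Single channel: apply CLAHE directly to the image.
        return clahe_op.apply(img)
    # Color: equalize only the lightness channel in LAB space.
    lab = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
    l, a, b = cv2.split(lab)
    lab = cv2.merge((clahe_op.apply(l), a, b))
    return cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)

In the main loop you would then call gray = apply_clahe(gray) or frame = apply_clahe(frame) depending on gray_flag, instead of mutating the global frame inside clahe().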

Related

I need some help with a face recognition Python OpenCV module

I'm trying to do face recognition that can detect faces, and for some weird reason my code is giving me an error.
The error:
line 13, in <module>
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.error: OpenCV(4.5.2) C:\Users\runneradmin\AppData\Local\Temp\pip-req-build-m8us58q4\opencv\modules\imgproc\src\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in function 'cv::cvtColor'
The code itself:
import numpy as np
import cv2

faceCascade = cv2.CascadeClassifier('Cascades/haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)
cap.set(3, 640)  # set Width
cap.set(4, 480)  # set Height

while True:
    ret, img = cap.read()
    img = cv2.flip(img, -1)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.2,
        minNeighbors=5,
        minSize=(20, 20)
    )
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = img[y:y + h, x:x + w]
    cv2.imshow('video', img)
    k = cv2.waitKey(30) & 0xff
    if k == 27:  # pressing Esc in order to quit
        break

cap.release()
cv2.destroyAllWindows()
I would be very happy if some of the people who read this could send me their solution. By the way, I'm very sorry about my indentation; I'm just kinda new to Stack Overflow.
It seems like your VideoCapture() did not grab any frames.
Try the script on a saved video first (VideoCapture("vid_name.mp4")).
Try disabling the cv2.flip() and see if it helps; if it does, perhaps move the flipping to after converting the source frame to gray.
It is also a good idea to check the value of ret in ret, frame = cap.read():
if not ret:
    print('no frame')
    break
Then, if the program exits with this message, it means there's something wrong with the camera connection.
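Applied to the question's loop, a rough sketch with those checks in place might look like this (same cap object as in the question; the message string is just a placeholder):

while True:
    ret, img = cap.read()
    if not ret or img is None:
        # cap.read() failed, so there is nothing to flip or convert.
        print('no frame grabbed, check the camera connection')
        break
    img = cv2.flip(img, -1)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # ... face detection continues as before ...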
Modifying the code like this worked for me, putting the frame-reading part outside the loop and then continuing:
import numpy as np
import cv2

faceCascade = cv2.CascadeClassifier('Cascades/haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)
cap.set(3, 640)  # set Width
cap.set(4, 480)  # set Height

ret, img = cap.read()
img = cv2.flip(img, -1)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

while True:
    if ret == False:
        continue
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor=1.2,
        minNeighbors=5,
        minSize=(20, 20)
    )
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = img[y:y + h, x:x + w]
    cv2.imshow('video', img)
    k = cv2.waitKey(30) & 0xff
    if k == 27:  # pressing Esc in order to quit
        break

cap.release()
cv2.destroyAllWindows()
With regard to the error message you posted: the issue is that the Haar cascade classifier did not load properly because of the path in the load command. I was able to get it to work by making sure I had the file https://github.com/opencv/opencv/blob/master/data/haarcascades/haarcascade_frontalface_default.xml, placing it in a known directory, and modifying the load command to match.
...
import numpy as np
import cv2

faceCascade = cv2.CascadeClassifier('c:\\Cascades\\haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)
cap.set(3, 640)  # set Width
cap.set(4, 480)  # set Height

ret, img = cap.read()
img = cv2.flip(img, -1)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

while True:
    if ret == False:
        continue
    faces = faceCascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5, minSize=(20, 20))
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = img[y:y + h, x:x + w]
    cv2.imshow('video', img)
    k = cv2.waitKey(30) & 0xff
    if k == 27:  # pressing Esc in order to quit
        break

cap.release()
cv2.destroyAllWindows()
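A quick way to confirm that the cascade file actually loaded, before suspecting the camera, is to test CascadeClassifier.empty() right after construction. A small sketch, with the path being whatever directory you chose:

import cv2

faceCascade = cv2.CascadeClassifier('c:\\Cascades\\haarcascade_frontalface_default.xml')
if faceCascade.empty():
    # An empty classifier means the XML file was not found or could not be parsed.
    raise IOError('haarcascade_frontalface_default.xml was not loaded, check the path')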

OpenCV ret=False, is it reading the video, or not?

I want to detect the edges of a video file (640x640, about 9 seconds) and save the result. I followed the OpenCV documentation and some other examples. Most of the examples I found were reading from a camera.
Here is my code. I checked cap.isOpened(); it returns True, but ret is False and frame is a NoneType object. What is confusing is that I do get the gray array, which depends on the condition if ret == True. How can I get the gray matrix if ret is False?
(I installed ffmpeg with pip install ffmpeg-python.)
(andy.avi was saved in the folder, but it is broken and empty.)
import cv2
import numpy as np

cap = cv2.VideoCapture("...\\video.mp4")

while cap.isOpened():
    ret, frame = cap.read()
    frame_width = int(cap.get(3))
    frame_height = int(cap.get(4))
    size = (frame_width, frame_height)
    result = cv2.VideoWriter('andy.avi',
                             cv2.VideoWriter_fourcc(*'DIVX'),
                             30, size)
    if ret == True:
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        edges = cv2.Canny(gray, 45, 90)
        result.write(edges)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break

cap.release()
result.release()
cv2.destroyAllWindows()
Your code should be changed like this:
# importing the module
import cv2
import numpy as np

# reading the video
source = cv2.VideoCapture("...\\video.mp4")

# We need to set resolutions,
# so convert them from float to integer.
frame_width = int(source.get(3))
frame_height = int(source.get(4))
size = (frame_width, frame_height)

# the final 0 sets isColor=False so single-channel frames can be written
result = cv2.VideoWriter('andy.avi',
                         cv2.VideoWriter_fourcc(*'DIVX'),
                         30, size, 0)

# running the loop
while True:
    # extracting the frames
    ret, img = source.read()
    if not ret:
        # no more frames (end of file or read error), stop the loop
        break
    # converting to gray-scale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, 45, 90)
    # write to gray-scale
    result.write(edges)
    # displaying the video
    cv2.imshow("Live", gray)
    # exiting the loop
    key = cv2.waitKey(1)
    if key == ord("q"):
        break

# closing the window
result.release()
source.release()
cv2.destroyAllWindows()
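If you would rather keep the VideoWriter in its default color mode, an alternative sketch is to expand the single-channel Canny output back to three channels before writing. This assumes the same size, result and edges names as above:

# Alternative: keep a color VideoWriter and convert the edge map to 3 channels.
result = cv2.VideoWriter('andy.avi', cv2.VideoWriter_fourcc(*'DIVX'), 30, size)
edges_bgr = cv2.cvtColor(edges, cv2.COLOR_GRAY2BGR)
result.write(edges_bgr)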
If this helped you, please give it a 👍.

How to solve an error when converting RGB to gray? (error: (-215) scn == 3 || scn == 4 in function cv::cvtColor)

I have code that gets a video from a folder and does some calculations using contours and background subtraction. After that, I want to save the edited video back to the folder. The code is shown below:
import numpy as np
import cv2
import time

# Capture video from file
cap = cv2.VideoCapture('test_video.mp4')
time.sleep(1)

fgbg = cv2.createBackgroundSubtractorMOG2()
j = 0
fourcc = cv2.VideoWriter_fourcc(*'MPEG')
out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640, 480))

while cap.isOpened():
    ret, frame = cap.read()
    if ret == True:
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        fgmask = fgbg.apply(gray)
        _, contours, _ = cv2.findContours(fgmask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        if contours:
            areas = []
            for contour in contours:
                ar = cv2.contourArea(contour)
                areas.append(ar)
            max_area = max(areas or [0])
            max_area_index = areas.index(max_area)
            cnt = contours[max_area_index]
            M = cv2.moments(cnt)
            x, y, w, h = cv2.boundingRect(cnt)
            cv2.drawContours(fgmask, [cnt], 0, (255, 255, 255), 3, maxLevel=0)
            if h < w:
                j += 1
            if j > 10:
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
            if h > w:
                j = 0
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.imshow('video', frame)
        out.write(frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break

cap.release()
out.release()
cv2.destroyAllWindows()
This opens up a window and plays the video, and output.avi is created, but it doesn't contain any content.
The command line produces nothing. I just can't save the file properly.
Please recommend a solution to this issue.
The error is telling you that frame does not have 3 or 4 channels.
Check that your capture is initialized properly:
if not cap.isOpened():
    print("Camera not initialized")
    return
and that it is returning you a valid frame:
if not ret:
    print("Problem reading frame")
    return
else:
    # Convert your frame to gray and find contours etc.
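Since the question's loop is not inside a function, those return statements would become break (or sys.exit()). A rough sketch of the guarded loop, reusing the cap, fgbg and out objects from the question:

if not cap.isOpened():
    raise IOError("Could not open test_video.mp4")

while cap.isOpened():
    ret, frame = cap.read()
    if not ret or frame is None:
        # End of the video or a decode problem: stop instead of calling cvtColor on None.
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    fgmask = fgbg.apply(gray)
    # ... contour handling and out.write(frame) as before ...

cap.release()
out.release()
cv2.destroyAllWindows()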

Storing images in list

I've been trying to store cropped images from a single frame in a list, but I keep getting errors. Here's a snippet of my code:
import cv2
import numpy as np
import time

cap = cv2.VideoCapture('/home/wael-karkoub/Desktop/Research/Videos/test.mp4')
ret = True
cropped_images = []

while ret == True:
    ret, frame = cap.read()
    height, width, channels = frame.shape
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    _, thresh = cv2.threshold(blur[0:height // 2], 10, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    im2, contours, hierarchy = cv2.findContours(thresh, 1, 2)
    for cnt in contours:
        x, y, w, h = cv2.boundingRect(cnt)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        image = frame[y:y + h, x:x + w]
        cv2.imshow('test', image)
        time.sleep(2)
    # cv2.imshow('Contours', frame)
    if cv2.waitKey(25) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
        break

cap.release()
cv2.destroyAllWindows()
Is this the correct way of doing it?
If you want to store your cropped images in a list, you need an assignment somewhere. Just add cropped_images.append(image) to your code, most likely right after image = frame[y:y+h, x:x+w].
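As a sketch, the inner loop from the question would then look roughly like this (same variable names as above):

for cnt in contours:
    x, y, w, h = cv2.boundingRect(cnt)
    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    image = frame[y:y + h, x:x + w]
    cropped_images.append(image)  # keep the crop; the list persists across frames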

Displaying the camera feed in grayscale in Python with OpenCV

I've been trying to display the camera feed from my laptop's web cam in grayscale, and I've done it using the following code:
import cv2
import numpy as np

clicked = False

def onMouse(event, x, y, flags, param):
    global clicked
    if event == cv2.cv.CV_EVENT_LBUTTONUP:
        clicked = True

cv2.namedWindow('image capture', cv2.WINDOW_NORMAL)
cv2.setMouseCallback('image capture', onMouse)

# initialize the camera object with VideoCapture
camera = cv2.VideoCapture(0)
sucess, frame = camera.read()
cv2.imwrite('snapshot.png', frame)
gray = cv2.imread('snapshot.png', cv2.IMREAD_GRAYSCALE)

while sucess and cv2.waitKey(1) == -1 and not clicked:
    cv2.imwrite('snapshot.png', frame)
    gray = cv2.imread('snapshot.png', cv2.IMREAD_GRAYSCALE)
    cv2.imshow('image capture', gray)
    sucess, frame = camera.read()

cv2.imwrite('snapshot.png', frame)
print 'photo taken press any key to exit'
cv2.waitKey()
cv2.destroyAllWindows()
What I've done here is save the frame to 'snapshot.png', reload it in grayscale, and display that grayscale image. Is there any method to directly read the camera frame in grayscale rather than going through all this mess? Thanks in advance.
wow, what a mess ;)
you simply want:
gray = cv2.cvtColor( img, cv2.COLOR_BGR2GRAY )
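Applied to the question's loop, that means converting the captured frame in memory instead of writing and re-reading snapshot.png. A minimal sketch reusing the camera object, the sucess flag and the mouse-click flag from the question:

sucess, frame = camera.read()
while sucess and cv2.waitKey(1) == -1 and not clicked:
    # convert the BGR frame to grayscale directly, no temporary file needed
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow('image capture', gray)
    sucess, frame = camera.read()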
In recent versions of OpenCV, cvtColor expects its src to not be None and otherwise raises the (-215) assertion error.
This is basically a scenario where you have to guard against a bad frame, much like catching an exception.
Code to overcome this problem:
while True:
    ret, frame = cap.read()
    if ret and frame is not None:  # checking ret avoids calling cvtColor on an empty frame
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        cv2.imshow('frame', gray)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
