Storing images in list - python

I've been trying to store cropped images from as single frame to a list but i keep getting errors. Here's a snippet of my code
import cv2
import numpy as np
cap = cv2.VideoCapture('/home/wael-karkoub/Desktop/Research/Videos/test.mp4')
ret = True
cropped_images = []
while ret == True:
ret, frame = cap.read()
height, width, channels = frame.shape
gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray,(5,5),0)
_ ,thresh = cv2.threshold(blur[0:height//2],10,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
im2, contours, hierarchy = cv2.findContours(thresh,1,2)
for cnt in contours:
x,y,w,h = cv2.boundingRect(cnt)
cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),2)
image = frame[y:y+h, x:x+w]
cv2.imshow('test',image)
time.sleep(2)
# cv2.imshow('Contours',frame)
if cv2.waitKey(25) & 0xFF == ord('q'):
cv2.destroyAllWindows()
break
cap.release()
cv2.destroyAllWindows()
Is this the correct way of doing it?
Output = enter image description here

If you want to store your cropped images to a list, you need some assignment going on. Just add cropped_images.append(image) somewhere in your code, most likely after image = frame[y:y+h, x:x+w]

Related

How to make counter for detected objects

I want to put a horizontal line in the centre of the screen and make counter of the people that are going up and going down. And how can i find out which side of the screen is the object coming from to make the right counter.
import cv2
import numpy as np
video = cv2.VideoCapture('videos/video.mp4')
bgsg = cv2.bgsegm.createBackgroundSubtractorMOG()
_,frame = video.read()
r = cv2.selectROI(frame)
lower_black = np.array([0, 0, 0], np.uint8)
upper_black = np.array([179, 100, 130], np.uint8)
while True:
_,frame = video.read()
im_cropped = frame[int(r[1]):int(r[1]+r[3]),
int(r[0]):int(r[0]+r[2])]
fgmask = bgsg.apply(im_cropped)
image = cv2.cvtColor(im_cropped,cv2.COLOR_BGR2HSV)
mask = cv2.inRange(image,lower_black,upper_black)
contours,hierarchy = cv2.findContours(mask,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
if len(contours) != 0:
for contour in contours:
if cv2.contourArea(contour) > 400:
x,y,w,h = cv2.boundingRect(contour)
cv2.rectangle(im_cropped,(x,y),(x+w,y+h),(0,0,255),3)
cv2.imshow('frame',im_cropped)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
video.release()
cv2.destroyAllWindows()
I couldnt find any method except for put a dot in the center of the detected objects and calculate the distance of the object from the line.

Merge individual frame to video file using Opencv

I am trying to stack a individual frame to a video file using Opencv. I want to combine two different code together to make the individual frame.
Following code help me extract the individual frame,
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
out = cv2.VideoWriter('file_data.mp4',fourcc,20 (1920,1080),False)
while True:
ret, frame=cap.read()
mask = object_detector.apply(frame)
_, mask = cv2.threshold(mask,254,255,cv2.THRESH_BINARY)
contours,_ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
res = cv2.bitwise_and(frame,frame,mask=mask)
for cnt in contours:
area = cv2.contourArea(cnt)
if area>1000:
#print("Area of contour:", area)
cv2.drawContours(frame, [cnt], -1, (0,255,0),2)
cv2.imwrite("file%d.jpg"%count, frame)
out.write(frame)
if cv2.waitKey(1) and 0xFF == ord('q'):
break
I tried storing the individual frame in array, but it didn't work. It doesn't show any error, but pc crash.
fps = 20,
,width = 1920
,height = 1080
Thanks to Rotem.
The issue has been solved using the VideoWriter from Opencv. The working code is given below.
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
out = cv2.VideoWriter('file_data.mp4',fourcc,20,(1920,1080), True)
while True:
ret, frame=cap.read()
if ret == True:
# frame[frame<=thresholds]=0
mask = object_detector.apply(frame)
_, mask = cv2.threshold(mask,254,255,cv2.THRESH_BINARY)
contours,_ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
res = cv2.bitwise_and(frame,frame,mask=mask)
for cnt in contours:
area = cv2.contourArea(cnt)
if area>1000:
cv2.drawContours(frame, [cnt], -1, (0,255,0),2)
out.write(frame)
break
if cv2.waitKey(20) == ord('q'):
break

I need some help it's about a face recognition python opencv module

I'm trying to do a face recognition that can detect faces and for some weird reason my code is giving me error
the error :
line 13, in <module>
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.error: OpenCV(4.5.2) C:\Users\runneradmin\AppData\Local\Temp\pip-req-build-
m8us58q4\opencv\modules\imgproc\src\color.cpp:182: error: (-215:Assertion failed) !_src.empty() in
function 'cv::cvtColor'
the code itself:
import numpy as np
import cv2
faceCascade = cv2.CascadeClassifier('Cascades/haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)
cap.set(3,640) # set Width
cap.set(4,480) # set Height
while True:
ret , img = cap.read()
img = cv2.flip(img,-1)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.2,
minNeighbors=5,
minSize = (20,20)
)
for (x,y,w,h) in faces:
cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
roi_gray = gray[y:y+h,x:x+w]
roi_color = img[y:y+h,x:x+w]
cv2.imshow('video',img)
k = cv2.waitKey(30) & 0xff
if k == 27: # pressing esc in order to quit
break
cap.release()
cap.destroyAllWindows()
I would be very happy if some of the people who may read this will send me their solution by the way I'm very sorry about my indentation I'm just kinda new to stack overflow
It seems like your VideoCapture() did not grab frames.
Try to check the script on a saved video first (VideoCapture("vid_name.mp4")).
Try disabling the cv2.flip() and see if it helps, if yes, perhaps try move this flipping to after converting the source frame to gray.
Also it is a good idea to check the value of ret in ret, frame = cap.read() by using
if not ret:
print('no frame')
break
Then if the program exits with this error message, it means there's something wrong with the camera connection.
Modifying code like this worked for me. Putting the frame part outside the loop and then continuing:
import numpy as np
import cv2
faceCascade = cv2.CascadeClassifier('Cascades/haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)
cap.set(3,640) # set Width
cap.set(4,480) # set Height
ret , img = cap.read()
img = cv2.flip(img,-1)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
while True:
if ret == False:
continue
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.2,
minNeighbors=5,
minSize = (20,20)
)
for (x,y,w,h) in faces:
cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
roi_gray = gray[y:y+h,x:x+w]
roi_color = img[y:y+h,x:x+w]
cv2.imshow('video',img)
k = cv2.waitKey(30) & 0xff
if k == 27: # pressing esc in order to quit
break
cap.release()
cap.destroyAllWindows()
With regard to the error message you posted: The issue is the the HaarCascade classifer did not properly load due to the syntax in the load command. I was able to get it to work by
making sure I hade the file: https://github.com/opencv/opencv/blob/master/data/haarcascades/haarcascade_frontalface_default.xml
placing it into a known directory and modifying the load command to match.
...
import numpy as np
import cv2
faceCascade = cv2.CascadeClassifier('c:\\Cascades\\haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)
cap.set(3,640) # set Width
cap.set(4,480) # set Height
ret , img = cap.read()
img = cv2.flip(img,-1)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
while True:
if ret == False:
continue
faces = faceCascade.detectMultiScale(gray,scaleFactor=1.2,minNeighbors=5,minSize = (20,20))
for (x,y,w,h) in faces:
cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
roi_gray = gray[y:y+h,x:x+w]
roi_color = img[y:y+h,x:x+w]
cv2.imshow('video',img)
k = cv2.waitKey(30) & 0xff
if k == 27: # pressing esc in order to quit
break
cap.release()
cap.destroyAllWindows()

applying multiple effect to opencv live stream

I want to write a script on raspberry with opencv and python that can apply effect to livestream video,
every thing is working but the problem is that i cant apply clahe to stream when it is in gray scale mode, when stream is not grayscale I can apply clahe but when i change it to grayscale clahe doesnt work.
here is my code:
import cv2
cap = cv2.VideoCapture(0)
gray_flag=True
cl_flag = False
def clahe():
global frame
clahe = cv2.createCLAHE(20, tileGridSize=(8,8))
lab = cv2.cvtColor(frame, cv2.COLOR_BGR2LAB)
l, a, b = cv2.split(lab)
l2 = clahe.apply(l)
lab = cv2.merge((l2,a,b))
frame = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)
while True:
was_pressed = False
ret, frame = cap.read()
gray=cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
if c & 0xFF == ord('h'):
if not gray_flag:
gray_flag = True
else:
gray_flag = False
elif c & 0xFF == ord('d'):
if not cl_flag:
cl_flag = True
else:
cl_flag = False
if gray_flag:
if cl_flag:
clahe()
else:
pass
cv2.imshow('a', gray)
else:
if cl_flag:
clahe()
else:
pass
cv2.imshow('a', frame)
cap.release()
cv2.destroyAllWindows()
does anybody know how can I apply clahe in both grayscale and colored?
I think that is because they don't have the same amount of channels so you can't convert from grayscale to clahe

how to solve an error when converting RGB to Gray? (error: (-215) scn == 3 || scn == 4 in function cv::cvtColor)

I have a code that gets a video from a folder and does some calculations using contours and background subtraction. After that I am going to save that edited video into the folder. The code is shown below:
import numpy as np
import cv2
import time
# Capture video from file
cap = cv2.VideoCapture('test_video.mp4')
time.sleep(1)
fgbg = cv2.createBackgroundSubtractorMOG2()
j = 0
fourcc = cv2.VideoWriter_fourcc(*'MPEG')
out = cv2.VideoWriter('output.avi',fourcc, 20.0, (640,480))
while (cap.isOpened()):
ret, frame = cap.read()
if ret == True:
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
fgmask = fgbg.apply(gray)
_, contours, _ = cv2.findContours(fgmask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if contours:
areas = []
for contour in contours:
ar = cv2.contourArea(contour)
areas.append(ar)
max_area = max(areas or [0])
max_area_index = areas.index(max_area)
cnt = contours[max_area_index]
M = cv2.moments(cnt)
x, y, w, h = cv2.boundingRect(cnt)
cv2.drawContours(fgmask, [cnt], 0, (255,255,255), 3, maxLevel = 0)
if h < w:
j += 1
if j>10:
cv2.rectangle(frame,(x,y),(x+w,y+h),(0,0,255),2)
if h > w:
j = 0
cv2.rectangle(frame,(x,y),(x+w,y+h),(0,255,0),2)
cv2.imshow('video',frame)
out.write(frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
break
cap.release()
out.release()
cv2.destroyAllWindows()
This opens up a window and plays a video, then output.avi is created, but it doesn't contain any content.
cmd produces nothing. I just can't able to save a file in a proper way.
Please recommend a solution to this issue
The error is telling you that frame does not have 3 or 4 channels.
Can you check that your camera is initialized properly
if not cap.isOpened():
print("Camera not initialized")
return
It is returning you a valid frame
if not ret:
print("Problem reading frame")
return
else:
# Convert your frame to gray and find contours etc.

Categories