I'm trying to edit a video, and if I use the cv2.imshow function I can see my edited frames,
but when I try to open the saved video I get the error "0xc10100be".
It seems like there is a problem with saving the edited frames, and I think it has something to do with the size of the images. I tried changing the sizes, but to no avail.
Help would be appreciated, cheers.
import cv2
import numpy as np

video = cv2.VideoCapture(".......mp4")
result = cv2.VideoWriter('filename.mp4',
                         cv2.VideoWriter_fourcc(*'mp4v'),
                         15.0, (960, 450))

while True:
    ret, frame = video.read()
    if ret:
        width, height = 450, 960
        pts1 = np.float32([[68, 755], [1908, 733], [63, 803], [1909, 787]])
        pts2 = np.float32([[0, 0], [width, 0], [0, height], [width, height]])
        matrix = cv2.getPerspectiveTransform(pts1, pts2)
        Img = cv2.warpPerspective(frame, matrix, (width, height))
        imgOutput = cv2.rotate(Img, cv2.ROTATE_90_COUNTERCLOCKWISE)
        imgHSV = cv2.cvtColor(imgOutput, cv2.COLOR_BGR2HSV)
        h_min = 19
        h_max = 60
        s_min = 73
        s_max = 237
        v_min = 20
        v_max = 188
        lower = np.array([h_min, s_min, v_min])
        upper = np.array([h_max, s_max, v_max])
        mask = cv2.inRange(imgHSV, lower, upper)
        result.write(mask)
        cv2.imshow('Output', mask)
        if cv2.waitKey(1) & 0xFF == ord('s'):
            break
    else:
        # Break the loop
        break

video.release()
result.release()
cv2.destroyAllWindows()
print("The video was successfully saved")
So what I'm doing here is taking a frame of my video, cropping it, putting an HSV filter on it, and getting a black & white mask so that only the relevant stuff is visible in white.
And I'm trying to save those black and white frames into a video.
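A likely culprit, offered here as an assumption rather than a confirmed diagnosis: cv2.inRange returns a single-channel mask, while the VideoWriter above was opened with its default isColor=True and therefore expects 3-channel BGR frames of exactly (960, 450). Frames that don't match are silently dropped, which leaves a file that players reject with errors like 0xc10100be. A minimal sketch of the fix is to convert the mask back to three channels before writing:

        maskBGR = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)  # 1 channel -> 3 identical channels
        result.write(maskBGR)

Alternatively, some backends let you pass isColor=False to cv2.VideoWriter and write the single-channel mask directly.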
I want to put a horizontal line in the centre of the screen and make a counter of the people that are going up and going down. And how can I find out which side of the screen the object is coming from, to make the right counter?
import cv2
import numpy as np

video = cv2.VideoCapture('videos/video.mp4')
bgsg = cv2.bgsegm.createBackgroundSubtractorMOG()
_, frame = video.read()
r = cv2.selectROI(frame)
lower_black = np.array([0, 0, 0], np.uint8)
upper_black = np.array([179, 100, 130], np.uint8)

while True:
    _, frame = video.read()
    im_cropped = frame[int(r[1]):int(r[1]+r[3]),
                       int(r[0]):int(r[0]+r[2])]
    fgmask = bgsg.apply(im_cropped)
    image = cv2.cvtColor(im_cropped, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(image, lower_black, upper_black)
    contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) != 0:
        for contour in contours:
            if cv2.contourArea(contour) > 400:
                x, y, w, h = cv2.boundingRect(contour)
                cv2.rectangle(im_cropped, (x, y), (x+w, y+h), (0, 0, 255), 3)
    cv2.imshow('frame', im_cropped)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

video.release()
cv2.destroyAllWindows()
I couldn't find any method except putting a dot in the centre of the detected objects and calculating the distance of the object from the line.
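That centroid-and-line idea can be made to work. Below is a minimal sketch of it, under the assumption that only a few well-separated people are in frame, so a naive nearest-centroid match between consecutive frames is enough to track them; line_y, max_jump, and the 400-pixel area threshold are tunable guesses. The direction of the crossing also tells you which side the object came from: a "down" count means it entered from the top half, an "up" count means it entered from the bottom half.

import numpy as np

line_y = 240       # y coordinate of the counting line (assumed frame centre)
max_jump = 50      # max centroid movement between frames to treat as the same object
prev_centroids = []
count_up = 0       # objects that came from below the line
count_down = 0     # objects that came from above the line

def update_counts(centroids):
    # Match each centroid to the nearest centroid of the previous frame and
    # count a crossing when it ends up on the other side of line_y.
    global prev_centroids, count_up, count_down
    for cx, cy in centroids:
        best = None
        for px, py in prev_centroids:
            d = np.hypot(cx - px, cy - py)
            if d < max_jump and (best is None or d < best[0]):
                best = (d, py)
        if best is not None:
            _, py = best
            if py < line_y <= cy:       # was above the line, now below: going down
                count_down += 1
            elif py > line_y >= cy:     # was below the line, now above: going up
                count_up += 1
    prev_centroids = centroids

Inside the while loop, after findContours, you would collect one centroid per large contour with cv2.moments and feed the list in:

    centroids = []
    for contour in contours:
        if cv2.contourArea(contour) > 400:
            M = cv2.moments(contour)
            if M['m00'] > 0:
                centroids.append((M['m10'] / M['m00'], M['m01'] / M['m00']))
    update_counts(centroids)
    cv2.line(im_cropped, (0, line_y), (im_cropped.shape[1], line_y), (255, 0, 0), 2)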
This is an example of what I will be seeing. I am trying to count the number of items that are blue in the video; in this example it would be 2, my shirt and the phone. How would I go about doing this?
Here is my code:
import numpy as np
import cv2

cap = cv2.VideoCapture(0)

while True:
    ret, frame = cap.read()
    width = int(cap.get(3))
    height = int(cap.get(4))
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    lower_blue = np.array([90, 50, 50])
    upper_blue = np.array([130, 255, 255])
    mask = cv2.inRange(hsv, lower_blue, upper_blue)
    result = cv2.bitwise_and(frame, frame, mask=mask)
    cv2.imshow('frame', result)
    cv2.imshow('mask', mask)
    if cv2.waitKey(1) == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
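One way to turn the mask above into a count, sketched under the assumption that each blue item shows up as a single reasonably large blob: find the external contours of the mask and count those above an area threshold. The 500-pixel threshold is a guess that will need tuning, and a morphological opening first helps suppress speckle.

    # would go inside the while loop, after inRange
    kernel = np.ones((5, 5), np.uint8)
    clean = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)     # remove small speckle
    contours, _ = cv2.findContours(clean, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    blue_items = [c for c in contours if cv2.contourArea(c) > 500]
    cv2.putText(result, "blue items: {}".format(len(blue_items)), (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2)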
I need real-time video capture. I've tried reducing the resolution and setting a static FPS, and none of it worked. Why am I getting a slow video feed although it says my FPS is 30? I don't really know where exactly the problem is; it's really driving me mad.
Code:
import cv2
import os
import face_recognition
import pickle
from cv2.cv2 import CAP_DSHOW

known_faces_dir = "known_faces"
video = cv2.VideoCapture(0)
accuracy = 0.6
frame_thikness = 3
font_size = 2
MODEL = "cnn"

print("loading known faces")
known_faces = []
known_names = []
unknown_faces = []

for name in os.listdir(known_faces_dir):
    for filename in os.listdir(f"{known_faces_dir}/{name}"):
        image = face_recognition.load_image_file(f"{known_faces_dir}/{name}/{filename}")
        encodings = face_recognition.face_encodings(image)[0]
        # encodings = pickle.load(open(f"{name}/{filename}","rb"))
        known_faces.append(encodings)
        known_names.append(name)

print("treating unknown faces")
while True:
    # print(filename)
    # image = face_recognition.load_image_file(f"{unknown_faces_dir}/{filename}")
    ret, image = video.read()
    print(video.get(cv2.CAP_PROP_FPS))
    locations = face_recognition.face_locations(image, model=MODEL)
    encodings = face_recognition.face_encodings(image, locations)
    # image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    for face_location, face_encodings in zip(locations, encodings):
        results = face_recognition.compare_faces(known_faces, face_encodings, tolerance=0.54)
        if True in results:
            match = known_names[results.index(True)]
            print(f"Face Found: {match}")
            top_l = (face_location[3], face_location[0])
            bottom_r = (face_location[1], face_location[2])
            color = [0, 255, 0]
            cv2.rectangle(image, top_l, bottom_r, color, frame_thikness)
            top_l = (face_location[3], face_location[2])
            bottom_r = (face_location[1], face_location[2] + 22)
            cv2.rectangle(image, top_l, bottom_r, color, cv2.FILLED)
            cv2.putText(image, str(match), (face_location[3]+10, face_location[2]+15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (200, 200, 200), 2)
    cv2.imshow("", image)
    if cv2.waitKey(1) & 0xFF == ord("e"):
        break
    # cv2.waitKey(10200)

video.release()
cv2.destroyWindow(filename)
Try this and look at the elapsed time to compute the FPS.
Then add the other processing back one step at a time.
Working with gray images from the beginning is a good idea; if the face images are in colour, save them as gray and use only gray images.
Avoid repeating the same processing when it is not necessary.
import numpy as np
import cv2
import time

cap = cv2.VideoCapture(0)
start_time = time.time()
end_time = start_time
elapsed_time = 1e-6  # non-zero so the first FPS overlay doesn't divide by zero

font = cv2.FONT_HERSHEY_SIMPLEX
org = (50, 50)
fontScale = 1
color = (255, 0, 0)
thickness = 2

while True:
    start_time = time.time()
    # Capture frame-by-frame
    ret, frame = cap.read()
    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Display the resulting frame with the measured FPS overlaid
    cv2.putText(gray, str(1 / elapsed_time) + " fps", org, font,
                fontScale, color, thickness, cv2.LINE_AA)
    cv2.imshow('frame', gray)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    end_time = time.time()
    elapsed_time = end_time - start_time

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
It is highly likely that the inference of your model is taking more than 33 ms (1000 ms / 30 FPS) and is thus limiting your FPS. Try removing the face recognition model from the loop and see if it is still slow.
If that solves your problem, your CPU or GPU is the limiting factor, depending on how you run the model.
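If the model does turn out to be the bottleneck, a common mitigation, sketched here as a suggestion rather than a guaranteed fix, is to run detection only on every Nth frame and on a downscaled copy, while still displaying every frame; the "hog" model is also much cheaper than "cnn" on a CPU. PROCESS_EVERY and SCALE are tunable assumptions.

import cv2
import face_recognition

video = cv2.VideoCapture(0)
PROCESS_EVERY = 5   # run detection on every 5th frame only
SCALE = 0.25        # detect on a quarter-size copy of the frame
frame_idx = 0
locations = []

while True:
    ret, frame = video.read()
    if not ret:
        break
    if frame_idx % PROCESS_EVERY == 0:
        small = cv2.resize(frame, (0, 0), fx=SCALE, fy=SCALE)
        # detect on the small copy, then scale the boxes back up
        locations = [tuple(int(v / SCALE) for v in loc)
                     for loc in face_recognition.face_locations(small, model="hog")]
    for top, right, bottom, left in locations:
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 2)
    frame_idx += 1
    cv2.imshow("preview", frame)
    if cv2.waitKey(1) & 0xFF == ord("e"):
        break

video.release()
cv2.destroyAllWindows()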
I have implemented code that captures an image and saves it. After that I have another piece of code which adds jewelry to that captured image, but I am facing an issue while adding the jewelry to the captured face. Error:
face_landmarks = face_landmarks_list[0]
IndexError: list index out of range
Can someone help me with a solution?
Image capturing code:
import cv2

cam = cv2.VideoCapture(0)
cv2.namedWindow("test")
img_counter = 0

while True:
    ret, frame = cam.read()
    if not ret:
        print("failed to grab frame")
        break
    cv2.imshow("test", frame)
    k = cv2.waitKey(1)
    if k % 256 == 27:
        # ESC pressed
        print("Escape hit, closing...")
        break
    elif k % 256 == 32:
        # SPACE pressed
        img_name = "opencv_frame_{}.png".format(img_counter)
        cv2.imwrite(img_name, frame)
        print("{} written!".format(img_name))
        img_counter += 1

cam.release()
cv2.destroyAllWindows()
-------- below, the code for adding the jewelry image --------
import cv2
import face_recognition
from PIL import Image, ImageDraw
import numpy

jewel_img = cv2.imread("jewelery.png")
frame = cv2.imread('akash.jpg')
frame = cv2.resize(frame, (432, 576))

# Returns a list of face landmarks present on frame
face_landmarks_list = face_recognition.face_landmarks(frame)

# For demo images only one person is present in image
face_landmarks = face_landmarks_list[0]
shape_chin = face_landmarks['chin']

# x, y coordinates on frame where jewelry will be added
x = shape_chin[3][0]
y = shape_chin[6][1]

# Jewelry width & height calculated using face chin coordinates
img_width = abs(shape_chin[3][0] - shape_chin[14][0])
img_height = int(1.02 * img_width)

jewel_img = cv2.resize(jewel_img, (img_width, img_height), interpolation=cv2.INTER_AREA)
jewel_gray = cv2.cvtColor(jewel_img, cv2.COLOR_BGR2GRAY)

# All pixels greater than 230 will be converted to white and others will be converted to black
thresh, jewel_mask = cv2.threshold(jewel_gray, 230, 255, cv2.THRESH_BINARY)

# Convert the background of the jewelry image to black
jewel_img[jewel_mask == 255] = 0

# Crop out jewelry area from original frame
jewel_area = frame[y: y + img_height, x: x + img_width]

# bitwise_and will convert all black regions in any image to black in the resulting image
masked_jewel_area = cv2.bitwise_and(jewel_area, jewel_area, mask=jewel_mask)

# add both images so that the black region in either image is filled by the
# non-black region of the other
final_jewel = cv2.add(masked_jewel_area, jewel_img)

# replace original frame jewel area with the newly created jewel_area
frame[y: y + img_height, x: x + img_width] = final_jewel

# convert image to RGB format to read it in the Pillow library
rgb_img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
pil_img = Image.fromarray(rgb_img)
draw = ImageDraw.Draw(pil_img, 'RGBA')
draw.polygon(face_landmarks['left_eyebrow'], fill=(23, 26, 31, 100))
draw.polygon(face_landmarks['right_eyebrow'], fill=(23, 26, 31, 100))
draw.polygon(face_landmarks['top_lip'], fill=(158, 63, 136, 100))
draw.polygon(face_landmarks['bottom_lip'], fill=(158, 63, 136, 100))
draw.polygon(face_landmarks['left_eye'], fill=(23, 26, 31, 100))
draw.polygon(face_landmarks['right_eye'], fill=(23, 26, 31, 100))

# calculate x, y, radius for the ellipse to be drawn between the two eyebrows
x_centre_eyebrow = face_landmarks['nose_bridge'][0][0]
y_centre_eyebrow = face_landmarks['left_eyebrow'][4][1]
r = int(1 / 4 * abs(face_landmarks['left_eyebrow'][4][0] - face_landmarks['right_eyebrow'][0][0]))
draw.ellipse((x_centre_eyebrow - r, y_centre_eyebrow - r, x_centre_eyebrow + r, y_centre_eyebrow + r), fill=(128, 0, 128, 100))

pil_img.show()
The error says the answer:
face_landmarks = face_landmarks_list[0]
You need to check whether at least one face was detected or not.
Your second script should start by checking the length of the returned list.
If the length is greater than 0, meaning some faces were detected, then continue.
# Returns a list of face landmarks present on frame
face_landmarks_list = face_recognition.face_landmarks(frame)
if len(face_landmarks_list) > 0:
    jewel_img = cv2.imread("jewelery.png")
    frame = cv2.imread('akash.jpg')
    frame = cv2.resize(frame, (432, 576))
    # For demo images only one person is present in image
    face_landmarks = face_landmarks_list[0]
I'm writing this piece of Python to display a stream of video from my webcam while at the same time recording the video, which I've got working. However, I've grayscaled the video streaming to my screen and timestamped it, but my recorded video is in colour! I've included the code below. I've tried using some global variables but nothing worked; any help greatly appreciated.
import cv2
import numpy as np
import time, datetime
import os

genericfilename = "recording"
filetime = str(time.time())
extension = '.avi'
filename = genericfilename + filetime + extension
frames_per_second = 100
res = '720p'
print("NEW FILE NAME: " + filename)

# Set resolution for the video capture
def change_res(cap, width, height):
    cap.set(3, width)
    cap.set(4, height)

# Standard Video Dimensions Sizes
STD_DIMENSIONS = {
    "480p": (640, 480),
    "720p": (1280, 720),
    "1080p": (1920, 1080),
    "4k": (3840, 2160),
}

# grab resolution dimensions and set video capture to it.
def get_dims(cap, res='1080p'):
    width, height = STD_DIMENSIONS["480p"]
    if res in STD_DIMENSIONS:
        width, height = STD_DIMENSIONS[res]
    ## change the current capture device
    ## to the resulting resolution
    change_res(cap, width, height)
    return width, height

# Video Encoding, might require additional installs
VIDEO_TYPE = {
    'avi': cv2.VideoWriter_fourcc(*'XVID'),
    #'mp4': cv2.VideoWriter_fourcc(*'H264'),
    'mp4': cv2.VideoWriter_fourcc(*'XVID'),
}

def get_video_type(filename):
    filename, ext = os.path.splitext(filename)
    if ext in VIDEO_TYPE:
        return VIDEO_TYPE[ext]
    return VIDEO_TYPE['avi']

capture = cv2.VideoCapture(0)
out = cv2.VideoWriter(filename, get_video_type(filename), 60,
                      get_dims(capture, res))

while True:
    ret, frame = capture.read()
    out.write(frame)
    grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    font = cv2.FONT_ITALIC = 1
    cv2.putText(grayFrame, str(datetime.datetime.now()), (-330, 460), font, 3,
                (200, 200, 200), 2, cv2.LINE_AA)
    cv2.imshow('combilift output', grayFrame)
    # Press Q on keyboard to exit
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    if cv2.waitKey(1) & 0xFF == ord('r'):
        print(datetime.datetime.now())

capture.release()
out.release()
cv2.destroyAllWindows()
You save the frame to the video, then convert the frame to gray.
out.write(frame)
grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
If you want your recorded video to be gray, maybe reverse the order of operations and save grayFrame?
grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
out.write(grayFrame)
If you also want the timestamp text in the recording, put the text on the frame before writing it to the output.
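A short sketch of that ordering, assuming the recording should match the grayscale preview. The text origin (10, 460) is a placeholder for the original's, and note that a VideoWriter opened with the default isColor=True expects 3-channel frames, so the gray image is converted back to BGR before writing:

while True:
    ret, frame = capture.read()
    grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # stamp the text first so it ends up in the recording too
    cv2.putText(grayFrame, str(datetime.datetime.now()), (10, 460),
                cv2.FONT_HERSHEY_PLAIN, 3, (200, 200, 200), 2, cv2.LINE_AA)
    # convert back to 3 channels for the writer (opened with isColor=True)
    out.write(cv2.cvtColor(grayFrame, cv2.COLOR_GRAY2BGR))
    cv2.imshow('combilift output', grayFrame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break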
Let's take a look at your code:
out = cv2.VideoWriter(filename, get_video_type(filename), 60,
.....
while True:
    ret, frame = capture.read()
    out.write(frame)
    grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
You first write the frame out, then convert it to gray.
The correct sequence should be:
out = cv2.VideoWriter(filename, get_video_type(filename), 60,
.....
while True:
    ret, frame = capture.read()
    grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    out.write(grayFrame)
I don't have data to test with, but just in case you experience an issue with channels: you can use OpenCV's cv2.merge((grayFrame, grayFrame, grayFrame)) to create a normal 3-channel grayscale image and save that to the video.
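A minimal sketch of that channel fix, assuming the writer was opened with the default isColor=True (which expects 3-channel frames):

grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# stack the single gray channel three times to get a BGR-shaped gray image
threeChannelGray = cv2.merge((grayFrame, grayFrame, grayFrame))
out.write(threeChannelGray)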