I have mp4/avi videos that are 10 minutes long at 30 FPS. I want to reduce the duration to 5 minutes while keeping 30 FPS, which means the new video should drop every other frame (for example, keep f0 f2 f4 from the original f0 f1 f2 f3 f4). How can I do this with OpenCV? This is my current code to get the duration and FPS of a video.
# import module
import cv2
import datetime
# create video capture object
data = cv2.VideoCapture('C:/Users/Asus/Documents/videoDuration.mp4')
# count the number of frames
frames = data.get(cv2.CAP_PROP_FRAME_COUNT)
fps = data.get(cv2.CAP_PROP_FPS)
# calculate duration of the video
seconds = round(frames / fps)
video_time = datetime.timedelta(seconds=seconds)
print(f"duration in seconds: {seconds}")
print(f"video time: {video_time}")
This code reads the input video file frame by frame and writes every other frame to the output video file. As a result, the output video has half as many frames as the input video and therefore half the duration.
import cv2

# Open the input video file
cap = cv2.VideoCapture("input.mp4")

# Check if the video is opened successfully
if not cap.isOpened():
    print("Error opening video file")

# Read the video's width, height, and frame rate
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = int(cap.get(cv2.CAP_PROP_FPS))

# Create the output video file
out = cv2.VideoWriter("output.mp4", cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height))

# Read the frames from the input video and write them to the output video,
# skipping every other frame
while True:
    ret, frame = cap.read()
    if not ret:
        break
    cap.grab()  # grab (and discard) the next frame so it is skipped
    out.write(frame)

# Release the video capture and video writer objects
cap.release()
out.release()
Read frames from the capture, keeping track of how many you've read, and write only every Nth frame, like so:
from itertools import count

import cv2

in_video = cv2.VideoCapture("example.mp4")

frames = int(in_video.get(cv2.CAP_PROP_FRAME_COUNT))
fps = in_video.get(cv2.CAP_PROP_FPS)
w = int(in_video.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(in_video.get(cv2.CAP_PROP_FRAME_HEIGHT))
print(f"{frames=}, {fps=}, {w=}, {h=}")

out_video = cv2.VideoWriter("out.mp4", cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))

frames_written = 0
every_nth = 2

for frame_num in count(0):
    ret, frame = in_video.read()
    if not ret:  # out of frames
        break
    if frame_num % every_nth == 0:
        out_video.write(frame)
        frames_written += 1

print(f"{frames_written=}")
I have this function that records a video using a webcam:
def capture_video(webcam_video):
    video_capture = cv2.VideoCapture(0, cv2.CAP_DSHOW)
    output = cv2.VideoWriter(webcam_video,
                             cv2.VideoWriter_fourcc(*'mp4v'), 29.97, (640, 480))
    frames_counter = 300
    while frames_counter > 0:
        # Capture each frame of webcam video
        # frameRead - bool indicating if the frame was read correctly
        frameRead, frame = video_capture.read()
        output.write(frame)
        frames_counter -= 1

    # close the already opened camera
    video_capture.release()
    # close the already opened file
    output.release()
    # close the window and de-allocate any associated memory usage
    cv2.destroyAllWindows()
This produces a video that is 10 seconds long; however, it takes 20 seconds to record it.
Why is this the case?
How would I be able to get the recording time to match the length of the video that it produces?
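A likely explanation (not stated in the original post): the webcam simply cannot deliver 29.97 frames per second, for example because of auto-exposure in low light or driver limits, so collecting 300 frames takes about 20 seconds of wall-clock time while the file is still written at 29.97 fps and therefore plays back in 10 seconds. A minimal sketch of one way to make the two match, assuming you can hold the frames in memory: measure the rate the camera actually achieves and open the VideoWriter with that rate (the output path here is a placeholder).

import time
import cv2

def capture_video_matched(webcam_video="recording.mp4", num_frames=300):
    video_capture = cv2.VideoCapture(0, cv2.CAP_DSHOW)

    # Grab the frames first and time how long that really takes.
    frames = []
    start = time.time()
    while len(frames) < num_frames:
        frame_read, frame = video_capture.read()
        if not frame_read:
            break
        frames.append(frame)
    elapsed = time.time() - start
    video_capture.release()

    if not frames:
        return

    # The rate the camera actually delivered, not the rate we asked for.
    measured_fps = len(frames) / elapsed
    height, width = frames[0].shape[:2]

    output = cv2.VideoWriter(webcam_video,
                             cv2.VideoWriter_fourcc(*'mp4v'),
                             measured_fps, (width, height))
    for frame in frames:
        output.write(frame)
    output.release()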
I'm capturing video from a video file using OpenCV on Linux. It works fine, but when I play back the captured video it plays too fast, i.e. I capture 10 seconds of video but the resulting video is only 8 seconds long when played.
Video capture function
def save_frames(video_file, path_in):
    cap = cv2.VideoCapture(video_file)
    length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = cap.get(cv2.CAP_PROP_FPS)
    count = 1
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:  # stop once the video has no more frames
            break
        cv2.imwrite(path_in + "frame{}.jpg".format(str(count).zfill(5)), frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        count += 1
    cap.release()
    cv2.destroyAllWindows()
    return fps
Video writer function
def make_video(path_out, outvid, fps=25, size=None, is_color=True, format='mp4v'):
    images = [f for f in os.listdir(path_out) if ".png" in f]
    from cv2 import VideoWriter, VideoWriter_fourcc, imread, resize
    fourcc = VideoWriter_fourcc(*'mp4v')
    vid = None
    count = 0
    for image in images:
        if not os.path.exists(path_out + image):
            raise FileNotFoundError(image)
        img = imread(path_out + image)
        if vid is None:
            if size is None:
                size = img.shape[1], img.shape[0]
            vid = VideoWriter(outvid, fourcc, float(fps), size, is_color)
        if size[0] != img.shape[1] and size[1] != img.shape[0]:
            img = resize(img, size)
        vid.write(img)
        count += 1
        if count % 100 == 0:
            print("Progress: %0.2f%%" % (count / len(images) * 100,), flush=True)
    vid.release()
    return
I tried different fps values (30, 25, 15, 10, 5). None of them worked for me; for every fps value, 600 frames are captured for the 10-second video. cv2.CAP_PROP_FPS captures at 30 fps by default, and when I try to change the fps, the frame rate doesn't change. Please let me know why this happens. Any answers are welcome.
1) The problem here is that cv2.CAP_PROP_FPS defaults to 30 fps, so 600 images are saved for a 20-second video. To fix this, put if count % (1 * fps) == 0: before the cv2.imwrite call, so that only one frame is saved per second.
2) Then set fps = 1 in the VideoWriter object. After that, the video plays at normal speed.
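Putting the two points together, a minimal sketch of the idea (the file names are placeholders, and it collapses the save-frames/make-video stages into one loop just to show the fix):

import cv2

cap = cv2.VideoCapture("input.mp4")          # placeholder path
fps = cap.get(cv2.CAP_PROP_FPS) or 30        # falls back to 30 if the property is missing
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

# Point 2: write the output at 1 fps so one kept frame per second plays at normal speed.
out = cv2.VideoWriter("output.mp4", cv2.VideoWriter_fourcc(*'mp4v'), 1, (width, height))

count = 0
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    # Point 1: keep only one frame for each second of the source video.
    if count % int(fps) == 0:
        out.write(frame)
    count += 1

cap.release()
out.release()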
I am having a problem overlaying my video on the webcam feed. I can open the webcam without any issues or errors, but my video is not being shown. I am trying to play my video at specific x-y coordinates. I took most of my code from another Stack Overflow question, but I cannot find it now, so I cannot link it here.
Can someone help me solve this? Why is my video not being played over my webcam feed?
I have the following code:
from os.path import sep
import cv2 as cv2

# load the overlay image. size should be smaller than video frame size
img = cv2.VideoCapture('photos' + sep + 'Baslksz-3.mp4')

# Get Image dimensions
width = img.set(cv2.CAP_PROP_FRAME_WIDTH, 150)  # float `width`
height = img.set(cv2.CAP_PROP_FRAME_HEIGHT, 150)

# Start Capture
cap = cv2.VideoCapture(0)
cap = cv2.VideoCapture(0 + cv2.CAP_DSHOW)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
cap.set(cv2.CAP_PROP_FPS, 30)

frame_vid = img.read()

# Decide X,Y location of overlay image inside video frame.
# following should be valid:
#   * image dimensions must be smaller than frame dimensions
#   * x+img_width <= frame_width
#   * y+img_height <= frame_height
# otherwise you can resize image as part of your code if required
x = 50
y = 50

while True:
    # Capture frame-by-frame
    ret, frame = cap.read()

    # add image to frame
    frame[y:y+width, x:x+height] = img

    '''
    tr = 0.3 # transparency between 0-1, show camera if 0
    frame = ((1-tr) * frame.astype(np.float) + tr * frame_vid.astype(np.float)).astype(np.uint8)
    '''

    # Display the resulting frame
    cv2.imshow('frame', frame)

    # Exit if ESC key is pressed
    if cv2.waitKey(20) & 0xFF == 27:
        break

cap.release()
cv2.destroyAllWindows()
Let me start analyzing the code step-by-step.
Step #1
img = cv2.VideoCapture('photos' + sep + 'Baslksz-3.mp4')
The above code looks fine, but it would be better if you assign the file name to a variable first:
video_name = 'photos' + sep + 'Baslksz-3.mp4'
img = cv2.VideoCapture(video_name)
Step #2
# Get Image dimensions
width = img.set(cv2.CAP_PROP_FRAME_WIDTH, 150) # float `width`
height = img.set(cv2.CAP_PROP_FRAME_HEIGHT, 150)
Now, what do the width and height variables hold?
# Get Image dimensions
width = img.set(cv2.CAP_PROP_FRAME_WIDTH, 150) # float `width`
height = img.set(cv2.CAP_PROP_FRAME_HEIGHT, 150)
print(width)
print(height)
Result is:
False
False
It seems you want to set the width and height to (150, 150). VideoCapture.set() returns a success flag rather than the value, so it would be better to initialize them separately:
# Get Image dimensions
img.set(cv2.CAP_PROP_FRAME_WIDTH, 150) # float `width`
img.set(cv2.CAP_PROP_FRAME_HEIGHT, 150)
width = 150
height = 150
Step #3
# Start Capture
cap = cv2.VideoCapture(0)
cap = cv2.VideoCapture(0 + cv2.CAP_DSHOW)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
cap.set(cv2.CAP_PROP_FPS, 30)
Why do you initialize the cap variable twice?
Step #4
frame_vid = img.read()
Why do you initialize frame_vid when it is not used anywhere in the code?
Step #5
while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    frame[y:y + width, x:x + height] = img
The above code does not make sense: you want to display your video for as long as your webcam is open, yet you never check whether the current webcam frame was returned, and you assign the VideoCapture object itself into the frame array.
while cap.isOpened():
    # Capture frame-by-frame
    ret, frame = cap.read()
Now you are getting frames for as long as your webcam is open. Next, check whether the webcam frame was returned; if it was, read a frame from the video. If the video frame is returned successfully, resize it to (width, height) and copy it into the webcam frame.
while cap.isOpened():
    # Capture frame-by-frame
    ret, frame = cap.read()
    if ret:
        ret_video, frame_video = img.read()
        if ret_video:
            # add image to frame
            frame_video = cv2.resize(frame_video, (width, height))
            frame[y:y + width, x:x + height] = frame_video
Step #6
Make sure to release the img capture when you are done:
img.release()
cap.release()
cv2.destroyAllWindows()
Please rename the img variable to something that makes sense, for example video_capture, and rename cap to webcam_capture.
When the video stops, the webcam gets stuck. But I want it to continue indefinitely, with the video starting again from the beginning. Instead, the video does not restart and the webcam freezes.
Update
This issue was mentioned in the Playback loop option in OpenCV videos
If you look at the answer there, the problem was solved by counting the video frames: when the counter equals the capture's frame count (CAP_PROP_FRAME_COUNT), reset the counter to 0 and set CAP_PROP_POS_FRAMES back to 0.
First initialize the frame counter.
video_frame_counter = 0
Then, while the webcam is open, read the frames; each time a video frame is read, increase the counter by 1.
while cap.isOpened():
    # Capture frame-by-frame
    ret, frame = cap.read()
    if ret:
        ret_video, frame_video = img.read()
        video_frame_counter += 1
When the counter equals the capture's frame count, reset both the counter and the capture position to 0.
if video_frame_counter == img.get(cv2.CAP_PROP_FRAME_COUNT):
    video_frame_counter = 0
    img.set(cv2.CAP_PROP_POS_FRAMES, 0)
Code:
from os.path import sep
import cv2 as cv2

# load the overlay image. size should be smaller than video frame size
# img = cv2.VideoCapture('photos' + sep + 'Baslksz-3.mp4')
video_name = 'photos' + sep + 'Baslksz-3.mp4'
img = cv2.VideoCapture(video_name)

# Get Image dimensions
img.set(cv2.CAP_PROP_FRAME_WIDTH, 150)  # float `width`
img.set(cv2.CAP_PROP_FRAME_HEIGHT, 150)
width = 150
height = 150

# Start Capture
cap = cv2.VideoCapture(0)
# cap = cv2.VideoCapture(0 + cv2.CAP_DSHOW)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
cap.set(cv2.CAP_PROP_FPS, 30)

# frame_vid = img.read()

# Decide X,Y location of overlay image inside video frame.
# following should be valid:
#   * image dimensions must be smaller than frame dimensions
#   * x+img_width <= frame_width
#   * y+img_height <= frame_height
# otherwise you can resize image as part of your code if required
x = 50
y = 50

video_frame_counter = 0

while cap.isOpened():
    # Capture frame-by-frame
    ret, frame = cap.read()
    if ret:
        ret_video, frame_video = img.read()
        video_frame_counter += 1

        if video_frame_counter == img.get(cv2.CAP_PROP_FRAME_COUNT):
            video_frame_counter = 0
            img.set(cv2.CAP_PROP_POS_FRAMES, 0)

        if ret_video:
            # add image to frame
            frame_video = cv2.resize(frame_video, (width, height))
            frame[y:y + width, x:x + height] = frame_video

        '''
        tr = 0.3 # transparency between 0-1, show camera if 0
        frame = ((1-tr) * frame.astype(np.float) + tr * frame_vid.astype(np.float)).astype(np.uint8)
        '''

        # Display the resulting frame
        cv2.imshow('frame', frame)

        # Exit if ESC key is pressed
        if cv2.waitKey(1) & 0xFF == 27:
            break

img.release()
cap.release()
cv2.destroyAllWindows()
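If you also want the transparency effect from the commented-out block (which still refers to the removed frame_vid variable and to the deprecated np.float), here is a sketch of a blended overlay using cv2.addWeighted, replacing the plain frame[y:y + width, x:x + height] = frame_video assignment:

if ret_video:
    frame_video = cv2.resize(frame_video, (width, height))
    tr = 0.3  # overlay opacity: 0 shows only the camera, 1 only the video
    roi = frame[y:y + height, x:x + width]
    # blend the video frame with the webcam region instead of overwriting it
    frame[y:y + height, x:x + width] = cv2.addWeighted(frame_video, tr, roi, 1 - tr, 0)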
I want a screen recorder. I thought of making my own.
I checked the internet and found: https://www.thepythoncode.com/code/make-screen-recorder-python
The Code:
import cv2
import numpy as np
import pyautogui

# Display screen resolution, get it from your OS settings
SCREEN_SIZE = (1366, 768)
# Define the codec
fourcc = cv2.VideoWriter_fourcc(*"XVID")
# Create the video writer object
out = cv2.VideoWriter("output.avi", fourcc, 30.0, (SCREEN_SIZE))

while True:
    # make a screenshot
    img = pyautogui.screenshot()
    # img = pyautogui.screenshot(region=(0, 0, 300, 400))
    # convert these pixels to a proper numpy array to work with OpenCV
    frame = np.array(img)
    # convert colors from BGR to RGB
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # write the frame
    out.write(frame)
    # show the frame
    cv2.imshow("screenshot", frame)
    # if the user clicks q, it exits
    if cv2.waitKey(1) == ord("q"):
        break

# Make sure everything is closed when exited
cv2.destroyAllWindows()
out.release()
The Problem:
When I run this, it works, but the output plays back at a seemingly random speed. The fps is set to 30, but when I record for 1 minute, the resulting video is 5 seconds or 10 minutes long (it varies).
How do I make this recorder produce output at 30 fps with the correct playback speed?
Basically, if you want to continue with your current code, you will have to compromise on resolution or frame rate.
My suggestion is to try the cv2.VideoCapture() functionality.
I am attaching a link to a page with a detailed step-by-step walkthrough in which the author achieves a frame rate of 30.75 FPS.
Here's the link:
https://www.pyimagesearch.com/2017/02/06/faster-video-file-fps-with-cv2-videocapture-and-opencv/
The second half of the linked article covers the faster, threaded method of reading video frames with OpenCV.
# import the necessary packages
from imutils.video import FileVideoStream
from imutils.video import FPS
import numpy as np
import argparse
import imutils
import time
import cv2

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", required=True,
                help="path to input video file")
args = vars(ap.parse_args())

# start the file video stream thread and allow the buffer to
# start to fill
print("[INFO] starting video file thread...")
fvs = FileVideoStream(args["video"]).start()
time.sleep(1.0)

# start the FPS timer
fps = FPS().start()

# loop over frames from the video file stream
while fvs.more():
    # grab the frame from the threaded video file stream, resize
    # it, and convert it to grayscale (while still retaining 3
    # channels)
    frame = fvs.read()
    frame = imutils.resize(frame, width=450)
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    frame = np.dstack([frame, frame, frame])

    # display the size of the queue on the frame
    cv2.putText(frame, "Queue Size: {}".format(fvs.Q.qsize()),
                (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)

    # show the frame and update the FPS counter
    cv2.imshow("Frame", frame)
    cv2.waitKey(1)
    fps.update()

# stop the timer and display FPS information
fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

# do a bit of cleanup
cv2.destroyAllWindows()
fvs.stop()
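If you would rather keep the pyautogui screenshot loop from the question, another option (my own suggestion, not from the linked article) is to accept whatever capture rate the machine can manage, measure it, and open the VideoWriter with that measured rate so playback speed matches wall-clock time. A rough sketch, assuming the probe loop is representative of the real capture cost:

import time

import cv2
import numpy as np
import pyautogui

SCREEN_SIZE = (1366, 768)
fourcc = cv2.VideoWriter_fourcc(*"XVID")

def grab_frame():
    # screenshot -> numpy array -> BGR, resized to the writer's frame size
    frame = np.array(pyautogui.screenshot())
    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
    return cv2.resize(frame, SCREEN_SIZE)

# Probe: time a short burst of captures to estimate the achievable rate.
probe_frames = 30
start = time.time()
for _ in range(probe_frames):
    grab_frame()
measured_fps = probe_frames / (time.time() - start)

# Open the writer with the measured rate instead of a hard-coded 30.0.
out = cv2.VideoWriter("output.avi", fourcc, measured_fps, SCREEN_SIZE)
while True:
    frame = grab_frame()
    out.write(frame)
    cv2.imshow("screenshot", frame)
    if cv2.waitKey(1) == ord("q"):
        break

cv2.destroyAllWindows()
out.release()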
I have a video with a duration of 15 seconds, and at a specific time I need to insert a text overlay.
So far my code just reads the video and displays it; it also extracts the frames and calculates the fps, frame count, and duration of the video.
import cv2
import numpy as np

# Create a VideoCapture object and read from input file
# If the input is the camera, pass 0 instead of the video file name
cap = cv2.VideoCapture('my_baby_dog.mp4')

# Check if camera opened successfully
if cap.isOpened() == False:
    print("Error opening video stream or file")

# Read until video is completed
while cap.isOpened():
    # Capture frame-by-frame
    ret, frame = cap.read()
    if ret == True:
        fps = cap.get(cv2.CAP_PROP_FPS)  # OpenCV2 version 2 used "CV_CAP_PROP_FPS"
        frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        duration = frame_count / fps

        print('fps = ' + str(fps))
        print('number of frames = ' + str(frame_count))
        print('duration (S) = ' + str(duration))
        minutes = int(duration / 60)
        seconds = duration % 60
        print('duration (M:S) = ' + str(minutes) + ':' + str(seconds))

        # Display the resulting frame
        frameWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        frameHeight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        cv2.putText(img=frame, text='EKO', org=(int(frameWidth / 2 - 20), int(frameHeight / 2)),
                    fontFace=cv2.FONT_HERSHEY_DUPLEX, fontScale=3,
                    color=(0, 255, 0))
        cv2.imshow('Frame', frame)

        # Press Q on keyboard to exit
        if cv2.waitKey(25) & 0xFF == ord('q'):
            break

    # Break the loop
    else:
        break

# When everything done, release the video capture object
cap.release()

# Closes all the frames
cv2.destroyAllWindows()
It sounds like you want to add a text overlay starting at 6 seconds. Assuming the overlay should continue until the video is finished, add an if statement that compares the elapsed time to the start time of your text overlay, and only then draw and display it.
import cv2
import numpy as np

start_time = 6

# Create a VideoCapture object and read from input file
# If the input is the camera, pass 0 instead of the video file name
cap = cv2.VideoCapture('my_baby_dog.mp4')
........
while cap.isOpened():
    ........
    # ADD OVERLAY TEXT
    if start_time < duration:
        cv2.putText(img=frame, text='EKO', org=(int(frameWidth / 2 - 20), int(frameHeight / 2)),
                    fontFace=cv2.FONT_HERSHEY_DUPLEX, fontScale=3,
                    color=(0, 255, 0))
    cv2.imshow('Frame', frame)
Likewise, you could start and stop the text overlay like this:
import cv2
import numpy as np

start_time = 6
stop_time = 10
........
    # ADD OVERLAY TEXT
    if start_time < duration < stop_time:
        cv2.putText(img=frame, text='EKO', org=(int(frameWidth / 2 - 20), int(frameHeight / 2)),
                    fontFace=cv2.FONT_HERSHEY_DUPLEX, fontScale=3,
                    color=(0, 255, 0))
.......
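One caveat: duration in the question's loop is the total length of the file (frame_count / fps), so it never changes while the video plays. If the overlay should appear at the 6-second mark of playback, compare against the current position of the frame you just read instead, for example via CAP_PROP_POS_MSEC; a sketch of that check inside the read loop:

        # position of the frame just read, in seconds
        current_time = cap.get(cv2.CAP_PROP_POS_MSEC) / 1000.0

        # ADD OVERLAY TEXT
        if start_time < current_time < stop_time:
            cv2.putText(img=frame, text='EKO', org=(int(frameWidth / 2 - 20), int(frameHeight / 2)),
                        fontFace=cv2.FONT_HERSHEY_DUPLEX, fontScale=3,
                        color=(0, 255, 0))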