cv2 module capturing frame from video every 5 seconds - python

I have been trying to capture a frame (screenshot) every 5 seconds from a live .webm video stream that is constantly increasing in duration.
import cv2
import time

TIMER = int(5)
k = 0
cap = cv2.VideoCapture('video.webm')
capture = int(1)

while True:
    ret, img = cap.read()
    cv2.imshow('a', img)
    if ret:
        prev = time.time()
        while TIMER >= 0:
            ret, img = cap.read()
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(img, str(TIMER),
                        (200, 250), font,
                        7, (0, 255, 255),
                        4, cv2.LINE_AA)
            cv2.imshow('a', img)
            cv2.waitKey(125)
            cur = time.time()
            if cur - prev >= 1:
                prev = cur
                TIMER = TIMER - 1
                if TIMER == 0:
                    ret, img = cap.read()
                    cv2.imshow('a', img)
                    cv2.waitKey(2000)
                    cv2.imwrite('camera' + str(capture) + '.jpg', img)
                    capture = capture + 1
                    TIMER = int(5)
    else:
        break

cap.release()
cv2.destroyAllWindows()
Here my code captures a frame roughly every 5 frames. I'm not able to figure out the framerate calculation needed to make it capture a frame every 5 seconds rather than every 5 frames.

If you want to utilize the FPS information of the video, an approach might be:
import cv2

cap = cv2.VideoCapture(r'path/to/vid')
fps = int(cap.get(cv2.CAP_PROP_FPS))
save_interval = 5  # seconds
frame_count = 0

while cap.isOpened():
    ret, frame = cap.read()
    if ret:
        frame_count += 1
        if frame_count % (fps * save_interval) == 0:
            cv2.imwrite(r'path/to/save/to', frame)  # imwrite needs the frame as its second argument
            # optional
            frame_count = 0
    # Break the loop
    else:
        break

cap.release()
cv2.destroyAllWindows()
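One caveat: for live streams (including a growing .webm), CAP_PROP_FPS is sometimes unreliable and can come back as 0, which would break the fps * save_interval test. A minimal time-based sketch keyed on the frame timestamp instead, with a hypothetical frame_%d.jpg naming scheme, might look like this:
import cv2

cap = cv2.VideoCapture('video.webm')
save_interval = 5          # seconds between saved frames
next_save_ms = 0           # timestamp (ms) at which to save the next frame
saved = 0

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    pos_ms = cap.get(cv2.CAP_PROP_POS_MSEC)  # timestamp of the current frame
    if pos_ms >= next_save_ms:
        cv2.imwrite('frame_%d.jpg' % saved, frame)
        saved += 1
        next_save_ms += save_interval * 1000

cap.release()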

Related

OpenCV RTSP frame very slow when I start recording

Before using the RTSP IP camera I didn't have this problem.
First I show a countdown (3-2-1-GO!) to the user, and then I record for only 10 seconds. After the recording finishes I show "Finished". But since I started using the RTSP camera, the recording lags; how can I solve this? With the webcam I don't have this problem.
Process:
DownCount 3-2-1-GO!
Start recording for 10 seconds -- I have the problem at this step, the video recording lags.
Show Finished text
This is the source code:
class MainRecord():
    def __init__(self, rfidCode):
        TIMER = int(3)
        TIMER_RECORD = int(10)
        self.rfidCode = rfidCode
        print(self.rfidCode)
        user_id = str(self.rfidCode)  # this id will come from RFID
        # user_id = "a1"
        # test = 'fourth'q
        # cap = cv2.VideoCapture(0)
        # cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
        # cap.set(cv2.CAP_PROP_POS_AVI_RATIO, 1)
        cap = cv2.VideoCapture('rtsp://private:private#192.168.1.64/1')
        # segmentor = SelfiSegmentation()
        filename = user_id + '.avi'  # .avi .mp4
        frames_per_seconds = 24  # this is the standard for movies or films
        config = CFEVideoConf(cap, filepath=filename, res='1080p')
        out = cv2.VideoWriter(filename, config.video_type, frames_per_seconds, config.dims)
        img_path = 'ap_logo.png'
        logo = cv2.imread(img_path, -1)
        watermark = image_resize(logo, height=60)
        watermark = cv2.cvtColor(watermark, cv2.COLOR_BGR2BGRA)
        # grayscale watermark
        # cv2.imshow('watermark', watermark)
        # time.sleep(2)
        font = cv2.FONT_HERSHEY_SIMPLEX
        ret, readBefore = cap.read()
        cv2.imshow('Start', readBefore)
        while(True):
            prev = time.time()
            while TIMER >= 0:
                ret, img = cap.read()
                print("showing")
                # Display countdown on each frame:
                # specify the font and draw the countdown using putText
                if TIMER != 0:
                    cv2.putText(img, str(TIMER),
                                (560, 250), font,
                                7, (0, 255, 255),
                                4, cv2.LINE_AA)
                    cv2.imshow('Start', img)
                else:
                    cv2.putText(img, str("GO!"),
                                (580, 250), font,
                                7, (0, 255, 255),
                                4, cv2.LINE_AA)
                    cv2.imshow('Start', img)
                if cv2.waitKey(20) & 0xFF == ord('q'):
                    break
                # current time
                cur = time.time()
                # Update and keep track of the countdown;
                # if one second has elapsed, decrease the counter
                if cur - prev >= 1:
                    prev = cur
                    TIMER = TIMER - 1
                    if TIMER == -1:
                        cv2.destroyAllWindows()
                        while TIMER_RECORD >= 0:  # problem occurs here !!
                            ret, frame = cap.read()
                            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2BGRA)
                            frame_h, frame_w, frame_c = frame.shape
                            # overlay with 4 channel BGR and Alpha
                            overlay = np.zeros((frame_h, frame_w, 4), dtype='uint8')
                            # overlay[100:250, 100:125] = (255,255,0,1)  # B,G,R,A
                            # overlay[100:250, 150:255] = (0,255,0,1)
                            watermark_h, watermark_w, watermark_c = watermark.shape
                            for i in range(0, watermark_h):
                                for j in range(0, watermark_w):
                                    if watermark[i, j][3] != 0:
                                        h_offset = frame_h - watermark_h
                                        w_offset = frame_w - watermark_w
                                        overlay[h_offset + i, w_offset + j] = watermark[i, j]
                            cv2.addWeighted(overlay, 0.25, frame, 1.0, 0, frame)
                            # Display the resulting frame
                            frame = cv2.cvtColor(frame, cv2.COLOR_BGRA2BGR)
                            cv2.imshow('Frame', frame)
                            if cv2.waitKey(20) & 0xFF == ord('q'):
                                break
                            out.write(frame)  # the frame is written to the file
                            cur = time.time()
                            # Update and keep track of the countdown;
                            # if one second has elapsed, decrease the counter
                            if cur - prev >= 1:
                                prev = cur
                                TIMER_RECORD = TIMER_RECORD - 1
                                print(TIMER_RECORD)
                                if TIMER_RECORD == -1:
                                    cv2.destroyAllWindows()
                                    # if cv2.waitKey(20) & 0xFF == ord('q'):
                                    #     break
            else:
                ret, finishImg = cap.read()
                cv2.putText(finishImg, str("FINISH !"),
                            (150, 250), font,
                            7, (0, 255, 255),
                            4, cv2.LINE_AA)
                cv2.imshow('Finished', finishImg)
                # cv2.destroyAllWindows()
                if cv2.waitKey(20) & 0xFF == ord('q'):
                    break
                # break
                # if cv2.waitKey(20) & 0xFF == ord('q'):
                #     break
        # When everything is done, release the capture
        cap.release()
        out.release()
        # saved
        cv2.destroyAllWindows()
        # self.ui.show()
In the while TIMER_RECORD >= 0: loop the displayed video is very slow, but before and after it everything is fine. How can I solve this problem?
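No answer is recorded for this question, but two things in the recording loop stand out: the nested Python loops that copy the watermark pixel by pixel run on every frame, and while that work happens an RTSP capture keeps buffering frames, so the display falls behind. A minimal sketch of a vectorised overlay, assuming the same 4-channel BGRA watermark as above (blend_watermark is a hypothetical helper, not part of the original class):
import numpy as np
import cv2

def blend_watermark(frame_bgr, watermark_bgra, alpha=0.25):
    # Build the transparent overlay with NumPy slicing instead of a
    # per-pixel Python loop, then blend it onto the frame.
    frame_h, frame_w = frame_bgr.shape[:2]
    wm_h, wm_w = watermark_bgra.shape[:2]
    overlay = np.zeros((frame_h, frame_w, 4), dtype='uint8')
    roi = overlay[frame_h - wm_h:, frame_w - wm_w:]        # bottom-right corner
    mask = watermark_bgra[:, :, 3] != 0                    # non-transparent watermark pixels
    roi[mask] = watermark_bgra[mask]
    frame_bgra = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2BGRA)
    cv2.addWeighted(overlay, alpha, frame_bgra, 1.0, 0, frame_bgra)
    return cv2.cvtColor(frame_bgra, cv2.COLOR_BGRA2BGR)
Since the watermark never changes, the overlay could also be built once before the recording loop and reused for every frame, which removes almost all of the per-frame cost.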

How to get last processed frame from video using opencv?

I want to extract the last frame of a video that has been processed with a Motion History Image (MHI), and I want to save the last image produced by the MHI.
Here is my code:
import numpy as np
import cv2

MHI_DURATION = 35
DEFAULT_THRESHOLD = 30

def main():
    live_video = False
    video_src = 1
    if not live_video:
        video_src = "data/vid1.mp4"
    cv2.namedWindow('motion-history')
    cv2.namedWindow('raw')
    cv2.moveWindow('raw', 200, 0)
    cam = cv2.VideoCapture(video_src)
    ret, frame = cam.read()
    h, w = frame.shape[:2]
    prev_frame = frame.copy()
    motion_history = np.zeros((h, w), np.float32)
    timestamp = 0
    while True:
        ret, frame = cam.read()
        if not ret:
            break
        frame_diff = cv2.absdiff(frame, prev_frame)
        gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
        ret, fgmask = cv2.threshold(gray_diff, DEFAULT_THRESHOLD, 1, cv2.THRESH_BINARY)
        timestamp += 1
        # update motion history
        cv2.motempl.updateMotionHistory(fgmask, motion_history, timestamp, MHI_DURATION)
        # normalize motion history
        mh = np.uint8(np.clip((motion_history - (timestamp - MHI_DURATION)) / MHI_DURATION, 0, 1) * 255)
        cv2.imshow('motion-history', mh)
        # cv2.imshow('raw', frame)
        prev_frame = frame.copy()
        if 0xFF & cv2.waitKey(5) == 27:
            break
    cv2.destroyAllWindows()

if __name__ == '__main__':
    main()
Also, can we process this faster without looping through every frame in the video?
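No answer is recorded for this question. Since the MHI at any frame depends on all preceding frames, the per-frame loop cannot simply be skipped (seeking with cap.set(cv2.CAP_PROP_POS_FRAMES, ...) only helps if you want the raw last frame, not the accumulated MHI). Saving the last processed image, though, only requires holding on to the most recent mh and writing it once the loop finishes. A minimal sketch of that pattern, with a hypothetical helper save_last_processed and a hypothetical output name last_mhi.png (the process callback would wrap the MHI update and normalisation from the code above):
import cv2

def save_last_processed(video_src, process, out_path='last_mhi.png'):
    # Run `process` on every frame, keep only the most recent result,
    # and write that result to disk once the video ends.
    cam = cv2.VideoCapture(video_src)
    last_result = None
    while True:
        ret, frame = cam.read()
        if not ret:
            break
        last_result = process(frame)  # e.g. the MHI update + normalisation producing mh
    cam.release()
    if last_result is not None:
        cv2.imwrite(out_path, last_result)
    return last_result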

Extract a frame at a specific time of a video and insert a text OpenCV Python

So I have a video with a duration of 15 seconds, and at a specific time I need to insert a text field.
Until now my code just reads the video and displays it. It also extracts the frame properties and calculates the duration of the video.
import cv2
import numpy as np

# Create a VideoCapture object and read from input file
# If the input is the camera, pass 0 instead of the video file name
cap = cv2.VideoCapture('my_baby_dog.mp4')

# Check if camera opened successfully
if (cap.isOpened() == False):
    print("Error opening video stream or file")

# Read until video is completed
while (cap.isOpened()):
    # Capture frame-by-frame
    ret, frame = cap.read()
    if ret == True:
        fps = cap.get(cv2.CAP_PROP_FPS)  # OpenCV2 version 2 used "CV_CAP_PROP_FPS"
        frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        duration = frame_count / fps
        print('fps = ' + str(fps))
        print('number of frames = ' + str(frame_count))
        print('duration (S) = ' + str(duration))
        minutes = int(duration / 60)
        seconds = duration % 60
        print('duration (M:S) = ' + str(minutes) + ':' + str(seconds))

        # Display the resulting frame
        frameWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        frameHeight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        cv2.putText(img=frame, text='EKO', org=(int(frameWidth / 2 - 20), int(frameHeight / 2)),
                    fontFace=cv2.FONT_HERSHEY_DUPLEX, fontScale=3,
                    color=(0, 255, 0))
        cv2.imshow('Frame', frame)

        # Press Q on keyboard to exit
        if cv2.waitKey(25) & 0xFF == ord('q'):
            break
    # Break the loop
    else:
        break

# When everything done, release the video capture object
cap.release()

# Closes all the frames
cv2.destroyAllWindows()
It sounds like you want to add a text overlay after 6 seconds. Assuming this overlay will continue until the video is finished, you will want to add an if statement that compares the duration to the start time of your text overlay, and then display the frame.
import cv2
import numpy as np

start_time = 6

# Create a VideoCapture object and read from input file
# If the input is the camera, pass 0 instead of the video file name
cap = cv2.VideoCapture('my_baby_dog.mp4')
........

while (cap.isOpened()):
    ........

    # ADD OVERLAY TEXT
    if start_time < duration:
        cv2.putText(img=frame, text='EKO', org=(int(frameWidth / 2 - 20), int(frameHeight / 2)),
                    fontFace=cv2.FONT_HERSHEY_DUPLEX, fontScale=3,
                    color=(0, 255, 0))
    cv2.imshow('Frame', frame)
Likewise, you could start and stop the text overlay like this:
import cv2
import numpy as np

start_time = 6
stop_time = 10
........

# ADD OVERLAY TEXT
if start_time < duration < stop_time:
    cv2.putText(img=frame, text='EKO', org=(int(frameWidth / 2 - 20), int(frameHeight / 2)),
                fontFace=cv2.FONT_HERSHEY_DUPLEX, fontScale=3,
                color=(0, 255, 0))
.......
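One note on this approach: in the question's loop, duration is computed from CAP_PROP_FRAME_COUNT, so it is the total length of the video rather than the playback time of the current frame, and the comparison would be true for every frame of a 15-second clip. If the text should appear at 6 seconds of playback, a per-frame elapsed time is needed; a minimal sketch using CAP_PROP_POS_MSEC, assuming the same file and text:
import cv2

cap = cv2.VideoCapture('my_baby_dog.mp4')
start_time = 6   # seconds into playback when the text appears
stop_time = 10   # seconds into playback when the text disappears
frameWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
frameHeight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    # elapsed playback time of the current frame, in seconds
    elapsed = cap.get(cv2.CAP_PROP_POS_MSEC) / 1000.0
    if start_time < elapsed < stop_time:
        cv2.putText(img=frame, text='EKO', org=(int(frameWidth / 2 - 20), int(frameHeight / 2)),
                    fontFace=cv2.FONT_HERSHEY_DUPLEX, fontScale=3, color=(0, 255, 0))
    cv2.imshow('Frame', frame)
    if cv2.waitKey(25) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()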

Using timer to capture webcam images

How do I add a pause between image captures? In fact, I need 5 seconds between images.
This code captures around 15-20 images per second, but I need to take a picture at an interval of 5 seconds between images.
import cv2
import time
import numpy

capture = cv2.VideoCapture(0)
capture.set(3, 640)
capture.set(4, 480)

img_counter = 0
frame_set = []
start_time = time.time()

while(True):
    ret, frame = capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    frame_set.append(gray)
    cv2.imshow('frame', gray)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    end_time = time.time()
    elapsed = end_time - start_time
    img_name = "opencv_frame_{}.png".format(img_counter)
    cv2.imwrite(img_name, frame)
    print("{} written!".format(img_counter))
    img_counter += 1
    if elapsed > 1:
        capture.release()
        cv2.destroyAllWindows()
        time.sleep(3)
You need an if statement to check if the desired time has passed. Using the already imported time package you can do this:
import cv2
import time

capture = cv2.VideoCapture(0)
capture.set(3, 640)
capture.set(4, 480)

img_counter = 0
frame_set = []
start_time = time.time()

while True:
    ret, frame = capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow('frame', gray)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    if time.time() - start_time >= 5:  # <---- Check if 5 sec passed
        img_name = "opencv_frame_{}.png".format(img_counter)
        cv2.imwrite(img_name, frame)
        print("{} written!".format(img_counter))
        start_time = time.time()
        img_counter += 1
The above script will save the colour frame to the working directory every 5 seconds (pass gray to cv2.imwrite instead if you want the grayscale image being displayed).

Taking videos with opencv after some intervals

I am a beginner in Python and also in OpenCV. I want to record video for some interval with the OpenCV VideoCapture function, let's say 10 seconds, and then the system should go into sleep mode for some time, let's say 6 seconds. Then it records another 10 seconds of video. Both videos should be saved to separate files. My problem is that when I call the first function it saves the first video, but the call to the video2 function doesn't save the video. Sorry for the bad arrangement of the code; I am a new user to SO.
import cv2
import time

def delay(delay_time):
    timeout = delay_time  # [seconds]
    timeout_start = time.time()
    print(time.strftime("%c"))
    while time.time() < timeout_start + timeout:
        test = 0
        if test == 3:
            break
        test -= 1
    print(time.strftime("%c"))

def video1():
    cap = cv2.VideoCapture(0)
    if (cap.isOpened() == False):
        print("Unable to read camera feed")
    frame_width = int(cap.get(3))
    frame_height = int(cap.get(4))
    out = cv2.VideoWriter('Data/output1.avi',
                          cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 10, (frame_width, frame_height))
    while time.time() < time_start + closing_time:
        ret, frame = cap.read()
        if ret == True:
            out.write(frame)
            cv2.imshow('frame', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        else:
            break
    cap.release()
    out.release()

def video2():
    cap = cv2.VideoCapture(0)
    if (cap.isOpened() == False):
        print("Unable to read camera feed")
    frame_width = int(cap.get(3))
    frame_height = int(cap.get(4))
    out = cv2.VideoWriter('Data/output2.avi',
                          cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 10, (frame_width, frame_height))
    while time.time() < time_start + closing_time:
        ret, frame = cap.read()
        if ret == True:
            out.write(frame)
            cv2.imshow('frame', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        else:
            break
    cap.release()
    out.release()
    cv2.destroyAllWindows()

cv2.destroyAllWindows()
print(time.strftime("%c"))
video1()
delay(6)
print(time.strftime("%c"))
video2()
Your code has so many indentation problems that I can't even point to 1 location and say this is a problem. So I rearranged your code here. Also you didn't even define time_start or closing_time in your code. But I'm assuming your real code has them and the problem is just the second video not outputting right. Well that is because you have to define time_start in the beginning of video1 and video2. If you don't re-define time_start, by the time you get to video2 this while time.time() < time_start + closing_time: has already been exhausted and will be False. So no real video is being written to file.
import cv2
import time

def delay(delay_time):
    timeout = delay_time  # [seconds]
    timeout_start = time.time()
    print(time.strftime("%c"))
    while time.time() < timeout_start + timeout:
        test = 0
        if test == 3:
            break
        test -= 1
    print(time.strftime("%c"))

def video1():
    time_start = time.time()
    cap = cv2.VideoCapture(0)
    if (cap.isOpened() == False):
        print("Unable to read camera feed")
    frame_width = int(cap.get(3))
    frame_height = int(cap.get(4))
    print(frame_width, frame_height)
    out = cv2.VideoWriter('output1.avi',
                          cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 10, (frame_width, frame_height))
    while time.time() < time_start + closing_time:
        ret, frame = cap.read()
        if ret == True:
            out.write(frame)
            # cv2.imshow('frame', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        else:
            break
    cap.release()
    out.release()

def video2():
    time_start = time.time()
    cap = cv2.VideoCapture(0)
    if (cap.isOpened() == False):
        print("Unable to read camera feed")
    frame_width = int(cap.get(3))
    frame_height = int(cap.get(4))
    print(frame_width, frame_height)
    out = cv2.VideoWriter('output2.avi',
                          cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 10, (frame_width, frame_height))
    while time.time() < time_start + closing_time:
        ret, frame = cap.read()
        if ret == True:
            out.write(frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        else:
            break
    cap.release()
    out.release()

print(time.strftime("%c"))
closing_time = 10
video1()
delay(6)
print(time.strftime("%c"))
video2()

# this loop is here because sometimes cv2 has problems destroying windows
# if you call it once. so to be safe I call it 10 times
for _ in range(10):
    cv2.destroyAllWindows()
This should do the trick; it does what you asked in the question. The code is below:
import cv2
import time
import numpy as np

cap = cv2.VideoCapture(0)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output1.avi', fourcc, 20.0, (640, 480))  # output size must match the captured frame size
start_time = time.time()
while True:
    ret, frame = cap.read()
    out.write(frame)
    cv2.imshow('frame', frame)
    cv2.waitKey(1)  # needed so imshow can refresh the window
    if time.time() - start_time >= 10:
        break
cap.release()
out.release()

time.sleep(6)

cap2 = cv2.VideoCapture(0)
fourcc2 = cv2.VideoWriter_fourcc(*'XVID')
out2 = cv2.VideoWriter('output2.avi', fourcc2, 20.0, (640, 480))
start_time2 = time.time()
while True:
    ret, frame = cap2.read()
    out2.write(frame)
    cv2.imshow('frame', frame)
    cv2.waitKey(1)
    if time.time() - start_time2 >= 10:
        break
cap2.release()
out2.release()
cv2.destroyAllWindows()
