I'm trying to write a Python program with OpenCV that opens the webcam, takes several images with different exposure times in real time (40 ms, 95 ms, 150 ms), and averages them at the end.
I tried to create a loop in which I change the exposure time, update the frame, and save it in a list. The problem is that the display remains static and the rendering hardly changes, so after merging I get an image whose exposure time is almost 40.
I assumed that after setting the exposure time, the frame needs some time to update, so I added time.sleep to suspend execution for 3 seconds, but it was in vain.
Here is my code
import numpy as np
import cv2
import os
import time

capture = cv2.VideoCapture(0, cv2.CAP_V4L2)

while True:
    (grabbed, frame) = capture.read()
    if not grabbed:
        break

    # Resize frame
    width = 1500
    height = 1000
    dim = (width, height)
    frame = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
    cv2.imshow('RGB', frame)

    # Read the keyboard once per loop, so a keystroke is not consumed
    # by one waitKey call and missed by the next
    key = cv2.waitKey(1) & 0xFF
    if key == ord('q'):
        break
    if key in (ord('h'), ord('H')):
        repertory = input("Enter the name of the directory: ")
        if not os.path.exists(repertory):
            os.mkdir(repertory)
        exposure = [40, 95, 150]
        ims = []
        for i in exposure:
            capture.set(cv2.CAP_PROP_EXPOSURE, i)  # Set exposure
            (grabbed, frame) = capture.read()      # Update frame
            if grabbed:
                cv2.imshow('RGB', frame)  # Display
                ims.append(frame)
        # Convert to a numpy array
        ims = np.array(ims)
        # Average and convert to uint8
        imave = np.average(ims, axis=0)
        imave = imave.astype(np.uint8)
        # Write the HDR image
        cv2.imwrite(repertory + '/' + repertory + '_HDR8.jpg', imave)

capture.release()
cv2.destroyAllWindows()
Is there an optimal solution that allows taking pictures with different exposure times in real time, automatically?
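One workaround I have seen suggested (I have not verified it) is to grab and discard a few frames after each exposure change, so the driver's buffered frames are flushed, instead of sleeping. A minimal sketch, assuming the camera honors CAP_PROP_EXPOSURE once auto-exposure is disabled:

import cv2

capture = cv2.VideoCapture(0, cv2.CAP_V4L2)
# Assumption: 1 selects manual-exposure mode on V4L2 backends
# (some builds use 0.25 instead); check your driver.
capture.set(cv2.CAP_PROP_AUTO_EXPOSURE, 1)

ims = []
for exposure in (40, 95, 150):
    capture.set(cv2.CAP_PROP_EXPOSURE, exposure)
    # Grab and discard a few frames so the new exposure
    # actually reaches the frame we keep
    for _ in range(5):
        capture.grab()
    grabbed, frame = capture.read()
    if grabbed:
        ims.append(frame)

capture.release()

For the merge step, OpenCV's exposure fusion (cv2.createMergeMertens().process(ims)) may also give a nicer result than a plain average, since it weights well-exposed pixels more heavily.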
Related
I want to run inference with my vehicle detection model only on the center of a video, as shown in the picture: the red zone is the only area where I want the model to run. I wanted to know if there's a way to specify a zone for my model to work in.
This is the code I have used for the crop, which kind of works. I just need to integrate it with my vehicle detection model, which I will post as soon as I finish.
# Import packages
import cv2
import numpy as np

# Open the video
cap = cv2.VideoCapture('test.mp4')

# Initialize frame counter
cnt = 0

# Some characteristics of the original video
w_frame = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h_frame = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps, frames = cap.get(cv2.CAP_PROP_FPS), cap.get(cv2.CAP_PROP_FRAME_COUNT)

# Here you can define your cropping values
x, y, h, w = 0, 0, 600, 1000

# Output
fourcc = cv2.VideoWriter_fourcc(*'MP4V')
out = cv2.VideoWriter('result.mp4', fourcc, fps, (w, h))

# Now we start
while cap.isOpened():
    ret, frame = cap.read()
    cnt += 1  # Count frames

    # Avoid problems when the video finishes
    if ret:
        # Crop the frame
        crop_frame = frame[y:y+h, x:x+w]

        # Progress percentage
        xx = cnt * 100 / frames
        print(int(xx), '%')

        # To save only a range of frames:
        # if 15 <= cnt <= 90:
        #     out.write(crop_frame)

        # Here the whole video is saved
        out.write(crop_frame)

        # Just to see the video in real time
        cv2.imshow('frame', frame)
        cv2.imshow('cropped', crop_frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break

cap.release()
out.release()
cv2.destroyAllWindows()
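To run a detector only on that zone, a common pattern is to crop the region, run inference on the crop, and then shift any returned boxes back into full-frame coordinates. A minimal sketch, where detect is a hypothetical stand-in for the vehicle detection model, assumed to return boxes as (bx, by, bw, bh) in crop coordinates:

import cv2

x, y, h, w = 0, 0, 600, 1000  # the red zone (same values as above)

cap = cv2.VideoCapture('test.mp4')
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break

    # Run the model on the zone only
    crop = frame[y:y+h, x:x+w]
    boxes = detect(crop)  # hypothetical: your model's inference call

    # Shift detections back into full-frame coordinates and draw them
    for (bx, by, bw, bh) in boxes:
        cv2.rectangle(frame, (x + bx, y + by),
                      (x + bx + bw, y + by + bh), (0, 0, 255), 2)

    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()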
I am trying to write a script to manipulate video from a webcam through OpenCV with Python, but I am running into some issues.
If I run the video capture stream with no pixel manipulation applied, the stream works fine and has a smooth frame rate. However, when I applied a threshold loop as a test, the stream lags badly and updates only once every few seconds. Is it possible to optimise this? Ideally, I am looking for a 30 fps stream with the video manipulation applied. Here is the code:
import cv2
import numpy as np

cap = cv2.VideoCapture(0)
T = 100

while True:
    ret, frame = cap.read()
    if not ret:
        break
    height, width, channels = frame.shape
    for x in range(width):
        for y in range(height):
            if frame[y, x, 0] < T:
                frame[y, x] = 0
            else:
                frame[y, x] = 255
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
Accessing pixels one by one in image processing is in general very bad practice, as it slows performance considerably. Packages like OpenCV and NumPy optimize this with matrix operations, which makes your program much faster. Here is sample code that performs your task much more quickly.
import cv2
import numpy as np

cap = cv2.VideoCapture(0)
T = 100

while True:
    ret, frame = cap.read()
    if not ret:
        break
    B, G, R = cv2.split(frame)
    # The slow per-pixel loop:
    # for x in range(width):
    #     for y in range(height):
    #         if frame[y, x, 0] < T:
    #             frame[y, x] = 0
    #         else:
    #             frame[y, x] = 255
    # is replaced by a single vectorized threshold on the blue channel:
    _, B = cv2.threshold(B, T, 255, cv2.THRESH_BINARY)
    frame = cv2.merge((B, G, R))
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
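Note that this version binarizes only the blue channel and leaves G and R untouched, whereas the original loop set all three channels from the blue-channel test. To reproduce the original behavior exactly with a vectorized operation, a NumPy boolean mask works (a minimal sketch):

# Inside the loop, instead of split/threshold/merge:
mask = frame[:, :, 0] < T   # blue-channel test, one boolean per pixel
frame[mask] = 0             # all three channels set to 0
frame[~mask] = 255          # all three channels set to 255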
I have a conference call video with different people's tiles arranged in a grid.
Example: a Zoom gallery view.
Can I crop every video tile to a separate file using Python or Node.js?
Yes, you can achieve that using the OpenCV library:
Read the video in OpenCV using the VideoCapture API, noting the frame rate while reading.
Parse through each frame and crop it.
Write the cropped frame to a video using the OpenCV VideoWriter.
Here is example code using (640, 480) as the new dimensions:
cap = cv2.VideoCapture(<video_file_name>)
fps = cap.get(cv2.CAP_PROP_FPS)
out = cv2.VideoWriter('<output video file name>', -1, fps, (640, 480))

# Crop region: top-left corner plus size, here matching the output dimensions
x, y, w, h = 0, 0, 640, 480

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    crop_frame = frame[y:y+h, x:x+w]
    # Write the cropped frame
    out.write(crop_frame)

# Release reader and writer after parsing all frames
cap.release()
out.release()
Here's the code (tested). It works by initialising a number of video outputs, then, for each frame of the input video, cropping each region of interest (roi) and writing it to the relevant output video. You might need to make tweaks depending on the input video dimensions, number of tiles, offsets, etc.
import numpy as np
import cv2

cap = cv2.VideoCapture('in.mp4')
ret, frame = cap.read()
(h, w, d) = np.shape(frame)

horiz_divisions = 5  # Number of tiles stacked horizontally
vert_divisions = 5   # Number of tiles stacked vertically
divisions = horiz_divisions * vert_divisions  # Total number of tiles
seg_h = int(h / vert_divisions)   # Tile height
seg_w = int(w / horiz_divisions)  # Tile width

# Initialise the output videos
outvideos = [0] * divisions
for i in range(divisions):
    outvideos[i] = cv2.VideoWriter('out{}.avi'.format(str(i)),
                                   cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                                   10, (seg_w, seg_h))

# Main loop
while cap.isOpened():
    ret, frame = cap.read()
    if ret:
        vid = 0  # Video counter
        for i in range(vert_divisions):
            for j in range(horiz_divisions):
                # Coordinates (top-left corner) of the current tile
                row = i * seg_h
                col = j * seg_w
                roi = frame[row:row+seg_h, col:col+seg_w, 0:3]  # Copy the region of interest
                outvideos[vid].write(roi)
                vid += 1
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break

# Release all the objects when the job is finished
cap.release()
for i in range(divisions):
    outvideos[i].release()
cv2.destroyAllWindows()
Hope this helps!
I am trying to write an OpenCV program where I break the video down into frames and compare two consecutive frames: if both are the same, I reject the frame; otherwise I append the frame to an output file.
How can I achieve this?
OpenCV 2.4.13, Python 2.7
The following example captures frames from the first camera connected to your system, compares each frame to the previous one, and, when they differ enough, writes the frame to a file. If you sit still in front of the camera, you may see the diagnostic 'no change' message printed when running the program from a console terminal window.
There are a number of ways to measure how different one frame is from another. For simplicity, we use the average difference, pixel by pixel, between the new frame and the previous frame, compared to a threshold.
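Concretely, for frames A and B containing N values in total, the measure is d(A, B) = sum(|A - B|) / N, and the new frame is written when d exceeds the threshold.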
Note that frames are returned as NumPy arrays by the OpenCV read function.
import numpy as np
import cv2

interval = 100
fps = 1000. / interval
camnum = 0
outfilename = 'temp.avi'
threshold = 100.

cap = cv2.VideoCapture(camnum)
ret, frame = cap.read()
height, width, nchannels = frame.shape

fourcc = cv2.VideoWriter_fourcc(*'MJPG')
out = cv2.VideoWriter(outfilename, fourcc, fps, (width, height))

while True:
    # Previous frame
    frame0 = frame
    # New frame
    ret, frame = cap.read()
    if not ret:
        break
    # How different is it? cv2.absdiff avoids the uint8 wraparound
    # that a plain frame - frame0 subtraction would suffer from.
    if np.sum(cv2.absdiff(frame, frame0)) / np.size(frame) > threshold:
        out.write(frame)
    else:
        print('no change')
    # Show it
    cv2.imshow('Type "q" to close', frame)
    # Check for a keystroke
    key = cv2.waitKey(interval) & 0xFF
    # Exit if so commanded
    if key == ord('q'):
        print('received key q')
        break

# When everything is done, release the capture
cap.release()
out.release()
print('VideoDemo - exit')
What I need to do is fairly simple:
load a 5-frame video file
detect the background
On every frame, one by one:
subtract the background (create a foreground mask)
do some calculations on the foreground mask
save both the original frame and the foreground mask
Just to see the 5 frames and the 5 corresponding fgmasks:
import numpy as np
import cv2

cap = cv2.VideoCapture('test.avi')
fgbg = cv2.BackgroundSubtractorMOG()

while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    if not ret:
        break
    fgmask = fgbg.apply(frame)
    # Display the fgmask frame
    cv2.imshow('fgmask', fgmask)
    # Display the original frame
    cv2.imshow('img', frame)
    k = cv2.waitKey(0) & 0xff
    if k == 27:  # Esc to quit
        break

cap.release()
cv2.destroyAllWindows()
Every frame is opened and displayed correctly, but the displayed fgmask does not correspond to the displayed original frame. Somewhere in the process, the order of the fgmasks gets mixed up.
The background does get subtracted correctly, but I don't get the 5 expected fgmasks.
What am I missing? I feel like this should be straightforward: the while loop runs over the 5 frames of the video, and fgbg.apply applies the background subtraction function to each frame.
The OpenCV version I use is opencv-2.4.9-3.
As bikz05 suggested, the running average method worked pretty well on my sets of 5 images. Thanks for the tip!
import cv2
import numpy as np

c = cv2.VideoCapture('test.avi')
_, f = c.read()

avg1 = np.float32(f)
avg2 = np.float32(f)

# Loop over the images and estimate the background
for x in range(0, 4):
    _, f = c.read()
    cv2.accumulateWeighted(f, avg1, 1)
    cv2.accumulateWeighted(f, avg2, 0.01)
    res1 = cv2.convertScaleAbs(avg1)
    res2 = cv2.convertScaleAbs(avg2)
    cv2.imshow('img', f)
    cv2.imshow('avg1', res1)
    cv2.imshow('avg2', res2)
    k = cv2.waitKey(0) & 0xff
    if k == 27:  # Esc to quit
        break

c.release()
cv2.destroyAllWindows()
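The running average only gives the background estimate; to get the foreground mask the original task asked for, one option (a sketch, not from the original answer, with a hypothetical threshold of 30) is to take the absolute difference between each frame and the slowly adapting average, then threshold it:

# Inside the loop, after accumulating into avg2 (weight 0.01):
bg = cv2.convertScaleAbs(avg2)                 # background estimate as uint8
diff = cv2.absdiff(f, bg)                      # per-pixel difference to the background
gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)  # collapse to one channel
_, fgmask = cv2.threshold(gray, 30, 255, cv2.THRESH_BINARY)  # hypothetical threshold
cv2.imshow('fgmask', fgmask)

The avg2 accumulator with a small weight (0.01) adapts slowly, so moving objects barely affect it, which is what makes it usable as a background estimate; avg1 with weight 1 simply tracks the latest frame.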