I want to use OpenCV to obtain images from a USB camera (DMK 23UP1300). Unfortunately the images are almost completely bright: the intensity values are all between 250 and 255, even when there is no light (tested with Python 2.7 and 3.7). Does anybody know what the problem could be? I also updated the camera drivers, with no effect, and I even reduced the exposure time to the minimum possible value, which had no impact. Please see the attached code as a minimal example:
import cv2

class Camera:
    def __init__(self):
        self.initialize_value = 0

    def GetImage(self):
        if self.initialize_value == 0:
            self.cap = cv2.VideoCapture(0)
            self.cap.set(3, 1280)  # 3 = cv2.CAP_PROP_FRAME_WIDTH
            self.cap.set(4, 1024)  # 4 = cv2.CAP_PROP_FRAME_HEIGHT
            self.cap.set(cv2.CAP_PROP_EXPOSURE, -13)  # change exposure
            self.initialize_value = 1
        ret, im_orig = self.cap.read()
        #cv2.imshow("Livestream_Window", im_orig)
        self.im_main = cv2.cvtColor(im_orig, cv2.COLOR_BGR2GRAY)
        print(self.im_main)
        cv2.imshow("Livestream_Window", self.im_main)
        cv2.waitKey(1)

if __name__ == "__main__":
    cam = Camera()  # initialize camera
    ct = 0
    while 1:
        print(ct)
        ct = ct + 1
        cam.GetImage()  # Options: Camera, Image, Video
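For completeness, this is roughly what I would try next: turning off auto-exposure and reading the property back to see whether the set() was accepted. Whether these properties are honoured, and which values they expect, depends on the backend and the camera driver, so the specific constants below are assumptions:

import cv2

cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)  # assumption: force the DirectShow backend on Windows
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1024)
cap.set(cv2.CAP_PROP_AUTO_EXPOSURE, 0)    # assumption: accepted values are driver-dependent (e.g. 0/1 or 0.25/0.75)
cap.set(cv2.CAP_PROP_EXPOSURE, -13)
print("exposure is now:", cap.get(cv2.CAP_PROP_EXPOSURE))  # check whether the set() actually took effect
cap.release()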
Do you know of a Python library to append an image as a frame to an existing video? The resulting video must have the same quality as the image.
I tried to use OpenCV to add this Google image: https://www.google.com/search?q=google&sxsrf=ALiCzsZhrdoHnOTmg0We4dxtguCqzma5Jg:1657603343101&source=lnms&tbm=isch&sa=X&ved=2ahUKEwiTh8bTzfL4AhWhplYBHfXNAKwQ_AUoAXoECAIQAw&biw=1492&bih=739&dpr=1.25#imgrc=PRtenhDnrVrfOM
But the quality decreases as the video gets longer.
Here is the final result video: https://drive.google.com/file/d/1ArDvoX-kN9H_oLbACk3kU1Cid93SMczC/view?usp=sharing
Here is my code using OpenCV:
import cv2

image = cv2.imread(path_image)
height, width, dimensions = image.shape

video = cv2.VideoCapture(path_video)
frames = []
while True:
    ret, frame = video.read()
    if ret == True:
        frames.append(frame)
        # frame = frame.resize(frame, (width, height), fx=0, fy=0, interpolation = cv2.INTER_CUBIC)
        # Press S on keyboard to stop the process
        if cv2.waitKey(1) & 0xFF == ord('s'):
            break
    # Break the loop
    else:
        break

video2 = cv2.VideoWriter(path_video, cv2.VideoWriter_fourcc('M','J','P','G'), 30, (width, height))
for frame in frames:
    video2.write(frame)
video2.write(image)
video2.release()  # releasing the video generated
print("Added {}".format(path_image))
I hope to improve the quality of this video.
I guess a simple way to achieve that using moviepy would be the following:
from moviepy.editor import ImageClip, VideoFileClip, concatenate_videoclips
from PIL import Image
import numpy as np

maxsize = (target_width, target_height)
jpg_image = Image.open(path_to_the_image)
# modify the image's resolution to be the target one
jpg_image.thumbnail(maxsize, Image.LANCZOS)  # ANTIALIAS is the older Pillow name for LANCZOS
jpg_image = np.array(jpg_image)

image = ImageClip(jpg_image).set_duration(target_duration)
clip = VideoFileClip(path_to_the_video)
video = concatenate_videoclips([image, clip], method="compose")
video.write_videofile("output_example.mp4", fps=target_fps)
As long as you set the image's target resolution to match the video's, that is all you need.
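If it helps, here is a small sketch (my own addition, not part of the original answer) that derives the target values from the source clip instead of hard-coding them; target_duration is just an example value:

from moviepy.editor import VideoFileClip

clip = VideoFileClip(path_to_the_video)
target_width, target_height = clip.size  # match the video's resolution
target_fps = clip.fps                    # keep the original frame rate
target_duration = 3                      # e.g. show the still image for 3 seconds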
I am working with OpenCV 4.4.0 in Python 3.7, and whenever I capture images from an external USB camera they come out oversaturated. How can I adjust the brightness parameter for the image capture?
The camera is an external Microsoft 1080p HD Sensor USB camera.
Below are the code and an image sample.
import cv2
import numpy

def get_img_camera():  # return frame (img)
    cam = cv2.VideoCapture(0)  # 1 laptop camera, 0 external camera
    cam.set(3, 1280)  # 3 = cv2.CAP_PROP_FRAME_WIDTH
    cam.set(4, 720)   # 4 = cv2.CAP_PROP_FRAME_HEIGHT
    cv2.namedWindow("Plates")
    while True:
        ret, frame = cam.read()
        if not ret:
            print("failed to grab frame")
            break
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2BGRA)
        scale = 1.0  # opacity out of 100%
        frame_darker = (frame * scale).astype(numpy.uint8)
        #cam = frame_darker
        #cv2.imshow("Image", frame)
        #k = cv2.waitKey(0)
        img_name = "img_from_camera.jpg"
        cv2.imwrite(img_name, frame_darker)
        print("{} written!".format(img_name))
        break
    cam.release()
    cv2.destroyAllWindows()
    return frame

get_img_camera()
[Image capture: oversaturated]
Thank you in advance!
I think you can try the ApiPreference parameter, which selects the preferred capture API backend; it can be used to enforce a specific reader implementation if multiple are available (https://docs.opencv.org/3.4/d4/d15/group__videoio__flags__base.html).
# capture from camera at location 0, forcing the DirectShow backend
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
# Brightness (0-100); 10 is cv2.CAP_PROP_BRIGHTNESS
cap.set(10, 100)
# Saturation (0-100); 12 is cv2.CAP_PROP_SATURATION
cap.set(12, 100)
Those calls work for me, so they are worth a try. Also make sure your Python and OpenCV versions are not too old.
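As a quick sanity check (my own addition, not from the original answer), you can verify which backend was actually chosen and whether the property writes took effect, assuming an OpenCV build that provides VideoCapture.getBackendName():

import cv2

cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
print("backend:", cap.getBackendName())                 # e.g. "DSHOW"
print("brightness:", cap.get(cv2.CAP_PROP_BRIGHTNESS))  # some drivers silently ignore set()
print("saturation:", cap.get(cv2.CAP_PROP_SATURATION))
cap.release()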
I'm stuck. My code sucks. My sliders don't work either, but the infinite image windows are driving me nuts. When I close the namedWindow, a new display window with the image opens (infinitely). Help?
import numpy as np
import cv2
from pylepton import Lepton

#setup the Lepton image buffer
def capture(device = "/dev/spidev0.0"):
    with Lepton() as l:
        a,_ = l.capture() #grab the buffer
    cv2.normalize(a, a, 0, 65535, cv2.NORM_MINMAX) # extend contrast
    np.right_shift(a, 8, a) # fit data into 8 bits
    return np.uint8(a)

#Create a window and give it features
def nothing(x):
    pass

cv2.namedWindow('flir', cv2.WINDOW_NORMAL)
cv2.moveWindow('flir', 1, 1)
cv2.createTrackbar('thresh', 'flir', 50, 100, nothing)
cv2.createTrackbar('erode', 'flir', 5, 100, nothing)
cv2.createTrackbar('dilate', 'flir', 7, 100, nothing)

#process the buffer into an image on a continuous loop
while True:
    #update the image processing variables
    thresh = cv2.getTrackbarPos('thresh', 'flir')
    erodeSize = cv2.getTrackbarPos('erode', 'flir')
    dilateSize = cv2.getTrackbarPos('dilate', 'flir')
    image = capture()

    #apply some image processing
    blurredBrightness = cv2.bilateralFilter(image, 9, 150, 150)
    thresh = 50  # note: overwrites the trackbar value read above
    edges = cv2.Canny(blurredBrightness, thresh, thresh*2, L2gradient=True)
    _, mask = cv2.threshold(blurredBrightness, 200, 1, cv2.THRESH_BINARY)

    erodeSize = 5    # note: overwrites the trackbar value read above
    dilateSize = 14  # note: overwrites the trackbar value read above
    eroded = cv2.erode(mask, np.ones((erodeSize, erodeSize)))
    mask = cv2.dilate(eroded, np.ones((dilateSize, dilateSize)))

    adjusted_image = cv2.resize(cv2.cvtColor(mask*edges, cv2.COLOR_GRAY2RGB) | image, (640, 480))  # the original line was truncated here; (640, 480) assumed
    final_image = cv2.applyColorMap(adjusted_image, cv2.COLORMAP_HOT)

    #display the image
    cv2.imshow('flir', final_image)
    if cv2.waitKey(1) == ord('q'):
        break

cv2.waitKey()
cv2.destroyWindow('flir')
Firstly, calm down.
Secondly, look at your code closely. Closing the window wouldn't do you any good, because of the lines:
cv2.imshow('flir', final_image)
and
cv2.destroyWindow('flir')
What these two do in tandem is: imshow displays a frame in the window, the window then gets destroyed (whether by cv2.destroyWindow or by you closing it with the mouse), and the next imshow call recreates it to display the next frame, which gets destroyed again... and so on and so forth.
That should explain your flickering windows.
To stop execution of your program, you've added this code:
if cv2.waitKey(1) == ord('q'):
break
What this implies is that when you press 'q' on your keyboard while your image window is in focus, your while loop will break and your program will terminate.
So I would advise you to remove cv2.destroyWindow and to use the 'q' key to quit your application rather than closing it with your mouse.
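To make the suggested structure concrete, here is a minimal sketch (my own illustration, with a hypothetical get_frame() standing in for the Lepton capture and processing):

import cv2
import numpy as np

def get_frame():
    # hypothetical placeholder for capture() plus the image processing
    return np.zeros((60, 80), dtype=np.uint8)

cv2.namedWindow('flir', cv2.WINDOW_NORMAL)  # create the window once

while True:
    cv2.imshow('flir', get_frame())         # reuse the same window every frame
    if cv2.waitKey(1) == ord('q'):          # press 'q' to leave the loop
        break

cv2.destroyAllWindows()                     # clean up only after the loop has ended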
I am using pygame to capture an image, and I can't seem to get it right. Attached is the image, and as you can see the image is split down the middle.
Here is the source code:
import pygame
import pygame.camera

def getImg(fname):
    pygame.camera.init()
    cm = pygame.camera.list_cameras()
    cam = pygame.camera.Camera(cm[0])
    cam.start()
    img = cam.get_image()
    pygame.image.save(img, fname)
    cam.stop()
OK, so I found a workaround that I am posting, since it may be useful in the future:
import pygame
import pygame.camera

def saveImage(fname):
    pygame.camera.init()
    cam_list = pygame.camera.list_cameras()
    cam = pygame.camera.Camera(cam_list[0])
    cam.start()
    ## the extra get_image() calls:
    ## the first ones get the camera working,
    ## the last one captures a clean image
    cam.get_image()
    cam.get_image()
    img = cam.get_image()
    pygame.image.save(img, fname)
    cam.stop()
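For what it's worth, a small parameterised variant (my own addition; the number of warm-up frames is an assumption you may need to tune for your camera):

import pygame
import pygame.camera

def save_image(fname, warmup_frames=2):
    # capture one image, discarding a few warm-up frames first
    pygame.camera.init()
    cam_list = pygame.camera.list_cameras()
    cam = pygame.camera.Camera(cam_list[0])
    cam.start()
    for _ in range(warmup_frames):  # let exposure/white balance settle
        cam.get_image()
    img = cam.get_image()
    pygame.image.save(img, fname)
    cam.stop()

save_image("capture.png")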
I am trying to superimpose an image over a camera feed in python. I can get an image to superimpose over another image, but when I apply the same thing to my camera feed it doesn't work. Here's my code so far:
#!/usr/bin/python
import cv2
import time

cv2.cv.NamedWindow("Hawk Eye", 1)
capture = cv2.cv.CaptureFromCAM(0)
cv2.cv.SetCaptureProperty(capture, cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 800)
cv2.cv.SetCaptureProperty(capture, cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 600)

x_offset = y_offset = 50
arrows = cv2.imread("arrows.png")

while True:
    webcam = cv2.cv.QueryFrame(capture)
    #webcam[y_offset:y_offset+arrows.shape[0], x_offset:x_offset+arrows.shape[1]] = arrows
    cv2.cv.ShowImage("Hawk Eye", webcam)
    if cv2.cv.WaitKey(10) == 27:
        break

cv2.cv.DestroyAllWindows()
With that line commented out:
img[y_offset:y_offset+arrows.shape[0], x_offset:x_offset+arrows.shape[1]] = arrows
(the line that superimposes the image), it shows just the camera feed fine, but when I add it to my loop it stops working. Thanks!
This works OK using the cv2 API:
import cv2
import time

cv2.namedWindow("Hawk Eye", 1)
capture = cv2.VideoCapture(0)
# OpenCV 2.x constants; on OpenCV 3+/4+ use cv2.CAP_PROP_FRAME_WIDTH / cv2.CAP_PROP_FRAME_HEIGHT instead
capture.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, 800)
capture.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, 600)

x_offset = y_offset = 50
arrows = cv2.imread("hawk.png")

while True:
    ret, webcam = capture.read()
    if ret:
        webcam[y_offset:y_offset+arrows.shape[0], x_offset:x_offset+arrows.shape[1]] = arrows
        cv2.imshow("Hawk Eye", webcam)
    if cv2.waitKey(10) == 27:
        break

cv2.destroyAllWindows()
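If the overlay PNG has transparency, a hard slice assignment like the one above will paste its background as well. Here is a minimal alpha-blending sketch (my own addition, assuming "arrows.png" has an alpha channel and fits inside the frame):

import cv2
import numpy as np

overlay = cv2.imread("arrows.png", cv2.IMREAD_UNCHANGED)  # keep the alpha channel
bgr = overlay[:, :, :3].astype(np.float32)
alpha = overlay[:, :, 3:].astype(np.float32) / 255.0      # shape (h, w, 1)

x_offset = y_offset = 50
h, w = overlay.shape[:2]

cap = cv2.VideoCapture(0)
while True:
    ret, frame = cap.read()
    if not ret:
        break
    roi = frame[y_offset:y_offset + h, x_offset:x_offset + w].astype(np.float32)
    # blend: overlay where alpha is 1, camera feed where alpha is 0
    frame[y_offset:y_offset + h, x_offset:x_offset + w] = (alpha * bgr + (1 - alpha) * roi).astype(np.uint8)
    cv2.imshow("Hawk Eye", frame)
    if cv2.waitKey(10) == 27:  # Esc to quit
        break

cap.release()
cv2.destroyAllWindows()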