External USB camera with OpenCV: image oversaturated - Python

I am working with OpenCV 4.4.0 in Python 3.7, and whenever I grab images from an external USB camera they come out oversaturated. How can I adjust the brightness parameter for the image capture?
The camera is an external Microsoft USB camera with a 1080p HD sensor.
Below are the code and an image sample.
import cv2
import numpy

def get_img_camera():  # return frame (img)
    cam = cv2.VideoCapture(0)  # 1 laptop camera, 0 external camera
    cam.set(3, 1280)  # 3 == CAP_PROP_FRAME_WIDTH
    cam.set(4, 720)   # 4 == CAP_PROP_FRAME_HEIGHT
    cv2.namedWindow("Plates")
    while True:
        ret, frame = cam.read()
        if not ret:
            print("failed to grab frame")
            break
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2BGRA)
        scale = 1.0  # opacity out of 100%
        frame_darker = (frame * scale).astype(numpy.uint8)
        #cam = frame_darker
        #cv2.imshow("Image", frame)
        #k = cv2.waitKey(0)
        img_name = "img_from_camera.jpg"
        cv2.imwrite(img_name, frame_darker)
        print("{} written!".format(img_name))
        break
    cam.release()
    cv2.destroyAllWindows()
    return frame

get_img_camera()
(Image capture sample: oversaturated)
Thank you in advance!

I think you can try the apiPreference parameter, which selects the preferred capture API backend to use. It can be used to enforce a specific reader implementation if multiple are available (https://docs.opencv.org/3.4/d4/d15/group__videoio__flags__base.html).
# capture from camera at location 0, forcing the DirectShow backend
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
# Brightness (0-100); 10 == cv2.CAP_PROP_BRIGHTNESS
cap.set(10, 100)
# Saturation (0-100); 12 == cv2.CAP_PROP_SATURATION
cap.set(12, 100)
Those calls work for me, so they are worth a try. Also make sure your Python and OpenCV versions are not too old.
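For readability you can also use the named property constants instead of the raw indices. Here is a minimal sketch along those lines; the concrete values (brightness 100, exposure -6, auto-exposure 0.25) are only example settings, since the accepted ranges and their meaning depend on the camera driver and backend:
import cv2

cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)  # DirectShow backend on Windows
# Try to take the camera out of automatic mode first; whether this is honored
# (and which value means "manual") varies by backend and driver.
cap.set(cv2.CAP_PROP_AUTO_EXPOSURE, 0.25)
cap.set(cv2.CAP_PROP_BRIGHTNESS, 100)   # example value
cap.set(cv2.CAP_PROP_SATURATION, 100)   # example value
cap.set(cv2.CAP_PROP_EXPOSURE, -6)      # example value; many UVC drivers use a negative log scale

ret, frame = cap.read()
if ret:
    cv2.imwrite("img_from_camera.jpg", frame)
cap.release()
You can print cap.get(cv2.CAP_PROP_BRIGHTNESS) after setting it to check whether the driver actually accepted the value.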

Related

How to append an image to a video using OpenCV or FFMPEG or Moviepy or other libraries?

Do you know of a Python library to add a frame image to an existing video? The resulting video must have the same quality as the image.
I tried to use OpenCV to add this Google image: https://www.google.com/search?q=google&sxsrf=ALiCzsZhrdoHnOTmg0We4dxtguCqzma5Jg:1657603343101&source=lnms&tbm=isch&sa=X&ved=2ahUKEwiTh8bTzfL4AhWhplYBHfXNAKwQ_AUoAXoECAIQAw&biw=1492&bih=739&dpr=1.25#imgrc=PRtenhDnrVrfOM
But the quality decreases as the video gets longer.
Here is the final result video: https://drive.google.com/file/d/1ArDvoX-kN9H_oLbACk3kU1Cid93SMczC/view?usp=sharing
Here is my code using OpenCV:
image = cv2.imread(path_image)
height, width, dimensions = image.shape
video = cv2.VideoCapture(path_video)
frames = []

while True:
    ret, frame = video.read()
    if ret == True:
        frames.append(frame)
        # frame = frame.resize(frame, (width, height), fx=0, fy=0, interpolation = cv2.INTER_CUBIC)
        # Press S on keyboard
        # to stop the process
        if cv2.waitKey(1) & 0xFF == ord('s'):
            break
    # Break the loop
    else:
        break

video2 = cv2.VideoWriter(path_video, cv2.VideoWriter_fourcc('M','J','P','G'), 30, (width, height))
for frame in frames:
    video2.write(frame)
video2.write(image)
video2.release()  # releasing the video generated
print("Added {}".format(image_name))
I would like to improve the quality of this video.
I guess a simple way to achieve that using moviepy would be the following:
from moviepy.editor import *
from PIL import Image
import numpy as np
maxsize = (target_width, target_height)
jpg_image = Image.open(path_to_the_image)
# modify the image's resolution to be the target one
jpg_image.thumbnail(maxsize, Image.ANTIALIAS)
jpg_image = np.array(jpg_image)
image = ImageClip(jpg_image).set_duration(target_duration)
clip = VideoFileClip(path_to_the_video)
video = concatenate([image, clip], method="compose")
video.write_videofile("output_example.mp4", fps=target_fps)
As long as you set the image's target resolution to match the video's, that is all you need.
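If you do not know the video's resolution and frame rate up front, one way to get the target values is to read them from the clip itself before building the image clip. A small sketch, using the VideoFileClip attributes size and fps; the 3-second duration is just an example value:
from moviepy.editor import VideoFileClip

clip = VideoFileClip(path_to_the_video)
target_width, target_height = clip.size   # (width, height) of the existing video
target_fps = clip.fps                     # reuse the video's frame rate
target_duration = 3                       # seconds to show the image (example value)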

Webcam input sent to pyvirtualcam is blue (using pyvirtualcam and OpenCV)

I am trying to get input from my webcam using OpenCV and send it to a virtual camera using pyvirtualcam. For some reason, when the webcam feed is displayed through the virtual camera it has a blue tint. When I display the webcam feed without sending it to the virtual camera, there is no tint and everything works well.
import pyvirtualcam
import cv2

cap = cv2.VideoCapture(0)
with pyvirtualcam.Camera(width=1280, height=720, fps=20) as cam:
    while True:
        ret_val, frame = cap.read()
        frame = cv2.resize(frame, (1280, 720), interpolation=cv2.BORDER_DEFAULT)
        # cv2.imshow('my webcam', frame)
        cam.send(frame)
        cam.sleep_until_next_frame()
        if cv2.waitKey(1) == 27:
            break  # esc to quit
cv2.destroyAllWindows()
OpenCV uses BGR as its pixel format. pyvirtualcam expects RGB by default, but it supports BGR as well:
fmt = pyvirtualcam.PixelFormat.BGR
with pyvirtualcam.Camera(width=1280, height=720, fps=20, fmt=fmt) as cam:
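Alternatively, if you want to keep the virtual camera in its default RGB format, you can convert each frame before sending it. A minimal sketch of that alternative:
import cv2
import pyvirtualcam

cap = cv2.VideoCapture(0)
with pyvirtualcam.Camera(width=1280, height=720, fps=20) as cam:
    while True:
        ret_val, frame = cap.read()
        if not ret_val:
            break
        frame = cv2.resize(frame, (1280, 720))
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # swap channels so reds and blues come out correct
        cam.send(rgb)
        cam.sleep_until_next_frame()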

Completely bright camera image with Python/OpenCV (even with no light)

I want to use OpenCV to obtain images from a USB camera (DMK 23UP1300). Unfortunately the images are nearly completely bright; the intensity values are all between 250 and 255, even if there is no light (tested with Python 2.7 and 3.7). Does anybody know what the problem could be? I updated the camera drivers, with no effect. I even set the exposure time down to the minimum possible value, which had no impact. Please see the attached code as a minimal example:
import cv2

class Camera:
    def __init__(self):
        self.initialize_value = 0

    def GetImage(self):
        if self.initialize_value == 0:
            self.cap = cv2.VideoCapture(0)
            self.cap.set(3, 1280)
            self.cap.set(4, 1024)
            self.cap.set(cv2.CAP_PROP_EXPOSURE, -13)  # change exposure
            self.initialize_value = 1
        ret, im_orig = self.cap.read()
        #cv2.imshow("Livestream_Window",im_orig)
        self.im_main = cv2.cvtColor(im_orig, cv2.COLOR_BGR2GRAY)
        print(self.im_main)
        cv2.imshow("Livestream_Window", self.im_main)
        cv2.waitKey(1)

if __name__ == "__main__":
    cam = Camera()  # initialize camera
    ct = 0
    while 1:
        print(ct)
        ct = ct + 1
        cam.GetImage()  # Options: Camera, Image, Video
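One thing that is sometimes worth checking in this situation (a sketch, not a confirmed fix for this particular camera) is whether the driver is still in auto-exposure mode, since CAP_PROP_EXPOSURE is often ignored until auto exposure is disabled; the 0.25 below is only a commonly used value and its meaning varies by backend:
import cv2

cap = cv2.VideoCapture(0)
# The value that means "manual" differs between backends/drivers (0.25 on V4L2, 0 on some others).
cap.set(cv2.CAP_PROP_AUTO_EXPOSURE, 0.25)
cap.set(cv2.CAP_PROP_EXPOSURE, -13)
print("auto exposure:", cap.get(cv2.CAP_PROP_AUTO_EXPOSURE))
print("exposure:", cap.get(cv2.CAP_PROP_EXPOSURE))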

Python OpenCV crops input image from USB camera

I'm trying to create a tool that takes images from stereo cameras (connected to a sync board) with Python and OpenCV.
When I look at the image I get from OpenCV, it seems different from what I get with the Windows Camera app.
Both are set to the same resolution. What am I missing?
cap = cv2.VideoCapture(0)
# cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
# cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)

while 1:
    ret, frame = cap.read()
    # cv2.imshow("frame",frame)
    cv2.imwrite('test.bmp', frame)
    if cv2.waitKey(20) & 0xFF == ord('q'):
        break

width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
print('size:', width, height)
Both images are 1344x376.
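One way to check whether the backend is silently choosing a different mode than the Windows Camera app (a hedged sketch; CAP_DSHOW is a Windows-specific backend and the 1280x720 request is only an example, not necessarily a mode this camera supports) is to request a resolution explicitly and print what the driver actually returns:
import cv2

cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)   # try a specific backend on Windows
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)    # example request
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
print('requested 1280x720, got:',
      cap.get(cv2.CAP_PROP_FRAME_WIDTH),
      cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
ret, frame = cap.read()
if ret:
    print('actual frame shape:', frame.shape)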

Displaying the camera feed in grayscale in Python with OpenCV

I've been trying to display the camera feed from my laptop's webcam in grayscale, and I've done it using the following code:
import cv2
import numpy as np

clicked = False
def onMouse(event, x, y, flags, param):
    global clicked
    if event == cv2.cv.CV_EVENT_LBUTTONUP:
        clicked = True

cv2.namedWindow('image capture', cv2.WINDOW_NORMAL)
cv2.setMouseCallback('image capture', onMouse)

#initialize the camera object with VideoCapture
camera = cv2.VideoCapture(0)
sucess, frame = camera.read()
cv2.imwrite('snapshot.png', frame)
gray = cv2.imread('snapshot.png', cv2.IMREAD_GRAYSCALE)

while sucess and cv2.waitKey(1) == -1 and not clicked:
    cv2.imwrite('snapshot.png', frame)
    gray = cv2.imread('snapshot.png', cv2.IMREAD_GRAYSCALE)
    cv2.imshow('image capture', gray)
    sucess, frame = camera.read()

cv2.imwrite('snapshot.png', frame)
print 'photo taken press any key to exit'
cv2.waitKey()
cv2.destroyAllWindows()
What I've done here is save the frame to 'snapshot.png', reload it in grayscale, and display that grayscale image. Is there any method to read the camera frame directly in grayscale rather than going through all this mess? Thanks in advance.
wow, what a mess ;)
you simply want:
gray = cv2.cvtColor( img, cv2.COLOR_BGR2GRAY )
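Put into the capture loop, that looks roughly like this (a minimal sketch; the window name and exit condition are arbitrary choices):
import cv2

camera = cv2.VideoCapture(0)
while True:
    success, frame = camera.read()
    if not success:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # convert in memory, no file round-trip
    cv2.imshow('image capture', gray)
    if cv2.waitKey(1) != -1:   # any key stops the loop
        break
camera.release()
cv2.destroyAllWindows()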
In recent versions of OpenCV, cvtColor expects its src to be not None, and otherwise raises a (-215) assertion error.
This is basically like a scenario where you have to use a try/catch block to handle exceptions: you need to guard against the bad frame before using it.
Code to overcome this problem:
while True:
    ret, frame = cap.read()
    if ret and frame is not None:   # skip empty frames so cvtColor never receives None
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        cv2.imshow('frame', gray)
    if cv2.waitKey(1) == 27:        # esc to quit
        break
