I have a 720p camera and I'm trying to open the camera in a view window in OpenCV. Here's the code:
while(True):
    ret, frame = vid.read()
    if render_poly:
        frame = cv2.polylines(frame, [pts],
                              isClosed, color, thickness)
    cv2.imshow('window', frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord('q'):
        break
    elif key == ord('p'):
        render_poly = not render_poly
Before that, I tried to adjust the view window with this code (720p resolution):
vid = cv2.VideoCapture(0)
vid.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
vid.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
What I expected was a 1280x720 view window, but for some reason I got 1600x900, which caused incorrect positioning of the polygon I'm trying to render. To solve that I had to adjust its coordinates by multiplying them by a scale of 1280/1600 in width and 720/900 in height.
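Roughly, the adjustment I mean looks like this (the point values here are just an example, not my real polygon):

import numpy as np

# polygon defined for the expected 1280x720 frame (example values)
pts = np.array([[100, 100], [400, 100], [400, 300], [100, 300]], dtype=np.int32)

# scale factors described above: 1280/1600 in width, 720/900 in height
scale_x = 1280 / 1600
scale_y = 720 / 900
pts = (pts * np.array([scale_x, scale_y])).astype(np.int32)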
Can I skip that process (there will be more of it later in the project) and just render the camera view in a window at the same resolution? Is that a good idea?
This simple Python script should show the webcam video continuously, but it only shows the first frame.
import cv2

print('cv2 version is ' + str(cv2.getVersionString()))

def capture_config(camera_port=0):
    frame_height = 480
    frame_width = 640
    cap = cv2.VideoCapture(camera_port)
    cap.set(3, frame_width)
    cap.set(4, frame_height)
    if not cap.isOpened():
        print('Unable to read camera feed')
        return False
    return cap

cap = capture_config()
while cap:
    ret, frame = cap.read()
    cv2.imshow('captured frame', frame)
    if cv2.waitKey(0) & 0xff == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
The output is:
cv2 version is 4.5.1
It's on Ubuntu 20.04 and Python 3.8.
Any valid reason why the script would only show the first frame?
I am trying to get input from my webcam using OpenCV and send it to a virtual camera using pyvirtualcam. For some reason, when my webcam is displayed it gets a blue filter. When I display my webcam without sending it to the virtual camera there is no filter and everything works well.
import pyvirtualcam
import cv2

cap = cv2.VideoCapture(0)
with pyvirtualcam.Camera(width=1280, height=720, fps=20) as cam:
    while True:
        ret_val, frame = cap.read()
        frame = cv2.resize(frame, (1280, 720), interpolation=cv2.BORDER_DEFAULT)
        # cv2.imshow('my webcam', frame)
        cam.send(frame)
        cam.sleep_until_next_frame()
        if cv2.waitKey(1) == 27:
            break  # esc to quit
cv2.destroyAllWindows()
OpenCV uses BGR as its pixel format. pyvirtualcam expects RGB by default, but it supports BGR as well:
fmt = pyvirtualcam.PixelFormat.BGR
with pyvirtualcam.Camera(width=1280, height=720, fps=20, fmt=fmt) as cam:
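Putting it together, a minimal sketch of your loop with the BGR format set (same width/height/fps as in your code; the interpolation argument is simply left at its default here) would be:

import cv2
import pyvirtualcam
from pyvirtualcam import PixelFormat

cap = cv2.VideoCapture(0)

# declare BGR so the frames from cv2.VideoCapture can be sent without conversion
with pyvirtualcam.Camera(width=1280, height=720, fps=20, fmt=PixelFormat.BGR) as cam:
    while True:
        ret_val, frame = cap.read()
        if not ret_val:
            break
        frame = cv2.resize(frame, (1280, 720))  # match the virtual camera size
        cam.send(frame)
        cam.sleep_until_next_frame()

cap.release()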
I am working with OpenCV 4.4.0 in Python 3.7, and whenever I get images from an external USB camera they are oversaturated. How can I adjust the brightness parameter for the image capture?
The camera is an external Microsoft USB camera with a 1080p HD sensor.
Below are the code and an image sample.
import cv2
import numpy

def get_img_camera():  # returns frame (img)
    cam = cv2.VideoCapture(0)  # 1 laptop camera, 0 external camera
    cam.set(3, 1280)
    cam.set(4, 720)
    cv2.namedWindow("Plates")
    while True:
        ret, frame = cam.read()
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2BGRA)
        scale = 1.0  # opacity out of 100%
        frame_darker = (frame * scale).astype(numpy.uint8)
        # cam = frame_darker
        if not ret:
            print("failed to grab frame")
            break
        # cv2.imshow("Image", frame)
        # k = cv2.waitKey(0)
        img_name = "img_from_camera.jpg"
        cv2.imwrite(img_name, frame_darker)
        print("{} written!".format(img_name))
        break
    cam.release()
    cv2.destroyAllWindows()
    return frame

get_img_camera()
[Image: captured frame, oversaturated]
Thank you in advance!
I think you can try the ApiPreference parameter, which selects the preferred capture API backend. It can be used to enforce a specific reader implementation if multiple are available (https://docs.opencv.org/3.4/d4/d15/group__videoio__flags__base.html).
# capture from camera at location 0
cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
# Brightness (0-100)
cap.set(10, 100)
# Saturation (0-100)
cap.set(12, 100)
Those functions work for me and are worth a try. Also make sure your Python and OpenCV versions are not too old.
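For readability you can use the named property constants instead of the raw ids 10 and 12; whether the values actually take effect depends on the camera driver and backend:

import cv2

cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)

# CAP_PROP_BRIGHTNESS is property id 10, CAP_PROP_SATURATION is id 12
cap.set(cv2.CAP_PROP_BRIGHTNESS, 100)
cap.set(cv2.CAP_PROP_SATURATION, 100)

# read the values back to see whether the driver accepted them
print(cap.get(cv2.CAP_PROP_BRIGHTNESS), cap.get(cv2.CAP_PROP_SATURATION))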
I'm writing this piece of Python to display a video stream from my webcam while recording it at the same time, which I've got working. However, I've grayscaled and timestamped the video streamed to my screen, but my recorded video is in colour! I've included the code below. I've tried using some global variables but nothing worked. Any help greatly appreciated.
import cv2
import numpy as np
import time, datetime
import os
genericfilename = "recording"
filetime = str(time.time())
extension = '.avi'
filename = genericfilename + filetime +extension
frames_per_second = 100
res = '720p'
print("NEW FILE NAME: " + filename)
# Set resolution for the video capture
def change_res(cap, width, height):
    cap.set(3, width)
    cap.set(4, height)
# Standard Video Dimensions Sizes
STD_DIMENSIONS = {
    "480p": (640, 480),
    "720p": (1280, 720),
    "1080p": (1920, 1080),
    "4k": (3840, 2160),
}
# grab resolution dimensions and set video capture to it.
def get_dims(cap, res='1080p'):
    width, height = STD_DIMENSIONS["480p"]
    if res in STD_DIMENSIONS:
        width, height = STD_DIMENSIONS[res]
    # change the current capture device
    # to the resulting resolution
    change_res(cap, width, height)
    return width, height
# Video Encoding, might require additional installs
VIDEO_TYPE = {
    'avi': cv2.VideoWriter_fourcc(*'XVID'),
    #'mp4': cv2.VideoWriter_fourcc(*'H264'),
    'mp4': cv2.VideoWriter_fourcc(*'XVID'),
}
def get_video_type(filename):
    filename, ext = os.path.splitext(filename)
    if ext in VIDEO_TYPE:
        return VIDEO_TYPE[ext]
    return VIDEO_TYPE['avi']
capture = cv2.VideoCapture(0)
out = cv2.VideoWriter(filename, get_video_type(filename), 60,
                      get_dims(capture, res))
while(True):
    ret, frame = capture.read()
    out.write(frame)
    grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    font = cv2.FONT_ITALIC = 1
    cv2.putText(grayFrame, str(datetime.datetime.now()), (-330, 460), font, 3,
                (200, 200, 200), 2, cv2.LINE_AA)
    cv2.imshow('combilift output', grayFrame)
    # Press Q on keyboard to exit
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    if cv2.waitKey(1) & 0xFF == ord('r'):
        print(datetime.datetime.now())
capture.release()
out.release()
cv2.destroyAllWindows()
You save the frame to the video first, and only then convert the frame to gray:
out.write(frame)
grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
If you want your recorded video to be gray, maybe reverse the order of operations and save grayFrame?
grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
out.write(grayFrame)
If you also want to save the timestamp text, put the text on the frame before writing it to the output.
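Putting that together, a minimal sketch could look like the following (the filename, the 20 fps and the text position are just illustrative, and whether isColor=False is honoured depends on the codec/backend):

import datetime
import cv2

capture = cv2.VideoCapture(0)

# read one frame first so the writer can be sized to what the camera really delivers
ret, frame = capture.read()
h, w = frame.shape[:2]

# isColor=False lets the writer accept single-channel (gray) frames
out = cv2.VideoWriter('recording_gray.avi', cv2.VideoWriter_fourcc(*'XVID'),
                      20, (w, h), isColor=False)

while ret:
    # convert and stamp first, so the written frames match what is shown on screen
    grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.putText(grayFrame, str(datetime.datetime.now()), (10, h - 20),
                cv2.FONT_ITALIC, 1, (200, 200, 200), 2, cv2.LINE_AA)
    out.write(grayFrame)
    cv2.imshow('combilift output', grayFrame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    ret, frame = capture.read()

capture.release()
out.release()
cv2.destroyAllWindows()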
Let's take a look at your code:
out = cv2.VideoWriter(filename, get_video_type(filename), 60,
.....
while(True):
    ret, frame = capture.read()
    out.write(frame)
    grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
You first write to out and then convert the colour.
The correct sequence should be:
out = cv2.VideoWriter(filename, get_video_type(filename), 60,
.....
while(True):
    ret, frame = capture.read()
    grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    out.write(grayFrame)
I don't have data to test with. In case you run into issues with the number of channels, you can use OpenCV's merge((grayFrame, grayFrame, grayFrame)) to create a normal 3-channel grayscale image and save that to the video.
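For example, reusing the grayFrame and out from the code above (untested sketch):

# grayFrame is (h, w); stacking it three times gives (h, w, 3),
# which a VideoWriter opened in the default colour mode will accept
threeChannelGray = cv2.merge((grayFrame, grayFrame, grayFrame))
out.write(threeChannelGray)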