I am trying to get the frame_width of an image with the following code:
"""Predict the gender of the faces showing in the image"""
# Read Input Image
img = cv2.imread(imag)
# resize the image, uncomment if you want to resize the image
img = cv2.resize(img, (frame_width, frame_height))
# Take a copy of the initial image and resize it
frame = img.copy()
print(frame.shape[1])
if frame.shape[1] > frame_width:
frame = image_resize(frame, width=frame_width)
# predict the faces
faces = get_faces(frame)
Below is my main method, where I call the function with an image:
if __name__ == '__main__':
    predict_gender("/Users/$$$/Downloads/test.jpg")
I already tried importing tkinter because I thought I needed tkinter for the frame, but that did not work either. Here is my current error:
<ipython-input-10-2d047afa91e4> in predict_gender(imag)
4 img = cv2.imread(imag)
5 # resize the image, uncomment if you want to resize the image
----> 6 img = cv2.resize(img, (frame_width, frame_height))
7 # Take a copy of the initial image and resize it
8 frame = img.copy()
NameError: name 'frame_width' is not defined
You need to define frame_width and frame_height before using them. If you just want to keep the image's current size, you can take them from the image itself:
img = cv2.imread(imag)
frame_height, frame_width, _ = img.shape
# resize the image, uncomment if you want to resize the image
img = cv2.resize(img, (frame_width, frame_height))
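If you actually want to resize to a fixed resolution instead, just define the two names yourself before the call. A minimal sketch, assuming a hypothetical 1280x720 target (these values are only an example, not from the original code):
import cv2

# Hypothetical fixed target resolution; pick whatever size you actually need.
frame_width, frame_height = 1280, 720

img = cv2.imread(imag)  # 'imag' is the path argument from the question's function
img = cv2.resize(img, (frame_width, frame_height))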
Do you know of a Python library that can add a frame image to an existing video? The resulting video must have the same quality as the image.
I tried to use OpenCV to add a Google image: https://www.google.com/search?q=google&sxsrf=ALiCzsZhrdoHnOTmg0We4dxtguCqzma5Jg:1657603343101&source=lnms&tbm=isch&sa=X&ved=2ahUKEwiTh8bTzfL4AhWhplYBHfXNAKwQ_AUoAXoECAIQAw&biw=1492&bih=739&dpr=1.25#imgrc=PRtenhDnrVrfOM
But the quality decreases as the video gets longer.
Here is the final result video: https://drive.google.com/file/d/1ArDvoX-kN9H_oLbACk3kU1Cid93SMczC/view?usp=sharing
Here is my code using OpenCV:
image = cv2.imread(path_image)
height, width, dimensions = image.shape
video = cv2.VideoCapture(path_video)
frames = []
while(True):
    ret, frame = video.read()
    if ret == True:
        frames.append(frame)
        # frame = frame.resize(frame, (width, height), fx=0, fy=0, interpolation = cv2.INTER_CUBIC)
        # Press S on keyboard
        # to stop the process
        if cv2.waitKey(1) & 0xFF == ord('s'):
            break
    # Break the loop
    else:
        break
video2 = cv2.VideoWriter(path_video, cv2.VideoWriter_fourcc('M','J','P','G'), 30, (width, height))
for frame in frames:
    video2.write(frame)
video2.write(image)
video2.release()  # releasing the video generated
print("Added {}".format(image_name))
I hope to improve the quality of this video.
I guess a simple way to achieve that using moviepy would be the following:
from moviepy.editor import *
from PIL import Image
import numpy as np
maxsize = (target_width, target_height)
jpg_image = Image.open(path_to_the_image)
# modify the image's resolution to be the target one
jpg_image.thumbnail(maxsize, Image.LANCZOS)
jpg_image = np.array(jpg_image)
image = ImageClip(jpg_image).set_duration(target_duration)
clip = VideoFileClip(path_to_the_video)
video = concatenate_videoclips([image, clip], method="compose")
video.write_videofile("output_example.mp4", fps=target_fps)
As long as you set the target resolution of the image to match the video's, that is all you need.
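If you need target_width and target_height, one way (an assumption on my part, not part of the original answer) is to take them from the video clip itself, since VideoFileClip exposes its size:
from moviepy.editor import VideoFileClip

clip = VideoFileClip(path_to_the_video)
# clip.size is (width, height); use it so the image matches the video's resolution
target_width, target_height = clip.size
maxsize = (target_width, target_height)
With that, the thumbnail call shrinks the image to exactly the video's width and height.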
from PIL import Image, ImageSequence
dye = Image.open(gif_filename)
img = Image.open(jpg_filename)
frames = []
for frame in ImageSequence.Iterator(dye):
    frame = frame.convert("RGBA").copy()
    frame.paste(img, (0,0), img)
    frames.append(frame)
frames[0].save('character.gif', save_all=True, append_images=frames[1:])
character = Image.open('character.gif')
character is supposed to be a gif, but it just returns the first frame of the gif.
I want to overlay webcam-captured video on another video, live. I have tried the code below, but it's too slow.
Should I switch to another language or library, or something else?
Any suggestion or help will be appreciated.
import numpy as np
import cv2

def image_resize(image, width = None, height = None, inter = cv2.INTER_AREA):
    # initialize the dimensions of the image to be resized and
    # grab the image size
    dim = None
    (h, w) = image.shape[:2]
    # if both the width and height are None, then return the
    # original image
    if width is None and height is None:
        return image
    # check to see if the width is None
    if width is None:
        # calculate the ratio of the height and construct the
        # dimensions
        r = height / float(h)
        dim = (int(w * r), height)
    # otherwise, the height is None
    else:
        # calculate the ratio of the width and construct the
        # dimensions
        r = width / float(w)
        dim = (width, int(h * r))
    # resize the image
    resized = cv2.resize(image, dim, interpolation = inter)
    # return the resized image
    return resized

cap2 = cv2.VideoCapture('http://192.168.43.1:8080/video')
cap = cv2.VideoCapture('test.mp4')
# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('sample3.mp4', fourcc, 30, (640,480))
# watermark = logo
# cv2.imshow("watermark",watermark)
while(cap.isOpened()):
    ret, frame = cap.read()
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2BGRA)
    ret2, frame2 = cap2.read()
    frame2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2BGRA)
    watermark = image_resize(frame2, height=177)
    if ret == True:
        frame_h, frame_w, frame_c = frame.shape
        overlay = np.zeros((frame_h, frame_w, 4), dtype='uint8')
        overlay[543:543+177, 1044:1044+236] = watermark
        cv2.addWeighted(frame, 0.25, overlay, 1.0, 0, frame)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGRA2BGR)
        out.write(frame)
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    else:
        break
# Release everything if job is finished
cap.release()
out.release()
cv2.destroyAllWindows()
The requirement is to smoothly overlay the live webcam video at the bottom of the other video.
Thanks.
Basically, if you want it to be fast, you must not iterate over an image with a Python loop. You are trying to copy the scaled-down image into the empty overlay using two nested loops, and that is terribly slow. I do not understand the condition
if watermark[i,j][3] != 0:
Also this part:
offset = 0
h_offset = frame_h - watermark_h -offset
w_offset = frame_w - watermark_w - offset
should be outside the loop; they are all constants.
But most importantly, instead of looping over the image you can do:
overlay[h_offset:h_offset+watermark_h, w_offset:w_offset+watermark_w] = watermark
After this, I am up from 9 fps to 28 fps.
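For reference, here is a minimal sketch of that slice assignment, reusing the names from the question's code (watermark, frame_h, frame_w); the masked variant at the end is only an assumption about what the original per-pixel alpha check was meant to do:
import numpy as np

watermark_h, watermark_w = watermark.shape[:2]
h_offset = frame_h - watermark_h   # constants: compute once, outside any loop
w_offset = frame_w - watermark_w

overlay = np.zeros((frame_h, frame_w, 4), dtype='uint8')
# copy the whole watermark in one vectorized assignment
overlay[h_offset:h_offset + watermark_h, w_offset:w_offset + watermark_w] = watermark

# if only non-transparent pixels should be copied, use a boolean mask instead:
# mask = watermark[:, :, 3] != 0
# overlay[h_offset:h_offset + watermark_h, w_offset:w_offset + watermark_w][mask] = watermark[mask]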
I am trying to take 2 images present in a local folder and use OpenCV's VideoWriter function to create a video from these images.
I am using a frame rate of 1, so this creates a video of 2 seconds' duration. Below is the code (I got it from here):
import cv2
import os
image_folder = 'images'
video_name = 'video.avi'
images = [img for img in os.listdir(image_folder) if img.endswith(".png")]
frame = cv2.imread(os.path.join(image_folder, images[0]))
height, width, layers = frame.shape
video = cv2.VideoWriter(video_name, 0, 1, (width,height))
for image in images:
    video.write(cv2.imread(os.path.join(image_folder, image)))
cv2.destroyAllWindows()
video.release()
Goal: I want to create a 10-second video in which each of these 2 images is displayed for 5 seconds.
I think there might be a similar question in the forum, but I am not able to find it. If someone can point me to the solution, that would be great.
Thank you.
The code snippet below should solve your problem. Notice that you will have to specify each_image_duration; I used this variable to write each image to the video for a specific duration. For this use case, you have to keep the fps at 1.0, so that each video frame is displayed for 1.0 second; the fps is the third argument to cv2.VideoWriter.
import cv2
import os
image_folder = 'images'
video_name = 'video.avi'
each_image_duration = 5 # in secs
fourcc = cv2.VideoWriter_fourcc(*'XVID') # define the video codec
images = [img for img in os.listdir(image_folder) if img.endswith(".png")]
frame = cv2.imread(os.path.join(image_folder, images[0]))
height, width, layers = frame.shape
video = cv2.VideoWriter(video_name, fourcc, 1.0, (width, height))
for image in images:
    for _ in range(each_image_duration):
        video.write(cv2.imread(os.path.join(image_folder, image)))
cv2.destroyAllWindows()
video.release()
Here you can calculate fps dynamically based on how many frames you have and how many seconds of video you want to make.
See code below:
import cv2
import os
image_folder = 'images'
video_name = 'video.avi'
fourcc = cv2.VideoWriter_fourcc(*'XVID') # define the video codec
images = [img for img in os.listdir(image_folder) if img.endswith(".png")]
img_count = len(images)
video_secs = 10
frame = cv2.imread(os.path.join(image_folder, images[0]))
height, width, layers = frame.shape
video = cv2.VideoWriter(video_name, fourcc, float(img_count/video_secs), (width, height))
for image in images:
    video.write(cv2.imread(os.path.join(image_folder, image)))
cv2.destroyAllWindows()
video.release()
I am trying to get my Raspberry Pi B+ to use a USB webcam to measure distances between it and an object of fixed width (11.0 inches).
I am following this guide now. However, instead of using static images, I am using a video feed from my webcam.
This is the code I am trying to run:
import argparse
import datetime
import imutils
import time
import cv2
import numpy as np

def find_marker(frame):
    # convert the image to grayscale, blur it, and detect edges
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(gray, 35, 125)
    # find the contours in the edged image and keep the largest one;
    # we'll assume that this is our piece of paper in the image
    (cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    c = max(cnts, key = cv2.contourArea)
    # compute the bounding box of the paper region and return it
    return cv2.minAreaRect(c)

def distance_to_camera(knownWidth, focalLength, perWidth):
    # compute and return the distance from the marker to the camera
    return (knownWidth * focalLength) / perWidth

#======================================================================
# main is here
# initialize the known distance from the camera to the object, which
# in this case is 24 inches
KNOWN_DISTANCE = 24.0
# initialize the known object width, which in this case, the piece of
# paper is 11 inches wide
KNOWN_WIDTH = 11.0
frame = cv2.VideoCapture(0)
marker = find_marker(frame)
focalLength = (marker[1][0] * KNOWN_DISTANCE) / KNOWN_WIDTH
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the video file")
ap.add_argument("-a", "--min-area", type=int, default=500, help="minimum area size")
args = vars(ap.parse_args())
# if the video argument is None, then we are reading from webcam
if args.get("video", None) is None:
    camera = cv2.VideoCapture(0)
    time.sleep(0.25)
# otherwise, we are reading from a video file
else:
    camera = cv2.VideoCapture(args["video"])
# loop over the frames of the video
while True:
    # grab the current frame and initialize the occupied/unoccupied
    # text
    (grabbed, frame) = camera.read()
    # if the frame could not be grabbed, then we have reached the end
    # of the video
    if not grabbed:
        break
    # resize the frame, convert it to grayscale, and blur it
    frame = imutils.resize(frame, width=500)
    marker = find_marker(frame)
    inches = distance_to_camera(KNOWN_WIDTH, focalLength, marker[1][0])
    # draw a bounding box around the image and display it
    box = np.int0(cv2.cv.BoxPoints(marker))
    cv2.drawContours(frame, [box], -1, (0, 255, 0), 2)
    cv2.putText(frame, "%.2fft" % (inches / 12),
                (frame.shape[1] - 200, frame.shape[0] - 20), cv2.FONT_HERSHEY_SIMPLEX,
                2.0, (0, 255, 0), 3)
    cv2.imshow("Frame", frame)
    cv2.waitKey(0)
However, this is the output I get when I try to run it:
Traceback (most recent call last):
File "testcam.py", line 39, in <module>
marker = find_marker(frame)
File "testcam.py", line 10, in find_marker
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
TypeError: src is not a numpy array, neither a scalar
I am new to OpenCV, so I am unsure what this error means.
The problem is this line:
frame = cv2.VideoCapture(0)
cv2.VideoCapture(0) only initializes the capture (camera) device. To fetch a frame from it, you need to call cap.read(); instead, you passed the capture object itself to find_marker, which is what caused the error.
It should be:
capForFocal = cv2.VideoCapture(0)
_, frame = capForFocal.read()
capForFocal.release()
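Combined with the question's calibration step, the corrected snippet would look roughly like this (a sketch; it assumes the calibration frame actually shows the 11-inch marker at KNOWN_DISTANCE):
# grab a single calibration frame instead of passing the VideoCapture object
capForFocal = cv2.VideoCapture(0)
_, frame = capForFocal.read()
capForFocal.release()

marker = find_marker(frame)
focalLength = (marker[1][0] * KNOWN_DISTANCE) / KNOWN_WIDTH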