GIF only shows the first frame - PIL - python

from PIL import Image, ImageSequence

dye = Image.open(gif_filename)
img = Image.open(jpg_filename)

# Composite the overlay image onto every frame of the animated GIF.
frames = []
for frame in ImageSequence.Iterator(dye):
    frame = frame.convert("RGBA").copy()
    frame.paste(img, (0, 0), img)
    frames.append(frame)

# save_all + append_images writes every frame; without an explicit
# duration/loop many viewers display only the first frame and stop.
frames[0].save(
    'character.gif',
    save_all=True,
    append_images=frames[1:],
    duration=dye.info.get('duration', 100),  # ms per frame, from the source GIF
    loop=0,  # 0 = loop forever
)

# NOTE: Image.open() always positions an animated GIF on frame 0;
# use ImageSequence.Iterator (or .seek()) to access the other frames.
character = Image.open('character.gif')
character is supposed to be a gif, but it just returns the first frame of the gif.

Related

How to append an image to a video using OpenCV or FFMPEG or Moviepy or other libraries?

Do you know a library in Python to add a frame image to an existing video? The result video must have the same quality as the image.
I tried to use OpenCV to add google image: https://www.google.com/search?q=google&sxsrf=ALiCzsZhrdoHnOTmg0We4dxtguCqzma5Jg:1657603343101&source=lnms&tbm=isch&sa=X&ved=2ahUKEwiTh8bTzfL4AhWhplYBHfXNAKwQ_AUoAXoECAIQAw&biw=1492&bih=739&dpr=1.25#imgrc=PRtenhDnrVrfOM
But the quality decreases as the video gets longer.
Here is the final result video : https://drive.google.com/file/d/1ArDvoX-kN9H_oLbACk3kU1Cid93SMczC/view?usp=sharing
Here is my code using OpenCV:
image = cv2.imread(path_image)
height, width, dimensions = image.shape

# Read every frame of the source video into memory.
video = cv2.VideoCapture(path_video)
frames = []
while True:
    ret, frame = video.read()
    if not ret:
        break
    frames.append(frame)
video.release()  # close the reader before rewriting the same file
# (the original cv2.waitKey "press S to stop" check was removed: waitKey
# only receives key events when a HighGUI window is shown, so it was a no-op)

# Re-encode the original frames followed by the extra image.
# NOTE(review): MJPG is lossy — rewriting path_video in place re-encodes the
# whole video on every run, which is what degrades the quality; write to a
# new file (or use a lossless codec) to avoid generation loss.
video2 = cv2.VideoWriter(path_video, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                         30, (width, height))
for frame in frames:
    video2.write(frame)
video2.write(image)
video2.release()  # releasing the video generated
print("Added {}".format(image_name))
I hope to improve the quality of this video.
I guess a simple way to achieve that using moviepy would be the following:
from moviepy.editor import *
from PIL import Image
import numpy as np

maxsize = (target_width, target_height)

# Downscale the image in place so its resolution matches the video's.
jpg_image = Image.open(path_to_the_image)
# Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the same filter.
jpg_image.thumbnail(maxsize, Image.LANCZOS)
jpg_image = np.array(jpg_image)

# Show the still image for target_duration seconds, then the video.
image = ImageClip(jpg_image).set_duration(target_duration)
clip = VideoFileClip(path_to_the_video)

# moviepy's bare `concatenate` is deprecated; use concatenate_videoclips.
video = concatenate_videoclips([image, clip], method="compose")
video.write_videofile("output_example.mp4", fps=target_fps)
As long as you set the target resolution of the image to match the video's one you are set with just that.

How to read grayscale img from a video with OpenCV?

I read all the pictures from my pic directory, convert each one to grayscale with Canny edge detection, and then write them all to a video.
But, when I use my video software to play it, it shows a green background, and I can't read video frames from it. Could someone show me how to solve it?
Sample code
import numpy as np
import cv2 as cv
import glob  # was missing; glob.glob1 is used below
import matplotlib.pyplot as plt

# "I420" expects color (YUV planar) frames; "GREY" matches the
# single-channel grayscale frames produced by cv.Canny (isColor=0).
fourcc = cv.VideoWriter_fourcc(*"GREY")
out = cv.VideoWriter("t2.avi", fourcc, 1, (640, 480), 0)

for pic in glob.glob1("./pic/", "A*"):
    img = cv.imread(f"./pic/{pic}", -1)
    edge = cv.Canny(img, 100, 200)       # single-channel edge map
    edge = cv.resize(edge, (640, 480))   # must match the writer's frame size
    out.write(edge)
out.release()

# Read the first frame back to verify the file is playable.
cap = cv.VideoCapture("t2.avi")
ret, frame = cap.read()
if ret:
    plt.imshow(frame)
else:
    print("end")
cap.release()
Video plays with green background
It looks like a compatibility issue between I420 FOURCC and Grayscale format.
Replace fourcc = cv.VideoWriter_fourcc(*"I420") with:
fourcc = cv.VideoWriter_fourcc(*"GREY")
Note:
I am using OpenCV 4.5.5 in Windows 10, and it's working with "GREY".
I am not sure it's going to work in all platforms and versions.
I420 applies colored video.
You may use I420 with colored video:
Replace out = cv.VideoWriter("t2.avi", fourcc, 1, (640, 480), 0) with:
out = cv.VideoWriter("t2.avi", fourcc, 1, (640, 480), 1)
Convert edge to BGR before writing:
edge = cv.cvtColor(edge, cv.COLOR_GRAY2BGR)
out.write(edge)
Code sample using "GREY" FOURCC:
import numpy as np
import cv2 as cv
import glob

# GREY FOURCC matches the single-channel frames written below
# (I420 would require color input).
fourcc = cv.VideoWriter_fourcc(*"GREY")
out = cv.VideoWriter("t2.avi", fourcc, 1, (640, 480), 0)

# One video frame per matching picture: a resized Canny edge map.
for name in glob.glob1("./pic/", "A*"):
    image = cv.imread(f"./pic/{name}", -1)
    edges = cv.resize(cv.Canny(image, 100, 200), (640, 480))
    out.write(edges)
out.release()

# Play the video back one frame per second.
cap = cv.VideoCapture("t2.avi")
while True:
    ret, frame = cap.read()
    if not ret:
        print("end")
        cap.release()
        break
    cv.imshow('frame', frame)
    cv.waitKey(1000)
cv.destroyAllWindows()

How to get the frame_width of an image with OpenCV?

I try to get the frame_width of an image with the following code:
"""Predict the gender of the faces showing in the image"""
# Read Input Image
img = cv2.imread(imag)
# resize the image, uncomment if you want to resize the image
img = cv2.resize(img, (frame_width, frame_height))
# Take a copy of the initial image and resize it
frame = img.copy()
print(frame.shape[1])
if frame.shape[1] > frame_width:
frame = image_resize(frame, width=frame_width)
# predict the faces
faces = get_faces(frame)
Following my main method were I call the method with an image:
if __name__ == '__main__':
predict_gender("/Users/$$$/Downloads/test.jpg")
I already tried importing tkinter because I thought I needed it for the frame, but that did not work either. Here is my current error:
<ipython-input-10-2d047afa91e4> in predict_gender(imag)
4 img = cv2.imread(imag)
5 # resize the image, uncomment if you want to resize the image
----> 6 img = cv2.resize(img, (frame_width, frame_height))
7 # Take a copy of the initial image and resize it
8 frame = img.copy()
NameError: name 'frame_width' is not defined
You need to define frame_width and frame_height.
img = cv2.imread(imag)
# Take the target size from the image itself (shape is height, width, channels).
frame_height, frame_width = img.shape[:2]
# resize the image, uncomment if you want to resize the image
img = cv2.resize(img, (frame_width, frame_height))

Write 2 images into a video using OpenCV (Python) of 10 seconds duration

I am trying to take 2 images present in a local folder and use OpenCV's Videowriter function to create a video from these images.
I am using FrameRate of 1. So this creates a video of 2 seconds duration. Below is the code (I got it from here):
import cv2
import os

image_folder = 'images'
video_name = 'video.avi'

# Gather the PNG files; the first one fixes the output frame size.
images = [img for img in os.listdir(image_folder) if img.endswith(".png")]
frame = cv2.imread(os.path.join(image_folder, images[0]))
height, width, layers = frame.shape

# At 1 fps each image is displayed for one second.
# NOTE(review): 0 as the fourcc relies on the container default — confirm.
video = cv2.VideoWriter(video_name, 0, 1, (width, height))
for image in images:
    video.write(cv2.imread(os.path.join(image_folder, image)))

cv2.destroyAllWindows()
video.release()
Goal: I want to create a video of 10 seconds which will have these 2 images each being displayed for 5 seconds.
I think there might be a similar question like this in the forum but I am not able to find it. If someone can point me to the solution, it would be great.
Thank you.
The code snippet below should solve your problem. Notice that you will have to specify each_image_duration; I used this variable to write each image to the video for a specific duration. For this use case you have to keep the fps at 1.0, so each video frame is displayed for 1.0 second — the fps is the 3rd argument to cv2.VideoWriter.
import cv2
import os

image_folder = 'images'
video_name = 'video.avi'
each_image_duration = 5  # seconds each image stays on screen
fourcc = cv2.VideoWriter_fourcc(*'XVID')  # video codec

images = [img for img in os.listdir(image_folder) if img.endswith(".png")]
first = cv2.imread(os.path.join(image_folder, images[0]))
height, width, layers = first.shape

# At 1.0 fps every written frame is shown for one second, so writing an
# image each_image_duration times displays it for that many seconds.
video = cv2.VideoWriter(video_name, fourcc, 1.0, (width, height))
for image in images:
    data = cv2.imread(os.path.join(image_folder, image))
    for _ in range(each_image_duration):
        video.write(data)

cv2.destroyAllWindows()
video.release()
Here you can calculate fps dynamically based on how many frames you have and how many seconds of video you want to make.
See code below:
import cv2
import os

image_folder = 'images'
video_name = 'video.avi'
fourcc = cv2.VideoWriter_fourcc(*'XVID')  # define the video codec

images = [img for img in os.listdir(image_folder) if img.endswith(".png")]
img_count = len(images)
video_secs = 10  # desired total length of the output video in seconds

frame = cv2.imread(os.path.join(image_folder, images[0]))
height, width, layers = frame.shape

# fps = frames / seconds, so img_count frames span exactly video_secs seconds.
video = cv2.VideoWriter(video_name, fourcc, float(img_count / video_secs),
                        (width, height))
for image in images:
    # BUG FIX: `image` is a filename string; VideoWriter.write needs the
    # decoded pixel array, so load the file first.
    video.write(cv2.imread(os.path.join(image_folder, image)))

cv2.destroyAllWindows()
video.release()

render jpg byte array sequence with python

I receive a jpg sequence from network and wish to draw it on screen.
I want to use turtle to draw it but now I only know turtle accept gif as background image but not jpg.
#!/usr/bin/env python
"""Display a GIF image as a turtle shape (turtle only accepts GIF)."""
import turtle

image = "demo.gif"
screen = turtle.Screen()
screen.addshape(image)  # register the GIF with the screen first
turtle.shape(image)     # then use it as the turtle's shape
turtle.mainloop()
How can I draw jpg from byte array to turtle? Or any other better framework to show video sequence?
OpenCV can do it perfectly!
#!/usr/bin/env python
import cv2
import numpy as np
import time


def openfile(filename):
    """Decode a JPEG from raw bytes (as if received over the network)."""
    # BUG FIX: the path was hard-coded to "test.jpg"; use the parameter.
    # `with` guarantees the file handle is closed even on error.
    with open(filename, "rb") as fd:
        data = fd.read()
    buf = np.array(bytearray(data))
    return cv2.imdecode(buf, 1)  # 1 = decode as a color (BGR) image


def openfile2(filename):
    """Decode a JPEG straight from disk (simpler equivalent)."""
    return cv2.imread(filename)


frame = openfile("test.jpg")
title = "jpeg"
cv2.namedWindow(title, cv2.WINDOW_AUTOSIZE)
cv2.imshow(title, frame)
cv2.waitKey(0)
cv2.destroyAllWindows()

Categories