Python OpenCV subprocess write returns broken pipe - python

I want to read an RTSP video source, add overlay text, and push it to an RTMP endpoint. I am using cv2.VideoCapture to read the video source and a Python subprocess to write the frames back to the RTMP endpoint. I referred to this post: FFmpeg stream video to rtmp from frames OpenCV python
import sys
import subprocess
import cv2
import ffmpeg

rtmp_url = "rtmp://127.0.0.1:1935/live/test"
path = 0
cap = cv2.VideoCapture("rtsp://10.0.1.7/media.sdp")

# gather video info to ffmpeg
fps = int(cap.get(cv2.CAP_PROP_FPS))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

command = ['ffmpeg', '-i', '-', '-c', 'copy', '-f', 'flv', rtmp_url]
p = subprocess.Popen(command, stdin=subprocess.PIPE, stderr=subprocess.PIPE, stdout=subprocess.PIPE)

font = cv2.FONT_HERSHEY_SIMPLEX
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        print("frame read failed")
        break
    cv2.putText(frame, 'TEXT ON VIDEO', (50, 50), font, 1, (0, 255, 255), 2, cv2.LINE_4)
    cv2.imshow('video', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    try:
        p.stdin.write(frame.tobytes())
    except Exception as e:
        print(e)

cap.release()
p.stdin.close()
p.stderr.close()
p.wait()
The Python script returns "[Errno 32] Broken pipe". Running the ffmpeg command in the terminal works fine:
ffmpeg -i rtsp://10.0.1.7/media.sdp -c copy -f flv rtmp://127.0.0.1:1935/live/test
The above command works fine, and I can push the input stream to the RTMP endpoint. But I can't write processed frames to the subprocess that runs ffmpeg.
Please let me know if I missed anything.

You can't use '-c', 'copy' when writing raw frames to the stdin PIPE.
The frame returned by ret, frame = cap.read() is a uint8 NumPy array in BGR color format (cap.read() decodes the video and converts the color format).
In FFmpeg terminology, this frame format is "rawvideo".
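As a quick sanity check (a minimal sketch, assuming the RTSP source from the question opens successfully), you can verify the raw frame layout before piping it:

import cv2

cap = cv2.VideoCapture("rtsp://10.0.1.7/media.sdp")
ret, frame = cap.read()
if ret:
    print(frame.dtype)   # uint8 - 8 bits per color channel
    print(frame.shape)   # (height, width, 3) - BGR channel order
    print(frame.nbytes)  # height*width*3 bytes - exactly what FFmpeg reads per rawvideo/bgr24 frame
cap.release()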
The command should tell FFmpeg to expect raw video as input, with a specific size and pixel format:
command = ['ffmpeg', '-f', 'rawvideo', '-s', f'{width}x{height}', '-pixel_format', 'bgr24', ...
Because the input is raw video, we have to re-encode it.
We may specify the encoded pixel format and video codec:
'-pix_fmt', 'yuv420p', '-c:v', 'libx264', ...
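Putting both pieces together, the corrected command for the question's code would look something like this (a sketch; the rate-control arguments are shown in the complete samples below):

command = ['ffmpeg',
           '-f', 'rawvideo',           # Input is raw video frames (no container)
           '-s', f'{width}x{height}',  # Must match the shape of the frames written to stdin
           '-pixel_format', 'bgr24',   # OpenCV frames are BGR with 8 bits per channel
           '-r', f'{fps}',
           '-i', '-',                  # Read the raw frames from the stdin pipe
           '-pix_fmt', 'yuv420p',      # Encoded pixel format
           '-c:v', 'libx264',          # Re-encode with H.264
           '-f', 'flv',
           rtmp_url]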
Remarks:
Decoding and re-encoding the video loses some quality (but there is no choice).
The suggested solution loses the audio (there are solutions that preserve the audio, but OpenCV lacks audio support).
The posted solution reuses some code from the following post.
A few FFmpeg arguments are used without explanation (like '-bufsize', '64M').
Executing a listener application:
The RTMP streaming is not going to work without a "listener" that receives the video.
The listener should be started before the RTMP streaming starts (due to TCP usage).
We may use an FFplay sub-process as the "listener" application:
ffplay_process = sp.Popen(['ffplay', '-listen', '1', '-i', rtmp_url])
Streaming synthetic video frames:
Start with a simpler code sample that streams synthetic frames (without capturing RTSP video).
The following "self contained" code sample writes yellow text on a gray background and passes the frames to FFmpeg for RTMP streaming:
import cv2
import numpy as np
import subprocess as sp

width = 320
height = 240
fps = 5
rtmp_url = "rtmp://127.0.0.1:1935/live/test"

# Start the TCP server first, before the sending client.
ffplay_process = sp.Popen(['ffplay', '-listen', '1', '-i', rtmp_url])  # Use FFplay sub-process for receiving the RTMP video.

command = ['ffmpeg',
           '-re',
           '-f', 'rawvideo',  # Apply raw video as input
           '-s', f'{width}x{height}',
           '-pixel_format', 'bgr24',
           '-r', f'{fps}',
           '-i', '-',
           '-pix_fmt', 'yuv420p',
           '-c:v', 'libx264',
           '-bufsize', '64M',
           '-maxrate', '4M',
           '-f', 'flv',
           rtmp_url]

process = sp.Popen(command, stdin=sp.PIPE)  # Execute FFmpeg sub-process for RTMP streaming

frame_counter = 0

while True:
    # Build a synthetic frame in BGR color format (3D NumPy array).
    frame = np.full((height, width, 3), 60, np.uint8)
    cv2.putText(frame, 'TEXT ON VIDEO ' + str(frame_counter), (20, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2, cv2.LINE_4)  # Put a frame counter for showing progress.

    process.stdin.write(frame.tobytes())  # Write raw frame to stdin pipe.

    cv2.imshow('frame', frame)  # Show frame for testing
    key = cv2.waitKey(int(round(1000/fps)))  # We need to call cv2.waitKey after cv2.imshow
    if key == ord('q'):  # Press 'q' to exit
        break

    frame_counter += 1

process.stdin.close()  # Close stdin pipe
process.wait()  # Wait for FFmpeg sub-process to finish
ffplay_process.kill()  # Forcefully close FFplay sub-process
cv2.destroyAllWindows()  # Close OpenCV window
Capturing video frames from an RTSP stream:
The following code sample captures video frames from a public RTSP stream, writes text on them, and passes the frames to FFmpeg for RTMP streaming:
import cv2
import numpy as np
import subprocess as sp

# Use a public RTSP stream for testing.
rtsp_stream = "rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mov"
rtmp_url = "rtmp://127.0.0.1:1935/live/test"

cap = cv2.VideoCapture(rtsp_stream)

# gather video info to ffmpeg
fps = int(cap.get(cv2.CAP_PROP_FPS))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

# Start the TCP server first, before the sending client.
ffplay_process = sp.Popen(['ffplay', '-listen', '1', '-i', rtmp_url])  # Use FFplay sub-process for receiving the RTMP video.

command = ['ffmpeg',
           '-re',
           '-f', 'rawvideo',  # Apply raw video as input
           '-s', f'{width}x{height}',
           '-pixel_format', 'bgr24',
           '-r', f'{fps}',
           '-i', '-',
           '-pix_fmt', 'yuv420p',
           '-c:v', 'libx264',
           '-bufsize', '64M',
           '-maxrate', '4M',
           '-f', 'flv',
           rtmp_url]

process = sp.Popen(command, stdin=sp.PIPE)  # Execute FFmpeg sub-process for RTMP streaming

frame_counter = 0

while cap.isOpened():
    # Read frame from RTSP stream.
    ret, frame = cap.read()

    if not ret:
        print("frame read failed")
        break

    cv2.putText(frame, 'TEXT ON VIDEO', (0, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2, cv2.LINE_4)
    cv2.putText(frame, str(frame_counter), (100, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 2, cv2.LINE_4)

    process.stdin.write(frame.tobytes())  # Write raw frame to stdin pipe.

    cv2.imshow('frame', frame)  # Show frame for testing
    key = cv2.waitKey(1)  # We need to call cv2.waitKey after cv2.imshow
    if key == ord('q'):  # Press 'q' to exit
        break

    frame_counter += 1

cap.release()
process.stdin.close()  # Close stdin pipe
process.wait()  # Wait for FFmpeg sub-process to finish
ffplay_process.kill()  # Forcefully close FFplay sub-process
cv2.destroyAllWindows()  # Close OpenCV window

Related

ffmpeg piped to python and displayed with cv2.imshow slides rightward and changes colors

Code:
import cv2
import time
import subprocess
import numpy as np

w, h = 1920, 1080
fps = 15

def ffmpegGrab():
    """Generator to read frames from ffmpeg subprocess"""
    cmd = f'.\\Resources\\ffmpeg.exe -f gdigrab -framerate {fps} -offset_x 0 -offset_y 0 -video_size {w}x{h} -i desktop -pix_fmt bgr24 -vcodec rawvideo -an -sn -f image2pipe -'
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
    while True:
        raw_frame = proc.stdout.read(w*h*3)
        frame = np.fromstring(raw_frame, np.uint8)
        frame = frame.reshape((h, w, 3))
        yield frame

# Get frame generator
gen = ffmpegGrab()
# Get start time
start = time.time()
# Read video frames from ffmpeg in loop
nFrames = 0
while True:
    # Read next frame from ffmpeg
    frame = next(gen)
    nFrames += 1
    frame = cv2.resize(frame, (w // 4, h // 4))
    cv2.imshow('screenshot', frame)
    if cv2.waitKey(1) == ord("q"):
        break

fps = nFrames/(time.time()-start)
print(f'FPS: {fps}')
cv2.destroyAllWindows()
The code does display the desktop capture, but the color format seems to switch and the video scrolls rightward as if it were repeated. Am I going about this in the correct way?
The cause of the problem is stderr=subprocess.STDOUT.
The argument stderr=subprocess.STDOUT redirects stderr to stdout.
stdout is used as a PIPE for reading the output video from the FFmpeg sub-process.
FFmpeg writes some text to stderr, and that text gets "mixed" with the raw video (due to the redirection). The "mixing" causes the weird sliding and color changes.
Replace proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True) with:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
Minor corrections:
As Mark Setchell commented, use np.frombuffer() instead of np.fromstring(), and avoid shell=True.
Replace -f image2pipe with -f rawvideo.
The output format is raw video and not an image (the code works with image2pipe, but rawvideo is more correct).
Complete updated code:
import cv2
import time
import subprocess
import numpy as np

w, h = 1920, 1080
fps = 15

def ffmpegGrab():
    """Generator to read frames from ffmpeg subprocess"""
    # Use "-f rawvideo" instead of "-f image2pipe" (the command works with image2pipe, but rawvideo is the correct format).
    cmd = f'.\\Resources\\ffmpeg.exe -f gdigrab -framerate {fps} -offset_x 0 -offset_y 0 -video_size {w}x{h} -i desktop -pix_fmt bgr24 -vcodec rawvideo -an -sn -f rawvideo -'

    #proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
    # Don't use stderr=subprocess.STDOUT, and don't use shell=True
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
    while True:
        raw_frame = proc.stdout.read(w*h*3)
        frame = np.frombuffer(raw_frame, np.uint8)  # Use frombuffer instead of fromstring
        frame = frame.reshape((h, w, 3))
        yield frame

# Get frame generator
gen = ffmpegGrab()
# Get start time
start = time.time()
# Read video frames from ffmpeg in loop
nFrames = 0
while True:
    # Read next frame from ffmpeg
    frame = next(gen)
    nFrames += 1
    frame = cv2.resize(frame, (w // 4, h // 4))
    cv2.imshow('screenshot', frame)
    if cv2.waitKey(1) == ord("q"):
        break

fps = nFrames/(time.time()-start)
print(f'FPS: {fps}')
cv2.destroyAllWindows()

pipe compressed stream to another process using ffmpeg subprocess

I want to get compressed data from a camera and send it to the client side before converting it to frames. I created dummy code, but I am getting an error that it cannot reshape an array of 0 bytes.
import cv2
import subprocess as sp
import numpy

IMG_W = 640
IMG_H = 480

FFMPEG_BIN = "/usr/bin/ffmpeg"
ffmpeg_cmd = [FFMPEG_BIN,
              '-i', 'h264.h264',
              '-vcodec', 'h264',
              '-f', 'image2pipe', '-']

ffmpeg_cmd2 = [FFMPEG_BIN,
               '-i', 'image2pipe',
               '-r', '30',  # FPS
               '-pix_fmt', 'bgr24',  # opencv requires bgr24 pixel format.
               '-vcodec', 'rawvideo',
               '-an', '-sn',  # disable audio processing
               '-f', 'image2pipe', '-']

pipe = sp.Popen(ffmpeg_cmd, stdout=sp.PIPE, bufsize=10)
pipe2 = sp.Popen(ffmpeg_cmd2, stdin=pipe.stdout, stdout=sp.PIPE, bufsize=10)

while True:
    raw_image = pipe2.stdout.read(IMG_W*IMG_H*3)
    image = numpy.fromstring(raw_image, dtype='uint8')  # convert read bytes to np
    image = image.reshape((IMG_H, IMG_W, 3))
    cv2.imshow('Video', image)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    pipe.stdout.flush()

cv2.destroyAllWindows()
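One likely cause (an assumption based on the error, not from an accepted answer) is that the second command reads from a file literally named image2pipe instead of from its stdin pipe, so pipe2 produces no output and the read returns 0 bytes. A sketch of the second command reading from stdin and emitting raw frames:

ffmpeg_cmd2 = [FFMPEG_BIN,
               '-i', '-',            # Read the piped H.264 stream from stdin
               '-r', '30',           # FPS
               '-pix_fmt', 'bgr24',  # OpenCV requires the bgr24 pixel format
               '-vcodec', 'rawvideo',
               '-an', '-sn',
               '-f', 'rawvideo', '-']  # Emit raw frames (not images) to stdout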

Pipe OpenCV and PyAudio to ffmpeg streaming youtube rtmp from python

How can I pipe OpenCV video and PyAudio audio to FFmpeg for streaming to a YouTube RTMP endpoint from Python?
The error message shows the following:
No such filter: 'pipe:1'
pipe:1: Invalid argument
Here is my code:
Import module
import cv2
import subprocess
import pyaudio
Audio
p = pyaudio.PyAudio()
info = p.get_host_api_info_by_index(0)
numdevices = info.get('deviceCount')
for i in range(0, numdevices):
    if (p.get_device_info_by_host_api_device_index(0, i).get('maxInputChannels')) > 0:
        print("Input Device id ", i, " - ", p.get_device_info_by_host_api_device_index(0, i).get('name'))

CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100

stream = p.open(format=FORMAT,
                channels=CHANNELS,
                rate=RATE,
                input=True,
                frames_per_buffer=CHUNK)
# Stream Audio data here
# data = stream.read(CHUNK)
Video
rtmp = r'rtmp://a.rtmp.youtube.com/live2/key'
cap = cv2.VideoCapture(0)
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = 30
command param here
command = ['ffmpeg',
           '-y',
           '-f', 'rawvideo',
           '-pixel_format', 'bgr24',
           '-video_size', "{}x{}".format(width, height),
           '-framerate', str(fps),
           '-i', 'pipe:0',
           '-re',
           '-f', 'lavfi',
           '-i', 'pipe:1',
           '-c:v', 'libx264',
           '-c:a', 'aac',
           '-vf', 'format=yuv420p',
           '-f', 'flv',
           rtmp]
Create subprocess to ffmpeg command
pipe = subprocess.Popen(command, shell=False, stdin=subprocess.PIPE)

while cap.isOpened():
    success, frame = cap.read()
    if success:
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        pipe.stdin.write(frame.tostring())
        pipe.stdin.write(stream.read(CHUNK))
Audio Stop
stream.stop_stream()
stream.close()
p.terminate()
Video Stop
cap.release()
pipe.terminate()
Thanks
I have successfully used CreateNamedPipe to create a named pipe for the video, and used stdin for the audio.
PIPE_NAME = r'\\.\pipe\VideoPipe'

video_pipe = win32pipe.CreateNamedPipe(
    PIPE_NAME,
    win32pipe.PIPE_ACCESS_OUTBOUND,
    win32pipe.PIPE_TYPE_BYTE | win32pipe.PIPE_READMODE_BYTE | win32pipe.PIPE_WAIT,
    1, 65536, 65536,
    0,
    None)
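A minimal continuation of that snippet might look like this (a sketch; the placeholder frame and the FFmpeg command are assumptions, and WriteFile only succeeds after FFmpeg opens its end of the pipe):

import numpy as np
import win32file
import win32pipe

# FFmpeg would be started with the named pipe as the video input, e.g.:
#   ffmpeg -f rawvideo -pixel_format bgr24 -video_size 640x480 -i \\.\pipe\VideoPipe ...
win32pipe.ConnectNamedPipe(video_pipe, None)      # Block until FFmpeg opens the pipe

frame = np.zeros((480, 640, 3), np.uint8)         # Hypothetical placeholder frame
win32file.WriteFile(video_pipe, frame.tobytes())  # Write one raw BGR frame
win32file.CloseHandle(video_pipe)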

Python gets stuck at pipe.stdin.write(image.tostring())

I am reading each frame of a video and adding a time stamp to it, as given below.
command = ['ffmpeg',
           '-y',  # (optional) overwrite output file if it exists
           '-f', 'rawvideo',  # Input is raw video
           '-pix_fmt', 'bgr24',  # Raw video format
           '-s', str(int(width)) + 'x' + str(int(height)),  # size of one frame
           '-i', '-',  # The input comes from a pipe
           '-an',  # Tells FFmpeg not to expect any audio
           '-vcodec', 'mpeg4',
           '-b:v', '10M',  # Sets a maximum bit rate
           Output_name]

# Open the pipe
pipe = sp.Popen(command, stdin=sp.PIPE, stderr=sp.PIPE)

print('Processing....')
print(' ')

# Reads through each frame, calculates the timestamp, places it on the frame and exports the frame to the output video.
#import pdb
#pdb.set_trace()
while current_frame < total_frames:
    success, image = video.read()
    if success:
        elapsed_time = video.get(cv2.CAP_PROP_POS_MSEC)
        current_frame = video.get(cv2.CAP_PROP_POS_FRAMES)
        timestamp = initial + dt.timedelta(microseconds=elapsed_time*1000)
        cv2.putText(image, 'Date: ' + str(timestamp)[0:10], (50, int(height-150)), cv2.FONT_HERSHEY_COMPLEX_SMALL, 2, (255, 255, 255), 3)
        cv2.putText(image, 'Time: ' + str(timestamp)[11:-4], (50, int(height-100)), cv2.FONT_HERSHEY_COMPLEX_SMALL, 2, (255, 255, 255), 3)
        pipe.stdin.write(image.tostring())
        print('frame number', current_frame)
    else:
        print('video reader fail')

video.release()
pipe.stdin.close()
pipe.stderr.close()
However, after around 18k frames, Python gets stuck at pipe.stdin.write(image.tostring()). It does not produce any error; it simply hangs. How can I resolve this issue?
Thanks in advance.
I think I solved the puzzle: the stderr buffer fills up and the process gets stuck.
I managed to reproduce the problem under Windows 10.
FFmpeg writes status text to stderr from time to time.
You are using stderr=sp.PIPE, but never reading from stderr.
After encoding many frames, the stderr buffer fills up and the process gets stuck.
You may either remove stderr=sp.PIPE, or make sure to read the data from stderr.
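If the FFmpeg status text is not needed at all, redirecting stderr to the null device avoids the extra thread (a minimal sketch of the simpler alternative):

pipe = sp.Popen(command, stdin=sp.PIPE, stderr=sp.DEVNULL)  # Discard FFmpeg's status output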
Reading the data from stderr may be performed using a thread:
# Read from pipe.stderr for "draining the pipe"
def drain_stderr():
    while True:
        try:
            stderr_output = pipe.stderr.readline()
        except:
            pass
I created a "self contained" code sample that generates a synthetic video file and then executes the code using the synthetic video as input.
Here is the testing code sample:
import numpy as np
import cv2
import subprocess as sp
import threading
import datetime as dt

# Generate synthetic video file - resolution 640x480, 30000 frames, 1 fps
# H.264 encoded video (for testing):
#########################################################################
input_name = 'test.mp4'
width, height = 640, 480
total_frames = 30000
sp.run('ffmpeg -y -f lavfi -i testsrc=size={}x{}:rate=1 -vcodec libx264 -crf 23 -t {} {}'.format(width, height, total_frames, input_name))
#########################################################################

# Read from pipe.stderr for "draining the pipe"
def drain_stderr():
    while keep_drain_stderr:
        try:
            stderr_output = pipe.stderr.readline()
        except:
            pass

Output_name = 'out.mp4'

command = ['ffmpeg',
           '-y',  # (optional) overwrite output file if it exists
           '-f', 'rawvideo',  # Input is raw video
           '-pix_fmt', 'bgr24',  # Raw video format
           '-s', str(int(width)) + 'x' + str(int(height)),  # size of one frame
           '-i', '-',  # The input comes from a pipe
           '-an',  # Tells FFmpeg not to expect any audio
           '-vcodec', 'mpeg4',
           '-b:v', '10M',  # Sets a maximum bit rate
           Output_name]

# Open the pipe
pipe = sp.Popen(command, stdin=sp.PIPE, stderr=sp.PIPE)

keep_drain_stderr = True
thread = threading.Thread(target=drain_stderr)
thread.start()

# Open video file for reading
video = cv2.VideoCapture(input_name)

print('Processing....')
print(' ')

# Reads through each frame, calculates the timestamp, places it on the frame and exports the frame to the output video.
initial = dt.timedelta(microseconds=0*1000)
current_frame = 0
while current_frame < total_frames:
    success, image = video.read()
    if success:
        elapsed_time = video.get(cv2.CAP_PROP_POS_MSEC)
        current_frame = video.get(cv2.CAP_PROP_POS_FRAMES)
        timestamp = initial + dt.timedelta(microseconds=elapsed_time*1000)
        cv2.putText(image, 'Date: ' + str(timestamp)[0:10], (50, int(height-150)), cv2.FONT_HERSHEY_COMPLEX_SMALL, 2, (255, 255, 255), 3)
        cv2.putText(image, 'Time: ' + str(timestamp)[11:-4], (50, int(height-100)), cv2.FONT_HERSHEY_COMPLEX_SMALL, 2, (255, 255, 255), 3)
        pipe.stdin.write(image.tostring())
        print('frame number', current_frame)
    else:
        print('video reader fail')

keep_drain_stderr = False
video.release()
pipe.stdin.close()
pipe.stderr.close()

# Wait 3 seconds before killing FFmpeg
try:
    pipe.wait(3)
except sp.TimeoutExpired:
    pipe.kill()

thread.join()

zbar not working on netcat video stream from raspberry pi read using OpenCV

I am streaming a video feed from a Raspberry Pi to my PC using netcat, and using zbar to read QR codes in the feed.
I am reading the named pipe using ffmpeg:
FFMPEG_BIN = "ffmpeg"
command = [FFMPEG_BIN,
           '-i', 'fifo264',  # fifo264 is the named pipe
           '-pix_fmt', 'gray',  # single-channel gray frames (zbar works on Y800)
           '-vcodec', 'rawvideo',
           '-an', '-sn',  # we want to disable audio processing (there is no audio)
           '-f', 'image2pipe', '-']
pipe = sp.Popen(command, stdout=sp.PIPE, bufsize=10**8)
zbar shows no output in this case.
Complete code:
import cv2
import subprocess as sp
import numpy
import zbar
from PIL import Image

FFMPEG_BIN = "ffmpeg"
command = [FFMPEG_BIN,
           '-i', 'fifo264',  # fifo264 is the named pipe
           '-pix_fmt', 'gray',  # single-channel gray frames (zbar works on Y800)
           '-vcodec', 'rawvideo',
           '-an', '-sn',  # we want to disable audio processing (there is no audio)
           '-f', 'image2pipe', '-']
pipe = sp.Popen(command, stdout=sp.PIPE, bufsize=10**8)

while True:
    # Capture frame-by-frame
    raw_image = pipe.stdout.read(1920*1088)
    # transform the bytes read into a numpy array
    image = numpy.fromstring(raw_image, dtype='uint8')
    image = image.reshape((1088, 1920))  # Notice how height is specified first and then width
    if image is not None:
        cv2.imshow('Video', image)
        image = Image.fromarray(image)
        width, height = image.size
        zbar_image = zbar.Image(width, height, 'Y800', image.tostring())
        # Scans the zbar image.
        scanner = zbar.ImageScanner()
        scanner.scan(zbar_image)
        # Prints data from image.
        for decoded in zbar_image:
            print(decoded.data)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    pipe.stdout.flush()

cv2.destroyAllWindows()
