GStreamer leaky queue stops the pipeline - Python

The following code takes a list of URLs as input and creates a GStreamer pipeline. Specifically, for each URL, a uridecodebin element is initialized and attached to a queue element. Each queue has the following properties: leaky=2, max-size-buffers=1 and flush-on-eos=1. When I start the pipeline, I can see from nvidia-smi dmon that some video is decoded (NVDEC is used). After a second, everything stops.
I would expect decoding to keep going, with frames being pushed into the queues and each queue dropping its old frame every time it receives a new one. Am I wrong?
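For reference, here is a minimal self-contained sketch (with videotestsrc and fakesink standing in for my real sources and sinks, so this is an assumption about the expected behaviour, not my actual code) in which such a leaky queue does keep dropping and flowing:
import gi
gi.require_version("Gst", "1.0")
from gi.repository import GLib, Gst

Gst.init(None)
# leaky=2 (downstream) with max-size-buffers=1: the queue holds a single
# buffer and drops the old one whenever a new one arrives; the pipeline
# keeps running because the queue's src pad feeds a sink.
pipeline = Gst.parse_launch(
    "videotestsrc is-live=true ! queue leaky=2 max-size-buffers=1 ! fakesink sync=false"
)
pipeline.set_state(Gst.State.PLAYING)
GLib.MainLoop().run()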
Code to initialize a Bin to decode the video (source_bin.py). You probably don't need to read it, but here it is:
import sys

from gi.repository import Gst

from pipeline.utils.pipeline import create_gst_elemement


class SourceBin:
    @classmethod
    def create_source_bin(cls, index: int, uri: str):
        # Create a source GstBin to abstract this bin's content from the
        # rest of the pipeline
        bin_name = "source-bin-%02d" % index
        nbin = Gst.Bin.new(bin_name)
        if not nbin:
            sys.stderr.write(" Unable to create source bin \n")
        # Source element for reading from the uri.
        # We will use decodebin and let it figure out the container format of the
        # stream and the codec, and plug the appropriate demux and decode plugins.
        uri_decode_bin = create_gst_elemement("uridecodebin", f"uridecodebin_{index}")
        # We set the input uri to the source element
        uri_decode_bin.set_property("uri", uri)
        # Connect to the "pad-added" signal of the decodebin, which generates a
        # callback once a new pad for raw data has been created by the decodebin
        uri_decode_bin.connect("pad-added", cls.callback_newpad, nbin)
        uri_decode_bin.connect("pad-removed", cls.callback_pad_removed, nbin)
        uri_decode_bin.connect("no-more-pads", cls.callback_no_more_pads, nbin)
        uri_decode_bin.connect("child-added", cls.decodebin_child_added, nbin)
        # We need to create a ghost pad for the source bin which will act as a proxy
        # for the video decoder src pad. The ghost pad will not have a target right
        # now. Once the decode bin creates the video decoder and generates the
        # cb_newpad callback, we will set the ghost pad target to the video decoder
        # src pad.
        Gst.Bin.add(nbin, uri_decode_bin)
        bin_pad = nbin.add_pad(
            Gst.GhostPad.new_no_target("src", Gst.PadDirection.SRC)
        )
        if not bin_pad:
            sys.stderr.write(" Failed to add ghost pad in source bin \n")
            return None
        # Connect bus
        return nbin

    @classmethod
    def callback_newpad(cls, uridecodebin, uridecodebin_new_src_pad, data):
        print(f"SourceBin: added pad {uridecodebin_new_src_pad.name} to {uridecodebin.name}")
        caps = uridecodebin_new_src_pad.get_current_caps()
        gststruct = caps.get_structure(0)
        gstname = gststruct.get_name()
        source_bin = data
        features = caps.get_features(0)
        # Need to check if the pad created by the decodebin is for video and not audio.
        if gstname.find("video") != -1:
            # Link the decodebin pad only if decodebin has picked the nvidia
            # decoder plugin nvdec_*. We do this by checking if the pad caps
            # contain NVMM memory features.
            if features.contains("memory:NVMM"):
                # Get the source bin ghost pad
                bin_ghost_pad = source_bin.get_static_pad("src")
                if not bin_ghost_pad.set_target(uridecodebin_new_src_pad):
                    sys.stderr.write(
                        "Failed to link decoder src pad to source bin ghost pad\n"
                    )
            else:
                sys.stderr.write(" Error: Decodebin did not pick nvidia decoder plugin.\n")

    @classmethod
    def decodebin_child_added(cls, child_proxy, Object, name, user_data):
        if name.find("decodebin") != -1:
            Object.connect("child-added", cls.decodebin_child_added, user_data)

    @classmethod
    def callback_pad_removed(cls, uridecodebin, uridecodebin_removed_src_pad, data):
        print(f"SourceBin: Removed pad {uridecodebin_removed_src_pad.name} from {uridecodebin.name}")

    @classmethod
    def callback_no_more_pads(cls, uridecodebin, data):
        print(f"SourceBin: No more pads for {uridecodebin.name}")
Pipeline:
import sys

sys.path.append("../")

import gi

gi.require_version("Gst", "1.0")
gi.require_version("GstRtspServer", "1.0")
from gi.repository import GObject, Gst

from source_bin import SourceBin


def create_gst_elemement(factory_name, instance_name):
    element = Gst.ElementFactory.make(factory_name, instance_name)
    if not element:
        sys.stderr.write(f" Unable to create {factory_name} {instance_name} \n")
    return element


ai_rules = fususcore_api.get_ai_rules()

urls = [
    "my rtsp url..."
]

GObject.threads_init()
Gst.init(None)

pipeline = Gst.Pipeline()

source_bins = [
    SourceBin.create_source_bin(i, url)
    for i, url in enumerate(urls)
]

frames_queues = list()
for i in range(len(source_bins)):
    frames_queue = create_gst_elemement("queue", f"frame_queue_{i}")
    frames_queue.set_property("leaky", 2)
    frames_queue.set_property("max-size-buffers", 1)
    frames_queue.set_property("flush-on-eos", 1)
    frames_queues.append(frames_queue)

for source_bin, frames_queue in zip(source_bins, frames_queues):
    pipeline.add(source_bin)
    pipeline.add(frames_queue)
    source_bin.link(frames_queue)

# loop = GObject.MainLoop()
# bus = pipeline.get_bus()
# bus.add_signal_watch()
# bus.connect("message", bus_call, loop)

pipeline.set_state(Gst.State.PLAYING)
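Since the bus handling above is commented out, here is a minimal sketch of a bus watch (a hypothetical bus_call handler, along the lines of the one in the DeepStream samples) that would print whatever error or EOS actually stops the pipeline:
from gi.repository import GLib

def bus_call(bus, message, loop):
    # Print whatever actually stops the pipeline (error or end-of-stream).
    if message.type == Gst.MessageType.ERROR:
        err, debug = message.parse_error()
        print("Error:", err, debug)
        loop.quit()
    elif message.type == Gst.MessageType.EOS:
        print("End-of-stream")
        loop.quit()
    return True

loop = GLib.MainLoop()
bus = pipeline.get_bus()
bus.add_signal_watch()
bus.connect("message", bus_call, loop)
loop.run()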
For your convenience, here is a graph of the pipeline: https://drive.google.com/file/d/1Vu8DR1Puam14k-fUKVBW2bG1gN4AgSPm/view?usp=sharing . Everything to the left of a queue is the bin that decodes the video. Each row represents a stream and is identical to the others.

Related

Pipeline works with "nveglglessink", but not with "ximagesink"

I am really new to GStreamer and DeepStream. I have a pipeline created based on deepstream-test1. The elements appear in this order:
filesrc (an h264) -> h264parse -> nvv4l2decoder -> nvstreammux ->
nvinfer -> nvvideoconvert -> nvdsosd -> nvegltransform ->
nveglglessink
This works fine, as intended. Here's the code:
import sys
sys.path.append('../')
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst
from common.is_aarch_64 import is_aarch64
from common.bus_call import bus_call
import pyds

PGIE_CLASS_ID_VEHICLE = 0
PGIE_CLASS_ID_BICYCLE = 1
PGIE_CLASS_ID_PERSON = 2
PGIE_CLASS_ID_ROADSIGN = 3

def osd_sink_pad_buffer_probe(pad, info, u_data):
    frame_number = 0
    # Initializing object counter with 0.
    obj_counter = {
        PGIE_CLASS_ID_VEHICLE: 0,
        PGIE_CLASS_ID_PERSON: 0,
        PGIE_CLASS_ID_BICYCLE: 0,
        PGIE_CLASS_ID_ROADSIGN: 0
    }
    num_rects = 0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return
    # Retrieve batch metadata from the gst_buffer.
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer).
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta.
            # The cast keeps ownership of the underlying memory in the
            # C code, so the Python garbage collector will leave it alone.
            #frame_meta = pyds.glist_get_nvds_frame_meta(l_frame.data)
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        frame_number = frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta
        l_obj = frame_meta.obj_meta_list
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                #obj_meta = pyds.glist_get_nvds_object_meta(l_obj.data)
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1
            obj_meta.rect_params.border_color.set(0.0, 0.0, 1.0, 0.0)
            try:
                l_obj = l_obj.next
            except StopIteration:
                break
        # Acquiring a display meta object. The memory ownership remains in
        # the C code so downstream plugins can still access it. Otherwise
        # the garbage collector will claim it when this probe function exits.
        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        # Setting display text to be shown on screen.
        # Note that the pyds module allocates a buffer for the string, and the
        # memory will not be claimed by the garbage collector.
        # Reading the display_text field here will return the C address of the
        # allocated string. Use pyds.get_string() to get the string content.
        py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}".format(frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE], obj_counter[PGIE_CLASS_ID_PERSON])
        # Now set the offsets where the string should appear
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12
        # Font, font-color and font-size
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        # set(red, green, blue, alpha); set to White
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)
        # Text background color
        py_nvosd_text_params.set_bg_clr = 1
        # set(red, green, blue, alpha); set to Black
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        # Using pyds.get_string() to get display_text as string
        print(pyds.get_string(py_nvosd_text_params.display_text))
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK

def main(args):
    # Check input arguments
    if len(args) != 2:
        sys.stderr.write("usage: %s <media file or uri>\n" % args[0])
        sys.exit(1)
    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)
    # Create the Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
    # Source element for reading from the file
    print("Creating Source \n ")
    source = Gst.ElementFactory.make("filesrc", "file-source")
    if not source:
        sys.stderr.write(" Unable to create Source \n")
    # Since the data format in the input file is an elementary h264 stream,
    # we need an h264parser
    print("Creating H264Parser \n")
    h264parser = Gst.ElementFactory.make("h264parse", "h264-parser")
    if not h264parser:
        sys.stderr.write(" Unable to create h264 parser \n")
    # Use nvdec_h264 for hardware accelerated decode on GPU
    print("Creating Decoder \n")
    decoder = Gst.ElementFactory.make("nvv4l2decoder", "nvv4l2-decoder")
    if not decoder:
        sys.stderr.write(" Unable to create Nvv4l2 Decoder \n")
    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")
    # Use nvinfer to run inferencing on the decoder's output;
    # the behaviour of inferencing is set through the config file
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")
    # Use a converter to convert from NV12 to RGBA as required by nvosd
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")
    # Create OSD to draw on the converted RGBA buffer
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    # Finally render the osd output
    if is_aarch64():
        transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
    print("Creating EGLSink \n")
    sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    if not sink:
        sys.stderr.write(" Unable to create egl sink \n")
    print("Playing file %s " % args[1])
    source.set_property('location', args[1])
    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', "dstest1_pgie_config.txt")
    print("Adding elements to Pipeline \n")
    pipeline.add(source)
    pipeline.add(h264parser)
    pipeline.add(decoder)
    pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(sink)
    if is_aarch64():
        pipeline.add(transform)
    # We link the elements together:
    # file-source -> h264-parser -> nvh264-decoder ->
    # nvinfer -> nvvidconv -> nvosd -> video-renderer
    print("Linking elements in the Pipeline \n")
    source.link(h264parser)
    h264parser.link(decoder)
    sinkpad = streammux.get_request_pad("sink_0")
    if not sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux \n")
    srcpad = decoder.get_static_pad("src")
    if not srcpad:
        sys.stderr.write(" Unable to get source pad of decoder \n")
    srcpad.link(sinkpad)
    streammux.link(pgie)
    pgie.link(nvvidconv)
    nvvidconv.link(nvosd)
    if is_aarch64():
        nvosd.link(transform)
        transform.link(sink)
    else:
        nvosd.link(sink)
    # Create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)
    # Let's add a probe to get informed of the generated metadata. We add the
    # probe to the sink pad of the osd element, since by that time the buffer
    # will have got all the metadata.
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd \n")
    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)
    # Start playback and listen to events
    print("Starting pipeline \n")
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    pipeline.set_state(Gst.State.NULL)

if __name__ == '__main__':
    sys.exit(main(sys.argv))
But whenever I try to replace nvegltransform -> nveglglessink in the pipeline with ximagesink, I get an error saying:
Error: gst-stream-error-quark: Internal data stream error. (1):
/dvs/git/dirty/git-master_linux/deepstream/sdk/src/gst-plugins/gst-nvinfer/gstnvinfer.cpp(2288):
gst_nvinfer_output_loop ():
/GstPipeline:pipeline0/GstNvInfer:primary-inference: streaming
stopped, reason not-linked (-1)
I needed to remove nveglglessink from the pipeline, and as nvegltransform is tied to nveglglessink, I decided to remove both and use ximagesink in their place.
This is the pipeline I am working on (which gives the error mentioned above):
filesrc (an h264) -> h264parse -> nvv4l2decoder -> nvstreammux ->
nvinfer -> nvvideoconvert -> nvdsosd -> ximagesink
Here's the code:
import sys
sys.path.append('../')
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst
from common.is_aarch_64 import is_aarch64
from common.bus_call import bus_call
import pyds

PGIE_CLASS_ID_VEHICLE = 0
PGIE_CLASS_ID_BICYCLE = 1
PGIE_CLASS_ID_PERSON = 2
PGIE_CLASS_ID_ROADSIGN = 3

def osd_sink_pad_buffer_probe(pad, info, u_data):
    frame_number = 0
    # Initializing object counter with 0.
    obj_counter = {
        PGIE_CLASS_ID_VEHICLE: 0,
        PGIE_CLASS_ID_PERSON: 0,
        PGIE_CLASS_ID_BICYCLE: 0,
        PGIE_CLASS_ID_ROADSIGN: 0
    }
    num_rects = 0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return
    # Retrieve batch metadata from the gst_buffer.
    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
    # C address of gst_buffer as input, which is obtained with hash(gst_buffer).
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta.
            # The cast keeps ownership of the underlying memory in the
            # C code, so the Python garbage collector will leave it alone.
            #frame_meta = pyds.glist_get_nvds_frame_meta(l_frame.data)
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        frame_number = frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta
        l_obj = frame_meta.obj_meta_list
        while l_obj is not None:
            try:
                # Casting l_obj.data to pyds.NvDsObjectMeta
                #obj_meta = pyds.glist_get_nvds_object_meta(l_obj.data)
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            obj_counter[obj_meta.class_id] += 1
            obj_meta.rect_params.border_color.set(0.0, 0.0, 1.0, 0.0)
            try:
                l_obj = l_obj.next
            except StopIteration:
                break
        # Acquiring a display meta object. The memory ownership remains in
        # the C code so downstream plugins can still access it. Otherwise
        # the garbage collector will claim it when this probe function exits.
        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        # Setting display text to be shown on screen.
        # Note that the pyds module allocates a buffer for the string, and the
        # memory will not be claimed by the garbage collector.
        # Reading the display_text field here will return the C address of the
        # allocated string. Use pyds.get_string() to get the string content.
        py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}".format(frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE], obj_counter[PGIE_CLASS_ID_PERSON])
        # Now set the offsets where the string should appear
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12
        # Font, font-color and font-size
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        # set(red, green, blue, alpha); set to White
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)
        # Text background color
        py_nvosd_text_params.set_bg_clr = 1
        # set(red, green, blue, alpha); set to Black
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        # Using pyds.get_string() to get display_text as string
        print(pyds.get_string(py_nvosd_text_params.display_text))
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK

def main(args):
    # Check input arguments
    if len(args) != 2:
        sys.stderr.write("usage: %s <media file or uri>\n" % args[0])
        sys.exit(1)
    # Standard GStreamer initialization
    GObject.threads_init()
    Gst.init(None)
    # Create the Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")
    # Source element for reading from the file
    print("Creating Source \n ")
    source = Gst.ElementFactory.make("filesrc", "file-source")
    if not source:
        sys.stderr.write(" Unable to create Source \n")
    # Since the data format in the input file is an elementary h264 stream,
    # we need an h264parser
    print("Creating H264Parser \n")
    h264parser = Gst.ElementFactory.make("h264parse", "h264-parser")
    if not h264parser:
        sys.stderr.write(" Unable to create h264 parser \n")
    # Use nvdec_h264 for hardware accelerated decode on GPU
    print("Creating Decoder \n")
    decoder = Gst.ElementFactory.make("nvv4l2decoder", "nvv4l2-decoder")
    if not decoder:
        sys.stderr.write(" Unable to create Nvv4l2 Decoder \n")
    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")
    # Use nvinfer to run inferencing on the decoder's output;
    # the behaviour of inferencing is set through the config file
    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")
    # Use a converter to convert from NV12 to RGBA as required by nvosd
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")
    # Create OSD to draw on the converted RGBA buffer
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    # Finally render the osd output
    # if is_aarch64():
    #     transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
    # print("Creating EGLSink \n")
    # sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
    # if not sink:
    #     sys.stderr.write(" Unable to create egl sink \n")
    print("Creating XIMAGESINK \n")
    ximagesink = Gst.ElementFactory.make("ximagesink", "video-sink")
    if not ximagesink:
        sys.stderr.write(" Unable to create ximagesink \n")
    print("Playing file %s " % args[1])
    source.set_property('location', args[1])
    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', 1)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', "dstest1_pgie_config.txt")
    print("Adding elements to Pipeline \n")
    pipeline.add(source)
    pipeline.add(h264parser)
    pipeline.add(decoder)
    pipeline.add(streammux)
    pipeline.add(pgie)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(ximagesink)
    print("Linking elements in the Pipeline \n")
    source.link(h264parser)
    h264parser.link(decoder)
    # Link the pads manually, as streammux does not have a static sink pad
    decoder_srcpad = decoder.get_static_pad("src")
    if not decoder_srcpad:
        sys.stderr.write(" Unable to get source pad of decoder \n")
    streammux_sinkpad = streammux.get_request_pad("sink_0")
    if not streammux_sinkpad:
        sys.stderr.write(" Unable to get the sink pad of streammux \n")
    decoder_srcpad.link(streammux_sinkpad)
    streammux.link(pgie)
    pgie.link(nvvidconv)
    nvvidconv.link(nvosd)
    nvosd.link(ximagesink)
    # Create an event loop and feed gstreamer bus messages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)
    # Let's add a probe to get informed of the generated metadata. We add the
    # probe to the sink pad of the osd element, since by that time the buffer
    # will have got all the metadata.
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd \n")
    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)
    # Start playback and listen to events
    print("Starting pipeline \n")
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    pipeline.set_state(Gst.State.NULL)

if __name__ == '__main__':
    sys.exit(main(sys.argv))
Could you help me understand what I am doing wrong? Thank you.
NOTE: I am using an NVIDIA Jetson Xavier, Linux, DeepStream 6 and GStreamer 1.0.
You may use nvvideoconvert to copy frames from NVMM memory into system memory, as expected by xvimagesink:
... nvstreammux -> nvinfer -> nvvideoconvert -> nvdsosd -> nvvideoconvert -> xvimagesink
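In terms of the Python code above, a minimal sketch of that change (element names here are illustrative; the second converter lets caps negotiation pick plain video/x-raw system memory for the X sink):
# A second nvvideoconvert after nvdsosd copies the NVMM frames into
# system memory; the downstream X sink then negotiates video/x-raw.
nvvidconv2 = Gst.ElementFactory.make("nvvideoconvert", "convertor-postosd")
xvsink = Gst.ElementFactory.make("xvimagesink", "video-sink")
pipeline.add(nvvidconv2)
pipeline.add(xvsink)
nvosd.link(nvvidconv2)
nvvidconv2.link(xvsink)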

GStreamer appsink cannot get flvmux data

I use GStreamer and python-gi to get encoded video stream data. My launch line is gst-launch-1.0 v4l2src device=/dev/video0 ! x264enc bitrate=1000 ! h264parse ! flvmux ! appsink.
My Python code looks like this:
import gi
gi.require_version('Gst', '1.0')
gi.require_version('GstApp', '1.0')
from gi.repository import GObject, Gst, GstApp

GObject.threads_init()
Gst.init(None)

class Example:
    def __init__(self):
        self.mainloop = GObject.MainLoop()
        self.pipeline = Gst.Pipeline()
        self.bus = self.pipeline.get_bus()
        self.bus.add_signal_watch()
        self.bus.connect('message::eos', self.on_eos)
        self.bus.connect('message::error', self.on_error)
        # Create elements
        self.src = Gst.ElementFactory.make('v4l2src', None)
        self.encoder = Gst.ElementFactory.make('x264enc', None)
        self.parse = Gst.ElementFactory.make('h264parse', None)
        self.mux = Gst.ElementFactory.make('flvmux', None)
        self.sink = Gst.ElementFactory.make('appsink', None)
        # Add elements to pipeline
        self.pipeline.add(self.src)
        self.pipeline.add(self.encoder)
        self.pipeline.add(self.parse)
        self.pipeline.add(self.mux)
        self.pipeline.add(self.sink)
        # Set properties
        self.src.set_property('device', "/dev/video0")
        # Link elements
        self.src.link(self.encoder)
        self.encoder.link(self.parse)
        self.parse.link(self.mux)
        self.mux.link(self.sink)

    def run(self):
        self.pipeline.set_state(Gst.State.PLAYING)
        # self.mainloop.run()
        appsink_sample = GstApp.AppSink.pull_sample(self.sink)
        while True:
            buff = appsink_sample.get_buffer()
            size, offset, maxsize = buff.get_sizes()
            frame_data = buff.extract_dup(offset, size)
            print(frame_data)

    def kill(self):
        self.pipeline.set_state(Gst.State.NULL)
        self.mainloop.quit()

    def on_eos(self, bus, msg):
        print('on_eos()')
        self.kill()

    def on_error(self, bus, msg):
        print('on_error():', msg.parse_error())
        self.kill()

example = Example()
example.run()
But I get the same data every time, just "FLV\0x01\0x01".
I then used the C language to code the same function, but got the same result. Why? Could anyone help me?
print prints strings, I assume? The buffer contains binary data; it just starts with bytes that resemble a string. So it probably starts with FLV\0x01\0x01\0x00.. followed by more binary data. String functions treat 0x00 as the end marker of a string and stop printing there (since print does not take a size argument, this is the agreement on where your data ends). The size should change, though, unless all chunks happen to have the same size. You need another function to print your binary data, although I'm not sure that is really what you want. Maybe you want to write this data to a file instead?
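For instance, a sketch of that suggestion, assuming Python 3: show a hex preview of each chunk instead of printing it as a string, or append the raw bytes to a file.
# Inside the loop, instead of print(frame_data):
print(len(frame_data), frame_data[:16].hex())  # chunk size plus a hex preview
with open('capture.flv', 'ab') as out:         # or append the raw bytes to a file
    out.write(frame_data)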
The problem is in how the data is pulled from GStreamer: we should use the signal functions to get the stream data from appsink.
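A minimal sketch of that signal-based approach (names are illustrative, based on the code above): enable emit-signals on the appsink and pull one sample per new-sample callback, instead of calling pull_sample once before the loop.
def on_new_sample(sink):
    sample = sink.emit('pull-sample')   # one fresh sample per callback
    buff = sample.get_buffer()
    data = buff.extract_dup(0, buff.get_size())
    print(len(data))                    # each chunk of muxed FLV data now differs
    return Gst.FlowReturn.OK

self.sink.set_property('emit-signals', True)
self.sink.connect('new-sample', on_new_sample)
self.pipeline.set_state(Gst.State.PLAYING)
self.mainloop.run()  # keep the app alive; the signal arrives from the streaming thread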

Python + GStreamer: scale video to window

I'm having some trouble rescaling the video output of GStreamer to the dimensions of the window the video is displayed in (while retaining the aspect ratio of the video). The problem is that I first need to preroll the video to determine its dimensions by retrieving the negotiated caps, and then calculate the dimensions at which it needs to be displayed to fit the window. Once I have prerolled the video and got the dimension caps, I cannot change the video's dimensions anymore: setting the new caps still results in the video being output at its original size. What must I do to solve this?
Just to be complete: in the current implementation I cannot render to an OpenGL texture, which would have easily solved this problem, since you could simply render output to the texture and scale it to fit the screen. I have to draw the output on a pygame surface, which needs to have the correct dimensions. pygame does offer functionality to scale its surfaces, but I think such an implementation (as I have now) is slower than retrieving the frames at their correct size directly from GStreamer (am I right?)
This is my code for loading and displaying the video (I omitted the main loop stuff):
def calcScaledRes(self, screen_res, image_res):
    """Calculate image size so it fits the screen
    Args:
        screen_res (tuple) - Display window size/Resolution
        image_res (tuple) - Image width and height
    Returns:
        tuple - width and height of image scaled to window/screen
    """
    rs = screen_res[0]/float(screen_res[1])
    ri = image_res[0]/float(image_res[1])
    if rs > ri:
        return (int(image_res[0] * screen_res[1]/image_res[1]), screen_res[1])
    else:
        return (screen_res[0], int(image_res[1]*screen_res[0]/image_res[0]))

def load(self, vfile):
    """
    Loads a videofile and makes it ready for playback
    Arguments:
    vfile -- the uri to the file to be played
    """
    # Info required for color space conversion (YUV->RGB);
    # masks are necessary for correct display on unix systems
    _VIDEO_CAPS = ','.join([
        'video/x-raw-rgb',
        'red_mask=(int)0xff0000',
        'green_mask=(int)0x00ff00',
        'blue_mask=(int)0x0000ff'
    ])
    self.caps = gst.Caps(_VIDEO_CAPS)
    # Create videoplayer and load URI
    self.player = gst.element_factory_make("playbin2", "player")
    self.player.set_property("uri", vfile)
    # Enable deinterlacing of video if necessary
    self.player.props.flags |= (1 << 9)
    # Reroute frame output to Python
    self._videosink = gst.element_factory_make('appsink', 'videosink')
    self._videosink.set_property('caps', self.caps)
    self._videosink.set_property('async', True)
    self._videosink.set_property('drop', True)
    self._videosink.set_property('emit-signals', True)
    self._videosink.connect('new-buffer', self.__handle_videoframe)
    self.player.set_property('video-sink', self._videosink)
    # Preroll movie to get dimension data
    self.player.set_state(gst.STATE_PAUSED)
    # If the movie is loaded correctly, info about the clip should be available
    if self.player.get_state(gst.CLOCK_TIME_NONE)[0] == gst.STATE_CHANGE_SUCCESS:
        pads = self._videosink.pads()
        for pad in pads:
            caps = pad.get_negotiated_caps()[0]
            self.vidsize = caps['width'], caps['height']
    else:
        raise exceptions.runtime_error("Failed to retrieve video size")
    # Calculate size of video when fit to screen
    self.scaledVideoSize = self.calcScaledRes((self.screen_width, self.screen_height), self.vidsize)
    # Calculate the top left corner of the video (to later center it vertically on screen)
    self.vidPos = ((self.screen_width - self.scaledVideoSize[0]) / 2, (self.screen_height - self.scaledVideoSize[1]) / 2)
    # Add width and height info to video caps and reload caps
    _VIDEO_CAPS += ", width={0}, heigh={1}".format(self.scaledVideoSize[0], self.scaledVideoSize[1])
    self.caps = gst.Caps(_VIDEO_CAPS)
    self._videosink.set_property('caps', self.caps)  # ??? not working, video still displayed in original size

def __handle_videoframe(self, appsink):
    """
    Callback method for handling a video frame
    Arguments:
    appsink -- the sink to which gst supplies the frame (not used)
    """
    buffer = self._videosink.emit('pull-buffer')
    img = pygame.image.frombuffer(buffer.data, self.vidsize, "RGB")
    # Upscale the image to a new surface if presented fullscreen.
    # Create the surface if it doesn't exist yet and keep rendering to this
    # surface for future frames (should be faster)
    if not hasattr(self, "destSurf"):
        self.destSurf = pygame.transform.scale(img, self.destsize)
    else:
        pygame.transform.scale(img, self.destsize, self.destSurf)
    self.screen.blit(self.destSurf, self.vidPos)
    # Swap the buffers
    pygame.display.flip()
    # Increase frame counter
    self.frameNo += 1
I'm pretty sure your issue was (as it is a very long time since you asked this question) that you never hooked up the bus to watch for the messages that were emitted.
The code for this usually looks something like this:
def some_function(self):
    # code defining Gplayer (the pipeline)
    # ...
    # here:
    Gplayer.set_property('flags', self.GST_VIDEO|self.GST_AUDIO|self.GST_TEXT|self.GST_SOFT_VOLUME|self.GST_DEINTERLACE)
    # more code
    # ...
    # finally, create the bus to listen for messages
    bus = Gplayer.get_bus()
    bus.add_signal_watch()
    bus.enable_sync_message_emission()
    bus.connect('message', self.OnBusMessage)
    bus.connect('sync-message::element', self.OnSyncMessage)

# Listen for gstreamer bus messages
def OnBusMessage(self, bus, message):
    t = message.type
    if t == Gst.MessageType.ERROR:
        pass
    elif t == Gst.MessageType.EOS:
        print("End of Audio")
    return True

def OnSyncMessage(self, bus, msg):
    if msg.get_structure() is None:
        return True
    if msg.get_structure().get_name() == 'prepare-window-handle':
        imagesink = msg.src
        imagesink.set_property('force-aspect-ratio', True)
        imagesink.set_window_handle(self.panel1.GetHandle())
The key bit for your issue is setting up a callback for the sync-message and, in that callback, setting the property force-aspect-ratio to True.
This property ensures that the video fits the window it is being displayed in at all times.
Note that the self.panel1.GetHandle() call identifies the panel in which you are displaying the video.
I appreciate that you will have moved on, but hopefully this will help someone else trawling through the archives.

How to create video thumbnails with Python and Gstreamer

I'd like to create thumbnails for MPEG-4 AVC videos using Gstreamer and Python. Essentially:
Open the video file
Seek to a certain point in time (e.g. 5 seconds)
Grab the frame at that time
Save the frame to disc as a .jpg file
I've been looking at this other similar question, but I cannot quite figure out how to do the seek and frame capture automatically without user input.
So in summary, how can I capture a video thumbnail with Gstreamer and Python as per the steps above?
To elaborate on ensonic's answer, here's an example:
import os
import sys
import gst

def get_frame(path, offset=5, caps=gst.Caps('image/png')):
    pipeline = gst.parse_launch('playbin2')
    pipeline.props.uri = 'file://' + os.path.abspath(path)
    pipeline.props.audio_sink = gst.element_factory_make('fakesink')
    pipeline.props.video_sink = gst.element_factory_make('fakesink')
    pipeline.set_state(gst.STATE_PAUSED)
    # Wait for state change to finish.
    pipeline.get_state()
    assert pipeline.seek_simple(
        gst.FORMAT_TIME, gst.SEEK_FLAG_FLUSH, offset * gst.SECOND)
    # Wait for seek to finish.
    pipeline.get_state()
    buffer = pipeline.emit('convert-frame', caps)
    pipeline.set_state(gst.STATE_NULL)
    return buffer

def main():
    buf = get_frame(sys.argv[1])
    with file('frame.png', 'w') as fh:
        fh.write(str(buf))

if __name__ == '__main__':
    main()
This generates a PNG image. You can get raw image data using gst.Caps("video/x-raw-rgb,bpp=24,depth=24") or something like that.
Note that in GStreamer 1.0 (as opposed to 0.10), playbin2 has been renamed to playbin and the convert-frame signal is named convert-sample.
The mechanics of seeking are explained in this chapter of the GStreamer Application Development Manual. The 0.10 playbin2 documentation no longer seems to be online, but the documentation for 1.0 is here.
An example in Vala, with GStreamer 1.0 :
var playbin = Gst.ElementFactory.make ("playbin", null);
playbin.set ("uri", "file:///path/to/file");
// some code here.
var caps = Gst.Caps.from_string ("image/png");
Gst.Sample sample;
Signal.emit_by_name (playbin, "convert-sample", caps, out sample);
if (sample == null)
    return;
var sample_caps = sample.get_caps ();
if (sample_caps == null)
    return;
unowned Gst.Structure structure = sample_caps.get_structure (0);
int width = (int) structure.get_value ("width");
int height = (int) structure.get_value ("height");
var memory = sample.get_buffer ().get_memory (0);
Gst.MapInfo info;
memory.map (out info, Gst.MapFlags.READ);
uint8[] data = info.data;
It's an old question, but I still haven't found this documented anywhere.
I found that the following works on a playing video with GStreamer 1.0:
import gi
import time
gi.require_version('Gst', '1.0')
from gi.repository import Gst

def get_frame():
    caps = Gst.Caps('image/png')
    pipeline = Gst.ElementFactory.make("playbin", "playbin")
    pipeline.set_property('uri', 'file:///home/rolf/GWPE.mp4')
    pipeline.set_state(Gst.State.PLAYING)
    # Allow time for it to start
    time.sleep(0.5)
    # Jump 30 seconds
    seek_time = 30 * Gst.SECOND
    pipeline.seek(1.0, Gst.Format.TIME, (Gst.SeekFlags.FLUSH | Gst.SeekFlags.ACCURATE), Gst.SeekType.SET, seek_time, Gst.SeekType.NONE, -1)
    # Allow the video to run to prove it's working, then take a snapshot
    time.sleep(1)
    buffer = pipeline.emit('convert-sample', caps)
    buff = buffer.get_buffer()
    result, map = buff.map(Gst.MapFlags.READ)
    if result:
        data = map.data
        pipeline.set_state(Gst.State.NULL)
        return data
    else:
        return

if __name__ == '__main__':
    Gst.init(None)
    image = get_frame()
    with open('frame.png', 'wb') as snapshot:
        snapshot.write(image)
Use playbin2. Set the uri to the media file, use gst_element_seek_simple to seek to the desired time position, and then use g_signal_emit to invoke the "convert-frame" action signal.

How to get duration of streaming data with GStreamer

I'm writing a program that converts media files to MP3 with GStreamer. It works, but I would also like to know the duration of the audio stream. The following is the simplified code.
import logging
import pygst
pygst.require('0.10')
import gst
# this is very important; without this, callbacks from the gstreamer
# thread will mess our program up
import gobject
gobject.threads_init()

def on_new_buffer(appsink):
    buf = appsink.emit('pull-buffer')
    print 'new buffer', len(buf)

def on_new_preroll(appsink):
    buf = appsink.emit('pull-preroll')
    print 'new preroll', len(buf)

def on_pad_added(decoder, pad):
    print 'Pad added'
    decoder.link(converter)
    pipeline.set_state(gst.STATE_PLAYING)

def on_msg(msg):
    if msg.type == gst.MESSAGE_ERROR:
        error, debug = msg.parse_error()
        print error, debug
    elif msg.type == gst.MESSAGE_EOS:
        duration = pipeline.query_duration(gst.FORMAT_TIME)
        print 'Duration', duration

pipeline = gst.Pipeline('pipeline')
appsrc = gst.element_factory_make('appsrc', 'src')
decoder = gst.element_factory_make('decodebin2', 'decoder')
converter = gst.element_factory_make('audioconvert', 'converter')
lame = gst.element_factory_make('lamemp3enc', 'lame')
appsink = gst.element_factory_make('appsink', 'sink')
pipeline.add(appsrc, decoder, lame, converter, appsink)
gst.element_link_many(appsrc, decoder)
gst.element_link_many(converter, lame, appsink)
# -- setup decoder --
decoder.connect('pad-added', on_pad_added)
# -- setup mp3 encoder --
lame.set_property('bitrate', 128)
# -- setup appsink --
# this makes appsink emit signals
appsink.set_property('emit-signals', True)
# turn off sync to make decoding as fast as possible
appsink.set_property('sync', False)
appsink.connect('new-buffer', on_new_buffer)
appsink.connect('new-preroll', on_new_preroll)
pipeline.set_state(gst.STATE_PAUSED)

data = open(r'D:\Musics\Fiona Fung - Proud Of You.mp3', 'rb').read()
buf = gst.Buffer(data)
appsrc.emit('push-buffer', buf)
appsrc.emit('end-of-stream')

bus = pipeline.get_bus()
while True:
    msg = bus.poll(gst.MESSAGE_ANY, -1)
    on_msg(msg)
I didn't use filesrc as the source; I used appsrc instead, because I would like to read streaming data from the Internet rather than from a file. Strangely, the resulting duration is -1:
....
new buffer 315
new buffer 320
new buffer 335
new buffer 553
Duration (-1L, <enum GST_FORMAT_TIME of type GstFormat>)
If I switch the appsrc to filesrc, then the duration is correct:
import logging
import pygst
pygst.require('0.10')
import gst
# this is very important; without this, callbacks from the gstreamer
# thread will mess our program up
import gobject
gobject.threads_init()

def on_new_buffer(appsink):
    buf = appsink.emit('pull-buffer')
    print 'new buffer', len(buf)

def on_new_preroll(appsink):
    buf = appsink.emit('pull-preroll')
    print 'new preroll', len(buf)

def on_pad_added(decoder, pad):
    print 'Pad added'
    decoder.link(converter)
    pipeline.set_state(gst.STATE_PLAYING)

def on_msg(msg):
    if msg.type == gst.MESSAGE_ERROR:
        error, debug = msg.parse_error()
        print error, debug
    elif msg.type == gst.MESSAGE_EOS:
        duration = pipeline.query_duration(gst.FORMAT_TIME)
        print 'Duration', duration

pipeline = gst.Pipeline('pipeline')
filesrc = gst.element_factory_make('filesrc', 'src')
decoder = gst.element_factory_make('decodebin2', 'decoder')
converter = gst.element_factory_make('audioconvert', 'converter')
lame = gst.element_factory_make('lamemp3enc', 'lame')
appsink = gst.element_factory_make('appsink', 'sink')
pipeline.add(filesrc, decoder, lame, converter, appsink)
gst.element_link_many(filesrc, decoder)
gst.element_link_many(converter, lame, appsink)
# -- setup filesrc --
filesrc.set_property('location', r'D:\Musics\Fiona Fung - Proud Of You.mp3')
# -- setup decoder --
decoder.connect('pad-added', on_pad_added)
# -- setup mp3 encoder --
lame.set_property('bitrate', 128)
# -- setup appsink --
# this makes appsink emit signals
appsink.set_property('emit-signals', True)
# turn off sync to make decoding as fast as possible
appsink.set_property('sync', False)
appsink.connect('new-buffer', on_new_buffer)
appsink.connect('new-preroll', on_new_preroll)
pipeline.set_state(gst.STATE_PAUSED)

bus = pipeline.get_bus()
while True:
    msg = bus.poll(gst.MESSAGE_ANY, -1)
    on_msg(msg)
As you can see, the result is correct now.
new buffer 322
new buffer 323
new buffer 315
new buffer 320
new buffer 549
Duration (189459000000L, <enum GST_FORMAT_TIME of type GstFormat>)
So, my question is: how do I get the correct duration of audio stream data from appsrc?
Thanks.
Unfortunately, with appsrc it is not possible to get the exact duration of the stream. With some formats that have a fixed bitrate it is possible to estimate it based on the file length, but formats that use variable bitrates report an unknown length.
This is because appsrc works on incoming buffers (in either the push or the pull model): it receives a chunk of data, consumes it, and then requests or is supplied the next chunk, which makes estimating the media duration almost impossible. Also, in the push model it is not possible to seek the media.
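One thing that may help (an assumption on my part, and only an estimate that works for fixed-bitrate formats): if you know the total stream length in bytes up front, you can set appsrc's size property before pushing data, so downstream elements can estimate a duration. In the 0.10 code above, that would look like:
# 'size' is the total stream length in bytes (-1 means unknown); with a
# fixed-bitrate MP3 this lets downstream estimate a duration instead of -1.
appsrc.set_property('size', len(data))
appsrc.emit('push-buffer', buf)
appsrc.emit('end-of-stream')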
