gstreamer appsink can not get flvmux data - python

I use GStreamer and python-gi to get encoded video stream data. My launch line is: gst-launch-1.0 v4l2src device=/dev/video0 ! x264enc bitrate=1000 ! h264parse ! flvmux ! appsink
My Python code looks like this:
import gi
gi.require_version('Gst', '1.0')
gi.require_version('GstApp', '1.0')
from gi.repository import GObject, Gst, GstApp
GObject.threads_init()
Gst.init(None)
class Example:
    def __init__(self):
        self.mainloop = GObject.MainLoop()
        self.pipeline = Gst.Pipeline()
        self.bus = self.pipeline.get_bus()
        self.bus.add_signal_watch()
        self.bus.connect('message::eos', self.on_eos)
        self.bus.connect('message::error', self.on_error)

        # Create elements
        self.src = Gst.ElementFactory.make('v4l2src', None)
        self.encoder = Gst.ElementFactory.make('x264enc', None)
        self.parse = Gst.ElementFactory.make('h264parse', None)
        self.mux = Gst.ElementFactory.make('flvmux', None)
        self.sink = Gst.ElementFactory.make('appsink', None)

        # Add elements to pipeline
        self.pipeline.add(self.src)
        self.pipeline.add(self.encoder)
        self.pipeline.add(self.parse)
        self.pipeline.add(self.mux)
        self.pipeline.add(self.sink)

        # Set properties
        self.src.set_property('device', "/dev/video0")

        # Link elements
        self.src.link(self.encoder)
        self.encoder.link(self.parse)
        self.parse.link(self.mux)
        self.mux.link(self.sink)

    def run(self):
        self.pipeline.set_state(Gst.State.PLAYING)
        # self.mainloop.run()
        appsink_sample = GstApp.AppSink.pull_sample(self.sink)
        while True:
            buff = appsink_sample.get_buffer()
            size, offset, maxsize = buff.get_sizes()
            frame_data = buff.extract_dup(offset, size)
            print(frame_data)

    def kill(self):
        self.pipeline.set_state(Gst.State.NULL)
        self.mainloop.quit()

    def on_eos(self, bus, msg):
        print('on_eos()')
        self.kill()

    def on_error(self, bus, msg):
        print('on_error():', msg.parse_error())
        self.kill()

example = Example()
example.run()
But I get the same data every time, something like "FLV\0x01\0x01". I then implemented the same thing in C and got the same result. Why? Could anyone help me?

print prints strings, I assume? The buffer contains binary data that merely starts with bytes resembling a string, so it probably begins with FLV\0x01\0x01\0x00.. followed by more binary data. String functions treat 0x00 as the end marker of a string and stop printing there (since print takes no size argument, the NUL byte is where it assumes your data ends). The size should still vary from buffer to buffer, unless every chunk happens to have the same size. You need a different way to display binary data, although I'm not sure that is really what you want; maybe you want to write this data to a file instead?
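A small illustration of that point (illustrative only, reusing the frame_data variable from the question's loop; the file name is made up): print stops at the first NUL byte, so dump the bytes explicitly or append them to a file instead.

# Hypothetical sketch: inspect or save the raw bytes instead of print(frame_data)
print(len(frame_data), frame_data[:32].hex())   # length plus a hex preview of the first bytes
with open('dump.flv', 'ab') as f:                # 'dump.flv' is a made-up file name
    f.write(frame_data)                          # append the full binary chunk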

The problem is in how the data is pulled from appsink: you should use appsink's signals (new-sample, with emit-signals enabled) to get the stream data.
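A minimal sketch of that signal-based approach, assuming the same pipeline and class as in the question (the method name on_new_sample and the main-loop usage are illustrative, not from the original post):

# Hypothetical sketch: replace run() with a signal-driven version
def run(self):
    self.sink.set_property('emit-signals', True)   # appsink only fires new-sample when enabled
    self.sink.set_property('sync', False)
    self.sink.connect('new-sample', self.on_new_sample)
    self.pipeline.set_state(Gst.State.PLAYING)
    self.mainloop.run()                            # callbacks are dispatched while the loop runs

def on_new_sample(self, sink):
    sample = sink.emit('pull-sample')              # a fresh GstSample per callback
    buff = sample.get_buffer()
    size, offset, maxsize = buff.get_sizes()
    frame_data = buff.extract_dup(offset, size)
    print(len(frame_data), frame_data[:16])        # now a new chunk each time
    return Gst.FlowReturn.OK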

Related

Gstreamer leaky queue stops the pipeline

The following code takes a list of URLs as input and creates a GStreamer pipeline. Specifically, for each URL, a uridecodebin element is initialized and attached to a queue element. Each queue has the following properties: leaky=2, max-size-buffers=1 and flush-on-eos=1. When I start the pipeline, I can see from nvidia-smi dmon that some video is decoded (NVDEC is used). After a second, everything stops.
I would expect decoding to keep going, with frames being pushed into each queue and each queue dropping the old frame every time it receives a new one. Am I wrong?
Code to initialize a Bin to decode the video (source_bin.py). You probably don't need to read it, but here it is:
import sys
from gi.repository import Gst
from pipeline.utils.pipeline import create_gst_elemement
class SourceBin:

    @classmethod
    def create_source_bin(cls, index: int, uri: str):
        # Create a source GstBin to abstract this bin's content from the rest of the pipeline
        bin_name = "source-bin-%02d" % index
        nbin = Gst.Bin.new(bin_name)
        if not nbin:
            sys.stderr.write(" Unable to create source bin \n")

        # Source element for reading from the uri.
        # We will use decodebin and let it figure out the container format of the
        # stream and the codec and plug the appropriate demux and decode plugins.
        uri_decode_bin = create_gst_elemement("uridecodebin", f"uridecodebin_{index}")
        # We set the input uri to the source element
        uri_decode_bin.set_property("uri", uri)
        # Connect to the "pad-added" signal of the decodebin which generates a
        # callback once a new pad for raw data has been created by the decodebin
        uri_decode_bin.connect("pad-added", cls.callback_newpad, nbin)
        uri_decode_bin.connect("pad-removed", cls.callback_pad_removed, nbin)
        uri_decode_bin.connect("no-more-pads", cls.callback_no_more_pads, nbin)
        uri_decode_bin.connect("child-added", cls.decodebin_child_added, nbin)

        # We need to create a ghost pad for the source bin which will act as a proxy
        # for the video decoder src pad. The ghost pad will not have a target right
        # now. Once the decode bin creates the video decoder and generates the
        # cb_newpad callback, we will set the ghost pad target to the video decoder
        # src pad.
        Gst.Bin.add(nbin, uri_decode_bin)
        bin_pad = nbin.add_pad(
            Gst.GhostPad.new_no_target("src", Gst.PadDirection.SRC)
        )
        if not bin_pad:
            sys.stderr.write(" Failed to add ghost pad in source bin \n")
            return None

        # Connect bus
        return nbin

    @classmethod
    def callback_newpad(cls, uridecodebin, uridecodebin_new_src_pad, data):
        print(f"SourceBin: added pad {uridecodebin_new_src_pad.name} to {uridecodebin.name}")
        caps = uridecodebin_new_src_pad.get_current_caps()
        gststruct = caps.get_structure(0)
        gstname = gststruct.get_name()
        source_bin = data
        features = caps.get_features(0)

        # Need to check if the pad created by the decodebin is for video and not audio.
        if gstname.find("video") != -1:
            # Link the decodebin pad only if decodebin has picked nvidia decoder plugin nvdec_*.
            # We do this by checking if the pad caps contain NVMM memory features.
            if features.contains("memory:NVMM"):
                # Get the source bin ghost pad
                bin_ghost_pad = source_bin.get_static_pad("src")
                if not bin_ghost_pad.set_target(uridecodebin_new_src_pad):
                    sys.stderr.write(
                        "Failed to link decoder src pad to source bin ghost pad\n"
                    )
            else:
                sys.stderr.write(" Error: Decodebin did not pick nvidia decoder plugin.\n")

    @classmethod
    def decodebin_child_added(cls, child_proxy, Object, name, user_data):
        if name.find("decodebin") != -1:
            Object.connect("child-added", cls.decodebin_child_added, user_data)

    @classmethod
    def callback_pad_removed(cls, uridecodebin, uridecodebin_removed_src_pad, data):
        print(f"SourceBin: Removed pad {uridecodebin_removed_src_pad.name} from {uridecodebin.name}")

    @classmethod
    def callback_no_more_pads(cls, uridecodebin, data):
        print(f"SourceBin: No more pads for {uridecodebin.name}")
Pipeline:
import sys
sys.path.append("../")
import gi
gi.require_version("Gst", "1.0")
gi.require_version("GstRtspServer", "1.0")
from gi.repository import GObject, Gst
from source_bin import SourceBin
def create_gst_elemement(factory_name, instance_name):
    element = Gst.ElementFactory.make(factory_name, instance_name)
    if not element:
        sys.stderr.write(f" Unable to create {factory_name} {instance_name} \n")
    return element

ai_rules = fususcore_api.get_ai_rules()

urls = [
    "my rtsp url..."
]

GObject.threads_init()
Gst.init(None)

pipeline = Gst.Pipeline()

source_bins = [
    SourceBin.create_source_bin(i, url)
    for i, url in enumerate(urls)
]

frames_queues = list()
for i in range(len(source_bins)):
    frames_queue = create_gst_elemement("queue", f"frame_queue_{i}")
    frames_queue.set_property("leaky", 2)
    frames_queue.set_property("max-size-buffers", 1)
    frames_queue.set_property("flush-on-eos", 1)
    frames_queues.append(frames_queue)

for source_bin, frames_queue in zip(source_bins, frames_queues):
    pipeline.add(source_bin)
    pipeline.add(frames_queue)
    source_bin.link(frames_queue)

# loop = GObject.MainLoop()
# bus = pipeline.get_bus()
# bus.add_signal_watch()
# bus.connect("message", bus_call, loop)

pipeline.set_state(Gst.State.PLAYING)
For your convenience, here is a graph of the pipeline: https://drive.google.com/file/d/1Vu8DR1Puam14k-fUKVBW2bG1gN4AgSPm/view?usp=sharing . Everything you see to the left of a queue is the bin that decodes the video. Each row represents a stream and is identical to the others.

Capture gstreamer network video with Python

I am trying to capture and display a network video stream with Python. The stream has been created (on my laptop) with the following command:
gst-launch-1.0 v4l2src ! videorate ! video/x-raw,framerate=2/1,width=640,height=480 ! x264enc pass=qual quantizer=20 tune=zerolatency ! rtph264pay config-interval=10 pt=96 ! udpsink host=127.0.0.1 port=5000
It takes the webcam input and streams it over a UDP port. I can capture the stream and display it with the following command:
gst-launch-1.0 udpsrc port=5000 ! "application/x-rtp, payload=127" ! rtph264depay ! avdec_h264 ! xvimagesink sync=false
Now I am trying to do the same (capture) with a Python script, but without luck. Here is my code:
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst
udpPipe = Gst.pipeline("player")
source = Gst.ElementFactory.make('udpsrc', None)
source.set_property("port", 5000)
source.set_property("host", "127.0.0.1")
rdepay = Gst.ElementFactory.make('rtph264depay', 'rdepay')
vdecode = Gst.ElementFactory.make('avdec_h264', 'vdecode')
sink = Gst.ElementFactory.make('xvimagesink', None)
udpPipe.add(source, rdepay, vdecode, sink)
gst.element_link_many(source, rdepay, vdecode, sink)
udpPipe.set_state(gst.STATE_PLAYING)
The error I am getting is:
/usr/lib/python2.7/dist-packages/gi/overrides/Gst.py:56: Warning: /build/glib2.0-prJhLS/glib2.0-2.48.2/./gobject/gsignal.c:1674: parameter 1 of type '<invalid>' for signal "GstBus::sync_message" is not a value type
Gst.Bin.__init__(self, name=name)
/usr/lib/python2.7/dist-packages/gi/overrides/Gst.py:56: Warning: /build/glib2.0-prJhLS/glib2.0-2.48.2/./gobject/gsignal.c:1674: parameter 1 of type '<invalid>' for signal "GstBus::message" is not a value type
Gst.Bin.__init__(self, name=name)
Traceback (most recent call last):
  File "getUdp.py", line 13, in <module>
    source = Gst.ElementFactory.make('udpsrc', None)
  File "/usr/lib/python2.7/dist-packages/gi/overrides/Gst.py", line 217, in make
    return Gst.ElementFactory.make(factory_name, instance_name)
TypeError: unbound method fake_method() must be called with ElementFactory instance as first argument (got str instance instead)
Any ideas? :-(
I also got the same error on Debian 9.3 (stretch) today.
Explicitly calling Gst.init resolved the problem.
The following code popped up an xvimagesink window on my system with both Python 2.7 and 3.5.
#!/usr/bin/python
import sys
import gi
gi.require_version('GLib', '2.0')
gi.require_version('Gst', '1.0')
from gi.repository import GLib, Gst
Gst.init(sys.argv)
udpPipe = Gst.Pipeline("player")
source = Gst.ElementFactory.make('udpsrc', None)
source.set_property("port", 5000)
#source.set_property("host", "127.0.0.1")
caps = Gst.caps_from_string("application/x-rtp, payload=127")
source.set_property("caps", caps)
rdepay = Gst.ElementFactory.make('rtph264depay', 'rdepay')
vdecode = Gst.ElementFactory.make('avdec_h264', 'vdecode')
sink = Gst.ElementFactory.make('xvimagesink', None)
sink.set_property("sync", False)
udpPipe.add(source, rdepay, vdecode, sink)
#Gst.element_link_many(source, rdepay, vdecode, sink)
source.link(rdepay)
rdepay.link(vdecode)
vdecode.link(sink)
udpPipe.set_state(Gst.State.PLAYING)
GLib.MainLoop().run()
I think it is necessary to call Gst.init and run a main loop to convert a gst-launch command line into a Python script with PyGObject.
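As an illustration of that point, the receiving gst-launch line from the question can be run almost verbatim through Gst.parse_launch (a sketch assuming GStreamer 1.0 and PyGObject, not part of the original answer):

#!/usr/bin/python
import sys
import gi
gi.require_version('GLib', '2.0')
gi.require_version('Gst', '1.0')
from gi.repository import GLib, Gst

Gst.init(sys.argv)                      # must be called before building pipelines
pipe = Gst.parse_launch(
    'udpsrc port=5000 caps="application/x-rtp, payload=127" ! '
    'rtph264depay ! avdec_h264 ! xvimagesink sync=false'
)
pipe.set_state(Gst.State.PLAYING)
GLib.MainLoop().run()                   # keep the process alive and deliver messages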
You can use "mjpeg" stream as follows:
gst-launch-1.0 videotestsrc ! videoconvert ! videoscale ! video/x-raw,format=I420,width=800,height=600,framerate=25/1 ! jpegenc ! rtpjpegpay ! udpsink host=127.0.0.1 port=5000
In python3 you can get the frames like this:
#!/usr/bin/env python
import cv2
import gi
import numpy as np
gi.require_version('Gst', '1.0')
from gi.repository import Gst
class Video():
    """BlueRov video capture class constructor

    Attributes:
        port (int): Video UDP port
        video_codec (string): Source h264 parser
        video_decode (string): Transform YUV (12bits) to BGR (24bits)
        video_pipe (object): GStreamer top-level pipeline
        video_sink (object): Gstreamer sink element
        video_sink_conf (string): Sink configuration
        video_source (string): Udp source ip and port
    """

    def __init__(self, port=5000):
        """Summary

        Args:
            port (int, optional): UDP port
        """
        Gst.init(None)

        self.port = port
        self._frame = None

        # [Software component diagram](https://www.ardusub.com/software/components.html)
        # UDP video stream (:5000)
        self.video_source = 'udpsrc port={}'.format(self.port)
        # [Rasp raw image](http://picamera.readthedocs.io/en/release-0.7/recipes2.html#raw-image-capture-yuv-format)
        # Cam -> CSI-2 -> H264 Raw (YUV 4-4-4 (12bits) I420)
        # self.video_codec = '! application/x-rtp, payload=96 ! rtph264depay ! h264parse ! avdec_h264'
        self.video_codec = '! application/x-rtp, payload=26 ! rtpjpegdepay ! jpegdec'
        # Python doesn't have nibbles; convert YUV nibbles (4-4-4) to OpenCV standard BGR bytes (8-8-8)
        self.video_decode = \
            '! decodebin ! videoconvert ! video/x-raw,format=(string)BGR ! videoconvert'
        # Create a sink to get data
        self.video_sink_conf = \
            '! appsink emit-signals=true sync=false max-buffers=2 drop=true'

        self.video_pipe = None
        self.video_sink = None

        self.run()

    def start_gst(self, config=None):
        """ Start gstreamer pipeline and sink
        Pipeline description list e.g.:
            [
                'videotestsrc ! decodebin',
                '! videoconvert ! video/x-raw,format=(string)BGR ! videoconvert',
                '! appsink'
            ]

        Args:
            config (list, optional): Gstreamer pipeline description list
        """
        if not config:
            config = \
                [
                    'videotestsrc ! decodebin',
                    '! videoconvert ! video/x-raw,format=(string)BGR ! videoconvert',
                    '! appsink'
                ]

        command = ' '.join(config)
        self.video_pipe = Gst.parse_launch(command)
        self.video_pipe.set_state(Gst.State.PLAYING)
        self.video_sink = self.video_pipe.get_by_name('appsink0')

    @staticmethod
    def gst_to_opencv(sample):
        """Transform byte array into np array

        Args:
            sample (TYPE): Description

        Returns:
            TYPE: Description
        """
        buf = sample.get_buffer()
        caps = sample.get_caps()
        array = np.ndarray(
            (
                caps.get_structure(0).get_value('height'),
                caps.get_structure(0).get_value('width'),
                3
            ),
            buffer=buf.extract_dup(0, buf.get_size()), dtype=np.uint8)
        return array

    def frame(self):
        """ Get Frame

        Returns:
            iterable: bool and image frame, cap.read() output
        """
        return self._frame

    def frame_available(self):
        """Check if frame is available

        Returns:
            bool: true if frame is available
        """
        return type(self._frame) != type(None)

    def run(self):
        """ Get frame to update _frame
        """
        self.start_gst(
            [
                self.video_source,
                self.video_codec,
                self.video_decode,
                self.video_sink_conf
            ])

        self.video_sink.connect('new-sample', self.callback)

    def callback(self, sink):
        sample = sink.emit('pull-sample')
        new_frame = self.gst_to_opencv(sample)
        self._frame = new_frame

        return Gst.FlowReturn.OK


if __name__ == '__main__':
    # Create the video object
    # Add port= if it is necessary to use a different one
    video = Video(port=5000)

    while True:
        # Wait for the next frame
        if not video.frame_available():
            continue

        frame = video.frame()
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
REF
1: http://www.einarsundgren.se/gstreamer-basic-real-time-streaming-tutorial/
2: https://gist.github.com/patrickelectric/443645bb0fd6e71b34c504d20d475d5a

Why can the program only run once? (Python)

import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject #,Gtk
from gi.repository import Gst as gst
import datetime
def take_photo():
    GObject.threads_init()
    gst.init(None)
    pipeline = gst.Pipeline()
    video_source = gst.ElementFactory.make('v4l2src', 'video_source')
    vconvert = gst.ElementFactory.make('videoconvert', 'vconvert')
    clock = gst.ElementFactory.make('clockoverlay', 'clock')
    timer = gst.ElementFactory.make('timeoverlay', 'timer')
    vrate = gst.ElementFactory.make('videorate', 'vrate')
    sconvert = gst.ElementFactory.make('videoconvert', 'sconvert')
    png = gst.ElementFactory.make('pngenc', 'png')
    multi_sink = gst.ElementFactory.make('multifilesink', 'multi_sink')

    caps = gst.caps_from_string("video/x-raw,format=RGB,width=800,height=600,framerate=5/1")
    timer.set_property('valignment', 'bottom')
    timer.set_property('halignment', 'right')
    clock.set_property('time-format', '%Y/%m/%d %H:%M:%S')
    clock.set_property('valignment', 'bottom')
    caps1 = gst.caps_from_string("video/x-raw,framerate=1/1")
    png.set_property('snapshot', True)
    multi_sink.set_property('location', '/home/pi/frame%05d.png')
    filter = gst.ElementFactory.make("capsfilter", "filter")
    filter.set_property("caps", caps)
    filter1 = gst.ElementFactory.make("capsfilter", "filter1")
    filter1.set_property("caps", caps1)

    pipeline.add(video_source)
    pipeline.add(vconvert)
    pipeline.add(timer)
    pipeline.add(clock)
    pipeline.add(filter)
    pipeline.add(vrate)
    pipeline.add(filter1)
    pipeline.add(sconvert)
    pipeline.add(png)
    pipeline.add(multi_sink)

    video_source.link(filter)
    filter.link(vconvert)
    vconvert.link(timer)
    timer.link(clock)
    clock.link(vrate)
    vrate.link(filter1)
    filter1.link(sconvert)
    sconvert.link(png)
    png.link(multi_sink)

    bus = pipeline.get_bus()
    pipeline.set_state(gst.State.PLAYING)
    print "Capture started"
    bus = pipeline.get_bus()  # class
    msg = bus.timed_pop_filtered(gst.CLOCK_TIME_NONE, gst.MessageType.ERROR | gst.MessageType.EOS)
    print msg
    pipeline.set_state(gst.State.NULL)
When the program runs for the first time, it captures the image, but when I run it a second time nothing happens, and I need to restart the whole Python program for it to run again. Can anybody help me solve this?
What about implementing it as a class, doing the GStreamer initialization once, and in take_photo just playing the pipe and stopping it afterwards? It should be reusable then (from the GStreamer point of view the pipe can go from NULL to PLAYING and back over and over):
class TakePhoto:
    def __init__(self):
        GObject.threads_init()
        gst.init(None)
        self.pipeline = gst.Pipeline()
        # .. do the element creation and their linking etc ..

    def take_photo(self):  # this is reusable
        bus = self.pipeline.get_bus()
        self.pipeline.set_state(gst.State.PLAYING)
        print "Capture started"
        bus = self.pipeline.get_bus()  # class
        msg = bus.timed_pop_filtered(gst.CLOCK_TIME_NONE, gst.MessageType.ERROR | gst.MessageType.EOS)
        print msg
        self.pipeline.set_state(gst.State.NULL)
Then in the Python shell you can initialize one instance and call take_photo multiple times:
tp = TakePhoto()
tp.take_photo()
tp.take_photo()
This code is typed from memory, so I take no responsibility if it boils your HDD or anything, and I'm not used to coding in Python, so it may be full of bugs :D
But HTH.
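For reference, a minimal self-contained sketch of that idea. It uses Gst.parse_launch instead of manual element creation purely to keep it short; the pipeline string mirrors the elements from the question, and everything else is illustrative rather than a definitive implementation:

import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst


class TakePhoto:
    def __init__(self):
        GObject.threads_init()
        Gst.init(None)
        # Same chain as the question, built with parse_launch for brevity
        self.pipeline = Gst.parse_launch(
            'v4l2src ! video/x-raw,format=RGB,width=800,height=600,framerate=5/1 ! '
            'videoconvert ! timeoverlay valignment=bottom halignment=right ! '
            'clockoverlay time-format="%Y/%m/%d %H:%M:%S" valignment=bottom ! '
            'videorate ! video/x-raw,framerate=1/1 ! videoconvert ! '
            'pngenc snapshot=true ! multifilesink location=/home/pi/frame%05d.png'
        )

    def take_photo(self):
        # Reusable: PLAYING -> wait for EOS or ERROR -> back to NULL
        bus = self.pipeline.get_bus()
        self.pipeline.set_state(Gst.State.PLAYING)
        msg = bus.timed_pop_filtered(
            Gst.CLOCK_TIME_NONE,
            Gst.MessageType.ERROR | Gst.MessageType.EOS)
        print(msg)
        self.pipeline.set_state(Gst.State.NULL)


tp = TakePhoto()
tp.take_photo()
tp.take_photo()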

How to get duration of streaming data with GStreamer

I'm writing a program to convert a media file to an mp3 file with GStreamer. It works, but I would also like to know the duration of the audio stream. The following is the simplified code.
import logging
import pygst
pygst.require('0.10')
import gst
# this is very important; without this, callbacks from the gstreamer thread
# will mess our program up
import gobject
gobject.threads_init()
def on_new_buffer(appsink):
    buf = appsink.emit('pull-buffer')
    print 'new buffer', len(buf)

def on_new_preroll(appsink):
    buf = appsink.emit('pull-preroll')
    print 'new preroll', len(buf)

def on_pad_added(decoder, pad):
    print 'Pad added'
    decoder.link(converter)
    pipeline.set_state(gst.STATE_PLAYING)

def on_msg(msg):
    if msg.type == gst.MESSAGE_ERROR:
        error, debug = msg.parse_error()
        print error, debug
    elif msg.type == gst.MESSAGE_EOS:
        duration = pipeline.query_duration(gst.FORMAT_TIME)
        print 'Duration', duration

pipeline = gst.Pipeline('pipeline')

appsrc = gst.element_factory_make('appsrc', 'src')
decoder = gst.element_factory_make('decodebin2', 'decoder')
converter = gst.element_factory_make('audioconvert', 'converter')
lame = gst.element_factory_make('lamemp3enc', 'lame')
appsink = gst.element_factory_make('appsink', 'sink')

pipeline.add(appsrc, decoder, lame, converter, appsink)
gst.element_link_many(appsrc, decoder)
gst.element_link_many(converter, lame, appsink)

# -- setup appsink --

# -- setup decoder --
decoder.connect('pad-added', on_pad_added)

# -- setup mp3 encoder --
lame.set_property('bitrate', 128)

# -- setup appsink --
# this makes appsink emit signals
appsink.set_property('emit-signals', True)
# turns off sync to make decoding as fast as possible
appsink.set_property('sync', False)
appsink.connect('new-buffer', on_new_buffer)
appsink.connect('new-preroll', on_new_preroll)

pipeline.set_state(gst.STATE_PAUSED)

data = open(r'D:\Musics\Fiona Fung - Proud Of You.mp3', 'rb').read()
buf = gst.Buffer(data)
appsrc.emit('push-buffer', buf)
appsrc.emit('end-of-stream')

bus = pipeline.get_bus()
while True:
    msg = bus.poll(gst.MESSAGE_ANY, -1)
    on_msg(msg)
I didn't use filesrc as the source; I used appsrc instead, because I would like to read streaming data from the Internet rather than a file. Strangely, as a result, the output duration is -1:
....
new buffer 315
new buffer 320
new buffer 335
new buffer 553
Duration (-1L, <enum GST_FORMAT_TIME of type GstFormat>)
If I switch the appsrc to filesrc, then the duration is correct
import logging
import pygst
pygst.require('0.10')
import gst
# this is very important; without this, callbacks from the gstreamer thread
# will mess our program up
import gobject
gobject.threads_init()
def on_new_buffer(appsink):
    buf = appsink.emit('pull-buffer')
    print 'new buffer', len(buf)

def on_new_preroll(appsink):
    buf = appsink.emit('pull-preroll')
    print 'new preroll', len(buf)

def on_pad_added(decoder, pad):
    print 'Pad added'
    decoder.link(converter)
    pipeline.set_state(gst.STATE_PLAYING)

def on_msg(msg):
    if msg.type == gst.MESSAGE_ERROR:
        error, debug = msg.parse_error()
        print error, debug
    elif msg.type == gst.MESSAGE_EOS:
        duration = pipeline.query_duration(gst.FORMAT_TIME)
        print 'Duration', duration

pipeline = gst.Pipeline('pipeline')

filesrc = gst.element_factory_make('filesrc', 'src')
decoder = gst.element_factory_make('decodebin2', 'decoder')
converter = gst.element_factory_make('audioconvert', 'converter')
lame = gst.element_factory_make('lamemp3enc', 'lame')
appsink = gst.element_factory_make('appsink', 'sink')

pipeline.add(filesrc, decoder, lame, converter, appsink)
gst.element_link_many(filesrc, decoder)
gst.element_link_many(converter, lame, appsink)

# -- setup filesrc --
filesrc.set_property('location', r'D:\Musics\Fiona Fung - Proud Of You.mp3')

# -- setup decoder --
decoder.connect('pad-added', on_pad_added)

# -- setup mp3 encoder --
lame.set_property('bitrate', 128)

# -- setup appsink --
# this makes appsink emit signals
appsink.set_property('emit-signals', True)
# turns off sync to make decoding as fast as possible
appsink.set_property('sync', False)
appsink.connect('new-buffer', on_new_buffer)
appsink.connect('new-preroll', on_new_preroll)

pipeline.set_state(gst.STATE_PAUSED)

bus = pipeline.get_bus()
while True:
    msg = bus.poll(gst.MESSAGE_ANY, -1)
    on_msg(msg)
As you can see, the result is correct now.
new buffer 322
new buffer 323
new buffer 315
new buffer 320
new buffer 549
Duration (189459000000L, <enum GST_FORMAT_TIME of type GstFormat>)
So, my question is: how do I get the correct duration of audio stream data from appsrc?
Thanks.
Unfortunately, with appsrc it is not possible to get the exact duration of the stream. With some formats that have a fixed bitrate it is possible to estimate the duration from the file length, but formats that use variable bitrates report an unknown length.
This is because appsrc works on the incoming buffers (either push or pull model): it receives a chunk of data, consumes it, and then either requests or is supplied the next chunk, which makes estimating the media duration almost impossible. Also, in push mode it is not possible to seek the media.
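One partial workaround (my own assumption, not part of the original answer, and not reliable for variable-bitrate files): if the total length in bytes is known up front, you can advertise it to appsrc via its size property, which may allow downstream parsers to estimate a duration from the bitrate. A sketch in the question's gst-python 0.10 style:

# Hypothetical sketch: advertise the stream length so downstream elements
# can try to estimate a duration (works best for constant-bitrate data)
data = open(r'D:\Musics\Fiona Fung - Proud Of You.mp3', 'rb').read()
appsrc.set_property('format', gst.FORMAT_BYTES)
appsrc.set_property('size', len(data))      # total number of bytes we will push
appsrc.emit('push-buffer', gst.Buffer(data))
appsrc.emit('end-of-stream')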

gstreamer appsrc works for xvimagesink but no in theoraenc ! oggmux

I am trying to streamcast a computer-generated video using gstreamer and icecast, but I cannot get gstreamer appsrc to work. My app works as expected if I use xvimagesink as the sink (see commented code below), but once I pipe it to theoraenc it does not run.
I swapped shout2send for filesink to check whether the problem was icecast; the result is that no data is written to the file. Substituting appsrc with videotestsrc works as expected. Any suggestions?
#!/usr/bin/env python
import sys, os, pygtk, gtk, gobject
import pygst
pygst.require("0.10")
import gst
import numpy as np
class GTK_Main:
    def __init__(self):
        window = gtk.Window(gtk.WINDOW_TOPLEVEL)
        window.connect("destroy", gtk.main_quit, "WM destroy")
        vbox = gtk.VBox()
        window.add(vbox)
        self.button = gtk.Button("Start")
        self.button.connect("clicked", self.start_stop)
        vbox.add(self.button)
        window.show_all()

        self.player = gst.Pipeline("player")
        source = gst.element_factory_make("appsrc", "source")
        caps = gst.Caps("video/x-raw-gray,bpp=16,endianness=1234,width=320,height=240,framerate=(fraction)10/1")
        source.set_property('caps', caps)
        source.set_property('blocksize', 320*240*2)
        source.connect('need-data', self.needdata)
        colorspace = gst.element_factory_make('ffmpegcolorspace')
        enc = gst.element_factory_make('theoraenc')
        mux = gst.element_factory_make('oggmux')
        shout = gst.element_factory_make('shout2send')
        shout.set_property("ip", "localhost")
        shout.set_property("password", "hackme")
        shout.set_property("mount", "/stream")
        caps = gst.Caps("video/x-raw-yuv,width=320,height=240,framerate=(fraction)10/1,format=(fourcc)I420")
        enc.caps = caps
        videosink = gst.element_factory_make('xvimagesink')
        videosink.caps = caps
        self.player.add(source, colorspace, enc, mux, shout)
        gst.element_link_many(source, colorspace, enc, mux, shout)
        #self.player.add(source, colorspace, videosink)
        #gst.element_link_many(source, colorspace, videosink)

    def start_stop(self, w):
        if self.button.get_label() == "Start":
            self.button.set_label("Stop")
            self.player.set_state(gst.STATE_PLAYING)
        else:
            self.player.set_state(gst.STATE_NULL)
            self.button.set_label("Start")

    def needdata(self, src, length):
        bytes = np.int16(np.random.rand(length/2)*30000).data
        src.emit('push-buffer', gst.Buffer(bytes))

GTK_Main()
gtk.gdk.threads_init()
gtk.main()
I think that your problem is most likely to do with timestamping of the buffers. I've done some quick testing, using that code and replacing the shout element with oggdemux, theoradec, ffmpegcolorspace and ximagesink. At first, I got no output, but after I dispensed with the muxing/demuxing altogether, I got a static image, along with some debug messages about timestamps. I got the correct output after setting the is-live and do-timestamp properties to true on appsrc.
I assume that it should be possible to directly set the timestamps on the buffers that you are pushing out of appsrc, but alas I've not discovered how to do that.
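For completeness, here are the two properties mentioned above, plus a sketch of stamping the buffers by hand in the question's gst-python 0.10 syntax. The manual-timestamp variant is an illustration on my part (it assumes a self.frame_count counter initialized to 0 in __init__ and the 10 fps rate from the caps), not a verified fix:

# Enable live behaviour / automatic timestamping on appsrc
source.set_property('is-live', True)
source.set_property('do-timestamp', True)

# Alternatively (hypothetical sketch): stamp each pushed buffer manually
def needdata(self, src, length):
    bytes = np.int16(np.random.rand(length/2)*30000).data
    buf = gst.Buffer(bytes)
    buf.timestamp = self.frame_count * gst.SECOND / 10   # 10 fps as in the caps
    buf.duration = gst.SECOND / 10
    self.frame_count += 1                                # needs self.frame_count = 0 in __init__
    src.emit('push-buffer', buf)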
