Python3 error with GStreamer - python

I run:
raspivid -t 999999 -w 1080 -h 720 -fps 25 -hf -b 2000000 -o - | gst-launch-1.0 -v fdsrc ! h264parse ! rtph264pay config-interval=1 pt=96 ! gdppay ! tcpserversink host=serverIp port=5000
on the Raspberry Pi and run:
gst-launch-1.0 -v tcpclientsrc host=serverIp port=5000 ! gdpdepay ! rtph264depay ! avdec_h264 ! videoconvert ! autovideosink sync=false
on my computer, and I received the video streamed from the Raspberry Pi.
Now I want to write a Python script that does the same on my computer. My code is:
#!/usr/bin/python3
from os import path

import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst, Gtk
# Needed for window.get_xid(), xvimagesink.set_window_handle(), respectively:
from gi.repository import GdkX11, GstVideo

GObject.threads_init()
Gst.init(None)

class Player(object):
    def __init__(self):
        self.pipeline = Gst.Pipeline()
        self.tcpsrc = Gst.ElementFactory.make('tcpclientsrc', 'tcpsrc')
        self.tcpsrc.set_property("host", '192.168.1.12')
        self.tcpsrc.set_property("port", 5000)
        self.gdepay = Gst.ElementFactory.make('gdpdepay', 'gdepay')
        self.rdepay = Gst.ElementFactory.make('rtph264depay', 'rdepay')
        self.avdec = Gst.ElementFactory.make('avdec_h264', 'avdec')
        self.vidconvert = Gst.ElementFactory.make('videoconvert', 'vidconvert')
        self.asink = Gst.ElementFactory.make('appsink', 'asink')
        self.asink.set_property('sync', False)
        #self.asink.set_property('emit-signals', True)
        #self.set_property('drop', True)

        self.pipeline.add(self.tcpsrc)
        self.pipeline.add(self.gdepay)
        self.pipeline.add(self.rdepay)
        self.pipeline.add(self.avdec)
        self.pipeline.add(self.vidconvert)
        self.pipeline.add(self.asink)

        self.tcpsrc.link(self.gdepay)
        self.gdepay.link(self.rdepay)
        self.rdepay.link(self.avdec)
        self.avdec.link(self.vidconvert)
        self.vidconvert.link(self.asink)

    def run(self):
        self.pipeline.set_state(Gst.State.PLAYING)

p = Player()
p.run()
but I get the following error:
(DO.py:3618): GStreamer-WARNING **: gstpad.c:4555:store_sticky_event: Sticky event misordering, got 'segment' before 'caps'

(DO.py:3618): GStreamer-WARNING **: gstpad.c:4555:store_sticky_event: Sticky event misordering, got 'segment' before 'caps'
Best Regards,
Mostafa

I answered my own question: I replaced my code with this:
#!/usr/bin/python3
from os import path

import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst, Gtk
# Needed for window.get_xid(), xvimagesink.set_window_handle(), respectively:
from gi.repository import GdkX11, GstVideo

GObject.threads_init()
Gst.init(None)

class Player(object):
    def __init__(self):
        self.pipeline = Gst.Pipeline()
        self.tcpsrc = Gst.ElementFactory.make('tcpclientsrc', 'tcpsrc')
        self.tcpsrc.set_property("host", '192.168.1.13')
        self.tcpsrc.set_property("port", 5000)
        self.gdepay = Gst.ElementFactory.make('gdpdepay', 'gdepay')
        self.rdepay = Gst.ElementFactory.make('rtph264depay', 'rdepay')
        self.avdec = Gst.ElementFactory.make('avdec_h264', 'avdec')
        self.vidconvert = Gst.ElementFactory.make('videoconvert', 'vidconvert')
        self.asink = Gst.ElementFactory.make('autovideosink', 'asink')
        self.asink.set_property('sync', False)
        #self.asink.set_property('emit-signals', True)
        #self.set_property('drop', True)

        self.pipeline.add(self.tcpsrc)
        self.pipeline.add(self.gdepay)
        self.pipeline.add(self.rdepay)
        self.pipeline.add(self.avdec)
        self.pipeline.add(self.vidconvert)
        self.pipeline.add(self.asink)

        self.tcpsrc.link(self.gdepay)
        self.gdepay.link(self.rdepay)
        self.rdepay.link(self.avdec)
        self.avdec.link(self.vidconvert)
        self.vidconvert.link(self.asink)

    def run(self):
        self.pipeline.set_state(Gst.State.PLAYING)
        Gtk.main()

p = Player()
p.run()
First, I had forgotten to call Gtk.main() in the run function. Second, I changed self.asink = Gst.ElementFactory.make('appsink', 'asink') to self.asink = Gst.ElementFactory.make('autovideosink', 'asink').
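If you do want an appsink (for example, to process the frames in Python) instead of a window, the missing pieces are emit-signals=true, a new-sample callback, and a running main loop. A minimal sketch, assuming the same stream as above (the launch string and names are illustrative, not from the original post):

#!/usr/bin/python3
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GLib, Gst

Gst.init(None)

# Same receive chain as above, but ending in an appsink
pipeline = Gst.parse_launch(
    'tcpclientsrc host=192.168.1.13 port=5000 ! gdpdepay ! rtph264depay ! '
    'avdec_h264 ! videoconvert ! appsink name=asink emit-signals=true sync=false')

def on_new_sample(sink):
    sample = sink.emit('pull-sample')
    # ... process sample.get_buffer() here ...
    return Gst.FlowReturn.OK

pipeline.get_by_name('asink').connect('new-sample', on_new_sample)
pipeline.set_state(Gst.State.PLAYING)
# Without a running main loop the script would exit immediately
GLib.MainLoop().run()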

Related

Capture gstreamer network video with Python

I am trying to capture and display a network video stream with Python. The stream has been created (on my laptop) with the following command:
gst-launch-1.0 v4l2src ! videorate ! video/x-raw,framerate=2/1,width=640,height=480 ! x264enc pass=qual quantizer=20 tune=zerolatency ! rtph264pay config-interval=10 pt=96 ! udpsink host=127.0.0.1 port=5000
It takes the webcam input and streams it over a UDP port. I can capture the stream and display it with the following command:
gst-launch-1.0 udpsrc port=5000 ! "application/x-rtp, payload=127" ! rtph264depay ! avdec_h264 ! xvimagesink sync=false
Now I am trying to do the same (capture) with a Python script, but without luck. Here is my code:
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst
udpPipe = Gst.pipeline("player")
source = Gst.ElementFactory.make('udpsrc', None)
source.set_property("port", 5000)
source.set_property("host", "127.0.0.1")
rdepay = Gst.ElementFactory.make('rtph264depay', 'rdepay')
vdecode = Gst.ElementFactory.make('avdec_h264', 'vdecode')
sink = Gst.ElementFactory.make('xvimagesink', None)
udpPipe.add(source, rdepay, vdecode, sink)
gst.element_link_many(source, rdepay, vdecode, sink)
udpPipe.set_state(gst.STATE_PLAYING)
The error I am getting is:
/usr/lib/python2.7/dist-packages/gi/overrides/Gst.py:56: Warning: /build/glib2.0-prJhLS/glib2.0-2.48.2/./gobject/gsignal.c:1674: parameter 1 of type '<invalid>' for signal "GstBus::sync_message" is not a value type
Gst.Bin.__init__(self, name=name)
/usr/lib/python2.7/dist-packages/gi/overrides/Gst.py:56: Warning: /build/glib2.0-prJhLS/glib2.0-2.48.2/./gobject/gsignal.c:1674: parameter 1 of type '<invalid>' for signal "GstBus::message" is not a value type
Gst.Bin.__init__(self, name=name)
Traceback (most recent call last):
File "getUdp.py", line 13, in <module>
source = Gst.ElementFactory.make('udpsrc', None)
File "/usr/lib/python2.7/dist-packages/gi/overrides/Gst.py", line 217, in make
return Gst.ElementFactory.make(factory_name, instance_name)
TypeError: unbound method fake_method() must be called with ElementFactory instance as first argument (got str instance instead)
Any ideas? :-(
I also got the same error on Debian 9.3 (stretch) today. Explicitly calling Gst.init resolved the problem.
The following code popped up an xvimagesink window on my system with both Python 2.7 and 3.5:
#!/usr/bin/python
import sys
import gi
gi.require_version('GLib', '2.0')
gi.require_version('Gst', '1.0')
from gi.repository import GLib, Gst
Gst.init(sys.argv)
udpPipe = Gst.Pipeline("player")
source = Gst.ElementFactory.make('udpsrc', None)
source.set_property("port", 5000)
#source.set_property("host", "127.0.0.1")
caps = Gst.caps_from_string("application/x-rtp, payload=127")
source.set_property("caps", caps)
rdepay = Gst.ElementFactory.make('rtph264depay', 'rdepay')
vdecode = Gst.ElementFactory.make('avdec_h264', 'vdecode')
sink = Gst.ElementFactory.make('xvimagesink', None)
sink.set_property("sync", False)
udpPipe.add(source, rdepay, vdecode, sink)
#Gst.element_link_many(source, rdepay, vdecode, sink)
source.link(rdepay)
rdepay.link(vdecode)
vdecode.link(sink)
udpPipe.set_state(Gst.State.PLAYING)
GLib.MainLoop().run()
I think it is necessary to call Gst.init and run a main loop when converting a gst-launch command line into a Python script with PyGObject.
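As a shortcut for such conversions, Gst.parse_launch can build the whole pipeline straight from the gst-launch description, so only Gst.init and the main loop remain. A minimal sketch of the same receiver:

#!/usr/bin/python
import sys
import gi
gi.require_version('GLib', '2.0')
gi.require_version('Gst', '1.0')
from gi.repository import GLib, Gst

Gst.init(sys.argv)

# The caps match the gst-launch receive command from the question
pipe = Gst.parse_launch(
    'udpsrc port=5000 caps="application/x-rtp, payload=127" ! '
    'rtph264depay ! avdec_h264 ! xvimagesink sync=false')
pipe.set_state(Gst.State.PLAYING)
GLib.MainLoop().run()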
You can use an "mjpeg" stream as follows:
gst-launch-1.0 videotestsrc ! videoconvert ! videoscale ! video/x-raw,format=I420,width=800,height=600,framerate=25/1 ! jpegenc ! rtpjpegpay ! udpsink host=127.0.0.1 port=5000
In Python 3 you can get the frames like this:
#!/usr/bin/env python

import cv2
import gi
import numpy as np

gi.require_version('Gst', '1.0')
from gi.repository import Gst


class Video():
    """BlueRov video capture class constructor

    Attributes:
        port (int): Video UDP port
        video_codec (string): Source h264 parser
        video_decode (string): Transform YUV (12bits) to BGR (24bits)
        video_pipe (object): GStreamer top-level pipeline
        video_sink (object): Gstreamer sink element
        video_sink_conf (string): Sink configuration
        video_source (string): Udp source ip and port
    """

    def __init__(self, port=5000):
        """Summary

        Args:
            port (int, optional): UDP port
        """
        Gst.init(None)

        self.port = port
        self._frame = None

        # [Software component diagram](https://www.ardusub.com/software/components.html)
        # UDP video stream (:5000)
        self.video_source = 'udpsrc port={}'.format(self.port)
        # [Rasp raw image](http://picamera.readthedocs.io/en/release-0.7/recipes2.html#raw-image-capture-yuv-format)
        # Cam -> CSI-2 -> H264 Raw (YUV 4-4-4 (12bits) I420)
        # self.video_codec = '! application/x-rtp, payload=96 ! rtph264depay ! h264parse ! avdec_h264'
        self.video_codec = '! application/x-rtp, payload=26 ! rtpjpegdepay ! jpegdec'
        # Python doesn't have a nibble type; convert YUV nibbles (4-4-4) to OpenCV standard BGR bytes (8-8-8)
        self.video_decode = \
            '! decodebin ! videoconvert ! video/x-raw,format=(string)BGR ! videoconvert'
        # Create a sink to get data
        self.video_sink_conf = \
            '! appsink emit-signals=true sync=false max-buffers=2 drop=true'

        self.video_pipe = None
        self.video_sink = None

        self.run()

    def start_gst(self, config=None):
        """ Start gstreamer pipeline and sink
        Pipeline description list e.g:
            [
                'videotestsrc ! decodebin', \
                '! videoconvert ! video/x-raw,format=(string)BGR ! videoconvert',
                '! appsink'
            ]

        Args:
            config (list, optional): Gstreamer pipeline description list
        """
        if not config:
            config = \
                [
                    'videotestsrc ! decodebin',
                    '! videoconvert ! video/x-raw,format=(string)BGR ! videoconvert',
                    '! appsink'
                ]

        command = ' '.join(config)
        self.video_pipe = Gst.parse_launch(command)
        self.video_pipe.set_state(Gst.State.PLAYING)
        self.video_sink = self.video_pipe.get_by_name('appsink0')

    @staticmethod
    def gst_to_opencv(sample):
        """Transform byte array into np array

        Args:
            sample (TYPE): Description

        Returns:
            TYPE: Description
        """
        buf = sample.get_buffer()
        caps = sample.get_caps()
        array = np.ndarray(
            (
                caps.get_structure(0).get_value('height'),
                caps.get_structure(0).get_value('width'),
                3
            ),
            buffer=buf.extract_dup(0, buf.get_size()), dtype=np.uint8)
        return array

    def frame(self):
        """ Get Frame

        Returns:
            iterable: bool and image frame, cap.read() output
        """
        return self._frame

    def frame_available(self):
        """Check if frame is available

        Returns:
            bool: true if frame is available
        """
        return self._frame is not None

    def run(self):
        """ Get frame to update _frame
        """
        self.start_gst(
            [
                self.video_source,
                self.video_codec,
                self.video_decode,
                self.video_sink_conf
            ])

        self.video_sink.connect('new-sample', self.callback)

    def callback(self, sink):
        sample = sink.emit('pull-sample')
        new_frame = self.gst_to_opencv(sample)
        self._frame = new_frame

        return Gst.FlowReturn.OK


if __name__ == '__main__':
    # Create the video object
    # Pass port= if it is necessary to use a different one
    video = Video(port=5000)

    while True:
        # Wait for the next frame
        if not video.frame_available():
            continue

        frame = video.frame()
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
REF
1: http://www.einarsundgren.se/gstreamer-basic-real-time-streaming-tutorial/
2: https://gist.github.com/patrickelectric/443645bb0fd6e71b34c504d20d475d5a

Why can the program only run once? (Python)

import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject #,Gtk
from gi.repository import Gst as gst
import datetime

def take_photo():
    GObject.threads_init()
    gst.init(None)
    pipeline = gst.Pipeline()
    video_source = gst.ElementFactory.make('v4l2src', 'video_source')
    vconvert = gst.ElementFactory.make('videoconvert', 'vconvert')
    clock = gst.ElementFactory.make('clockoverlay', 'clock')
    timer = gst.ElementFactory.make('timeoverlay', 'timer')
    vrate = gst.ElementFactory.make('videorate', 'vrate')
    sconvert = gst.ElementFactory.make('videoconvert', 'sconvert')
    png = gst.ElementFactory.make('pngenc', 'png')
    multi_sink = gst.ElementFactory.make('multifilesink', 'multi_sink')
    caps = gst.caps_from_string("video/x-raw,format=RGB,width=800,height=600,framerate=5/1")
    timer.set_property('valignment', 'bottom')
    timer.set_property('halignment', 'right')
    clock.set_property('time-format', '%Y/%m/%d %H:%M:%S')
    clock.set_property('valignment', 'bottom')
    caps1 = gst.caps_from_string("video/x-raw,framerate=1/1")
    png.set_property('snapshot', True)
    multi_sink.set_property('location', '/home/pi/frame%05d.png')
    filter = gst.ElementFactory.make("capsfilter", "filter")
    filter.set_property("caps", caps)
    filter1 = gst.ElementFactory.make("capsfilter", "filter1")
    filter1.set_property("caps", caps1)
    pipeline.add(video_source)
    pipeline.add(vconvert)
    pipeline.add(timer)
    pipeline.add(clock)
    pipeline.add(filter)
    pipeline.add(vrate)
    pipeline.add(filter1)
    pipeline.add(sconvert)
    pipeline.add(png)
    pipeline.add(multi_sink)
    video_source.link(filter)
    filter.link(vconvert)
    vconvert.link(timer)
    timer.link(clock)
    clock.link(vrate)
    vrate.link(filter1)
    filter1.link(sconvert)
    sconvert.link(png)
    png.link(multi_sink)
    bus = pipeline.get_bus()
    pipeline.set_state(gst.State.PLAYING)
    print "Capture started"
    bus = pipeline.get_bus() #class
    msg = bus.timed_pop_filtered(gst.CLOCK_TIME_NONE, gst.MessageType.ERROR | gst.MessageType.EOS)
    print msg
    pipeline.set_state(gst.State.NULL)
When the program runs for the first time, it captures the image; when I run it a second time, nothing happens, and I need to restart the whole Python program for it to run again. Can anybody help me solve this?
What about implementing it as a class, so you do a proper initialization of the GStreamer stuff, and in take_photo you just play the pipe and stop it afterwards? It should be reusable then (from the GStreamer point of view, the pipe can go to NULL and PLAYING over and over):
class TakePhoto:
    def __init__(self):
        GObject.threads_init()
        gst.init(None)
        self.pipeline = gst.Pipeline()
        # .. do the element creation and their linking etc ..

    def take_photo(self):  # this is reusable
        bus = self.pipeline.get_bus()
        self.pipeline.set_state(gst.State.PLAYING)
        print "Capture started"
        msg = bus.timed_pop_filtered(gst.CLOCK_TIME_NONE, gst.MessageType.ERROR | gst.MessageType.EOS)
        print msg
        self.pipeline.set_state(gst.State.NULL)
Then in a Python shell you can initialize one instance and call take_photo multiple times:
tp = TakePhoto()
tp.take_photo()
tp.take_photo()
This code is typed out of my head, so I take no responsibility if it boils your HDD or anything.. and also I am not used to coding in Python, so it may be full of bugs :D
But HTH.
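For completeness, here is a sketch that folds the elements from the question into that skeleton (untested; Python 2 and the same element chain as the original script):

import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject
from gi.repository import Gst as gst

class TakePhoto:
    def __init__(self):
        GObject.threads_init()
        gst.init(None)
        self.pipeline = gst.Pipeline()
        video_source = gst.ElementFactory.make('v4l2src', 'video_source')
        vconvert = gst.ElementFactory.make('videoconvert', 'vconvert')
        clock = gst.ElementFactory.make('clockoverlay', 'clock')
        timer = gst.ElementFactory.make('timeoverlay', 'timer')
        vrate = gst.ElementFactory.make('videorate', 'vrate')
        sconvert = gst.ElementFactory.make('videoconvert', 'sconvert')
        png = gst.ElementFactory.make('pngenc', 'png')
        multi_sink = gst.ElementFactory.make('multifilesink', 'multi_sink')
        timer.set_property('valignment', 'bottom')
        timer.set_property('halignment', 'right')
        clock.set_property('time-format', '%Y/%m/%d %H:%M:%S')
        clock.set_property('valignment', 'bottom')
        png.set_property('snapshot', True)
        multi_sink.set_property('location', '/home/pi/frame%05d.png')
        filter = gst.ElementFactory.make("capsfilter", "filter")
        filter.set_property("caps", gst.caps_from_string(
            "video/x-raw,format=RGB,width=800,height=600,framerate=5/1"))
        filter1 = gst.ElementFactory.make("capsfilter", "filter1")
        filter1.set_property("caps", gst.caps_from_string("video/x-raw,framerate=1/1"))
        for element in (video_source, vconvert, timer, clock, filter,
                        vrate, filter1, sconvert, png, multi_sink):
            self.pipeline.add(element)
        video_source.link(filter)
        filter.link(vconvert)
        vconvert.link(timer)
        timer.link(clock)
        clock.link(vrate)
        vrate.link(filter1)
        filter1.link(sconvert)
        sconvert.link(png)
        png.link(multi_sink)

    def take_photo(self):  # reusable: cycles NULL -> PLAYING -> NULL
        self.pipeline.set_state(gst.State.PLAYING)
        bus = self.pipeline.get_bus()
        # pngenc snapshot=true produces EOS after the first encoded frame
        msg = bus.timed_pop_filtered(gst.CLOCK_TIME_NONE,
                                     gst.MessageType.ERROR | gst.MessageType.EOS)
        print msg
        self.pipeline.set_state(gst.State.NULL)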

Audio recording script quality bad using GStreamer

Thanks to this amazing community I finished my project about capturing audio from an audio input and saving it to a file using GStreamer.
Now I would like to refine the quality of the capture. My script saves the audio as mono at 80 kbps and 44100 Hz; I would like to make it at least stereo at 128 kbps.
Here is the code. P.S. Feel free to use it anywhere you wish!
import gi
import datetime, time
import sys
import signal

gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst, Gtk

GObject.threads_init()
Gst.init(None)

pipeline = Gst.Pipeline()

autoaudiosrc = Gst.ElementFactory.make("autoaudiosrc", "autoaudiosrc")
audioconvert = Gst.ElementFactory.make("audioconvert", "audioconvert")
audioresample = Gst.ElementFactory.make("audioresample", "audioresample")
vorbisenc = Gst.ElementFactory.make("vorbisenc", "vorbisenc")
oggmux = Gst.ElementFactory.make("oggmux", "oggmux")
filesink = Gst.ElementFactory.make("filesink", "filesink")

url = str(datetime.datetime.now()) + ".ogg"  # filesink expects a string location
audioresample.quality = 10  # note: this sets a Python attribute, not the GObject property
filesink.set_property("location", url)

pipeline.add(autoaudiosrc)
pipeline.add(audioconvert)
pipeline.add(vorbisenc)
pipeline.add(oggmux)
pipeline.add(filesink)
pipeline.add(audioresample)

autoaudiosrc.link(audioconvert)
audioconvert.link(audioresample)
audioresample.link(vorbisenc)
vorbisenc.link(oggmux)
oggmux.link(filesink)

pipeline.set_state(Gst.State.PLAYING)
Gtk.main()
And here is the improved version:

import gi
import datetime, time
import sys
import signal

signal.alarm(15)

gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst, Gtk

GObject.threads_init()
Gst.init(None)

pipeline = Gst.Pipeline()

autoaudiosrc = Gst.ElementFactory.make("autoaudiosrc", "autoaudiosrc")
audioconvert = Gst.ElementFactory.make("audioconvert", "audioconv")
audioresample = Gst.ElementFactory.make("audioresample", "audioresample")
vorbisenc = Gst.ElementFactory.make("vorbisenc", "vorbisenc")
oggmux = Gst.ElementFactory.make("oggmux", "oggmux")
filesink = Gst.ElementFactory.make("filesink", "filesink")

url = str(datetime.datetime.now()) + ".ogg"  # filesink expects a string location
audioresample.set_property("quality", 10)
vorbisenc.set_property("quality", 1)
filesink.set_property("location", url)

pipeline.add(autoaudiosrc)
pipeline.add(audioconvert)
pipeline.add(vorbisenc)
pipeline.add(oggmux)
pipeline.add(filesink)
pipeline.add(audioresample)

autoaudiosrc.link(audioconvert)
audioconvert.link(audioresample)
audioresample.link(vorbisenc)
vorbisenc.link(oggmux)
oggmux.link(filesink)

pipeline.set_state(Gst.State.PLAYING)
Gtk.main()
There, it's smooth and OK now, and I like it. (I probably don't have to have the audioconvert, but it smooths the quality a tad; I tried a lot of other things, but this is the best quality I can get.)
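On the original goal of stereo at 128 kbps: vorbisenc also has a bitrate property (in bits per second) as an alternative to quality, and a capsfilter placed after audioconvert can request two channels explicitly. A sketch of just the changed part, assuming the source can actually deliver stereo:

# Request stereo at 44100 Hz on the link out of audioconvert
caps = Gst.caps_from_string("audio/x-raw,channels=2,rate=44100")
capsfilter = Gst.ElementFactory.make("capsfilter", "capsfilter")
capsfilter.set_property("caps", caps)
pipeline.add(capsfilter)

# Managed-bitrate mode at ~128 kbps instead of quality-based encoding
vorbisenc.set_property("bitrate", 128000)

audioconvert.link(capsfilter)
capsfilter.link(audioresample)
audioresample.link(vorbisenc)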

GStreamer error "assertion 'GST_IS_ELEMENT (src)' failed" when linking elements

I'm working on a GStreamer-based program using Python and the GObject introspection bindings. I'm trying to build this pipeline:
videomixer name=mix ! autovideosink \
uridecodebin uri=v4l2:///dev/video0 ! mix.
The pipeline works perfectly using gst-launch-1.0, but my Python program gives the errors:
(minimal.py:12168): GStreamer-CRITICAL **: gst_element_link_pads_full: assertion 'GST_IS_ELEMENT (src)' failed
on_error(): (GError('Internal data flow error.',), 'gstbasesrc.c(2865): gst_base_src_loop (): /GstPipeline:pipeline0/GstURIDecodeBin:uridecodebin0/GstV4l2Src:source:\nstreaming task paused, reason not-linked (-1)')
My code:
#!/usr/bin/python3
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst, Gtk, GdkX11, GstVideo

GObject.threads_init()
Gst.init(None)

class Source:
    def __init__(self, uri, pipeline, mixer):
        self.uri = uri
        self.pipeline = pipeline
        self.mixer = mixer

        self.src = Gst.ElementFactory.make('uridecodebin', None)
        self.pipeline.add(self.src)
        self.src.set_property('uri', uri)
        self.src.connect('pad-added', self.on_pad_added, self.src, self.mixer)

    def on_pad_added(self, element, pad, src, dest):
        name = pad.query_caps(None).to_string()
        print('on_pad_added:', name)
        if name.startswith('video/'):
            src.link(dest)

class Main:
    def __init__(self):
        self.window = Gtk.Window()
        self.window.connect('destroy', self.quit)
        self.window.set_default_size(1280, 720)

        self.drawingarea = Gtk.DrawingArea()
        self.window.add(self.drawingarea)

        self.pipeline = Gst.Pipeline()

        self.bus = self.pipeline.get_bus()
        self.bus.add_signal_watch()
        self.bus.connect('message::error', self.on_error)
        self.bus.enable_sync_message_emission()
        self.bus.connect('sync-message::element', self.on_sync_message)

        self.mixer = Gst.ElementFactory.make('videomixer', None)
        self.sink = Gst.ElementFactory.make('autovideosink', None)
        self.pipeline.add(self.mixer)
        self.pipeline.add(self.sink)
        self.mixer.link(self.sink)

        video = Source('v4l2:///dev/video0', self.pipeline, self.mixer)

    def run(self):
        self.window.show_all()
        # You need to get the XID after window.show_all(). You shouldn't get it
        # in the on_sync_message() handler because threading issues will cause
        # segfaults there.
        self.xid = self.drawingarea.get_property('window').get_xid()
        self.pipeline.set_state(Gst.State.PLAYING)
        Gtk.main()

    def quit(self, window):
        self.pipeline.set_state(Gst.State.NULL)
        Gtk.main_quit()

    def on_sync_message(self, bus, msg):
        if msg.get_structure().get_name() == 'prepare-window-handle':
            msg.src.set_window_handle(self.xid)

    def on_error(self, bus, msg):
        print('on_error():', msg.parse_error())

main = Main()
main.run()
I figured out the problem: I was linking the dynamically created pad incorrectly.
src.link(dest)
Should have been:
pad.link(dest.get_compatible_pad(pad, None))
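In context, the corrected handler would look like this (same names as in the code above):

def on_pad_added(self, element, pad, src, dest):
    name = pad.query_caps(None).to_string()
    print('on_pad_added:', name)
    if name.startswith('video/'):
        # Link the new decodebin source pad to a compatible (request) pad on the mixer
        pad.link(dest.get_compatible_pad(pad, None))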
This error will also occur if an element has not been added to the pipeline. Ensure that the problematic element is added to the pipeline.

gstreamer appsrc works for xvimagesink but not in theoraenc ! oggmux

I am trying to stream-cast a computer-generated video using GStreamer and Icecast, but I cannot get appsrc to work. My app works as expected if I use xvimagesink as the sink (see the commented code below), but once I pipe it to theoraenc it does not run.
I exchanged shout2send with filesink to check whether the problem was Icecast; the result is that no data is written to the file. Substituting appsrc with videotestsrc works as expected. Any suggestions?
#!/usr/bin/env python
import sys, os, pygtk, gtk, gobject
import pygst
pygst.require("0.10")
import gst
import numpy as np

class GTK_Main:
    def __init__(self):
        window = gtk.Window(gtk.WINDOW_TOPLEVEL)
        window.connect("destroy", gtk.main_quit, "WM destroy")
        vbox = gtk.VBox()
        window.add(vbox)
        self.button = gtk.Button("Start")
        self.button.connect("clicked", self.start_stop)
        vbox.add(self.button)
        window.show_all()

        self.player = gst.Pipeline("player")
        source = gst.element_factory_make("appsrc", "source")
        caps = gst.Caps("video/x-raw-gray,bpp=16,endianness=1234,width=320,height=240,framerate=(fraction)10/1")
        source.set_property('caps', caps)
        source.set_property('blocksize', 320*240*2)
        source.connect('need-data', self.needdata)
        colorspace = gst.element_factory_make('ffmpegcolorspace')
        enc = gst.element_factory_make('theoraenc')
        mux = gst.element_factory_make('oggmux')
        shout = gst.element_factory_make('shout2send')
        shout.set_property("ip", "localhost")
        shout.set_property("password", "hackme")
        shout.set_property("mount", "/stream")
        caps = gst.Caps("video/x-raw-yuv,width=320,height=240,framerate=(fraction)10/1,format=(fourcc)I420")
        enc.caps = caps
        videosink = gst.element_factory_make('xvimagesink')
        videosink.caps = caps

        self.player.add(source, colorspace, enc, mux, shout)
        gst.element_link_many(source, colorspace, enc, mux, shout)
        #self.player.add(source, colorspace, videosink)
        #gst.element_link_many(source, colorspace, videosink)

    def start_stop(self, w):
        if self.button.get_label() == "Start":
            self.button.set_label("Stop")
            self.player.set_state(gst.STATE_PLAYING)
        else:
            self.player.set_state(gst.STATE_NULL)
            self.button.set_label("Start")

    def needdata(self, src, length):
        bytes = np.int16(np.random.rand(length/2)*30000).data
        src.emit('push-buffer', gst.Buffer(bytes))

GTK_Main()
gtk.gdk.threads_init()
gtk.main()
I think that your problem is most likely to do with timestamping of the buffers. I've done some quick testing, using that code and replacing the shout element with oggdemux, theoradec, ffmpegcolorspace and ximagesink. At first, I got no output, but after I dispensed with the muxing/demuxing altogether, I got a static image, along with some debug messages about timestamps. I got the correct output after setting the is-live and do-timestamp properties to true on appsrc.
I assume that it should be possible to directly set the timestamps on the buffers that you are pushing out of appsrc, but alas I've not discovered how to do that.
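For reference, those two properties are ordinary appsrc properties and can be set like the others in the question (gst 0.10 syntax, as above):

source.set_property('is-live', True)       # behave like a live source
source.set_property('do-timestamp', True)  # let appsrc timestamp outgoing buffers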
