How to change the source file while playing? - GStreamer - Python

I am trying to create a program, that creates a HLS stream of two images, which switch when I input into the keyboard. I have this sample code:
import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst, GLib
from threading import Thread
# Start GStreamer and spin the GLib main loop on a helper thread so this
# script can keep reading from stdin below.
Gst.init(None)
main_loop = GLib.MainLoop()
loop_thread = Thread(target=main_loop.run)
loop_thread.start()

pipeline = Gst.Pipeline()

# Build the elements of the image -> H.264 -> MPEG-TS -> HLS chain.
source = Gst.ElementFactory.make("filesrc", "source")
source.set_property("location", "image1.jpeg")
decoder = Gst.ElementFactory.make("jpegdec", "decoder")
freeze = Gst.ElementFactory.make("imagefreeze", "freeze")
convert = Gst.ElementFactory.make("videoconvert", "convert")
encoder = Gst.ElementFactory.make("x264enc", "encoder")
encoder.set_property("speed-preset", "ultrafast")
encoder.set_property("tune", "zerolatency")
muxer = Gst.ElementFactory.make("mpegtsmux", "muxer")
queue = Gst.ElementFactory.make("queue", "queue")
hls = Gst.ElementFactory.make("hlssink", "hls")
hls.set_property("max-files", 0)
hls.set_property("playlist-length", 0)
hls.set_property("target-duration", 1)

# Add everything to the pipeline and link the elements pairwise in order.
chain = (source, decoder, freeze, convert, encoder, muxer, queue, hls)
pipeline.add(*chain)
for upstream, downstream in zip(chain, chain[1:]):
    upstream.link(downstream)

pipeline.set_state(Gst.State.PLAYING)

# First <Enter>: point the source at the other image.
input()
source.set_property("location", "image2.jpeg")
# Second <Enter>: tear everything down.
input()
pipeline.set_state(Gst.State.NULL)
main_loop.quit()
The problem is that when I run it and then hit Enter, the playlist.m3u8 resets, which I don't want. This is because of the state change to READY; when I remove this state change it works, but I get a warning. Is there an approach that is safe and doesn't produce a warning? Is there a way to change the source while playing?
I did find gst-interpipe, but I couldn't get it to work and have filed an appropriate bug report there.
Any help appreciated.
Thanks.

Related

gstreamer pipeline in python not saving .mp4 video correctly

I have a script in Python where I record the stream from four cameras to mp4 through gstreamer. I define a signal so that the capture terminates if Ctrl-C is pressed, and it works fine. In the gstreamer pipeline itself, I have a property added at the source numbuffers = 600 because I want the capture to stop after 600 frames anyway if I don't press Ctrl-C before then.
My problem is this, if I interrupt through the keyboard all four mp4 videos are saved correctly, but if I let it finish by itself after the 600 frames the second to fourth videos are fine while the first video will have "no playable stream", even if having the same size as the other videos.
I don't understand why only the first video is not saved or closed correctly, any hints?
This is my code:
import gi
import signal
import threading
import logging
from time import time, sleep
gi.require_version('Gst', '1.0')
from gi.repository import Gst, GObject
logging.basicConfig(level=logging.INFO)
def on_message(bus: Gst.Bus, message: Gst.Message, loop: GObject.MainLoop):
    """Bus watch callback: stop the main loop on EOS or ERROR.

    Warnings are only logged. Returning True keeps the signal watch
    installed for subsequent messages.
    """
    mtype = message.type
    if mtype == Gst.MessageType.EOS:
        logging.info("End-of-stream\n")
        loop.quit()
    elif mtype == Gst.MessageType.ERROR:
        # BUG FIX: the original logged errors with a "Warning:" prefix
        # and warnings with an "Error:" prefix — labels were swapped.
        err, debug = message.parse_error()
        logging.info("Error: %s: %s\n" % (err, debug))
        loop.quit()
    elif mtype == Gst.MessageType.WARNING:
        err, debug = message.parse_warning()
        logging.info("Warning: %s: %s\n" % (err, debug))
    return True
def signal_handler(signal, frame):
    """SIGINT handler: ask every running pipeline to finish by sending EOS."""
    for pipeline in pipelines[:n_cams]:
        pipeline.send_event(Gst.Event.new_eos())
signal.signal(signal.SIGINT, signal_handler)

# Initialize GStreamer / GLib threading support.
GObject.threads_init()
Gst.init(None)

n_cams = 4
buses = []
pipelines = []

# Build one capture -> H.265 encode -> mp4 mux pipeline per camera.
for i in range(n_cams):
    logging.info("Starting camera " + str(i))
    command = ("nvarguscamerasrc sensor-id={} num-buffers=600 ! "
               "video/x-raw(memory:NVMM), width=(int)1920, height=(int)1080, format=(string)NV12, framerate=(fraction)30/1 ! "
               "queue max-size-buffers=4 leaky=2 ! "
               "nvv4l2h265enc control-rate=1 bitrate=8000000 ! video/x-h265, stream-format=(string)byte-stream ! "
               "h265parse ! qtmux ! filesink location=test_{}.mp4").format(i)
    logging.info("Parse launch " + command)
    pipelines.append(Gst.parse_launch(command))

loop = GObject.MainLoop()

# Watch every pipeline's bus so EOS/errors reach on_message.
for pipeline in pipelines:
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", on_message, loop)
    buses.append(bus)

logging.info("Starting pipelines")
for pipeline in pipelines:
    pipeline.set_state(Gst.State.PLAYING)

loop.run()

# stop
for pipeline in pipelines:
    pipeline.send_event(Gst.Event.new_eos())
    pipeline.set_state(Gst.State.NULL)
I think you are getting lucky in the Ctrl-C case in the first place. In the Ctrl-C case, when you send an EOS you should wait for the EOS to reach the bus before setting the pipeline state to NULL.
Similar problem with your actual described problem. You have 4 pipelines but only one main loop. You quit the loop after the first pipeline reaches EOS. Instead you should wait until all pipelines have reached EOS.
P.s. I think the send_event after # stop does not actually do anything.

convert gstreamer pipeline to python code

I'm trying to convert a GStreamer pipeline to Python code using the gi library.
This is the pipeline which is running successfully in terminal:
gst-launch-1.0 rtspsrc location="rtsp://admin:123456#192.168.0.150:554/H264?ch=1&subtype=0&proto=Onvif" latency=300 ! rtph264depay ! h264parse ! nvv4l2decoder drop-frame-interval=1 ! nvvideoconvert ! video/x-raw,width=1920,height=1080,formate=I420 ! queue ! nveglglessink window-x=0 window-y=0 window-width=1080 window-height=720
But while running the same pipeline using the Python code, there is no output window displaying the RTSP stream and also no error on the terminal. The terminal is simply stuck until I press Ctrl+C.
This is the code that im using to run the gstreamer command:
import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst, GObject
def main(device):
    """Build rtspsrc ! depay ! parse ! decode ! convert ! caps ! queue ! sink
    element by element and run it until interrupted."""
    GObject.threads_init()
    Gst.init(None)

    pipeline = Gst.Pipeline()

    # RTSP source.
    source = Gst.ElementFactory.make("rtspsrc", "video-source")
    source.set_property("location", device)
    source.set_property("latency", 300)
    pipeline.add(source)

    # Depayload RTP into an H.264 elementary stream.
    depay = Gst.ElementFactory.make("rtph264depay", "depay")
    pipeline.add(depay)
    source.link(depay)

    parse = Gst.ElementFactory.make("h264parse", "parse")
    pipeline.add(parse)
    depay.link(parse)

    decoder = Gst.ElementFactory.make("nvv4l2decoder", "decoder")
    decoder.set_property("drop-frame-interval", 2)
    pipeline.add(decoder)
    parse.link(decoder)

    convert = Gst.ElementFactory.make("nvvideoconvert", "convert")
    pipeline.add(convert)
    decoder.link(convert)

    # NOTE(review): "formate" is kept verbatim from the original caps string.
    caps = Gst.Caps.from_string("video/x-raw,width=1920,height=1080,formate=I420")
    caps_filter = Gst.ElementFactory.make("capsfilter", "filter")
    caps_filter.set_property("caps", caps)
    pipeline.add(caps_filter)
    convert.link(caps_filter)

    queue = Gst.ElementFactory.make("queue", "queue")
    pipeline.add(queue)
    caps_filter.link(queue)

    # EGL/GLES on-screen sink.
    sink = Gst.ElementFactory.make("nveglglessink", "video-sink")
    sink.set_property("window-x", 0)
    sink.set_property("window-y", 0)
    sink.set_property("window-width", 1280)
    sink.set_property("window-height", 720)
    pipeline.add(sink)
    queue.link(sink)

    loop = GObject.MainLoop()
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    pipeline.set_state(Gst.State.NULL)


if __name__ == "__main__":
    main("rtsp://admin:123456#192.168.0.150:554/H264?ch=1&subtype=0&proto=Onvif")
Does anyone know what is the mistake? Thank you!
The reason it doesn't work is because rtspsrc's source pad is a so-called "Sometimes pad". The link here explains it quite well, but basically you cannot know upfront how many pads will become available on the rtspsrc, since this depends on the SDP provided by the RTSP server.
As such, you should listen to the "pad-added" signal of the rtspsrc, where you can link the rest of your pipeline to the source pad that just showed up in the callback.
So summarised:
def main(device):
    """Start an rtspsrc and defer the rest of the pipeline to pad-added."""
    GObject.threads_init()
    Gst.init(None)

    pipeline = Gst.Pipeline()

    source = Gst.ElementFactory.make("rtspsrc", "video-source")
    source.set_property("location", device)
    source.set_property("latency", 300)
    # rtspsrc only exposes source pads once the SDP is known, so the
    # downstream elements are linked from the pad-added callback.
    source.connect("pad-added", on_rtspsrc_pad_added)
    pipeline.add(source)

    # We will add/link the rest of the pipeline later
    loop = GObject.MainLoop()
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    pipeline.set_state(Gst.State.NULL)


def on_rtspsrc_pad_added(rtspsrc, pad, *user_data):
    # Create the rest of your pipeline here and link it
    depay = Gst.ElementFactory.make("rtph264depay", "depay")
    pipeline.add(depay)
    rtspsrc.link(depay)
    # and so on ....

GStreamer: textoverlay is not dynamically updated during play

I wanted to see the current CPU load on top of the video image (source is /dev/video0), and I thought textoverlay element would be perfect for this.
I have constructed a (seemingly) working pipeline, except that the textoverlay keeps showing the value originally set to it.
The pipeline is currently like this:
v4l2src > qtdemux > queue > ffmpegcolorspace > textoverlay > xvimagesink
And code looks like this (I have removed bunch of gtk window, thread handling code and some other signal handling, and only left the relevant part):
#!/usr/bin/env python
import sys, os, time, signal
import pygtk, gtk, gobject
import pygst
pygst.require("0.10")
import gst
# For cpu load stats
import psutil
from multiprocessing import Process, Value, Lock # For starting threads
class Video:
def __init__(self):
window = gtk.Window(gtk.WINDOW_TOPLEVEL)
vbox = gtk.VBox()
window.add(vbox)
self.movie_window = gtk.DrawingArea()
vbox.add(self.movie_window)
window.show_all()
# Set up the gstreamer pipeline
self.pipeline = gst.Pipeline("pipeline")
self.camera = gst.element_factory_make("v4l2src","camera")
self.camera.set_property("device","""/dev/video0""")
self.pipeline.add(self.camera)
# Demuxer
self.demuxer = gst.element_factory_make("qtdemux","demuxer")
# Create a dynamic callback for the demuxer
self.demuxer.connect("pad-added", self.demuxer_callback)
self.pipeline.add(self.demuxer)
# Demuxer doesnt have static pads, but they are created at runtime, we will need a callback to link those
self.videoqueue = gst.element_factory_make("queue","videoqueue")
self.pipeline.add(self.videoqueue)
self.videoconverter = gst.element_factory_make("ffmpegcolorspace","videoconverter")
self.pipeline.add(self.videoconverter)
## Text overlay stuff
self.textoverlay = gst.element_factory_make("textoverlay","textoverlay")
self.overlay_text = "cpu load, initializing"
self.textoverlay.set_property("text",self.overlay_text)
self.textoverlay.set_property("halign", "left")
self.textoverlay.set_property("valign", "top")
self.textoverlay.set_property("shaded-background","true")
self.pipeline.add(self.textoverlay)
self.videosink = gst.element_factory_make("xvimagesink","videosink")
self.pipeline.add(self.videosink)
self.camera.link(self.videoqueue)
gst.element_link_many(self.videoqueue, self.videoconverter, self.textoverlay, self.videosink)
bus = self.pipeline.get_bus()
bus.add_signal_watch()
bus.enable_sync_message_emission()
# Start stream
self.pipeline.set_state(gst.STATE_PLAYING)
# CPU stats calculator thread
cpu_load_thread = Process(target=self.cpu_load_calculator, args=())
cpu_load_thread.start()
def demuxer_callback(self, dbin, pad):
if pad.get_property("template").name_template == "video_%02d":
print "Linking demuxer & videopad"
qv_pad = self.videoqueue.get_pad("sink")
pad.link(qv_pad)
def cpu_load_calculator(self):
cpu_num = len( psutil.cpu_percent(percpu=True))
while True:
load = psutil.cpu_percent(percpu=True)
self.parsed_load = ""
for i in range (0,cpu_num):
self.parsed_load = self.parsed_load + "CPU%d: %s%% " % (i, load[i])
print self.textoverlay.get_property("text") # Correctly prints previous cycle CPU load
self.textoverlay.set_property("text",self.parsed_load)
time.sleep(2)
# Build the window/pipeline, then hand control over to GTK.
app = Video()
gtk.threads_init()
gtk.main()
The cpu_load_calculator keeps running in the background, and before I set the new value, I print out the previous one using the get_property() function, and it is set properly. However, on the actual video output window, it keeps showing the initial value.
How can I make the textoverlay to update properly also to the video window ?
The problem is that you are trying to update textoverlay from different Process. And processes unlike threads run in separate address space.
You can switch to threads:
from threading import Thread
...
# CPU stats calculator thread
cpu_load_thread = Thread(target=self.cpu_load_calculator, args=())
cpu_load_thread.start()
Or you can run the cpu_load_calculator loop from the main thread. This will work because self.pipeline.set_state(gst.STATE_PLAYING) starts its own thread in the background.
So this will be enough:
# Start stream
self.pipeline.set_state(gst.STATE_PLAYING)
# CPU stats calculator loop
self.cpu_load_calculator()

python gstreamer play multiple video streams

I'm participating in an art project which includes remotely playing videos. I've implemented a simple python application with a HTTP server and a gstreamer video player. I'm able to catch a http request and change the video which is currently playing, but I'd like to just add the new video in the same window and continue playing two videos simultaniously.
I've used playbin2 to play videos, but I think it can only play one uri at the time. I've tried to find other solutions which could play several videos at the same time, but no use...
Could anyone please post a simple example of playing multiple streams at the same time, or give me some pointers to the documentation or other resources??
Thanks in advance!!
PS. Here's the code I wrote: the VideoPlayer class initializes the stream and the the playCurrent function switches the currently played video - I'd like that function just to add the new video to the stream.
#!/usr/bin/python
import threading
import time
import BaseHTTPServer
from BaseHTTPServer import HTTPServer
from urlparse import urlparse, parse_qs
from os import path
import gst
import gtk
HOST_NAME = 'localhost' # !!!REMEMBER TO CHANGE THIS!!!
PORT_NUMBER = 9000 # Maybe set this to 9000.
#################################################################
# VIDEO DICTIONARY
# Manages the video database
#################################################################
# VideoDictionary class
#################################################################
# This class allows to access the video database
# used by the video player - for best performance, it's a native
# python dictionary
class VideoDictionary():
    """Tiny in-memory "database" mapping video ids to file:// URIs."""
    # Absolute paths of the bundled clips (next to this script).
    filename = path.join(path.dirname(path.abspath(__file__)), 'large.mp4')
    filename_02 = path.join(path.dirname(path.abspath(__file__)), '01.avi')
    # Their file:// URIs.
    uri = 'file://' + filename
    uri_02 = 'file://' + filename_02
    # Id -> URI lookup table.
    videoDict = {"01": uri, "02": uri_02}
    # Id of the video that should be playing right now.
    currentVideo = "01"
#################################################################
# VIDEO DICTIONARY END
#################################################################
#################################################################
# VIDEO PLAYER
# Manages all the video playing
#################################################################
# VideoPlayer class
#################################################################
# This class initializes the GST pipe context and it
# handles different events related to video stream playing
class VideoPlayer(object, VideoDictionary):
VideoDictionary = ""
def __init__(self, VideoDictionary):
self.VideoDictionary = VideoDictionary
self.window = gtk.Window()
self.window.connect('destroy', self.quit)
self.window.set_default_size(1024, 768)
self.drawingarea = gtk.DrawingArea()
self.window.add(self.drawingarea)
# Create GStreamer pipeline
self.pipeline = gst.Pipeline()
# Create bus to get events from GStreamer pipeline
self.bus = self.pipeline.get_bus()
# This is needed to make the video output in our DrawingArea:
self.bus.enable_sync_message_emission()
self.bus.connect('sync-message::element', self.on_sync_message)
# Create GStreamer elements
self.playbin = gst.element_factory_make('playbin2')
# Add playbin2 to the pipeline
self.pipeline.add(self.playbin)
self.window.show_all()
self.xid = self.drawingarea.window.xid
print('DEBUG INFO: player initialization finished')
def playCurrent(self):
print('DEBUG INFO: getting running video ')
print(self.VideoDictionary.currentVideo)
self.pipeline.set_state(gst.STATE_READY)
self.playbin.set_property('uri', self.VideoDictionary.videoDict[self.VideoDictionary.currentVideo])
self.pipeline.set_state(gst.STATE_PLAYING)
def quit(self, window):
print('DEBUG INFO: quitting player')
self.pipeline.set_state(gst.STATE_NULL)
gtk.main_quit()
def on_sync_message(self, bus, msg):
if msg.structure.get_name() == 'prepare-xwindow-id':
msg.src.set_property('force-aspect-ratio', True)
msg.src.set_xwindow_id(self.xid)
def on_eos(self, bus, msg):
print('DEBUG INFO: EOS detected')
print('on_eos(): seeking to start of video')
self.pipeline.seek_simple(
gst.FORMAT_TIME,
gst.SEEK_FLAG_FLUSH | gst.SEEK_FLAG_KEY_UNIT,
0L
)
def on_error(self, bus, msg):
print('DEBUG INFO: error detected')
print('on_error():', msg.parse_error())
#################################################################
# VIDEO PLAYER END
#################################################################
#################################################################
# HTTP SERVER
# implements the http listener in a separate thread
# the listener plays the videos depending on the
# received parameters in the GET request
#################################################################
# HttpHandler class
#################################################################
# uses global variables to operate videos
class HttpHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Toggles the current video between "01" and "02" on every GET."""

    def do_GET(self):
        global VideoDictionary
        print('DEBUG INFO: GET running playCurrent')
        # Flip the selection over to the other clip.
        if VideoDictionary.currentVideo == "01":
            VideoDictionary.currentVideo = "02"
        else:
            VideoDictionary.currentVideo = "01"
        # play the video we have just set
        global player
        player.playCurrent()
# HttpThread class
#################################################################
# initializes the http listener in a separate thread
class HttpThread (threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def run(self):
gtk.gdk.threads_enter()
server_class = BaseHTTPServer.HTTPServer
httpd = server_class((HOST_NAME, PORT_NUMBER), HttpHandler)
print time.asctime(), "Server Starts - %s:%s" % (HOST_NAME, PORT_NUMBER)
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
print time.asctime(), "Server Stops - %s:%s" % (HOST_NAME, PORT_NUMBER)
gtk.gdk.threads_leave()
return
#################################################################
# HTTP SERVER END
#################################################################
if __name__ == '__main__':
    # The class names are rebound to the singleton instances used globally.
    VideoDictionary = VideoDictionary()
    player = VideoPlayer(VideoDictionary)
    gtk.gdk.threads_init()
    thread2 = HttpThread()
    thread2.run()
    gtk.gdk.threads_enter()
    gtk.main()
    gtk.gdk.threads_leave()
Here is a simple example of code that plays multiple video streams at the same time.
It works with Python 2 and 3 and it uses the standard Python GUI (Tk) and Gstreamer 1.0. It should therefore be portable, but I only tested it under Ubuntu 16.04.
(The ffmpeg fork libav created problems under Ubuntu 14.04, which seem to be solved under 16.04. Note that you need the package gstreamer1.0-libav in addition to gstreamer1.0-plugins-*.)
The code is configured to create eight frames in a column, and to associate a Gstreamer player with each of them. You are required to give a list of (up to eight) valid local video file names as arguments to the file into which you saved it (say multivid.py), like this:
$ python3 multivid.py video1.webm video2.mp4
The sound channels are simply mixed together. You probably want to change this.
My solution does not address remote playing, but you have already solved that part.
I previously posted the same code in an answer to another question on video files in tkinter, where the question did not ask for simultaneous streams. It is therefore more appropriate here.
import sys
import os
if sys.version_info[0] < 3:
import Tkinter as tkinter
else:
import tkinter
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst, GObject
# Needed for set_window_handle():
gi.require_version('GstVideo', '1.0')
from gi.repository import GstVideo
def set_frame_handle(bus, message, frame_id):
    """Sync-message handler: embed the video sink into the Tk frame.

    When the sink announces 'prepare-window-handle', hand it the Tk
    frame's window id so the video is rendered inside that frame.
    """
    structure = message.get_structure()
    if structure is None:
        return
    if structure.get_name() == 'prepare-window-handle':
        sink = message.src
        sink.set_property('force-aspect-ratio', True)
        sink.set_window_handle(frame_id)
NUMBER_OF_FRAMES = 8  # with more frames than arguments, videos are repeated
relative_height = 1 / float(NUMBER_OF_FRAMES)

# Only argument number checked, not validity.
number_of_file_names_given = len(sys.argv) - 1
if number_of_file_names_given < 1:
    print('Give at least one video file name.')
    sys.exit()
if number_of_file_names_given < NUMBER_OF_FRAMES:
    print('Up to', NUMBER_OF_FRAMES, 'video file names can be given.')

file_names = [sys.argv[index + 1] for index in range(number_of_file_names_given)]

window = tkinter.Tk()
window.title("Multiple videos in a column using Tk and GStreamer 1.0")
window.geometry('480x960')

Gst.init(None)
GObject.threads_init()

# One Tk frame plus one playbin per slot; file names repeat when fewer
# arguments were given than there are frames.
for number in range(NUMBER_OF_FRAMES):
    display_frame = tkinter.Frame(window, bg='')
    relative_y = number * relative_height
    display_frame.place(relx=0, rely=relative_y,
                        anchor=tkinter.NW, relwidth=1,
                        relheight=relative_height)
    frame_id = display_frame.winfo_id()

    player = Gst.ElementFactory.make('playbin', None)
    fullname = os.path.abspath(file_names[number % len(file_names)])
    player.set_property('uri', 'file://%s' % fullname)
    player.set_state(Gst.State.PLAYING)

    bus = player.get_bus()
    bus.enable_sync_message_emission()
    bus.connect('sync-message::element', set_frame_handle, frame_id)

window.mainloop()
If you save the handles to the players (say in player_list), you can later change the uri that it is playing in one of them like this:
# Illustrative fragment: swap the clip played by player number `index`.
# (`player_list` and `fileName` are assumed to exist in the surrounding
# application — they are not defined in the snippet above.)
player_list[index].set_state(Gst.State.NULL)
player_list[index].set_property('uri', 'file://%s' % fileName)
player_list[index].set_state(Gst.State.PLAYING)

Gstreamer of python's gst.LinkError problem

I am wiring a gstreamer application with Python. And I get a LinkError with following code:
import pygst
pygst.require('0.10')
import gst
import pygtk
pygtk.require('2.0')
import gtk
# Important: without this, callbacks arriving from GStreamer's streaming
# threads would corrupt the GTK main loop's state.
gtk.gdk.threads_init()


def main():
    # filesrc -> decodebin -> audioconvert -> autoaudiosink, linked eagerly.
    pipeline = gst.Pipeline('pipleline')
    filesrc = gst.element_factory_make("filesrc", "filesrc")
    filesrc.set_property('location', 'C:/a.mp3')
    decode = gst.element_factory_make("decodebin", "decode")
    convert = gst.element_factory_make('audioconvert', 'convert')
    sink = gst.element_factory_make("autoaudiosink", "sink")
    pipeline.add(filesrc, decode, convert, sink)
    # This is where the gst.LinkError is raised (see the answer below the
    # question): decodebin has no source pads yet at this point.
    gst.element_link_many(filesrc, decode, convert, sink)
    pipeline.set_state(gst.STATE_PLAYING)
    gtk.main()


main()
And the error:
ImportError: could not import gio
Traceback (most recent call last):
File "H:\workspace\ggg\src\test2.py", line 37, in <module>
main()
File "H:\workspace\ggg\src\test2.py", line 31, in main
gst.element_link_many(filesrc, decode, convert, sink)
gst.LinkError: failed to link decode with convert
It is very strange, with same pipeline, but built with parse_launch, it works. Here is the code:
import pygst
pygst.require('0.10')
import gst
import pygtk
pygtk.require('2.0')
import gtk
# Important: without this, callbacks arriving from GStreamer's streaming
# threads would corrupt the GTK main loop's state.
gtk.gdk.threads_init()


def main():
    # Same pipeline, but parse_launch takes care of dynamic-pad linking.
    player = gst.parse_launch('filesrc location=C:/a.mp3 ! decodebin ! audioconvert ! autoaudiosink')
    player.set_state(gst.STATE_PLAYING)
    gtk.main()


main()
Here comes the question: why does the manual one fail, but the parsed one succeed? What's wrong with it? How can I fix it?
Thanks.
your problem is here:
gst.element_link_many(filesrc, decode, convert, sink)
the reason is that not all elements have simple, static inputs and outputs. at this point in your program, your decodebin does not have any source pads (that is: no outputs).
a pad is like a nipple - it's an input / output to an element. pads can appear, disappear or just sit there. there are three classes of pads: static pads (the easiest and what you would expect), request pads (that appear only when you ask for them) and sometimes pads (that appear only when the element wants to make them appear). the outputs of decodebin are sometimes pads.
if you inspect the output of gst-inspect decodebin, you can see this for yourself:
Pad Templates:
SINK template: 'sink'
Availability: Always
Capabilities:
ANY
SRC template: 'src%d'
Availability: Sometimes
Capabilities:
ANY
at line 26 of your program, you can't link decode to anything, because it doesn't have any source pads to link with. source pads on a decodebin appear only as the input stream is decoded: this doesn't happen instantaneously. any number of source pads may appear (e.g one for an audio stream, two for a video stream with audio, none for an un-decodable stream).
you need to wait until the pads are created, and then link them. decodebin emits a signal, "new-decoded-pad" to tell you when this happens (this is also documented in gst-inspect decodebin). you must connect a callback function to this signal, and link your decode and audioconvert in the callback. here is your corrected code:
#!/usr/bin/python
import pygst
pygst.require('0.10')
import gst
import pygtk
pygtk.require('2.0')
import gtk
# this is very important, without this, callbacks from gstreamer thread
# will messed our program up
gtk.gdk.threads_init()
def on_new_decoded_pad(dbin, pad, islast):
decode = pad.get_parent()
pipeline = decode.get_parent()
convert = pipeline.get_by_name('convert')
decode.link(convert)
pipeline.set_state(gst.STATE_PLAYING)
print "linked!"
def main():
pipeline = gst.Pipeline('pipleline')
filesrc = gst.element_factory_make("filesrc", "filesrc")
filesrc.set_property('location', 'C:/a.mp3')
decode = gst.element_factory_make("decodebin", "decode")
convert = gst.element_factory_make('audioconvert', 'convert')
sink = gst.element_factory_make("autoaudiosink", "sink")
pipeline.add(filesrc, decode, convert, sink)
gst.element_link_many(filesrc, decode)
gst.element_link_many(convert, sink)
decode.connect("new-decoded-pad", on_new_decoded_pad)
pipeline.set_state(gst.STATE_PAUSED)
gtk.main()
main()
gst.parse_launch works because it takes care of all these niggly details for you. there is also the high level element playbin which automatically creates and links a decodebin internally.

Categories