I have a problem where the code below works fine the first time I run it, but after I kill the process and try again, new_sample is never called and no data is received, even though the pipeline is created and the main loop starts successfully.
To get it to run again, I have to restart my computer. I suspect it is related to cleaning up the GStreamer pipeline, but I have already tried numerous things, and for the life of me I can't find a lingering GStreamer process in ps aux. I should also mention that if I run gst-launch-1.0 with the equivalent pipeline, it works every time, so I think the problem is specific to the Python implementation.
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst, GObject

def new_sample(appsink):
    sample = appsink.emit('pull-sample')
    print("pull sample")
    buffer = sample.get_buffer()
    print("got buffer")
    data = buffer.extract_dup(0, buffer.get_size())
    save_image(data)
    return Gst.FlowReturn.OK  # 'new-sample' handlers should return a Gst.FlowReturn
# Equivalent gst-launch-1.0 pipeline:
# gst-launch-1.0 -v tcpclientsrc host=YOUR-PI-IP-ADDRESS port=5000 ! gdpdepay ! rtph264depay ! avdec_h264 ! videoconvert ! autovideosink sync=false
def start_consume(ip=DEFAULT_IP_ADDRESS, port=DEFAULT_PORT):
    global pipeline
    GObject.threads_init()
    Gst.init(None)
    pipeline = Gst.Pipeline()
    tcpsrc = Gst.ElementFactory.make('tcpclientsrc', 'tcpsrc')
    tcpsrc.set_property("host", ip)
    tcpsrc.set_property("port", port)
    gdepay = Gst.ElementFactory.make('gdpdepay', 'gdepay')
    rdepay = Gst.ElementFactory.make('rtph264depay', 'rdepay')
    avdec = Gst.ElementFactory.make('avdec_h264', 'avdec')
    vidconvert = Gst.ElementFactory.make('videoconvert', 'vidconvert')
    asink = Gst.ElementFactory.make('appsink', 'asink')
    asink.set_property('sync', False)
    asink.set_property('emit-signals', True)
    asink.set_property('drop', True)
    asink.connect('new-sample', new_sample)
    asink.connect('pull-sample', pull_sample)
    asink.connect('pull-preroll', pull_preroll)
    pipeline.add(tcpsrc)
    pipeline.add(gdepay)
    pipeline.add(rdepay)
    pipeline.add(avdec)
    pipeline.add(vidconvert)
    pipeline.add(asink)
    tcpsrc.link(gdepay)
    gdepay.link(rdepay)
    rdepay.link(avdec)
    avdec.link(vidconvert)
    vidconvert.link(asink)
    pipeline.set_state(Gst.State.PLAYING)
    return pipeline
if __name__ == "__main__":
    try:
        loop = GObject.MainLoop()
        pipeline = start_consume()
        loop.run()
    except KeyboardInterrupt:
        print("Closing pipeline")
        pipeline.set_state(Gst.State.NULL)
        loop.quit()
I found that the problem was in how the GStreamer pipeline was being shut down: closing with Ctrl+\ works, while closing with Ctrl+C doesn't.
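For a shutdown that doesn't depend on which signal arrives, the usual approach is to send EOS down the pipeline and wait for it to reach the bus before dropping the state to NULL. A minimal sketch, assuming the pipeline and loop from the code above:

try:
    loop.run()
except KeyboardInterrupt:
    # Ask the pipeline to flush instead of dropping straight to NULL.
    pipeline.send_event(Gst.Event.new_eos())
    bus = pipeline.get_bus()
    # Block until the EOS (or an error) actually reaches the bus.
    bus.timed_pop_filtered(Gst.CLOCK_TIME_NONE,
                           Gst.MessageType.EOS | Gst.MessageType.ERROR)
    pipeline.set_state(Gst.State.NULL)
    loop.quit()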
I'm new to GStreamer and trying to create an RTSP server that consumes a source once, no matter how many output streams are requested. Code follows. In the factory's do_create_element(), if I return the source_pipeline, I am able to connect to the server and consume the stream. However, as expected, if I connect multiple clients, the server crashes with an error saying the src pad is already connected. My idea was to use the tee element, add a queue after it, and wrap the queue in a pipeline (since I need a GstBin to return from the factory). However, this does not work. When I connect a first client to the server, nothing happens server-side (as in, no crashes; the function is called and exits successfully). Client-side, however, no stream is read, and I get an 'SDP contains no streams' error (see the console output after the code).
I tried connecting the tee manually to the queue (line left in a comment) instead of letting the pipeline do it, but nothing changes. I suspect the queue needs something to consume its output, but I'm not sure what to use (or whether that's even the actual issue).
Here is a minimal example:
import gi
gi.require_version("Gst", "1.0")
gi.require_version("GstRtspServer", "1.0")
# noinspection PyUnresolvedReferences
from gi.repository import Gst, GstRtspServer, GObject
def create_source():
    s_src = "videotestsrc ! video/x-raw,rate=30,width=320,height=240,format=I420"
    s_h264 = "x264enc tune=zerolatency"
    pipeline_str = "( {s_src} ! queue max-size-buffers=1000 name=q_enc ! {s_h264} ! rtph264pay name=pay0 pt=96 )".format(
        **locals()
    )
    return Gst.parse_launch(pipeline_str)
def _main():
    GObject.threads_init()
    Gst.init(None)
    source = create_source()
    tee = Gst.ElementFactory.make("tee", "tee")
    source_pipeline = Gst.Pipeline()
    source_pipeline.add(source)
    source_pipeline.add(tee)
    source_pipeline.set_state(Gst.State.PLAYING)

    class Factory(GstRtspServer.RTSPMediaFactoryURI):
        def __init__(self):
            super(Factory, self).__init__()
            self._n = 0

        def do_create_element(self, url):  # -> GstBin (Pipeline)
            print("in [Factory] do_create_element")
            source_pipeline.set_state(Gst.State.PAUSED)
            stream_pipeline = Gst.Pipeline()
            # Create a new queue to buffer the tee's output
            q = Gst.ElementFactory.make("queue")
            stream_pipeline.add(q)
            source_pipeline.add(tee)
            source_pipeline.add(stream_pipeline)
            # tee.link(q)
            stream_pipeline.set_state(Gst.State.PLAYING)
            source_pipeline.set_state(Gst.State.PLAYING)
            return stream_pipeline

    factory = Factory()
    factory.set_shared(True)
    server = GstRtspServer.RTSPServer()
    server.get_mount_points().add_factory("/endpoint", factory)
    server.attach(None)
    loop = GObject.MainLoop()
    loop.run()

if __name__ == "__main__":
    _main()
Here is the shell output when connecting:
> gst-launch-1.0 rtspsrc location=rtsp://127.0.0.1:8554/endpoint ! rtph264depay ! h264parse ! mp4mux ! filesink location=file.mp4
Setting pipeline to PAUSED ...
Pipeline is live and does not need PREROLL ...
Progress: (open) Opening Stream
Pipeline is PREROLLED ...
Prerolled, waiting for progress to finish...
Progress: (connect) Connecting to rtsp://127.0.0.1:8554/endpoint
Progress: (open) Retrieving server options
Progress: (open) Retrieving media info
ERROR: from element /GstPipeline:pipeline0/GstRTSPSrc:rtspsrc0: Could not get/set settings from/on resource.
Additional debug info:
../gst/rtsp/gstrtspsrc.c(7637): gst_rtspsrc_setup_streams_start (): /GstPipeline:pipeline0/GstRTSPSrc:rtspsrc0:
SDP contains no streams
ERROR: pipeline doesn't want to preroll.
Setting pipeline to NULL ...
Freeing pipeline ...
Thanks ahead!
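For reference, a commonly suggested alternative to manually tee-ing across pipelines is to let the factory build the complete pipeline itself and rely on set_shared(True), so the server reuses a single media pipeline for every connecting client. A minimal sketch of that pattern, assuming a plain RTSPMediaFactory (not the URI variant) and the same element chain as in create_source above:

class SharedFactory(GstRtspServer.RTSPMediaFactory):
    def do_create_element(self, url):
        # With set_shared(True), the server creates this pipeline once
        # and reuses it for every connecting client.
        return Gst.parse_launch(
            "( videotestsrc ! video/x-raw,width=320,height=240,format=I420 "
            "! queue ! x264enc tune=zerolatency ! rtph264pay name=pay0 pt=96 )"
        )

factory = SharedFactory()
factory.set_shared(True)
server = GstRtspServer.RTSPServer()
server.get_mount_points().add_factory("/endpoint", factory)
server.attach(None)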
I want to create a program in Python that lets me edit moving text shown on top of a video. I want to do that with a web page overlay (WPEWebKit).
I have found some code to start a pipeline with WPE and gst-launch-1.0. The problem is that when I run gst-launch-1.0 -v wpesrc location="https://gstreamer.freedesktop.org" ! queue ! glimagesink (from https://gstreamer.freedesktop.org/documentation/wpe/wpesrc.html), it says WARNING: erroneous pipeline: no element "wpesrc". I have checked, and I do have gstreamer1.0-plugins-bad installed.
The other challenge is that I want to build it as a Python application. Here's my code so far:
import threading
import time
import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst, GLib
Gst.init()
main_loop = GLib.MainLoop()
thread = threading.Thread(target=main_loop.run)
thread.start()
pipeline = Gst.parse_launch("videotestsrc ! decodebin ! videoconvert ! autovideosink")
pipeline.set_state(Gst.State.PLAYING)
try:
    while True:
        time.sleep(0.1)
except KeyboardInterrupt:
    pass
pipeline.set_state(Gst.State.NULL)
main_loop.quit()
I will probably rewrite this later because I want it in a GTK application.
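One thing worth checking from Python is whether the element is registered at all: Gst.ElementFactory.find() returns None for unknown elements, which matches the gst-launch-1.0 warning. On some distributions the WPE plugin also ships in its own package, separate from the main plugins-bad package (something like gstreamer1.0-wpe on Debian/Ubuntu, an assumption worth verifying). A small sketch:

import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst

Gst.init()
# ElementFactory.find() returns None when the element is not registered,
# which corresponds to gst-launch's "no element" warning.
if Gst.ElementFactory.find("wpesrc") is None:
    print("wpesrc is not registered; the WPE plugin may need a separate package")
else:
    print("wpesrc is available")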
I have a script in Python where I record the stream from four cameras to mp4 through GStreamer. I define a signal handler so that the capture terminates if Ctrl-C is pressed, and it works fine. In the GStreamer pipeline itself I set num-buffers=600 on the source, because I want the capture to stop after 600 frames anyway if I don't press Ctrl-C before then.
My problem is this: if I interrupt from the keyboard, all four mp4 videos are saved correctly, but if I let it finish by itself after the 600 frames, the second to fourth videos are fine while the first video has "no playable stream", even though it is the same size as the other videos.
I don't understand why only the first video is not saved or closed correctly. Any hints?
This is my code:
import gi
import signal
import threading
import logging
from time import time, sleep
gi.require_version('Gst', '1.0')
from gi.repository import Gst, GObject
logging.basicConfig(level=logging.INFO)
def on_message(bus: Gst.Bus, message: Gst.Message, loop: GObject.MainLoop):
    # GStreamer message types and how to parse them
    mtype = message.type
    if mtype == Gst.MessageType.EOS:
        logging.info("End-of-stream\n")
        loop.quit()
    elif mtype == Gst.MessageType.ERROR:
        err, debug = message.parse_error()
        logging.info("Error: %s: %s\n" % (err, debug))
        loop.quit()
    elif mtype == Gst.MessageType.WARNING:
        err, debug = message.parse_warning()
        logging.info("Warning: %s: %s\n" % (err, debug))
    return True

def signal_handler(sig, frame):
    for i in range(0, n_cams):
        pipelines[i].send_event(Gst.Event.new_eos())

signal.signal(signal.SIGINT, signal_handler)

# Initialize
GObject.threads_init()
Gst.init(None)
n_cams = 4
buses = []
pipelines = []
for i in range(0, n_cams):
    logging.info("Starting camera " + str(i))
    command = ("nvarguscamerasrc sensor-id={} num-buffers=600 ! "
               "video/x-raw(memory:NVMM), width=(int)1920, height=(int)1080, format=(string)NV12, framerate=(fraction)30/1 ! "
               "queue max-size-buffers=4 leaky=2 ! "
               "nvv4l2h265enc control-rate=1 bitrate=8000000 ! video/x-h265, stream-format=(string)byte-stream ! "
               "h265parse ! qtmux ! filesink location=test_{}.mp4").format(i, i)  # one value per {} placeholder
    logging.info("Parse launch " + command)
    pipelines.append(Gst.parse_launch(command))

loop = GObject.MainLoop()
for i in range(0, n_cams):
    buses.append(pipelines[i].get_bus())
    buses[i].add_signal_watch()
    buses[i].connect("message", on_message, loop)

logging.info("Starting pipelines")
for i in range(0, n_cams):
    pipelines[i].set_state(Gst.State.PLAYING)

loop.run()

# stop
for i in range(0, n_cams):
    pipelines[i].send_event(Gst.Event.new_eos())
    pipelines[i].set_state(Gst.State.NULL)
I think you are getting lucky in the Ctrl-C case in the first place. When you send an EOS, you should wait for the EOS to reach the bus before setting the pipeline state to NULL.
The same applies to the problem you describe: you have 4 pipelines but only one main loop, and you quit the loop after the first pipeline reaches EOS. Instead, you should wait until all pipelines have reached EOS.
P.S. I think the send_event after # stop does not actually do anything.
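A minimal sketch of that change, counting EOS messages so the loop only quits once every pipeline has flushed (assuming the n_cams, pipelines, and loop variables from the question):

eos_count = 0

def on_message(bus, message, loop):
    global eos_count
    if message.type == Gst.MessageType.EOS:
        eos_count += 1
        # Only stop the main loop once all pipelines have finished flushing.
        if eos_count == n_cams:
            loop.quit()
    elif message.type == Gst.MessageType.ERROR:
        err, debug = message.parse_error()
        logging.info("Error: %s: %s" % (err, debug))
        loop.quit()
    return True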
I have 2 pieces of code that have worked fine up until now. Code A (FileRecord) creates a pipeline and writes whatever it hears from the mic to an .ogg file named file.ogg. Code B (FilePlayer) must start after the user manually stops (Ctrl+C) the execution of script A, and plays through pulsesink (the user's headset) whatever the recorder captured during execution (file.ogg).
My goal is to make the 2 pieces of code work simultaneously, playing the file from code B while code A is still recording.
Code A (FileRecord)
from time import sleep
from threading import Thread  # needed for the main-loop thread below
import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst
from gi.repository import GLib
import signal

signal.signal(signal.SIGTSTP, signal.SIG_IGN)  # when Ctrl+Z is pressed the file is not playable, so ignore Ctrl+Z
print("In order to stop press: Ctrl C.")
Gst.init()
main_loop = GLib.MainLoop()
main_loop_thread = Thread(target=main_loop.run)
main_loop_thread.start()
#buffer = gst_buffer_new ()
pipeline = Gst.parse_launch('autoaudiosrc ! audioconvert ! tee name="source" ! queue ! vorbisenc ! oggmux ! filesink location=file.ogg')
pipeline.set_state(Gst.State.PLAYING)
try:
    while True:
        sleep(0.1)
except KeyboardInterrupt:
    pass
pipeline.set_state(Gst.State.NULL)
main_loop.quit()
main_loop_thread.join()
Code B (FilePlayer)
from time import sleep
from threading import Thread  # needed for the main-loop thread below
import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst
from gi.repository import GLib
import signal

Gst.init()
main_loop = GLib.MainLoop()
main_loop_thread = Thread(target=main_loop.run)
main_loop_thread.start()
pipeline = Gst.parse_launch('filesrc location=file.ogg ! decodebin ! pulsesink')
pipeline.set_state(Gst.State.PLAYING)
try:
    while True:
        sleep(0.1)
except KeyboardInterrupt:
    pass
pipeline.set_state(Gst.State.NULL)
main_loop.quit()
main_loop_thread.join()
I don't know how to achieve this kind of simultaneous sound streaming. Please help me!
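One possible approach, sketched below under the assumption that live monitoring of the microphone is acceptable: Code A already contains a tee named source, so a second branch on that same tee can play the audio through pulsesink while the first branch keeps writing file.ogg, all in a single pipeline and a single process:

from time import sleep
import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst

Gst.init()
# The tee duplicates the audio: one branch records to file.ogg,
# the other plays it live through pulsesink.
pipeline = Gst.parse_launch(
    'autoaudiosrc ! audioconvert ! tee name=source '
    'source. ! queue ! vorbisenc ! oggmux ! filesink location=file.ogg '
    'source. ! queue ! audioconvert ! pulsesink'
)
pipeline.set_state(Gst.State.PLAYING)
try:
    while True:
        sleep(0.1)
except KeyboardInterrupt:
    pass
pipeline.set_state(Gst.State.NULL)

This also sidesteps reading file.ogg while the muxer is still writing it, which is what makes running A and B at the same time tricky.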
I'm trying to convert a GStreamer pipeline to Python code using the gi library.
This is the pipeline, which runs successfully in the terminal:
gst-launch-1.0 rtspsrc location="rtsp://admin:123456#192.168.0.150:554/H264?ch=1&subtype=0&proto=Onvif" latency=300 ! rtph264depay ! h264parse ! nvv4l2decoder drop-frame-interval=1 ! nvvideoconvert ! video/x-raw,width=1920,height=1080,formate=I420 ! queue ! nveglglessink window-x=0 window-y=0 window-width=1080 window-height=720
But when I run the same pipeline from Python code, there is no output window displaying the RTSP stream, and also no error on the terminal. The terminal simply hangs until I press Ctrl+C.
This is the code that I'm using to run the GStreamer command:
import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst, GObject
def main(device):
    GObject.threads_init()
    Gst.init(None)
    pipeline = Gst.Pipeline()
    source = Gst.ElementFactory.make("rtspsrc", "video-source")
    source.set_property("location", device)
    source.set_property("latency", 300)
    pipeline.add(source)
    depay = Gst.ElementFactory.make("rtph264depay", "depay")
    pipeline.add(depay)
    source.link(depay)
    parse = Gst.ElementFactory.make("h264parse", "parse")
    pipeline.add(parse)
    depay.link(parse)
    decoder = Gst.ElementFactory.make("nvv4l2decoder", "decoder")
    decoder.set_property("drop-frame-interval", 2)
    pipeline.add(decoder)
    parse.link(decoder)
    convert = Gst.ElementFactory.make("nvvideoconvert", "convert")
    pipeline.add(convert)
    decoder.link(convert)
    caps = Gst.Caps.from_string("video/x-raw,width=1920,height=1080,formate=I420")
    filter = Gst.ElementFactory.make("capsfilter", "filter")
    filter.set_property("caps", caps)
    pipeline.add(filter)
    convert.link(filter)
    queue = Gst.ElementFactory.make("queue", "queue")
    pipeline.add(queue)
    filter.link(queue)
    sink = Gst.ElementFactory.make("nveglglessink", "video-sink")
    sink.set_property("window-x", 0)
    sink.set_property("window-y", 0)
    sink.set_property("window-width", 1280)
    sink.set_property("window-height", 720)
    pipeline.add(sink)
    queue.link(sink)
    loop = GObject.MainLoop()
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    pipeline.set_state(Gst.State.NULL)

if __name__ == "__main__":
    main("rtsp://admin:123456#192.168.0.150:554/H264?ch=1&subtype=0&proto=Onvif")
Does anyone know what the mistake is? Thank you!
The reason it doesn't work is because rtspsrc's source pad is a so-called "Sometimes pad". The link here explains it quite well, but basically you cannot know upfront how many pads will become available on the rtspsrc, since this depends on the SDP provided by the RTSP server.
As such, you should listen to the "pad-added" signal of the rtspsrc, where you can link the rest of your pipeline to the source pad that just showed up in the callback.
So summarised:
def main(device):
    GObject.threads_init()
    Gst.init(None)
    pipeline = Gst.Pipeline()
    source = Gst.ElementFactory.make("rtspsrc", "video-source")
    source.set_property("location", device)
    source.set_property("latency", 300)
    source.connect("pad-added", on_rtspsrc_pad_added)
    pipeline.add(source)
    # We will add/link the rest of the pipeline later
    loop = GObject.MainLoop()
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    pipeline.set_state(Gst.State.NULL)
def on_rtspsrc_pad_added(rtspsrc, pad, *user_data):
    # Create the rest of your pipeline here, starting from the source pad
    # that just appeared on the rtspsrc.
    depay = Gst.ElementFactory.make("rtph264depay", "depay")
    pipeline.add(depay)
    depay.sync_state_with_parent()
    pad.link(depay.get_static_pad("sink"))  # link the new pad itself, not the element
    # and so on ....
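Note that in this sketch, pipeline has to be reachable from the callback. One way (an assumption on my side, not part of the answer above) is to pass it as user data when connecting the signal, since extra arguments to connect() are forwarded to the callback:

source.connect("pad-added", on_rtspsrc_pad_added, pipeline)

def on_rtspsrc_pad_added(rtspsrc, pad, pipeline):
    # Same body as above, but using the pipeline passed as user data.
    depay = Gst.ElementFactory.make("rtph264depay", "depay")
    pipeline.add(depay)
    depay.sync_state_with_parent()
    pad.link(depay.get_static_pad("sink"))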