https://gstreamer.freedesktop.org/documentation/tutorials/basic/concepts.html?gi-language=c# 

 

Basic tutorial 2: GStreamer concepts

Goal

The previous tutorial showed how to build a pipeline automatically. Now we are going to build a pipeline manually by instantiating each element and linking them all together.

 

Walkthrough

The elements are GStreamer's basic construction blocks.

They process the data as it flows downstream from the source elements (data producers) to the sink elements (data consumers), passing through filter elements.

Pipeline

Element creation

// GstElement*
// gst_element_factory_make(
//     const gchar* factoryname,
//     const gchar* name);

source = gst_element_factory_make("videotestsrc", "source");
sink = gst_element_factory_make("autovideosink", "sink");

Pipeline with 'videotestsrc' and 'autovideosink'
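
The full tutorial also verifies that every factory call succeeded before going on; a minimal version of that check:

if (!source || !sink) {
    g_printerr("Not all elements could be created.\n");
    return -1;
}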

Pipeline creation

// GstElement*
// gst_pipeline_new(const gchar* name);
// <<< MT safe >>>

/* Create the empty pipeline */
pipeline = gst_pipeline_new("test-pipeline");

// gboolean
// gst_bin_add(
//     GstBin* bin,
//     GstElement* element);
//
// gst_bin_add_many(
//     GstBin* bin,
//     GstElement* element_1,
//     ...); // NULL-terminated list of elements

/* Build the pipeline */
gst_bin_add_many(GST_BIN(pipeline), source, sink, NULL);
if (!gst_element_link(source, sink)) {
    g_printerr("Elements could not be linked.\n");
    gst_object_unref(pipeline);
    return -1;
}

Properties

GStreamer elements are all a particular kind of GObject, which is the entity offering property facilities.

// void
// g_object_set(
//     GObject* object,
//     const gchar* first_property_name,
//     ...); // NULL-terminated list of property-name, property-value pairs

/* Modify the source's properties */
g_object_set(source, "pattern", 0, NULL);
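
Since elements are GObjects, properties can also be read back with g_object_get. A small sketch (the local variable is illustrative; enum-typed properties such as pattern can be read into a gint):

gint pattern;

/* Read the property back to verify the change */
g_object_get(source, "pattern", &pattern, NULL);
g_print("Current pattern: %d\n", pattern);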

Error checking

// GstStateChangeReturn
// gst_element_set_state(
//     GstElement* element,
//     GstState state);

/* Start playing */
rv = gst_element_set_state(pipeline, GST_STATE_PLAYING);
if (rv == GST_STATE_CHANGE_FAILURE) {
    g_printerr("Unable to set the pipeline to the playing state.\n");
    gst_object_unref(pipeline);
    return -1;
}

 

/* Wait until error or EOS */
bus = gst_element_get_bus(pipeline);

msg = gst_bus_timed_pop_filtered(bus, GST_CLOCK_TIME_NONE,
    GST_MESSAGE_ERROR | GST_MESSAGE_EOS);

/* Parse message */
if (msg != NULL) {
    GError* err;
    gchar* debug_info;
    
    switch (GST_MESSAGE_TYPE(msg)) {
    case GST_MESSAGE_ERROR:
        gst_message_parse_error(msg, &err, &debug_info);
        g_printerr("Error received from element %s: %s\n",
            GST_OBJECT_NAME(msg->src), err->message);
        g_printerr("Debugging information: %s\n",
            debug_info ? debug_info : "none");
        g_clear_error(&err);
        g_free(debug_info);
        break;
    case GST_MESSAGE_EOS:
        g_print("End-Of-Stream reached.\n");
        break;
    default:
        /* We should not reach here because we only asked for ERRORs and EOS */
        g_printerr("Unexpected message received.\n");
        break;
    }
    gst_message_unref(msg);
}
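
The tutorial's main() then releases everything it acquired; the matching cleanup, condensed:

/* Free resources */
gst_object_unref(bus);
gst_element_set_state(pipeline, GST_STATE_NULL);
gst_object_unref(pipeline);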

 

GStreamer elements with their pads and demuxer with two source pads
Example pipeline with two branches.

 

typedef struct _CustomData {
    GstElement* pipeline;
    GstElement* source;
    GstElement* convert;
    GstElement* resample;
    GstElement* sink;
} CustomData;

/* Handler for the pad-added signal */
static void pad_added_handler(GstElement* src, GstPad* pad, CustomData* data);

/* Create the elements */
data.source   = gst_element_factory_make("uridecodebin",  "source");
data.convert  = gst_element_factory_make("audioconvert",  "convert");
data.resample = gst_element_factory_make("audioresample", "resample");
data.sink     = gst_element_factory_make("autoaudiosink", "sink");

/* Create the empty pipeline */
data.pipeline = gst_pipeline_new("test-pipeline");

/* Build the pipeline */
gst_bin_add_many(GST_BIN(data.pipeline),
    data.source, data.convert, data.resample, data.sink, NULL);

if (!gst_element_link_many(data.convert, data.resample, data.sink, NULL)) {
    g_printerr("Elements could not be linked.\n");
    gst_object_unref(data.pipeline);
    return -1;
}

 

Signals

/* Connect to the pad-added signal */
g_signal_connect(data.source, "pad-added",
    G_CALLBACK(pad_added_handler), &data);

 

Callback

/* This function will be called by the pad-added signal */
static void pad_added_handler(
    GstElement* src,
    GstPad* new_pad,
    CustomData* data)
{
    GstPad* sink_pad = gst_element_get_static_pad(data->convert, "sink");
    GstPadLinkReturn rv;
    GstCaps* new_pad_caps = NULL;
    GstStructure* new_pad_struct = NULL;
    const gchar* new_pad_type = NULL;

    g_print("Received new pad '%s' from '%s':\n",
        GST_PAD_NAME(new_pad), GST_ELEMENT_NAME(src));

    /* If our converter is already linked, we have nothing to do here */
    if (gst_pad_is_linked(sink_pad)) {
        g_print("We are already linked. Ignoring.\n");
        goto exit;
    }
    
    /* Check the new pad's type */
    new_pad_caps   = gst_pad_get_current_caps(new_pad);
    new_pad_struct = gst_caps_get_structure(new_pad_caps, 0);
    new_pad_type   = gst_structure_get_name(new_pad_struct);
    if (!g_str_has_prefix(new_pad_type, "audio/x-raw")) {
        g_print("It has type '%s' which is not raw audio. Ignoring.\n", new_pad_type);
        goto exit;
    }
    
    /* Attempt the link */
    rv = gst_pad_link(new_pad, sink_pad);
    if (GST_PAD_LINK_FAILED(rv))
        g_print("Type is '%s' but link failed.\n", new_pad_type);
    else
        g_print("Link succeeded (type '%s').\n", new_pad_type);

exit:
    /* Unreference the new pad's caps, if we got them */
    if (new_pad_caps)
        gst_caps_unref(new_pad_caps);

    /* Unreference the sink pad */
    gst_object_unref(sink_pad);
}

 

GStreamer States

State    Description
NULL     the NULL state or initial state of an element.
READY    the element is ready to go to PAUSED.
PAUSED   the element is PAUSED; it is ready to accept and process data. Sink elements, however, only accept one buffer and then block.
PLAYING  the element is PLAYING; the clock is running and the data is flowing.
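
gst_element_set_state only needs the target state; GStreamer walks through the intermediate states internally. A brief sketch of a full traversal (pipeline as in the snippets above):

/* Going up: NULL -> READY -> PAUSED -> PLAYING happens internally */
gst_element_set_state(pipeline, GST_STATE_PLAYING);

/* ... the stream plays ... */

/* Going down: always return to NULL before releasing the pipeline */
gst_element_set_state(pipeline, GST_STATE_NULL);
gst_object_unref(pipeline);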

 

Multi-threading

/* Create the elements */
audio_source = gst_element_factory_make("audiotestsrc", "audio_source");
tee          = gst_element_factory_make("tee", "tee");

audio_queue    = gst_element_factory_make("queue",         "audio_queue");
audio_convert  = gst_element_factory_make("audioconvert",  "audio_convert");
audio_resample = gst_element_factory_make("audioresample", "audio_resample");
audio_sink     = gst_element_factory_make("autoaudiosink", "audio_sink");

video_queue    = gst_element_factory_make("queue",         "video_queue");
visual         = gst_element_factory_make("wavescope",     "visual");
video_convert  = gst_element_factory_make("videoconvert",  "video_convert");
video_sink     = gst_element_factory_make("autovideosink", "video_sink");

/* Create the empty pipeline */
pipeline = gst_pipeline_new("test-pipeline");

/* Configure element */
g_object_set(audio_source, "freq", 215.0f, NULL);
g_object_set(visual,
    "shader", 0,
    "style", 1, NULL);

/* Link all elements that can be automatically linked because they have "Always" pads */
gst_bin_add_many(GST_BIN(pipeline),
    audio_source, tee, audio_queue, audio_convert, audio_resample, audio_sink,
    video_queue, visual, video_convert, video_sink, NULL);

if (!gst_element_link_many(audio_source, tee, NULL) ||
    !gst_element_link_many(audio_queue, audio_convert, audio_resample, audio_sink, NULL) ||
    !gst_element_link_many(video_queue, visual, video_convert, video_sink, NULL)) {
    g_printerr("Elements could not be linked.\n");
    gst_object_unref(pipeline);
    return -1;
}

/* Manually link the Tee, which has "Request" pads */
tee_audio_pad = gst_element_request_pad_simple(tee, "src_%u");
queue_audio_pad = gst_element_get_static_pad(audio_queue, "sink");

tee_video_pad = gst_element_request_pad_simple(tee, "src_%u");
queue_video_pad = gst_element_get_static_pad(video_queue, "sink");

if (gst_pad_link(tee_audio_pad, queue_audio_pad) != GST_PAD_LINK_OK ||
    gst_pad_link(tee_video_pad, queue_video_pad) != GST_PAD_LINK_OK) {
    g_printerr("Tee could not be linked.\n");
    gst_object_unref(pipeline);
    return -1;
}

gst_object_unref(queue_audio_pad);
gst_object_unref(queue_video_pad);

/* Start playing the pipeline */
gst_element_set_state(pipeline, GST_STATE_PLAYING);

/* Wait until error or EOS */
bus = gst_element_get_bus(pipeline);
msg = gst_bus_timed_pop_filtered(bus,
    GST_CLOCK_TIME_NONE,
    GST_MESSAGE_ERROR | GST_MESSAGE_EOS);

/* Release the request pads from the Tee, and unref them */
gst_element_release_request_pad(tee, tee_audio_pad);
gst_element_release_request_pad(tee, tee_video_pad);
gst_object_unref(tee_audio_pad);
gst_object_unref(tee_video_pad);

/* Free resources */
if (msg)
    gst_message_unref(msg);
gst_object_unref(bus);
gst_element_set_state(pipeline, GST_STATE_NULL);

gst_object_unref(pipeline);

A crude waveform generator

#define CHUNK_SIZE  1024    /* Amount of bytes we are sending in each buffer */
#define SAMPLE_RATE 44100   /* Samples per second we are sending */

typedef struct _CustomData {
    GstElement *pipeline;
    GstElement *app_source, *tee;
    GstElement *audio_queue, *audio_convert1, *audio_resample, *audio_sink;
    GstElement *video_queue, *audio_convert2, *visual, *video_convert, *video_sink;
    GstElement *app_queue, *app_sink;
    
    guint64 num_samples;    /* Number of samples generated so far (for timestamp generation) */
    gfloat  a, b, c, d;     /* For waveform generation */
    
    guint sourceid;         /* To control the GSource */
    
    GMainLoop *main_loop;   /* GLib's Main Loop */
} CustomData;

/**
 * This method is called by the idle GSource in the mainloop,
 * to feed CHUNK_SIZE bytes into appsrc.
 * The idle handler is added to the mainloop
 * when appsrc requests us to start sending data (need-data signal)
 * and is removed when appsrc has enough data (enough-data signal).
 */
static gboolean push_data(CustomData* data)
{
    GstBuffer* buffer;
    GstFlowReturn rv;
    int i;
    GstMapInfo map;
    gint16* raw;
    gint num_samples = CHUNK_SIZE / 2;  /* Because each sample is 16 bits */
    gfloat freq;
    
    /* Create a new empty buffer */
    buffer = gst_buffer_new_and_alloc(CHUNK_SIZE);
    
    /* Set its timestamp and duration */
    GST_BUFFER_TIMESTAMP(buffer) = gst_util_uint64_scale(data->num_samples, GST_SECOND, SAMPLE_RATE);
    GST_BUFFER_DURATION(buffer)  = gst_util_uint64_scale(num_samples, GST_SECOND, SAMPLE_RATE);
    
    /* Generate some psychedelic waveforms */
    gst_buffer_map(buffer, &map, GST_MAP_WRITE);
    raw = (gint16*)map.data;
    data->c += data->d;
    data->d -= data->c / 1000;
    freq = 1100 + 1000 * data->d;
    for (i = 0; i < num_samples; ++i) {
        data->a += data->b;
        data->b -= data->a / freq;
        raw[i] = (gint16)(500 * data->a);
    }
    gst_buffer_unmap(buffer, &map);
    data->num_samples += num_samples;
    
    /* Push the buffer into the appsrc */
    g_signal_emit_by_name(data->app_source, "push-buffer", buffer, &rv);
    
    /* Free the buffer now that we are done with it */
    gst_buffer_unref(buffer);
    
    if (rv != GST_FLOW_OK) {
        /* We got some error, stop sending data */
        return FALSE;
    }
    return TRUE;
}

/**
 * This signal callback triggers when appsrc needs data.
 * Here, we add an idle handler to the mainloop
 * to start pushing data into the appsrc
 */
static void start_feed(GstElement* source, guint size, CustomData* data)
{
    if (data->sourceid == 0) {
        g_print("Start feeding\n");
        data->sourceid = g_idle_add((GSourceFunc)push_data, data);
    }
}

/**
 * This callback triggers when appsrc has enough data and we can stop sending.
 * We remove the idle handler from the mainloop
 */
static void stop_feed(GstElement* source, CustomData* data)
{
    if (data->sourceid != 0) {
        g_print("Stop feeding\n");
        g_source_remove(data->sourceid);
        data->sourceid = 0;
    }
}

/* The appsink has received a buffer */
static GstFlowReturn new_sample(GstElement* sink, CustomData* data)
{
    GstSample* sample;
    
    /* Retrieve the buffer */
    g_signal_emit_by_name(sink, "pull-sample", &sample);
    if (sample) {
        /* The only thing we do in this example is print a * to indicate a received buffer */
        g_print("*");
        gst_sample_unref(sample);
        return GST_FLOW_OK;
    }
    return GST_FLOW_ERROR;
}

/**
 * this function is called when an error message is posted on the bus
 */
static void error_cb(GstBus* bus, GstMessage* msg, CustomData* data)
{
    GError* err;
    gchar* debug_info;
    
    /* Print error details on the screen */
    gst_message_parse_error(msg, &err, &debug_info);
    g_printerr("Error received from element %s: %s\n",
        GST_OBJECT_NAME(msg->src), err->message);
    g_printerr("Debugging information: %s\n", debug_info ? debug_info : "none");
    g_clear_error(&err);
    g_free(debug_info);
    
    g_main_loop_quit(data->main_loop);
}
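
In the tutorial, error_cb is attached to the bus through a signal watch rather than by polling with gst_bus_timed_pop_filtered:

/* Instruct the bus to emit signals for each received message,
 * and connect to the interesting signals */
bus = gst_element_get_bus(data.pipeline);
gst_bus_add_signal_watch(bus);
g_signal_connect(G_OBJECT(bus), "message::error", (GCallback)error_cb, &data);
gst_object_unref(bus);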

 

GstAudioInfo info;
GstCaps* audio_caps;

/* Configure appsrc */
gst_audio_info_set_format(&info, GST_AUDIO_FORMAT_S16, SAMPLE_RATE, 1, NULL);
audio_caps = gst_audio_info_to_caps(&info);

g_object_set(data.app_source, "caps", audio_caps, "format", GST_FORMAT_TIME, NULL);
g_signal_connect(data.app_source, "need-data", G_CALLBACK(start_feed), &data);
g_signal_connect(data.app_source, "enough-data", G_CALLBACK(stop_feed), &data);

/* Configure appsink */
g_object_set(data.app_sink, "emit-signals", TRUE, "caps", audio_caps, NULL);
g_signal_connect(data.app_sink, "new-sample", G_CALLBACK(new_sample), &data);
gst_caps_unref(audio_caps);
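
With appsrc and appsink configured, the tutorial starts playback and spins a GLib main loop, which is what dispatches the idle handler and the signal callbacks above:

/* Start playing the pipeline and run the GLib main loop */
gst_element_set_state(data.pipeline, GST_STATE_PLAYING);
data.main_loop = g_main_loop_new(NULL, FALSE);
g_main_loop_run(data.main_loop);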

Bins (container element)

Bins allow you to combine a group of linked elements into one logical element.
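
Because a bin is itself an element, it can be added to a pipeline and linked like any other element; pads of inner elements are exposed to the outside through ghost pads. A minimal sketch (element and bin names are illustrative):

/* Group a converter and a sink into one logical element */
GstElement* bin = gst_bin_new("audio-output-bin");
GstElement* convert = gst_element_factory_make("audioconvert", "convert");
GstElement* sink = gst_element_factory_make("autoaudiosink", "sink");
GstPad* inner_pad;

gst_bin_add_many(GST_BIN(bin), convert, sink, NULL);
gst_element_link(convert, sink);

/* Expose the converter's sink pad on the bin as a ghost pad */
inner_pad = gst_element_get_static_pad(convert, "sink");
gst_element_add_pad(bin, gst_ghost_pad_new("sink", inner_pad));
gst_object_unref(inner_pad);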

Handy elements

Bins

playbin: It manages all aspects of media playback, from source to display, passing through demuxing and decoding. It is extremely flexible and offers many options.

uridecodebin: This element decodes data from a URI into raw media. It selects a source element that can handle the given URI scheme and connects it to a decodebin element. It acts like a demuxer, so it offers as many source pads as streams are found in the media.

decodebin: This element automatically constructs a decoding pipeline using available decoders and demuxers via auto-plugging until raw media is obtained.

File input/output

filesrc: This element reads a local file and produces media with ANY Caps. If you want to obtain the correct Caps for the media, explore the stream by using a typefind element or by setting the typefind property of filesrc to TRUE.

filesink: This element writes to a file all the media it receives. Use the location property to specify the file name.

Network

souphttpsrc: This element receives data as a client over the network via HTTP using the libsoup library. Set the URL to retrieve through the location property.
Test media generation

videotestsrc: This element produces a video pattern (selectable among many different options with the pattern property). Use it to test video pipelines.

audiotestsrc: This element produces an audio wave (selectable among many different options with the wave property). Use it to test audio pipelines.

Video adapters

videoconvert: This element converts from one color space to another one. It can also convert between different YUV formats or RGB format arrangements. This is normally your first choice when solving negotiation problems. When it is not needed, because its upstream and downstream elements can already understand each other, it acts in pass-through mode, having minimal impact on performance. As a rule of thumb, always use videoconvert whenever you use elements whose Caps are unknown at design time, like autovideosink, or that can vary depending on external factors, like a user-provided file.

videorate: This element takes an incoming stream of time-stamped video frames and produces a stream that matches the source pad's frame rate. The correction is performed by dropping and duplicating frames; no fancy algorithm is used to interpolate frames. This is useful to allow elements requiring different frame rates to link. As with the other adapters, if it is not needed (because there is a frame rate on which both Pads can agree), it acts in pass-through mode and does not impact performance. It is therefore a good idea to always use it whenever the actual frame rate is unknown at design time, just in case.

videoscale: This element resizes video frames. By default the element tries to negotiate to the same size on the source and sink Pads so that no scaling is needed. It is therefore safe to insert this element in a pipeline to get more robust behavior without any cost if no scaling is needed. This element supports a wide range of color spaces, including various YUV and RGB formats, and is therefore generally able to operate anywhere in a pipeline. If the video is to be output to a window whose size is controlled by the user, it is a good idea to use a videoscale element, since not all video sinks are capable of performing scaling operations.
Audio adapters

audioconvert: This element converts raw audio buffers between various possible formats. It supports
 - integer to float conversion,
 - width/depth conversion,
 - signedness and endianness conversion, and
 - channel transformations.
Like videoconvert does for video, you use this to solve negotiation problems with audio, and it is generally safe to use it liberally, since this element does nothing if it is not needed.

audioresample: This element resamples raw audio buffers to different sampling rates using a configurable windowing function to enhance quality.

audiorate: This element takes an incoming stream of time-stamped raw audio frames and produces a perfect stream by inserting or dropping samples as needed. It does not allow the sample rate to be changed as videorate does; it just fills gaps and removes overlapped samples so the output stream is continuous and "clean". It is useful in situations where the timestamps are going to be lost (when storing into certain file formats, for example) and the receiver will require all samples to be present. It is cumbersome to exemplify this, so no example is given.
Multithreading

queue: Basically, a queue performs two tasks:
  • Data is queued until a selected limit is reached. Any attempt to push more buffers into the queue blocks the pushing thread until more space becomes available.
  • The queue creates a new thread on the source Pad to decouple the processing on sink and source Pads.
Additionally, queue triggers signals when it is about to become empty or full (according to some configurable thresholds), and can be instructed to drop buffers instead of blocking when it is full.

queue2: queue2 performs the two tasks listed above for queue and, additionally, is able to store the received data (or part of it) on a disk file, for later retrieval. It also replaces the signals with the more general and convenient buffering messages described in Basic tutorial 12: Streaming. As a rule of thumb, prefer queue2 over queue whenever network buffering is a concern to you. See Basic tutorial 12: Streaming for an example (queue2 is hidden inside playbin).

multiqueue: This element provides queues for multiple streams simultaneously, and eases their management,
  • by allowing some queues to grow if no data is being received on other streams,
  • by allowing some queues to drop data if they are not connected to anything (instead of returning an error, as a simpler queue would do).
Additionally, it synchronizes the different streams, ensuring that none of them goes too far ahead of the others. This is an advanced element. It is found inside decodebin, but you will rarely need to instantiate it yourself in a normal playback application.

tee: This element splits data to multiple pads. Splitting the data flow is useful, for example, when capturing a video where the video is shown on the screen and also encoded and written to a file. Another example is playing music and hooking up a visualization module. One needs to use separate queue elements in each branch to provide separate threads for each branch. Otherwise a blocked dataflow in one branch would stall the other branches.
Capabilities

capsfilter: When building a pipeline programmatically, Caps filters are implemented with the capsfilter element. This element does not modify data as such, but enforces limitations on the data format (see the C sketch after this list).

typefind: This element determines the type of media a stream contains. It applies typefind functions in the order of their rank. Once the type has been detected, it sets its source Pad Caps to the found media type and emits the have-type signal. It is instantiated internally by decodebin, and you can use it too to find the media type, although you can normally use the GstDiscoverer, which provides more information (as seen in Basic tutorial 9: Media information gathering).

Debugging

fakesink: This sink element simply swallows any data fed to it. It is useful when debugging, to replace your normal sinks and rule them out of the equation. It can be very verbose when combined with the -v switch of gst-launch-1.0, so use the silent property to remove any unwanted noise.

identity: This is a dummy element that passes incoming data through unmodified. It has several useful diagnostic functions, such as offset and timestamp checking, or buffer dropping. Read its documentation to learn all the things this seemingly harmless element can do.
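
As referenced in the capsfilter entry above, this is roughly how the gst-launch caps notation maps to code (the caps string matches the capsfilter example below):

/* Programmatic equivalent of "... ! video/x-raw,format=GRAY8 ! ..." */
GstElement* filter = gst_element_factory_make("capsfilter", "filter");
GstCaps* caps = gst_caps_from_string("video/x-raw,format=GRAY8");

g_object_set(filter, "caps", caps, NULL);
gst_caps_unref(caps);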

uridecodebin

gst-launch-1.0 uridecodebin uri=https://www.freedesktop.org/software/gstreamer-sdk/data/media/sintel_trailer-480p.webm \
    ! videoconvert ! autovideosink
gst-launch-1.0 uridecodebin uri=https://www.freedesktop.org/software/gstreamer-sdk/data/media/sintel_trailer-480p.webm \
    ! audioconvert ! autoaudiosink

decodebin

gst-launch-1.0 souphttpsrc location=https://www.freedesktop.org/software/gstreamer-sdk/data/media/sintel_trailer-480p.webm \
    ! decodebin ! autovideosink

souphttpsrc

gst-launch-1.0 souphttpsrc location=https://www.freedesktop.org/software/gstreamer-sdk/data/media/sintel_trailer-480p.webm \
    ! decodebin ! autovideosink

videoconvert

gst-launch-1.0 videotestsrc ! videoconvert ! autovideosink

videorate

gst-launch-1.0 videotestsrc \
    ! video/x-raw,framerate=30/1 \
    ! videorate \
    ! video/x-raw,framerate=1/1 \
    ! videoconvert \
    ! autovideosink

videoscale

gst-launch-1.0 uridecodebin uri=https://www.freedesktop.org/software/gstreamer-sdk/data/media/sintel_trailer-480p.webm \
    ! videoscale \
    ! video/x-raw,width=178,height=100 \
    ! videoconvert \
    ! autovideosink

audioconvert

gst-launch-1.0 audiotestsrc ! audioconvert ! autoaudiosink

audioresample

gst-launch-1.0 uridecodebin uri=https://www.freedesktop.org/software/gstreamer-sdk/data/media/sintel_trailer-480p.webm \
    ! audioresample \
    ! audio/x-raw,rate=4000 \
    ! audioconvert \
    ! autoaudiosink

tee

gst-launch-1.0 audiotestsrc \
    ! tee name=t \
       ! queue \
       ! audioconvert \
       ! autoaudiosink \
    t. ! queue \
       ! wavescope \
       ! videoconvert \
       ! autovideosink

capsfilter

gst-launch-1.0 videotestsrc \
    ! video/x-raw,format=GRAY8 \
    ! videoconvert \
    ! autovideosink

fakesink

gst-launch-1.0 audiotestsrc num-buffers=1000 ! fakesink sync=false

identity

gst-launch-1.0 audiotestsrc \
    ! identity drop-probability=0.1 \
    ! audioconvert \
    ! autoaudiosink

 

Media types as a way to identify streams

A media type (described by the pad's Caps) belongs to each pad in the pipeline; it identifies the kind of data that can flow through that pad.
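
A pad's current media type can be inspected at run time, just as pad_added_handler does above; a small sketch, assuming pad is any GstPad obtained from an element:

GstCaps* caps = gst_pad_get_current_caps(pad);

if (caps) {
    gchar* str = gst_caps_to_string(caps);
    g_print("Pad caps: %s\n", str);  /* e.g. "audio/x-raw, format=S16LE, ..." */
    g_free(str);
    gst_caps_unref(caps);
}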

 
