How can I play audio and video together in GStreamer?

Posted on 2024-11-06 00:43:33

How can I play audio and video together in a GStreamer application, without using playbin/playbin2?

After demuxing, how can I play the audio in an audio sink and the video in a video sink?

Please reply. If possible, please give a code example.
Thanks in advance.

Comments (2)

瀞厅☆埖开 2024-11-13 00:43:33

Here's an example pipeline for an Ogg file:

gst-launch filesrc location=test.ogg ! oggdemux name=demuxer \  
    demuxer. ! queue ! vorbisdec ! audioconvert ! audioresample ! autoaudiosink \  
    demuxer. ! queue ! theoradec ! ffmpegcolorspace ! autovideosink
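
Note that gst-launch and ffmpegcolorspace are GStreamer 0.10 names. On GStreamer 1.x the equivalent pipeline should be the following (untested here; only the launcher name and the colorspace converter change):

gst-launch-1.0 filesrc location=test.ogg ! oggdemux name=demuxer \
    demuxer. ! queue ! vorbisdec ! audioconvert ! audioresample ! autoaudiosink \
    demuxer. ! queue ! theoradec ! videoconvert ! autovideosink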
人心善变 2024-11-13 00:43:33

This works fine for me in C:

//vdoAdoOGGPlayer.c

#include <gst/gst.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct Queue {
    GstElement *queue[2];
} Queue;

void get_stream_type (char **result, char *file);
static gboolean bus_call (GstBus *bus, GstMessage *msg, gpointer data);
static void on_pad_added (GstElement *element, GstPad *pad, gpointer data);

int main (int argc, char *argv[])
{
    GMainLoop *loop;

    GstElement *pipeline, *source, *demuxer, *decoder, *conv, *sink;
    GstBus *bus;
    guint bus_watch_id;

    /* Check input arguments */
    if (argc != 2) {
        g_printerr ("Usage: %s <Ogg/Vorbis filename>\n", argv[0]);
        return -1;
    }

    /* Initialisation */
    gst_init (&argc, &argv);

    loop = g_main_loop_new (NULL, FALSE);

    /* Create gstreamer elements */
    pipeline = gst_pipeline_new ("av-player");
    source   = gst_element_factory_make ("filesrc",       "file-source");
    demuxer  = gst_element_factory_make ("oggdemux",      "ogg-demuxer");

    /* video decoding, colorspace conversion and output elements */
    decoder  = gst_element_factory_make ("theoradec",     "theora-decoder");
    conv     = gst_element_factory_make ("videoconvert",  "converter");
    sink     = gst_element_factory_make ("autovideosink", "video-output");

    /* audio decoding, conversion and output elements */
    GstElement *decoder_audio = gst_element_factory_make ("vorbisdec", "audio-decoder");
    GstElement *converter_audio = gst_element_factory_make ("audioconvert", "audio-converter");
    GstElement *sink_audio = gst_element_factory_make ("autoaudiosink", "audio-output");

    if (!pipeline || !source || !demuxer || !decoder || !conv || !sink ||
            !decoder_audio || !converter_audio || !sink_audio) {
        g_printerr ("One element could not be created. Exiting.\n");
        return -1;
    }

    /* Set up the pipeline */

    g_object_set (G_OBJECT (source), "location", argv[1], NULL);

    GstElement *vdoQueue = NULL;
    GstElement *audioQueue = NULL;

    char format1[10] = { 0 };
    char format2[10] = { 0 };
    char *result[2];
    result[0] = format1;
    result[1] = format2;

    /* detect which stream encodings the file contains */
    get_stream_type(result, argv[1]);

    /* if a theora stream is present, add and link the video branch */
    if (strcmp (format1, "theora") == 0 || strcmp (format2, "theora") == 0)  {

        vdoQueue = gst_element_factory_make ("queue", "video-queue");
        gst_bin_add_many (GST_BIN(pipeline), vdoQueue, decoder, conv, sink , NULL);
        gst_element_link_many (vdoQueue, decoder, conv, sink, NULL);
    }

    if (strcmp (format1, "vorbis") == 0 || strcmp (format2, "vorbis") == 0) {

        audioQueue = gst_element_factory_make ("queue", "audio-queue");
        gst_bin_add_many (GST_BIN(pipeline), audioQueue, 
                decoder_audio, converter_audio, sink_audio, NULL);

        gst_element_link_many (audioQueue, decoder_audio, 
                converter_audio, sink_audio, NULL);
    }

    /* we add a message handler */
    bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
    bus_watch_id = gst_bus_add_watch (bus, bus_call, loop);
    gst_object_unref (bus);

    gst_bin_add_many (GST_BIN (pipeline), source, demuxer,NULL);

    if ( gst_element_link (source, demuxer) != TRUE ){
        g_print("Elements couldn't be linked\n");
        return 1;
    }

    Queue queue;
    queue.queue[0] = audioQueue;
    queue.queue[1] = vdoQueue;

    g_signal_connect (demuxer, "pad-added", G_CALLBACK (on_pad_added), (void*)&queue);

    /* Note that the demuxer is linked to the decoders dynamically.
       The reason is that Ogg may contain various streams (for example,
       audio and video). The source pads are created at run time by the
       demuxer, once it detects the number and type of streams, so we
       connect a callback that is executed whenever the "pad-added"
       signal is emitted. */

    /* Set the pipeline to "playing" state*/
    g_print ("Now playing: %s\n", argv[1]);
    gst_element_set_state (pipeline, GST_STATE_PLAYING);

    /* Iterate */
    g_print ("Running...\n");
    g_main_loop_run (loop);

    /* Out of the main loop, clean up nicely */
    g_print ("Returned, stopping playback\n");
    gst_element_set_state (pipeline, GST_STATE_NULL);

    g_print ("Deleting pipeline\n");
    gst_object_unref (GST_OBJECT (pipeline));
    g_source_remove (bus_watch_id);
    g_main_loop_unref (loop);

    return 0;
}

static void on_pad_added (GstElement *element, GstPad *pad, gpointer data)
{
    GstPad *sinkpad_audio, *sinkpad_vdo;
    Queue *queue = (Queue *) data;

    /* Link the new demuxer pad to the matching queue's sink pad */
    g_print ("Dynamic pad created, linking demuxer/decoder\n");

    /* only link the branches whose queue was actually created */
    if ( queue->queue[0] ) {
        sinkpad_audio = gst_element_get_static_pad (queue->queue[0], "sink");
        GstPad *pad1 = gst_element_get_compatible_pad (element, sinkpad_audio, NULL);
        if ( pad1 ) {
            gst_pad_link(pad1, sinkpad_audio);
            gst_object_unref (GST_OBJECT (pad1));
            g_print("link audio\n");
        }
        gst_object_unref (sinkpad_audio);
    }
    if ( queue->queue[1] ) {
        sinkpad_vdo = gst_element_get_static_pad (queue->queue[1], "sink");
        GstPad *pad2 = gst_element_get_compatible_pad (element, sinkpad_vdo, NULL);

        if ( pad2 ) {
            gst_pad_link(pad2, sinkpad_vdo);
            gst_object_unref (GST_OBJECT (pad2));
            g_print("link video\n");
        }
        gst_object_unref (sinkpad_vdo);
    }
}

void get_stream_type(char **result, char *file) {

    char buf[110];
    FILE *fp = fopen(file, "rb");

    if ( !fp ) {
        fprintf(stderr, "Open file failed\n");
        exit(1);
    }

    /* fread returns a size_t and can never be negative;
       check that we actually read something instead */
    size_t n = fread (buf, sizeof(char), sizeof(buf), fp);
    if ( n == 0 ) {
        fprintf (stderr, "fread failed\n");
        exit(1);
    }

    /* crude scan of the Ogg header: remember the offsets of the first
       two bytes that could start "theora" or "vorbis" */
    int i1 = 0, i2 = 0;
    for ( size_t i = 0; i + 6 < n; i++ ) {
        if (buf[i]=='t' || buf[i]=='v') {
            if ( !i1 )
                i1 = i;
            else
                i2 = i;
        }
    }

    /* terminate after 6 characters so the copies are exactly
       "theora" or "vorbis" when those codecs are present */
    buf[i1+6] = buf[i2+6] = '\0';
    strcpy(result[0], &buf[i1]);
    strcpy(result[1], &buf[i2]);

    printf("%s\n", result[0]);
    printf("%s\n", result[1]);

    fclose (fp);
}

static gboolean bus_call (GstBus *bus, GstMessage *msg, gpointer data)
{
    GMainLoop *loop = (GMainLoop *) data;

    switch (GST_MESSAGE_TYPE (msg)) {

        case GST_MESSAGE_EOS:
            g_print ("End of stream\n");
            g_main_loop_quit (loop);
            break;

        case GST_MESSAGE_ERROR: 
            {
                gchar  *debug;
                GError *error;
                gst_message_parse_error (msg, &error, &debug);
                g_free (debug);

                g_printerr ("Error: %s\n", error->message);
                g_error_free (error);

                g_main_loop_quit (loop);
                break;
            }
        default:
            break;
    }
    return TRUE;
}

Compile with:

gcc -g vdoAdoOGGPlayer.c -o vdoAdoOGGPlayer `pkg-config --cflags --libs gstreamer-1.0`

Execute:

./vdoAdoOGGPlayer file.ogg

More information about this code is in this article:
https://poemdear.com/2019/08/13/%E5%8C%85%E5%90%AB%E9%9F%B3%E9%A2%91%E5%92%8C%E8%A7%86%E9%A2%91%E7%9A%84ogg%E6%96%87%E4%BB%B6%E7%9A%84%E6%92%AD%E6%94%BE-gstreamer%E5%AE%9E%E7%8E%B0/
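
As a side note, the get_stream_type() header scan above is a quick hack. A more idiomatic option is to skip the pre-scan entirely and route each demuxer pad by its caps inside the pad-added callback. Below is a minimal sketch of such a handler, assuming the same Queue struct as above (queue[0] = audio queue, queue[1] = video queue); the handler name is illustrative and this variant is not part of the original program:

/* Alternative pad-added handler: route the new pad by its caps name
   instead of pre-scanning the file header. */
static void on_pad_added_by_caps (GstElement *element, GstPad *pad, gpointer data)
{
    Queue *queue = (Queue *) data;
    (void) element;  /* unused */

    GstCaps *caps = gst_pad_get_current_caps (pad);
    if (!caps)
        caps = gst_pad_query_caps (pad, NULL);

    const gchar *name = gst_structure_get_name (gst_caps_get_structure (caps, 0));
    GstElement *target = NULL;

    /* oggdemux source pads carry caps such as "audio/x-vorbis" or "video/x-theora" */
    if (g_str_has_prefix (name, "audio/") && queue->queue[0])
        target = queue->queue[0];
    else if (g_str_has_prefix (name, "video/") && queue->queue[1])
        target = queue->queue[1];

    if (target) {
        GstPad *sinkpad = gst_element_get_static_pad (target, "sink");
        if (!gst_pad_is_linked (sinkpad))
            gst_pad_link (pad, sinkpad);
        gst_object_unref (sinkpad);
    }
    gst_caps_unref (caps);
}

The linking logic in main() would stay the same; only the callback passed to g_signal_connect() changes.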
