marcoslucianops / DeepStream-Yolo

NVIDIA DeepStream SDK 7.0 / 6.4 / 6.3 / 6.2 / 6.1.1 / 6.1 / 6.0.1 / 6.0 / 5.1 implementation for YOLO models
MIT License

Multistream nvinfer gstnvinfer error #417

Closed · ubergeekNZ closed this issue 11 months ago

ubergeekNZ commented 11 months ago

Hi,

I am trying to get the multistream NvDCF tracker working with yolov8n, but I am getting the following error. It works fine with the NVIDIA ResNet Caffe model for multiple streams with the NvDCF tracker, and it also works fine for a single yolov8n stream with the tracker. I am writing the code in C++.

```
NvDsInferContextImpl::checkBackendParams() [UID = 1]: Backend has maxBatchSize 1 whereas 2 has been requested
0:05:17.864178821 1845 0x56206e548360 ERROR nvinfer gstnvinfer.cpp:632:gst_nvinfer_logger: NvDsInferContext[UID 1]: Error in NvDsInferContextImpl::buildModel() [UID = 1]: deserialized backend context :/usr/src/app/build/model_b2_gpu0_fp16.engine failed to match config params
0:05:17.987313658 1845 0x56206e548360 ERROR nvinfer gstnvinfer.cpp:632:gst_nvinfer_logger: NvDsInferContext[UID 1]: Error in NvDsInferContextImpl::generateBackendContext() [UID = 1]: build backend context failed
0:05:17.987342859 1845 0x56206e548360 ERROR nvinfer gstnvinfer.cpp:632:gst_nvinfer_logger: NvDsInferContext[UID 1]: Error in NvDsInferContextImpl::initialize() [UID = 1]: generate backend failed, check config file settings
```

My configuration and C++ code are shown below.

```
[property]
gpu-id=0
net-scale-factor=0.0039215697906911373
model-color-format=0
model-engine-file=/usr/src/app/models/yolov8n/ppe.engine
onnx-file=/usr/src/app/models/yolov8n/yolov8n.onnx
labelfile-path=/usr/src/app/models/yolov8n/labels.txt
batch-size=1
network-mode=2
num-detected-classes=7
force-implicit-batch-dim=1
interval=0
gie-unique-id=1
process-mode=1
network-type=0
cluster-mode=2
maintain-aspect-ratio=1
symmetric-padding=1
parse-bbox-func-name=NvDsInferParseYolo
custom-lib-path=/opt/nvidia/deepstream/deepstream/sources/DeepStream-Yolo/nvdsinfer_custom_impl_Yolo/libnvdsinfer_custom_impl_Yolo.so
engine-create-func-name=NvDsInferYoloCudaEngineGet

[class-attrs-all]
nms-iou-threshold=0.45
pre-cluster-threshold=0.25
topk=300
detected-min-w=20
detected-min-h=20
```
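
For context on the error above: the application code below overrides the nvinfer batch-size with the number of sources, so with two streams a batch of 2 is requested, while the engine being deserialized reports maxBatchSize 1. The sketch here shows [property] settings that would let nvinfer build an engine matching two streams; the batch-size value, the regenerated engine path, and dropping force-implicit-batch-dim (which targets implicit-batch Caffe/UFF models rather than ONNX) are assumptions for illustration, not settings taken from the thread.

```
[property]
# Assumed: two input streams, so request a batch-2 engine.
batch-size=2
onnx-file=/usr/src/app/models/yolov8n/yolov8n.onnx
# Hypothetical path for the regenerated engine; delete or repoint any stale
# engine file so nvinfer rebuilds it with the new batch size.
model-engine-file=/usr/src/app/models/yolov8n/model_b2_gpu0_fp16.engine
```

Either way, nvinfer only succeeds once the engine it ends up loading was built with a maxBatchSize at least as large as the requested batch.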

```
/* GStreamer, CUDA, and DeepStream headers used by this application. */
#include <gst/gst.h>
#include <glib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <chrono>
#include <cuda_runtime_api.h>
#include "gstnvdsmeta.h"
#include "gst-nvmessage.h"

using namespace std::chrono;

#define MAX_DISPLAY_LEN 64
#define PERF_INTERVAL 2
#define PGIE_CLASS_ID_VEHICLE 0
#define PGIE_CLASS_ID_PERSON 2

#define MAX_SOURCES 4

/* By default, OSD process-mode is set to CPU_MODE. To change mode, set as:
* 1: GPU mode (for Tesla only)
* 2: HW mode (For Jetson only)
*/
#define OSD_PROCESS_MODE 0

/* By default, OSD will not display text. To display text, change this to 1 */
#define OSD_DISPLAY_TEXT 1

/* The muxer output resolution must be set if the input streams will be of
* different resolution. The muxer will scale all the input frames to this
* resolution. */
#define MUXER_OUTPUT_WIDTH 1920
#define MUXER_OUTPUT_HEIGHT 1080

/* Muxer batch formation timeout, for e.g. 40 millisec. Should ideally be set
* based on the fastest source's framerate. */
#define MUXER_BATCH_TIMEOUT_USEC 40000

#define TILED_OUTPUT_WIDTH 1280
#define TILED_OUTPUT_HEIGHT 720

/* NVIDIA Decoder source pad memory feature. This feature signifies that source
* pads having this capability will push GstBuffers containing cuda buffers. */
#define GST_CAPS_FEATURES_NVMM "memory:NVMM"

gchar pgie_classes_str[4][32] = { "Vehicle", "TwoWheeler", "Person",
  "RoadSign"
};

static guint frame_count = 0;
typedef struct {
  GstClockTime start_time;
  gint frame_count;
  gint source_id;
  gboolean is_first;
} FPSMonitor;

// Per-source FPS monitors, indexed by source_id.
FPSMonitor monitors[MAX_SOURCES];

void
  changeBBoxColor(gpointer obj_meta_data, int has_bg_color, float red, float green,
                  float blue, float alpha) {

    NvDsObjectMeta *obj_meta = (NvDsObjectMeta *)obj_meta_data;
    #ifndef PLATFORM_TEGRA
      obj_meta->rect_params.has_bg_color = has_bg_color;
      obj_meta->rect_params.bg_color.red = red;
      obj_meta->rect_params.bg_color.green = green;
      obj_meta->rect_params.bg_color.blue = blue;
      obj_meta->rect_params.bg_color.alpha = alpha;
    #endif
    obj_meta->rect_params.border_color.red = red;
    obj_meta->rect_params.border_color.green = green;
    obj_meta->rect_params.border_color.blue = blue;
    obj_meta->rect_params.border_color.alpha = alpha;
    obj_meta->text_params.font_params.font_size = 14;
  }

void
  addDisplayMeta(gpointer batch_meta_data, gpointer frame_meta_data, gdouble fps) {

    NvDsBatchMeta *batch_meta = (NvDsBatchMeta *)batch_meta_data;
    NvDsFrameMeta *frame_meta = (NvDsFrameMeta *)frame_meta_data;

    // To access the data that will be used to draw
    NvDsDisplayMeta *display_meta = NULL;
    NvOSD_TextParams *txt_params = NULL;
    NvOSD_LineParams *line_params = NULL;

    int offset = 0;
    display_meta = nvds_acquire_display_meta_from_pool(batch_meta);
    txt_params = display_meta->text_params;
    line_params = display_meta->line_params;
    display_meta->num_labels = 1;

    // txt_params->display_text = (char *)g_malloc0(MAX_DISPLAY_LEN);
    // offset = snprintf(txt_params->display_text, MAX_DISPLAY_LEN, "Frame Number = %d, FPS = %.2f", frame_meta->frame_num, fps);

    // // if (txt_params->display_text)
    // //   g_free (txt_params->display_text);
    // txt_params->display_text = (char *)g_malloc0(MAX_DISPLAY_LEN);
    // offset = snprintf(txt_params->display_text, MAX_DISPLAY_LEN, "Frame Number = %d", frame_meta->frame_num);

    txt_params->display_text = (char *)g_malloc0(MAX_DISPLAY_LEN);
    offset = snprintf(txt_params->display_text, MAX_DISPLAY_LEN, "Frame Number = %d, FPS = %.2f", frame_meta->frame_num, fps);

    /* Now set the offsets where the string should appear */
    txt_params->x_offset = 10;
    txt_params->y_offset = 12;

    /* Font , font-color and font-size */
    txt_params->font_params.font_name = (char *)"Serif";
    txt_params->font_params.font_size = 14;
    txt_params->font_params.font_color.red = 1.0;
    txt_params->font_params.font_color.green = 1.0;
    txt_params->font_params.font_color.blue = 1.0;
    txt_params->font_params.font_color.alpha = 1.0;

    /* Text background color */
    txt_params->set_bg_clr = 1;
    txt_params->text_bg_clr.red = 0.0;
    txt_params->text_bg_clr.green = 0.0;
    txt_params->text_bg_clr.blue = 0.0;
    txt_params->text_bg_clr.alpha = 1.0;

    nvds_add_display_meta_to_frame(frame_meta, display_meta);
  }

  GstPadProbeReturn
tiler_src_pad_buffer_probe(GstPad *pad, GstPadProbeInfo *info,
                          gpointer u_data) {

  // Total frames processed and start time should be defined as static.
  static gint total_frames = 0;
  static GstClockTime start_time = GST_CLOCK_TIME_NONE;

  GstBuffer *buf = (GstBuffer *)info->data;

  // To access the entire batch data
  NvDsBatchMeta *batch_meta = NULL;

  NvDsObjectMeta *obj_meta = NULL;
  NvDsFrameMeta *frame_meta = NULL;

  // Get original raw data
  GstMapInfo in_map_info;

  if (!gst_buffer_map(buf, &in_map_info, GST_MAP_READ)) {
    // Mapping failed, so there is nothing to unmap; just bail out.
    g_print("Error: Failed to map gst buffer\n");
    return GST_PAD_PROBE_OK;
  }

  batch_meta = gst_buffer_get_nvds_batch_meta(buf);

  if (!batch_meta) {
    // Unmap before returning so the mapping is not leaked.
    gst_buffer_unmap(buf, &in_map_info);
    return GST_PAD_PROBE_OK;
  }

  GstClockTime current_time = GST_BUFFER_PTS (buf);

  for (NvDsMetaList *l_frame = batch_meta->frame_meta_list; l_frame != NULL;
      l_frame = l_frame->next) {
    frame_meta = (NvDsFrameMeta *)(l_frame->data);

    if (frame_meta == NULL) {
      // Ignore Null frame meta.
      continue;
    }

    FPSMonitor *monitor = &monitors[frame_meta->source_id];

    if (monitor->is_first) {
      monitor->start_time = current_time;
      monitor->is_first = FALSE;
    }

    if ((current_time - monitor->start_time) / GST_SECOND > 5.0) {
      gdouble fps = 0.0;
      if (monitor->frame_count != 0) {
        fps = monitor->frame_count / 5.0;
      }
      g_print ("FPS of stream %d: %.2f\n", frame_meta->source_id , fps);
      monitor->frame_count = 0;
      monitor->start_time = current_time;
    } else {
      monitor->frame_count++;
    }

    for (NvDsMetaList *l_obj = frame_meta->obj_meta_list; l_obj != NULL;
        l_obj = l_obj->next) {

      obj_meta = (NvDsObjectMeta *)(l_obj->data);

      if (obj_meta == NULL) {
        // Ignore Null object.
        continue;
      }

      gint class_index = obj_meta->class_id;
      if (frame_meta->source_id == 0) {
        changeBBoxColor(obj_meta, 1, 1.0, 0.0, 0.0, 0.25);
      }
    }
    gdouble fps = 0.0;
    if (current_time > monitor->start_time && monitor->frame_count != 0) {
      /* Cast before dividing so the FPS is not truncated by integer division. */
      fps = (gdouble) monitor->frame_count * GST_SECOND /
            (gdouble) (current_time - monitor->start_time);
    }
    addDisplayMeta(batch_meta, frame_meta, fps);
  }
  gst_buffer_unmap(buf, &in_map_info);
  return GST_PAD_PROBE_OK;
}

static gboolean
bus_call (GstBus * bus, GstMessage * msg, gpointer data)
{
  GMainLoop *loop = (GMainLoop *) data;
  switch (GST_MESSAGE_TYPE (msg)) {
    case GST_MESSAGE_EOS:
      g_print ("End of stream\n");
      g_main_loop_quit (loop);
      break;
    case GST_MESSAGE_WARNING:
    {
      gchar *debug;
      GError *error;
      gst_message_parse_warning (msg, &error, &debug);
      g_printerr ("WARNING from element %s: %s\n",
          GST_OBJECT_NAME (msg->src), error->message);
      g_free (debug);
      g_printerr ("Warning: %s\n", error->message);
      g_error_free (error);
      break;
    }
    case GST_MESSAGE_ERROR:
    {
      gchar *debug;
      GError *error;
      gst_message_parse_error (msg, &error, &debug);
      g_printerr ("ERROR from element %s: %s\n",
          GST_OBJECT_NAME (msg->src), error->message);
      if (debug)
        g_printerr ("Error details: %s\n", debug);
      g_free (debug);
      g_error_free (error);
      g_main_loop_quit (loop);
      break;
    }
#ifndef PLATFORM_TEGRA
    case GST_MESSAGE_ELEMENT:
    {
      if (gst_nvmessage_is_stream_eos (msg)) {
        guint stream_id;
        if (gst_nvmessage_parse_stream_eos (msg, &stream_id)) {
          g_print ("Got EOS from stream %d\n", stream_id);
        }
      }
      break;
    }
#endif
    default:
      break;
  }
  return TRUE;
}

static void
cb_newpad (GstElement * decodebin, GstPad * decoder_src_pad, gpointer data)
{
  g_print ("In cb_newpad\n");
  GstCaps *caps = gst_pad_get_current_caps (decoder_src_pad);
  const GstStructure *str = gst_caps_get_structure (caps, 0);
  const gchar *name = gst_structure_get_name (str);
  GstElement *source_bin = (GstElement *) data;
  GstCapsFeatures *features = gst_caps_get_features (caps, 0);

  /* Need to check if the pad created by the decodebin is for video and not
  * audio. */
  if (!strncmp (name, "video", 5)) {
    /* Link the decodebin pad only if decodebin has picked nvidia
    * decoder plugin nvdec_*. We do this by checking if the pad caps contain
    * NVMM memory features. */
    if (gst_caps_features_contains (features, GST_CAPS_FEATURES_NVMM)) {
      /* Get the source bin ghost pad */
      GstPad *bin_ghost_pad = gst_element_get_static_pad (source_bin, "src");
      if (!gst_ghost_pad_set_target (GST_GHOST_PAD (bin_ghost_pad),
              decoder_src_pad)) {
        g_printerr ("Failed to link decoder src pad to source bin ghost pad\n");
      }
      gst_object_unref (bin_ghost_pad);
    } else {
      g_printerr ("Error: Decodebin did not pick nvidia decoder plugin.\n");
    }
  }
}

static void
decodebin_child_added (GstChildProxy * child_proxy, GObject * object,
    gchar * name, gpointer user_data)
{
  g_print ("Decodebin child added: %s\n", name);
  if (g_strrstr (name, "decodebin") == name) {
    g_signal_connect (G_OBJECT (object), "child-added",
        G_CALLBACK (decodebin_child_added), user_data);
  }
}

static GstElement *
create_source_bin (guint index, gchar * uri)
{
  GstElement *bin = NULL, *uri_decode_bin = NULL;
  gchar bin_name[16] = { };

  g_snprintf (bin_name, 15, "source-bin-%02d", index);
  /* Create a source GstBin to abstract this bin's content from the rest of the
  * pipeline */
  bin = gst_bin_new (bin_name);

  /* Source element for reading from the uri.
  * We will use decodebin and let it figure out the container format of the
  * stream and the codec and plug the appropriate demux and decode plugins. */
  uri_decode_bin = gst_element_factory_make ("uridecodebin", "uri-decode-bin");

  if (!bin || !uri_decode_bin) {
    g_printerr ("One element in source bin could not be created.\n");
    return NULL;
  }

  /* We set the input uri to the source element */
  g_object_set (G_OBJECT (uri_decode_bin), "uri", uri, NULL);

  /* Connect to the "pad-added" signal of the decodebin which generates a
  * callback once a new pad for raw data has been created by the decodebin */
  g_signal_connect (G_OBJECT (uri_decode_bin), "pad-added",
      G_CALLBACK (cb_newpad), bin);
  g_signal_connect (G_OBJECT (uri_decode_bin), "child-added",
      G_CALLBACK (decodebin_child_added), bin);

  gst_bin_add (GST_BIN (bin), uri_decode_bin);

  /* We need to create a ghost pad for the source bin which will act as a proxy
  * for the video decoder src pad. The ghost pad will not have a target right
  * now. Once the decode bin creates the video decoder and generates the
  * cb_newpad callback, we will set the ghost pad target to the video decoder
  * src pad. */
  if (!gst_element_add_pad (bin, gst_ghost_pad_new_no_target ("src",
              GST_PAD_SRC))) {
    g_printerr ("Failed to add ghost pad in source bin\n");
    return NULL;
  }

  return bin;
}

int
main (int argc, char *argv[])
{
  GMainLoop *loop = NULL;
  GstElement *pipeline = NULL, *streammux = NULL, *sink = NULL, *pgie = NULL,
      *queue1, *queue2, *queue3, *queue4, *queue5, *queue6, *nvvidconv = NULL,
      *nvosd = NULL, *tiler = NULL;
  GstElement *encoder = NULL, *transform = NULL, *cap_filter = NULL, 
      /*****************************************/
      *codecparse = NULL, *mux = NULL;
//   GstElement *transform = NULL;
  GstCaps *caps = NULL;
  GstElement *tracker = NULL;
  GstBus *bus = NULL;
  guint bus_watch_id;
  GstPad *tiler_src_pad = NULL;
  guint i, num_sources;
  guint tiler_rows, tiler_columns;
  guint pgie_batch_size;

  int current_device = -1;
  cudaGetDevice(&current_device);
  struct cudaDeviceProp prop;
  cudaGetDeviceProperties(&prop, current_device);

  for (int i = 0; i < MAX_SOURCES; i++) {
    monitors[i].start_time = GST_CLOCK_TIME_NONE;
    monitors[i].frame_count = 0;
    monitors[i].source_id = i;
    monitors[i].is_first = TRUE;
  }

  /* Check input arguments */
  if (argc < 2) {
    g_printerr ("Usage: %s <uri1> [uri2] ... [uriN] \n", argv[0]);
    return -1;
  }
  num_sources = argc - 1;
  if (num_sources > MAX_SOURCES) {
    /* The FPS monitors are sized for MAX_SOURCES streams; guard against
     * indexing past the end of the array. */
    g_printerr ("At most %d sources are supported. Exiting.\n", MAX_SOURCES);
    return -1;
  }

  /* Standard GStreamer initialization */
  gst_init (&argc, &argv);
  loop = g_main_loop_new (NULL, FALSE);

  /* Create gstreamer elements */
  /* Create Pipeline element that will form a connection of other elements */
  pipeline = gst_pipeline_new ("dsyolo-pipeline");

  /* Create nvstreammux instance to form batches from one or more sources. */
  streammux = gst_element_factory_make ("nvstreammux", "stream-muxer");

  if (!pipeline || !streammux) {
    g_printerr ("One element could not be created. Exiting.\n");
    return -1;
  }
  gst_bin_add (GST_BIN (pipeline), streammux);

  for (i = 0; i < num_sources; i++) {
    GstPad *sinkpad, *srcpad;
    gchar pad_name[16] = { };
    GstElement *source_bin = create_source_bin (i, argv[i + 1]);

    if (!source_bin) {
      g_printerr ("Failed to create source bin. Exiting.\n");
      return -1;
    }

    gst_bin_add (GST_BIN (pipeline), source_bin);

    g_snprintf (pad_name, 15, "sink_%u", i);
    sinkpad = gst_element_get_request_pad (streammux, pad_name);
    if (!sinkpad) {
      g_printerr ("Streammux request sink pad failed. Exiting.\n");
      return -1;
    }

    srcpad = gst_element_get_static_pad (source_bin, "src");
    if (!srcpad) {
      g_printerr ("Failed to get src pad of source bin. Exiting.\n");
      return -1;
    }

    if (gst_pad_link (srcpad, sinkpad) != GST_PAD_LINK_OK) {
      g_printerr ("Failed to link source bin to stream muxer. Exiting.\n");
      return -1;
    }

    gst_object_unref (srcpad);
    gst_object_unref (sinkpad);
  }

  /* Use nvinfer to infer on batched frame. */
  pgie = gst_element_factory_make ("nvinfer", "primary-nvinference-engine");

  tracker = gst_element_factory_make ("nvtracker", "tracker");
  if (!tracker) {
    g_printerr ("Tracker element could not be created. Exiting.\n");
    return -1;
  }

  g_object_set (G_OBJECT (tracker), "tracker-width", 640, NULL);
  g_object_set (G_OBJECT (tracker), "tracker-height", 480, NULL);
  g_object_set (G_OBJECT (tracker), "ll-lib-file", "/opt/nvidia/deepstream/deepstream/lib/libnvds_nvmultiobjecttracker.so", NULL);
  g_object_set (G_OBJECT (tracker), "ll-config-file", "/opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/tracker_config.yml", NULL);
  g_object_set (G_OBJECT (tracker), "enable-batch-process", 1, NULL);

  /* Add queue elements between every two elements */
  queue1 = gst_element_factory_make ("queue", "queue1");
  queue2 = gst_element_factory_make ("queue", "queue2");
  queue3 = gst_element_factory_make ("queue", "queue3");
  queue4 = gst_element_factory_make ("queue", "queue4");
  queue5 = gst_element_factory_make ("queue", "queue5");
  queue6 = gst_element_factory_make ("queue", "queue6");

  /* Use nvtiler to composite the batched frames into a 2D tiled array based
  * on the source of the frames. */
  tiler = gst_element_factory_make ("nvmultistreamtiler", "nvtiler");

  /* Use convertor to convert from NV12 to RGBA as required by nvosd */
  nvvidconv = gst_element_factory_make ("nvvideoconvert", "nvvideo-converter");

  /* Create OSD to draw on the converted RGBA buffer */
  nvosd = gst_element_factory_make ("nvdsosd", "nv-onscreendisplay");

  /* Finally render the osd output */
  transform = gst_element_factory_make ("nvvideoconvert", "transform");
  cap_filter = gst_element_factory_make ("capsfilter", "filter");
  caps = gst_caps_from_string ("video/x-raw(memory:NVMM), format=I420");
  g_object_set (G_OBJECT (cap_filter), "caps", caps, NULL);

  encoder = gst_element_factory_make ("nvv4l2h264enc", "h264-encoder");

/****************************************************************/
// rtppay = gst_element_factory_make ("rtph264pay", "rtppay-h264");
  codecparse = gst_element_factory_make ("h264parse", "h264-parser2");
// mux = gst_element_factory_make ("qtmux", "mux");
  mux = gst_element_factory_make ("avimux", "mux");
/****************************************************************/
  g_object_set (G_OBJECT (encoder), "bitrate", 4000000, NULL);
  //sink = gst_element_factory_make ("nveglglessink", "nvvideo-renderer");
//   sink = gst_element_factory_make("fakesink", "nvvideo-renderer");
  /* Create filesink to save output to file */
  sink = gst_element_factory_make ("filesink", "file-output");
  if (!sink) {
    g_printerr ("filesink element could not be created. Exiting.\n");
    return -1;
  }

  g_object_set (G_OBJECT (sink), "location", "file.mp4", NULL);

  if (!pgie || !tiler || !nvvidconv || !nvosd || !sink) {
    g_printerr ("One element could not be created. Exiting.\n");
    return -1;
  }

  if(!transform && prop.integrated) {
    g_printerr ("One tegra element could not be created. Exiting.\n");
    return -1;
  }

  g_object_set (G_OBJECT (streammux), "batch-size", num_sources, NULL);

  g_object_set (G_OBJECT (streammux), "width", MUXER_OUTPUT_WIDTH, "height",
      MUXER_OUTPUT_HEIGHT,
      "batched-push-timeout", MUXER_BATCH_TIMEOUT_USEC, NULL);

  /* Configure the nvinfer element using the nvinfer config file. */
  g_object_set (G_OBJECT (pgie),
      "config-file-path", "/usr/src/app/configs/model_yolov8n_onnx.txt", NULL);

  /* Override the batch-size set in the config file with the number of sources. */
  g_object_get (G_OBJECT (pgie), "batch-size", &pgie_batch_size, NULL);
  if (pgie_batch_size != num_sources) {
    g_printerr
        ("WARNING: Overriding infer-config batch-size (%d) with number of sources (%d)\n",
        pgie_batch_size, num_sources);
    g_object_set (G_OBJECT (pgie), "batch-size", num_sources, NULL);
  }
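  /* Note: this override means that with two sources nvinfer requests a batch
   * size of 2, so the engine it deserializes must have been built with
   * maxBatchSize >= 2; loading an engine built while batch-size was 1 produces
   * the "Backend has maxBatchSize 1 whereas 2 has been requested" error. */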

  tiler_rows = (guint) sqrt (num_sources);

  tiler_columns = (guint) ceil (1.0 * num_sources / tiler_rows);
  /* we set the tiler properties here */
  g_object_set (G_OBJECT (tiler), "rows", tiler_rows, "columns", tiler_columns,
      "width", TILED_OUTPUT_WIDTH, "height", TILED_OUTPUT_HEIGHT, NULL);

  g_object_set (G_OBJECT (nvosd), "process-mode", OSD_PROCESS_MODE,
      "display-text", OSD_DISPLAY_TEXT, NULL);

  g_object_set (G_OBJECT (sink), "qos", 0, NULL);

  /* we add a message handler */
  bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
  bus_watch_id = gst_bus_add_watch (bus, bus_call, loop);
  gst_object_unref (bus);

  gst_bin_add_many (GST_BIN (pipeline), queue1, pgie, queue2, tracker, queue3, tiler, queue4, nvvidconv, queue5, nvosd, transform, cap_filter, encoder, codecparse, mux, queue6, sink, NULL);

  if (!gst_element_link_many (streammux, queue1, pgie, queue2, tracker, queue3, tiler, queue4, nvvidconv, queue5, nvosd, transform, cap_filter, encoder, codecparse, mux, queue6, sink, NULL)) {
    g_printerr ("Elements could not be linked. Exiting.\n");
    return -1;
  }

  tiler_src_pad = gst_element_get_static_pad (pgie, "src");
  if (!tiler_src_pad)
    g_print ("Unable to get src pad\n");
  else
    gst_pad_add_probe (tiler_src_pad, GST_PAD_PROBE_TYPE_BUFFER,
        tiler_src_pad_buffer_probe, NULL, NULL);
  gst_object_unref (tiler_src_pad);

  // GstPad *pad = gst_element_get_static_pad (sink, "sink");
  // gst_pad_add_probe (pad, GST_PAD_PROBE_TYPE_BUFFER,
  //     (GstPadProbeCallback) perf_cb, NULL, NULL);
  // gst_object_unref (pad);

  /* Set the pipeline to "playing" state */
  g_print ("Now playing:");
  for (i = 0; i < num_sources; i++) {
    g_print (" %s,", argv[i + 1]);
  }
  g_print ("\n");
  gst_element_set_state (pipeline, GST_STATE_PLAYING);

  /* Wait till pipeline encounters an error or EOS */
  g_print ("Running...\n");
  g_main_loop_run (loop);

  /* Out of the main loop, clean up nicely */
  g_print ("Returned, stopping playback\n");
  gst_element_set_state (pipeline, GST_STATE_NULL);
  g_print ("Deleting pipeline\n");
  gst_object_unref (GST_OBJECT (pipeline));
  g_source_remove (bus_watch_id);
  g_main_loop_unref (loop);
  return 0;
}  

```

ubergeekNZ commented 11 months ago

batch size of 1