Git Inbox Mirror of the ffmpeg-devel mailing list - see https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
From: wenbin.chen-at-intel.com@ffmpeg.org
To: ffmpeg-devel@ffmpeg.org
Subject: [FFmpeg-devel] [PATCH 1/3] libavfilter/dnn_backend_openvino: Add automatic input/output detection
Date: Wed, 17 Jan 2024 15:21:49 +0800
Message-ID: <20240117072151.2155795-1-wenbin.chen@intel.com> (raw)

From: Wenbin Chen <wenbin.chen@intel.com>

With the OpenVINO backend, the user no longer needs to set input/output
names on the command line; the model's ports are detected automatically.

For example:
ffmpeg -i input.png -vf \
dnn_detect=dnn_backend=openvino:model=model.xml:input=image:\
output=detection_out -y output.png

can be simplified to:
ffmpeg -i input.png -vf dnn_detect=dnn_backend=openvino:model=model.xml\
 -y output.png
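
For reference, the fallback added below boils down to the pattern
sketched here. The sketch is illustrative only and is not part of the
patch: the helper names are invented, error handling is omitted, and it
assumes the <openvino/c/openvino.h> header and the OpenVINO C API types
(ov_model_t, ov_output_const_port_t, ov_status_e) that
dnn_backend_openvino.c already uses.

#include <openvino/c/openvino.h>

/* Illustrative helpers (not in the patch): look a port up by name when
 * the user supplied one, otherwise fall back to the model's own ports. */
static ov_status_e get_input_port(ov_model_t *model, const char *name,
                                  ov_output_const_port_t **port)
{
    return name ? ov_model_const_input_by_name(model, name, port)
                : ov_model_const_input(model, port);
}

static ov_status_e get_output_port(ov_model_t *model, const char *name,
                                   size_t index,
                                   ov_output_const_port_t **port)
{
    return name ? ov_model_const_output_by_name(model, name, port)
                : ov_model_const_output_by_index(model, index, port);
}

When no output names are given, the number of outputs is taken from
ov_model_outputs_size(), and each detected port name is reported at
verbose log level via ov_port_get_any_name()/ov_free().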

Signed-off-by: Wenbin Chen <wenbin.chen@intel.com>
---
 libavfilter/dnn/dnn_backend_openvino.c | 64 ++++++++++++++++++++++----
 libavfilter/dnn_filter_common.c        | 21 +++++----
 2 files changed, 67 insertions(+), 18 deletions(-)

diff --git a/libavfilter/dnn/dnn_backend_openvino.c b/libavfilter/dnn/dnn_backend_openvino.c
index e207d44584..590ddd586c 100644
--- a/libavfilter/dnn/dnn_backend_openvino.c
+++ b/libavfilter/dnn/dnn_backend_openvino.c
@@ -205,6 +205,7 @@ static int fill_model_input_ov(OVModel *ov_model, OVRequestItem *request)
     ov_tensor_t* tensor = NULL;
     ov_shape_t input_shape = {0};
     ov_element_type_e precision;
+    char *port_name;
 #else
     dimensions_t dims;
     precision_e precision;
@@ -223,11 +224,23 @@ static int fill_model_input_ov(OVModel *ov_model, OVRequestItem *request)
         ov_output_const_port_free(ov_model->input_port);
         ov_model->input_port = NULL;
     }
-    status = ov_model_const_input_by_name(ov_model->ov_model, task->input_name, &ov_model->input_port);
+    if (task->input_name)
+        status = ov_model_const_input_by_name(ov_model->ov_model, task->input_name, &ov_model->input_port);
+    else
+        status = ov_model_const_input(ov_model->ov_model, &ov_model->input_port);
     if (status != OK) {
         av_log(ctx, AV_LOG_ERROR, "Failed to get input port shape.\n");
         return ov2_map_error(status, NULL);
     }
+    status = ov_port_get_any_name(ov_model->input_port, &port_name);
+    if (status != OK) {
+        av_log(ctx, AV_LOG_ERROR, "Failed to get input port name.\n");
+        return ov2_map_error(status, NULL);
+    }
+    av_log(ctx, AV_LOG_VERBOSE, "OpenVINO model input: %s\n", port_name);
+    ov_free(port_name);
+    port_name = NULL;
+
     status = ov_const_port_get_shape(ov_model->input_port, &input_shape);
     if (status != OK) {
         av_log(ctx, AV_LOG_ERROR, "Failed to get input port shape.\n");
@@ -620,7 +633,10 @@ static int init_model_ov(OVModel *ov_model, const char *input_name, const char *
         goto err;
     }
 
-    status = ov_preprocess_prepostprocessor_get_input_info_by_name(ov_model->preprocess, input_name, &ov_model->input_info);
+    if (input_name)
+        status = ov_preprocess_prepostprocessor_get_input_info_by_name(ov_model->preprocess, input_name, &ov_model->input_info);
+    else
+        status = ov_preprocess_prepostprocessor_get_input_info(ov_model->preprocess, &ov_model->input_info);
     if (status != OK) {
         av_log(ctx, AV_LOG_ERROR, "Failed to get input info from preprocess.\n");
         ret = ov2_map_error(status, NULL);
@@ -673,10 +689,24 @@ static int init_model_ov(OVModel *ov_model, const char *input_name, const char *
         goto err;
     }
 
+    if (!nb_outputs) {
+        size_t output_size;
+        status = ov_model_outputs_size(ov_model->ov_model, &output_size);
+        if (status != OK) {
+            av_log(ctx, AV_LOG_ERROR, "Failed to get output size.\n");
+            ret = ov2_map_error(status, NULL);
+            goto err;
+        }
+        nb_outputs = output_size;
+    }
     ov_model->nb_outputs = nb_outputs;
     for (int i = 0; i < nb_outputs; i++) {
-        status = ov_preprocess_prepostprocessor_get_output_info_by_name(
-                ov_model->preprocess, output_names[i], &ov_model->output_info);
+        if (output_names)
+            status = ov_preprocess_prepostprocessor_get_output_info_by_name(
+                    ov_model->preprocess, output_names[i], &ov_model->output_info);
+        else
+            status = ov_preprocess_prepostprocessor_get_output_info_by_index(
+                    ov_model->preprocess, i, &ov_model->output_info);
         if (status != OK) {
             av_log(ctx, AV_LOG_ERROR, "Failed to get output info from preprocess.\n");
             ret = ov2_map_error(status, NULL);
@@ -758,12 +788,25 @@ static int init_model_ov(OVModel *ov_model, const char *input_name, const char *
         }
 
     for (int i = 0; i < nb_outputs; i++) {
-        status = ov_model_const_output_by_name(ov_model->ov_model, output_names[i],
-                                               &ov_model->output_ports[i]);
+        char *port_name;
+        if (output_names)
+            status = ov_model_const_output_by_name(ov_model->ov_model, output_names[i],
+                                            &ov_model->output_ports[i]);
+        else
+            status = ov_model_const_output_by_index(ov_model->ov_model, i,
+                                            &ov_model->output_ports[i]);
         if (status != OK) {
             av_log(ctx, AV_LOG_ERROR, "Failed to get output port %s.\n", output_names[i]);
             goto err;
         }
+        status = ov_port_get_any_name(ov_model->output_ports[i], &port_name);
+        if (status != OK) {
+            av_log(ctx, AV_LOG_ERROR, "Failed to get output port name.\n");
+            goto err;
+        }
+        av_log(ctx, AV_LOG_VERBOSE, "OpenVINO model outputs: %s\n", port_name);
+        ov_free(port_name);
+        port_name = NULL;
     }
     //compile network
     status = ov_core_compile_model(ov_model->core, ov_model->ov_model, device, 0, &ov_model->compiled_model);
@@ -1044,7 +1087,10 @@ static int get_input_ov(void *model, DNNData *input, const char *input_name)
     ov_element_type_e precision;
     int64_t* dims;
     ov_status_e status;
-    status = ov_model_const_input_by_name(ov_model->ov_model, input_name, &ov_model->input_port);
+    if (input_name)
+        status = ov_model_const_input_by_name(ov_model->ov_model, input_name, &ov_model->input_port);
+    else
+        status = ov_model_const_input(ov_model->ov_model, &ov_model->input_port);
     if (status != OK) {
         av_log(ctx, AV_LOG_ERROR, "Failed to get input port shape.\n");
         return ov2_map_error(status, NULL);
@@ -1241,7 +1287,7 @@ static int get_output_ov(void *model, const char *input_name, int input_width, i
     OVRequestItem *request;
     DNNExecBaseParams exec_params = {
         .input_name     = input_name,
-        .output_names   = &output_name,
+        .output_names   = output_name ? &output_name : NULL,
         .nb_output      = 1,
         .in_frame       = NULL,
         .out_frame      = NULL,
@@ -1297,7 +1343,7 @@ static int get_output_ov(void *model, const char *input_name, int input_width, i
     }
     if (!ov_model->exe_network) {
 #endif
-        ret = init_model_ov(ov_model, input_name, &output_name, 1);
+        ret = init_model_ov(ov_model, input_name, output_name ? &output_name : NULL, 1);
         if (ret != 0) {
             av_log(ctx, AV_LOG_ERROR, "Failed init OpenVINO exectuable network or inference request\n");
             return ret;
diff --git a/libavfilter/dnn_filter_common.c b/libavfilter/dnn_filter_common.c
index 3b9182c1d1..f012d450a2 100644
--- a/libavfilter/dnn_filter_common.c
+++ b/libavfilter/dnn_filter_common.c
@@ -57,15 +57,17 @@ int ff_dnn_init(DnnContext *ctx, DNNFunctionType func_type, AVFilterContext *fil
         av_log(filter_ctx, AV_LOG_ERROR, "model file for network is not specified\n");
         return AVERROR(EINVAL);
     }
-    if (!ctx->model_inputname) {
-        av_log(filter_ctx, AV_LOG_ERROR, "input name of the model network is not specified\n");
-        return AVERROR(EINVAL);
-    }
 
-    ctx->model_outputnames = separate_output_names(ctx->model_outputnames_string, "&", &ctx->nb_outputs);
-    if (!ctx->model_outputnames) {
-        av_log(filter_ctx, AV_LOG_ERROR, "could not parse model output names\n");
-        return AVERROR(EINVAL);
+    if (ctx->backend_type == DNN_TF) {
+        if (!ctx->model_inputname) {
+            av_log(filter_ctx, AV_LOG_ERROR, "input name of the model network is not specified\n");
+            return AVERROR(EINVAL);
+        }
+        ctx->model_outputnames = separate_output_names(ctx->model_outputnames_string, "&", &ctx->nb_outputs);
+        if (!ctx->model_outputnames) {
+            av_log(filter_ctx, AV_LOG_ERROR, "could not parse model output names\n");
+            return AVERROR(EINVAL);
+        }
     }
 
     ctx->dnn_module = ff_get_dnn_module(ctx->backend_type, filter_ctx);
@@ -113,8 +115,9 @@ int ff_dnn_get_input(DnnContext *ctx, DNNData *input)
 
 int ff_dnn_get_output(DnnContext *ctx, int input_width, int input_height, int *output_width, int *output_height)
 {
+    char * output_name = ctx->model_outputnames ? ctx->model_outputnames[0] : NULL;
     return ctx->model->get_output(ctx->model->model, ctx->model_inputname, input_width, input_height,
-                                    (const char *)ctx->model_outputnames[0], output_width, output_height);
+                                    (const char *)output_name, output_width, output_height);
 }
 
 int ff_dnn_execute_model(DnnContext *ctx, AVFrame *in_frame, AVFrame *out_frame)
-- 
2.34.1


Thread overview: 4+ messages
2024-01-17  7:21 wenbin.chen-at-intel.com [this message]
2024-01-17  7:21 ` [FFmpeg-devel] [PATCH 2/3] libavfilter/dnn_interface: use dims to represent shapes wenbin.chen-at-intel.com
2024-01-17  7:21 ` [FFmpeg-devel] [PATCH 3/3] libavfilter/vf_dnn_detect: Use class confidence to filt boxes wenbin.chen-at-intel.com
2024-01-28  1:38   ` Guo, Yejun

