Git Inbox Mirror of the ffmpeg-devel mailing list - see https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
From: Zhao Zhili <quinkblack@foxmail.com>
To: ffmpeg-devel@ffmpeg.org
Cc: Zhao Zhili <zhilizhao@tencent.com>
Subject: [FFmpeg-devel] [PATCH v3 03/10] avfilter/dnn_backend_openvino: simplify memory allocation
Date: Tue, 30 Apr 2024 15:12:01 +0800
Message-ID: <tencent_AF40CC9A589F3FAB8D4E4F702E2D2A9A7608@qq.com> (raw)
In-Reply-To: <20240430071208.126817-1-quinkblack@foxmail.com>

From: Zhao Zhili <zhilizhao@tencent.com>

---
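Note (not part of the commit message): the patch embeds DNNModel directly inside OVModel instead of allocating DNNModel separately and linking the two structs through pointers. One av_mallocz() then covers both objects, the error path in dnn_load_model_ov() loses an extra free, and dnn_free_model_ov() only has to clear the caller's pointer after freeing the OVModel.

A minimal standalone sketch of the pattern, for reference only; the names (Base, Container, load, unload) are illustrative and not from the FFmpeg tree:

    #include <stdlib.h>

    typedef struct Base {
        void *priv;          /* back-pointer to the container, like model->model */
        int   func_type;
    } Base;

    typedef struct Container {
        Base base;           /* embedded as first member: no separate allocation */
        int  backend_state;
    } Container;

    static Base *load(void)
    {
        Container *c = calloc(1, sizeof(*c));
        if (!c)
            return NULL;
        c->base.priv = c;    /* callers can still reach the container */
        return &c->base;     /* a single allocation serves both structs */
    }

    static void unload(Base **base)
    {
        if (!base || !*base)
            return;
        free((*base)->priv); /* frees the Container, and the Base with it */
        *base = NULL;        /* only clear the caller's pointer, nothing left to free */
    }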
 libavfilter/dnn/dnn_backend_openvino.c | 47 +++++++++++---------------
 1 file changed, 20 insertions(+), 27 deletions(-)

diff --git a/libavfilter/dnn/dnn_backend_openvino.c b/libavfilter/dnn/dnn_backend_openvino.c
index 769ba0a54b..1acc54b791 100644
--- a/libavfilter/dnn/dnn_backend_openvino.c
+++ b/libavfilter/dnn/dnn_backend_openvino.c
@@ -41,8 +41,8 @@
 #include "dnn_backend_common.h"
 
 typedef struct OVModel{
+    DNNModel model;
     DnnContext *ctx;
-    DNNModel *model;
 #if HAVE_OPENVINO2
     ov_core_t *core;
     ov_model_t *ov_model;
@@ -300,11 +300,11 @@ static int fill_model_input_ov(OVModel *ov_model, OVRequestItem *request)
             return ov2_map_error(status, NULL);
         }
 #endif
-        switch (ov_model->model->func_type) {
+        switch (ov_model->model.func_type) {
         case DFT_PROCESS_FRAME:
             if (task->do_ioproc) {
-                if (ov_model->model->frame_pre_proc != NULL) {
-                    ov_model->model->frame_pre_proc(task->in_frame, &input, ov_model->model->filter_ctx);
+                if (ov_model->model.frame_pre_proc != NULL) {
+                    ov_model->model.frame_pre_proc(task->in_frame, &input, ov_model->model.filter_ctx);
                 } else {
                     ff_proc_from_frame_to_dnn(task->in_frame, &input, ctx);
                 }
@@ -442,11 +442,11 @@ static void infer_completion_callback(void *args)
     for (int i = 0; i < request->lltask_count; ++i) {
         task = request->lltasks[i]->task;
 
-        switch (ov_model->model->func_type) {
+        switch (ov_model->model.func_type) {
         case DFT_PROCESS_FRAME:
             if (task->do_ioproc) {
-                if (ov_model->model->frame_post_proc != NULL) {
-                    ov_model->model->frame_post_proc(task->out_frame, outputs, ov_model->model->filter_ctx);
+                if (ov_model->model.frame_post_proc != NULL) {
+                    ov_model->model.frame_post_proc(task->out_frame, outputs, ov_model->model.filter_ctx);
                 } else {
                     ff_proc_from_dnn_to_frame(task->out_frame, outputs, ctx);
                 }
@@ -458,23 +458,23 @@ static void infer_completion_callback(void *args)
             }
             break;
         case DFT_ANALYTICS_DETECT:
-            if (!ov_model->model->detect_post_proc) {
+            if (!ov_model->model.detect_post_proc) {
                 av_log(ctx, AV_LOG_ERROR, "detect filter needs to provide post proc\n");
                 goto end;
             }
-            ov_model->model->detect_post_proc(task->in_frame, outputs,
+            ov_model->model.detect_post_proc(task->in_frame, outputs,
                                               ov_model->nb_outputs,
-                                              ov_model->model->filter_ctx);
+                                              ov_model->model.filter_ctx);
             break;
         case DFT_ANALYTICS_CLASSIFY:
-            if (!ov_model->model->classify_post_proc) {
+            if (!ov_model->model.classify_post_proc) {
                 av_log(ctx, AV_LOG_ERROR, "classify filter needs to provide post proc\n");
                 goto end;
             }
             for (int output_i = 0; output_i < ov_model->nb_outputs; output_i++)
-                ov_model->model->classify_post_proc(task->in_frame, outputs,
+                ov_model->model.classify_post_proc(task->in_frame, outputs,
                                                     request->lltasks[i]->bbox_index,
-                                                    ov_model->model->filter_ctx);
+                                                    ov_model->model.filter_ctx);
             break;
         default:
             av_assert0(!"should not reach here");
@@ -571,7 +571,7 @@ static void dnn_free_model_ov(DNNModel **model)
     av_free(ov_model->all_input_names);
 #endif
     av_freep(&ov_model);
-    av_freep(model);
+    *model = NULL;
 }
 
 
@@ -598,7 +598,7 @@ static int init_model_ov(OVModel *ov_model, const char *input_name, const char *
 #endif
     // We scale pixel by default when do frame processing.
     if (fabsf(ctx->ov_option.scale) < 1e-6f)
-        ctx->ov_option.scale = ov_model->model->func_type == DFT_PROCESS_FRAME ? 255 : 1;
+        ctx->ov_option.scale = ov_model->model.func_type == DFT_PROCESS_FRAME ? 255 : 1;
     // batch size
     if (ctx->ov_option.batch_size <= 0) {
         ctx->ov_option.batch_size = 1;
@@ -702,7 +702,7 @@ static int init_model_ov(OVModel *ov_model, const char *input_name, const char *
             ret = ov2_map_error(status, NULL);
             goto err;
         }
-        if (ov_model->model->func_type != DFT_PROCESS_FRAME)
+        if (ov_model->model.func_type != DFT_PROCESS_FRAME)
             status |= ov_preprocess_output_set_element_type(output_tensor_info, F32);
         else if (fabsf(ctx->ov_option.scale - 1) > 1e-6f || fabsf(ctx->ov_option.mean) > 1e-6f)
             status |= ov_preprocess_output_set_element_type(output_tensor_info, F32);
@@ -1280,7 +1280,7 @@ static int get_output_ov(void *model, const char *input_name, int input_width, i
         .out_frame      = NULL,
     };
 
-    if (ov_model->model->func_type != DFT_PROCESS_FRAME) {
+    if (ov_model->model.func_type != DFT_PROCESS_FRAME) {
         av_log(ctx, AV_LOG_ERROR, "Get output dim only when processing frame.\n");
         return AVERROR(EINVAL);
     }
@@ -1342,7 +1342,7 @@ static int get_output_ov(void *model, const char *input_name, int input_width, i
         goto err;
     }
 
-    ret = extract_lltask_from_task(ov_model->model->func_type, &task, ov_model->lltask_queue, NULL);
+    ret = extract_lltask_from_task(ov_model->model.func_type, &task, ov_model->lltask_queue, NULL);
     if (ret != 0) {
         av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
         goto err;
@@ -1378,19 +1378,12 @@ static DNNModel *dnn_load_model_ov(DnnContext *ctx, DNNFunctionType func_type, A
     IEStatusCode status;
 #endif
 
-    model = av_mallocz(sizeof(DNNModel));
-    if (!model){
-        return NULL;
-    }
-
     ov_model = av_mallocz(sizeof(OVModel));
-    if (!ov_model) {
-        av_freep(&model);
+    if (!ov_model)
         return NULL;
-    }
     ov_model->ctx = ctx;
+    model = &ov_model->model;
     model->model = ov_model;
-    ov_model->model = model;
 
 #if HAVE_OPENVINO2
     status = ov_core_create(&core);
-- 
2.25.1

_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".

Thread overview: 13+ messages
     [not found] <20240430071208.126817-1-quinkblack@foxmail.com>
2024-04-30  7:11 ` [FFmpeg-devel] [PATCH v3 01/10] avfilter/dnn: Refactor DNN parameter configuration system Zhao Zhili
2024-05-06 11:48   ` Guo, Yejun
2024-05-06 13:42     ` Zhao Zhili
2024-05-07 16:35       ` Zhao Zhili
2024-04-30  7:12 ` [FFmpeg-devel] [PATCH v3 02/10] avfilter/dnn_backend_openvino: Fix free context at random place Zhao Zhili
2024-04-30  7:12 ` Zhao Zhili [this message]
2024-04-30  7:12 ` [FFmpeg-devel] [PATCH v3 04/10] avfilter/dnn_backend_tf: Remove one level of indentation Zhao Zhili
2024-04-30  7:12 ` [FFmpeg-devel] [PATCH v3 05/10] avfilter/dnn_backend_tf: Fix free context at random place Zhao Zhili
2024-04-30  7:12 ` [FFmpeg-devel] [PATCH v3 06/10] avfilter/dnn_backend_tf: Simplify memory allocation Zhao Zhili
2024-04-30  7:12 ` [FFmpeg-devel] [PATCH v3 07/10] avfilter/dnn_backend_torch: " Zhao Zhili
2024-04-30  7:12 ` [FFmpeg-devel] [PATCH v3 08/10] avfilter/dnn: Remove a level of dereference Zhao Zhili
2024-04-30  7:12 ` [FFmpeg-devel] [PATCH v3 09/10] avfilter/dnn: Use dnn_backend_info_list to search for dnn module Zhao Zhili
2024-04-30  7:12 ` [FFmpeg-devel] [PATCH v3 10/10] avfilter/vf_dnn_detect: Fix null pointer dereference Zhao Zhili
