Git Inbox Mirror of the ffmpeg-devel mailing list - see https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
From: Zhao Zhili <quinkblack@foxmail.com>
To: ffmpeg-devel@ffmpeg.org
Cc: Zhao Zhili <zhilizhao@tencent.com>
Subject: [FFmpeg-devel] [PATCH WIP 6/9] avfilter/dnn_backend_tf: Simplify memory allocation
Date: Sun, 28 Apr 2024 00:41:56 +0800
Message-ID: <tencent_0C489A62404B9AF09F36AB1D4E4670FC2D08@qq.com> (raw)
In-Reply-To: <20240427164159.82771-1-quinkblack@foxmail.com>

From: Zhao Zhili <zhilizhao@tencent.com>

---
 libavfilter/dnn/dnn_backend_tf.c | 33 +++++++++++++-------------------
 1 file changed, 13 insertions(+), 20 deletions(-)

diff --git a/libavfilter/dnn/dnn_backend_tf.c b/libavfilter/dnn/dnn_backend_tf.c
index 3b4de6d13f..c7716e696d 100644
--- a/libavfilter/dnn/dnn_backend_tf.c
+++ b/libavfilter/dnn/dnn_backend_tf.c
@@ -37,8 +37,8 @@
 #include <tensorflow/c/c_api.h>
 
 typedef struct TFModel {
+    DNNModel model;
     DnnContext *ctx;
-    DNNModel *model;
     TF_Graph *graph;
     TF_Session *session;
     TF_Status *status;
@@ -518,7 +518,7 @@ static void dnn_free_model_tf(DNNModel **model)
         TF_DeleteStatus(tf_model->status);
     }
     av_freep(&tf_model);
-    av_freep(&model);
+    *model = NULL;
 }
 
 static DNNModel *dnn_load_model_tf(DnnContext *ctx, DNNFunctionType func_type, AVFilterContext *filter_ctx)
@@ -526,18 +526,11 @@ static DNNModel *dnn_load_model_tf(DnnContext *ctx, DNNFunctionType func_type, A
     DNNModel *model = NULL;
     TFModel *tf_model = NULL;
 
-    model = av_mallocz(sizeof(DNNModel));
-    if (!model){
-        return NULL;
-    }
-
     tf_model = av_mallocz(sizeof(TFModel));
-    if (!tf_model){
-        av_freep(&model);
+    if (!tf_model)
         return NULL;
-    }
+    model = &tf_model->model;
     model->model = tf_model;
-    tf_model->model = model;
     tf_model->ctx = ctx;
 
     if (load_tf_model(tf_model, ctx->model_filename) != 0){
@@ -650,11 +643,11 @@ static int fill_model_input_tf(TFModel *tf_model, TFRequestItem *request) {
     }
     input.data = (float *)TF_TensorData(infer_request->input_tensor);
 
-    switch (tf_model->model->func_type) {
+    switch (tf_model->model.func_type) {
     case DFT_PROCESS_FRAME:
         if (task->do_ioproc) {
-            if (tf_model->model->frame_pre_proc != NULL) {
-                tf_model->model->frame_pre_proc(task->in_frame, &input, tf_model->model->filter_ctx);
+            if (tf_model->model.frame_pre_proc != NULL) {
+                tf_model->model.frame_pre_proc(task->in_frame, &input, tf_model->model.filter_ctx);
             } else {
                 ff_proc_from_frame_to_dnn(task->in_frame, &input, ctx);
             }
@@ -664,7 +657,7 @@ static int fill_model_input_tf(TFModel *tf_model, TFRequestItem *request) {
         ff_frame_to_dnn_detect(task->in_frame, &input, ctx);
         break;
     default:
-        avpriv_report_missing_feature(ctx, "model function type %d", tf_model->model->func_type);
+        avpriv_report_missing_feature(ctx, "model function type %d", tf_model->model.func_type);
         break;
     }
 
@@ -724,12 +717,12 @@ static void infer_completion_callback(void *args) {
         outputs[i].data = TF_TensorData(infer_request->output_tensors[i]);
         outputs[i].dt = (DNNDataType)TF_TensorType(infer_request->output_tensors[i]);
     }
-    switch (tf_model->model->func_type) {
+    switch (tf_model->model.func_type) {
     case DFT_PROCESS_FRAME:
         //it only support 1 output if it's frame in & frame out
         if (task->do_ioproc) {
-            if (tf_model->model->frame_post_proc != NULL) {
-                tf_model->model->frame_post_proc(task->out_frame, outputs, tf_model->model->filter_ctx);
+            if (tf_model->model.frame_post_proc != NULL) {
+                tf_model->model.frame_post_proc(task->out_frame, outputs, tf_model->model.filter_ctx);
             } else {
                 ff_proc_from_dnn_to_frame(task->out_frame, outputs, ctx);
             }
@@ -741,11 +734,11 @@ static void infer_completion_callback(void *args) {
         }
         break;
     case DFT_ANALYTICS_DETECT:
-        if (!tf_model->model->detect_post_proc) {
+        if (!tf_model->model.detect_post_proc) {
             av_log(ctx, AV_LOG_ERROR, "Detect filter needs provide post proc\n");
             return;
         }
-        tf_model->model->detect_post_proc(task->in_frame, outputs, task->nb_output, tf_model->model->filter_ctx);
+        tf_model->model.detect_post_proc(task->in_frame, outputs, task->nb_output, tf_model->model.filter_ctx);
         break;
     default:
         av_log(ctx, AV_LOG_ERROR, "Tensorflow backend does not support this kind of dnn filter now\n");
-- 
2.34.1
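
With DNNModel embedded as the first member of TFModel, a single av_mallocz()
now covers both structures: the separate DNNModel allocation and its
error-path cleanup disappear, and the old tf_model->model pointer field
becomes the embedded struct itself, hence the "->" to "." changes throughout
the file. Freeing tf_model releases everything at once, so dnn_free_model_tf
only has to clear the caller's pointer. A minimal standalone sketch of the
pattern, using hypothetical Base/Container names rather than the real FFmpeg
types:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for DNNModel and TFModel. */
typedef struct Base {
    void *priv;          /* like DNNModel.model: points to the backend context */
    int   func_type;
} Base;

typedef struct Container {
    Base base;           /* embedded as the FIRST member, like TFModel.model */
    int  backend_state;  /* backend-specific fields follow */
} Container;

/* One allocation covers both views; the public handle is just &c->base. */
static Base *load_model(void)
{
    Container *c = calloc(1, sizeof(*c));
    if (!c)
        return NULL;
    c->base.priv = c;    /* public struct still exposes the backend context */
    return &c->base;
}

/* Because base is the first member, Base* and Container* share an address,
 * so freeing the container releases everything in one call. */
static void free_model(Base **model)
{
    Container *c;
    if (!model || !*model)
        return;
    c = (*model)->priv;  /* or equivalently: (Container *)*model */
    free(c);
    *model = NULL;       /* only clear the caller's handle, as the patch does */
}

int main(void)
{
    Base *m = load_model();
    printf("model: %p\n", (void *)m);
    free_model(&m);
    printf("after free: %p\n", (void *)m);
    return 0;
}

The real backend uses av_mallocz()/av_freep() rather than calloc()/free(), but
the layout guarantee is the same: with the embedded struct as the first
member, the public handle and the backend container share one address and one
allocation.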

Thread overview: 9+ messages
     [not found] <20240427164159.82771-1-quinkblack@foxmail.com>
2024-04-27 16:41 ` [FFmpeg-devel] [PATCH WIP 1/9] avfilter/dnn: Refactor DNN parameter configuration system Zhao Zhili
2024-04-27 16:41 ` [FFmpeg-devel] [PATCH WIP 2/9] avfilter/dnn_backend_openvino: Fix free context at random place Zhao Zhili
2024-04-27 16:41 ` [FFmpeg-devel] [PATCH WIP 3/9] avfilter/dnn_backend_openvino: simplify memory allocation Zhao Zhili
2024-04-27 16:41 ` [FFmpeg-devel] [PATCH WIP 4/9] avfilter/dnn_backend_tf: Remove one level of indentation Zhao Zhili
2024-04-27 16:41 ` [FFmpeg-devel] [PATCH WIP 5/9] avfilter/dnn_backend_tf: Fix free context at random place Zhao Zhili
2024-04-27 16:41 ` Zhao Zhili [this message]
2024-04-27 16:41 ` [FFmpeg-devel] [PATCH WIP 7/9] avfilter/dnn_backend_torch: Simplify memory allocation Zhao Zhili
2024-04-27 16:41 ` [FFmpeg-devel] [PATCH WIP 8/9] avfilter/dnn: Remove a level of dereference Zhao Zhili
2024-04-27 16:41 ` [FFmpeg-devel] [PATCH WIP 9/9] avfilter/dnn: Use dnn_backend_info_list to search for dnn module Zhao Zhili
