Git Inbox Mirror of the ffmpeg-devel mailing list - see https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
* [FFmpeg-devel] [PATCH WIP v2 2/9] avfilter/dnn_backend_openvino: Fix free context at random place
       [not found] <20240428064655.106853-1-quinkblack@foxmail.com>
@ 2024-04-28  6:46 ` Zhao Zhili
  2024-04-28  6:46 ` [FFmpeg-devel] [PATCH WIP v2 3/9] avfilter/dnn_backend_openvino: simplify memory allocation Zhao Zhili
                   ` (6 subsequent siblings)
  7 siblings, 0 replies; 8+ messages in thread
From: Zhao Zhili @ 2024-04-28  6:46 UTC (permalink / raw)
  To: ffmpeg-devel; +Cc: Zhao Zhili

From: Zhao Zhili <zhilizhao@tencent.com>

The model will be freed again by ff_dnn_uninit, so also freeing it here in
the error path leads to a double free. Leave the cleanup to the single owner.
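
To make the ownership rule concrete, here is a minimal, self-contained C
sketch (hypothetical names, not the actual FFmpeg API) of how a callee
freeing the model on error, while the caller's uninit frees it as well,
ends in a double free:

    /* Hypothetical names; a simplified model of the ownership issue. */
    #include <stdlib.h>

    typedef struct Model { int dummy; } Model;

    static void model_free(Model **m)
    {
        free(*m);
        *m = NULL;
    }

    /* Backend init: returns 0 on success, negative on error. */
    static int backend_init(Model *m)
    {
        int ret = -1;                  /* pretend some setup step failed */
        (void)m;                       /* setup using m would happen here */
        if (ret < 0) {
            /* BUG (analogous to the call this patch removes):
             * model_free(&m) here would free the caller's allocation,
             * and the caller's uninit path would free it a second time. */
            return ret;
        }
        return 0;
    }

    int main(void)
    {
        Model *m = calloc(1, sizeof(*m));
        if (!m)
            return 1;
        if (backend_init(m) < 0) {
            model_free(&m);            /* single owner: only the caller frees */
            return 1;
        }
        model_free(&m);
        return 0;
    }

With the error-path free removed, ff_dnn_uninit remains the only place
that releases the model.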
---
 libavfilter/dnn/dnn_backend_openvino.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/libavfilter/dnn/dnn_backend_openvino.c b/libavfilter/dnn/dnn_backend_openvino.c
index c4b0682f11..769ba0a54b 100644
--- a/libavfilter/dnn/dnn_backend_openvino.c
+++ b/libavfilter/dnn/dnn_backend_openvino.c
@@ -959,7 +959,6 @@ err:
     if (input_model_info)
         ov_preprocess_input_model_info_free(input_model_info);
 #endif
-    dnn_free_model_ov(&ov_model->model);
     return ret;
 }
 
-- 
2.25.1


* [FFmpeg-devel] [PATCH WIP v2 3/9] avfilter/dnn_backend_openvino: simplify memory allocation
       [not found] <20240428064655.106853-1-quinkblack@foxmail.com>
  2024-04-28  6:46 ` [FFmpeg-devel] [PATCH WIP v2 2/9] avfilter/dnn_backend_openvino: Fix free context at random place Zhao Zhili
@ 2024-04-28  6:46 ` Zhao Zhili
  2024-04-28  6:46 ` [FFmpeg-devel] [PATCH WIP v2 4/9] avfilter/dnn_backend_tf: Remove one level of indentation Zhao Zhili
                   ` (5 subsequent siblings)
  7 siblings, 0 replies; 8+ messages in thread
From: Zhao Zhili @ 2024-04-28  6:46 UTC (permalink / raw)
  To: ffmpeg-devel; +Cc: Zhao Zhili

From: Zhao Zhili <zhilizhao@tencent.com>

---
 libavfilter/dnn/dnn_backend_openvino.c | 47 +++++++++++---------------
 1 file changed, 20 insertions(+), 27 deletions(-)

diff --git a/libavfilter/dnn/dnn_backend_openvino.c b/libavfilter/dnn/dnn_backend_openvino.c
index 769ba0a54b..1acc54b791 100644
--- a/libavfilter/dnn/dnn_backend_openvino.c
+++ b/libavfilter/dnn/dnn_backend_openvino.c
@@ -41,8 +41,8 @@
 #include "dnn_backend_common.h"
 
 typedef struct OVModel{
+    DNNModel model;
     DnnContext *ctx;
-    DNNModel *model;
 #if HAVE_OPENVINO2
     ov_core_t *core;
     ov_model_t *ov_model;
@@ -300,11 +300,11 @@ static int fill_model_input_ov(OVModel *ov_model, OVRequestItem *request)
             return ov2_map_error(status, NULL);
         }
 #endif
-        switch (ov_model->model->func_type) {
+        switch (ov_model->model.func_type) {
         case DFT_PROCESS_FRAME:
             if (task->do_ioproc) {
-                if (ov_model->model->frame_pre_proc != NULL) {
-                    ov_model->model->frame_pre_proc(task->in_frame, &input, ov_model->model->filter_ctx);
+                if (ov_model->model.frame_pre_proc != NULL) {
+                    ov_model->model.frame_pre_proc(task->in_frame, &input, ov_model->model.filter_ctx);
                 } else {
                     ff_proc_from_frame_to_dnn(task->in_frame, &input, ctx);
                 }
@@ -442,11 +442,11 @@ static void infer_completion_callback(void *args)
     for (int i = 0; i < request->lltask_count; ++i) {
         task = request->lltasks[i]->task;
 
-        switch (ov_model->model->func_type) {
+        switch (ov_model->model.func_type) {
         case DFT_PROCESS_FRAME:
             if (task->do_ioproc) {
-                if (ov_model->model->frame_post_proc != NULL) {
-                    ov_model->model->frame_post_proc(task->out_frame, outputs, ov_model->model->filter_ctx);
+                if (ov_model->model.frame_post_proc != NULL) {
+                    ov_model->model.frame_post_proc(task->out_frame, outputs, ov_model->model.filter_ctx);
                 } else {
                     ff_proc_from_dnn_to_frame(task->out_frame, outputs, ctx);
                 }
@@ -458,23 +458,23 @@ static void infer_completion_callback(void *args)
             }
             break;
         case DFT_ANALYTICS_DETECT:
-            if (!ov_model->model->detect_post_proc) {
+            if (!ov_model->model.detect_post_proc) {
                 av_log(ctx, AV_LOG_ERROR, "detect filter needs to provide post proc\n");
                 goto end;
             }
-            ov_model->model->detect_post_proc(task->in_frame, outputs,
+            ov_model->model.detect_post_proc(task->in_frame, outputs,
                                               ov_model->nb_outputs,
-                                              ov_model->model->filter_ctx);
+                                              ov_model->model.filter_ctx);
             break;
         case DFT_ANALYTICS_CLASSIFY:
-            if (!ov_model->model->classify_post_proc) {
+            if (!ov_model->model.classify_post_proc) {
                 av_log(ctx, AV_LOG_ERROR, "classify filter needs to provide post proc\n");
                 goto end;
             }
             for (int output_i = 0; output_i < ov_model->nb_outputs; output_i++)
-                ov_model->model->classify_post_proc(task->in_frame, outputs,
+                ov_model->model.classify_post_proc(task->in_frame, outputs,
                                                     request->lltasks[i]->bbox_index,
-                                                    ov_model->model->filter_ctx);
+                                                    ov_model->model.filter_ctx);
             break;
         default:
             av_assert0(!"should not reach here");
@@ -571,7 +571,7 @@ static void dnn_free_model_ov(DNNModel **model)
     av_free(ov_model->all_input_names);
 #endif
     av_freep(&ov_model);
-    av_freep(model);
+    *model = NULL;
 }
 
 
@@ -598,7 +598,7 @@ static int init_model_ov(OVModel *ov_model, const char *input_name, const char *
 #endif
     // We scale pixel by default when do frame processing.
     if (fabsf(ctx->ov_option.scale) < 1e-6f)
-        ctx->ov_option.scale = ov_model->model->func_type == DFT_PROCESS_FRAME ? 255 : 1;
+        ctx->ov_option.scale = ov_model->model.func_type == DFT_PROCESS_FRAME ? 255 : 1;
     // batch size
     if (ctx->ov_option.batch_size <= 0) {
         ctx->ov_option.batch_size = 1;
@@ -702,7 +702,7 @@ static int init_model_ov(OVModel *ov_model, const char *input_name, const char *
             ret = ov2_map_error(status, NULL);
             goto err;
         }
-        if (ov_model->model->func_type != DFT_PROCESS_FRAME)
+        if (ov_model->model.func_type != DFT_PROCESS_FRAME)
             status |= ov_preprocess_output_set_element_type(output_tensor_info, F32);
         else if (fabsf(ctx->ov_option.scale - 1) > 1e-6f || fabsf(ctx->ov_option.mean) > 1e-6f)
             status |= ov_preprocess_output_set_element_type(output_tensor_info, F32);
@@ -1280,7 +1280,7 @@ static int get_output_ov(void *model, const char *input_name, int input_width, i
         .out_frame      = NULL,
     };
 
-    if (ov_model->model->func_type != DFT_PROCESS_FRAME) {
+    if (ov_model->model.func_type != DFT_PROCESS_FRAME) {
         av_log(ctx, AV_LOG_ERROR, "Get output dim only when processing frame.\n");
         return AVERROR(EINVAL);
     }
@@ -1342,7 +1342,7 @@ static int get_output_ov(void *model, const char *input_name, int input_width, i
         goto err;
     }
 
-    ret = extract_lltask_from_task(ov_model->model->func_type, &task, ov_model->lltask_queue, NULL);
+    ret = extract_lltask_from_task(ov_model->model.func_type, &task, ov_model->lltask_queue, NULL);
     if (ret != 0) {
         av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
         goto err;
@@ -1378,19 +1378,12 @@ static DNNModel *dnn_load_model_ov(DnnContext *ctx, DNNFunctionType func_type, A
     IEStatusCode status;
 #endif
 
-    model = av_mallocz(sizeof(DNNModel));
-    if (!model){
-        return NULL;
-    }
-
     ov_model = av_mallocz(sizeof(OVModel));
-    if (!ov_model) {
-        av_freep(&model);
+    if (!ov_model)
         return NULL;
-    }
     ov_model->ctx = ctx;
+    model = &ov_model->model;
     model->model = ov_model;
-    ov_model->model = model;
 
 #if HAVE_OPENVINO2
     status = ov_core_create(&core);
-- 
2.25.1


* [FFmpeg-devel] [PATCH WIP v2 4/9] avfilter/dnn_backend_tf: Remove one level of indentation
       [not found] <20240428064655.106853-1-quinkblack@foxmail.com>
  2024-04-28  6:46 ` [FFmpeg-devel] [PATCH WIP v2 2/9] avfilter/dnn_backend_openvino: Fix free context at random place Zhao Zhili
  2024-04-28  6:46 ` [FFmpeg-devel] [PATCH WIP v2 3/9] avfilter/dnn_backend_openvino: simplify memory allocation Zhao Zhili
@ 2024-04-28  6:46 ` Zhao Zhili
  2024-04-28  6:46 ` [FFmpeg-devel] [PATCH WIP v2 5/9] avfilter/dnn_backend_tf: Fix free context at random place Zhao Zhili
                   ` (4 subsequent siblings)
  7 siblings, 0 replies; 8+ messages in thread
From: Zhao Zhili @ 2024-04-28  6:46 UTC (permalink / raw)
  To: ffmpeg-devel; +Cc: Zhao Zhili

From: Zhao Zhili <zhilizhao@tencent.com>

---
 libavfilter/dnn/dnn_backend_tf.c | 63 ++++++++++++++++----------------
 1 file changed, 32 insertions(+), 31 deletions(-)

diff --git a/libavfilter/dnn/dnn_backend_tf.c b/libavfilter/dnn/dnn_backend_tf.c
index d24591b90b..60f9e57fb7 100644
--- a/libavfilter/dnn/dnn_backend_tf.c
+++ b/libavfilter/dnn/dnn_backend_tf.c
@@ -483,41 +483,42 @@ static void dnn_free_model_tf(DNNModel **model)
 {
     TFModel *tf_model;
 
-    if (*model){
-        tf_model = (*model)->model;
-        while (ff_safe_queue_size(tf_model->request_queue) != 0) {
-            TFRequestItem *item = ff_safe_queue_pop_front(tf_model->request_queue);
-            destroy_request_item(&item);
-        }
-        ff_safe_queue_destroy(tf_model->request_queue);
+    if (!model || !*model)
+        return;
 
-        while (ff_queue_size(tf_model->lltask_queue) != 0) {
-            LastLevelTaskItem *item = ff_queue_pop_front(tf_model->lltask_queue);
-            av_freep(&item);
-        }
-        ff_queue_destroy(tf_model->lltask_queue);
+    tf_model = (*model)->model;
+    while (ff_safe_queue_size(tf_model->request_queue) != 0) {
+        TFRequestItem *item = ff_safe_queue_pop_front(tf_model->request_queue);
+        destroy_request_item(&item);
+    }
+    ff_safe_queue_destroy(tf_model->request_queue);
 
-        while (ff_queue_size(tf_model->task_queue) != 0) {
-            TaskItem *item = ff_queue_pop_front(tf_model->task_queue);
-            av_frame_free(&item->in_frame);
-            av_frame_free(&item->out_frame);
-            av_freep(&item);
-        }
-        ff_queue_destroy(tf_model->task_queue);
+    while (ff_queue_size(tf_model->lltask_queue) != 0) {
+        LastLevelTaskItem *item = ff_queue_pop_front(tf_model->lltask_queue);
+        av_freep(&item);
+    }
+    ff_queue_destroy(tf_model->lltask_queue);
 
-        if (tf_model->graph){
-            TF_DeleteGraph(tf_model->graph);
-        }
-        if (tf_model->session){
-            TF_CloseSession(tf_model->session, tf_model->status);
-            TF_DeleteSession(tf_model->session, tf_model->status);
-        }
-        if (tf_model->status){
-            TF_DeleteStatus(tf_model->status);
-        }
-        av_freep(&tf_model);
-        av_freep(&model);
+    while (ff_queue_size(tf_model->task_queue) != 0) {
+        TaskItem *item = ff_queue_pop_front(tf_model->task_queue);
+        av_frame_free(&item->in_frame);
+        av_frame_free(&item->out_frame);
+        av_freep(&item);
+    }
+    ff_queue_destroy(tf_model->task_queue);
+
+    if (tf_model->graph){
+        TF_DeleteGraph(tf_model->graph);
+    }
+    if (tf_model->session){
+        TF_CloseSession(tf_model->session, tf_model->status);
+        TF_DeleteSession(tf_model->session, tf_model->status);
+    }
+    if (tf_model->status){
+        TF_DeleteStatus(tf_model->status);
     }
+    av_freep(&tf_model);
+    av_freep(&model);
 }
 
 static DNNModel *dnn_load_model_tf(DnnContext *ctx, DNNFunctionType func_type, AVFilterContext *filter_ctx)
-- 
2.25.1


* [FFmpeg-devel] [PATCH WIP v2 5/9] avfilter/dnn_backend_tf: Fix free context at random place
       [not found] <20240428064655.106853-1-quinkblack@foxmail.com>
                   ` (2 preceding siblings ...)
  2024-04-28  6:46 ` [FFmpeg-devel] [PATCH WIP v2 4/9] avfilter/dnn_backend_tf: Remove one level of indentation Zhao Zhili
@ 2024-04-28  6:46 ` Zhao Zhili
  2024-04-28  6:46 ` [FFmpeg-devel] [PATCH WIP v2 6/9] avfilter/dnn_backend_tf: Simplify memory allocation Zhao Zhili
                   ` (3 subsequent siblings)
  7 siblings, 0 replies; 8+ messages in thread
From: Zhao Zhili @ 2024-04-28  6:46 UTC (permalink / raw)
  To: ffmpeg-devel; +Cc: Zhao Zhili

From: Zhao Zhili <zhilizhao@tencent.com>

The model will be freed again by ff_dnn_uninit, so also freeing it here in
the error path leads to a double free. This mirrors the OpenVINO fix earlier
in this series.
---
 libavfilter/dnn/dnn_backend_tf.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libavfilter/dnn/dnn_backend_tf.c b/libavfilter/dnn/dnn_backend_tf.c
index 60f9e57fb7..3b4de6d13f 100644
--- a/libavfilter/dnn/dnn_backend_tf.c
+++ b/libavfilter/dnn/dnn_backend_tf.c
@@ -804,7 +804,7 @@ err:
     if (ff_safe_queue_push_back(tf_model->request_queue, request) < 0) {
         destroy_request_item(&request);
     }
-    dnn_free_model_tf(&tf_model->model);
+
     return ret;
 }
 
-- 
2.25.1


* [FFmpeg-devel] [PATCH WIP v2 6/9] avfilter/dnn_backend_tf: Simplify memory allocation
       [not found] <20240428064655.106853-1-quinkblack@foxmail.com>
                   ` (3 preceding siblings ...)
  2024-04-28  6:46 ` [FFmpeg-devel] [PATCH WIP v2 5/9] avfilter/dnn_backend_tf: Fix free context at random place Zhao Zhili
@ 2024-04-28  6:46 ` Zhao Zhili
  2024-04-28  6:46 ` [FFmpeg-devel] [PATCH WIP v2 7/9] avfilter/dnn_backend_torch: " Zhao Zhili
                   ` (2 subsequent siblings)
  7 siblings, 0 replies; 8+ messages in thread
From: Zhao Zhili @ 2024-04-28  6:46 UTC (permalink / raw)
  To: ffmpeg-devel; +Cc: Zhao Zhili

From: Zhao Zhili <zhilizhao@tencent.com>

---
 libavfilter/dnn/dnn_backend_tf.c | 33 +++++++++++++-------------------
 1 file changed, 13 insertions(+), 20 deletions(-)

diff --git a/libavfilter/dnn/dnn_backend_tf.c b/libavfilter/dnn/dnn_backend_tf.c
index 3b4de6d13f..c7716e696d 100644
--- a/libavfilter/dnn/dnn_backend_tf.c
+++ b/libavfilter/dnn/dnn_backend_tf.c
@@ -37,8 +37,8 @@
 #include <tensorflow/c/c_api.h>
 
 typedef struct TFModel {
+    DNNModel model;
     DnnContext *ctx;
-    DNNModel *model;
     TF_Graph *graph;
     TF_Session *session;
     TF_Status *status;
@@ -518,7 +518,7 @@ static void dnn_free_model_tf(DNNModel **model)
         TF_DeleteStatus(tf_model->status);
     }
     av_freep(&tf_model);
-    av_freep(&model);
+    *model = NULL;
 }
 
 static DNNModel *dnn_load_model_tf(DnnContext *ctx, DNNFunctionType func_type, AVFilterContext *filter_ctx)
@@ -526,18 +526,11 @@ static DNNModel *dnn_load_model_tf(DnnContext *ctx, DNNFunctionType func_type, A
     DNNModel *model = NULL;
     TFModel *tf_model = NULL;
 
-    model = av_mallocz(sizeof(DNNModel));
-    if (!model){
-        return NULL;
-    }
-
     tf_model = av_mallocz(sizeof(TFModel));
-    if (!tf_model){
-        av_freep(&model);
+    if (!tf_model)
         return NULL;
-    }
+    model = &tf_model->model;
     model->model = tf_model;
-    tf_model->model = model;
     tf_model->ctx = ctx;
 
     if (load_tf_model(tf_model, ctx->model_filename) != 0){
@@ -650,11 +643,11 @@ static int fill_model_input_tf(TFModel *tf_model, TFRequestItem *request) {
     }
     input.data = (float *)TF_TensorData(infer_request->input_tensor);
 
-    switch (tf_model->model->func_type) {
+    switch (tf_model->model.func_type) {
     case DFT_PROCESS_FRAME:
         if (task->do_ioproc) {
-            if (tf_model->model->frame_pre_proc != NULL) {
-                tf_model->model->frame_pre_proc(task->in_frame, &input, tf_model->model->filter_ctx);
+            if (tf_model->model.frame_pre_proc != NULL) {
+                tf_model->model.frame_pre_proc(task->in_frame, &input, tf_model->model.filter_ctx);
             } else {
                 ff_proc_from_frame_to_dnn(task->in_frame, &input, ctx);
             }
@@ -664,7 +657,7 @@ static int fill_model_input_tf(TFModel *tf_model, TFRequestItem *request) {
         ff_frame_to_dnn_detect(task->in_frame, &input, ctx);
         break;
     default:
-        avpriv_report_missing_feature(ctx, "model function type %d", tf_model->model->func_type);
+        avpriv_report_missing_feature(ctx, "model function type %d", tf_model->model.func_type);
         break;
     }
 
@@ -724,12 +717,12 @@ static void infer_completion_callback(void *args) {
         outputs[i].data = TF_TensorData(infer_request->output_tensors[i]);
         outputs[i].dt = (DNNDataType)TF_TensorType(infer_request->output_tensors[i]);
     }
-    switch (tf_model->model->func_type) {
+    switch (tf_model->model.func_type) {
     case DFT_PROCESS_FRAME:
         //it only support 1 output if it's frame in & frame out
         if (task->do_ioproc) {
-            if (tf_model->model->frame_post_proc != NULL) {
-                tf_model->model->frame_post_proc(task->out_frame, outputs, tf_model->model->filter_ctx);
+            if (tf_model->model.frame_post_proc != NULL) {
+                tf_model->model.frame_post_proc(task->out_frame, outputs, tf_model->model.filter_ctx);
             } else {
                 ff_proc_from_dnn_to_frame(task->out_frame, outputs, ctx);
             }
@@ -741,11 +734,11 @@ static void infer_completion_callback(void *args) {
         }
         break;
     case DFT_ANALYTICS_DETECT:
-        if (!tf_model->model->detect_post_proc) {
+        if (!tf_model->model.detect_post_proc) {
             av_log(ctx, AV_LOG_ERROR, "Detect filter needs provide post proc\n");
             return;
         }
-        tf_model->model->detect_post_proc(task->in_frame, outputs, task->nb_output, tf_model->model->filter_ctx);
+        tf_model->model.detect_post_proc(task->in_frame, outputs, task->nb_output, tf_model->model.filter_ctx);
         break;
     default:
         av_log(ctx, AV_LOG_ERROR, "Tensorflow backend does not support this kind of dnn filter now\n");
-- 
2.25.1


* [FFmpeg-devel] [PATCH WIP v2 7/9] avfilter/dnn_backend_torch: Simplify memory allocation
       [not found] <20240428064655.106853-1-quinkblack@foxmail.com>
                   ` (4 preceding siblings ...)
  2024-04-28  6:46 ` [FFmpeg-devel] [PATCH WIP v2 6/9] avfilter/dnn_backend_tf: Simplify memory allocation Zhao Zhili
@ 2024-04-28  6:46 ` Zhao Zhili
  2024-04-28  6:46 ` [FFmpeg-devel] [PATCH WIP v2 8/9] avfilter/dnn: Remove a level of dereference Zhao Zhili
  2024-04-28  6:46 ` [FFmpeg-devel] [PATCH WIP v2 9/9] avfilter/dnn: Use dnn_backend_info_list to search for dnn module Zhao Zhili
  7 siblings, 0 replies; 8+ messages in thread
From: Zhao Zhili @ 2024-04-28  6:46 UTC (permalink / raw)
  To: ffmpeg-devel; +Cc: Zhao Zhili

From: Zhao Zhili <zhilizhao@tencent.com>

---
 libavfilter/dnn/dnn_backend_torch.cpp | 31 +++++++++++----------------
 1 file changed, 12 insertions(+), 19 deletions(-)

diff --git a/libavfilter/dnn/dnn_backend_torch.cpp b/libavfilter/dnn/dnn_backend_torch.cpp
index abdef1f178..818ec5b713 100644
--- a/libavfilter/dnn/dnn_backend_torch.cpp
+++ b/libavfilter/dnn/dnn_backend_torch.cpp
@@ -37,8 +37,8 @@ extern "C" {
 }
 
 typedef struct THModel {
+    DNNModel model;
     DnnContext *ctx;
-    DNNModel *model;
     torch::jit::Module *jit_model;
     SafeQueue *request_queue;
     Queue *task_queue;
@@ -141,7 +141,7 @@ static void dnn_free_model_th(DNNModel **model)
     ff_queue_destroy(th_model->task_queue);
     delete th_model->jit_model;
     av_freep(&th_model);
-    av_freep(model);
+    *model = NULL;
 }
 
 static int get_input_th(void *model, DNNData *input, const char *input_name)
@@ -195,19 +195,19 @@ static int fill_model_input_th(THModel *th_model, THRequestItem *request)
     infer_request->input_tensor = new torch::Tensor();
     infer_request->output = new torch::Tensor();
 
-    switch (th_model->model->func_type) {
+    switch (th_model->model.func_type) {
     case DFT_PROCESS_FRAME:
         input.scale = 255;
         if (task->do_ioproc) {
-            if (th_model->model->frame_pre_proc != NULL) {
-                th_model->model->frame_pre_proc(task->in_frame, &input, th_model->model->filter_ctx);
+            if (th_model->model.frame_pre_proc != NULL) {
+                th_model->model.frame_pre_proc(task->in_frame, &input, th_model->model.filter_ctx);
             } else {
                 ff_proc_from_frame_to_dnn(task->in_frame, &input, ctx);
             }
         }
         break;
     default:
-        avpriv_report_missing_feature(NULL, "model function type %d", th_model->model->func_type);
+        avpriv_report_missing_feature(NULL, "model function type %d", th_model->model.func_type);
         break;
     }
     *infer_request->input_tensor = torch::from_blob(input.data,
@@ -282,13 +282,13 @@ static void infer_completion_callback(void *args) {
         goto err;
     }
 
-    switch (th_model->model->func_type) {
+    switch (th_model->model.func_type) {
     case DFT_PROCESS_FRAME:
         if (task->do_ioproc) {
             outputs.scale = 255;
             outputs.data = output->data_ptr();
-            if (th_model->model->frame_post_proc != NULL) {
-                th_model->model->frame_post_proc(task->out_frame, &outputs, th_model->model->filter_ctx);
+            if (th_model->model.frame_post_proc != NULL) {
+                th_model->model.frame_post_proc(task->out_frame, &outputs, th_model->model.filter_ctx);
             } else {
                 ff_proc_from_dnn_to_frame(task->out_frame, &outputs, th_model->ctx);
             }
@@ -298,7 +298,7 @@ static void infer_completion_callback(void *args) {
         }
         break;
     default:
-        avpriv_report_missing_feature(th_model->ctx, "model function type %d", th_model->model->func_type);
+        avpriv_report_missing_feature(th_model->ctx, "model function type %d", th_model->model.func_type);
         goto err;
     }
     task->inference_done++;
@@ -417,17 +417,10 @@ static DNNModel *dnn_load_model_th(DnnContext *ctx, DNNFunctionType func_type, A
     THRequestItem *item = NULL;
     const char *device_name = ctx->device ? ctx->device : "cpu";
 
-    model = (DNNModel *)av_mallocz(sizeof(DNNModel));
-    if (!model) {
-        return NULL;
-    }
-
     th_model = (THModel *)av_mallocz(sizeof(THModel));
-    if (!th_model) {
-        av_freep(&model);
+    if (!th_model)
         return NULL;
-    }
-    th_model->model = model;
+    model = &th_model->model;
     model->model = th_model;
     th_model->ctx = ctx;
 
-- 
2.25.1


* [FFmpeg-devel] [PATCH WIP v2 8/9] avfilter/dnn: Remove a level of dereference
       [not found] <20240428064655.106853-1-quinkblack@foxmail.com>
                   ` (5 preceding siblings ...)
  2024-04-28  6:46 ` [FFmpeg-devel] [PATCH WIP v2 7/9] avfilter/dnn_backend_torch: " Zhao Zhili
@ 2024-04-28  6:46 ` Zhao Zhili
  2024-04-28  6:46 ` [FFmpeg-devel] [PATCH WIP v2 9/9] avfilter/dnn: Use dnn_backend_info_list to search for dnn module Zhao Zhili
  7 siblings, 0 replies; 8+ messages in thread
From: Zhao Zhili @ 2024-04-28  6:46 UTC (permalink / raw)
  To: ffmpeg-devel; +Cc: Zhao Zhili

From: Zhao Zhili <zhilizhao@tencent.com>

Code such as 'model->model = ov_model' is confusing. We can drop the member
variable and use a cast to get the subclass instead.
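
As a rough, self-contained sketch of the pattern (made-up names, not the
real FFmpeg structs): with the base struct embedded as the first member,
a cast converts between the backend struct and the generic DNNModel
pointer in both directions, so no back-pointer is needed.

    /* Hypothetical names; illustrates the base-struct-as-first-member casts. */
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct BaseModel {
        int func_type;
    } BaseModel;

    typedef struct BackendModel {
        BaseModel base;      /* must stay the first member for the casts */
        int backend_data;
    } BackendModel;

    static void describe(BaseModel *model)
    {
        /* Downcast: recover the backend struct from the base pointer. */
        BackendModel *bm = (BackendModel *)model;
        printf("func_type=%d backend_data=%d\n",
               model->func_type, bm->backend_data);
    }

    int main(void)
    {
        BackendModel *bm = calloc(1, sizeof(*bm));
        if (!bm)
            return 1;
        bm->base.func_type = 1;
        bm->backend_data   = 42;

        /* Upcast: hand only the base pointer to generic code. */
        describe(&bm->base);

        free(bm);
        return 0;
    }

C guarantees that a pointer to a struct, suitably converted, points to its
first member and vice versa, so both casts are well-defined.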
---
 libavfilter/dnn/dnn_backend_openvino.c | 17 ++++++++---------
 libavfilter/dnn/dnn_backend_tf.c       | 19 +++++++++----------
 libavfilter/dnn/dnn_backend_torch.cpp  | 15 +++++++--------
 libavfilter/dnn_filter_common.c        |  6 +++---
 libavfilter/dnn_interface.h            |  6 ++----
 5 files changed, 29 insertions(+), 34 deletions(-)

diff --git a/libavfilter/dnn/dnn_backend_openvino.c b/libavfilter/dnn/dnn_backend_openvino.c
index 1acc54b791..d8a6820dc2 100644
--- a/libavfilter/dnn/dnn_backend_openvino.c
+++ b/libavfilter/dnn/dnn_backend_openvino.c
@@ -517,7 +517,7 @@ static void dnn_free_model_ov(DNNModel **model)
     if (!model || !*model)
         return;
 
-    ov_model = (*model)->model;
+    ov_model = (OVModel *)(*model);
     while (ff_safe_queue_size(ov_model->request_queue) != 0) {
         OVRequestItem *item = ff_safe_queue_pop_front(ov_model->request_queue);
         if (item && item->infer_request) {
@@ -1059,9 +1059,9 @@ err:
     return ret;
 }
 
-static int get_input_ov(void *model, DNNData *input, const char *input_name)
+static int get_input_ov(DNNModel *model, DNNData *input, const char *input_name)
 {
-    OVModel *ov_model = model;
+    OVModel *ov_model = (OVModel *)model;
     DnnContext *ctx = ov_model->ctx;
     int input_resizable = ctx->ov_option.input_resizable;
 
@@ -1255,7 +1255,7 @@ static int extract_lltask_from_task(DNNFunctionType func_type, TaskItem *task, Q
     }
 }
 
-static int get_output_ov(void *model, const char *input_name, int input_width, int input_height,
+static int get_output_ov(DNNModel *model, const char *input_name, int input_width, int input_height,
                                    const char *output_name, int *output_width, int *output_height)
 {
 #if HAVE_OPENVINO2
@@ -1268,7 +1268,7 @@ static int get_output_ov(void *model, const char *input_name, int input_width, i
     input_shapes_t input_shapes;
 #endif
     int ret;
-    OVModel *ov_model = model;
+    OVModel *ov_model = (OVModel *)model;
     DnnContext *ctx = ov_model->ctx;
     TaskItem task;
     OVRequestItem *request;
@@ -1383,7 +1383,6 @@ static DNNModel *dnn_load_model_ov(DnnContext *ctx, DNNFunctionType func_type, A
         return NULL;
     ov_model->ctx = ctx;
     model = &ov_model->model;
-    model->model = ov_model;
 
 #if HAVE_OPENVINO2
     status = ov_core_create(&core);
@@ -1470,7 +1469,7 @@ err:
 
 static int dnn_execute_model_ov(const DNNModel *model, DNNExecBaseParams *exec_params)
 {
-    OVModel *ov_model = model->model;
+    OVModel *ov_model = (OVModel *)model;
     DnnContext *ctx = ov_model->ctx;
     OVRequestItem *request;
     TaskItem *task;
@@ -1558,13 +1557,13 @@ static int dnn_execute_model_ov(const DNNModel *model, DNNExecBaseParams *exec_p
 
 static DNNAsyncStatusType dnn_get_result_ov(const DNNModel *model, AVFrame **in, AVFrame **out)
 {
-    OVModel *ov_model = model->model;
+    OVModel *ov_model = (OVModel *)model;
     return ff_dnn_get_result_common(ov_model->task_queue, in, out);
 }
 
 static int dnn_flush_ov(const DNNModel *model)
 {
-    OVModel *ov_model = model->model;
+    OVModel *ov_model = (OVModel *)model;
     DnnContext *ctx = ov_model->ctx;
     OVRequestItem *request;
 #if HAVE_OPENVINO2
diff --git a/libavfilter/dnn/dnn_backend_tf.c b/libavfilter/dnn/dnn_backend_tf.c
index c7716e696d..06ea6cbb8c 100644
--- a/libavfilter/dnn/dnn_backend_tf.c
+++ b/libavfilter/dnn/dnn_backend_tf.c
@@ -262,9 +262,9 @@ static TF_Tensor *allocate_input_tensor(const DNNData *input)
                              input_dims[1] * input_dims[2] * input_dims[3] * size);
 }
 
-static int get_input_tf(void *model, DNNData *input, const char *input_name)
+static int get_input_tf(DNNModel *model, DNNData *input, const char *input_name)
 {
-    TFModel *tf_model = model;
+    TFModel *tf_model = (TFModel *)model;
     DnnContext *ctx = tf_model->ctx;
     TF_Status *status;
     TF_DataType dt;
@@ -310,11 +310,11 @@ static int get_input_tf(void *model, DNNData *input, const char *input_name)
     return 0;
 }
 
-static int get_output_tf(void *model, const char *input_name, int input_width, int input_height,
+static int get_output_tf(DNNModel *model, const char *input_name, int input_width, int input_height,
                                    const char *output_name, int *output_width, int *output_height)
 {
     int ret;
-    TFModel *tf_model = model;
+    TFModel *tf_model = (TFModel *)model;
     DnnContext *ctx = tf_model->ctx;
     TaskItem task;
     TFRequestItem *request;
@@ -486,7 +486,7 @@ static void dnn_free_model_tf(DNNModel **model)
     if (!model || !*model)
         return;
 
-    tf_model = (*model)->model;
+    tf_model = (TFModel *)(*model);
     while (ff_safe_queue_size(tf_model->request_queue) != 0) {
         TFRequestItem *item = ff_safe_queue_pop_front(tf_model->request_queue);
         destroy_request_item(&item);
@@ -530,7 +530,6 @@ static DNNModel *dnn_load_model_tf(DnnContext *ctx, DNNFunctionType func_type, A
     if (!tf_model)
         return NULL;
     model = &tf_model->model;
-    model->model = tf_model;
     tf_model->ctx = ctx;
 
     if (load_tf_model(tf_model, ctx->model_filename) != 0){
@@ -611,7 +610,7 @@ static int fill_model_input_tf(TFModel *tf_model, TFRequestItem *request) {
     task = lltask->task;
     request->lltask = lltask;
 
-    ret = get_input_tf(tf_model, &input, task->input_name);
+    ret = get_input_tf(&tf_model->model, &input, task->input_name);
     if (ret != 0) {
         goto err;
     }
@@ -803,7 +802,7 @@ err:
 
 static int dnn_execute_model_tf(const DNNModel *model, DNNExecBaseParams *exec_params)
 {
-    TFModel *tf_model = model->model;
+    TFModel *tf_model = (TFModel *)model;
     DnnContext *ctx = tf_model->ctx;
     TaskItem *task;
     TFRequestItem *request;
@@ -851,13 +850,13 @@ static int dnn_execute_model_tf(const DNNModel *model, DNNExecBaseParams *exec_p
 
 static DNNAsyncStatusType dnn_get_result_tf(const DNNModel *model, AVFrame **in, AVFrame **out)
 {
-    TFModel *tf_model = model->model;
+    TFModel *tf_model = (TFModel *)model;
     return ff_dnn_get_result_common(tf_model->task_queue, in, out);
 }
 
 static int dnn_flush_tf(const DNNModel *model)
 {
-    TFModel *tf_model = model->model;
+    TFModel *tf_model = (TFModel *)model;
     DnnContext *ctx = tf_model->ctx;
     TFRequestItem *request;
     int ret;
diff --git a/libavfilter/dnn/dnn_backend_torch.cpp b/libavfilter/dnn/dnn_backend_torch.cpp
index 818ec5b713..24e9f2c8e2 100644
--- a/libavfilter/dnn/dnn_backend_torch.cpp
+++ b/libavfilter/dnn/dnn_backend_torch.cpp
@@ -119,7 +119,7 @@ static void dnn_free_model_th(DNNModel **model)
     if (!model || !*model)
         return;
 
-    th_model = (THModel *) (*model)->model;
+    th_model = (THModel *) (*model);
     while (ff_safe_queue_size(th_model->request_queue) != 0) {
         THRequestItem *item = (THRequestItem *)ff_safe_queue_pop_front(th_model->request_queue);
         destroy_request_item(&item);
@@ -144,7 +144,7 @@ static void dnn_free_model_th(DNNModel **model)
     *model = NULL;
 }
 
-static int get_input_th(void *model, DNNData *input, const char *input_name)
+static int get_input_th(DNNModel *model, DNNData *input, const char *input_name)
 {
     input->dt = DNN_FLOAT;
     input->order = DCO_RGB;
@@ -179,7 +179,7 @@ static int fill_model_input_th(THModel *th_model, THRequestItem *request)
     task = lltask->task;
     infer_request = request->infer_request;
 
-    ret = get_input_th(th_model, &input, NULL);
+    ret = get_input_th(&th_model->model, &input, NULL);
     if ( ret != 0) {
         goto err;
     }
@@ -356,7 +356,7 @@ err:
     return ret;
 }
 
-static int get_output_th(void *model, const char *input_name, int input_width, int input_height,
+static int get_output_th(DNNModel *model, const char *input_name, int input_width, int input_height,
                                    const char *output_name, int *output_width, int *output_height)
 {
     int ret = 0;
@@ -421,7 +421,6 @@ static DNNModel *dnn_load_model_th(DnnContext *ctx, DNNFunctionType func_type, A
     if (!th_model)
         return NULL;
     model = &th_model->model;
-    model->model = th_model;
     th_model->ctx = ctx;
 
     c10::Device device = c10::Device(device_name);
@@ -489,7 +488,7 @@ fail:
 
 static int dnn_execute_model_th(const DNNModel *model, DNNExecBaseParams *exec_params)
 {
-    THModel *th_model = (THModel *)model->model;
+    THModel *th_model = (THModel *)model;
     DnnContext *ctx = th_model->ctx;
     TaskItem *task;
     THRequestItem *request;
@@ -538,13 +537,13 @@ static int dnn_execute_model_th(const DNNModel *model, DNNExecBaseParams *exec_p
 
 static DNNAsyncStatusType dnn_get_result_th(const DNNModel *model, AVFrame **in, AVFrame **out)
 {
-    THModel *th_model = (THModel *)model->model;
+    THModel *th_model = (THModel *)model;
     return ff_dnn_get_result_common(th_model->task_queue, in, out);
 }
 
 static int dnn_flush_th(const DNNModel *model)
 {
-    THModel *th_model = (THModel *)model->model;
+    THModel *th_model = (THModel *)model;
     THRequestItem *request;
 
     if (ff_queue_size(th_model->lltask_queue) == 0)
diff --git a/libavfilter/dnn_filter_common.c b/libavfilter/dnn_filter_common.c
index 3dd51badf6..132dd75550 100644
--- a/libavfilter/dnn_filter_common.c
+++ b/libavfilter/dnn_filter_common.c
@@ -151,15 +151,15 @@ int ff_dnn_set_classify_post_proc(DnnContext *ctx, ClassifyPostProc post_proc)
 
 int ff_dnn_get_input(DnnContext *ctx, DNNData *input)
 {
-    return ctx->model->get_input(ctx->model->model, input, ctx->model_inputname);
+    return ctx->model->get_input(ctx->model, input, ctx->model_inputname);
 }
 
 int ff_dnn_get_output(DnnContext *ctx, int input_width, int input_height, int *output_width, int *output_height)
 {
     char * output_name = ctx->model_outputnames && ctx->backend_type != DNN_TH ?
                          ctx->model_outputnames[0] : NULL;
-    return ctx->model->get_output(ctx->model->model, ctx->model_inputname, input_width, input_height,
-                                    (const char *)output_name, output_width, output_height);
+    return ctx->model->get_output(ctx->model, ctx->model_inputname, input_width, input_height,
+                                  (const char *)output_name, output_width, output_height);
 }
 
 int ff_dnn_execute_model(DnnContext *ctx, AVFrame *in_frame, AVFrame *out_frame)
diff --git a/libavfilter/dnn_interface.h b/libavfilter/dnn_interface.h
index a58001bab2..4e14a42d00 100644
--- a/libavfilter/dnn_interface.h
+++ b/libavfilter/dnn_interface.h
@@ -91,17 +91,15 @@ typedef int (*DetectPostProc)(AVFrame *frame, DNNData *output, uint32_t nb, AVFi
 typedef int (*ClassifyPostProc)(AVFrame *frame, DNNData *output, uint32_t bbox_index, AVFilterContext *filter_ctx);
 
 typedef struct DNNModel{
-    // Stores model that can be different for different backends.
-    void *model;
     // Stores FilterContext used for the interaction between AVFrame and DNNData
     AVFilterContext *filter_ctx;
     // Stores function type of the model
     DNNFunctionType func_type;
     // Gets model input information
     // Just reuse struct DNNData here, actually the DNNData.data field is not needed.
-    int (*get_input)(void *model, DNNData *input, const char *input_name);
+    int (*get_input)(struct DNNModel *model, DNNData *input, const char *input_name);
     // Gets model output width/height with given input w/h
-    int (*get_output)(void *model, const char *input_name, int input_width, int input_height,
+    int (*get_output)(struct DNNModel *model, const char *input_name, int input_width, int input_height,
                                 const char *output_name, int *output_width, int *output_height);
     // set the pre process to transfer data from AVFrame to DNNData
     // the default implementation within DNN is used if it is not provided by the filter
-- 
2.25.1


* [FFmpeg-devel] [PATCH WIP v2 9/9] avfilter/dnn: Use dnn_backend_info_list to search for dnn module
       [not found] <20240428064655.106853-1-quinkblack@foxmail.com>
                   ` (6 preceding siblings ...)
  2024-04-28  6:46 ` [FFmpeg-devel] [PATCH WIP v2 8/9] avfilter/dnn: Remove a level of dereference Zhao Zhili
@ 2024-04-28  6:46 ` Zhao Zhili
  7 siblings, 0 replies; 8+ messages in thread
From: Zhao Zhili @ 2024-04-28  6:46 UTC (permalink / raw)
  To: ffmpeg-devel; +Cc: Zhao Zhili

From: Zhao Zhili <zhilizhao@tencent.com>

---
 libavfilter/dnn/dnn_backend_openvino.c |  1 +
 libavfilter/dnn/dnn_backend_tf.c       |  1 +
 libavfilter/dnn/dnn_backend_torch.cpp  |  1 +
 libavfilter/dnn/dnn_interface.c        | 26 ++++++++------------------
 libavfilter/dnn_interface.h            |  1 +
 5 files changed, 12 insertions(+), 18 deletions(-)

diff --git a/libavfilter/dnn/dnn_backend_openvino.c b/libavfilter/dnn/dnn_backend_openvino.c
index d8a6820dc2..9c699cdc8c 100644
--- a/libavfilter/dnn/dnn_backend_openvino.c
+++ b/libavfilter/dnn/dnn_backend_openvino.c
@@ -1613,6 +1613,7 @@ static int dnn_flush_ov(const DNNModel *model)
 
 const DNNModule ff_dnn_backend_openvino = {
     .clazz          = DNN_DEFINE_CLASS(dnn_openvino),
+    .type           = DNN_OV,
     .load_model     = dnn_load_model_ov,
     .execute_model  = dnn_execute_model_ov,
     .get_result     = dnn_get_result_ov,
diff --git a/libavfilter/dnn/dnn_backend_tf.c b/libavfilter/dnn/dnn_backend_tf.c
index 06ea6cbb8c..6afefe8115 100644
--- a/libavfilter/dnn/dnn_backend_tf.c
+++ b/libavfilter/dnn/dnn_backend_tf.c
@@ -886,6 +886,7 @@ static int dnn_flush_tf(const DNNModel *model)
 
 const DNNModule ff_dnn_backend_tf = {
     .clazz          = DNN_DEFINE_CLASS(dnn_tensorflow),
+    .type           = DNN_TF,
     .load_model     = dnn_load_model_tf,
     .execute_model  = dnn_execute_model_tf,
     .get_result     = dnn_get_result_tf,
diff --git a/libavfilter/dnn/dnn_backend_torch.cpp b/libavfilter/dnn/dnn_backend_torch.cpp
index 24e9f2c8e2..2557264713 100644
--- a/libavfilter/dnn/dnn_backend_torch.cpp
+++ b/libavfilter/dnn/dnn_backend_torch.cpp
@@ -561,6 +561,7 @@ static int dnn_flush_th(const DNNModel *model)
 
 extern const DNNModule ff_dnn_backend_torch = {
     .clazz          = DNN_DEFINE_CLASS(dnn_th),
+    .type           = DNN_TH,
     .load_model     = dnn_load_model_th,
     .execute_model  = dnn_execute_model_th,
     .get_result     = dnn_get_result_th,
diff --git a/libavfilter/dnn/dnn_interface.c b/libavfilter/dnn/dnn_interface.c
index ebd308cd84..cce3c45856 100644
--- a/libavfilter/dnn/dnn_interface.c
+++ b/libavfilter/dnn/dnn_interface.c
@@ -80,25 +80,15 @@ static const DnnBackendInfo dnn_backend_info_list[] = {
 
 const DNNModule *ff_get_dnn_module(DNNBackendType backend_type, void *log_ctx)
 {
-    switch(backend_type){
-    #if (CONFIG_LIBTENSORFLOW == 1)
-    case DNN_TF:
-        return &ff_dnn_backend_tf;
-    #endif
-    #if (CONFIG_LIBOPENVINO == 1)
-    case DNN_OV:
-        return &ff_dnn_backend_openvino;
-    #endif
-    #if (CONFIG_LIBTORCH == 1)
-    case DNN_TH:
-        return &ff_dnn_backend_torch;
-    #endif
-    default:
-        av_log(log_ctx, AV_LOG_ERROR,
-                "Module backend_type %d is not supported or enabled.\n",
-                backend_type);
-        return NULL;
+    for (int i = 1; i < FF_ARRAY_ELEMS(dnn_backend_info_list); i++) {
+        if (dnn_backend_info_list[i].module->type == backend_type)
+            return dnn_backend_info_list[i].module;
     }
+
+    av_log(log_ctx, AV_LOG_ERROR,
+            "Module backend_type %d is not supported or enabled.\n",
+            backend_type);
+    return NULL;
 }
 
 void *ff_dnn_child_next(DnnContext *obj, void *prev) {
diff --git a/libavfilter/dnn_interface.h b/libavfilter/dnn_interface.h
index 4e14a42d00..4b25ac2b84 100644
--- a/libavfilter/dnn_interface.h
+++ b/libavfilter/dnn_interface.h
@@ -170,6 +170,7 @@ typedef struct DnnContext {
 // Stores pointers to functions for loading, executing, freeing DNN models for one of the backends.
 struct DNNModule {
     const AVClass clazz;
+    DNNBackendType type;
     // Loads model and parameters from given file. Returns NULL if it is not possible.
     DNNModel *(*load_model)(DnnContext *ctx, DNNFunctionType func_type, AVFilterContext *filter_ctx);
     // Executes model with specified input and output. Returns the error code otherwise.
-- 
2.25.1


