Git Inbox Mirror of the ffmpeg-devel mailing list - see https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
* [FFmpeg-devel] [PATCH V2 1/8] libavfilter: Prepare to handle specific error codes in DNN Filters
@ 2022-03-02 18:05 Shubhanshu Saxena
  2022-03-02 18:05 ` [FFmpeg-devel] [PATCH V2 2/8] lavfi/dnn: Error Specificity in Native Backend Layers Shubhanshu Saxena
                   ` (6 more replies)
  0 siblings, 7 replies; 9+ messages in thread
From: Shubhanshu Saxena @ 2022-03-02 18:05 UTC (permalink / raw)
  To: ffmpeg-devel; +Cc: Shubhanshu Saxena

This commit prepares the filter side to handle specific error codes
from the DNN backends instead of the current DNN_ERROR.
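
As a rough sketch of why plain int return values help (the
handle_dnn_result() helper below is hypothetical and not part of this
patch), a caller can now inspect or forward the real AVERROR code
instead of collapsing every failure into AVERROR(EIO):

    #include "libavutil/error.h"
    #include "libavutil/log.h"

    /* Illustrative only: branch on the specific code a backend returned. */
    static int handle_dnn_result(void *log_ctx, int ret)
    {
        if (ret == 0)                      /* DNN_SUCCESS is 0 */
            return 0;
        if (ret == AVERROR(ENOMEM))
            av_log(log_ctx, AV_LOG_ERROR, "DNN backend out of memory\n");
        else
            av_log(log_ctx, AV_LOG_ERROR, "DNN failure: %s\n", av_err2str(ret));
        return ret;                        /* forward the code unchanged */
    }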

Signed-off-by: Shubhanshu Saxena <shubhanshu.e01@gmail.com>
---
 libavfilter/dnn_filter_common.c | 10 +++++-----
 libavfilter/dnn_filter_common.h | 10 +++++-----
 libavfilter/vf_derain.c         |  4 ++--
 libavfilter/vf_dnn_processing.c |  8 ++++----
 libavfilter/vf_sr.c             |  8 ++++----
 5 files changed, 20 insertions(+), 20 deletions(-)

diff --git a/libavfilter/dnn_filter_common.c b/libavfilter/dnn_filter_common.c
index 3c7a962b3a..5083e3de19 100644
--- a/libavfilter/dnn_filter_common.c
+++ b/libavfilter/dnn_filter_common.c
@@ -106,18 +106,18 @@ int ff_dnn_set_classify_post_proc(DnnContext *ctx, ClassifyPostProc post_proc)
     return 0;
 }
 
-DNNReturnType ff_dnn_get_input(DnnContext *ctx, DNNData *input)
+int ff_dnn_get_input(DnnContext *ctx, DNNData *input)
 {
     return ctx->model->get_input(ctx->model->model, input, ctx->model_inputname);
 }
 
-DNNReturnType ff_dnn_get_output(DnnContext *ctx, int input_width, int input_height, int *output_width, int *output_height)
+int ff_dnn_get_output(DnnContext *ctx, int input_width, int input_height, int *output_width, int *output_height)
 {
     return ctx->model->get_output(ctx->model->model, ctx->model_inputname, input_width, input_height,
                                     (const char *)ctx->model_outputnames[0], output_width, output_height);
 }
 
-DNNReturnType ff_dnn_execute_model(DnnContext *ctx, AVFrame *in_frame, AVFrame *out_frame)
+int ff_dnn_execute_model(DnnContext *ctx, AVFrame *in_frame, AVFrame *out_frame)
 {
     DNNExecBaseParams exec_params = {
         .input_name     = ctx->model_inputname,
@@ -129,7 +129,7 @@ DNNReturnType ff_dnn_execute_model(DnnContext *ctx, AVFrame *in_frame, AVFrame *
     return (ctx->dnn_module->execute_model)(ctx->model, &exec_params);
 }
 
-DNNReturnType ff_dnn_execute_model_classification(DnnContext *ctx, AVFrame *in_frame, AVFrame *out_frame, const char *target)
+int ff_dnn_execute_model_classification(DnnContext *ctx, AVFrame *in_frame, AVFrame *out_frame, const char *target)
 {
     DNNExecClassificationParams class_params = {
         {
@@ -149,7 +149,7 @@ DNNAsyncStatusType ff_dnn_get_result(DnnContext *ctx, AVFrame **in_frame, AVFram
     return (ctx->dnn_module->get_result)(ctx->model, in_frame, out_frame);
 }
 
-DNNReturnType ff_dnn_flush(DnnContext *ctx)
+int ff_dnn_flush(DnnContext *ctx)
 {
     return (ctx->dnn_module->flush)(ctx->model);
 }
diff --git a/libavfilter/dnn_filter_common.h b/libavfilter/dnn_filter_common.h
index 635ae631c1..bcdf37c815 100644
--- a/libavfilter/dnn_filter_common.h
+++ b/libavfilter/dnn_filter_common.h
@@ -53,12 +53,12 @@ int ff_dnn_init(DnnContext *ctx, DNNFunctionType func_type, AVFilterContext *fil
 int ff_dnn_set_frame_proc(DnnContext *ctx, FramePrePostProc pre_proc, FramePrePostProc post_proc);
 int ff_dnn_set_detect_post_proc(DnnContext *ctx, DetectPostProc post_proc);
 int ff_dnn_set_classify_post_proc(DnnContext *ctx, ClassifyPostProc post_proc);
-DNNReturnType ff_dnn_get_input(DnnContext *ctx, DNNData *input);
-DNNReturnType ff_dnn_get_output(DnnContext *ctx, int input_width, int input_height, int *output_width, int *output_height);
-DNNReturnType ff_dnn_execute_model(DnnContext *ctx, AVFrame *in_frame, AVFrame *out_frame);
-DNNReturnType ff_dnn_execute_model_classification(DnnContext *ctx, AVFrame *in_frame, AVFrame *out_frame, const char *target);
+int ff_dnn_get_input(DnnContext *ctx, DNNData *input);
+int ff_dnn_get_output(DnnContext *ctx, int input_width, int input_height, int *output_width, int *output_height);
+int ff_dnn_execute_model(DnnContext *ctx, AVFrame *in_frame, AVFrame *out_frame);
+int ff_dnn_execute_model_classification(DnnContext *ctx, AVFrame *in_frame, AVFrame *out_frame, const char *target);
 DNNAsyncStatusType ff_dnn_get_result(DnnContext *ctx, AVFrame **in_frame, AVFrame **out_frame);
-DNNReturnType ff_dnn_flush(DnnContext *ctx);
+int ff_dnn_flush(DnnContext *ctx);
 void ff_dnn_uninit(DnnContext *ctx);
 
 #endif
diff --git a/libavfilter/vf_derain.c b/libavfilter/vf_derain.c
index 0eb7da18da..6758cc05d2 100644
--- a/libavfilter/vf_derain.c
+++ b/libavfilter/vf_derain.c
@@ -62,7 +62,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
     AVFilterContext *ctx  = inlink->dst;
     AVFilterLink *outlink = ctx->outputs[0];
     DRContext *dr_context = ctx->priv;
-    DNNReturnType dnn_result;
+    int dnn_result;
     AVFrame *out;
 
     out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
@@ -77,7 +77,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
     if (dnn_result != DNN_SUCCESS){
         av_log(ctx, AV_LOG_ERROR, "failed to execute model\n");
         av_frame_free(&in);
-        return AVERROR(EIO);
+        return dnn_result;
     }
     do {
         async_state = ff_dnn_get_result(&dr_context->dnnctx, &in, &out);
diff --git a/libavfilter/vf_dnn_processing.c b/libavfilter/vf_dnn_processing.c
index c5ca36ef09..4a1ff5898f 100644
--- a/libavfilter/vf_dnn_processing.c
+++ b/libavfilter/vf_dnn_processing.c
@@ -134,14 +134,14 @@ static int config_input(AVFilterLink *inlink)
 {
     AVFilterContext *context     = inlink->dst;
     DnnProcessingContext *ctx = context->priv;
-    DNNReturnType result;
+    int result;
     DNNData model_input;
     int check;
 
     result = ff_dnn_get_input(&ctx->dnnctx, &model_input);
     if (result != DNN_SUCCESS) {
         av_log(ctx, AV_LOG_ERROR, "could not get input from the model\n");
-        return AVERROR(EIO);
+        return result;
     }
 
     check = check_modelinput_inlink(&model_input, inlink);
@@ -194,14 +194,14 @@ static int config_output(AVFilterLink *outlink)
 {
     AVFilterContext *context = outlink->src;
     DnnProcessingContext *ctx = context->priv;
-    DNNReturnType result;
+    int result;
     AVFilterLink *inlink = context->inputs[0];
 
     // have a try run in case that the dnn model resize the frame
     result = ff_dnn_get_output(&ctx->dnnctx, inlink->w, inlink->h, &outlink->w, &outlink->h);
     if (result != DNN_SUCCESS) {
         av_log(ctx, AV_LOG_ERROR, "could not get output from the model\n");
-        return AVERROR(EIO);
+        return result;
     }
 
     prepare_uv_scale(outlink);
diff --git a/libavfilter/vf_sr.c b/libavfilter/vf_sr.c
index b07335bc30..02d9452681 100644
--- a/libavfilter/vf_sr.c
+++ b/libavfilter/vf_sr.c
@@ -76,7 +76,7 @@ static int config_output(AVFilterLink *outlink)
 {
     AVFilterContext *context = outlink->src;
     SRContext *ctx = context->priv;
-    DNNReturnType result;
+    int result;
     AVFilterLink *inlink = context->inputs[0];
     int out_width, out_height;
 
@@ -84,7 +84,7 @@ static int config_output(AVFilterLink *outlink)
     result = ff_dnn_get_output(&ctx->dnnctx, inlink->w, inlink->h, &out_width, &out_height);
     if (result != DNN_SUCCESS) {
         av_log(ctx, AV_LOG_ERROR, "could not get output from the model\n");
-        return AVERROR(EIO);
+        return result;
     }
 
     if (inlink->w != out_width || inlink->h != out_height) {
@@ -121,7 +121,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
     SRContext *ctx = context->priv;
     AVFilterLink *outlink = context->outputs[0];
     AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
-    DNNReturnType dnn_result;
+    int dnn_result;
 
     if (!out){
         av_log(context, AV_LOG_ERROR, "could not allocate memory for output frame\n");
@@ -143,7 +143,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
         av_log(ctx, AV_LOG_ERROR, "failed to execute loaded model\n");
         av_frame_free(&in);
         av_frame_free(&out);
-        return AVERROR(EIO);
+        return dnn_result;
     }
 
     do {
-- 
2.32.0


* [FFmpeg-devel] [PATCH V2 2/8] lavfi/dnn: Error Specificity in Native Backend Layers
  2022-03-02 18:05 [FFmpeg-devel] [PATCH V2 1/8] libavfilter: Prepare to handle specific error codes in DNN Filters Shubhanshu Saxena
@ 2022-03-02 18:05 ` Shubhanshu Saxena
  2022-03-02 18:05 ` [FFmpeg-devel] [PATCH V2 3/8] lavfi/dnn_io_proc: Return Specific Error Codes Shubhanshu Saxena
                   ` (5 subsequent siblings)
  6 siblings, 0 replies; 9+ messages in thread
From: Shubhanshu Saxena @ 2022-03-02 18:05 UTC (permalink / raw)
  To: ffmpeg-devel; +Cc: Shubhanshu Saxena

This commit makes the execution functions in the Native backend
layers return specific error codes instead of DNN_ERROR.
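
The diff below also adds DNN_GENERIC_ERROR to dnn_interface.h via
FFERRTAG('D','N','N','!'). A minimal standalone sketch, assuming an
FFmpeg source tree on the include path, showing that the new value
behaves like any other negative AVERROR code:

    #include <stdio.h>
    #include "libavutil/error.h"

    #define DNN_GENERIC_ERROR FFERRTAG('D','N','N','!') /* as in this patch */

    int main(void)
    {
        char buf[AV_ERROR_MAX_STRING_SIZE];
        /* FFERRTAG packs four characters into a negative int, so the
         * value travels through the usual "ret < 0" error paths. */
        printf("DNN_GENERIC_ERROR = %d (%s)\n", DNN_GENERIC_ERROR,
               av_make_error_string(buf, sizeof(buf), DNN_GENERIC_ERROR));
        return 0;
    }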

Signed-off-by: Shubhanshu Saxena <shubhanshu.e01@gmail.com>
---
 libavfilter/dnn/dnn_backend_native_layer_avgpool.c |  6 +++---
 libavfilter/dnn/dnn_backend_native_layer_avgpool.h |  3 ++-
 libavfilter/dnn/dnn_backend_native_layer_conv2d.c  | 14 ++++++++------
 libavfilter/dnn/dnn_backend_native_layer_conv2d.h  |  3 ++-
 libavfilter/dnn/dnn_backend_native_layer_dense.c   |  6 +++---
 libavfilter/dnn/dnn_backend_native_layer_dense.h   |  3 ++-
 .../dnn/dnn_backend_native_layer_depth2space.c     |  6 +++---
 .../dnn/dnn_backend_native_layer_depth2space.h     |  3 ++-
 .../dnn/dnn_backend_native_layer_mathbinary.c      |  6 +++---
 .../dnn/dnn_backend_native_layer_mathunary.c       |  6 +++---
 .../dnn/dnn_backend_native_layer_mathunary.h       |  3 ++-
 libavfilter/dnn/dnn_backend_native_layer_maximum.c |  4 ++--
 libavfilter/dnn/dnn_backend_native_layer_pad.c     |  4 ++--
 libavfilter/dnn_interface.h                        |  2 ++
 14 files changed, 39 insertions(+), 30 deletions(-)

diff --git a/libavfilter/dnn/dnn_backend_native_layer_avgpool.c b/libavfilter/dnn/dnn_backend_native_layer_avgpool.c
index 89f1787523..510a28a8c9 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_avgpool.c
+++ b/libavfilter/dnn/dnn_backend_native_layer_avgpool.c
@@ -109,12 +109,12 @@ int ff_dnn_execute_layer_avg_pool(DnnOperand *operands, const int32_t *input_ope
     output_operand->length = ff_calculate_operand_data_length(output_operand);
     if (output_operand->length <= 0) {
         av_log(ctx, AV_LOG_ERROR, "The output data length overflow\n");
-        return DNN_ERROR;
+        return AVERROR(EINVAL);
     }
     output_operand->data = av_realloc(output_operand->data, output_operand->length);
     if (!output_operand->data) {
         av_log(ctx, AV_LOG_ERROR, "Failed to reallocate memory for output\n");
-        return DNN_ERROR;
+        return AVERROR(ENOMEM);
     }
     output = output_operand->data;
 
@@ -143,5 +143,5 @@ int ff_dnn_execute_layer_avg_pool(DnnOperand *operands, const int32_t *input_ope
         }
     }
 
-    return 0;
+    return DNN_SUCCESS;
 }
diff --git a/libavfilter/dnn/dnn_backend_native_layer_avgpool.h b/libavfilter/dnn/dnn_backend_native_layer_avgpool.h
index d8972487de..118a160090 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_avgpool.h
+++ b/libavfilter/dnn/dnn_backend_native_layer_avgpool.h
@@ -60,7 +60,8 @@ int ff_dnn_load_layer_avg_pool(Layer *layer, AVIOContext *model_file_context, in
  * @param parameters average pooling parameters
  * @param ctx pointer to Native model context for logging
  * @retval 0 if the execution succeeds
- * @retval DNN_ERROR if the execution fails
+ * @retval AVERROR(ENOMEM) if memory allocation fails
+ * @retval AVERROR(EINVAL) for invalid arguments
  */
 int ff_dnn_execute_layer_avg_pool(DnnOperand *operands, const int32_t *input_operand_indexes,
                                   int32_t output_operand_index, const void *parameters, NativeContext *ctx);
diff --git a/libavfilter/dnn/dnn_backend_native_layer_conv2d.c b/libavfilter/dnn/dnn_backend_native_layer_conv2d.c
index 7a60aa6a4b..dfa0d1ed36 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_conv2d.c
+++ b/libavfilter/dnn/dnn_backend_native_layer_conv2d.c
@@ -211,12 +211,12 @@ int ff_dnn_execute_layer_conv2d(DnnOperand *operands, const int32_t *input_opera
     output_operand->length = ff_calculate_operand_data_length(output_operand);
     if (output_operand->length <= 0) {
         av_log(ctx, AV_LOG_ERROR, "The output data length overflow\n");
-        return DNN_ERROR;
+        return AVERROR(EINVAL);
     }
     tmp = av_realloc(output_operand->data, output_operand->length);
     if (!tmp) {
         av_log(ctx, AV_LOG_ERROR, "Failed to reallocate memory for output\n");
-        return DNN_ERROR;
+        return AVERROR(ENOMEM);
     }
     output_operand->data = tmp;
     thread_common_param.output_data = output_operand->data;
@@ -229,17 +229,19 @@ int ff_dnn_execute_layer_conv2d(DnnOperand *operands, const int32_t *input_opera
 #if HAVE_PTHREAD_CANCEL
     thread_param = av_malloc_array(thread_num, sizeof(*thread_param));
     if (!thread_param)
-        return DNN_ERROR;
+        return AVERROR(ENOMEM);
     thread_stride = (height - pad_size * 2) / thread_num;
     //create threads
     for (int i = 0; i < thread_num; i++){
+        int thread_ret = 0;
         thread_param[i].thread_common_param = &thread_common_param;
         thread_param[i].thread_start = thread_stride * i + pad_size;
         thread_param[i].thread_end = (i == thread_num - 1) ? (height - pad_size) : (thread_param[i].thread_start + thread_stride);
-        if (pthread_create(&thread_param[i].thread, NULL,
-                           dnn_execute_layer_conv2d_thread, &thread_param[i])) {
+        thread_ret = pthread_create(&thread_param[i].thread, NULL,
+                                    dnn_execute_layer_conv2d_thread, &thread_param[i]);
+        if (thread_ret) {
             thread_num = i;
-            ret = DNN_ERROR;
+            ret = AVERROR(thread_ret);
             break;
         }
     }
diff --git a/libavfilter/dnn/dnn_backend_native_layer_conv2d.h b/libavfilter/dnn/dnn_backend_native_layer_conv2d.h
index 446f48f608..f754a9ba18 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_conv2d.h
+++ b/libavfilter/dnn/dnn_backend_native_layer_conv2d.h
@@ -60,7 +60,8 @@ int ff_dnn_load_layer_conv2d(Layer *layer, AVIOContext *model_file_context, int
  * @param parameters convolution parameters
  * @param ctx pointer to Native model context for logging
  * @retval 0 if the execution succeeds
- * @retval DNN_ERROR if the execution fails
+ * @retval AVERROR(ENOMEM) if memory allocation fails
+ * @retval AVERROR(EINVAL) for invalid arguments
  */
 int ff_dnn_execute_layer_conv2d(DnnOperand *operands, const int32_t *input_operand_indexes,
                                 int32_t output_operand_index, const void *parameters, NativeContext *ctx);
diff --git a/libavfilter/dnn/dnn_backend_native_layer_dense.c b/libavfilter/dnn/dnn_backend_native_layer_dense.c
index 117590d7bb..a22a484464 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_dense.c
+++ b/libavfilter/dnn/dnn_backend_native_layer_dense.c
@@ -104,12 +104,12 @@ int ff_dnn_execute_layer_dense(DnnOperand *operands, const int32_t *input_operan
     output_operand->length = ff_calculate_operand_data_length(output_operand);
     if (output_operand->length <= 0) {
         av_log(ctx, AV_LOG_ERROR, "The output data length overflow\n");
-        return DNN_ERROR;
+        return AVERROR(EINVAL);
     }
     output_operand->data = av_realloc(output_operand->data, output_operand->length);
     if (!output_operand->data) {
         av_log(ctx, AV_LOG_ERROR, "Failed to reallocate memory for output\n");
-        return DNN_ERROR;
+        return AVERROR(ENOMEM);
     }
     output = output_operand->data;
 
@@ -147,5 +147,5 @@ int ff_dnn_execute_layer_dense(DnnOperand *operands, const int32_t *input_operan
             output += dense_params->output_num;
         }
     }
-    return 0;
+    return DNN_SUCCESS;
 }
diff --git a/libavfilter/dnn/dnn_backend_native_layer_dense.h b/libavfilter/dnn/dnn_backend_native_layer_dense.h
index 0488b03cc3..607fc3e684 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_dense.h
+++ b/libavfilter/dnn/dnn_backend_native_layer_dense.h
@@ -57,7 +57,8 @@ int ff_dnn_load_layer_dense(Layer *layer, AVIOContext *model_file_context, int f
  * @param parameters dense layer parameters
  * @param ctx pointer to Native model context for logging
  * @retval 0 if the execution succeeds
- * @retval DNN_ERROR if the execution fails
+ * @retval AVERROR(ENOMEM) if memory allocation fails
+ * @retval AVERROR(EINVAL) for invalid arguments
  */
 int ff_dnn_execute_layer_dense(DnnOperand *operands, const int32_t *input_operand_indexes,
                                int32_t output_operand_index, const void *parameters, NativeContext *ctx);
diff --git a/libavfilter/dnn/dnn_backend_native_layer_depth2space.c b/libavfilter/dnn/dnn_backend_native_layer_depth2space.c
index 30ced43cd5..82b1a52be2 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_depth2space.c
+++ b/libavfilter/dnn/dnn_backend_native_layer_depth2space.c
@@ -76,12 +76,12 @@ int ff_dnn_execute_layer_depth2space(DnnOperand *operands, const int32_t *input_
     output_operand->length = ff_calculate_operand_data_length(output_operand);
     if (output_operand->length <= 0) {
         av_log(ctx, AV_LOG_ERROR, "The output data length overflow\n");
-        return DNN_ERROR;
+        return AVERROR(EINVAL);
     }
     output_operand->data = av_realloc(output_operand->data, output_operand->length);
     if (!output_operand->data) {
         av_log(ctx, AV_LOG_ERROR, "Failed to reallocate memory for output\n");
-        return DNN_ERROR;
+        return AVERROR(ENOMEM);
     }
     output = output_operand->data;
 
@@ -98,5 +98,5 @@ int ff_dnn_execute_layer_depth2space(DnnOperand *operands, const int32_t *input_
         }
         output += output_linesize;
     }
-    return 0;
+    return DNN_SUCCESS;
 }
diff --git a/libavfilter/dnn/dnn_backend_native_layer_depth2space.h b/libavfilter/dnn/dnn_backend_native_layer_depth2space.h
index 2792a33ebe..aaf2df4c13 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_depth2space.h
+++ b/libavfilter/dnn/dnn_backend_native_layer_depth2space.h
@@ -63,7 +63,8 @@ int ff_dnn_load_layer_depth2space(Layer *layer, AVIOContext *model_file_context,
  * @param parameters depth to space layer parameters
  * @param ctx pointer to Native model context for logging
  * @retval 0 if the execution succeeds
- * @retval DNN_ERROR if the execution fails
+ * @retval AVERROR(ENOMEM) if memory allocation fails
+ * @retval AVERROR(EINVAL) for invalid arguments
  */
 int ff_dnn_execute_layer_depth2space(DnnOperand *operands, const int32_t *input_operand_indexes,
                                      int32_t output_operand_index, const void *parameters, NativeContext *ctx);
diff --git a/libavfilter/dnn/dnn_backend_native_layer_mathbinary.c b/libavfilter/dnn/dnn_backend_native_layer_mathbinary.c
index 81901c56d9..1a3fa3f132 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_mathbinary.c
+++ b/libavfilter/dnn/dnn_backend_native_layer_mathbinary.c
@@ -159,12 +159,12 @@ int ff_dnn_execute_layer_math_binary(DnnOperand *operands, const int32_t *input_
     output->length = ff_calculate_operand_data_length(output);
     if (output->length <= 0) {
         av_log(ctx, AV_LOG_ERROR, "The output data length overflow\n");
-        return DNN_ERROR;
+        return AVERROR(EINVAL);
     }
     output->data = av_realloc(output->data, output->length);
     if (!output->data) {
         av_log(ctx, AV_LOG_ERROR, "Failed to reallocate memory for output\n");
-        return DNN_ERROR;
+        return AVERROR(ENOMEM);
     }
 
     switch (params->bin_op) {
@@ -188,6 +188,6 @@ int ff_dnn_execute_layer_math_binary(DnnOperand *operands, const int32_t *input_
         return 0;
     default:
         av_log(ctx, AV_LOG_ERROR, "Unmatch math binary operator\n");
-        return DNN_ERROR;
+        return AVERROR(EINVAL);
     }
 }
diff --git a/libavfilter/dnn/dnn_backend_native_layer_mathunary.c b/libavfilter/dnn/dnn_backend_native_layer_mathunary.c
index b8694910d9..e3c5106e5e 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_mathunary.c
+++ b/libavfilter/dnn/dnn_backend_native_layer_mathunary.c
@@ -68,12 +68,12 @@ int ff_dnn_execute_layer_math_unary(DnnOperand *operands, const int32_t *input_o
     output->length = ff_calculate_operand_data_length(output);
     if (output->length <= 0) {
         av_log(ctx, AV_LOG_ERROR, "The output data length overflow\n");
-        return DNN_ERROR;
+        return AVERROR(EINVAL);
     }
     output->data = av_realloc(output->data, output->length);
     if (!output->data) {
         av_log(ctx, AV_LOG_ERROR, "Failed to reallocate memory for output\n");
-        return DNN_ERROR;
+        return AVERROR(ENOMEM);
     }
 
     dims_count = ff_calculate_operand_dims_count(output);
@@ -151,6 +151,6 @@ int ff_dnn_execute_layer_math_unary(DnnOperand *operands, const int32_t *input_o
         return 0;
     default:
         av_log(ctx, AV_LOG_ERROR, "Unmatch math unary operator\n");
-        return DNN_ERROR;
+        return AVERROR(EINVAL);
     }
 }
diff --git a/libavfilter/dnn/dnn_backend_native_layer_mathunary.h b/libavfilter/dnn/dnn_backend_native_layer_mathunary.h
index ed79947896..806e73b29f 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_mathunary.h
+++ b/libavfilter/dnn/dnn_backend_native_layer_mathunary.h
@@ -83,7 +83,8 @@ int ff_dnn_load_layer_math_unary(Layer *layer, AVIOContext *model_file_context,
  * @param parameters unary math layer parameters
  * @param ctx pointer to Native model context for logging
  * @retval 0 if the execution succeeds
- * @retval DNN_ERROR if the execution fails
+ * @retval AVERROR(ENOMEM) if memory allocation fails
+ * @retval AVERROR(EINVAL) for invalid arguments
  */
 int ff_dnn_execute_layer_math_unary(DnnOperand *operands, const int32_t *input_operand_indexes,
                                     int32_t output_operand_index, const void *parameters, NativeContext *ctx);
diff --git a/libavfilter/dnn/dnn_backend_native_layer_maximum.c b/libavfilter/dnn/dnn_backend_native_layer_maximum.c
index a38759eeee..667efaa3b8 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_maximum.c
+++ b/libavfilter/dnn/dnn_backend_native_layer_maximum.c
@@ -65,12 +65,12 @@ int ff_dnn_execute_layer_maximum(DnnOperand *operands, const int32_t *input_oper
     output->length = ff_calculate_operand_data_length(output);
     if (output->length <= 0) {
         av_log(ctx, AV_LOG_ERROR, "The output data length overflow\n");
-        return DNN_ERROR;
+        return AVERROR(EINVAL);
     }
     output->data = av_realloc(output->data, output->length);
     if (!output->data) {
         av_log(ctx, AV_LOG_ERROR, "Failed to reallocate memory for output\n");
-        return DNN_ERROR;
+        return AVERROR(ENOMEM);
     }
 
     dims_count = ff_calculate_operand_dims_count(output);
diff --git a/libavfilter/dnn/dnn_backend_native_layer_pad.c b/libavfilter/dnn/dnn_backend_native_layer_pad.c
index a60451a8de..e274fe12c6 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_pad.c
+++ b/libavfilter/dnn/dnn_backend_native_layer_pad.c
@@ -113,12 +113,12 @@ int ff_dnn_execute_layer_pad(DnnOperand *operands, const int32_t *input_operand_
     output_operand->length = ff_calculate_operand_data_length(output_operand);
     if (output_operand->length <= 0) {
         av_log(ctx, AV_LOG_ERROR, "The output data length overflow\n");
-        return DNN_ERROR;
+        return AVERROR(EINVAL);
     }
     output_operand->data = av_realloc(output_operand->data, output_operand->length);
     if (!output_operand->data) {
         av_log(ctx, AV_LOG_ERROR, "Failed to reallocate memory for output\n");
-        return DNN_ERROR;
+        return AVERROR(ENOMEM);
     }
     output = output_operand->data;
 
diff --git a/libavfilter/dnn_interface.h b/libavfilter/dnn_interface.h
index 37e89d9789..24e0b66661 100644
--- a/libavfilter/dnn_interface.h
+++ b/libavfilter/dnn_interface.h
@@ -30,6 +30,8 @@
 #include "libavutil/frame.h"
 #include "avfilter.h"
 
+#define DNN_GENERIC_ERROR FFERRTAG('D','N','N','!')
+
 typedef enum {DNN_SUCCESS, DNN_ERROR} DNNReturnType;
 
 typedef enum {DNN_NATIVE, DNN_TF, DNN_OV} DNNBackendType;
-- 
2.32.0


* [FFmpeg-devel] [PATCH V2 3/8] lavfi/dnn_io_proc: Return Specific Error Codes
  2022-03-02 18:05 [FFmpeg-devel] [PATCH V2 1/8] libavfilter: Prepare to handle specific error codes in DNN Filters Shubhanshu Saxena
  2022-03-02 18:05 ` [FFmpeg-devel] [PATCH V2 2/8] lavfi/dnn: Error Specificity in Native Backend Layers Shubhanshu Saxena
@ 2022-03-02 18:05 ` Shubhanshu Saxena
  2022-03-02 18:05 ` [FFmpeg-devel] [PATCH V2 4/8] lavfi/dnn_backend_openvino: " Shubhanshu Saxena
                   ` (4 subsequent siblings)
  6 siblings, 0 replies; 9+ messages in thread
From: Shubhanshu Saxena @ 2022-03-02 18:05 UTC (permalink / raw)
  To: ffmpeg-devel; +Cc: Shubhanshu Saxena

This commit makes the functions in dnn_io_proc return specific
error codes instead of DNN_ERROR.
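
The convention applied here, sketched below (the classify_dnn_error()
helper is hypothetical; only the codes come from the diff): unsupported
data types and pixel formats map to AVERROR(ENOSYS) alongside
avpriv_report_missing_feature(), while invalid frame geometry or a
failed scaler setup maps to AVERROR(EINVAL).

    #include "libavutil/error.h"

    /* Illustrative decoder for the codes dnn_io_proc now returns. */
    static const char *classify_dnn_error(int ret)
    {
        if (ret == AVERROR(ENOSYS))
            return "unsupported feature (data type or pixel format)";
        if (ret == AVERROR(EINVAL))
            return "invalid argument (frame geometry / scaler setup)";
        return ret < 0 ? "other error" : "success";
    }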

Signed-off-by: Shubhanshu Saxena <shubhanshu.e01@gmail.com>
---
 libavfilter/dnn/dnn_io_proc.c | 48 +++++++++++++++++++----------------
 libavfilter/dnn/dnn_io_proc.h |  8 +++---
 2 files changed, 30 insertions(+), 26 deletions(-)

diff --git a/libavfilter/dnn/dnn_io_proc.c b/libavfilter/dnn/dnn_io_proc.c
index f55424d97c..36cc051e5e 100644
--- a/libavfilter/dnn/dnn_io_proc.c
+++ b/libavfilter/dnn/dnn_io_proc.c
@@ -24,16 +24,16 @@
 #include "libavutil/avassert.h"
 #include "libavutil/detection_bbox.h"
 
-DNNReturnType ff_proc_from_dnn_to_frame(AVFrame *frame, DNNData *output, void *log_ctx)
+int ff_proc_from_dnn_to_frame(AVFrame *frame, DNNData *output, void *log_ctx)
 {
     struct SwsContext *sws_ctx;
     int bytewidth = av_image_get_linesize(frame->format, frame->width, 0);
     if (bytewidth < 0) {
-        return DNN_ERROR;
+        return AVERROR(EINVAL);
     }
     if (output->dt != DNN_FLOAT) {
         avpriv_report_missing_feature(log_ctx, "data type rather than DNN_FLOAT");
-        return DNN_ERROR;
+        return AVERROR(ENOSYS);
     }
 
     switch (frame->format) {
@@ -51,7 +51,7 @@ DNNReturnType ff_proc_from_dnn_to_frame(AVFrame *frame, DNNData *output, void *l
                 "fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n",
                 av_get_pix_fmt_name(AV_PIX_FMT_GRAYF32), frame->width * 3, frame->height,
                 av_get_pix_fmt_name(AV_PIX_FMT_GRAY8),   frame->width * 3, frame->height);
-            return DNN_ERROR;
+            return AVERROR(EINVAL);
         }
         sws_scale(sws_ctx, (const uint8_t *[4]){(const uint8_t *)output->data, 0, 0, 0},
                            (const int[4]){frame->width * 3 * sizeof(float), 0, 0, 0}, 0, frame->height,
@@ -82,7 +82,7 @@ DNNReturnType ff_proc_from_dnn_to_frame(AVFrame *frame, DNNData *output, void *l
                 "fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n",
                 av_get_pix_fmt_name(AV_PIX_FMT_GRAYF32), frame->width, frame->height,
                 av_get_pix_fmt_name(AV_PIX_FMT_GRAY8),   frame->width, frame->height);
-            return DNN_ERROR;
+            return AVERROR(EINVAL);
         }
         sws_scale(sws_ctx, (const uint8_t *[4]){(const uint8_t *)output->data, 0, 0, 0},
                            (const int[4]){frame->width * sizeof(float), 0, 0, 0}, 0, frame->height,
@@ -91,22 +91,22 @@ DNNReturnType ff_proc_from_dnn_to_frame(AVFrame *frame, DNNData *output, void *l
         return DNN_SUCCESS;
     default:
         avpriv_report_missing_feature(log_ctx, "%s", av_get_pix_fmt_name(frame->format));
-        return DNN_ERROR;
+        return AVERROR(ENOSYS);
     }
 
     return DNN_SUCCESS;
 }
 
-DNNReturnType ff_proc_from_frame_to_dnn(AVFrame *frame, DNNData *input, void *log_ctx)
+int ff_proc_from_frame_to_dnn(AVFrame *frame, DNNData *input, void *log_ctx)
 {
     struct SwsContext *sws_ctx;
     int bytewidth = av_image_get_linesize(frame->format, frame->width, 0);
     if (bytewidth < 0) {
-        return DNN_ERROR;
+        return AVERROR(EINVAL);
     }
     if (input->dt != DNN_FLOAT) {
         avpriv_report_missing_feature(log_ctx, "data type rather than DNN_FLOAT");
-        return DNN_ERROR;
+        return AVERROR(ENOSYS);
     }
 
     switch (frame->format) {
@@ -124,7 +124,7 @@ DNNReturnType ff_proc_from_frame_to_dnn(AVFrame *frame, DNNData *input, void *lo
                 "fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n",
                 av_get_pix_fmt_name(AV_PIX_FMT_GRAY8),  frame->width * 3, frame->height,
                 av_get_pix_fmt_name(AV_PIX_FMT_GRAYF32),frame->width * 3, frame->height);
-            return DNN_ERROR;
+            return AVERROR(EINVAL);
         }
         sws_scale(sws_ctx, (const uint8_t **)frame->data,
                            frame->linesize, 0, frame->height,
@@ -156,7 +156,7 @@ DNNReturnType ff_proc_from_frame_to_dnn(AVFrame *frame, DNNData *input, void *lo
                 "fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n",
                 av_get_pix_fmt_name(AV_PIX_FMT_GRAY8),  frame->width, frame->height,
                 av_get_pix_fmt_name(AV_PIX_FMT_GRAYF32),frame->width, frame->height);
-            return DNN_ERROR;
+            return AVERROR(EINVAL);
         }
         sws_scale(sws_ctx, (const uint8_t **)frame->data,
                            frame->linesize, 0, frame->height,
@@ -166,7 +166,7 @@ DNNReturnType ff_proc_from_frame_to_dnn(AVFrame *frame, DNNData *input, void *lo
         break;
     default:
         avpriv_report_missing_feature(log_ctx, "%s", av_get_pix_fmt_name(frame->format));
-        return DNN_ERROR;
+        return AVERROR(ENOSYS);
     }
 
     return DNN_SUCCESS;
@@ -190,13 +190,14 @@ static enum AVPixelFormat get_pixel_format(DNNData *data)
     return AV_PIX_FMT_BGR24;
 }
 
-DNNReturnType ff_frame_to_dnn_classify(AVFrame *frame, DNNData *input, uint32_t bbox_index, void *log_ctx)
+int ff_frame_to_dnn_classify(AVFrame *frame, DNNData *input, uint32_t bbox_index, void *log_ctx)
 {
     const AVPixFmtDescriptor *desc;
     int offsetx[4], offsety[4];
     uint8_t *bbox_data[4];
     struct SwsContext *sws_ctx;
     int linesizes[4];
+    int ret = DNN_SUCCESS;
     enum AVPixelFormat fmt;
     int left, top, width, height;
     const AVDetectionBBoxHeader *header;
@@ -221,13 +222,14 @@ DNNReturnType ff_frame_to_dnn_classify(AVFrame *frame, DNNData *input, uint32_t
                "fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n",
                av_get_pix_fmt_name(frame->format), width, height,
                av_get_pix_fmt_name(fmt), input->width, input->height);
-        return DNN_ERROR;
+        return AVERROR(EINVAL);
     }
 
-    if (av_image_fill_linesizes(linesizes, fmt, input->width) < 0) {
+    ret = av_image_fill_linesizes(linesizes, fmt, input->width);
+    if (ret < 0) {
         av_log(log_ctx, AV_LOG_ERROR, "unable to get linesizes with av_image_fill_linesizes");
         sws_freeContext(sws_ctx);
-        return DNN_ERROR;
+        return ret;
     }
 
     desc = av_pix_fmt_desc_get(frame->format);
@@ -246,13 +248,14 @@ DNNReturnType ff_frame_to_dnn_classify(AVFrame *frame, DNNData *input, uint32_t
 
     sws_freeContext(sws_ctx);
 
-    return DNN_SUCCESS;
+    return ret;
 }
 
-DNNReturnType ff_frame_to_dnn_detect(AVFrame *frame, DNNData *input, void *log_ctx)
+int ff_frame_to_dnn_detect(AVFrame *frame, DNNData *input, void *log_ctx)
 {
     struct SwsContext *sws_ctx;
     int linesizes[4];
+    int ret = DNN_SUCCESS;
     enum AVPixelFormat fmt = get_pixel_format(input);
     sws_ctx = sws_getContext(frame->width, frame->height, frame->format,
                              input->width, input->height, fmt,
@@ -262,18 +265,19 @@ DNNReturnType ff_frame_to_dnn_detect(AVFrame *frame, DNNData *input, void *log_c
             "fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n",
             av_get_pix_fmt_name(frame->format), frame->width, frame->height,
             av_get_pix_fmt_name(fmt), input->width, input->height);
-        return DNN_ERROR;
+        return AVERROR(EINVAL);
     }
 
-    if (av_image_fill_linesizes(linesizes, fmt, input->width) < 0) {
+    ret = av_image_fill_linesizes(linesizes, fmt, input->width);
+    if (ret < 0) {
         av_log(log_ctx, AV_LOG_ERROR, "unable to get linesizes with av_image_fill_linesizes");
         sws_freeContext(sws_ctx);
-        return DNN_ERROR;
+        return ret;
     }
 
     sws_scale(sws_ctx, (const uint8_t *const *)frame->data, frame->linesize, 0, frame->height,
                        (uint8_t *const [4]){input->data, 0, 0, 0}, linesizes);
 
     sws_freeContext(sws_ctx);
-    return DNN_SUCCESS;
+    return ret;
 }
diff --git a/libavfilter/dnn/dnn_io_proc.h b/libavfilter/dnn/dnn_io_proc.h
index daef01aceb..a3dd94675b 100644
--- a/libavfilter/dnn/dnn_io_proc.h
+++ b/libavfilter/dnn/dnn_io_proc.h
@@ -30,9 +30,9 @@
 #include "../dnn_interface.h"
 #include "libavutil/frame.h"
 
-DNNReturnType ff_proc_from_frame_to_dnn(AVFrame *frame, DNNData *input, void *log_ctx);
-DNNReturnType ff_proc_from_dnn_to_frame(AVFrame *frame, DNNData *output, void *log_ctx);
-DNNReturnType ff_frame_to_dnn_detect(AVFrame *frame, DNNData *input, void *log_ctx);
-DNNReturnType ff_frame_to_dnn_classify(AVFrame *frame, DNNData *input, uint32_t bbox_index, void *log_ctx);
+int ff_proc_from_frame_to_dnn(AVFrame *frame, DNNData *input, void *log_ctx);
+int ff_proc_from_dnn_to_frame(AVFrame *frame, DNNData *output, void *log_ctx);
+int ff_frame_to_dnn_detect(AVFrame *frame, DNNData *input, void *log_ctx);
+int ff_frame_to_dnn_classify(AVFrame *frame, DNNData *input, uint32_t bbox_index, void *log_ctx);
 
 #endif
-- 
2.32.0


* [FFmpeg-devel] [PATCH V2 4/8] lavfi/dnn_backend_openvino: Return Specific Error Codes
  2022-03-02 18:05 [FFmpeg-devel] [PATCH V2 1/8] libavfilter: Prepare to handle specific error codes in DNN Filters Shubhanshu Saxena
  2022-03-02 18:05 ` [FFmpeg-devel] [PATCH V2 2/8] lavfi/dnn: Error Specificity in Native Backend Layers Shubhanshu Saxena
  2022-03-02 18:05 ` [FFmpeg-devel] [PATCH V2 3/8] lavfi/dnn_io_proc: Return Specific Error Codes Shubhanshu Saxena
@ 2022-03-02 18:05 ` Shubhanshu Saxena
  2022-03-02 18:05 ` [FFmpeg-devel] [PATCH V2 5/8] lavfi/dnn_backend_tf: " Shubhanshu Saxena
                   ` (3 subsequent siblings)
  6 siblings, 0 replies; 9+ messages in thread
From: Shubhanshu Saxena @ 2022-03-02 18:05 UTC (permalink / raw)
  To: ffmpeg-devel; +Cc: Shubhanshu Saxena

Switch to returning specific error codes or DNN_GENERIC_ERROR
when an error is encountered. For OpenVINO API errors,
DNN_GENERIC_ERROR is currently returned.
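
The init and execute paths are converted to the pattern sketched below
(condensed, with hypothetical stand-ins for the OpenVINO calls; the
real code is init_model_ov() in the diff): the specific code is stored
in ret before each goto, and the single err: label cleans up and
returns it.

    #include "libavutil/error.h"

    #define DNN_GENERIC_ERROR FFERRTAG('D','N','N','!') /* dnn_interface.h */

    /* Sketch: status_ok stands in for an IEStatusCode == OK check. */
    static int init_model_sketch(int status_ok, void *allocation)
    {
        int ret = 0; /* DNN_SUCCESS */

        if (!status_ok) {
            ret = DNN_GENERIC_ERROR; /* OpenVINO C API exposes no errno */
            goto err;
        }
        if (!allocation) {
            ret = AVERROR(ENOMEM);
            goto err;
        }
        return 0;
    err:
        /* the real code frees the model here before returning */
        return ret; /* previously a bare DNN_ERROR */
    }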

Signed-off-by: Shubhanshu Saxena <shubhanshu.e01@gmail.com>
---
 libavfilter/dnn/dnn_backend_openvino.c | 138 +++++++++++++++----------
 libavfilter/dnn/dnn_backend_openvino.h |   4 +-
 libavfilter/dnn_interface.h            |  10 +-
 3 files changed, 89 insertions(+), 63 deletions(-)

diff --git a/libavfilter/dnn/dnn_backend_openvino.c b/libavfilter/dnn/dnn_backend_openvino.c
index f5b1454d21..2f140e996b 100644
--- a/libavfilter/dnn/dnn_backend_openvino.c
+++ b/libavfilter/dnn/dnn_backend_openvino.c
@@ -112,7 +112,7 @@ static int get_datatype_size(DNNDataType dt)
     }
 }
 
-static DNNReturnType fill_model_input_ov(OVModel *ov_model, OVRequestItem *request)
+static int fill_model_input_ov(OVModel *ov_model, OVRequestItem *request)
 {
     dimensions_t dims;
     precision_e precision;
@@ -131,7 +131,7 @@ static DNNReturnType fill_model_input_ov(OVModel *ov_model, OVRequestItem *reque
     status = ie_infer_request_get_blob(request->infer_request, task->input_name, &input_blob);
     if (status != OK) {
         av_log(ctx, AV_LOG_ERROR, "Failed to get input blob with name %s\n", task->input_name);
-        return DNN_ERROR;
+        return DNN_GENERIC_ERROR;
     }
 
     status |= ie_blob_get_dims(input_blob, &dims);
@@ -139,14 +139,14 @@ static DNNReturnType fill_model_input_ov(OVModel *ov_model, OVRequestItem *reque
     if (status != OK) {
         ie_blob_free(&input_blob);
         av_log(ctx, AV_LOG_ERROR, "Failed to get input blob dims/precision\n");
-        return DNN_ERROR;
+        return DNN_GENERIC_ERROR;
     }
 
     status = ie_blob_get_buffer(input_blob, &blob_buffer);
     if (status != OK) {
         ie_blob_free(&input_blob);
         av_log(ctx, AV_LOG_ERROR, "Failed to get input blob buffer\n");
-        return DNN_ERROR;
+        return DNN_GENERIC_ERROR;
     }
 
     input.height = dims.dims[2];
@@ -301,8 +301,9 @@ static void infer_completion_callback(void *args)
     }
 }
 
-static DNNReturnType init_model_ov(OVModel *ov_model, const char *input_name, const char *output_name)
+static int init_model_ov(OVModel *ov_model, const char *input_name, const char *output_name)
 {
+    int ret = DNN_SUCCESS;
     OVContext *ctx = &ov_model->ctx;
     IEStatusCode status;
     ie_available_devices_t a_dev;
@@ -317,14 +318,18 @@ static DNNReturnType init_model_ov(OVModel *ov_model, const char *input_name, co
     if (ctx->options.batch_size > 1) {
         input_shapes_t input_shapes;
         status = ie_network_get_input_shapes(ov_model->network, &input_shapes);
-        if (status != OK)
+        if (status != OK) {
+            ret = DNN_GENERIC_ERROR;
             goto err;
+        }
         for (int i = 0; i < input_shapes.shape_num; i++)
             input_shapes.shapes[i].shape.dims[0] = ctx->options.batch_size;
         status = ie_network_reshape(ov_model->network, input_shapes);
         ie_network_input_shapes_free(&input_shapes);
-        if (status != OK)
+        if (status != OK) {
+            ret = DNN_GENERIC_ERROR;
             goto err;
+        }
     }
 
     // The order of dims in the openvino is fixed and it is always NCHW for 4-D data.
@@ -332,11 +337,13 @@ static DNNReturnType init_model_ov(OVModel *ov_model, const char *input_name, co
     status = ie_network_set_input_layout(ov_model->network, input_name, NHWC);
     if (status != OK) {
         av_log(ctx, AV_LOG_ERROR, "Failed to set layout as NHWC for input %s\n", input_name);
+        ret = DNN_GENERIC_ERROR;
         goto err;
     }
     status = ie_network_set_output_layout(ov_model->network, output_name, NHWC);
     if (status != OK) {
         av_log(ctx, AV_LOG_ERROR, "Failed to set layout as NHWC for output %s\n", output_name);
+        ret = DNN_GENERIC_ERROR;
         goto err;
     }
 
@@ -350,6 +357,7 @@ static DNNReturnType init_model_ov(OVModel *ov_model, const char *input_name, co
         status = ie_network_set_input_precision(ov_model->network, input_name, U8);
         if (status != OK) {
             av_log(ctx, AV_LOG_ERROR, "Failed to set input precision as U8 for %s\n", input_name);
+            ret = DNN_GENERIC_ERROR;
             goto err;
         }
     }
@@ -360,6 +368,7 @@ static DNNReturnType init_model_ov(OVModel *ov_model, const char *input_name, co
         status = ie_core_get_available_devices(ov_model->core, &a_dev);
         if (status != OK) {
             av_log(ctx, AV_LOG_ERROR, "Failed to get available devices\n");
+            ret = DNN_GENERIC_ERROR;
             goto err;
         }
         for (int i = 0; i < a_dev.num_devices; i++) {
@@ -367,6 +376,7 @@ static DNNReturnType init_model_ov(OVModel *ov_model, const char *input_name, co
         }
         av_log(ctx, AV_LOG_ERROR,"device %s may not be supported, all available devices are: \"%s\"\n",
                ctx->options.device_type, all_dev_names);
+        ret = AVERROR(ENODEV);
         goto err;
     }
 
@@ -378,12 +388,14 @@ static DNNReturnType init_model_ov(OVModel *ov_model, const char *input_name, co
 
     ov_model->request_queue = ff_safe_queue_create();
     if (!ov_model->request_queue) {
+        ret = AVERROR(ENOMEM);
         goto err;
     }
 
     for (int i = 0; i < ctx->options.nireq; i++) {
         OVRequestItem *item = av_mallocz(sizeof(*item));
         if (!item) {
+            ret = AVERROR(ENOMEM);
             goto err;
         }
 
@@ -391,16 +403,19 @@ static DNNReturnType init_model_ov(OVModel *ov_model, const char *input_name, co
         item->callback.args = item;
         if (ff_safe_queue_push_back(ov_model->request_queue, item) < 0) {
             av_freep(&item);
+            ret = AVERROR(ENOMEM);
             goto err;
         }
 
         status = ie_exec_network_create_infer_request(ov_model->exe_network, &item->infer_request);
         if (status != OK) {
+            ret = DNN_GENERIC_ERROR;
             goto err;
         }
 
         item->lltasks = av_malloc_array(ctx->options.batch_size, sizeof(*item->lltasks));
         if (!item->lltasks) {
+            ret = AVERROR(ENOMEM);
             goto err;
         }
         item->lltask_count = 0;
@@ -408,11 +423,13 @@ static DNNReturnType init_model_ov(OVModel *ov_model, const char *input_name, co
 
     ov_model->task_queue = ff_queue_create();
     if (!ov_model->task_queue) {
+        ret = AVERROR(ENOMEM);
         goto err;
     }
 
     ov_model->lltask_queue = ff_queue_create();
     if (!ov_model->lltask_queue) {
+        ret = AVERROR(ENOMEM);
         goto err;
     }
 
@@ -420,14 +437,14 @@ static DNNReturnType init_model_ov(OVModel *ov_model, const char *input_name, co
 
 err:
     ff_dnn_free_model_ov(&ov_model->model);
-    return DNN_ERROR;
+    return ret;
 }
 
-static DNNReturnType execute_model_ov(OVRequestItem *request, Queue *inferenceq)
+static int execute_model_ov(OVRequestItem *request, Queue *inferenceq)
 {
     IEStatusCode status;
-    DNNReturnType ret;
     LastLevelTaskItem *lltask;
+    int ret = DNN_SUCCESS;
     TaskItem *task;
     OVContext *ctx;
     OVModel *ov_model;
@@ -451,11 +468,13 @@ static DNNReturnType execute_model_ov(OVRequestItem *request, Queue *inferenceq)
         status = ie_infer_set_completion_callback(request->infer_request, &request->callback);
         if (status != OK) {
             av_log(ctx, AV_LOG_ERROR, "Failed to set completion callback for inference\n");
+            ret = DNN_GENERIC_ERROR;
             goto err;
         }
         status = ie_infer_request_infer_async(request->infer_request);
         if (status != OK) {
             av_log(ctx, AV_LOG_ERROR, "Failed to start async inference\n");
+            ret = DNN_GENERIC_ERROR;
             goto err;
         }
         return DNN_SUCCESS;
@@ -467,20 +486,21 @@ static DNNReturnType execute_model_ov(OVRequestItem *request, Queue *inferenceq)
         status = ie_infer_request_infer(request->infer_request);
         if (status != OK) {
             av_log(ctx, AV_LOG_ERROR, "Failed to start synchronous model inference\n");
+            ret = DNN_GENERIC_ERROR;
             goto err;
         }
         infer_completion_callback(request);
-        return (task->inference_done == task->inference_todo) ? DNN_SUCCESS : DNN_ERROR;
+        return (task->inference_done == task->inference_todo) ? DNN_SUCCESS : DNN_GENERIC_ERROR;
     }
 err:
     if (ff_safe_queue_push_back(ov_model->request_queue, request) < 0) {
         ie_infer_request_free(&request->infer_request);
         av_freep(&request);
     }
-    return DNN_ERROR;
+    return ret;
 }
 
-static DNNReturnType get_input_ov(void *model, DNNData *input, const char *input_name)
+static int get_input_ov(void *model, DNNData *input, const char *input_name)
 {
     OVModel *ov_model = model;
     OVContext *ctx = &ov_model->ctx;
@@ -495,14 +515,14 @@ static DNNReturnType get_input_ov(void *model, DNNData *input, const char *input
     status = ie_network_get_inputs_number(ov_model->network, &model_input_count);
     if (status != OK) {
         av_log(ctx, AV_LOG_ERROR, "Failed to get input count\n");
-        return DNN_ERROR;
+        return DNN_GENERIC_ERROR;
     }
 
     for (size_t i = 0; i < model_input_count; i++) {
         status = ie_network_get_input_name(ov_model->network, i, &model_input_name);
         if (status != OK) {
             av_log(ctx, AV_LOG_ERROR, "Failed to get No.%d input's name\n", (int)i);
-            return DNN_ERROR;
+            return DNN_GENERIC_ERROR;
         }
         if (strcmp(model_input_name, input_name) == 0) {
             ie_network_name_free(&model_input_name);
@@ -510,7 +530,7 @@ static DNNReturnType get_input_ov(void *model, DNNData *input, const char *input
             status |= ie_network_get_input_precision(ov_model->network, input_name, &precision);
             if (status != OK) {
                 av_log(ctx, AV_LOG_ERROR, "Failed to get No.%d input's dims or precision\n", (int)i);
-                return DNN_ERROR;
+                return DNN_GENERIC_ERROR;
             }
 
             input->channels = dims.dims[1];
@@ -527,7 +547,7 @@ static DNNReturnType get_input_ov(void *model, DNNData *input, const char *input
     }
 
     av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model, all input(s) are: \"%s\"\n", input_name, all_input_names);
-    return DNN_ERROR;
+    return AVERROR(EINVAL);
 }
 
 static int contain_valid_detection_bbox(AVFrame *frame)
@@ -567,7 +587,7 @@ static int contain_valid_detection_bbox(AVFrame *frame)
     return 1;
 }
 
-static DNNReturnType extract_lltask_from_task(DNNFunctionType func_type, TaskItem *task, Queue *lltask_queue, DNNExecBaseParams *exec_params)
+static int extract_lltask_from_task(DNNFunctionType func_type, TaskItem *task, Queue *lltask_queue, DNNExecBaseParams *exec_params)
 {
     switch (func_type) {
     case DFT_PROCESS_FRAME:
@@ -575,14 +595,14 @@ static DNNReturnType extract_lltask_from_task(DNNFunctionType func_type, TaskIte
     {
         LastLevelTaskItem *lltask = av_malloc(sizeof(*lltask));
         if (!lltask) {
-            return DNN_ERROR;
+            return AVERROR(ENOMEM);
         }
         task->inference_todo = 1;
         task->inference_done = 0;
         lltask->task = task;
         if (ff_queue_push_back(lltask_queue, lltask) < 0) {
             av_freep(&lltask);
-            return DNN_ERROR;
+            return AVERROR(ENOMEM);
         }
         return DNN_SUCCESS;
     }
@@ -615,28 +635,28 @@ static DNNReturnType extract_lltask_from_task(DNNFunctionType func_type, TaskIte
 
             lltask = av_malloc(sizeof(*lltask));
             if (!lltask) {
-                return DNN_ERROR;
+                return AVERROR(ENOMEM);
             }
             task->inference_todo++;
             lltask->task = task;
             lltask->bbox_index = i;
             if (ff_queue_push_back(lltask_queue, lltask) < 0) {
                 av_freep(&lltask);
-                return DNN_ERROR;
+                return AVERROR(ENOMEM);
             }
         }
         return DNN_SUCCESS;
     }
     default:
         av_assert0(!"should not reach here");
-        return DNN_ERROR;
+        return AVERROR(EINVAL);
     }
 }
 
-static DNNReturnType get_output_ov(void *model, const char *input_name, int input_width, int input_height,
+static int get_output_ov(void *model, const char *input_name, int input_width, int input_height,
                                    const char *output_name, int *output_width, int *output_height)
 {
-    DNNReturnType ret;
+    int ret;
     OVModel *ov_model = model;
     OVContext *ctx = &ov_model->ctx;
     TaskItem task;
@@ -653,7 +673,7 @@ static DNNReturnType get_output_ov(void *model, const char *input_name, int inpu
 
     if (ov_model->model->func_type != DFT_PROCESS_FRAME) {
         av_log(ctx, AV_LOG_ERROR, "Get output dim only when processing frame.\n");
-        return DNN_ERROR;
+        return AVERROR(EINVAL);
     }
 
     if (ctx->options.input_resizable) {
@@ -664,31 +684,33 @@ static DNNReturnType get_output_ov(void *model, const char *input_name, int inpu
         ie_network_input_shapes_free(&input_shapes);
         if (status != OK) {
             av_log(ctx, AV_LOG_ERROR, "Failed to reshape input size for %s\n", input_name);
-            return DNN_ERROR;
+            return DNN_GENERIC_ERROR;
         }
     }
 
     if (!ov_model->exe_network) {
-        if (init_model_ov(ov_model, input_name, output_name) != DNN_SUCCESS) {
+        ret = init_model_ov(ov_model, input_name, output_name);
+        if (ret != DNN_SUCCESS) {
             av_log(ctx, AV_LOG_ERROR, "Failed init OpenVINO exectuable network or inference request\n");
-            return DNN_ERROR;
+            return ret;
         }
     }
 
-    if (ff_dnn_fill_gettingoutput_task(&task, &exec_params, ov_model, input_height, input_width, ctx) != DNN_SUCCESS) {
-        return DNN_ERROR;
+    ret = ff_dnn_fill_gettingoutput_task(&task, &exec_params, ov_model, input_height, input_width, ctx);
+    if (ret != DNN_SUCCESS) {
+        goto err;
     }
 
-    if (extract_lltask_from_task(ov_model->model->func_type, &task, ov_model->lltask_queue, NULL) != DNN_SUCCESS) {
-        av_log(ctx, AV_LOG_ERROR, "unable to extract last level task from task.\n");
-        ret = DNN_ERROR;
+    ret = extract_lltask_from_task(ov_model->model->func_type, &task, ov_model->lltask_queue, NULL);
+    if (ret != DNN_SUCCESS) {
+        av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
         goto err;
     }
 
     request = ff_safe_queue_pop_front(ov_model->request_queue);
     if (!request) {
         av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
-        ret = DNN_ERROR;
+        ret = AVERROR(EINVAL);
         goto err;
     }
 
@@ -758,45 +780,49 @@ err:
     return NULL;
 }
 
-DNNReturnType ff_dnn_execute_model_ov(const DNNModel *model, DNNExecBaseParams *exec_params)
+int ff_dnn_execute_model_ov(const DNNModel *model, DNNExecBaseParams *exec_params)
 {
     OVModel *ov_model = model->model;
     OVContext *ctx = &ov_model->ctx;
     OVRequestItem *request;
     TaskItem *task;
-    DNNReturnType ret;
+    int ret;
 
-    if (ff_check_exec_params(ctx, DNN_OV, model->func_type, exec_params) != 0) {
-        return DNN_ERROR;
+    ret = ff_check_exec_params(ctx, DNN_OV, model->func_type, exec_params);
+    if (ret != 0) {
+        return ret;
     }
 
     if (!ov_model->exe_network) {
-        if (init_model_ov(ov_model, exec_params->input_name, exec_params->output_names[0]) != DNN_SUCCESS) {
+        ret = init_model_ov(ov_model, exec_params->input_name, exec_params->output_names[0]);
+        if (ret != DNN_SUCCESS) {
             av_log(ctx, AV_LOG_ERROR, "Failed init OpenVINO exectuable network or inference request\n");
-            return DNN_ERROR;
+            return ret;
         }
     }
 
     task = av_malloc(sizeof(*task));
     if (!task) {
         av_log(ctx, AV_LOG_ERROR, "unable to alloc memory for task item.\n");
-        return DNN_ERROR;
+        return AVERROR(ENOMEM);
     }
 
-    if (ff_dnn_fill_task(task, exec_params, ov_model, ctx->options.async, 1) != DNN_SUCCESS) {
+    ret = ff_dnn_fill_task(task, exec_params, ov_model, ctx->options.async, 1);
+    if (ret != DNN_SUCCESS) {
         av_freep(&task);
-        return DNN_ERROR;
+        return ret;
     }
 
     if (ff_queue_push_back(ov_model->task_queue, task) < 0) {
         av_freep(&task);
         av_log(ctx, AV_LOG_ERROR, "unable to push back task_queue.\n");
-        return DNN_ERROR;
+        return AVERROR(ENOMEM);
     }
 
-    if (extract_lltask_from_task(model->func_type, task, ov_model->lltask_queue, exec_params) != DNN_SUCCESS) {
+    ret = extract_lltask_from_task(model->func_type, task, ov_model->lltask_queue, exec_params);
+    if (ret != DNN_SUCCESS) {
         av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
-        return DNN_ERROR;
+        return ret;
     }
 
     if (ctx->options.async) {
@@ -804,7 +830,7 @@ DNNReturnType ff_dnn_execute_model_ov(const DNNModel *model, DNNExecBaseParams *
             request = ff_safe_queue_pop_front(ov_model->request_queue);
             if (!request) {
                 av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
-                return DNN_ERROR;
+                return AVERROR(EINVAL);
             }
 
             ret = execute_model_ov(request, ov_model->lltask_queue);
@@ -820,18 +846,18 @@ DNNReturnType ff_dnn_execute_model_ov(const DNNModel *model, DNNExecBaseParams *
             // Classification filter has not been completely
             // tested with the sync mode. So, do not support now.
             avpriv_report_missing_feature(ctx, "classify for sync execution");
-            return DNN_ERROR;
+            return AVERROR(ENOSYS);
         }
 
         if (ctx->options.batch_size > 1) {
             avpriv_report_missing_feature(ctx, "batch mode for sync execution");
-            return DNN_ERROR;
+            return AVERROR(ENOSYS);
         }
 
         request = ff_safe_queue_pop_front(ov_model->request_queue);
         if (!request) {
             av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
-            return DNN_ERROR;
+            return AVERROR(EINVAL);
         }
         return execute_model_ov(request, ov_model->lltask_queue);
     }
@@ -843,13 +869,13 @@ DNNAsyncStatusType ff_dnn_get_result_ov(const DNNModel *model, AVFrame **in, AVF
     return ff_dnn_get_result_common(ov_model->task_queue, in, out);
 }
 
-DNNReturnType ff_dnn_flush_ov(const DNNModel *model)
+int ff_dnn_flush_ov(const DNNModel *model)
 {
     OVModel *ov_model = model->model;
     OVContext *ctx = &ov_model->ctx;
     OVRequestItem *request;
     IEStatusCode status;
-    DNNReturnType ret;
+    int ret;
 
     if (ff_queue_size(ov_model->lltask_queue) == 0) {
         // no pending task need to flush
@@ -859,7 +885,7 @@ DNNReturnType ff_dnn_flush_ov(const DNNModel *model)
     request = ff_safe_queue_pop_front(ov_model->request_queue);
     if (!request) {
         av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
-        return DNN_ERROR;
+        return AVERROR(EINVAL);
     }
 
     ret = fill_model_input_ov(ov_model, request);
@@ -870,12 +896,12 @@ DNNReturnType ff_dnn_flush_ov(const DNNModel *model)
     status = ie_infer_set_completion_callback(request->infer_request, &request->callback);
     if (status != OK) {
         av_log(ctx, AV_LOG_ERROR, "Failed to set completion callback for inference\n");
-        return DNN_ERROR;
+        return DNN_GENERIC_ERROR;
     }
     status = ie_infer_request_infer_async(request->infer_request);
     if (status != OK) {
         av_log(ctx, AV_LOG_ERROR, "Failed to start async inference\n");
-        return DNN_ERROR;
+        return DNN_GENERIC_ERROR;
     }
 
     return DNN_SUCCESS;
diff --git a/libavfilter/dnn/dnn_backend_openvino.h b/libavfilter/dnn/dnn_backend_openvino.h
index 0bbca0c057..304bc96b99 100644
--- a/libavfilter/dnn/dnn_backend_openvino.h
+++ b/libavfilter/dnn/dnn_backend_openvino.h
@@ -31,9 +31,9 @@
 
 DNNModel *ff_dnn_load_model_ov(const char *model_filename, DNNFunctionType func_type, const char *options, AVFilterContext *filter_ctx);
 
-DNNReturnType ff_dnn_execute_model_ov(const DNNModel *model, DNNExecBaseParams *exec_params);
+int ff_dnn_execute_model_ov(const DNNModel *model, DNNExecBaseParams *exec_params);
 DNNAsyncStatusType ff_dnn_get_result_ov(const DNNModel *model, AVFrame **in, AVFrame **out);
-DNNReturnType ff_dnn_flush_ov(const DNNModel *model);
+int ff_dnn_flush_ov(const DNNModel *model);
 
 void ff_dnn_free_model_ov(DNNModel **model);
 
diff --git a/libavfilter/dnn_interface.h b/libavfilter/dnn_interface.h
index 24e0b66661..06e71f7946 100644
--- a/libavfilter/dnn_interface.h
+++ b/libavfilter/dnn_interface.h
@@ -94,9 +94,9 @@ typedef struct DNNModel{
     DNNFunctionType func_type;
     // Gets model input information
     // Just reuse struct DNNData here, actually the DNNData.data field is not needed.
-    DNNReturnType (*get_input)(void *model, DNNData *input, const char *input_name);
+    int (*get_input)(void *model, DNNData *input, const char *input_name);
     // Gets model output width/height with given input w/h
-    DNNReturnType (*get_output)(void *model, const char *input_name, int input_width, int input_height,
+    int (*get_output)(void *model, const char *input_name, int input_width, int input_height,
                                 const char *output_name, int *output_width, int *output_height);
     // set the pre process to transfer data from AVFrame to DNNData
     // the default implementation within DNN is used if it is not provided by the filter
@@ -114,12 +114,12 @@ typedef struct DNNModel{
 typedef struct DNNModule{
     // Loads model and parameters from given file. Returns NULL if it is not possible.
     DNNModel *(*load_model)(const char *model_filename, DNNFunctionType func_type, const char *options, AVFilterContext *filter_ctx);
-    // Executes model with specified input and output. Returns DNN_ERROR otherwise.
-    DNNReturnType (*execute_model)(const DNNModel *model, DNNExecBaseParams *exec_params);
+    // Executes model with specified input and output. Returns the error code otherwise.
+    int (*execute_model)(const DNNModel *model, DNNExecBaseParams *exec_params);
     // Retrieve inference result.
     DNNAsyncStatusType (*get_result)(const DNNModel *model, AVFrame **in, AVFrame **out);
     // Flush all the pending tasks.
-    DNNReturnType (*flush)(const DNNModel *model);
+    int (*flush)(const DNNModel *model);
     // Frees memory allocated for model.
     void (*free_model)(DNNModel **model);
 } DNNModule;
-- 
2.32.0

_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".

^ permalink raw reply	[flat|nested] 9+ messages in thread

* [FFmpeg-devel] [PATCH V2 5/8] lavfi/dnn_backend_tf: Return Specific Error Codes
  2022-03-02 18:05 [FFmpeg-devel] [PATCH V2 1/8] libavfilter: Prepare to handle specific error codes in DNN Filters Shubhanshu Saxena
                   ` (2 preceding siblings ...)
  2022-03-02 18:05 ` [FFmpeg-devel] [PATCH V2 4/8] lavfi/dnn_backend_openvino: " Shubhanshu Saxena
@ 2022-03-02 18:05 ` Shubhanshu Saxena
  2022-03-02 18:05 ` [FFmpeg-devel] [PATCH V2 6/8] lavfi/dnn_backend_native: " Shubhanshu Saxena
                   ` (2 subsequent siblings)
  6 siblings, 0 replies; 9+ messages in thread
From: Shubhanshu Saxena @ 2022-03-02 18:05 UTC (permalink / raw)
  To: ffmpeg-devel; +Cc: Shubhanshu Saxena

Switch to returning specific error codes or DNN_GENERIC_ERROR
when an error is encountered. For errors raised by the TensorFlow
C API, DNN_GENERIC_ERROR is returned for now.
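
As an illustration of the convention (not part of the patch), the sketch
below separates the two classes of failure. AVERROR is re-derived locally
and DNN_GENERIC_ERROR is given a stand-in value, since the real
definitions live in libavutil/error.h and libavfilter/dnn_interface.h:

    #include <errno.h>
    #include <stdio.h>

    /* Stand-ins for the real macros; values are illustrative only. */
    #define AVERROR(e)        (-(e))
    #define DNN_GENERIC_ERROR (-1000) /* placeholder, not the real tag */

    /* Hypothetical backend entry point following the new convention:
     * bad arguments map to a specific AVERROR, opaque library failures
     * map to DNN_GENERIC_ERROR, and success is DNN_SUCCESS (0). */
    static int start_inference(void *request, int library_ok)
    {
        if (!request)
            return AVERROR(EINVAL);
        if (!library_ok)
            return DNN_GENERIC_ERROR;
        return 0;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               start_inference(NULL, 1),       /* -EINVAL */
               start_inference(&(int){0}, 0),  /* DNN_GENERIC_ERROR */
               start_inference(&(int){0}, 1)); /* 0 */
        return 0;
    }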

Signed-off-by: Shubhanshu Saxena <shubhanshu.e01@gmail.com>
---
 libavfilter/dnn/dnn_backend_tf.c | 148 +++++++++++++++++--------------
 libavfilter/dnn/dnn_backend_tf.h |   4 +-
 2 files changed, 85 insertions(+), 67 deletions(-)

diff --git a/libavfilter/dnn/dnn_backend_tf.c b/libavfilter/dnn/dnn_backend_tf.c
index 7dd48fb612..cede1286c3 100644
--- a/libavfilter/dnn/dnn_backend_tf.c
+++ b/libavfilter/dnn/dnn_backend_tf.c
@@ -90,7 +90,7 @@ static const AVOption dnn_tensorflow_options[] = {
 
 AVFILTER_DEFINE_CLASS(dnn_tensorflow);
 
-static DNNReturnType execute_model_tf(TFRequestItem *request, Queue *lltask_queue);
+static int execute_model_tf(TFRequestItem *request, Queue *lltask_queue);
 static void infer_completion_callback(void *args);
 static inline void destroy_request_item(TFRequestItem **arg);
 
@@ -152,9 +152,10 @@ static TFInferRequest *tf_create_inference_request(void)
  *
  * @param request pointer to the TFRequestItem for inference
  * @retval DNN_SUCCESS if execution is successful
- * @retval DNN_ERROR if execution fails
+ * @retval AVERROR(EINVAL) if request is NULL
+ * @retval DNN_GENERIC_ERROR if execution fails
  */
-static DNNReturnType tf_start_inference(void *args)
+static int tf_start_inference(void *args)
 {
     TFRequestItem *request = args;
     TFInferRequest *infer_request = request->infer_request;
@@ -164,7 +165,7 @@ static DNNReturnType tf_start_inference(void *args)
 
     if (!request) {
         av_log(&tf_model->ctx, AV_LOG_ERROR, "TFRequestItem is NULL\n");
-        return DNN_ERROR;
+        return AVERROR(EINVAL);
     }
 
     TF_SessionRun(tf_model->session, NULL,
@@ -178,7 +179,7 @@ static DNNReturnType tf_start_inference(void *args)
         if (ff_safe_queue_push_back(tf_model->request_queue, request) < 0) {
             destroy_request_item(&request);
         }
-        return DNN_ERROR;
+        return DNN_GENERIC_ERROR;
     }
     return DNN_SUCCESS;
 }
@@ -202,14 +203,14 @@ static inline void destroy_request_item(TFRequestItem **arg) {
     av_freep(arg);
 }
 
-static DNNReturnType extract_lltask_from_task(TaskItem *task, Queue *lltask_queue)
+static int extract_lltask_from_task(TaskItem *task, Queue *lltask_queue)
 {
     TFModel *tf_model = task->model;
     TFContext *ctx = &tf_model->ctx;
     LastLevelTaskItem *lltask = av_malloc(sizeof(*lltask));
     if (!lltask) {
         av_log(ctx, AV_LOG_ERROR, "Unable to allocate space for LastLevelTaskItem\n");
-        return DNN_ERROR;
+        return AVERROR(ENOMEM);
     }
     task->inference_todo = 1;
     task->inference_done = 0;
@@ -217,7 +218,7 @@ static DNNReturnType extract_lltask_from_task(TaskItem *task, Queue *lltask_queu
     if (ff_queue_push_back(lltask_queue, lltask) < 0) {
         av_log(ctx, AV_LOG_ERROR, "Failed to push back lltask_queue.\n");
         av_freep(&lltask);
-        return DNN_ERROR;
+        return AVERROR(ENOMEM);
     }
     return DNN_SUCCESS;
 }
@@ -277,7 +278,7 @@ static TF_Tensor *allocate_input_tensor(const DNNData *input)
                              input_dims[1] * input_dims[2] * input_dims[3] * size);
 }
 
-static DNNReturnType get_input_tf(void *model, DNNData *input, const char *input_name)
+static int get_input_tf(void *model, DNNData *input, const char *input_name)
 {
     TFModel *tf_model = model;
     TFContext *ctx = &tf_model->ctx;
@@ -288,7 +289,7 @@ static DNNReturnType get_input_tf(void *model, DNNData *input, const char *input
     tf_output.oper = TF_GraphOperationByName(tf_model->graph, input_name);
     if (!tf_output.oper) {
         av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model\n", input_name);
-        return DNN_ERROR;
+        return AVERROR(EINVAL);
     }
 
     tf_output.index = 0;
@@ -300,7 +301,7 @@ static DNNReturnType get_input_tf(void *model, DNNData *input, const char *input
     if (TF_GetCode(status) != TF_OK){
         TF_DeleteStatus(status);
         av_log(ctx, AV_LOG_ERROR, "Failed to get input tensor shape: number of dimension incorrect\n");
-        return DNN_ERROR;
+        return DNN_GENERIC_ERROR;
     }
     TF_DeleteStatus(status);
 
@@ -313,10 +314,10 @@ static DNNReturnType get_input_tf(void *model, DNNData *input, const char *input
     return DNN_SUCCESS;
 }
 
-static DNNReturnType get_output_tf(void *model, const char *input_name, int input_width, int input_height,
+static int get_output_tf(void *model, const char *input_name, int input_width, int input_height,
                                    const char *output_name, int *output_width, int *output_height)
 {
-    DNNReturnType ret;
+    int ret;
     TFModel *tf_model = model;
     TFContext *ctx = &tf_model->ctx;
     TaskItem task;
@@ -329,20 +330,21 @@ static DNNReturnType get_output_tf(void *model, const char *input_name, int inpu
         .out_frame      = NULL,
     };
 
-    if (ff_dnn_fill_gettingoutput_task(&task, &exec_params, tf_model, input_height, input_width, ctx) != DNN_SUCCESS) {
+    ret = ff_dnn_fill_gettingoutput_task(&task, &exec_params, tf_model, input_height, input_width, ctx);
+    if (ret != DNN_SUCCESS) {
         goto err;
     }
 
-    if (extract_lltask_from_task(&task, tf_model->lltask_queue) != DNN_SUCCESS) {
+    ret = extract_lltask_from_task(&task, tf_model->lltask_queue);
+    if (ret != DNN_SUCCESS) {
         av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
-        ret = DNN_ERROR;
         goto err;
     }
 
     request = ff_safe_queue_pop_front(tf_model->request_queue);
     if (!request) {
         av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
-        ret = DNN_ERROR;
+        ret = AVERROR(EINVAL);
         goto err;
     }
 
@@ -386,7 +388,7 @@ static int hex_to_data(uint8_t *data, const char *p)
     return len;
 }
 
-static DNNReturnType load_tf_model(TFModel *tf_model, const char *model_filename)
+static int load_tf_model(TFModel *tf_model, const char *model_filename)
 {
     TFContext *ctx = &tf_model->ctx;
     TF_Buffer *graph_def;
@@ -407,7 +409,7 @@ static DNNReturnType load_tf_model(TFModel *tf_model, const char *model_filename
         */
         if (strncmp(tf_model->ctx.options.sess_config, "0x", 2) != 0) {
             av_log(ctx, AV_LOG_ERROR, "sess_config should start with '0x'\n");
-            return DNN_ERROR;
+            return AVERROR(EINVAL);
         }
         config = tf_model->ctx.options.sess_config + 2;
         sess_config_length = hex_to_data(NULL, config);
@@ -415,11 +417,11 @@ static DNNReturnType load_tf_model(TFModel *tf_model, const char *model_filename
         sess_config = av_mallocz(sess_config_length + AV_INPUT_BUFFER_PADDING_SIZE);
         if (!sess_config) {
             av_log(ctx, AV_LOG_ERROR, "failed to allocate memory\n");
-            return DNN_ERROR;
+            return AVERROR(ENOMEM);
         }
         if (hex_to_data(sess_config, config) < 0) {
             av_log(ctx, AV_LOG_ERROR, "failed to convert hex to data\n");
-            return DNN_ERROR;
+            return AVERROR(EINVAL);
         }
     }
 
@@ -427,7 +429,7 @@ static DNNReturnType load_tf_model(TFModel *tf_model, const char *model_filename
     if (!graph_def){
         av_log(ctx, AV_LOG_ERROR, "Failed to read model \"%s\" graph\n", model_filename);
         av_freep(&sess_config);
-        return DNN_ERROR;
+        return AVERROR(EINVAL);
     }
     tf_model->graph = TF_NewGraph();
     tf_model->status = TF_NewStatus();
@@ -440,7 +442,7 @@ static DNNReturnType load_tf_model(TFModel *tf_model, const char *model_filename
         TF_DeleteStatus(tf_model->status);
         av_log(ctx, AV_LOG_ERROR, "Failed to import serialized graph to model graph\n");
         av_freep(&sess_config);
-        return DNN_ERROR;
+        return DNN_GENERIC_ERROR;
     }
 
     init_op = TF_GraphOperationByName(tf_model->graph, "init");
@@ -455,7 +457,7 @@ static DNNReturnType load_tf_model(TFModel *tf_model, const char *model_filename
             TF_DeleteSessionOptions(sess_opts);
             av_log(ctx, AV_LOG_ERROR, "Failed to set config for sess options with %s\n",
                                       tf_model->ctx.options.sess_config);
-            return DNN_ERROR;
+            return DNN_GENERIC_ERROR;
         }
     }
 
@@ -466,7 +468,7 @@ static DNNReturnType load_tf_model(TFModel *tf_model, const char *model_filename
         TF_DeleteGraph(tf_model->graph);
         TF_DeleteStatus(tf_model->status);
         av_log(ctx, AV_LOG_ERROR, "Failed to create new session with model graph\n");
-        return DNN_ERROR;
+        return DNN_GENERIC_ERROR;
     }
 
     // Run initialization operation with name "init" if it is present in graph
@@ -481,7 +483,7 @@ static DNNReturnType load_tf_model(TFModel *tf_model, const char *model_filename
             TF_DeleteGraph(tf_model->graph);
             TF_DeleteStatus(tf_model->status);
             av_log(ctx, AV_LOG_ERROR, "Failed to run session when initializing\n");
-            return DNN_ERROR;
+            return DNN_GENERIC_ERROR;
         }
     }
 
@@ -490,7 +492,7 @@ static DNNReturnType load_tf_model(TFModel *tf_model, const char *model_filename
 
 #define NAME_BUFFER_SIZE 256
 
-static DNNReturnType add_conv_layer(TFModel *tf_model, TF_Operation *transpose_op, TF_Operation **cur_op,
+static int add_conv_layer(TFModel *tf_model, TF_Operation *transpose_op, TF_Operation **cur_op,
                                     ConvolutionalParams* params, const int layer)
 {
     TFContext *ctx = &tf_model->ctx;
@@ -594,7 +596,7 @@ static DNNReturnType add_conv_layer(TFModel *tf_model, TF_Operation *transpose_o
         break;
     default:
         avpriv_report_missing_feature(ctx, "convolutional activation function %d", params->activation);
-        return DNN_ERROR;
+        return AVERROR(ENOSYS);
     }
     input.oper = *cur_op;
     TF_AddInput(op_desc, input);
@@ -609,10 +611,10 @@ err:
     TF_DeleteTensor(kernel_tensor);
     TF_DeleteTensor(biases_tensor);
     av_log(ctx, AV_LOG_ERROR, "Failed to add conv layer %d\n", layer);
-    return DNN_ERROR;
+    return DNN_GENERIC_ERROR;
 }
 
-static DNNReturnType add_depth_to_space_layer(TFModel *tf_model, TF_Operation **cur_op,
+static int add_depth_to_space_layer(TFModel *tf_model, TF_Operation **cur_op,
                                               DepthToSpaceParams *params, const int layer)
 {
     TFContext *ctx = &tf_model->ctx;
@@ -630,13 +632,13 @@ static DNNReturnType add_depth_to_space_layer(TFModel *tf_model, TF_Operation **
     *cur_op = TF_FinishOperation(op_desc, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
         av_log(ctx, AV_LOG_ERROR, "Failed to add depth_to_space to layer %d\n", layer);
-        return DNN_ERROR;
+        return DNN_GENERIC_ERROR;
     }
 
     return DNN_SUCCESS;
 }
 
-static DNNReturnType add_pad_layer(TFModel *tf_model, TF_Operation **cur_op,
+static int add_pad_layer(TFModel *tf_model, TF_Operation **cur_op,
                                               LayerPadParams *params, const int layer)
 {
     TFContext *ctx = &tf_model->ctx;
@@ -666,13 +668,13 @@ static DNNReturnType add_pad_layer(TFModel *tf_model, TF_Operation **cur_op,
     if (TF_GetCode(tf_model->status) != TF_OK){
         TF_DeleteTensor(tensor);
         av_log(ctx, AV_LOG_ERROR, "Failed to set value for pad of layer %d\n", layer);
-        return DNN_ERROR;
+        return DNN_GENERIC_ERROR;
     }
     op = TF_FinishOperation(op_desc, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
         TF_DeleteTensor(tensor);
         av_log(ctx, AV_LOG_ERROR, "Failed to add pad to layer %d\n", layer);
-        return DNN_ERROR;
+        return DNN_GENERIC_ERROR;
     }
 
     op_desc = TF_NewOperation(tf_model->graph, "MirrorPad", "mirror_pad");
@@ -688,13 +690,13 @@ static DNNReturnType add_pad_layer(TFModel *tf_model, TF_Operation **cur_op,
     if (TF_GetCode(tf_model->status) != TF_OK){
         TF_DeleteTensor(tensor);
         av_log(ctx, AV_LOG_ERROR, "Failed to add mirror_pad to layer %d\n", layer);
-        return DNN_ERROR;
+        return DNN_GENERIC_ERROR;
     }
 
     return DNN_SUCCESS;
 }
 
-static DNNReturnType add_maximum_layer(TFModel *tf_model, TF_Operation **cur_op,
+static int add_maximum_layer(TFModel *tf_model, TF_Operation **cur_op,
                                        DnnLayerMaximumParams *params, const int layer)
 {
     TFContext *ctx = &tf_model->ctx;
@@ -716,13 +718,13 @@ static DNNReturnType add_maximum_layer(TFModel *tf_model, TF_Operation **cur_op,
     if (TF_GetCode(tf_model->status) != TF_OK){
         TF_DeleteTensor(tensor);
         av_log(ctx, AV_LOG_ERROR, "Failed to set value for maximum/y of layer %d", layer);
-        return DNN_ERROR;
+        return DNN_GENERIC_ERROR;
     }
     op = TF_FinishOperation(op_desc, tf_model->status);
     if (TF_GetCode(tf_model->status) != TF_OK){
         TF_DeleteTensor(tensor);
         av_log(ctx, AV_LOG_ERROR, "Failed to add maximum/y to layer %d\n", layer);
-        return DNN_ERROR;
+        return DNN_GENERIC_ERROR;
     }
 
     snprintf(name_buffer, NAME_BUFFER_SIZE, "maximum%d", layer);
@@ -737,13 +739,13 @@ static DNNReturnType add_maximum_layer(TFModel *tf_model, TF_Operation **cur_op,
     if (TF_GetCode(tf_model->status) != TF_OK){
         TF_DeleteTensor(tensor);
         av_log(ctx, AV_LOG_ERROR, "Failed to add maximum to layer %d\n", layer);
-        return DNN_ERROR;
+        return DNN_GENERIC_ERROR;
     }
 
     return DNN_SUCCESS;
 }
 
-static DNNReturnType load_native_model(TFModel *tf_model, const char *model_filename)
+static int load_native_model(TFModel *tf_model, const char *model_filename)
 {
     TFContext *ctx = &tf_model->ctx;
     int32_t layer;
@@ -755,14 +757,14 @@ static DNNReturnType load_native_model(TFModel *tf_model, const char *model_file
     int32_t *transpose_perm;
     int64_t transpose_perm_shape[] = {4};
     int64_t input_shape[] = {1, -1, -1, -1};
-    DNNReturnType layer_add_res;
+    int layer_add_res;
     DNNModel *model = NULL;
     NativeModel *native_model;
 
     model = ff_dnn_load_model_native(model_filename, DFT_PROCESS_FRAME, NULL, NULL);
     if (!model){
         av_log(ctx, AV_LOG_ERROR, "Failed to load native model\n");
-        return DNN_ERROR;
+        return AVERROR(EINVAL);
     }
 
     native_model = model->model;
@@ -775,7 +777,7 @@ static DNNReturnType load_native_model(TFModel *tf_model, const char *model_file
         TF_DeleteGraph(tf_model->graph); \
         TF_DeleteStatus(tf_model->status); \
         av_log(ctx, AV_LOG_ERROR, "Failed to set value or add operator to layer\n"); \
-        return DNN_ERROR; \
+        return DNN_GENERIC_ERROR; \
     }
 
     op_desc = TF_NewOperation(tf_model->graph, "Placeholder", "x");
@@ -942,19 +944,21 @@ err:
     return NULL;
 }
 
-static DNNReturnType fill_model_input_tf(TFModel *tf_model, TFRequestItem *request) {
+static int fill_model_input_tf(TFModel *tf_model, TFRequestItem *request) {
     DNNData input;
     LastLevelTaskItem *lltask;
     TaskItem *task;
     TFInferRequest *infer_request;
     TFContext *ctx = &tf_model->ctx;
+    int ret = 0;
 
     lltask = ff_queue_pop_front(tf_model->lltask_queue);
     av_assert0(lltask);
     task = lltask->task;
     request->lltask = lltask;
 
-    if (get_input_tf(tf_model, &input, task->input_name) != DNN_SUCCESS) {
+    ret = get_input_tf(tf_model, &input, task->input_name);
+    if (ret != DNN_SUCCESS) {
         goto err;
     }
 
@@ -965,12 +969,14 @@ static DNNReturnType fill_model_input_tf(TFModel *tf_model, TFRequestItem *reque
     infer_request->tf_input = av_malloc(sizeof(TF_Output));
     if (!infer_request->tf_input) {
         av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for input tensor\n");
+        ret = AVERROR(ENOMEM);
         goto err;
     }
 
     infer_request->tf_input->oper = TF_GraphOperationByName(tf_model->graph, task->input_name);
     if (!infer_request->tf_input->oper){
         av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model\n", task->input_name);
+        ret = DNN_GENERIC_ERROR;
         goto err;
     }
     infer_request->tf_input->index = 0;
@@ -978,6 +984,7 @@ static DNNReturnType fill_model_input_tf(TFModel *tf_model, TFRequestItem *reque
     infer_request->input_tensor = allocate_input_tensor(&input);
     if (!infer_request->input_tensor){
         av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for input tensor\n");
+        ret = AVERROR(ENOMEM);
         goto err;
     }
     input.data = (float *)TF_TensorData(infer_request->input_tensor);
@@ -1003,12 +1010,14 @@ static DNNReturnType fill_model_input_tf(TFModel *tf_model, TFRequestItem *reque
     infer_request->tf_outputs = av_malloc_array(task->nb_output, sizeof(TF_Output));
     if (infer_request->tf_outputs == NULL) {
         av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for *tf_outputs\n");
+        ret = AVERROR(ENOMEM);
         goto err;
     }
 
     infer_request->output_tensors = av_calloc(task->nb_output, sizeof(*infer_request->output_tensors));
     if (!infer_request->output_tensors) {
         av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for output tensor\n");
+        ret = AVERROR(ENOMEM);
         goto err;
     }
 
@@ -1017,6 +1026,7 @@ static DNNReturnType fill_model_input_tf(TFModel *tf_model, TFRequestItem *reque
         infer_request->tf_outputs[i].oper = TF_GraphOperationByName(tf_model->graph, task->output_names[i]);
         if (!infer_request->tf_outputs[i].oper) {
             av_log(ctx, AV_LOG_ERROR, "Could not find output \"%s\" in model\n", task->output_names[i]);
+            ret = DNN_GENERIC_ERROR;
             goto err;
         }
         infer_request->tf_outputs[i].index = 0;
@@ -1025,7 +1035,7 @@ static DNNReturnType fill_model_input_tf(TFModel *tf_model, TFRequestItem *reque
     return DNN_SUCCESS;
 err:
     tf_free_request(infer_request);
-    return DNN_ERROR;
+    return ret;
 }
 
 static void infer_completion_callback(void *args) {
@@ -1086,12 +1096,13 @@ err:
     }
 }
 
-static DNNReturnType execute_model_tf(TFRequestItem *request, Queue *lltask_queue)
+static int execute_model_tf(TFRequestItem *request, Queue *lltask_queue)
 {
     TFModel *tf_model;
     TFContext *ctx;
     LastLevelTaskItem *lltask;
     TaskItem *task;
+    int ret = 0;
 
     if (ff_queue_size(lltask_queue) == 0) {
         destroy_request_item(&request);
@@ -1103,7 +1114,8 @@ static DNNReturnType execute_model_tf(TFRequestItem *request, Queue *lltask_queu
     tf_model = task->model;
     ctx = &tf_model->ctx;
 
-    if (fill_model_input_tf(tf_model, request) != DNN_SUCCESS) {
+    ret = fill_model_input_tf(tf_model, request);
+    if (ret != DNN_SUCCESS) {
         goto err;
     }
 
@@ -1112,58 +1124,64 @@ static DNNReturnType execute_model_tf(TFRequestItem *request, Queue *lltask_queu
             goto err;
         }
         return DNN_SUCCESS;
-    } else {
-        if (tf_start_inference(request) != DNN_SUCCESS) {
+    }
+    else {
+        ret = tf_start_inference(request);
+        if (ret != DNN_SUCCESS) {
             goto err;
         }
         infer_completion_callback(request);
-        return (task->inference_done == task->inference_todo) ? DNN_SUCCESS : DNN_ERROR;
+        return (task->inference_done == task->inference_todo) ? DNN_SUCCESS : DNN_GENERIC_ERROR;
     }
 err:
     tf_free_request(request->infer_request);
     if (ff_safe_queue_push_back(tf_model->request_queue, request) < 0) {
         destroy_request_item(&request);
     }
-    return DNN_ERROR;
+    return ret;
 }
 
-DNNReturnType ff_dnn_execute_model_tf(const DNNModel *model, DNNExecBaseParams *exec_params)
+int ff_dnn_execute_model_tf(const DNNModel *model, DNNExecBaseParams *exec_params)
 {
     TFModel *tf_model = model->model;
     TFContext *ctx = &tf_model->ctx;
     TaskItem *task;
     TFRequestItem *request;
+    int ret = 0;
 
-    if (ff_check_exec_params(ctx, DNN_TF, model->func_type, exec_params) != 0) {
-        return DNN_ERROR;
+    ret = ff_check_exec_params(ctx, DNN_TF, model->func_type, exec_params);
+    if (ret != 0) {
+        return ret;
     }
 
     task = av_malloc(sizeof(*task));
     if (!task) {
         av_log(ctx, AV_LOG_ERROR, "unable to alloc memory for task item.\n");
-        return DNN_ERROR;
+        return AVERROR(ENOMEM);
     }
 
-    if (ff_dnn_fill_task(task, exec_params, tf_model, ctx->options.async, 1) != DNN_SUCCESS) {
+    ret = ff_dnn_fill_task(task, exec_params, tf_model, ctx->options.async, 1);
+    if (ret != DNN_SUCCESS) {
         av_freep(&task);
-        return DNN_ERROR;
+        return ret;
     }
 
     if (ff_queue_push_back(tf_model->task_queue, task) < 0) {
         av_freep(&task);
         av_log(ctx, AV_LOG_ERROR, "unable to push back task_queue.\n");
-        return DNN_ERROR;
+        return AVERROR(ENOMEM);
     }
 
-    if (extract_lltask_from_task(task, tf_model->lltask_queue) != DNN_SUCCESS) {
+    ret = extract_lltask_from_task(task, tf_model->lltask_queue);
+    if (ret != DNN_SUCCESS) {
         av_log(ctx, AV_LOG_ERROR, "unable to extract last level task from task.\n");
-        return DNN_ERROR;
+        return ret;
     }
 
     request = ff_safe_queue_pop_front(tf_model->request_queue);
     if (!request) {
         av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
-        return DNN_ERROR;
+        return AVERROR(EINVAL);
     }
     return execute_model_tf(request, tf_model->lltask_queue);
 }
@@ -1174,12 +1192,12 @@ DNNAsyncStatusType ff_dnn_get_result_tf(const DNNModel *model, AVFrame **in, AVF
     return ff_dnn_get_result_common(tf_model->task_queue, in, out);
 }
 
-DNNReturnType ff_dnn_flush_tf(const DNNModel *model)
+int ff_dnn_flush_tf(const DNNModel *model)
 {
     TFModel *tf_model = model->model;
     TFContext *ctx = &tf_model->ctx;
     TFRequestItem *request;
-    DNNReturnType ret;
+    int ret;
 
     if (ff_queue_size(tf_model->lltask_queue) == 0) {
         // no pending task need to flush
@@ -1189,7 +1207,7 @@ DNNReturnType ff_dnn_flush_tf(const DNNModel *model)
     request = ff_safe_queue_pop_front(tf_model->request_queue);
     if (!request) {
         av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
-        return DNN_ERROR;
+        return AVERROR(EINVAL);
     }
 
     ret = fill_model_input_tf(tf_model, request);
diff --git a/libavfilter/dnn/dnn_backend_tf.h b/libavfilter/dnn/dnn_backend_tf.h
index f14ea8c47a..0b63a4b6d2 100644
--- a/libavfilter/dnn/dnn_backend_tf.h
+++ b/libavfilter/dnn/dnn_backend_tf.h
@@ -31,9 +31,9 @@
 
 DNNModel *ff_dnn_load_model_tf(const char *model_filename, DNNFunctionType func_type, const char *options, AVFilterContext *filter_ctx);
 
-DNNReturnType ff_dnn_execute_model_tf(const DNNModel *model, DNNExecBaseParams *exec_params);
+int ff_dnn_execute_model_tf(const DNNModel *model, DNNExecBaseParams *exec_params);
 DNNAsyncStatusType ff_dnn_get_result_tf(const DNNModel *model, AVFrame **in, AVFrame **out);
-DNNReturnType ff_dnn_flush_tf(const DNNModel *model);
+int ff_dnn_flush_tf(const DNNModel *model);
 
 void ff_dnn_free_model_tf(DNNModel **model);
 
-- 
2.32.0

_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".

^ permalink raw reply	[flat|nested] 9+ messages in thread

* [FFmpeg-devel] [PATCH V2 6/8] lavfi/dnn_backend_native: Return Specific Error Codes
  2022-03-02 18:05 [FFmpeg-devel] [PATCH V2 1/8] libavfilter: Prepare to handle specific error codes in DNN Filters Shubhanshu Saxena
                   ` (3 preceding siblings ...)
  2022-03-02 18:05 ` [FFmpeg-devel] [PATCH V2 5/8] lavfi/dnn_backend_tf: " Shubhanshu Saxena
@ 2022-03-02 18:05 ` Shubhanshu Saxena
  2022-03-02 18:05 ` [FFmpeg-devel] [PATCH V2 7/8] lavfi/dnn_backend_common: Return specific error codes Shubhanshu Saxena
  2022-03-02 18:05 ` [FFmpeg-devel] [PATCH V2 8/8] libavfilter: Remove DNNReturnType from DNN Module Shubhanshu Saxena
  6 siblings, 0 replies; 9+ messages in thread
From: Shubhanshu Saxena @ 2022-03-02 18:05 UTC (permalink / raw)
  To: ffmpeg-devel; +Cc: Shubhanshu Saxena

Switch to returning specific error codes or DNN_GENERIC_ERROR
when an error is encountered.
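
A minimal sketch (names hypothetical, not from the patch) of the
propagation pattern the native backend uses below: record the first
specific error code in 'ret', funnel every exit through one cleanup
label, and return 0 on success:

    #include <errno.h>
    #include "libavutil/error.h"
    #include "libavutil/mem.h"

    static int execute_example(int layers_num)
    {
        int ret = 0;
        void *data = av_malloc(64);

        if (!data) {
            ret = AVERROR(ENOMEM);   /* allocation failure */
            goto err;
        }
        if (layers_num <= 0) {
            ret = AVERROR(EINVAL);   /* malformed model */
            goto err;
        }
        /* ... execute each layer, setting ret and jumping to err on failure ... */
    err:
        av_freep(&data);
        return ret;
    }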

Signed-off-by: Shubhanshu Saxena <shubhanshu.e01@gmail.com>
---
 libavfilter/dnn/dnn_backend_native.c | 82 +++++++++++++++-------------
 libavfilter/dnn/dnn_backend_native.h |  4 +-
 2 files changed, 45 insertions(+), 41 deletions(-)

diff --git a/libavfilter/dnn/dnn_backend_native.c b/libavfilter/dnn/dnn_backend_native.c
index 13436c0484..f29e0e06bd 100644
--- a/libavfilter/dnn/dnn_backend_native.c
+++ b/libavfilter/dnn/dnn_backend_native.c
@@ -46,9 +46,9 @@ static const AVClass dnn_native_class = {
     .category   = AV_CLASS_CATEGORY_FILTER,
 };
 
-static DNNReturnType execute_model_native(Queue *lltask_queue);
+static int execute_model_native(Queue *lltask_queue);
 
-static DNNReturnType extract_lltask_from_task(TaskItem *task, Queue *lltask_queue)
+static int extract_lltask_from_task(TaskItem *task, Queue *lltask_queue)
 {
     NativeModel *native_model = task->model;
     NativeContext *ctx = &native_model->ctx;
@@ -56,7 +56,7 @@ static DNNReturnType extract_lltask_from_task(TaskItem *task, Queue *lltask_queu
 
     if (!lltask) {
         av_log(ctx, AV_LOG_ERROR, "Unable to allocate space for LastLevelTaskItem\n");
-        return DNN_ERROR;
+        return AVERROR(ENOMEM);
     }
     task->inference_todo = 1;
     task->inference_done = 0;
@@ -65,12 +65,12 @@ static DNNReturnType extract_lltask_from_task(TaskItem *task, Queue *lltask_queu
     if (ff_queue_push_back(lltask_queue, lltask) < 0) {
         av_log(ctx, AV_LOG_ERROR, "Failed to push back lltask_queue.\n");
         av_freep(&lltask);
-        return DNN_ERROR;
+        return AVERROR(ENOMEM);
     }
     return DNN_SUCCESS;
 }
 
-static DNNReturnType get_input_native(void *model, DNNData *input, const char *input_name)
+static int get_input_native(void *model, DNNData *input, const char *input_name)
 {
     NativeModel *native_model = model;
     NativeContext *ctx = &native_model->ctx;
@@ -80,7 +80,7 @@ static DNNReturnType get_input_native(void *model, DNNData *input, const char *i
         if (strcmp(oprd->name, input_name) == 0) {
             if (oprd->type != DOT_INPUT) {
                 av_log(ctx, AV_LOG_ERROR, "Found \"%s\" in model, but it is not input node\n", input_name);
-                return DNN_ERROR;
+                return AVERROR(EINVAL);
             }
             input->dt = oprd->data_type;
             av_assert0(oprd->dims[0] == 1);
@@ -93,13 +93,13 @@ static DNNReturnType get_input_native(void *model, DNNData *input, const char *i
 
     // do not find the input operand
     av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model\n", input_name);
-    return DNN_ERROR;
+    return AVERROR(EINVAL);
 }
 
-static DNNReturnType get_output_native(void *model, const char *input_name, int input_width, int input_height,
+static int get_output_native(void *model, const char *input_name, int input_width, int input_height,
                                        const char *output_name, int *output_width, int *output_height)
 {
-    DNNReturnType ret = 0;
+    int ret = 0;
     NativeModel *native_model = model;
     NativeContext *ctx = &native_model->ctx;
     TaskItem task;
@@ -111,14 +111,14 @@ static DNNReturnType get_output_native(void *model, const char *input_name, int
         .out_frame      = NULL,
     };
 
-    if (ff_dnn_fill_gettingoutput_task(&task, &exec_params, native_model, input_height, input_width, ctx) != DNN_SUCCESS) {
-        ret = DNN_ERROR;
+    ret = ff_dnn_fill_gettingoutput_task(&task, &exec_params, native_model, input_height, input_width, ctx);
+    if (ret != DNN_SUCCESS) {
         goto err;
     }
 
-    if (extract_lltask_from_task(&task, native_model->lltask_queue) != DNN_SUCCESS) {
+    ret = extract_lltask_from_task(&task, native_model->lltask_queue);
+    if (ret != DNN_SUCCESS) {
         av_log(ctx, AV_LOG_ERROR, "unable to extract last level task from task.\n");
-        ret = DNN_ERROR;
         goto err;
     }
 
@@ -297,7 +297,7 @@ fail:
     return NULL;
 }
 
-static DNNReturnType execute_model_native(Queue *lltask_queue)
+static int execute_model_native(Queue *lltask_queue)
 {
     NativeModel *native_model = NULL;
     NativeContext *ctx = NULL;
@@ -306,12 +306,12 @@ static DNNReturnType execute_model_native(Queue *lltask_queue)
     DnnOperand *oprd = NULL;
     LastLevelTaskItem *lltask = NULL;
     TaskItem *task = NULL;
-    DNNReturnType ret = 0;
+    int ret = 0;
 
     lltask = ff_queue_pop_front(lltask_queue);
     if (!lltask) {
         av_log(NULL, AV_LOG_ERROR, "Failed to get LastLevelTaskItem\n");
-        ret = DNN_ERROR;
+        ret = AVERROR(EINVAL);
         goto err;
     }
     task = lltask->task;
@@ -320,7 +320,7 @@ static DNNReturnType execute_model_native(Queue *lltask_queue)
 
     if (native_model->layers_num <= 0 || native_model->operands_num <= 0) {
         av_log(ctx, AV_LOG_ERROR, "No operands or layers in model\n");
-        ret = DNN_ERROR;
+        ret = AVERROR(EINVAL);
         goto err;
     }
 
@@ -329,7 +329,7 @@ static DNNReturnType execute_model_native(Queue *lltask_queue)
         if (strcmp(oprd->name, task->input_name) == 0) {
             if (oprd->type != DOT_INPUT) {
                 av_log(ctx, AV_LOG_ERROR, "Found \"%s\" in model, but it is not input node\n", task->input_name);
-                ret = DNN_ERROR;
+                ret = AVERROR(EINVAL);
                 goto err;
             }
             break;
@@ -338,7 +338,7 @@ static DNNReturnType execute_model_native(Queue *lltask_queue)
     }
     if (!oprd) {
         av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model\n", task->input_name);
-        ret = DNN_ERROR;
+        ret = AVERROR(EINVAL);
         goto err;
     }
 
@@ -349,13 +349,13 @@ static DNNReturnType execute_model_native(Queue *lltask_queue)
     oprd->length = ff_calculate_operand_data_length(oprd);
     if (oprd->length <= 0) {
         av_log(ctx, AV_LOG_ERROR, "The input data length overflow\n");
-        ret = DNN_ERROR;
+        ret = AVERROR(EINVAL);
         goto err;
     }
     oprd->data = av_malloc(oprd->length);
     if (!oprd->data) {
         av_log(ctx, AV_LOG_ERROR, "Failed to malloc memory for input data\n");
-        ret = DNN_ERROR;
+        ret = AVERROR(ENOMEM);
         goto err;
     }
 
@@ -376,19 +376,19 @@ static DNNReturnType execute_model_native(Queue *lltask_queue)
         // currently, the filter does not need multiple outputs,
         // so we just pending the support until we really need it.
         avpriv_report_missing_feature(ctx, "multiple outputs");
-        ret = DNN_ERROR;
+        ret = AVERROR(ENOSYS);
         goto err;
     }
 
     for (layer = 0; layer < native_model->layers_num; ++layer){
         DNNLayerType layer_type = native_model->layers[layer].type;
-        if (ff_layer_funcs[layer_type].pf_exec(native_model->operands,
-                                            native_model->layers[layer].input_operand_indexes,
-                                            native_model->layers[layer].output_operand_index,
-                                            native_model->layers[layer].params,
-                                            &native_model->ctx) == DNN_ERROR) {
+        ret = ff_layer_funcs[layer_type].pf_exec(native_model->operands,
+                                                 native_model->layers[layer].input_operand_indexes,
+                                                 native_model->layers[layer].output_operand_index,
+                                                 native_model->layers[layer].params,
+                                                 &native_model->ctx);
+        if (ret != DNN_SUCCESS) {
             av_log(ctx, AV_LOG_ERROR, "Failed to execute model\n");
-            ret = DNN_ERROR;
             goto err;
         }
     }
@@ -405,7 +405,7 @@ static DNNReturnType execute_model_native(Queue *lltask_queue)
 
         if (oprd == NULL) {
             av_log(ctx, AV_LOG_ERROR, "Could not find output in model\n");
-            ret = DNN_ERROR;
+            ret = AVERROR(EINVAL);
             goto err;
         }
 
@@ -432,42 +432,46 @@ err:
     return ret;
 }
 
-DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNExecBaseParams *exec_params)
+int ff_dnn_execute_model_native(const DNNModel *model, DNNExecBaseParams *exec_params)
 {
     NativeModel *native_model = model->model;
     NativeContext *ctx = &native_model->ctx;
     TaskItem *task;
+    int ret = 0;
 
-    if (ff_check_exec_params(ctx, DNN_NATIVE, model->func_type, exec_params) != 0) {
-        return DNN_ERROR;
+    ret = ff_check_exec_params(ctx, DNN_NATIVE, model->func_type, exec_params);
+    if (ret != 0) {
+        return ret;
     }
 
     task = av_malloc(sizeof(*task));
     if (!task) {
         av_log(ctx, AV_LOG_ERROR, "unable to alloc memory for task item.\n");
-        return DNN_ERROR;
+        return AVERROR(ENOMEM);
     }
 
-    if (ff_dnn_fill_task(task, exec_params, native_model, ctx->options.async, 1) != DNN_SUCCESS) {
+    ret = ff_dnn_fill_task(task, exec_params, native_model, ctx->options.async, 1);
+    if (ret != DNN_SUCCESS) {
         av_freep(&task);
-        return DNN_ERROR;
+        return ret;
     }
 
     if (ff_queue_push_back(native_model->task_queue, task) < 0) {
         av_freep(&task);
         av_log(ctx, AV_LOG_ERROR, "unable to push back task_queue.\n");
-        return DNN_ERROR;
+        return AVERROR(ENOMEM);
     }
 
-    if (extract_lltask_from_task(task, native_model->lltask_queue) != DNN_SUCCESS) {
+    ret = extract_lltask_from_task(task, native_model->lltask_queue);
+    if (ret != DNN_SUCCESS) {
         av_log(ctx, AV_LOG_ERROR, "unable to extract last level task from task.\n");
-        return DNN_ERROR;
+        return ret;
     }
 
     return execute_model_native(native_model->lltask_queue);
 }
 
-DNNReturnType ff_dnn_flush_native(const DNNModel *model)
+int ff_dnn_flush_native(const DNNModel *model)
 {
     NativeModel *native_model = model->model;
 
diff --git a/libavfilter/dnn/dnn_backend_native.h b/libavfilter/dnn/dnn_backend_native.h
index e8017ee4b4..75bd9a44f7 100644
--- a/libavfilter/dnn/dnn_backend_native.h
+++ b/libavfilter/dnn/dnn_backend_native.h
@@ -134,11 +134,11 @@ typedef struct NativeModel{
 
 DNNModel *ff_dnn_load_model_native(const char *model_filename, DNNFunctionType func_type, const char *options, AVFilterContext *filter_ctx);
 
-DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNExecBaseParams *exec_params);
+int ff_dnn_execute_model_native(const DNNModel *model, DNNExecBaseParams *exec_params);
 
 DNNAsyncStatusType ff_dnn_get_result_native(const DNNModel *model, AVFrame **in, AVFrame **out);
 
-DNNReturnType ff_dnn_flush_native(const DNNModel *model);
+int ff_dnn_flush_native(const DNNModel *model);
 
 void ff_dnn_free_model_native(DNNModel **model);
 
-- 
2.32.0

_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".

^ permalink raw reply	[flat|nested] 9+ messages in thread

* [FFmpeg-devel] [PATCH V2 7/8] lavfi/dnn_backend_common: Return specific error codes
  2022-03-02 18:05 [FFmpeg-devel] [PATCH V2 1/8] libavfilter: Prepare to handle specific error codes in DNN Filters Shubhanshu Saxena
                   ` (4 preceding siblings ...)
  2022-03-02 18:05 ` [FFmpeg-devel] [PATCH V2 6/8] lavfi/dnn_backend_native: " Shubhanshu Saxena
@ 2022-03-02 18:05 ` Shubhanshu Saxena
  2022-03-02 18:05 ` [FFmpeg-devel] [PATCH V2 8/8] libavfilter: Remove DNNReturnType from DNN Module Shubhanshu Saxena
  6 siblings, 0 replies; 9+ messages in thread
From: Shubhanshu Saxena @ 2022-03-02 18:05 UTC (permalink / raw)
  To: ffmpeg-devel; +Cc: Shubhanshu Saxena

Switch to returning specific error codes or DNN_GENERIC_ERROR
when an error is encountered in the common DNN backend functions.
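
To sketch the distinction this patch draws (hypothetical helper, not
from the patch): NULL or otherwise bad arguments report EINVAL, while a
recognized but unimplemented feature, such as multiple outputs, reports
ENOSYS:

    #include <errno.h>
    #include "libavutil/error.h"

    static int check_exec_params_example(const void *exec_params, int nb_output)
    {
        if (!exec_params)
            return AVERROR(EINVAL);  /* invalid input from the caller */
        if (nb_output != 1)
            return AVERROR(ENOSYS);  /* feature not implemented yet */
        return 0;
    }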

Signed-off-by: Shubhanshu Saxena <shubhanshu.e01@gmail.com>
---
 libavfilter/dnn/dnn_backend_common.c | 35 ++++++++++++++--------------
 libavfilter/dnn/dnn_backend_common.h | 22 +++++++----------
 2 files changed, 27 insertions(+), 30 deletions(-)

diff --git a/libavfilter/dnn/dnn_backend_common.c b/libavfilter/dnn/dnn_backend_common.c
index 6a9c4cc87f..64ed441415 100644
--- a/libavfilter/dnn/dnn_backend_common.c
+++ b/libavfilter/dnn/dnn_backend_common.c
@@ -47,19 +47,19 @@ int ff_check_exec_params(void *ctx, DNNBackendType backend, DNNFunctionType func
         // currently, the filter does not need multiple outputs,
         // so we just pending the support until we really need it.
         avpriv_report_missing_feature(ctx, "multiple outputs");
-        return AVERROR(EINVAL);
+        return AVERROR(ENOSYS);
     }
 
     return 0;
 }
 
-DNNReturnType ff_dnn_fill_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int async, int do_ioproc) {
+int ff_dnn_fill_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int async, int do_ioproc) {
     if (task == NULL || exec_params == NULL || backend_model == NULL)
-        return DNN_ERROR;
+        return AVERROR(EINVAL);
     if (do_ioproc != 0 && do_ioproc != 1)
-        return DNN_ERROR;
+        return AVERROR(EINVAL);
     if (async != 0 && async != 1)
-        return DNN_ERROR;
+        return AVERROR(EINVAL);
 
     task->do_ioproc = do_ioproc;
     task->async = async;
@@ -89,17 +89,17 @@ static void *async_thread_routine(void *args)
     return DNN_ASYNC_SUCCESS;
 }
 
-DNNReturnType ff_dnn_async_module_cleanup(DNNAsyncExecModule *async_module)
+int ff_dnn_async_module_cleanup(DNNAsyncExecModule *async_module)
 {
     void *status = 0;
     if (!async_module) {
-        return DNN_ERROR;
+        return AVERROR(EINVAL);
     }
 #if HAVE_PTHREAD_CANCEL
     pthread_join(async_module->thread_id, &status);
     if (status == DNN_ASYNC_FAIL) {
         av_log(NULL, AV_LOG_ERROR, "Last Inference Failed.\n");
-        return DNN_ERROR;
+        return DNN_GENERIC_ERROR;
     }
 #endif
     async_module->start_inference = NULL;
@@ -108,30 +108,31 @@ DNNReturnType ff_dnn_async_module_cleanup(DNNAsyncExecModule *async_module)
     return DNN_SUCCESS;
 }
 
-DNNReturnType ff_dnn_start_inference_async(void *ctx, DNNAsyncExecModule *async_module)
+int ff_dnn_start_inference_async(void *ctx, DNNAsyncExecModule *async_module)
 {
     int ret;
     void *status = 0;
 
     if (!async_module) {
         av_log(ctx, AV_LOG_ERROR, "async_module is null when starting async inference.\n");
-        return DNN_ERROR;
+        return AVERROR(EINVAL);
     }
 
 #if HAVE_PTHREAD_CANCEL
     pthread_join(async_module->thread_id, &status);
     if (status == DNN_ASYNC_FAIL) {
         av_log(ctx, AV_LOG_ERROR, "Unable to start inference as previous inference failed.\n");
-        return DNN_ERROR;
+        return DNN_GENERIC_ERROR;
     }
     ret = pthread_create(&async_module->thread_id, NULL, async_thread_routine, async_module);
     if (ret != 0) {
         av_log(ctx, AV_LOG_ERROR, "Unable to start async inference.\n");
-        return DNN_ERROR;
+        return ret;
     }
 #else
-    if (async_module->start_inference(async_module->args) != DNN_SUCCESS) {
-        return DNN_ERROR;
+    ret = async_module->start_inference(async_module->args);
+    if (ret != DNN_SUCCESS) {
+        return ret;
     }
     async_module->callback(async_module->args);
 #endif
@@ -158,7 +159,7 @@ DNNAsyncStatusType ff_dnn_get_result_common(Queue *task_queue, AVFrame **in, AVF
     return DAST_SUCCESS;
 }
 
-DNNReturnType ff_dnn_fill_gettingoutput_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int input_height, int input_width, void *ctx)
+int ff_dnn_fill_gettingoutput_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int input_height, int input_width, void *ctx)
 {
     AVFrame *in_frame = NULL;
     AVFrame *out_frame = NULL;
@@ -166,14 +167,14 @@ DNNReturnType ff_dnn_fill_gettingoutput_task(TaskItem *task, DNNExecBaseParams *
     in_frame = av_frame_alloc();
     if (!in_frame) {
         av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for input frame\n");
-        return DNN_ERROR;
+        return AVERROR(ENOMEM);
     }
 
     out_frame = av_frame_alloc();
     if (!out_frame) {
         av_frame_free(&in_frame);
         av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for output frame\n");
-        return DNN_ERROR;
+        return AVERROR(ENOMEM);
     }
 
     in_frame->width = input_width;
diff --git a/libavfilter/dnn/dnn_backend_common.h b/libavfilter/dnn/dnn_backend_common.h
index 6b6a5e21ae..fa79caee1f 100644
--- a/libavfilter/dnn/dnn_backend_common.h
+++ b/libavfilter/dnn/dnn_backend_common.h
@@ -60,7 +60,7 @@ typedef struct DNNAsyncExecModule {
      * Synchronous inference function for the backend
      * with corresponding request item as the argument.
      */
-    DNNReturnType (*start_inference)(void *request);
+    int (*start_inference)(void *request);
 
     /**
      * Completion Callback for the backend.
@@ -92,20 +92,18 @@ int ff_check_exec_params(void *ctx, DNNBackendType backend, DNNFunctionType func
  * @param async flag for async execution. Must be 0 or 1
  * @param do_ioproc flag for IO processing. Must be 0 or 1
  *
- * @retval DNN_SUCCESS if successful
- * @retval DNN_ERROR if flags are invalid or any parameter is NULL
+ * @returns DNN_SUCCESS if successful or error code otherwise.
  */
-DNNReturnType ff_dnn_fill_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int async, int do_ioproc);
+int ff_dnn_fill_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int async, int do_ioproc);
 
 /**
  * Join the Async Execution thread and set module pointers to NULL.
  *
  * @param async_module pointer to DNNAsyncExecModule module
  *
- * @retval DNN_SUCCESS if successful
- * @retval DNN_ERROR if async_module is NULL
+ * @returns DNN_SUCCESS if successful or error code otherwise.
  */
-DNNReturnType ff_dnn_async_module_cleanup(DNNAsyncExecModule *async_module);
+int ff_dnn_async_module_cleanup(DNNAsyncExecModule *async_module);
 
 /**
  * Start asynchronous inference routine for the TensorFlow
@@ -119,10 +117,9 @@ DNNReturnType ff_dnn_async_module_cleanup(DNNAsyncExecModule *async_module);
  * @param ctx pointer to the backend context
  * @param async_module pointer to DNNAsyncExecModule module
  *
- * @retval DNN_SUCCESS on the start of async inference.
- * @retval DNN_ERROR in case async inference cannot be started
+ * @returns DNN_SUCCESS on the start of async inference or error code otherwise.
  */
-DNNReturnType ff_dnn_start_inference_async(void *ctx, DNNAsyncExecModule *async_module);
+int ff_dnn_start_inference_async(void *ctx, DNNAsyncExecModule *async_module);
 
 /**
  * Extract input and output frame from the Task Queue after
@@ -149,9 +146,8 @@ DNNAsyncStatusType ff_dnn_get_result_common(Queue *task_queue, AVFrame **in, AVF
  * @param input_width width of input frame
  * @param ctx pointer to the backend context
  *
- * @retval DNN_SUCCESS if successful
- * @retval DNN_ERROR if allocation fails
+ * @returns DNN_SUCCESS if successful or error code otherwise.
  */
-DNNReturnType ff_dnn_fill_gettingoutput_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int input_height, int input_width, void *ctx);
+int ff_dnn_fill_gettingoutput_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int input_height, int input_width, void *ctx);
 
 #endif
-- 
2.32.0

_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".

^ permalink raw reply	[flat|nested] 9+ messages in thread

* [FFmpeg-devel] [PATCH V2 8/8] libavfilter: Remove DNNReturnType from DNN Module
  2022-03-02 18:05 [FFmpeg-devel] [PATCH V2 1/8] libavfilter: Prepare to handle specific error codes in DNN Filters Shubhanshu Saxena
                   ` (5 preceding siblings ...)
  2022-03-02 18:05 ` [FFmpeg-devel] [PATCH V2 7/8] lavfi/dnn_backend_common: Return specific error codes Shubhanshu Saxena
@ 2022-03-02 18:05 ` Shubhanshu Saxena
  2022-03-08 14:18   ` Guo, Yejun
  6 siblings, 1 reply; 9+ messages in thread
From: Shubhanshu Saxena @ 2022-03-02 18:05 UTC (permalink / raw)
  To: ffmpeg-devel; +Cc: Shubhanshu Saxena

This patch removes all occurrences of DNNReturnType from the DNN module.
It replaces DNN_SUCCESS with 0 (the two are equivalent), so functions
that previously returned DNNReturnType now return 0 on success and a
negative error code otherwise.
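
A sketch of the resulting caller side (hypothetical wrapper, assuming
only libavutil): since any non-zero return is now a negative error code,
a filter can log it with av_err2str() and propagate it unchanged:

    #include "libavutil/error.h"
    #include "libavutil/log.h"

    static int filter_execute_example(int (*execute_model)(void *), void *model)
    {
        int ret = execute_model(model);
        if (ret < 0)
            av_log(NULL, AV_LOG_ERROR, "inference failed: %s\n", av_err2str(ret));
        return ret;
    }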

Signed-off-by: Shubhanshu Saxena <shubhanshu.e01@gmail.com>
---
 libavfilter/dnn/dnn_backend_common.c          | 10 ++--
 libavfilter/dnn/dnn_backend_common.h          |  8 +--
 libavfilter/dnn/dnn_backend_native.c          | 16 +++---
 .../dnn/dnn_backend_native_layer_avgpool.c    |  2 +-
 .../dnn/dnn_backend_native_layer_conv2d.c     |  4 +-
 .../dnn/dnn_backend_native_layer_dense.c      |  2 +-
 .../dnn_backend_native_layer_depth2space.c    |  2 +-
 libavfilter/dnn/dnn_backend_openvino.c        | 48 ++++++++--------
 libavfilter/dnn/dnn_backend_tf.c              | 56 +++++++++----------
 libavfilter/dnn/dnn_io_proc.c                 | 14 ++---
 libavfilter/dnn_interface.h                   |  2 -
 libavfilter/vf_derain.c                       |  2 +-
 libavfilter/vf_dnn_classify.c                 |  4 +-
 libavfilter/vf_dnn_detect.c                   |  4 +-
 libavfilter/vf_dnn_processing.c               |  8 +--
 libavfilter/vf_sr.c                           |  4 +-
 16 files changed, 92 insertions(+), 94 deletions(-)

diff --git a/libavfilter/dnn/dnn_backend_common.c b/libavfilter/dnn/dnn_backend_common.c
index 64ed441415..91a4a3c4bf 100644
--- a/libavfilter/dnn/dnn_backend_common.c
+++ b/libavfilter/dnn/dnn_backend_common.c
@@ -70,7 +70,7 @@ int ff_dnn_fill_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backe
     task->nb_output = exec_params->nb_output;
     task->output_names = exec_params->output_names;
 
-    return DNN_SUCCESS;
+    return 0;
 }
 
 /**
@@ -82,7 +82,7 @@ static void *async_thread_routine(void *args)
     DNNAsyncExecModule *async_module = args;
     void *request = async_module->args;
 
-    if (async_module->start_inference(request) != DNN_SUCCESS) {
+    if (async_module->start_inference(request) != 0) {
         return DNN_ASYNC_FAIL;
     }
     async_module->callback(request);
@@ -105,7 +105,7 @@ int ff_dnn_async_module_cleanup(DNNAsyncExecModule *async_module)
     async_module->start_inference = NULL;
     async_module->callback = NULL;
     async_module->args = NULL;
-    return DNN_SUCCESS;
+    return 0;
 }
 
 int ff_dnn_start_inference_async(void *ctx, DNNAsyncExecModule *async_module)
@@ -131,12 +131,12 @@ int ff_dnn_start_inference_async(void *ctx, DNNAsyncExecModule *async_module)
     }
 #else
     ret = async_module->start_inference(async_module->args);
-    if (ret != DNN_SUCCESS) {
+    if (ret != 0) {
         return ret;
     }
     async_module->callback(async_module->args);
 #endif
-    return DNN_SUCCESS;
+    return 0;
 }
 
 DNNAsyncStatusType ff_dnn_get_result_common(Queue *task_queue, AVFrame **in, AVFrame **out)
diff --git a/libavfilter/dnn/dnn_backend_common.h b/libavfilter/dnn/dnn_backend_common.h
index fa79caee1f..42c67c7040 100644
--- a/libavfilter/dnn/dnn_backend_common.h
+++ b/libavfilter/dnn/dnn_backend_common.h
@@ -92,7 +92,7 @@ int ff_check_exec_params(void *ctx, DNNBackendType backend, DNNFunctionType func
  * @param async flag for async execution. Must be 0 or 1
  * @param do_ioproc flag for IO processing. Must be 0 or 1
  *
- * @returns DNN_SUCCESS if successful or error code otherwise.
+ * @returns 0 if successful or error code otherwise.
  */
 int ff_dnn_fill_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int async, int do_ioproc);
 
@@ -101,7 +101,7 @@ int ff_dnn_fill_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backe
  *
  * @param async_module pointer to DNNAsyncExecModule module
  *
- * @returns DNN_SUCCESS if successful or error code otherwise.
+ * @returns 0 if successful or error code otherwise.
  */
 int ff_dnn_async_module_cleanup(DNNAsyncExecModule *async_module);
 
@@ -117,7 +117,7 @@ int ff_dnn_async_module_cleanup(DNNAsyncExecModule *async_module);
  * @param ctx pointer to the backend context
  * @param async_module pointer to DNNAsyncExecModule module
  *
- * @returns DNN_SUCCESS on the start of async inference or error code otherwise.
+ * @returns 0 on the start of async inference or error code otherwise.
  */
 int ff_dnn_start_inference_async(void *ctx, DNNAsyncExecModule *async_module);
 
@@ -146,7 +146,7 @@ DNNAsyncStatusType ff_dnn_get_result_common(Queue *task_queue, AVFrame **in, AVF
  * @param input_width width of input frame
  * @param ctx pointer to the backend context
  *
- * @returns DNN_SUCCESS if successful or error code otherwise.
+ * @returns 0 if successful or error code otherwise.
  */
 int ff_dnn_fill_gettingoutput_task(TaskItem *task, DNNExecBaseParams *exec_params, void *backend_model, int input_height, int input_width, void *ctx);
 
diff --git a/libavfilter/dnn/dnn_backend_native.c b/libavfilter/dnn/dnn_backend_native.c
index f29e0e06bd..b53799f04d 100644
--- a/libavfilter/dnn/dnn_backend_native.c
+++ b/libavfilter/dnn/dnn_backend_native.c
@@ -67,7 +67,7 @@ static int extract_lltask_from_task(TaskItem *task, Queue *lltask_queue)
         av_freep(&lltask);
         return AVERROR(ENOMEM);
     }
-    return DNN_SUCCESS;
+    return 0;
 }
 
 static int get_input_native(void *model, DNNData *input, const char *input_name)
@@ -87,7 +87,7 @@ static int get_input_native(void *model, DNNData *input, const char *input_name)
             input->height = oprd->dims[1];
             input->width = oprd->dims[2];
             input->channels = oprd->dims[3];
-            return DNN_SUCCESS;
+            return 0;
         }
     }
 
@@ -112,12 +112,12 @@ static int get_output_native(void *model, const char *input_name, int input_widt
     };
 
     ret = ff_dnn_fill_gettingoutput_task(&task, &exec_params, native_model, input_height, input_width, ctx);
-    if (ret != DNN_SUCCESS) {
+    if (ret != 0) {
         goto err;
     }
 
     ret = extract_lltask_from_task(&task, native_model->lltask_queue);
-    if (ret != DNN_SUCCESS) {
+    if (ret != 0) {
         av_log(ctx, AV_LOG_ERROR, "unable to extract last level task from task.\n");
         goto err;
     }
@@ -387,7 +387,7 @@ static int execute_model_native(Queue *lltask_queue)
                                                  native_model->layers[layer].output_operand_index,
                                                  native_model->layers[layer].params,
                                                  &native_model->ctx);
-        if (ret != DNN_SUCCESS) {
+        if (ret != 0) {
             av_log(ctx, AV_LOG_ERROR, "Failed to execute model\n");
             goto err;
         }
@@ -451,7 +451,7 @@ int ff_dnn_execute_model_native(const DNNModel *model, DNNExecBaseParams *exec_p
     }
 
     ret = ff_dnn_fill_task(task, exec_params, native_model, ctx->options.async, 1);
-    if (ret != DNN_SUCCESS) {
+    if (ret != 0) {
         av_freep(&task);
         return ret;
     }
@@ -463,7 +463,7 @@ int ff_dnn_execute_model_native(const DNNModel *model, DNNExecBaseParams *exec_p
     }
 
     ret = extract_lltask_from_task(task, native_model->lltask_queue);
-    if (ret != DNN_SUCCESS) {
+    if (ret != 0) {
         av_log(ctx, AV_LOG_ERROR, "unable to extract last level task from task.\n");
         return ret;
     }
@@ -477,7 +477,7 @@ int ff_dnn_flush_native(const DNNModel *model)
 
     if (ff_queue_size(native_model->lltask_queue) == 0) {
         // no pending task need to flush
-        return DNN_SUCCESS;
+        return 0;
     }
 
     // for now, use sync node with flush operation
diff --git a/libavfilter/dnn/dnn_backend_native_layer_avgpool.c b/libavfilter/dnn/dnn_backend_native_layer_avgpool.c
index 510a28a8c9..d6fcac8a35 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_avgpool.c
+++ b/libavfilter/dnn/dnn_backend_native_layer_avgpool.c
@@ -143,5 +143,5 @@ int ff_dnn_execute_layer_avg_pool(DnnOperand *operands, const int32_t *input_ope
         }
     }
 
-    return DNN_SUCCESS;
+    return 0;
 }
diff --git a/libavfilter/dnn/dnn_backend_native_layer_conv2d.c b/libavfilter/dnn/dnn_backend_native_layer_conv2d.c
index dfa0d1ed36..2ac37d8855 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_conv2d.c
+++ b/libavfilter/dnn/dnn_backend_native_layer_conv2d.c
@@ -190,7 +190,7 @@ int ff_dnn_execute_layer_conv2d(DnnOperand *operands, const int32_t *input_opera
 #if HAVE_PTHREAD_CANCEL
     int thread_num = (ctx->options.conv2d_threads <= 0 || ctx->options.conv2d_threads > av_cpu_count())
         ? (av_cpu_count() + 1) : (ctx->options.conv2d_threads);
-    int ret = DNN_SUCCESS, thread_stride;
+    int ret = 0, thread_stride;
     ThreadParam *thread_param;
 #else
     ThreadParam thread_param = { 0 };
@@ -260,6 +260,6 @@ int ff_dnn_execute_layer_conv2d(DnnOperand *operands, const int32_t *input_opera
     thread_param.thread_end = height - pad_size;
     dnn_execute_layer_conv2d_thread(&thread_param);
 
-    return DNN_SUCCESS;
+    return 0;
 #endif
 }
diff --git a/libavfilter/dnn/dnn_backend_native_layer_dense.c b/libavfilter/dnn/dnn_backend_native_layer_dense.c
index a22a484464..dff342c1f3 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_dense.c
+++ b/libavfilter/dnn/dnn_backend_native_layer_dense.c
@@ -147,5 +147,5 @@ int ff_dnn_execute_layer_dense(DnnOperand *operands, const int32_t *input_operan
             output += dense_params->output_num;
         }
     }
-    return DNN_SUCCESS;
+    return 0;
 }
diff --git a/libavfilter/dnn/dnn_backend_native_layer_depth2space.c b/libavfilter/dnn/dnn_backend_native_layer_depth2space.c
index 82b1a52be2..358ac3bcaa 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_depth2space.c
+++ b/libavfilter/dnn/dnn_backend_native_layer_depth2space.c
@@ -98,5 +98,5 @@ int ff_dnn_execute_layer_depth2space(DnnOperand *operands, const int32_t *input_
         }
         output += output_linesize;
     }
-    return DNN_SUCCESS;
+    return 0;
 }
diff --git a/libavfilter/dnn/dnn_backend_openvino.c b/libavfilter/dnn/dnn_backend_openvino.c
index 2f140e996b..cf012aca4c 100644
--- a/libavfilter/dnn/dnn_backend_openvino.c
+++ b/libavfilter/dnn/dnn_backend_openvino.c
@@ -191,7 +191,7 @@ static int fill_model_input_ov(OVModel *ov_model, OVRequestItem *request)
     }
     ie_blob_free(&input_blob);
 
-    return DNN_SUCCESS;
+    return 0;
 }
 
 static void infer_completion_callback(void *args)
@@ -303,7 +303,7 @@ static void infer_completion_callback(void *args)
 
 static int init_model_ov(OVModel *ov_model, const char *input_name, const char *output_name)
 {
-    int ret = DNN_SUCCESS;
+    int ret = 0;
     OVContext *ctx = &ov_model->ctx;
     IEStatusCode status;
     ie_available_devices_t a_dev;
@@ -433,7 +433,7 @@ static int init_model_ov(OVModel *ov_model, const char *input_name, const char *
         goto err;
     }
 
-    return DNN_SUCCESS;
+    return 0;
 
 err:
     ff_dnn_free_model_ov(&ov_model->model);
@@ -444,7 +444,7 @@ static int execute_model_ov(OVRequestItem *request, Queue *inferenceq)
 {
     IEStatusCode status;
     LastLevelTaskItem *lltask;
-    int ret = DNN_SUCCESS;
+    int ret = 0;
     TaskItem *task;
     OVContext *ctx;
     OVModel *ov_model;
@@ -452,7 +452,7 @@ static int execute_model_ov(OVRequestItem *request, Queue *inferenceq)
     if (ff_queue_size(inferenceq) == 0) {
         ie_infer_request_free(&request->infer_request);
         av_freep(&request);
-        return DNN_SUCCESS;
+        return 0;
     }
 
     lltask = ff_queue_peek_front(inferenceq);
@@ -462,7 +462,7 @@ static int execute_model_ov(OVRequestItem *request, Queue *inferenceq)
 
     if (task->async) {
         ret = fill_model_input_ov(ov_model, request);
-        if (ret != DNN_SUCCESS) {
+        if (ret != 0) {
             goto err;
         }
         status = ie_infer_set_completion_callback(request->infer_request, &request->callback);
@@ -477,10 +477,10 @@ static int execute_model_ov(OVRequestItem *request, Queue *inferenceq)
             ret = DNN_GENERIC_ERROR;
             goto err;
         }
-        return DNN_SUCCESS;
+        return 0;
     } else {
         ret = fill_model_input_ov(ov_model, request);
-        if (ret != DNN_SUCCESS) {
+        if (ret != 0) {
             goto err;
         }
         status = ie_infer_request_infer(request->infer_request);
@@ -490,7 +490,7 @@ static int execute_model_ov(OVRequestItem *request, Queue *inferenceq)
             goto err;
         }
         infer_completion_callback(request);
-        return (task->inference_done == task->inference_todo) ? DNN_SUCCESS : DNN_GENERIC_ERROR;
+        return (task->inference_done == task->inference_todo) ? 0 : DNN_GENERIC_ERROR;
     }
 err:
     if (ff_safe_queue_push_back(ov_model->request_queue, request) < 0) {
@@ -537,7 +537,7 @@ static int get_input_ov(void *model, DNNData *input, const char *input_name)
             input->height   = input_resizable ? -1 : dims.dims[2];
             input->width    = input_resizable ? -1 : dims.dims[3];
             input->dt       = precision_to_datatype(precision);
-            return DNN_SUCCESS;
+            return 0;
         } else {
             //incorrect input name
             APPEND_STRING(all_input_names, model_input_name)
@@ -604,7 +604,7 @@ static int extract_lltask_from_task(DNNFunctionType func_type, TaskItem *task, Q
             av_freep(&lltask);
             return AVERROR(ENOMEM);
         }
-        return DNN_SUCCESS;
+        return 0;
     }
     case DFT_ANALYTICS_CLASSIFY:
     {
@@ -617,7 +617,7 @@ static int extract_lltask_from_task(DNNFunctionType func_type, TaskItem *task, Q
         task->inference_done = 0;
 
         if (!contain_valid_detection_bbox(frame)) {
-            return DNN_SUCCESS;
+            return 0;
         }
 
         sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DETECTION_BBOXES);
@@ -645,7 +645,7 @@ static int extract_lltask_from_task(DNNFunctionType func_type, TaskItem *task, Q
                 return AVERROR(ENOMEM);
             }
         }
-        return DNN_SUCCESS;
+        return 0;
     }
     default:
         av_assert0(!"should not reach here");
@@ -690,19 +690,19 @@ static int get_output_ov(void *model, const char *input_name, int input_width, i
 
     if (!ov_model->exe_network) {
         ret = init_model_ov(ov_model, input_name, output_name);
-        if (ret != DNN_SUCCESS) {
+        if (ret != 0) {
             av_log(ctx, AV_LOG_ERROR, "Failed init OpenVINO exectuable network or inference request\n");
             return ret;
         }
     }
 
     ret = ff_dnn_fill_gettingoutput_task(&task, &exec_params, ov_model, input_height, input_width, ctx);
-    if (ret != DNN_SUCCESS) {
+    if (ret != 0) {
         goto err;
     }
 
     ret = extract_lltask_from_task(ov_model->model->func_type, &task, ov_model->lltask_queue, NULL);
-    if (ret != DNN_SUCCESS) {
+    if (ret != 0) {
         av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
         goto err;
     }
@@ -795,7 +795,7 @@ int ff_dnn_execute_model_ov(const DNNModel *model, DNNExecBaseParams *exec_param
 
     if (!ov_model->exe_network) {
         ret = init_model_ov(ov_model, exec_params->input_name, exec_params->output_names[0]);
-        if (ret != DNN_SUCCESS) {
+        if (ret != 0) {
             av_log(ctx, AV_LOG_ERROR, "Failed init OpenVINO exectuable network or inference request\n");
             return ret;
         }
@@ -808,7 +808,7 @@ int ff_dnn_execute_model_ov(const DNNModel *model, DNNExecBaseParams *exec_param
     }
 
     ret = ff_dnn_fill_task(task, exec_params, ov_model, ctx->options.async, 1);
-    if (ret != DNN_SUCCESS) {
+    if (ret != 0) {
         av_freep(&task);
         return ret;
     }
@@ -820,7 +820,7 @@ int ff_dnn_execute_model_ov(const DNNModel *model, DNNExecBaseParams *exec_param
     }
 
     ret = extract_lltask_from_task(model->func_type, task, ov_model->lltask_queue, exec_params);
-    if (ret != DNN_SUCCESS) {
+    if (ret != 0) {
         av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
         return ret;
     }
@@ -834,12 +834,12 @@ int ff_dnn_execute_model_ov(const DNNModel *model, DNNExecBaseParams *exec_param
             }
 
             ret = execute_model_ov(request, ov_model->lltask_queue);
-            if (ret != DNN_SUCCESS) {
+            if (ret != 0) {
                 return ret;
             }
         }
 
-        return DNN_SUCCESS;
+        return 0;
     }
     else {
         if (model->func_type == DFT_ANALYTICS_CLASSIFY) {
@@ -879,7 +879,7 @@ int ff_dnn_flush_ov(const DNNModel *model)
 
     if (ff_queue_size(ov_model->lltask_queue) == 0) {
         // no pending tasks to flush
-        return DNN_SUCCESS;
+        return 0;
     }
 
     request = ff_safe_queue_pop_front(ov_model->request_queue);
@@ -889,7 +889,7 @@ int ff_dnn_flush_ov(const DNNModel *model)
     }
 
     ret = fill_model_input_ov(ov_model, request);
-    if (ret != DNN_SUCCESS) {
+    if (ret != 0) {
         av_log(ctx, AV_LOG_ERROR, "Failed to fill model input.\n");
         return ret;
     }
@@ -904,7 +904,7 @@ int ff_dnn_flush_ov(const DNNModel *model)
         return DNN_GENERIC_ERROR;
     }
 
-    return DNN_SUCCESS;
+    return 0;
 }
 
 void ff_dnn_free_model_ov(DNNModel **model)
diff --git a/libavfilter/dnn/dnn_backend_tf.c b/libavfilter/dnn/dnn_backend_tf.c
index cede1286c3..3b5084b67b 100644
--- a/libavfilter/dnn/dnn_backend_tf.c
+++ b/libavfilter/dnn/dnn_backend_tf.c
@@ -151,7 +151,7 @@ static TFInferRequest *tf_create_inference_request(void)
  * Start synchronous inference for the TensorFlow model.
  *
  * @param request pointer to the TFRequestItem for inference
- * @retval DNN_SUCCESS if execution is successful
+ * @retval 0 if execution is successful
  * @retval AVERROR(EINVAL) if request is NULL
  * @retval DNN_GENERIC_ERROR if execution fails
  */
@@ -181,7 +181,7 @@ static int tf_start_inference(void *args)
         }
         return DNN_GENERIC_ERROR;
     }
-    return DNN_SUCCESS;
+    return 0;
 }
 
 /**
@@ -220,7 +220,7 @@ static int extract_lltask_from_task(TaskItem *task, Queue *lltask_queue)
         av_freep(&lltask);
         return AVERROR(ENOMEM);
     }
-    return DNN_SUCCESS;
+    return 0;
 }
 
 static TF_Buffer *read_graph(const char *model_filename)
@@ -311,7 +311,7 @@ static int get_input_tf(void *model, DNNData *input, const char *input_name)
     input->width = dims[2];
     input->channels = dims[3];
 
-    return DNN_SUCCESS;
+    return 0;
 }
 
 static int get_output_tf(void *model, const char *input_name, int input_width, int input_height,
@@ -331,12 +331,12 @@ static int get_output_tf(void *model, const char *input_name, int input_width, i
     };
 
     ret = ff_dnn_fill_gettingoutput_task(&task, &exec_params, tf_model, input_height, input_width, ctx);
-    if (ret != DNN_SUCCESS) {
+    if (ret != 0) {
         goto err;
     }
 
     ret = extract_lltask_from_task(&task, tf_model->lltask_queue);
-    if (ret != DNN_SUCCESS) {
+    if (ret != 0) {
         av_log(ctx, AV_LOG_ERROR, "unable to extract inference from task.\n");
         goto err;
     }
@@ -487,7 +487,7 @@ static int load_tf_model(TFModel *tf_model, const char *model_filename)
         }
     }
 
-    return DNN_SUCCESS;
+    return 0;
 }
 
 #define NAME_BUFFER_SIZE 256
@@ -606,7 +606,7 @@ static int add_conv_layer(TFModel *tf_model, TF_Operation *transpose_op, TF_Oper
         goto err;
     }
 
-    return DNN_SUCCESS;
+    return 0;
 err:
     TF_DeleteTensor(kernel_tensor);
     TF_DeleteTensor(biases_tensor);
@@ -635,7 +635,7 @@ static int add_depth_to_space_layer(TFModel *tf_model, TF_Operation **cur_op,
         return DNN_GENERIC_ERROR;
     }
 
-    return DNN_SUCCESS;
+    return 0;
 }
 
 static int add_pad_layer(TFModel *tf_model, TF_Operation **cur_op,
@@ -693,7 +693,7 @@ static int add_pad_layer(TFModel *tf_model, TF_Operation **cur_op,
         return DNN_GENERIC_ERROR;
     }
 
-    return DNN_SUCCESS;
+    return 0;
 }
 
 static int add_maximum_layer(TFModel *tf_model, TF_Operation **cur_op,
@@ -742,7 +742,7 @@ static int add_maximum_layer(TFModel *tf_model, TF_Operation **cur_op,
         return DNN_GENERIC_ERROR;
     }
 
-    return DNN_SUCCESS;
+    return 0;
 }
 
 static int load_native_model(TFModel *tf_model, const char *model_filename)
@@ -808,7 +808,7 @@ static int load_native_model(TFModel *tf_model, const char *model_filename)
     for (layer = 0; layer < native_model->layers_num; ++layer){
         switch (native_model->layers[layer].type){
         case DLT_INPUT:
-            layer_add_res = DNN_SUCCESS;
+            layer_add_res = 0;
             break;
         case DLT_CONV2D:
             layer_add_res = add_conv_layer(tf_model, transpose_op, &op,
@@ -830,7 +830,7 @@ static int load_native_model(TFModel *tf_model, const char *model_filename)
             CLEANUP_ON_ERROR(tf_model);
         }
 
-        if (layer_add_res != DNN_SUCCESS){
+        if (layer_add_res != 0){
             CLEANUP_ON_ERROR(tf_model);
         }
     }
@@ -846,7 +846,7 @@ static int load_native_model(TFModel *tf_model, const char *model_filename)
 
     ff_dnn_free_model_native(&model);
 
-    return DNN_SUCCESS;
+    return 0;
 }
 
 DNNModel *ff_dnn_load_model_tf(const char *model_filename, DNNFunctionType func_type, const char *options, AVFilterContext *filter_ctx)
@@ -876,8 +876,8 @@ DNNModel *ff_dnn_load_model_tf(const char *model_filename, DNNFunctionType func_
         goto err;
     }
 
-    if (load_tf_model(tf_model, model_filename) != DNN_SUCCESS){
-        if (load_native_model(tf_model, model_filename) != DNN_SUCCESS){
+    if (load_tf_model(tf_model, model_filename) != 0){
+        if (load_native_model(tf_model, model_filename) != 0){
             goto err;
         }
     }
@@ -958,7 +958,7 @@ static int fill_model_input_tf(TFModel *tf_model, TFRequestItem *request) {
     request->lltask = lltask;
 
     ret = get_input_tf(tf_model, &input, task->input_name);
-    if (ret != DNN_SUCCESS) {
+    if (ret != 0) {
         goto err;
     }
 
@@ -1032,7 +1032,7 @@ static int fill_model_input_tf(TFModel *tf_model, TFRequestItem *request) {
         infer_request->tf_outputs[i].index = 0;
     }
 
-    return DNN_SUCCESS;
+    return 0;
 err:
     tf_free_request(infer_request);
     return ret;
@@ -1106,7 +1106,7 @@ static int execute_model_tf(TFRequestItem *request, Queue *lltask_queue)
 
     if (ff_queue_size(lltask_queue) == 0) {
         destroy_request_item(&request);
-        return DNN_SUCCESS;
+        return 0;
     }
 
     lltask = ff_queue_peek_front(lltask_queue);
@@ -1115,23 +1115,23 @@ static int execute_model_tf(TFRequestItem *request, Queue *lltask_queue)
     ctx = &tf_model->ctx;
 
     ret = fill_model_input_tf(tf_model, request);
-    if (ret != DNN_SUCCESS) {
+    if (ret != 0) {
         goto err;
     }
 
     if (task->async) {
-        if (ff_dnn_start_inference_async(ctx, &request->exec_module) != DNN_SUCCESS) {
+        if (ff_dnn_start_inference_async(ctx, &request->exec_module) != 0) {
             goto err;
         }
-        return DNN_SUCCESS;
+        return 0;
     }
     else {
         ret = tf_start_inference(request);
-        if (ret != DNN_SUCCESS) {
+        if (ret != 0) {
             goto err;
         }
         infer_completion_callback(request);
-        return (task->inference_done == task->inference_todo) ? DNN_SUCCESS : DNN_GENERIC_ERROR;
+        return (task->inference_done == task->inference_todo) ? 0 : DNN_GENERIC_ERROR;
     }
 err:
     tf_free_request(request->infer_request);
@@ -1161,7 +1161,7 @@ int ff_dnn_execute_model_tf(const DNNModel *model, DNNExecBaseParams *exec_param
     }
 
     ret = ff_dnn_fill_task(task, exec_params, tf_model, ctx->options.async, 1);
-    if (ret != DNN_SUCCESS) {
+    if (ret != 0) {
         av_freep(&task);
         return ret;
     }
@@ -1173,7 +1173,7 @@ int ff_dnn_execute_model_tf(const DNNModel *model, DNNExecBaseParams *exec_param
     }
 
     ret = extract_lltask_from_task(task, tf_model->lltask_queue);
-    if (ret != DNN_SUCCESS) {
+    if (ret != 0) {
         av_log(ctx, AV_LOG_ERROR, "unable to extract last level task from task.\n");
         return ret;
     }
@@ -1201,7 +1201,7 @@ int ff_dnn_flush_tf(const DNNModel *model)
 
     if (ff_queue_size(tf_model->lltask_queue) == 0) {
         // no pending tasks to flush
-        return DNN_SUCCESS;
+        return 0;
     }
 
     request = ff_safe_queue_pop_front(tf_model->request_queue);
@@ -1211,7 +1211,7 @@ int ff_dnn_flush_tf(const DNNModel *model)
     }
 
     ret = fill_model_input_tf(tf_model, request);
-    if (ret != DNN_SUCCESS) {
+    if (ret != 0) {
         av_log(ctx, AV_LOG_ERROR, "Failed to fill model input.\n");
         if (ff_safe_queue_push_back(tf_model->request_queue, request) < 0) {
             destroy_request_item(&request);
diff --git a/libavfilter/dnn/dnn_io_proc.c b/libavfilter/dnn/dnn_io_proc.c
index 36cc051e5e..7961bf6b95 100644
--- a/libavfilter/dnn/dnn_io_proc.c
+++ b/libavfilter/dnn/dnn_io_proc.c
@@ -57,12 +57,12 @@ int ff_proc_from_dnn_to_frame(AVFrame *frame, DNNData *output, void *log_ctx)
                            (const int[4]){frame->width * 3 * sizeof(float), 0, 0, 0}, 0, frame->height,
                            (uint8_t * const*)frame->data, frame->linesize);
         sws_freeContext(sws_ctx);
-        return DNN_SUCCESS;
+        return 0;
     case AV_PIX_FMT_GRAYF32:
         av_image_copy_plane(frame->data[0], frame->linesize[0],
                             output->data, bytewidth,
                             bytewidth, frame->height);
-        return DNN_SUCCESS;
+        return 0;
     case AV_PIX_FMT_YUV420P:
     case AV_PIX_FMT_YUV422P:
     case AV_PIX_FMT_YUV444P:
@@ -88,13 +88,13 @@ int ff_proc_from_dnn_to_frame(AVFrame *frame, DNNData *output, void *log_ctx)
                            (const int[4]){frame->width * sizeof(float), 0, 0, 0}, 0, frame->height,
                            (uint8_t * const*)frame->data, frame->linesize);
         sws_freeContext(sws_ctx);
-        return DNN_SUCCESS;
+        return 0;
     default:
         avpriv_report_missing_feature(log_ctx, "%s", av_get_pix_fmt_name(frame->format));
         return AVERROR(ENOSYS);
     }
 
-    return DNN_SUCCESS;
+    return 0;
 }
 
 int ff_proc_from_frame_to_dnn(AVFrame *frame, DNNData *input, void *log_ctx)
@@ -169,7 +169,7 @@ int ff_proc_from_frame_to_dnn(AVFrame *frame, DNNData *input, void *log_ctx)
         return AVERROR(ENOSYS);
     }
 
-    return DNN_SUCCESS;
+    return 0;
 }
 
 static enum AVPixelFormat get_pixel_format(DNNData *data)
@@ -197,7 +197,7 @@ int ff_frame_to_dnn_classify(AVFrame *frame, DNNData *input, uint32_t bbox_index
     uint8_t *bbox_data[4];
     struct SwsContext *sws_ctx;
     int linesizes[4];
-    int ret = DNN_SUCCESS;
+    int ret = 0;
     enum AVPixelFormat fmt;
     int left, top, width, height;
     const AVDetectionBBoxHeader *header;
@@ -255,7 +255,7 @@ int ff_frame_to_dnn_detect(AVFrame *frame, DNNData *input, void *log_ctx)
 {
     struct SwsContext *sws_ctx;
     int linesizes[4];
-    int ret = DNN_SUCCESS;
+    int ret = 0;
     enum AVPixelFormat fmt = get_pixel_format(input);
     sws_ctx = sws_getContext(frame->width, frame->height, frame->format,
                              input->width, input->height, fmt,
diff --git a/libavfilter/dnn_interface.h b/libavfilter/dnn_interface.h
index 06e71f7946..ef8d7ae66f 100644
--- a/libavfilter/dnn_interface.h
+++ b/libavfilter/dnn_interface.h
@@ -32,8 +32,6 @@
 
 #define DNN_GENERIC_ERROR FFERRTAG('D','N','N','!')
 
-typedef enum {DNN_SUCCESS, DNN_ERROR} DNNReturnType;
-
 typedef enum {DNN_NATIVE, DNN_TF, DNN_OV} DNNBackendType;
 
 typedef enum {DNN_FLOAT = 1, DNN_UINT8 = 4} DNNDataType;
diff --git a/libavfilter/vf_derain.c b/libavfilter/vf_derain.c
index 6758cc05d2..86e9eb8752 100644
--- a/libavfilter/vf_derain.c
+++ b/libavfilter/vf_derain.c
@@ -74,7 +74,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
     av_frame_copy_props(out, in);
 
     dnn_result = ff_dnn_execute_model(&dr_context->dnnctx, in, out);
-    if (dnn_result != DNN_SUCCESS){
+    if (dnn_result != 0){
         av_log(ctx, AV_LOG_ERROR, "failed to execute model\n");
         av_frame_free(&in);
         return dnn_result;
diff --git a/libavfilter/vf_dnn_classify.c b/libavfilter/vf_dnn_classify.c
index 5c6942d86a..c612ba8e80 100644
--- a/libavfilter/vf_dnn_classify.c
+++ b/libavfilter/vf_dnn_classify.c
@@ -213,7 +213,7 @@ static int dnn_classify_flush_frame(AVFilterLink *outlink, int64_t pts, int64_t
     DNNAsyncStatusType async_state;
 
     ret = ff_dnn_flush(&ctx->dnnctx);
-    if (ret != DNN_SUCCESS) {
+    if (ret != 0) {
         return -1;
     }
 
@@ -253,7 +253,7 @@ static int dnn_classify_activate(AVFilterContext *filter_ctx)
         if (ret < 0)
             return ret;
         if (ret > 0) {
-            if (ff_dnn_execute_model_classification(&ctx->dnnctx, in, NULL, ctx->target) != DNN_SUCCESS) {
+            if (ff_dnn_execute_model_classification(&ctx->dnnctx, in, NULL, ctx->target) != 0) {
                 return AVERROR(EIO);
             }
         }
diff --git a/libavfilter/vf_dnn_detect.c b/libavfilter/vf_dnn_detect.c
index 51f8b430df..dd4507250f 100644
--- a/libavfilter/vf_dnn_detect.c
+++ b/libavfilter/vf_dnn_detect.c
@@ -356,7 +356,7 @@ static int dnn_detect_flush_frame(AVFilterLink *outlink, int64_t pts, int64_t *o
     DNNAsyncStatusType async_state;
 
     ret = ff_dnn_flush(&ctx->dnnctx);
-    if (ret != DNN_SUCCESS) {
+    if (ret != 0) {
         return -1;
     }
 
@@ -396,7 +396,7 @@ static int dnn_detect_activate(AVFilterContext *filter_ctx)
         if (ret < 0)
             return ret;
         if (ret > 0) {
-            if (ff_dnn_execute_model(&ctx->dnnctx, in, NULL) != DNN_SUCCESS) {
+            if (ff_dnn_execute_model(&ctx->dnnctx, in, NULL) != 0) {
                 return AVERROR(EIO);
             }
         }
diff --git a/libavfilter/vf_dnn_processing.c b/libavfilter/vf_dnn_processing.c
index 4a1ff5898f..cac096a19f 100644
--- a/libavfilter/vf_dnn_processing.c
+++ b/libavfilter/vf_dnn_processing.c
@@ -139,7 +139,7 @@ static int config_input(AVFilterLink *inlink)
     int check;
 
     result = ff_dnn_get_input(&ctx->dnnctx, &model_input);
-    if (result != DNN_SUCCESS) {
+    if (result != 0) {
         av_log(ctx, AV_LOG_ERROR, "could not get input from the model\n");
         return result;
     }
@@ -199,7 +199,7 @@ static int config_output(AVFilterLink *outlink)
 
     // do a trial run in case the dnn model resizes the frame
     result = ff_dnn_get_output(&ctx->dnnctx, inlink->w, inlink->h, &outlink->w, &outlink->h);
-    if (result != DNN_SUCCESS) {
+    if (result != 0) {
         av_log(ctx, AV_LOG_ERROR, "could not get output from the model\n");
         return result;
     }
@@ -247,7 +247,7 @@ static int flush_frame(AVFilterLink *outlink, int64_t pts, int64_t *out_pts)
     DNNAsyncStatusType async_state;
 
     ret = ff_dnn_flush(&ctx->dnnctx);
-    if (ret != DNN_SUCCESS) {
+    if (ret != 0) {
         return -1;
     }
 
@@ -296,7 +296,7 @@ static int activate(AVFilterContext *filter_ctx)
                 return AVERROR(ENOMEM);
             }
             av_frame_copy_props(out, in);
-            if (ff_dnn_execute_model(&ctx->dnnctx, in, out) != DNN_SUCCESS) {
+            if (ff_dnn_execute_model(&ctx->dnnctx, in, out) != 0) {
                 return AVERROR(EIO);
             }
         }
diff --git a/libavfilter/vf_sr.c b/libavfilter/vf_sr.c
index 02d9452681..0890c8ba18 100644
--- a/libavfilter/vf_sr.c
+++ b/libavfilter/vf_sr.c
@@ -82,7 +82,7 @@ static int config_output(AVFilterLink *outlink)
 
     // do a trial run in case the dnn model resizes the frame
     result = ff_dnn_get_output(&ctx->dnnctx, inlink->w, inlink->h, &out_width, &out_height);
-    if (result != DNN_SUCCESS) {
+    if (result != 0) {
         av_log(ctx, AV_LOG_ERROR, "could not get output from the model\n");
         return result;
     }
@@ -139,7 +139,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
         dnn_result = ff_dnn_execute_model(&ctx->dnnctx, in, out);
     }
 
-    if (dnn_result != DNN_SUCCESS){
+    if (dnn_result != 0){
         av_log(ctx, AV_LOG_ERROR, "failed to execute loaded model\n");
         av_frame_free(&in);
         av_frame_free(&out);
-- 
2.32.0


* Re: [FFmpeg-devel] [PATCH V2 8/8] libavfilter: Remove DNNReturnType from DNN Module
  2022-03-02 18:05 ` [FFmpeg-devel] [PATCH V2 8/8] libavfilter: Remove DNNReturnType from DNN Module Shubhanshu Saxena
@ 2022-03-08 14:18   ` Guo, Yejun
  0 siblings, 0 replies; 9+ messages in thread
From: Guo, Yejun @ 2022-03-08 14:18 UTC (permalink / raw)
  To: FFmpeg development discussions and patches; +Cc: Shubhanshu Saxena



-----Original Message-----
From: ffmpeg-devel <ffmpeg-devel-bounces@ffmpeg.org> On Behalf Of Shubhanshu Saxena
Sent: March 3, 2022 2:06
To: ffmpeg-devel@ffmpeg.org
Cc: Shubhanshu Saxena <shubhanshu.e01@gmail.com>
Subject: [FFmpeg-devel] [PATCH V2 8/8] libavfilter: Remove DNNReturnType from DNN Module

This patch removes all occurrences of DNNReturnType from the DNN module.
It replaces DNN_SUCCESS with 0 (the two are equivalent), so functions that previously returned DNNReturnType now return 0 on success and a negative error code otherwise.
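
For callers, the practical effect is the standard FFmpeg convention: the DNN helpers now return 0 on success and a negative value (an AVERROR code, or DNN_GENERIC_ERROR, itself an FFERRTAG) on failure, so results can be propagated directly. A minimal sketch of the updated calling pattern, mirroring the vf_derain.c hunk in the patch; the enclosing filter code is illustrative only, not verbatim from the tree:

    /* Sketch of the post-patch calling convention. ff_dnn_execute_model()
     * and the 0/negative-error return convention come from the patch; the
     * surrounding call site is an illustrative example. */
    int ret = ff_dnn_execute_model(&ctx->dnnctx, in, out);
    if (ret != 0) {   /* was: ret != DNN_SUCCESS */
        av_log(ctx, AV_LOG_ERROR, "failed to execute model\n");
        av_frame_free(&in);
        return ret;   /* already a negative error code, pass it through */
    }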

Signed-off-by: Shubhanshu Saxena <shubhanshu.e01@gmail.com>
---
 libavfilter/dnn/dnn_backend_common.c          | 10 ++--
 libavfilter/dnn/dnn_backend_common.h          |  8 +--
 libavfilter/dnn/dnn_backend_native.c          | 16 +++---
 .../dnn/dnn_backend_native_layer_avgpool.c    |  2 +-
 .../dnn/dnn_backend_native_layer_conv2d.c     |  4 +-
 .../dnn/dnn_backend_native_layer_dense.c      |  2 +-
 .../dnn_backend_native_layer_depth2space.c    |  2 +-
 libavfilter/dnn/dnn_backend_openvino.c        | 48 ++++++++--------
 libavfilter/dnn/dnn_backend_tf.c              | 56 +++++++++----------
 libavfilter/dnn/dnn_io_proc.c                 | 14 ++---
 libavfilter/dnn_interface.h                   |  2 -
 libavfilter/vf_derain.c                       |  2 +-
 libavfilter/vf_dnn_classify.c                 |  4 +-
 libavfilter/vf_dnn_detect.c                   |  4 +-
 libavfilter/vf_dnn_processing.c               |  8 +--
 libavfilter/vf_sr.c                           |  4 +-
 16 files changed, 92 insertions(+), 94 deletions(-)

Thanks, LGTM, will push soon.

end of thread, other threads:[~2022-03-08 14:19 UTC | newest]

Thread overview: 9+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-03-02 18:05 [FFmpeg-devel] [PATCH V2 1/8] libavfilter: Prepare to handle specific error codes in DNN Filters Shubhanshu Saxena
2022-03-02 18:05 ` [FFmpeg-devel] [PATCH V2 2/8] lavfi/dnn: Error Specificity in Native Backend Layers Shubhanshu Saxena
2022-03-02 18:05 ` [FFmpeg-devel] [PATCH V2 3/8] lavfi/dnn_io_proc: Return Specific Error Codes Shubhanshu Saxena
2022-03-02 18:05 ` [FFmpeg-devel] [PATCH V2 4/8] lavfi/dnn_backend_openvino: " Shubhanshu Saxena
2022-03-02 18:05 ` [FFmpeg-devel] [PATCH V2 5/8] lavfi/dnn_backend_tf: " Shubhanshu Saxena
2022-03-02 18:05 ` [FFmpeg-devel] [PATCH V2 6/8] lavfi/dnn_backend_native: " Shubhanshu Saxena
2022-03-02 18:05 ` [FFmpeg-devel] [PATCH V2 7/8] lavfi/dnn_backend_common: Return specific error codes Shubhanshu Saxena
2022-03-02 18:05 ` [FFmpeg-devel] [PATCH V2 8/8] libavfilter: Remove DNNReturnType from DNN Module Shubhanshu Saxena
2022-03-08 14:18   ` Guo, Yejun
