From: Shubhanshu Saxena <shubhanshu.e01@gmail.com>
To: ffmpeg-devel@ffmpeg.org
Cc: Shubhanshu Saxena <shubhanshu.e01@gmail.com>
Date: Thu, 24 Feb 2022 13:52:41 +0530
Message-Id: <20220224082243.11208-6-shubhanshu.e01@gmail.com>
In-Reply-To: <20220224082243.11208-1-shubhanshu.e01@gmail.com>
References: <20220224082243.11208-1-shubhanshu.e01@gmail.com>
Subject: [FFmpeg-devel] [PATCH 6/8] lavfi/dnn_backend_native: Return Specific Error Codes

Switch to returning specific error codes or DNN_GENERIC_ERROR when an
error is encountered.
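(Illustration, not part of the patch: after this change the native backend
reports failures with specific negative AVERROR codes, or DNN_GENERIC_ERROR,
instead of the old catch-all DNN_ERROR, while success stays DNN_SUCCESS (0).
A minimal sketch of the convention the patch follows; the helper name below
is hypothetical:)

    /* Hypothetical helper, for illustration only: shows the error-code
     * convention used throughout this patch. */
    #include <stdint.h>
    #include <stddef.h>
    #include "libavutil/error.h"
    #include "libavutil/mem.h"

    static int prepare_buffer(uint8_t **buf, size_t size)
    {
        *buf = av_malloc(size);
        if (!*buf)
            return AVERROR(ENOMEM); /* specific code instead of DNN_ERROR */
        return 0;                   /* DNN_SUCCESS */
    }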
Signed-off-by: Shubhanshu Saxena <shubhanshu.e01@gmail.com>
---
 libavfilter/dnn/dnn_backend_native.c | 82 +++++++++++++++-------------
 libavfilter/dnn/dnn_backend_native.h |  4 +-
 2 files changed, 45 insertions(+), 41 deletions(-)

diff --git a/libavfilter/dnn/dnn_backend_native.c b/libavfilter/dnn/dnn_backend_native.c
index 13436c0484..f29e0e06bd 100644
--- a/libavfilter/dnn/dnn_backend_native.c
+++ b/libavfilter/dnn/dnn_backend_native.c
@@ -46,9 +46,9 @@ static const AVClass dnn_native_class = {
     .category   = AV_CLASS_CATEGORY_FILTER,
 };
 
-static DNNReturnType execute_model_native(Queue *lltask_queue);
+static int execute_model_native(Queue *lltask_queue);
 
-static DNNReturnType extract_lltask_from_task(TaskItem *task, Queue *lltask_queue)
+static int extract_lltask_from_task(TaskItem *task, Queue *lltask_queue)
 {
     NativeModel *native_model = task->model;
     NativeContext *ctx = &native_model->ctx;
@@ -56,7 +56,7 @@ static DNNReturnType extract_lltask_from_task(TaskItem *task, Queue *lltask_queu
 
     if (!lltask) {
         av_log(ctx, AV_LOG_ERROR, "Unable to allocate space for LastLevelTaskItem\n");
-        return DNN_ERROR;
+        return AVERROR(ENOMEM);
     }
     task->inference_todo = 1;
     task->inference_done = 0;
@@ -65,12 +65,12 @@ static DNNReturnType extract_lltask_from_task(TaskItem *task, Queue *lltask_queu
     if (ff_queue_push_back(lltask_queue, lltask) < 0) {
         av_log(ctx, AV_LOG_ERROR, "Failed to push back lltask_queue.\n");
         av_freep(&lltask);
-        return DNN_ERROR;
+        return AVERROR(ENOMEM);
     }
     return DNN_SUCCESS;
 }
 
-static DNNReturnType get_input_native(void *model, DNNData *input, const char *input_name)
+static int get_input_native(void *model, DNNData *input, const char *input_name)
 {
     NativeModel *native_model = model;
     NativeContext *ctx = &native_model->ctx;
@@ -80,7 +80,7 @@ static DNNReturnType get_input_native(void *model, DNNData *input, const char *i
         if (strcmp(oprd->name, input_name) == 0) {
             if (oprd->type != DOT_INPUT) {
                 av_log(ctx, AV_LOG_ERROR, "Found \"%s\" in model, but it is not input node\n", input_name);
-                return DNN_ERROR;
+                return AVERROR(EINVAL);
             }
             input->dt = oprd->data_type;
             av_assert0(oprd->dims[0] == 1);
@@ -93,13 +93,13 @@ static DNNReturnType get_input_native(void *model, DNNData *input, const char *i
 
     // do not find the input operand
     av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model\n", input_name);
-    return DNN_ERROR;
+    return AVERROR(EINVAL);
 }
 
-static DNNReturnType get_output_native(void *model, const char *input_name, int input_width, int input_height,
+static int get_output_native(void *model, const char *input_name, int input_width, int input_height,
                                        const char *output_name, int *output_width, int *output_height)
 {
-    DNNReturnType ret = 0;
+    int ret = 0;
     NativeModel *native_model = model;
     NativeContext *ctx = &native_model->ctx;
     TaskItem task;
@@ -111,14 +111,14 @@ static DNNReturnType get_output_native(void *model, const char *input_name, int
         .out_frame = NULL,
     };
 
-    if (ff_dnn_fill_gettingoutput_task(&task, &exec_params, native_model, input_height, input_width, ctx) != DNN_SUCCESS) {
-        ret = DNN_ERROR;
+    ret = ff_dnn_fill_gettingoutput_task(&task, &exec_params, native_model, input_height, input_width, ctx);
+    if (ret != DNN_SUCCESS) {
         goto err;
     }
 
-    if (extract_lltask_from_task(&task, native_model->lltask_queue) != DNN_SUCCESS) {
+    ret = extract_lltask_from_task(&task, native_model->lltask_queue);
+    if (ret != DNN_SUCCESS) {
         av_log(ctx, AV_LOG_ERROR, "unable to extract last level task from task.\n");
-        ret = DNN_ERROR;
         goto err;
     }
 
@@ -297,7 +297,7 @@ fail:
     return NULL;
 }
 
-static DNNReturnType execute_model_native(Queue *lltask_queue)
+static int execute_model_native(Queue *lltask_queue)
 {
     NativeModel *native_model = NULL;
     NativeContext *ctx = NULL;
@@ -306,12 +306,12 @@ static DNNReturnType execute_model_native(Queue *lltask_queue)
     DnnOperand *oprd = NULL;
     LastLevelTaskItem *lltask = NULL;
     TaskItem *task = NULL;
-    DNNReturnType ret = 0;
+    int ret = 0;
 
     lltask = ff_queue_pop_front(lltask_queue);
     if (!lltask) {
         av_log(NULL, AV_LOG_ERROR, "Failed to get LastLevelTaskItem\n");
-        ret = DNN_ERROR;
+        ret = AVERROR(EINVAL);
         goto err;
     }
     task = lltask->task;
@@ -320,7 +320,7 @@ static DNNReturnType execute_model_native(Queue *lltask_queue)
 
     if (native_model->layers_num <= 0 || native_model->operands_num <= 0) {
         av_log(ctx, AV_LOG_ERROR, "No operands or layers in model\n");
-        ret = DNN_ERROR;
+        ret = AVERROR(EINVAL);
         goto err;
     }
 
@@ -329,7 +329,7 @@ static DNNReturnType execute_model_native(Queue *lltask_queue)
         if (strcmp(oprd->name, task->input_name) == 0) {
             if (oprd->type != DOT_INPUT) {
                 av_log(ctx, AV_LOG_ERROR, "Found \"%s\" in model, but it is not input node\n", task->input_name);
-                ret = DNN_ERROR;
+                ret = AVERROR(EINVAL);
                 goto err;
             }
             break;
@@ -338,7 +338,7 @@ static DNNReturnType execute_model_native(Queue *lltask_queue)
     }
     if (!oprd) {
         av_log(ctx, AV_LOG_ERROR, "Could not find \"%s\" in model\n", task->input_name);
-        ret = DNN_ERROR;
+        ret = AVERROR(EINVAL);
         goto err;
     }
 
@@ -349,13 +349,13 @@ static DNNReturnType execute_model_native(Queue *lltask_queue)
     oprd->length = ff_calculate_operand_data_length(oprd);
     if (oprd->length <= 0) {
         av_log(ctx, AV_LOG_ERROR, "The input data length overflow\n");
-        ret = DNN_ERROR;
+        ret = AVERROR(EINVAL);
         goto err;
     }
     oprd->data = av_malloc(oprd->length);
     if (!oprd->data) {
         av_log(ctx, AV_LOG_ERROR, "Failed to malloc memory for input data\n");
-        ret = DNN_ERROR;
+        ret = AVERROR(ENOMEM);
         goto err;
     }
 
@@ -376,19 +376,19 @@ static DNNReturnType execute_model_native(Queue *lltask_queue)
         // currently, the filter does not need multiple outputs,
         // so we just pending the support until we really need it.
         avpriv_report_missing_feature(ctx, "multiple outputs");
-        ret = DNN_ERROR;
+        ret = AVERROR(ENOSYS);
         goto err;
     }
 
     for (layer = 0; layer < native_model->layers_num; ++layer){
         DNNLayerType layer_type = native_model->layers[layer].type;
-        if (ff_layer_funcs[layer_type].pf_exec(native_model->operands,
-                                               native_model->layers[layer].input_operand_indexes,
-                                               native_model->layers[layer].output_operand_index,
-                                               native_model->layers[layer].params,
-                                               &native_model->ctx) == DNN_ERROR) {
+        ret = ff_layer_funcs[layer_type].pf_exec(native_model->operands,
+                                                 native_model->layers[layer].input_operand_indexes,
+                                                 native_model->layers[layer].output_operand_index,
+                                                 native_model->layers[layer].params,
+                                                 &native_model->ctx);
+        if (ret != DNN_SUCCESS) {
             av_log(ctx, AV_LOG_ERROR, "Failed to execute model\n");
-            ret = DNN_ERROR;
             goto err;
         }
     }
@@ -405,7 +405,7 @@ static DNNReturnType execute_model_native(Queue *lltask_queue)
 
         if (oprd == NULL) {
             av_log(ctx, AV_LOG_ERROR, "Could not find output in model\n");
-            ret = DNN_ERROR;
+            ret = AVERROR(EINVAL);
             goto err;
         }
 
@@ -432,42 +432,46 @@ err:
     return ret;
 }
 
-DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNExecBaseParams *exec_params)
+int ff_dnn_execute_model_native(const DNNModel *model, DNNExecBaseParams *exec_params)
 {
     NativeModel *native_model = model->model;
     NativeContext *ctx = &native_model->ctx;
     TaskItem *task;
+    int ret = 0;
 
-    if (ff_check_exec_params(ctx, DNN_NATIVE, model->func_type, exec_params) != 0) {
-        return DNN_ERROR;
+    ret = ff_check_exec_params(ctx, DNN_NATIVE, model->func_type, exec_params);
+    if (ret != 0) {
+        return ret;
     }
 
     task = av_malloc(sizeof(*task));
     if (!task) {
         av_log(ctx, AV_LOG_ERROR, "unable to alloc memory for task item.\n");
-        return DNN_ERROR;
+        return AVERROR(ENOMEM);
     }
 
-    if (ff_dnn_fill_task(task, exec_params, native_model, ctx->options.async, 1) != DNN_SUCCESS) {
+    ret = ff_dnn_fill_task(task, exec_params, native_model, ctx->options.async, 1);
+    if (ret != DNN_SUCCESS) {
         av_freep(&task);
-        return DNN_ERROR;
+        return ret;
     }
 
     if (ff_queue_push_back(native_model->task_queue, task) < 0) {
         av_freep(&task);
         av_log(ctx, AV_LOG_ERROR, "unable to push back task_queue.\n");
-        return DNN_ERROR;
+        return AVERROR(ENOMEM);
     }
 
-    if (extract_lltask_from_task(task, native_model->lltask_queue) != DNN_SUCCESS) {
+    ret = extract_lltask_from_task(task, native_model->lltask_queue);
+    if (ret != DNN_SUCCESS) {
         av_log(ctx, AV_LOG_ERROR, "unable to extract last level task from task.\n");
-        return DNN_ERROR;
+        return ret;
     }
 
     return execute_model_native(native_model->lltask_queue);
 }
 
-DNNReturnType ff_dnn_flush_native(const DNNModel *model)
+int ff_dnn_flush_native(const DNNModel *model)
 {
     NativeModel *native_model = model->model;
 
diff --git a/libavfilter/dnn/dnn_backend_native.h b/libavfilter/dnn/dnn_backend_native.h
index e8017ee4b4..75bd9a44f7 100644
--- a/libavfilter/dnn/dnn_backend_native.h
+++ b/libavfilter/dnn/dnn_backend_native.h
@@ -134,11 +134,11 @@ typedef struct NativeModel{
 
 DNNModel *ff_dnn_load_model_native(const char *model_filename, DNNFunctionType func_type, const char *options, AVFilterContext *filter_ctx);
 
-DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, DNNExecBaseParams *exec_params);
+int ff_dnn_execute_model_native(const DNNModel *model, DNNExecBaseParams *exec_params);
 
 DNNAsyncStatusType ff_dnn_get_result_native(const DNNModel *model, AVFrame **in, AVFrame **out);
 
-DNNReturnType ff_dnn_flush_native(const DNNModel *model);
+int ff_dnn_flush_native(const DNNModel *model);
 
 void ff_dnn_free_model_native(DNNModel **model);
 
-- 
2.32.0
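(Illustration, not part of the patch: on the caller side the specific code can
now be logged and propagated instead of being collapsed to DNN_ERROR. A hedged
sketch; the wrapper below is hypothetical and assumes a loaded native model
and filled exec_params:)

    /* Hypothetical caller-side wrapper, for illustration only. */
    #include "dnn_backend_native.h"
    #include "libavutil/error.h"
    #include "libavutil/log.h"

    static int run_native_inference(const DNNModel *model,
                                    DNNExecBaseParams *exec_params)
    {
        int ret = ff_dnn_execute_model_native(model, exec_params);
        if (ret != 0)
            av_log(NULL, AV_LOG_ERROR, "native inference failed: %s\n",
                   av_err2str(ret));
        return ret; /* 0 on success, a negative AVERROR code otherwise */
    }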