From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: Received: from ffbox0-bg.mplayerhq.hu (ffbox0-bg.ffmpeg.org [79.124.17.100]) by master.gitmailbox.com (Postfix) with ESMTPS id 7C7BB4E307 for ; Mon, 10 Mar 2025 19:55:03 +0000 (UTC) Received: from [127.0.1.1] (localhost [127.0.0.1]) by ffbox0-bg.mplayerhq.hu (Postfix) with ESMTP id C5F8368E0BA; Mon, 10 Mar 2025 21:54:46 +0200 (EET) Received: from mail-wm1-f41.google.com (mail-wm1-f41.google.com [209.85.128.41]) by ffbox0-bg.mplayerhq.hu (Postfix) with ESMTPS id 7A48268DFEE for ; Mon, 10 Mar 2025 21:54:45 +0200 (EET) Received: by mail-wm1-f41.google.com with SMTP id 5b1f17b1804b1-43cf58eea0fso9412575e9.0 for ; Mon, 10 Mar 2025 12:54:45 -0700 (PDT) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=gmail.com; s=20230601; t=1741636484; x=1742241284; darn=ffmpeg.org; h=thread-index:content-language:content-transfer-encoding :mime-version:message-id:date:subject:to:from:from:to:cc:subject :date:message-id:reply-to; bh=9jjGjHUw1s4JgHkwxXNTtghbqG8VxnbK/jVmf4WXMRY=; b=dCPaJH8pYF9iRZ7ZhwJUd5qTmsyUd8ib6SYOOPvXVcw0gUil+heDJrYkvr/uGrn3Xx wYSHbQi1AWwaiwpzcJYabusocCycBnkArll3TV9LJlU/7I5AE/JbNT0kcNognnqJuMVN y9K6scJntfqm1mQIni12dBJZGoaKDzbqDZsLZkz0mT58hTQcy6x9ZYiGdpldco+WJS5V IZ0yeZdvBF/WveBvbC4w6VwYahptLG7sWK6852c16Kj71DT76QMpC6vF6lI/IbgmQtHZ rEYqVroDzR3HE0zzZ3f0zNg2j4epM7UwuKqq/HwFQ74u9sQlH3OaUWU4F2+76AFGT6Xq 2UtQ== X-Google-DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=1e100.net; s=20230601; t=1741636484; x=1742241284; h=thread-index:content-language:content-transfer-encoding :mime-version:message-id:date:subject:to:from:x-gm-message-state :from:to:cc:subject:date:message-id:reply-to; bh=9jjGjHUw1s4JgHkwxXNTtghbqG8VxnbK/jVmf4WXMRY=; b=rzFFmDZoVXHw+PNo0CIjjCLMAVShFR++yqhuBaDwPJKFGDKpuspqWfNnfSarT5R6xh lOHmNYhs+NGiMKGLCu8w3SRn06B7RuoG+vaUl1ouQAwfT8OEehNAcO2tF4SdDh5OvP9y iz0yRiIY0/Z2Lu91QfLM6kdm4UAeKuSTOXkbHbQ6KCHt624sadfvzmVTTwmFyNUaO4ep aMtwnPECPnpH877Ljr0MH44SA1QslhOFDpk7ygsp6FzFZwBalvs9XSTwj6dSknyAtFtb 
5kIwaJaV99bYwJl82QlOiVpFDGfyROrWi9ecKlYlyWMNqnMIplm/M8z6QpMiuD6J/tLm aXlw== X-Gm-Message-State: AOJu0Ywd2gXB3belarT3UHjKJhb1s4SdoNHFg0WEf5/y/kk8PJ39TJUr a8tlXd3HgHKAEty3kty0X22oCFKiAIc6EKAbzQ2dL/quDGequ5pRXofD9Q== X-Gm-Gg: ASbGncuEHNnInsQGJ87MX08zOPv9Tp/w+c6bF9rs1lidSeyoY/smZpyn/qeMUQU6TCP ulQ+9RD1dFI9w8cvh9hMRXVdjAicYvqEMd5ixw8GiEkTA4yIXs+fo3PNSpto5zX0VcCSZN9TYJ7 G2v9WL7UAqOkUtuRThQTKFOG9FUDPxubN85fI/hmv+KjrVCwuMqYUXuUFgDJUeB0duK3QG6FzXf IBiRr4XFhOBybQrtd6X86OJrjKkRRS7In9awMqFKbpra6692ic8cyg43fV2rlXCtL4X4y/OvcxX W+PV6IYlEVKgo40f9+ORWcw2R3mGMOqpUqnUSLeqFt+dWAyhENEqSN9KxuGUBMEyyGlHG068v+7 /QHlEf+700OpVKS5/ X-Google-Smtp-Source: AGHT+IGnjTk0a0mfiBjwuGv227uRP9Dk+SEbZzgl1Av+K6Mpiq2gJfY0FC3kKP1PDl4FoLEMv+d8HA== X-Received: by 2002:a05:600c:5248:b0:43c:f4b3:b094 with SMTP id 5b1f17b1804b1-43d01bd1b62mr13380885e9.6.1741636484091; Mon, 10 Mar 2025 12:54:44 -0700 (PDT) Received: from MK2 (80-108-16-220.cable.dynamic.surfer.at. [80.108.16.220]) by smtp.gmail.com with ESMTPSA id 5b1f17b1804b1-43ce5c35709sm96976465e9.19.2025.03.10.12.54.43 for (version=TLS1_2 cipher=ECDHE-ECDSA-AES128-GCM-SHA256 bits=128/128); Mon, 10 Mar 2025 12:54:43 -0700 (PDT) From: To: Date: Mon, 10 Mar 2025 20:54:43 +0100 Message-ID: <004101db91f6$44d84c90$ce88e5b0$@gmail.com> MIME-Version: 1.0 X-Mailer: Microsoft Outlook 16.0 Content-Language: en-at Thread-Index: AduR9kM0j4xEDAHCSY2fsyuE+fxiPA== Subject: [FFmpeg-devel] [PATCH v2 FFmpeg 13/20] libavfilter/dnn/dnn_backend_torch: Clxp model loading implementation X-BeenThere: ffmpeg-devel@ffmpeg.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: FFmpeg development discussions and patches List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Reply-To: FFmpeg development discussions and patches Content-Type: text/plain; charset="us-ascii" Content-Transfer-Encoding: 7bit Errors-To: ffmpeg-devel-bounces@ffmpeg.org Sender: "ffmpeg-devel" Archived-At: List-Archive: List-Post: Signed-off-by: MaximilianKaindl --- 
libavfilter/dnn/dnn_backend_torch.cpp | 297 +++++++++++++++++++++++++- 1 file changed, 288 insertions(+), 9 deletions(-) diff --git a/libavfilter/dnn/dnn_backend_torch.cpp b/libavfilter/dnn/dnn_backend_torch.cpp index ea09845e05..3a0ef931f9 100644 --- a/libavfilter/dnn/dnn_backend_torch.cpp +++ b/libavfilter/dnn/dnn_backend_torch.cpp @@ -27,9 +27,11 @@ #include extern "C" { +#include "../dnn_filter_common.h" #include "dnn_io_proc.h" #include "dnn_backend_common.h" #include "libavutil/opt.h" +#include "libavutil/avstring.h" #include "libavutil/mem.h" #include "queue.h" #include "safe_queue.h" @@ -190,6 +192,196 @@ static void deleter(void *arg) av_freep(&arg); } +#if (CONFIG_LIBTOKENIZERS == 1) +static int get_tokenized_batch(THClxpContext *clxp_ctx, const char **labels, int label_count, + const char *tokenizer_path, DnnContext *ctx, const c10::Device &device) +{ + if (!labels || label_count <= 0) { + av_log(ctx, AV_LOG_ERROR, "Label file invalid.\n"); + return AVERROR(EINVAL); + } + + if (!tokenizer_path) { + av_log(ctx, AV_LOG_ERROR, "Tokenizer path not provided.\n"); + return AVERROR(EINVAL); + } + + TokenizerEncodeResult *results = NULL; + int ret; + + ret = ff_dnn_create_tokenizer_and_encode_batch(tokenizer_path, labels, label_count, &results, ctx); + + if (ret < 0) { + av_log(ctx, AV_LOG_ERROR, "Failed to tokenize batch text\n"); + return ret; + } + + const int64_t token_dimension = ctx->torch_option.token_dimension; + + // Create the tensors directly with the final batch dimensions + // Shape: [batch_size, token_dimension] + auto tokenized_text = torch::zeros({label_count, token_dimension}, torch::TensorOptions().dtype(torch::kInt64)); + auto attention_mask = torch::zeros({label_count, token_dimension}, torch::TensorOptions().dtype(torch::kInt64)); + + // Get accessors for direct, efficient memory access + auto tokens_accessor = tokenized_text.accessor(); + auto attention_accessor = attention_mask.accessor(); + + // Fill the tensors directly + for (int i = 0; 
i < label_count; i++) { + const int current_token_count = results[i].len; + + // Fill only the valid token positions, leaving zeros elsewhere + for (int j = 0; j < current_token_count && j < token_dimension; j++) { + tokens_accessor[i][j] = static_cast(results[i].token_ids[j]); + attention_accessor[i][j] = 1; + } + } + + clxp_ctx->tokenized_text = new torch::Tensor(tokenized_text); + clxp_ctx->attention_mask = new torch::Tensor(attention_mask); + + if (clxp_ctx->tokenized_text->device() != device) { + *clxp_ctx->tokenized_text = clxp_ctx->tokenized_text->to(device); + } + if (clxp_ctx->attention_mask->device() != device) { + *clxp_ctx->attention_mask = clxp_ctx->attention_mask->to(device); + } + + ff_dnn_tokenizer_free_results(results, label_count); + + return 0; +} + +static int test_clip_inference(THModel *th_model, const c10::Device &device) +{ + // Try given resolution + if (th_model->ctx->torch_option.input_resolution >= 0) { + try { + torch::Tensor test_input = torch::zeros(th_model->ctx->torch_option.input_resolution); + if (test_input.device() != device) { + test_input = test_input.to(device); + } + std::vector inputs; + inputs.push_back(test_input); + inputs.push_back(*th_model->clxp_ctx->tokenized_text); + auto output = th_model->jit_model->forward(inputs); + } catch (const std::exception &e) { + av_log(th_model->ctx, AV_LOG_ERROR, "CLIP Input Resolution %ld did not work\n", + th_model->ctx->torch_option.input_resolution); + return AVERROR(EINVAL); + } + return 0; + } + + // Common CLIP input dimensions to test + std::vector test_dims[] = { + {1, 3, 224, 224}, + {1, 3, 240, 240}, + {1, 3, 256, 256}, + {1, 3, 336, 336}, + {1, 3, 378, 378}, + {1, 3, 384, 384}, + {1, 3, 512, 512} + }; + bool found_dims = false; + int64_t resolution = 0; + + for (const auto &dims : test_dims) { + // Create test input tensor + torch::Tensor test_input = torch::zeros(dims); + if (test_input.device() != device) { + test_input = test_input.to(device); + } + try { + std::vector 
inputs; + inputs.push_back(test_input); + inputs.push_back(*th_model->clxp_ctx->tokenized_text); + auto output = th_model->jit_model->forward(inputs); + } catch (const std::exception &e) { + av_log(th_model->ctx, AV_LOG_WARNING, "CLIP Input Resolution %ld did not work\n", dims[2]); + continue; + } + resolution = dims[2]; + found_dims = true; + break; + } + if (!found_dims || resolution <= 0) { + av_log(th_model->ctx, AV_LOG_ERROR, "Failed to determine input resolution for CLIP model\n"); + return AVERROR(EINVAL); + } + // Log the resolution chosen for the CLIP model + av_log(th_model->ctx, AV_LOG_INFO, "Using input resolution %ldx%ld for CLIP model\n", resolution, resolution); + th_model->ctx->torch_option.input_resolution = resolution; + return 0; +} + +static int test_clap_inference(THModel *th_model, int64_t sample_rate, int64_t sample_duration, const c10::Device &device) +{ + try { + // Create dummy audio tensor to test model compatibility + int target_samples = sample_rate * sample_duration; + torch::Tensor dummy_audio = torch::zeros({1, target_samples}); + + // Try to move the tensor to the correct device + if (dummy_audio.device() != device) { + dummy_audio = dummy_audio.to(device); + } + + // Test inference with dummy audio using forward method + std::vector inputs; + inputs.push_back(dummy_audio); + inputs.push_back(*th_model->clxp_ctx->tokenized_text); + inputs.push_back(*th_model->clxp_ctx->attention_mask); + + auto audio_features = th_model->jit_model->forward(inputs); + } catch (const c10::Error &e) { + av_log(th_model->ctx, AV_LOG_ERROR, "Error during CLAP model initialization: %s\n", e.what()); + return AVERROR(EINVAL); + } catch (const std::exception &e) { + av_log(th_model->ctx, AV_LOG_ERROR, "Error during CLAP model inference testing\n"); + return AVERROR(EINVAL); + } + return 0; +} + +static int init_clxp_model(THModel *th_model, DNNFunctionType func_type, const char **labels, int label_count, + const char *tokenizer_path, const AVFilterContext 
*filter_ctx) +{ + c10::Device device = (*th_model->jit_model->parameters().begin()).device(); + th_model->clxp_ctx = (THClxpContext *)av_mallocz(sizeof(THClxpContext)); + if (!th_model->clxp_ctx) { + av_log(th_model->ctx, AV_LOG_ERROR, "Failed to allocate memory for CLIP context\n"); + return AVERROR(ENOMEM); + } + + int ret = get_tokenized_batch(th_model->clxp_ctx, labels, label_count, tokenizer_path, th_model->ctx, device); + if (ret < 0) { + av_log(th_model->ctx, AV_LOG_ERROR, "Failed to tokenize batch text for CLIP model\n"); + return ret; + } + return 0; +} +#endif + +static int copy_softmax_units(THModel *th_model, const int *softmax_units, int softmax_units_count) +{ + if (softmax_units && softmax_units_count > 0) { + th_model->clxp_ctx->softmax_units = (int *)av_malloc_array(softmax_units_count, sizeof(int)); + if (!th_model->clxp_ctx->softmax_units) { + av_log(th_model->ctx, AV_LOG_ERROR, "Failed to allocate memory for softmax units\n"); + return AVERROR(ENOMEM); + } + memcpy(th_model->clxp_ctx->softmax_units, softmax_units, softmax_units_count * sizeof(int)); + th_model->clxp_ctx->softmax_units_count = softmax_units_count; + } else { + th_model->clxp_ctx->softmax_units = NULL; + th_model->clxp_ctx->softmax_units_count = 0; + } + return 0; +} + + static int fill_model_input_th(THModel *th_model, THRequestItem *request) { LastLevelTaskItem *lltask = NULL; @@ -446,7 +638,7 @@ static THInferRequest *th_create_inference_request(void) return request; } -static DNNModel *dnn_load_model_th(DnnContext *ctx, DNNFunctionType func_type, AVFilterContext *filter_ctx) +static THModel *init_model_th(DnnContext *ctx, DNNFunctionType func_type, AVFilterContext *filter_ctx) { DNNModel *model = NULL; THModel *th_model = NULL; @@ -551,7 +743,7 @@ static DNNModel *dnn_load_model_th(DnnContext *ctx, DNNFunctionType func_type, A model->get_output = &get_output_th; model->filter_ctx = filter_ctx; model->func_type = func_type; - return model; + return th_model; fail: if (item) { 
@@ -562,6 +754,92 @@ fail: return NULL; } +static DNNModel *dnn_load_model_th(DnnContext *ctx, DNNFunctionType func_type, AVFilterContext *filter_ctx) +{ + THModel *th_model = init_model_th(ctx, func_type, filter_ctx); + if (!th_model) { + return NULL; + } + return &th_model->model; +} + +static DNNModel *dnn_load_model_with_tokenizer_th(DnnContext *ctx, DNNFunctionType func_type, const char **labels, + int label_count, int *softmax_units, int softmax_units_count, + const char *tokenizer_path, AVFilterContext *filter_ctx) +{ + int ret; + THModel *th_model = init_model_th(ctx, func_type, filter_ctx); + if (th_model == NULL) { + return NULL; + } + + if (ctx->torch_option.forward_order < 0) { + // set default value for forward_order + ctx->torch_option.forward_order = func_type == DFT_ANALYTICS_CLAP ? 1 : 0; + // Log the default value for forward_order + av_log(ctx, AV_LOG_INFO, "Using default forward_order=%d for %s input\n", ctx->torch_option.forward_order, + func_type == DFT_ANALYTICS_CLAP ? "audio" : "video"); + } + if (ctx->torch_option.logit_scale <= 0) { + // set default value for logit_scale + ctx->torch_option.logit_scale = func_type == DFT_ANALYTICS_CLAP ? 33.37 : 4.6052; + // Log the default value for logit_scale + av_log(ctx, AV_LOG_INFO, "Using default logit_scale=%.4f for %s input\n", ctx->torch_option.logit_scale, + func_type == DFT_ANALYTICS_CLAP ? "audio" : "video"); + } + if (ctx->torch_option.temperature <= 0) { + // set default value for temperature + ctx->torch_option.temperature = 1; + // Log the default value for temperature + av_log(ctx, AV_LOG_INFO, "Using default temperature=%.4f for %s input\n", ctx->torch_option.temperature, + func_type == DFT_ANALYTICS_CLAP ? "audio" : "video"); + } + if (ctx->torch_option.normalize < 0) { + ctx->torch_option.normalize = func_type == DFT_ANALYTICS_CLAP ? 
1 : 0; + // Log the default value for normalize + av_log(ctx, AV_LOG_INFO, "Using default normalize=%d for %s input\n", ctx->torch_option.normalize, + func_type == DFT_ANALYTICS_CLAP ? "audio" : "video"); + } + +#if (CONFIG_LIBTOKENIZERS == 1) + // Check if this is a CLXP model and initialize accordingly + auto model = &th_model->model; + if ((func_type == DFT_ANALYTICS_CLIP || func_type == DFT_ANALYTICS_CLAP)) { + ret = init_clxp_model(th_model, func_type, labels, label_count, tokenizer_path, filter_ctx); + if (ret < 0) { + av_log(ctx, AV_LOG_ERROR, "Failed to initialize CLXP model\n"); + dnn_free_model_th(&model); + return NULL; + } + ret = copy_softmax_units(th_model, softmax_units, softmax_units_count); + if (ret < 0) { + av_log(ctx, AV_LOG_ERROR, "Failed to copy softmax units\n"); + dnn_free_model_th(&model); + return NULL; + } + } + c10::Device device = (*th_model->jit_model->parameters().begin()).device(); + + if (func_type == DFT_ANALYTICS_CLIP) { + ret = test_clip_inference(th_model, device); + if (ret < 0) { + av_log(ctx, AV_LOG_ERROR, "Failed to test CLIP inference\n"); + dnn_free_model_th(&model); + return NULL; + } + } else if (func_type == DFT_ANALYTICS_CLAP) { + ret = test_clap_inference(th_model, th_model->ctx->torch_option.sample_rate, + th_model->ctx->torch_option.sample_duration, device); + if (ret < 0) { + av_log(ctx, AV_LOG_ERROR, "Failed to test CLAP inference\n"); + dnn_free_model_th(&model); + return NULL; + } + } +#endif + return &th_model->model; +} + static int dnn_execute_model_th(const DNNModel *model, DNNExecBaseParams *exec_params) { THModel *th_model = (THModel *)model; @@ -636,11 +914,12 @@ static int dnn_flush_th(const DNNModel *model) } extern const DNNModule ff_dnn_backend_torch = { - .clazz = DNN_DEFINE_CLASS(dnn_th), - .type = DNN_TH, - .load_model = dnn_load_model_th, - .execute_model = dnn_execute_model_th, - .get_result = dnn_get_result_th, - .flush = dnn_flush_th, - .free_model = dnn_free_model_th, + .clazz = 
DNN_DEFINE_CLASS(dnn_th), + .type = DNN_TH, + .load_model = dnn_load_model_th, + .load_model_with_tokenizer = dnn_load_model_with_tokenizer_th, + .execute_model = dnn_execute_model_th, + .get_result = dnn_get_result_th, + .flush = dnn_flush_th, + .free_model = dnn_free_model_th, }; -- 2.34.1 _______________________________________________ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".