Git Inbox Mirror of the ffmpeg-devel mailing list - see https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
[FFmpeg-devel] [PATCH v2 FFmpeg 13/20] libavfilter/dnn/dnn_backend_torch: CLXP model loading implementation
From: m.kaindl0208 @ 2025-03-10 19:54 UTC
  To: ffmpeg-devel

Signed-off-by: MaximilianKaindl <m.kaindl0208@gmail.com>
---
 libavfilter/dnn/dnn_backend_torch.cpp | 297 +++++++++++++++++++++++++-
 1 file changed, 288 insertions(+), 9 deletions(-)

diff --git a/libavfilter/dnn/dnn_backend_torch.cpp b/libavfilter/dnn/dnn_backend_torch.cpp
index ea09845e05..3a0ef931f9 100644
--- a/libavfilter/dnn/dnn_backend_torch.cpp
+++ b/libavfilter/dnn/dnn_backend_torch.cpp
@@ -27,9 +27,11 @@
 #include <torch/script.h>

 extern "C" {
+#include "../dnn_filter_common.h"
 #include "dnn_io_proc.h"
 #include "dnn_backend_common.h"
 #include "libavutil/opt.h"
+#include "libavutil/avstring.h"
 #include "libavutil/mem.h"
 #include "queue.h"
 #include "safe_queue.h"
@@ -190,6 +192,196 @@ static void deleter(void *arg)
     av_freep(&arg);
 }

+#if (CONFIG_LIBTOKENIZERS == 1)
+static int get_tokenized_batch(THClxpContext *clxp_ctx, const char **labels, int label_count,
+                                const char *tokenizer_path, DnnContext *ctx, const c10::Device &device)
+{
+    if (!labels || label_count <= 0) {
+        av_log(ctx, AV_LOG_ERROR, "Label file invalid.\n");
+        return AVERROR(EINVAL);
+    }
+
+    if (!tokenizer_path) {
+        av_log(ctx, AV_LOG_ERROR, "Tokenizer path not provided.\n");
+        return AVERROR(EINVAL);
+    }
+
+    TokenizerEncodeResult *results = NULL;
+    int ret;
+
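+    // One TokenizerEncodeResult per label: the encoded token ids and their count.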
+    ret = ff_dnn_create_tokenizer_and_encode_batch(tokenizer_path, labels, label_count, &results, ctx);
+
+    if (ret < 0) {
+        av_log(ctx, AV_LOG_ERROR, "Failed to tokenize batch text\n");
+        return ret;
+    }
+
+    const int64_t token_dimension = ctx->torch_option.token_dimension;
+
+    // Create the tensors directly with the final batch dimensions
+    // Shape: [batch_size, token_dimension]
+    auto tokenized_text = torch::zeros({label_count, token_dimension}, torch::TensorOptions().dtype(torch::kInt64));
+    auto attention_mask = torch::zeros({label_count, token_dimension}, torch::TensorOptions().dtype(torch::kInt64));
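+    // attention_mask holds 1 for real token positions and stays 0 for padding,
+    // mirroring the zero-initialized tokenized_text above.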
+
+    // Get accessors for direct, efficient memory access
+    auto tokens_accessor = tokenized_text.accessor<int64_t, 2>();
+    auto attention_accessor = attention_mask.accessor<int64_t, 2>();
+
+    // Fill the tensors directly
+    for (int i = 0; i < label_count; i++) {
+        const int current_token_count = results[i].len;
+
+        // Fill only the valid token positions, leaving zeros elsewhere
+        for (int j = 0; j < current_token_count && j < token_dimension; j++) {
+            tokens_accessor[i][j] = static_cast<int64_t>(results[i].token_ids[j]);
+            attention_accessor[i][j] = 1;
+        }
+    }
+
+    clxp_ctx->tokenized_text = new torch::Tensor(tokenized_text);
+    clxp_ctx->attention_mask = new torch::Tensor(attention_mask);
+
+    if (clxp_ctx->tokenized_text->device() != device) {
+        *clxp_ctx->tokenized_text = clxp_ctx->tokenized_text->to(device);
+    }
+    if (clxp_ctx->attention_mask->device() != device) {
+        *clxp_ctx->attention_mask = clxp_ctx->attention_mask->to(device);
+    }
+
+    ff_dnn_tokenizer_free_results(results, label_count);
+
+    return 0;
+}
+
+static int test_clip_inference(THModel *th_model, const c10::Device &device)
+{
+    // Try given resolution
+    if (th_model->ctx->torch_option.input_resolution >= 0) {
+        try {
+            const int64_t res = th_model->ctx->torch_option.input_resolution;
+            torch::Tensor test_input = torch::zeros({1, 3, res, res});
+            if (test_input.device() != device) {
+                test_input = test_input.to(device);
+            }
+            std::vector<torch::jit::IValue> inputs;
+            inputs.push_back(test_input);
+            inputs.push_back(*th_model->clxp_ctx->tokenized_text);
+            auto output = th_model->jit_model->forward(inputs);
+        } catch (const std::exception &e) {
+            av_log(th_model->ctx, AV_LOG_ERROR, "CLIP input resolution %" PRId64 " did not work\n",
+                    th_model->ctx->torch_option.input_resolution);
+            return AVERROR(EINVAL);
+        }
+        return 0;
+    }
+
+    // Common CLIP input dimensions to test
+    std::vector<int64_t> test_dims[] = {
+        {1, 3, 224, 224},
+        {1, 3, 240, 240},
+        {1, 3, 256, 256},
+        {1, 3, 336, 336},
+        {1, 3, 378, 378},
+        {1, 3, 384, 384},
+        {1, 3, 512, 512}
+    };
+    bool found_dims = false;
+    int64_t resolution = 0;
+
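+    // Probe each candidate shape with a zero-filled tensor; the first one the
+    // scripted model accepts is kept as the CLIP input resolution.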
+    for (const auto &dims : test_dims) {
+        // Create test input tensor
+        torch::Tensor test_input = torch::zeros(dims);
+        if (test_input.device() != device) {
+            test_input = test_input.to(device);
+        }
+        try {
+            std::vector<torch::jit::IValue> inputs;
+            inputs.push_back(test_input);
+            inputs.push_back(*th_model->clxp_ctx->tokenized_text);
+            auto output = th_model->jit_model->forward(inputs);
+        } catch (const std::exception &e) {
+            av_log(th_model->ctx, AV_LOG_WARNING, "CLIP input resolution %" PRId64 " did not work\n", dims[2]);
+            continue;
+        }
+        resolution = dims[2];
+        found_dims = true;
+        break;
+    }
+    if (!found_dims || resolution <= 0) {
+        av_log(th_model->ctx, AV_LOG_ERROR, "Failed to determine input resolution for CLIP model\n");
+        return AVERROR(EINVAL);
+    }
+    // Log the resolution chosen for the CLIP model
+    av_log(th_model->ctx, AV_LOG_INFO, "Using input resolution %" PRId64 "x%" PRId64 " for CLIP model\n", resolution, resolution);
+    th_model->ctx->torch_option.input_resolution = resolution;
+    return 0;
+}
+
+static int test_clap_inference(THModel *th_model, int64_t sample_rate, int64_t sample_duration, const c10::Device &device)
+{
+    try {
+        // Create dummy audio tensor to test model compatibility
+        int64_t target_samples = sample_rate * sample_duration;
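+        // A silent (all-zero) waveform of the expected length is enough to
+        // exercise the model's input handling.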
+        torch::Tensor dummy_audio = torch::zeros({1, target_samples});
+
+        // Try to move the tensor to the correct device
+        if (dummy_audio.device() != device) {
+            dummy_audio = dummy_audio.to(device);
+        }
+
+        // Test inference with dummy audio using the forward method
+        std::vector<torch::jit::IValue> inputs;
+        inputs.push_back(dummy_audio);
+        inputs.push_back(*th_model->clxp_ctx->tokenized_text);
+        inputs.push_back(*th_model->clxp_ctx->attention_mask);
+
+        auto audio_features = th_model->jit_model->forward(inputs);
+    } catch (const c10::Error &e) {
+        av_log(th_model->ctx, AV_LOG_ERROR, "Torch error during CLAP model inference testing: %s\n", e.what());
+        return AVERROR(EINVAL);
+    } catch (const std::exception &e) {
+        av_log(th_model->ctx, AV_LOG_ERROR, "Error during CLAP model inference testing: %s\n", e.what());
+        return AVERROR(EINVAL);
+    }
+    return 0;
+}
+
+static int init_clxp_model(THModel *th_model, DNNFunctionType func_type, const char **labels, int label_count,
+                            const char *tokenizer_path, const AVFilterContext *filter_ctx)
+{
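+    // Keep the tokenized label tensors on the same device as the model parameters.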
+    c10::Device device = (*th_model->jit_model->parameters().begin()).device();
+    th_model->clxp_ctx = (THClxpContext *)av_mallocz(sizeof(THClxpContext));
+    if (!th_model->clxp_ctx) {
+        av_log(th_model->ctx, AV_LOG_ERROR, "Failed to allocate memory for CLXP context\n");
+        return AVERROR(ENOMEM);
+    }
+
+    int ret = get_tokenized_batch(th_model->clxp_ctx, labels, label_count, tokenizer_path, th_model->ctx, device);
+    if (ret < 0) {
+        av_log(th_model->ctx, AV_LOG_ERROR, "Failed to tokenize batch text for CLXP model\n");
+        return ret;
+    }
+    return 0;
+}
+#endif
+
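+// Duplicate the caller-provided softmax unit sizes into the model context so
+// the model owns its own copy of the array.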
+static int copy_softmax_units(THModel *th_model, const int *softmax_units, int softmax_units_count)
+{
+    if (softmax_units && softmax_units_count > 0) {
+        th_model->clxp_ctx->softmax_units = (int *)av_malloc_array(softmax_units_count, sizeof(int));
+        if (!th_model->clxp_ctx->softmax_units) {
+            av_log(th_model->ctx, AV_LOG_ERROR, "Failed to allocate memory for softmax units\n");
+            return AVERROR(ENOMEM);
+        }
+        memcpy(th_model->clxp_ctx->softmax_units, softmax_units, softmax_units_count * sizeof(int));
+        th_model->clxp_ctx->softmax_units_count = softmax_units_count;
+    } else {
+        th_model->clxp_ctx->softmax_units = NULL;
+        th_model->clxp_ctx->softmax_units_count = 0;
+    }
+    return 0;
+}
+
 static int fill_model_input_th(THModel *th_model, THRequestItem *request)
 {
     LastLevelTaskItem *lltask = NULL;
@@ -446,7 +638,7 @@ static THInferRequest *th_create_inference_request(void)
     return request;
 }

-static DNNModel *dnn_load_model_th(DnnContext *ctx, DNNFunctionType func_type, AVFilterContext *filter_ctx)
+static THModel *init_model_th(DnnContext *ctx, DNNFunctionType func_type, AVFilterContext *filter_ctx)
 {
     DNNModel *model = NULL;
     THModel *th_model = NULL;
@@ -551,7 +743,7 @@ static DNNModel *dnn_load_model_th(DnnContext *ctx, DNNFunctionType func_type, A
     model->get_output = &get_output_th;
     model->filter_ctx = filter_ctx;
     model->func_type = func_type;
-    return model;
+    return th_model;

 fail:
     if (item) {
@@ -562,6 +754,92 @@ fail:
     return NULL;
 }

+static DNNModel *dnn_load_model_th(DnnContext *ctx, DNNFunctionType func_type, AVFilterContext *filter_ctx)
+{
+    THModel *th_model = init_model_th(ctx, func_type, filter_ctx);
+    if (!th_model) {
+        return NULL;
+    }
+    return &th_model->model;
+}
+
+static DNNModel *dnn_load_model_with_tokenizer_th(DnnContext *ctx, DNNFunctionType func_type, const char **labels,
+                                                    int label_count, int *softmax_units, int softmax_units_count,
+                                                    const char *tokenizer_path, AVFilterContext *filter_ctx)
+{
+    int ret;
+    THModel *th_model = init_model_th(ctx, func_type, filter_ctx);
+    if (th_model == NULL) {
+        return NULL;
+    }
+
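+    // Fill in defaults for options the user left unset, picking values suited to
+    // audio (CLAP) or video (CLIP) input.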
+    if (ctx->torch_option.forward_order < 0) {
+        // set default value for forward_order
+        ctx->torch_option.forward_order = func_type == DFT_ANALYTICS_CLAP ? 1 : 0;
+        // Log the default value for forward_order
+        av_log(ctx, AV_LOG_INFO, "Using default forward_order=%d for %s input\n", ctx->torch_option.forward_order,
+                func_type == DFT_ANALYTICS_CLAP ? "audio" : "video");
+    }
+    if (ctx->torch_option.logit_scale <= 0) {
+        // set default value for logit_scale
+        ctx->torch_option.logit_scale = func_type == DFT_ANALYTICS_CLAP ? 33.37 : 4.6052;
+        // Log the default value for logit_scale
+        av_log(ctx, AV_LOG_INFO, "Using default logit_scale=%.4f for %s input\n", ctx->torch_option.logit_scale,
+                func_type == DFT_ANALYTICS_CLAP ? "audio" : "video");
+    }
+    if (ctx->torch_option.temperature <= 0) {
+        // set default value for temperature
+        ctx->torch_option.temperature = 1;
+        // Log the default value for temperature
+        av_log(ctx, AV_LOG_INFO, "Using default temperature=%.4f for %s input\n", ctx->torch_option.temperature,
+                func_type == DFT_ANALYTICS_CLAP ? "audio" : "video");
+    }
+    if (ctx->torch_option.normalize < 0) {
+        ctx->torch_option.normalize = func_type == DFT_ANALYTICS_CLAP ? 1 : 0;
+        // Log the default value for normalize
+        av_log(ctx, AV_LOG_INFO, "Using default normalize=%d for %s input\n", ctx->torch_option.normalize,
+                func_type == DFT_ANALYTICS_CLAP ? "audio" : "video");
+    }
+
+#if (CONFIG_LIBTOKENIZERS == 1)
+    // Check if this is a CLXP model and initialize accordingly
+    auto model = &th_model->model;
+    if (func_type == DFT_ANALYTICS_CLIP || func_type == DFT_ANALYTICS_CLAP) {
+        ret = init_clxp_model(th_model, func_type, labels, label_count, tokenizer_path, filter_ctx);
+        if (ret < 0) {
+            av_log(ctx, AV_LOG_ERROR, "Failed to initialize CLXP model\n");
+            dnn_free_model_th(&model);
+            return NULL;
+        }
+        ret = copy_softmax_units(th_model, softmax_units, softmax_units_count);
+        if (ret < 0) {
+            av_log(ctx, AV_LOG_ERROR, "Failed to copy softmax units\n");
+            dnn_free_model_th(&model);
+            return NULL;
+        }
+    }
+    c10::Device device = (*th_model->jit_model->parameters().begin()).device();
+
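+    // Dry-run a forward pass at load time so incompatible input shapes fail
+    // here instead of on the first frame.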
+    if (func_type == DFT_ANALYTICS_CLIP) {
+        ret = test_clip_inference(th_model, device);
+        if (ret < 0) {
+            av_log(ctx, AV_LOG_ERROR, "Failed to test CLIP inference\n");
+            dnn_free_model_th(&model);
+            return NULL;
+        }
+    } else if (func_type == DFT_ANALYTICS_CLAP) {
+        ret = test_clap_inference(th_model, th_model->ctx->torch_option.sample_rate,
+                                    th_model->ctx->torch_option.sample_duration, device);
+        if (ret < 0) {
+            av_log(ctx, AV_LOG_ERROR, "Failed to test CLAP inference\n");
+            dnn_free_model_th(&model);
+            return NULL;
+        }
+    }
+#endif
+    return &th_model->model;
+}
+
 static int dnn_execute_model_th(const DNNModel *model, DNNExecBaseParams *exec_params)
 {
     THModel *th_model = (THModel *)model;
@@ -636,11 +914,12 @@ static int dnn_flush_th(const DNNModel *model)
 }

 extern const DNNModule ff_dnn_backend_torch = {
-    .clazz          = DNN_DEFINE_CLASS(dnn_th),
-    .type           = DNN_TH,
-    .load_model     = dnn_load_model_th,
-    .execute_model  = dnn_execute_model_th,
-    .get_result     = dnn_get_result_th,
-    .flush          = dnn_flush_th,
-    .free_model     = dnn_free_model_th,
+    .clazz                      = DNN_DEFINE_CLASS(dnn_th),
+    .type                       = DNN_TH,
+    .load_model                 = dnn_load_model_th,
+    .load_model_with_tokenizer  = dnn_load_model_with_tokenizer_th,
+    .execute_model              = dnn_execute_model_th,
+    .get_result                 = dnn_get_result_th,
+    .flush                      = dnn_flush_th,
+    .free_model                 = dnn_free_model_th,
 };
--
2.34.1

