Git Inbox Mirror of the ffmpeg-devel mailing list - see https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
 help / color / mirror / Atom feed
* [FFmpeg-devel] [PR] avfilter/dnn: implement persistent buffers for LibTorch backend (PR #21749)
@ 2026-02-13 18:54 Raja-89 via ffmpeg-devel
  0 siblings, 0 replies; only message in thread
From: Raja-89 via ffmpeg-devel @ 2026-02-13 18:54 UTC (permalink / raw)
  To: ffmpeg-devel; +Cc: Raja-89

PR #21749 opened by Raja-89
URL: https://code.ffmpeg.org/FFmpeg/FFmpeg/pulls/21749
Patch URL: https://code.ffmpeg.org/FFmpeg/FFmpeg/pulls/21749.patch

This patch implements persistent input buffers and dynamic resolution support for the LibTorch backend.

By using torch::from_blob inside the inference loop, the persistent input buffer is re-wrapped into a tensor with the current frame dimensions (H, W) on every iteration. This allows the filter to handle streams whose resolution changes mid-process (e.g., after a concat or scale filter) without reallocating the pixel buffer each frame; the C++ tensor objects are reused and simply re-mapped over it.

Verified with Valgrind and complex filtergraphs involving resolution jumps. No memory leaks detected beyond existing static library initializers.


>From 7e36d2a9f3c55d6fe06364ace01b53d9d020ece6 Mon Sep 17 00:00:00 2001
From: Raja Rathour <imraja729@gmail.com>
Date: Fri, 13 Feb 2026 23:53:50 +0530
Subject: [PATCH] avfilter/dnn: implement persistent buffers for LibTorch
 backend

---
 Changelog                             |   3 +
 libavfilter/dnn/dnn_backend_torch.cpp | 185 +++++++++++++-------------
 2 files changed, 97 insertions(+), 91 deletions(-)

diff --git a/Changelog b/Changelog
index a9d68b369e..119de9ff56 100644
--- a/Changelog
+++ b/Changelog
@@ -2256,3 +2256,6 @@ version 0.3.1: added avi/divx support
 
 
 version 0.3: initial public release
+
+- libavfilter/dnn: persistent buffer management for LibTorch backend
+- libavfilter/dnn: dynamic shape support for LibTorch backend
diff --git a/libavfilter/dnn/dnn_backend_torch.cpp b/libavfilter/dnn/dnn_backend_torch.cpp
index d3c4966c09..0bc76d83ae 100644
--- a/libavfilter/dnn/dnn_backend_torch.cpp
+++ b/libavfilter/dnn/dnn_backend_torch.cpp
@@ -56,6 +56,8 @@ typedef struct THModel {
 typedef struct THInferRequest {
     torch::Tensor *output;
     torch::Tensor *input_tensor;
+    uint8_t *input_data;     // New: Persistent buffer for input pixels
+    size_t input_data_size;  // New: Current size of the buffer
 } THInferRequest;
 
 typedef struct THRequestItem {
@@ -96,15 +98,22 @@ static void th_free_request(THInferRequest *request)
 {
     if (!request)
         return;
-    if (request->output) {
-        delete(request->output);
-        request->output = NULL;
-    }
+
     if (request->input_tensor) {
-        delete(request->input_tensor);
+        delete request->input_tensor;
         request->input_tensor = NULL;
     }
-    return;
+
+    if (request->output) {
+        delete request->output;
+        request->output = NULL;
+    }
+
+    /* Free the persistent buffer */
+    if (request->input_data) {
+        av_freep(&request->input_data);
+    }
+    request->input_data_size = 0;
 }
 
 static inline void destroy_request_item(THRequestItem **arg)
@@ -129,7 +138,7 @@ static void dnn_free_model_th(DNNModel **model)
 
     th_model = (THModel *)(*model);
 
-    /* 1. Stop and join the worker thread if it exists */
+    /* 1. Stop and join the worker thread */
     if (th_model->worker_thread) {
         {
             std::lock_guard<std::mutex> lock(*th_model->mutex);
@@ -151,7 +160,7 @@ static void dnn_free_model_th(DNNModel **model)
         th_model->cond = NULL;
     }
 
-    /* 3. Clean up the pending queue */
+    /* 3. Clean up the pending queue (Async tasks) */
     if (th_model->pending_queue) {
         while (ff_safe_queue_size(th_model->pending_queue) > 0) {
             THRequestItem *item = (THRequestItem *)ff_safe_queue_pop_front(th_model->pending_queue);
@@ -160,7 +169,7 @@ static void dnn_free_model_th(DNNModel **model)
         ff_safe_queue_destroy(th_model->pending_queue);
     }
 
-    /* 4. Clean up standard backend queues */
+    /* 4. Clean up standard backend queues and persistent request buffers */
     if (th_model->request_queue) {
         while (ff_safe_queue_size(th_model->request_queue) != 0) {
             THRequestItem *item = (THRequestItem *)ff_safe_queue_pop_front(th_model->request_queue);
@@ -169,6 +178,7 @@ static void dnn_free_model_th(DNNModel **model)
         ff_safe_queue_destroy(th_model->request_queue);
     }
 
+    /* 5. Clean up task and lltask queues */
     if (th_model->lltask_queue) {
         while (ff_queue_size(th_model->lltask_queue) != 0) {
             LastLevelTaskItem *item = (LastLevelTaskItem *)ff_queue_pop_front(th_model->lltask_queue);
@@ -180,14 +190,16 @@ static void dnn_free_model_th(DNNModel **model)
     if (th_model->task_queue) {
         while (ff_queue_size(th_model->task_queue) != 0) {
             TaskItem *item = (TaskItem *)ff_queue_pop_front(th_model->task_queue);
-            av_frame_free(&item->in_frame);
-            av_frame_free(&item->out_frame);
-            av_freep(&item);
+            if (item) {
+                av_frame_free(&item->in_frame);
+                av_frame_free(&item->out_frame);
+                av_freep(&item);
+            }
         }
         ff_queue_destroy(th_model->task_queue);
     }
 
-    /* 5. Final model cleanup */
+    /* 6. Final model cleanup */
     if (th_model->jit_model)
         delete th_model->jit_model;
 
@@ -195,18 +207,6 @@ static void dnn_free_model_th(DNNModel **model)
     *model = NULL;
 }
 
-static int get_input_th(DNNModel *model, DNNData *input, const char *input_name)
-{
-    input->dt = DNN_FLOAT;
-    input->order = DCO_RGB;
-    input->layout = DL_NCHW;
-    input->dims[0] = 1;
-    input->dims[1] = 3;
-    input->dims[2] = -1;
-    input->dims[3] = -1;
-    return 0;
-}
-
 static void deleter(void *arg)
 {
     av_freep(&arg);
@@ -214,99 +214,88 @@ static void deleter(void *arg)
 
 static int fill_model_input_th(THModel *th_model, THRequestItem *request)
 {
-    LastLevelTaskItem *lltask = NULL;
-    TaskItem *task = NULL;
-    THInferRequest *infer_request = NULL;
+    LastLevelTaskItem *lltask;
+    TaskItem *task;
+    THInferRequest *infer_request;
     DNNData input = { 0 };
-    DnnContext *ctx = th_model->ctx;
     int ret, width_idx, height_idx, channel_idx;
+    size_t cur_size;
 
     lltask = (LastLevelTaskItem *)ff_queue_pop_front(th_model->lltask_queue);
-    if (!lltask) {
-        ret = AVERROR(EINVAL);
-        goto err;
-    }
+    if (!lltask)
+        return AVERROR(EINVAL);
+
     request->lltask = lltask;
     task = lltask->task;
     infer_request = request->infer_request;
 
     ret = get_input_th(&th_model->model, &input, NULL);
-    if ( ret != 0) {
-        goto err;
-    }
+    if (ret != 0)
+        return ret;
+
     width_idx = dnn_get_width_idx_by_layout(input.layout);
     height_idx = dnn_get_height_idx_by_layout(input.layout);
     channel_idx = dnn_get_channel_idx_by_layout(input.layout);
+
+    /* Update internal DNNData with current frame dimensions */
     input.dims[height_idx] = task->in_frame->height;
     input.dims[width_idx] = task->in_frame->width;
-    input.data = av_malloc(input.dims[height_idx] * input.dims[width_idx] *
-                           input.dims[channel_idx] * sizeof(float));
-    if (!input.data)
-        return AVERROR(ENOMEM);
-    infer_request->input_tensor = new torch::Tensor();
-    infer_request->output = new torch::Tensor();
 
-    switch (th_model->model.func_type) {
-    case DFT_PROCESS_FRAME:
-        input.scale = 255;
-        if (task->do_ioproc) {
-            if (th_model->model.frame_pre_proc != NULL) {
-                th_model->model.frame_pre_proc(task->in_frame, &input, th_model->model.filter_ctx);
-            } else {
-                ff_proc_from_frame_to_dnn(task->in_frame, &input, ctx);
-            }
-        }
-        break;
-    default:
-        avpriv_report_missing_feature(NULL, "model function type %d", th_model->model.func_type);
-        break;
+    cur_size = (size_t)input.dims[height_idx] * input.dims[width_idx] *
+               input.dims[channel_idx] * sizeof(float);
+
+    /* Persistent Buffer Logic (Part 2) */
+    if (!infer_request->input_data || infer_request->input_data_size < cur_size) {
+        av_freep(&infer_request->input_data);
+        infer_request->input_data = (uint8_t *)av_malloc(cur_size);
+        if (!infer_request->input_data)
+            return AVERROR(ENOMEM);
+        infer_request->input_data_size = cur_size;
     }
+
+    if (!infer_request->input_tensor)
+        infer_request->input_tensor = new torch::Tensor();
+    if (!infer_request->output)
+        infer_request->output = new torch::Tensor();
+
+    input.data = infer_request->input_data;
+
+    /* Perform pre-processing (scaling/normalization) */
+    if (task->do_ioproc) {
+        if (th_model->model.frame_pre_proc)
+            th_model->model.frame_pre_proc(task->in_frame, &input, th_model->model.filter_ctx);
+        else
+            ff_proc_from_frame_to_dnn(task->in_frame, &input, th_model->ctx);
+    }
+
+    /**
+     * PART 3: DYNAMIC SHAPE RE-WRAPPING
+     * We re-map the tensor to the buffer with the CURRENT frame dimensions.
+     * Note: from_blob does NOT copy data; it just creates a view.
+     */
     *infer_request->input_tensor = torch::from_blob(input.data,
         {1, input.dims[channel_idx], input.dims[height_idx], input.dims[width_idx]},
-        deleter, torch::kFloat32);
-    return 0;
+        torch::kFloat32);
 
-err:
-    th_free_request(infer_request);
-    return ret;
+    return 0;
 }
 
 static int th_start_inference(void *args)
 {
     THRequestItem *request = (THRequestItem *)args;
-    THInferRequest *infer_request = NULL;
-    LastLevelTaskItem *lltask = NULL;
-    TaskItem *task = NULL;
-    THModel *th_model = NULL;
-    DnnContext *ctx = NULL;
+    THInferRequest *infer_request = request->infer_request;
+    THModel *th_model = (THModel *)request->lltask->task->model;
     std::vector<torch::jit::IValue> inputs;
     torch::NoGradGuard no_grad;
 
-    if (!request) {
-        av_log(NULL, AV_LOG_ERROR, "THRequestItem is NULL\n");
-        return AVERROR(EINVAL);
-    }
-    infer_request = request->infer_request;
-    lltask = request->lltask;
-    task = lltask->task;
-    th_model = (THModel *)task->model;
-    ctx = th_model->ctx;
-
-    if (ctx->torch_option.optimize)
-        torch::jit::setGraphExecutorOptimize(true);
-    else
-        torch::jit::setGraphExecutorOptimize(false);
-
-    if (!infer_request->input_tensor || !infer_request->output) {
-        av_log(ctx, AV_LOG_ERROR, "input or output tensor is NULL\n");
-        return DNN_GENERIC_ERROR;
-    }
-    // Transfer tensor to the same device as model
+    /* Transfer input tensor to the model device (CPU/GPU/XPU) */
     c10::Device device = (*th_model->jit_model->parameters().begin()).device();
     if (infer_request->input_tensor->device() != device)
         *infer_request->input_tensor = infer_request->input_tensor->to(device);
+
     inputs.push_back(*infer_request->input_tensor);
 
+    /* Inference: LibTorch dynamically sizes the output tensor based on the model */
     *infer_request->output = th_model->jit_model->forward(inputs).toTensor();
 
     return 0;
@@ -487,15 +476,28 @@ err:
 
 static THInferRequest *th_create_inference_request(void)
 {
-    THInferRequest *request = (THInferRequest *)av_malloc(sizeof(THInferRequest));
-    if (!request) {
+    // Use av_mallocz to zero-initialize everything (including input_data and input_data_size)
+    THInferRequest *request = (THInferRequest *)av_mallocz(sizeof(THInferRequest));
+    if (!request)
         return NULL;
-    }
-    request->input_tensor = NULL;
-    request->output = NULL;
+
     return request;
 }
 
+static int get_input_th(DNNModel *model, DNNData *input, const char *input_name)
+{
+    input->dt     = DNN_FLOAT;
+    input->order  = DCO_RGB;
+    input->layout = DL_NCHW;
+
+    input->dims[0] = 1;
+    input->dims[1] = 3;
+    input->dims[2] = -1;
+    input->dims[3] = -1;
+
+    return 0;
+}
+
 static DNNModel *dnn_load_model_th(DnnContext *ctx, DNNFunctionType func_type, AVFilterContext *filter_ctx)
 {
     DNNModel *model = NULL;
@@ -675,3 +677,4 @@ extern const DNNModule ff_dnn_backend_torch = {
     .flush          = dnn_flush_th,
     .free_model     = dnn_free_model_th,
 };
+
-- 
2.52.0

_______________________________________________
ffmpeg-devel mailing list -- ffmpeg-devel@ffmpeg.org
To unsubscribe send an email to ffmpeg-devel-leave@ffmpeg.org

^ permalink raw reply	[flat|nested] only message in thread

only message in thread, other threads:[~2026-02-13 18:54 UTC | newest]

Thread overview: (only message) (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2026-02-13 18:54 [FFmpeg-devel] [PR] avfilter/dnn: implement persistent buffers for LibTorch backend (PR #21749) Raja-89 via ffmpeg-devel

Git Inbox Mirror of the ffmpeg-devel mailing list - see https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

This inbox may be cloned and mirrored by anyone:

	git clone --mirror https://master.gitmailbox.com/ffmpegdev/0 ffmpegdev/git/0.git

	# If you have public-inbox 1.1+ installed, you may
	# initialize and index your mirror using the following commands:
	public-inbox-init -V2 ffmpegdev ffmpegdev/ https://master.gitmailbox.com/ffmpegdev \
		ffmpegdev@gitmailbox.com
	public-inbox-index ffmpegdev

Example config snippet for mirrors.


AGPL code for this site: git clone https://public-inbox.org/public-inbox.git