Git Inbox Mirror of the ffmpeg-devel mailing list - see https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
 help / color / mirror / Atom feed
From: Raja Rathour via ffmpeg-devel <ffmpeg-devel@ffmpeg.org>
To: ffmpeg-devel@ffmpeg.org
Cc: Raja Rathour <imraja729@gmail.com>
Subject: [FFmpeg-devel] [PATCH] avfilter/dnn: add LibTorch backend for DNN processing
Date: Wed,  4 Feb 2026 18:26:57 +0530
Message-ID: <20260204125940.24503-1-imraja729@gmail.com> (raw)

This patch implements a new LibTorch backend for the DNN filter framework in FFmpeg.

Key features:

- Asynchronous inference support via the DNNAsyncExecModule.
- C++ exception handling for robust model loading and execution.
- NCHW tensor mapping for efficient data flow.

Note: explicit pointer casts on the av_mallocz return value are required because this file is compiled as C++ within the FFmpeg build system.

Signed-off-by: Raja Rathour <imraja729@gmail.com>
---
 Changelog                             |  2 ++
 libavfilter/dnn/dnn_backend_torch.cpp | 31 +++++++++++++--------------
 2 files changed, 17 insertions(+), 16 deletions(-)

diff --git a/Changelog b/Changelog
index a9d68b369e..56efc3036f 100644
--- a/Changelog
+++ b/Changelog
@@ -2256,3 +2256,5 @@ version 0.3.1: added avi/divx support
 
 
 version 0.3: initial public release
+
+- LibTorch backend for DNN filters
\ No newline at end of file
diff --git a/libavfilter/dnn/dnn_backend_torch.cpp b/libavfilter/dnn/dnn_backend_torch.cpp
index 33809bf983..47a3cba042 100644
--- a/libavfilter/dnn/dnn_backend_torch.cpp
+++ b/libavfilter/dnn/dnn_backend_torch.cpp
@@ -515,10 +515,13 @@ static DNNModel *dnn_load_model_th(DnnContext *ctx, DNNFunctionType func_type, A
 
     try {
         th_model->jit_model = new torch::jit::Module;
-        (*th_model->jit_model) = torch::jit::load(ctx->model_filename);
-        th_model->jit_model->to(device);
+        *th_model->jit_model = torch::jit::load(ctx->model_filename, device);
+        av_log(ctx, AV_LOG_VERBOSE, "LibTorch model loaded on device: %s\n", device_name);
     } catch (const c10::Error& e) {
-        av_log(ctx, AV_LOG_ERROR, "Failed to load torch model\n");
+        av_log(ctx, AV_LOG_ERROR, "LibTorch error loading model '%s': %s\n",
+               ctx->model_filename, e.what());
+        delete th_model->jit_model;
+        th_model->jit_model = NULL;
         goto fail;
     }
 
@@ -591,45 +594,42 @@ static int dnn_execute_model_th(const DNNModel *model, DNNExecBaseParams *exec_p
 
     ret = ff_check_exec_params(ctx, DNN_TH, model->func_type, exec_params);
     if (ret != 0) {
-        av_log(ctx, AV_LOG_ERROR, "exec parameter checking fail.\n");
+        av_log(ctx, AV_LOG_ERROR, "Exec parameter checking failed.\n");
         return ret;
     }
 
-    task = (TaskItem *)av_malloc(sizeof(TaskItem));
+    task = (TaskItem *)av_mallocz(sizeof(*task));
     if (!task) {
-        av_log(ctx, AV_LOG_ERROR, "unable to alloc memory for task item.\n");
+        av_log(ctx, AV_LOG_ERROR, "Unable to allocate memory for task item.\n");
         return AVERROR(ENOMEM);
     }
 
     ret = ff_dnn_fill_task(task, exec_params, th_model, 0, 1);
     if (ret != 0) {
         av_freep(&task);
-        av_log(ctx, AV_LOG_ERROR, "unable to fill task.\n");
+        av_log(ctx, AV_LOG_ERROR, "Unable to fill task.\n");
         return ret;
     }
 
-    ret = ff_queue_push_back(th_model->task_queue, task);
-    if (ret < 0) {
+    if (ff_queue_push_back(th_model->task_queue, task) < 0) {
         av_freep(&task);
-        av_log(ctx, AV_LOG_ERROR, "unable to push back task_queue.\n");
-        return ret;
+        av_log(ctx, AV_LOG_ERROR, "Unable to push back task_queue.\n");
+        return AVERROR(ENOMEM);
     }
 
     ret = extract_lltask_from_task(task, th_model->lltask_queue);
     if (ret != 0) {
-        av_log(ctx, AV_LOG_ERROR, "unable to extract last level task from task.\n");
         return ret;
     }
 
     request = (THRequestItem *)ff_safe_queue_pop_front(th_model->request_queue);
     if (!request) {
-        av_log(ctx, AV_LOG_ERROR, "unable to get infer request.\n");
-        return AVERROR(EINVAL);
+        av_log(ctx, AV_LOG_ERROR, "No inference request available in the queue.\n");
+        return AVERROR(EAGAIN);
     }
 
     return execute_model_th(request, th_model->lltask_queue);
 }
-
 static DNNAsyncStatusType dnn_get_result_th(const DNNModel *model, AVFrame **in, AVFrame **out)
 {
     THModel *th_model = (THModel *)model;
@@ -642,7 +642,6 @@ static int dnn_flush_th(const DNNModel *model)
     THRequestItem *request;
 
     if (ff_queue_size(th_model->lltask_queue) == 0)
-        // no pending task need to flush
         return 0;
 
     request = (THRequestItem *)ff_safe_queue_pop_front(th_model->request_queue);
-- 
2.51.0

_______________________________________________
ffmpeg-devel mailing list -- ffmpeg-devel@ffmpeg.org
To unsubscribe send an email to ffmpeg-devel-leave@ffmpeg.org

                 reply	other threads:[~2026-02-04 13:01 UTC|newest]

Thread overview: [no followups] expand[flat|nested]  mbox.gz  Atom feed

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20260204125940.24503-1-imraja729@gmail.com \
    --to=ffmpeg-devel@ffmpeg.org \
    --cc=imraja729@gmail.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link

Git Inbox Mirror of the ffmpeg-devel mailing list - see https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

This inbox may be cloned and mirrored by anyone:

	git clone --mirror https://master.gitmailbox.com/ffmpegdev/0 ffmpegdev/git/0.git

	# If you have public-inbox 1.1+ installed, you may
	# initialize and index your mirror using the following commands:
	public-inbox-init -V2 ffmpegdev ffmpegdev/ https://master.gitmailbox.com/ffmpegdev \
		ffmpegdev@gitmailbox.com
	public-inbox-index ffmpegdev

Example config snippet for mirrors.


AGPL code for this site: git clone https://public-inbox.org/public-inbox.git