* [FFmpeg-devel] [PATCH 1/3] avfilter/dnn_backend_torch: implement async execution
@ 2025-12-25 10:33 Raja Rathour via ffmpeg-devel
2025-12-31 6:41 ` [FFmpeg-devel] " Guo, Yejun via ffmpeg-devel
0 siblings, 1 reply; 6+ messages in thread
From: Raja Rathour via ffmpeg-devel @ 2025-12-25 10:33 UTC (permalink / raw)
To: ffmpeg-devel; +Cc: Raja Rathour
Signed-off-by: Raja Rathour <imraja729@gmail.com>
This patch implements asynchronous model execution for the LibTorch backend
in FFmpeg's DNN module.
Key changes:
- Integrated a worker thread and a pending queue to handle inference.
- Prevents the main filter thread from blocking during model execution.
- Aligns LibTorch backend behavior with the existing OpenVINO async implementation.
- Improves overall throughput for deep learning filters using LibTorch.
The implementation has been tested with various torch models to ensure
stability and correct frame synchronization.
---
libavfilter/dnn/dnn_backend_torch.cpp | 115 +++++++++++++++++++++++++-
1 file changed, 111 insertions(+), 4 deletions(-)
diff --git a/libavfilter/dnn/dnn_backend_torch.cpp b/libavfilter/dnn/dnn_backend_torch.cpp
index 2e4326d9d4..ad81aff8da 100644
--- a/libavfilter/dnn/dnn_backend_torch.cpp
+++ b/libavfilter/dnn/dnn_backend_torch.cpp
@@ -25,6 +25,10 @@
#include <torch/torch.h>
#include <torch/script.h>
+#include <thread>
+#include <mutex>
+#include <condition_variable>
+#include <atomic>
extern "C" {
#include "dnn_io_proc.h"
@@ -39,9 +43,16 @@ typedef struct THModel {
DNNModel model;
DnnContext *ctx;
torch::jit::Module *jit_model;
- SafeQueue *request_queue;
+ SafeQueue *request_queue; // Holds available/idle request slots
Queue *task_queue;
Queue *lltask_queue;
+
+ // --- Async Support ---
+ SafeQueue *pending_queue; // Holds requests waiting for inference
+ std::thread *worker_thread; // The background worker
+ std::mutex *mutex; // Protects the condition variable
+ std::condition_variable *cond; // Wakes up worker when new task arrives
+ std::atomic<bool> worker_stop; // Flag to stop the thread
} THModel;
typedef struct THInferRequest {
@@ -119,6 +130,32 @@ static void dnn_free_model_th(DNNModel **model)
return;
th_model = (THModel *) (*model);
+
+ // --- Stop and Join Worker Thread ---
+ if (th_model->worker_thread) {
+ {
+ std::lock_guard<std::mutex> lock(*th_model->mutex);
+ th_model->worker_stop = true;
+ }
+ th_model->cond->notify_all();
+
+ if (th_model->worker_thread->joinable()) {
+ th_model->worker_thread->join();
+ }
+ delete th_model->worker_thread;
+ delete th_model->mutex;
+ delete th_model->cond;
+ }
+
+ if (th_model->pending_queue) {
+ // Clear remaining items (if any)
+ while (ff_safe_queue_size(th_model->pending_queue) != 0) {
+ ff_safe_queue_pop_front(th_model->pending_queue);
+ }
+ ff_safe_queue_destroy(th_model->pending_queue);
+ }
+ // -----------------------------------
+
while (ff_safe_queue_size(th_model->request_queue) != 0) {
THRequestItem *item = (THRequestItem *)ff_safe_queue_pop_front(th_model->request_queue);
destroy_request_item(&item);
@@ -318,6 +355,41 @@ err:
}
}
+// --- Worker Thread Function ---
+static void th_worker_thread(THModel *th_model) {
+ while (true) {
+ THRequestItem *request = NULL;
+
+ {
+ // Acquire lock to check condition
+ std::unique_lock<std::mutex> lock(*th_model->mutex);
+
+ // Wait until: We are told to stop OR there is work in the queue
+ th_model->cond->wait(lock, [&]{
+ return th_model->worker_stop || ff_safe_queue_size(th_model->pending_queue) > 0;
+ });
+
+ // If stopped and no work left, exit
+ if (th_model->worker_stop && ff_safe_queue_size(th_model->pending_queue) == 0) {
+ break;
+ }
+
+ // Get work
+ request = (THRequestItem *)ff_safe_queue_pop_front(th_model->pending_queue);
+ }
+
+ // Process work (Lock released so we don't block submission)
+ if (request) {
+ int ret = th_start_inference(request);
+ if (ret != 0) {
+ av_log(th_model->ctx, AV_LOG_ERROR, "Async inference failed\n");
+ }
+ // Always callback to clean up and notify FFmpeg
+ infer_completion_callback(request);
+ }
+ }
+}
+
static int execute_model_th(THRequestItem *request, Queue *lltask_queue)
{
THModel *th_model = NULL;
@@ -343,9 +415,24 @@ static int execute_model_th(THRequestItem *request, Queue *lltask_queue)
if ( ret != 0) {
goto err;
}
+
+ // --- EXECUTION LOGIC (ASYNC vs SYNC) ---
if (task->async) {
- avpriv_report_missing_feature(th_model->ctx, "LibTorch async");
+ // 1. Acquire lock
+ std::lock_guard<std::mutex> lock(*th_model->mutex);
+
+ // 2. Push to pending queue
+ if (ff_safe_queue_push_back(th_model->pending_queue, request) < 0) {
+ return AVERROR(ENOMEM);
+ }
+
+ // 3. Wake up worker
+ th_model->cond->notify_one();
+
+ // 4. Return immediately (Success)
+ return 0;
} else {
+ // Synchronous fallback
ret = th_start_inference((void *)(request));
if (ret != 0) {
goto err;
@@ -484,6 +571,25 @@ static DNNModel *dnn_load_model_th(DnnContext *ctx, DNNFunctionType func_type, A
goto fail;
}
+ // --- INITIALIZE ASYNC QUEUE AND THREAD ---
+ th_model->pending_queue = ff_safe_queue_create();
+ if (!th_model->pending_queue) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to create pending queue\n");
+ goto fail;
+ }
+
+ try {
+ th_model->mutex = new std::mutex();
+ th_model->cond = new std::condition_variable();
+ th_model->worker_stop = false;
+
+ // Start worker thread
+ th_model->worker_thread = new std::thread(th_worker_thread, th_model);
+ } catch (...) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to initialize worker thread or mutexes\n");
+ goto fail;
+ }
+
model->get_input = &get_input_th;
model->get_output = &get_output_th;
model->filter_ctx = filter_ctx;
@@ -519,7 +625,8 @@ static int dnn_execute_model_th(const DNNModel *model, DNNExecBaseParams *exec_p
return AVERROR(ENOMEM);
}
- ret = ff_dnn_fill_task(task, exec_params, th_model, 0, 1);
+ // Set 'async' flag based on context (ctx->async) instead of hardcoded 0
+ ret = ff_dnn_fill_task(task, exec_params, th_model, ctx->async, 1);
if (ret != 0) {
av_freep(&task);
av_log(ctx, AV_LOG_ERROR, "unable to fill task.\n");
@@ -580,4 +687,4 @@ extern const DNNModule ff_dnn_backend_torch = {
.get_result = dnn_get_result_th,
.flush = dnn_flush_th,
.free_model = dnn_free_model_th,
-};
+};
\ No newline at end of file
--
2.48.1
_______________________________________________
ffmpeg-devel mailing list -- ffmpeg-devel@ffmpeg.org
To unsubscribe send an email to ffmpeg-devel-leave@ffmpeg.org
^ permalink raw reply [flat|nested] 6+ messages in thread
* [FFmpeg-devel] Re: [PATCH 1/3] avfilter/dnn_backend_torch: implement async execution
2025-12-25 10:33 [FFmpeg-devel] [PATCH 1/3] avfilter/dnn_backend_torch: implement async execution Raja Rathour via ffmpeg-devel
@ 2025-12-31 6:41 ` Guo, Yejun via ffmpeg-devel
2026-01-01 8:07 ` [FFmpeg-devel] [PATCH v2 0/3] avfilter/dnn: add async support for LibTorch backend Raja Rathour via ffmpeg-devel
0 siblings, 1 reply; 6+ messages in thread
From: Guo, Yejun via ffmpeg-devel @ 2025-12-31 6:41 UTC (permalink / raw)
To: FFmpeg development discussions and patches; +Cc: Raja Rathour, Guo, Yejun
> -----Original Message-----
> From: Raja Rathour via ffmpeg-devel <ffmpeg-devel@ffmpeg.org>
> Sent: Thursday, December 25, 2025 6:33 PM
> To: ffmpeg-devel@ffmpeg.org
> Cc: Raja Rathour <imraja729@gmail.com>
> Subject: [FFmpeg-devel] [PATCH 1/3] avfilter/dnn_backend_torch: implement
> async execution
>
>
> Signed-off-by: Raja Rathour <imraja729@gmail.com>
>
> This patch implements asynchronous model execution for the LibTorch
> backend in FFmpeg's DNN module.
>
> Key changes:
> - Integrated a worker thread and a pending queue to handle inference.
> - Prevents the main filter thread from blocking during model execution.
> - Aligns LibTorch backend behavior with the existing OpenVINO async
> implementation.
> - Improves overall throughput for deep learning filters using LibTorch.
>
> The implementation has been tested with various torch models to ensure
> stability and correct frame synchronization.
>
Thanks for the patch — are there PATCH 2/3 and 3/3?
_______________________________________________
ffmpeg-devel mailing list -- ffmpeg-devel@ffmpeg.org
To unsubscribe send an email to ffmpeg-devel-leave@ffmpeg.org
^ permalink raw reply [flat|nested] 6+ messages in thread
* [FFmpeg-devel] [PATCH v2 0/3] avfilter/dnn: add async support for LibTorch backend
2025-12-31 6:41 ` [FFmpeg-devel] " Guo, Yejun via ffmpeg-devel
@ 2026-01-01 8:07 ` Raja Rathour via ffmpeg-devel
2026-01-01 8:07 ` [FFmpeg-devel] [PATCH v2 1/3] avfilter/dnn_backend_torch: add async infrastructure Raja Rathour via ffmpeg-devel
` (2 more replies)
0 siblings, 3 replies; 6+ messages in thread
From: Raja Rathour via ffmpeg-devel @ 2026-01-01 8:07 UTC (permalink / raw)
To: ffmpeg-devel; +Cc: guoyejun, Raja Rathour
This series implements asynchronous model execution for the LibTorch backend.
It introduces a dedicated worker thread and a task queue to prevent the main
filter thread from blocking during inference, aligning the Torch backend
functionality with the OpenVINO backend.
v2 changes:
- Split the implementation into 3 patches for better reviewability.
- Added proper thread joining and NULL pointer safety in dnn_free_model_th.
- Cleaned up comments to follow FFmpeg Doxygen standards.
Raja Rathour (3):
avfilter/dnn_backend_torch: add async infrastructure
avfilter/dnn_backend_torch: implement async execution logic
avfilter/dnn_backend_torch: handle lifecycle of worker thread
libavfilter/dnn/dnn_backend_torch.cpp | 128 +++++++++++++++++++++-----
1 file changed, 105 insertions(+), 23 deletions(-)
--
2.48.1
_______________________________________________
ffmpeg-devel mailing list -- ffmpeg-devel@ffmpeg.org
To unsubscribe send an email to ffmpeg-devel-leave@ffmpeg.org
^ permalink raw reply [flat|nested] 6+ messages in thread
* [FFmpeg-devel] [PATCH v2 1/3] avfilter/dnn_backend_torch: add async infrastructure
2026-01-01 8:07 ` [FFmpeg-devel] [PATCH v2 0/3] avfilter/dnn: add async support for LibTorch backend Raja Rathour via ffmpeg-devel
@ 2026-01-01 8:07 ` Raja Rathour via ffmpeg-devel
2026-01-01 8:07 ` [FFmpeg-devel] [PATCH v2 2/3] avfilter/dnn_backend_torch: implement async execution logic Raja Rathour via ffmpeg-devel
2026-01-01 8:07 ` [FFmpeg-devel] [PATCH v2 3/3] avfilter/dnn_backend_torch: handle lifecycle of worker thread Raja Rathour via ffmpeg-devel
2 siblings, 0 replies; 6+ messages in thread
From: Raja Rathour via ffmpeg-devel @ 2026-01-01 8:07 UTC (permalink / raw)
To: ffmpeg-devel; +Cc: guoyejun, Raja Rathour
Add worker thread, mutex, and condition variable to THModel.
---
libavfilter/dnn/dnn_backend_torch.cpp | 31 +++++++++++++++++++++++++++
1 file changed, 31 insertions(+)
diff --git a/libavfilter/dnn/dnn_backend_torch.cpp b/libavfilter/dnn/dnn_backend_torch.cpp
index 2e4326d9d4..af92cd13da 100644
--- a/libavfilter/dnn/dnn_backend_torch.cpp
+++ b/libavfilter/dnn/dnn_backend_torch.cpp
@@ -25,6 +25,10 @@
#include <torch/torch.h>
#include <torch/script.h>
+#include <thread>
+#include <mutex>
+#include <condition_variable>
+#include <atomic>
extern "C" {
#include "dnn_io_proc.h"
@@ -42,6 +46,11 @@ typedef struct THModel {
SafeQueue *request_queue;
Queue *task_queue;
Queue *lltask_queue;
+ SafeQueue *pending_queue; ///< requests waiting for inference
+ std::thread *worker_thread; ///< background worker thread
+ std::mutex *mutex; ///< mutex for the condition variable
+ std::condition_variable *cond; ///< condition variable for worker wakeup
+ std::atomic<bool> worker_stop; ///< signal for thread exit
} THModel;
typedef struct THInferRequest {
@@ -318,6 +327,28 @@ err:
}
}
+static void th_worker_thread(THModel *th_model) {
+ while (true) {
+ THRequestItem *request = NULL;
+ {
+ std::unique_lock<std::mutex> lock(*th_model->mutex);
+ th_model->cond->wait(lock, [&]{
+ return th_model->worker_stop || ff_safe_queue_size(th_model->pending_queue) > 0;
+ });
+
+ if (th_model->worker_stop && ff_safe_queue_size(th_model->pending_queue) == 0)
+ break;
+
+ request = (THRequestItem *)ff_safe_queue_pop_front(th_model->pending_queue);
+ }
+
+ if (request) {
+ th_start_inference(request);
+ infer_completion_callback(request);
+ }
+ }
+}
+
static int execute_model_th(THRequestItem *request, Queue *lltask_queue)
{
THModel *th_model = NULL;
--
2.48.1
_______________________________________________
ffmpeg-devel mailing list -- ffmpeg-devel@ffmpeg.org
To unsubscribe send an email to ffmpeg-devel-leave@ffmpeg.org
^ permalink raw reply [flat|nested] 6+ messages in thread
* [FFmpeg-devel] [PATCH v2 2/3] avfilter/dnn_backend_torch: implement async execution logic
2026-01-01 8:07 ` [FFmpeg-devel] [PATCH v2 0/3] avfilter/dnn: add async support for LibTorch backend Raja Rathour via ffmpeg-devel
2026-01-01 8:07 ` [FFmpeg-devel] [PATCH v2 1/3] avfilter/dnn_backend_torch: add async infrastructure Raja Rathour via ffmpeg-devel
@ 2026-01-01 8:07 ` Raja Rathour via ffmpeg-devel
2026-01-01 8:07 ` [FFmpeg-devel] [PATCH v2 3/3] avfilter/dnn_backend_torch: handle lifecycle of worker thread Raja Rathour via ffmpeg-devel
2 siblings, 0 replies; 6+ messages in thread
From: Raja Rathour via ffmpeg-devel @ 2026-01-01 8:07 UTC (permalink / raw)
To: ffmpeg-devel; +Cc: guoyejun, Raja Rathour
Update execute_model_th to support asynchronous execution by pushing tasks to the pending queue and notifying the worker thread.
Signed-off-by: Raja Rathour <imraja729@gmail.com>
---
libavfilter/dnn/dnn_backend_torch.cpp | 12 +++++-------
1 file changed, 5 insertions(+), 7 deletions(-)
diff --git a/libavfilter/dnn/dnn_backend_torch.cpp b/libavfilter/dnn/dnn_backend_torch.cpp
index af92cd13da..b5ff5b44d1 100644
--- a/libavfilter/dnn/dnn_backend_torch.cpp
+++ b/libavfilter/dnn/dnn_backend_torch.cpp
@@ -375,14 +375,12 @@ static int execute_model_th(THRequestItem *request, Queue *lltask_queue)
goto err;
}
if (task->async) {
- avpriv_report_missing_feature(th_model->ctx, "LibTorch async");
- } else {
- ret = th_start_inference((void *)(request));
- if (ret != 0) {
- goto err;
+ std::lock_guard<std::mutex> lock(*th_model->mutex);
+ if (ff_safe_queue_push_back(th_model->pending_queue, request) < 0) {
+ return AVERROR(ENOMEM);
}
- infer_completion_callback(request);
- return (task->inference_done == task->inference_todo) ? 0 : DNN_GENERIC_ERROR;
+ th_model->cond->notify_one();
+ return 0;
}
err:
--
2.48.1
_______________________________________________
ffmpeg-devel mailing list -- ffmpeg-devel@ffmpeg.org
To unsubscribe send an email to ffmpeg-devel-leave@ffmpeg.org
^ permalink raw reply [flat|nested] 6+ messages in thread
* [FFmpeg-devel] [PATCH v2 3/3] avfilter/dnn_backend_torch: handle lifecycle of worker thread
2026-01-01 8:07 ` [FFmpeg-devel] [PATCH v2 0/3] avfilter/dnn: add async support for LibTorch backend Raja Rathour via ffmpeg-devel
2026-01-01 8:07 ` [FFmpeg-devel] [PATCH v2 1/3] avfilter/dnn_backend_torch: add async infrastructure Raja Rathour via ffmpeg-devel
2026-01-01 8:07 ` [FFmpeg-devel] [PATCH v2 2/3] avfilter/dnn_backend_torch: implement async execution logic Raja Rathour via ffmpeg-devel
@ 2026-01-01 8:07 ` Raja Rathour via ffmpeg-devel
2 siblings, 0 replies; 6+ messages in thread
From: Raja Rathour via ffmpeg-devel @ 2026-01-01 8:07 UTC (permalink / raw)
To: ffmpeg-devel; +Cc: guoyejun, Raja Rathour
Initialize async resources in dnn_load_model_th and ensure proper cleanup and thread joining in dnn_free_model_th.
Signed-off-by: Raja Rathour <imraja729@gmail.com>
---
libavfilter/dnn/dnn_backend_torch.cpp | 85 ++++++++++++++++++++++-----
1 file changed, 69 insertions(+), 16 deletions(-)
diff --git a/libavfilter/dnn/dnn_backend_torch.cpp b/libavfilter/dnn/dnn_backend_torch.cpp
index b5ff5b44d1..f384999152 100644
--- a/libavfilter/dnn/dnn_backend_torch.cpp
+++ b/libavfilter/dnn/dnn_backend_torch.cpp
@@ -127,27 +127,70 @@ static void dnn_free_model_th(DNNModel **model)
if (!model || !*model)
return;
- th_model = (THModel *) (*model);
- while (ff_safe_queue_size(th_model->request_queue) != 0) {
- THRequestItem *item = (THRequestItem *)ff_safe_queue_pop_front(th_model->request_queue);
- destroy_request_item(&item);
+ th_model = (THModel *)(*model);
+
+ /* 1. Stop and join the worker thread if it exists */
+ if (th_model->worker_thread) {
+ {
+ std::lock_guard<std::mutex> lock(*th_model->mutex);
+ th_model->worker_stop = true;
+ }
+ th_model->cond->notify_all();
+ th_model->worker_thread->join();
+ delete th_model->worker_thread;
+ th_model->worker_thread = NULL;
}
- ff_safe_queue_destroy(th_model->request_queue);
- while (ff_queue_size(th_model->lltask_queue) != 0) {
- LastLevelTaskItem *item = (LastLevelTaskItem *)ff_queue_pop_front(th_model->lltask_queue);
- av_freep(&item);
+ /* 2. Safely delete C++ synchronization objects */
+ if (th_model->mutex) {
+ delete th_model->mutex;
+ th_model->mutex = NULL;
+ }
+ if (th_model->cond) {
+ delete th_model->cond;
+ th_model->cond = NULL;
}
- ff_queue_destroy(th_model->lltask_queue);
- while (ff_queue_size(th_model->task_queue) != 0) {
- TaskItem *item = (TaskItem *)ff_queue_pop_front(th_model->task_queue);
- av_frame_free(&item->in_frame);
- av_frame_free(&item->out_frame);
- av_freep(&item);
+ /* 3. Clean up the pending queue */
+ if (th_model->pending_queue) {
+ while (ff_safe_queue_size(th_model->pending_queue) > 0) {
+ THRequestItem *item = (THRequestItem *)ff_safe_queue_pop_front(th_model->pending_queue);
+ destroy_request_item(&item);
+ }
+ ff_safe_queue_destroy(th_model->pending_queue);
+ }
+
+ /* 4. Clean up standard backend queues */
+ if (th_model->request_queue) {
+ while (ff_safe_queue_size(th_model->request_queue) != 0) {
+ THRequestItem *item = (THRequestItem *)ff_safe_queue_pop_front(th_model->request_queue);
+ destroy_request_item(&item);
+ }
+ ff_safe_queue_destroy(th_model->request_queue);
}
- ff_queue_destroy(th_model->task_queue);
- delete th_model->jit_model;
+
+ if (th_model->lltask_queue) {
+ while (ff_queue_size(th_model->lltask_queue) != 0) {
+ LastLevelTaskItem *item = (LastLevelTaskItem *)ff_queue_pop_front(th_model->lltask_queue);
+ av_freep(&item);
+ }
+ ff_queue_destroy(th_model->lltask_queue);
+ }
+
+ if (th_model->task_queue) {
+ while (ff_queue_size(th_model->task_queue) != 0) {
+ TaskItem *item = (TaskItem *)ff_queue_pop_front(th_model->task_queue);
+ av_frame_free(&item->in_frame);
+ av_frame_free(&item->out_frame);
+ av_freep(&item);
+ }
+ ff_queue_destroy(th_model->task_queue);
+ }
+
+ /* 5. Final model cleanup */
+ if (th_model->jit_model)
+ delete th_model->jit_model;
+
av_freep(&th_model);
*model = NULL;
}
@@ -513,6 +556,16 @@ static DNNModel *dnn_load_model_th(DnnContext *ctx, DNNFunctionType func_type, A
goto fail;
}
+ th_model->pending_queue = ff_safe_queue_create();
+ if (!th_model->pending_queue) {
+ goto fail;
+ }
+
+ th_model->mutex = new std::mutex();
+ th_model->cond = new std::condition_variable();
+ th_model->worker_stop = false;
+ th_model->worker_thread = new std::thread(th_worker_thread, th_model);
+
model->get_input = &get_input_th;
model->get_output = &get_output_th;
model->filter_ctx = filter_ctx;
--
2.48.1
_______________________________________________
ffmpeg-devel mailing list -- ffmpeg-devel@ffmpeg.org
To unsubscribe send an email to ffmpeg-devel-leave@ffmpeg.org
^ permalink raw reply [flat|nested] 6+ messages in thread
end of thread, other threads:[~2026-01-01 8:11 UTC | newest]
Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2025-12-25 10:33 [FFmpeg-devel] [PATCH 1/3] avfilter/dnn_backend_torch: implement async execution Raja Rathour via ffmpeg-devel
2025-12-31 6:41 ` [FFmpeg-devel] " Guo, Yejun via ffmpeg-devel
2026-01-01 8:07 ` [FFmpeg-devel] [PATCH v2 0/3] avfilter/dnn: add async support for LibTorch backend Raja Rathour via ffmpeg-devel
2026-01-01 8:07 ` [FFmpeg-devel] [PATCH v2 1/3] avfilter/dnn_backend_torch: add async infrastructure Raja Rathour via ffmpeg-devel
2026-01-01 8:07 ` [FFmpeg-devel] [PATCH v2 2/3] avfilter/dnn_backend_torch: implement async execution logic Raja Rathour via ffmpeg-devel
2026-01-01 8:07 ` [FFmpeg-devel] [PATCH v2 3/3] avfilter/dnn_backend_torch: handle lifecycle of worker thread Raja Rathour via ffmpeg-devel
Git Inbox Mirror of the ffmpeg-devel mailing list - see https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
This inbox may be cloned and mirrored by anyone:
git clone --mirror https://master.gitmailbox.com/ffmpegdev/0 ffmpegdev/git/0.git
# If you have public-inbox 1.1+ installed, you may
# initialize and index your mirror using the following commands:
public-inbox-init -V2 ffmpegdev ffmpegdev/ https://master.gitmailbox.com/ffmpegdev \
ffmpegdev@gitmailbox.com
public-inbox-index ffmpegdev
Example config snippet for mirrors.
AGPL code for this site: git clone https://public-inbox.org/public-inbox.git