From: <m.kaindl0208@gmail.com>
To: <ffmpeg-devel@ffmpeg.org>
Subject: [FFmpeg-devel] [PATCH FFmpeg 5/15] libavfilter: dnn_filter_common: introduce interfaces for CLIP/CLAP classification and model loading with tokenizer
Date: Sat, 8 Mar 2025 15:59:25 +0100
Message-ID: <007701db903a$af836b50$0e8a41f0$@gmail.com>

Extends the DNN filter common code to support CLIP/CLAP classification
and model loading with tokenizers. Adds new execution functions for
both image and audio classification.

Try the new filters using my GitHub repo:
https://github.com/MaximilianKaindl/DeepFFMPEGVideoClassification.
Any feedback is appreciated!

Signed-off-by: MaximilianKaindl <m.kaindl0208@gmail.com>
---
 libavfilter/dnn_filter_common.c | 78 ++++++++++++++++++++++++++++++---
 libavfilter/dnn_filter_common.h |  4 ++
 2 files changed, 77 insertions(+), 5 deletions(-)

diff --git a/libavfilter/dnn_filter_common.c b/libavfilter/dnn_filter_common.c
index 6a1e9ace2e..d9b2cc9bcd 100644
--- a/libavfilter/dnn_filter_common.c
+++ b/libavfilter/dnn_filter_common.c
@@ -75,7 +75,7 @@ void *ff_dnn_filter_child_next(void *obj, void *prev)
     return ff_dnn_child_next(&base->dnnctx, prev);
 }
 
-int ff_dnn_init(DnnContext *ctx, DNNFunctionType func_type, AVFilterContext *filter_ctx)
+int ff_dnn_init_priv(DnnContext *ctx, DNNFunctionType func_type, AVFilterContext *filter_ctx)
 {
     DNNBackendType backend = ctx->backend_type;
 
@@ -87,10 +87,19 @@ int ff_dnn_init(DnnContext *ctx, DNNFunctionType func_type, AVFilterContext *fil
     if (backend == DNN_TH) {
         if (ctx->model_inputname)
             av_log(filter_ctx, AV_LOG_WARNING, "LibTorch backend do not require inputname, "\
-                   "inputname will be ignored.\n");
+                   "inputname will be ignored.\n");
         if (ctx->model_outputnames)
             av_log(filter_ctx, AV_LOG_WARNING, "LibTorch backend do not require outputname(s), "\
                    "all outputname(s) will be ignored.\n");
+
+#if (CONFIG_LIBTOKENIZERS == 0)
+        if ((func_type == DFT_ANALYTICS_CLIP || func_type == DFT_ANALYTICS_CLAP)) {
+            av_log(ctx, AV_LOG_ERROR,
+                   "tokenizers-cpp is not included. CLIP/CLAP classification requires the tokenizers-cpp library. Include "
+                   "it with configure.\n");
+            return AVERROR(EINVAL);
+        }
+#endif
         ctx->nb_outputs = 1;
     } else if (backend == DNN_TF) {
         if (!ctx->model_inputname) {
@@ -118,26 +127,50 @@ int ff_dnn_init(DnnContext *ctx, DNNFunctionType func_type, AVFilterContext *fil
         void *child = NULL;
 
         av_log(filter_ctx, AV_LOG_WARNING,
-               "backend_configs is deprecated, please set backend options directly\n");
+               "backend_configs is deprecated, please set backend options directly\n");
         while (child = ff_dnn_child_next(ctx, child)) {
             if (*(const AVClass **)child == &ctx->dnn_module->clazz) {
                 int ret = av_opt_set_from_string(child, ctx->backend_options,
-                                                 NULL, "=", "&");
+                                                 NULL, "=", "&");
                 if (ret < 0) {
                     av_log(filter_ctx, AV_LOG_ERROR,
                            "failed to parse options \"%s\"\n",
-                           ctx->backend_options);
+                           ctx->backend_options);
                     return ret;
                 }
             }
         }
     }
 
+    return 0;
+}
+
+int ff_dnn_init(DnnContext *ctx, DNNFunctionType func_type, AVFilterContext *filter_ctx)
+{
+    int ret = ff_dnn_init_priv(ctx, func_type, filter_ctx);
+    if (ret < 0) {
+        return ret;
+    }
     ctx->model = (ctx->dnn_module->load_model)(ctx, func_type, filter_ctx);
     if (!ctx->model) {
         av_log(filter_ctx, AV_LOG_ERROR, "could not load DNN model\n");
         return AVERROR(EINVAL);
     }
+    return 0;
+}
+
+int ff_dnn_init_with_tokenizer(DnnContext *ctx, DNNFunctionType func_type, char **labels, int label_count,
+                               int *softmax_units, int softmax_units_count, char *tokenizer_path,
+                               AVFilterContext *filter_ctx)
+{
+    int ret = ff_dnn_init_priv(ctx, func_type, filter_ctx);
+    if (ret < 0) {
+        return ret;
+    }
+    ctx->model = (ctx->dnn_module->load_model_with_tokenizer)(ctx, func_type, labels, label_count, softmax_units,
+                                                              softmax_units_count, tokenizer_path, filter_ctx);
+    if (!ctx->model) {
+        av_log(filter_ctx, AV_LOG_ERROR, "could not load DNN model\n");
+        return AVERROR(EINVAL);
+    }
 
     return 0;
 }
@@ -200,6 +233,41 @@ int ff_dnn_execute_model_classification(DnnContext *ctx, AVFrame *in_frame, AVFr
     return (ctx->dnn_module->execute_model)(ctx->model, &class_params.base);
 }
 
+int ff_dnn_execute_model_clip(DnnContext *ctx, AVFrame *in_frame, AVFrame *out_frame, const char **labels, int label_count, const char* tokenizer_path, char *target)
+{
+    DNNExecZeroShotClassificationParams class_params = {
+        {
+            .input_name = ctx->model_inputname,
+            .output_names = (const char **)ctx->model_outputnames,
+            .nb_output = ctx->nb_outputs,
+            .in_frame = in_frame,
+            .out_frame = out_frame,
+        },
+        .labels = labels,
+        .label_count = label_count,
+        .tokenizer_path = tokenizer_path,
+        .target = target,
+    };
+    return (ctx->dnn_module->execute_model)(ctx->model, &class_params.base);
+}
+
+int ff_dnn_execute_model_clap(DnnContext *ctx, AVFrame *in_frame, AVFrame *out_frame, const char **labels, int label_count, const char* tokenizer_path)
+{
+    DNNExecZeroShotClassificationParams class_params = {
+        {
+            .input_name = ctx->model_inputname,
+            .output_names = (const char **)ctx->model_outputnames,
+            .nb_output = ctx->nb_outputs,
+            .in_frame = in_frame,
+            .out_frame = out_frame,
+        },
+        .labels = labels,
+        .label_count = label_count,
+        .tokenizer_path = tokenizer_path,
+    };
+    return (ctx->dnn_module->execute_model)(ctx->model, &class_params.base);
+}
+
 DNNAsyncStatusType ff_dnn_get_result(DnnContext *ctx, AVFrame **in_frame, AVFrame **out_frame)
 {
     return (ctx->dnn_module->get_result)(ctx->model, in_frame, out_frame);
diff --git a/libavfilter/dnn_filter_common.h b/libavfilter/dnn_filter_common.h
index fffa676a9e..b05acf5d55 100644
--- a/libavfilter/dnn_filter_common.h
+++ b/libavfilter/dnn_filter_common.h
@@ -54,7 +54,9 @@
 void *ff_dnn_filter_child_next(void *obj, void *prev);
 int ff_dnn_filter_init_child_class(AVFilterContext *filter);
 
+int ff_dnn_init_priv(DnnContext *ctx, DNNFunctionType func_type, AVFilterContext *filter_ctx);
 int ff_dnn_init(DnnContext *ctx, DNNFunctionType func_type, AVFilterContext *filter_ctx);
+int ff_dnn_init_with_tokenizer(DnnContext *ctx, DNNFunctionType func_type, char** labels, int label_count, int* softmax_units, int softmax_units_count, char* tokenizer_path, AVFilterContext *filter_ctx);
 int ff_dnn_set_frame_proc(DnnContext *ctx, FramePrePostProc pre_proc, FramePrePostProc post_proc);
 int ff_dnn_set_detect_post_proc(DnnContext *ctx, DetectPostProc post_proc);
 int ff_dnn_set_classify_post_proc(DnnContext *ctx, ClassifyPostProc post_proc);
@@ -62,6 +64,8 @@ int ff_dnn_get_input(DnnContext *ctx, DNNData *input);
 int ff_dnn_get_output(DnnContext *ctx, int input_width, int input_height, int *output_width, int *output_height);
 int ff_dnn_execute_model(DnnContext *ctx, AVFrame *in_frame, AVFrame *out_frame);
 int ff_dnn_execute_model_classification(DnnContext *ctx, AVFrame *in_frame, AVFrame *out_frame, const char *target);
+int ff_dnn_execute_model_clip(DnnContext *ctx, AVFrame *in_frame, AVFrame *out_frame, const char **labels, int label_count, const char* tokenizer_path, char *target);
+int ff_dnn_execute_model_clap(DnnContext *ctx, AVFrame *in_frame, AVFrame *out_frame, const char **labels, int label_count, const char* tokenizer_path);
 DNNAsyncStatusType ff_dnn_get_result(DnnContext *ctx, AVFrame **in_frame, AVFrame **out_frame);
 int ff_dnn_flush(DnnContext *ctx);
 void ff_dnn_uninit(DnnContext *ctx);
-- 
2.34.1
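
Note for reviewers (not part of the patch): the sketch below shows how a
filter's init and filter_frame callbacks might wire up the new entry points.
The ClipContext struct, its option fields, the filter_frame-style callback
and the single softmax-group split are illustrative assumptions made for
this example; DFT_ANALYTICS_CLIP is introduced elsewhere in this series.

/* Illustrative sketch only -- not part of this patch. Struct layout, option
 * names and the one-group softmax split are assumptions for demonstration. */
#include "avfilter.h"
#include "dnn_filter_common.h"
#include "libavutil/attributes.h"
#include "libavutil/error.h"
#include "libavutil/frame.h"

typedef struct ClipContext {
    const AVClass *class;
    DnnContext dnnctx;        /* embedded as in the existing dnn_* filters */
    char *tokenizer_path;     /* hypothetical filter option */
    char **labels;            /* hypothetical label list parsed from an option */
    int label_count;
} ClipContext;

static av_cold int clip_init(AVFilterContext *filter_ctx)
{
    ClipContext *s = filter_ctx->priv;
    /* Treat all labels as one softmax group; a real filter could split them per category. */
    int softmax_units[] = { s->label_count };

    return ff_dnn_init_with_tokenizer(&s->dnnctx, DFT_ANALYTICS_CLIP,
                                      s->labels, s->label_count,
                                      softmax_units, 1,
                                      s->tokenizer_path, filter_ctx);
}

static int clip_filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    ClipContext *s = inlink->dst->priv;

    /* Queue the frame for zero-shot classification; results are later
     * collected with ff_dnn_get_result(), as in the other async DNN filters. */
    if (ff_dnn_execute_model_clip(&s->dnnctx, in, NULL,
                                  (const char **)s->labels, s->label_count,
                                  s->tokenizer_path, NULL) != 0)
        return AVERROR(EIO);
    return 0;
}

The CLAP path would look the same, with DFT_ANALYTICS_CLAP and
ff_dnn_execute_model_clap(), which drops the detection target argument.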