From: cenzhanquan2@gmail.com
To: ffmpeg-devel@ffmpeg.org
Cc: zhanquan cen <cenzhanquan2@gmail.com>
Subject: [FFmpeg-devel] [PATCH 1/1] libavfilter/volume: do fade when adjusting the volume.
Date: Wed, 23 Apr 2025 22:17:20 +0800
Message-ID: <20250423141720.4033649-1-cenzhanquan2@gmail.com>

From: zhanquan cen <cenzhanquan2@gmail.com>

1. Add a simple fade when scaling the volume.
2. Perform the fade when the volume is adjusted, e.g. up to the maximum.

When the volume is adjusted we want the audio to stay smooth, so we
calculate the gradient step between consecutive samples from the total
change (i.e. dst_volume - src_volume) divided by the number of samples
(nb_samples), and apply it progressively to each target sample.

Signed-off-by: zhanquan cen <cenzhanquan2@gmail.com>
---
 libavfilter/af_volume.c | 125 ++++++++++++++++++++++++++++++++++++----
 libavfilter/af_volume.h |  30 ++++++++++
 2 files changed, 144 insertions(+), 11 deletions(-)

diff --git a/libavfilter/af_volume.c b/libavfilter/af_volume.c
index 471bffeceb..283865f810 100644
--- a/libavfilter/af_volume.c
+++ b/libavfilter/af_volume.c
@@ -162,6 +162,22 @@ static int query_formats(const AVFilterContext *ctx,
     return 0;
 }
 
+static inline void fade_samples_u8(uint8_t *dst, const uint8_t *src,
+                                   int nb_samples, int chs, int dst_volume, int src_volume)
+{
+    int i, j, k = 0;
+    int64_t sample;
+    int64_t step;
+
+    step = (((int64_t)dst_volume - src_volume) << 8) / nb_samples;
+    for (i = 0; i < nb_samples; i++) {
+        for (j = 0; j < chs; j++, k++) {
+            sample = (int64_t)(src[k] - 128) * (src_volume + (step * i) >> 8) + 128;
+            dst[k] = av_clip_uint8((sample >> 8) + 128);
+        }
+    }
+}
+
 static inline void scale_samples_u8(uint8_t *dst, const uint8_t *src,
                                     int nb_samples, int volume)
 {
@@ -170,6 +186,22 @@ static inline void scale_samples_u8(uint8_t *dst, const uint8_t *src,
         dst[i] = av_clip_uint8(((((int64_t)src[i] - 128) * volume + 128) >> 8) + 128);
 }
 
+static inline void fade_samples_u8_small(uint8_t *dst, const uint8_t *src,
+                                         int nb_samples, int chs, int dst_volume, int src_volume)
+{
+    int i, j, k = 0;
+    int sample;
+    int step;
+
+    step = ((dst_volume - src_volume) << 8) / nb_samples;
+    for (i = 0; i < nb_samples; i++) {
+        for (j = 0; j < chs; j++, k++) {
+            sample = (src[k] - 128) * (src_volume + (step * i >> 8)) + 128;
+            dst[k] = av_clip_uint8((sample >> 8) + 128);
+        }
+    }
+}
+
 static inline void scale_samples_u8_small(uint8_t *dst, const uint8_t *src,
                                           int nb_samples, int volume)
 {
@@ -178,6 +210,22 @@ static inline void scale_samples_u8_small(uint8_t *dst, const uint8_t *src,
         dst[i] = av_clip_uint8((((src[i] - 128) * volume + 128) >> 8) + 128);
 }
 
+static inline void fade_samples_s16(uint8_t *dst, const uint8_t *src,
+                                    int nb_samples, int chs, int dst_volume, int src_volume)
+{
+    const int16_t *smp_src = (const int16_t *)src;
+    int16_t *smp_dst = (int16_t *)dst;
+    int i, j, k = 0;
+    int64_t step;
+
+    step = (((int64_t)dst_volume - src_volume) << 8) / nb_samples;
+    for (i = 0; i < nb_samples; i++) {
+        for (j = 0; j < chs; j++, k++) {
+            smp_dst[k] = av_clip_int16((int64_t)(smp_src[k] * (src_volume + (step * i >> 8)) + 128) >> 8);
+        }
+    }
+}
+
 static inline void scale_samples_s16(uint8_t *dst, const uint8_t *src,
                                      int nb_samples, int volume)
 {
@@ -188,6 +236,22 @@ static inline void scale_samples_s16(uint8_t *dst, const uint8_t *src,
         smp_dst[i] = av_clip_int16(((int64_t)smp_src[i] * volume + 128) >> 8);
 }
 
+static inline void fade_samples_s16_small(uint8_t *dst, const uint8_t *src,
+                                          int nb_samples, int chs, int dst_volume, int src_volume)
+{
+    const int16_t *smp_src = (const int16_t *)src;
+    int16_t *smp_dst = (int16_t *)dst;
+    int i, j, k = 0;
+    int step;
+
+    step = ((dst_volume - src_volume) << 8) / nb_samples;
+    for (i = 0; i < nb_samples; i++) {
+        for (j = 0; j < chs; j++, k++) {
+            smp_dst[k] = av_clip_int16((smp_src[k] * (src_volume + (step * i >> 8)) + 128) >> 8);
+        }
+    }
+}
+
 static inline void scale_samples_s16_small(uint8_t *dst, const uint8_t *src,
                                            int nb_samples, int volume)
 {
@@ -198,6 +262,22 @@ static inline void scale_samples_s16_small(uint8_t *dst, const uint8_t *src,
         smp_dst[i] = av_clip_int16((smp_src[i] * volume + 128) >> 8);
 }
 
+static inline void fade_samples_s32(uint8_t *dst, const uint8_t *src,
+                                    int nb_samples, int chs, int dst_volume, int src_volume)
+{
+    const int32_t *smp_src = (const int32_t *)src;
+    int32_t *smp_dst = (int32_t *)dst;
+    int i, j, k = 0;
+    int64_t step;
+
+    step = (((int64_t)dst_volume - src_volume) << 8) / nb_samples;
+    for (i = 0; i < nb_samples; i++) {
+        for (j = 0; j < chs; j++, k++) {
+            smp_dst[k] = av_clipl_int32((int64_t)(smp_src[k] * (src_volume + (step * i >> 8)) + 128) >> 8);
+        }
+    }
+}
+
 static inline void scale_samples_s32(uint8_t *dst, const uint8_t *src,
                                      int nb_samples, int volume)
 {
@@ -214,19 +294,26 @@ static av_cold void volume_init(VolumeContext *vol)
 
     switch (av_get_packed_sample_fmt(vol->sample_fmt)) {
     case AV_SAMPLE_FMT_U8:
-        if (vol->volume_i < 0x1000000)
+        if (vol->volume_i < 0x1000000) {
             vol->scale_samples = scale_samples_u8_small;
-        else
+            vol->fade_samples = fade_samples_u8_small;
+        } else {
             vol->scale_samples = scale_samples_u8;
+            vol->fade_samples = fade_samples_u8;
+        }
         break;
     case AV_SAMPLE_FMT_S16:
-        if (vol->volume_i < 0x10000)
+        if (vol->volume_i < 0x10000) {
             vol->scale_samples = scale_samples_s16_small;
-        else
+            vol->fade_samples = fade_samples_s16_small;
+        } else {
             vol->scale_samples = scale_samples_s16;
+            vol->fade_samples = fade_samples_s16;
+        }
         break;
     case AV_SAMPLE_FMT_S32:
         vol->scale_samples = scale_samples_s32;
+        vol->fade_samples = fade_samples_s32;
         break;
     case AV_SAMPLE_FMT_FLT:
         vol->samples_align = 4;
@@ -313,8 +400,11 @@ static int process_command(AVFilterContext *ctx, const char *cmd, const char *ar
     if (!strcmp(cmd, "volume")) {
         if ((ret = set_expr(&vol->volume_pexpr, args, ctx)) < 0)
             return ret;
-        if (vol->eval_mode == EVAL_MODE_ONCE)
+        if (vol->eval_mode == EVAL_MODE_ONCE) {
+            vol->volume_isrc = vol->volume_i;
             set_volume(ctx);
+            vol->voluming = true;
+        }
     }
 
     return ret;
@@ -329,6 +419,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
     int nb_samples = buf->nb_samples;
     AVFrame *out_buf;
     AVFrameSideData *sd = av_frame_get_side_data(buf, AV_FRAME_DATA_REPLAYGAIN);
+    bool planar;
     int ret;
 
     if (sd && vol->replaygain != REPLAYGAIN_IGNORE) {
@@ -380,7 +471,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
     if (vol->eval_mode == EVAL_MODE_FRAME)
         set_volume(ctx);
 
-    if (vol->volume == 1.0 || vol->volume_i == 256) {
+    if ((vol->volume == 1.0 || vol->volume_i == 256) && !vol->voluming) {
         out_buf = buf;
         goto end;
     }
@@ -406,16 +497,28 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
     if (vol->precision != PRECISION_FIXED || vol->volume_i > 0) {
         int p, plane_samples;
 
-        if (av_sample_fmt_is_planar(buf->format))
+        planar = av_sample_fmt_is_planar(buf->format);
+        if (planar)
             plane_samples = FFALIGN(nb_samples, vol->samples_align);
         else
             plane_samples = FFALIGN(nb_samples * vol->channels, vol->samples_align);
 
         if (vol->precision == PRECISION_FIXED) {
-            for (p = 0; p < vol->planes; p++) {
-                vol->scale_samples(out_buf->extended_data[p],
-                                   buf->extended_data[p], plane_samples,
-                                   vol->volume_i);
+            if (vol->voluming && vol->fade_samples) {
+                for (p = 0; p < vol->planes; p++) {
+                    vol->fade_samples(out_buf->extended_data[p], buf->extended_data[p],
+                                      nb_samples, planar ? 1 : vol->channels,
+                                      vol->volume_i, vol->volume_isrc);
+                }
+
+                vol->voluming = false;
+            } else {
+                for (p = 0; p < vol->planes; p++) {
+                    vol->scale_samples(out_buf->extended_data[p],
+                                       buf->extended_data[p], plane_samples,
+                                       vol->volume_i);
+
+                }
             }
         } else if (av_get_packed_sample_fmt(vol->sample_fmt) == AV_SAMPLE_FMT_FLT) {
             for (p = 0; p < vol->planes; p++) {
diff --git a/libavfilter/af_volume.h b/libavfilter/af_volume.h
index e9527eea8a..a610422a43 100644
--- a/libavfilter/af_volume.h
+++ b/libavfilter/af_volume.h
@@ -25,6 +25,7 @@
 #define AVFILTER_VOLUME_H
 
 #include <stdint.h>
+#include <stdbool.h>
 #include "libavutil/eval.h"
 #include "libavutil/float_dsp.h"
 #include "libavutil/log.h"
@@ -84,7 +85,36 @@ typedef struct VolumeContext {
 
     void (*scale_samples)(uint8_t *dst, const uint8_t *src, int nb_samples,
                           int volume);
+
+    /**
+     * @brief Function pointer for fading samples.
+     *
+     * @param dst Destination buffer for faded samples.
+     * @param src Source buffer for original samples.
+     * @param nb_samples Number of samples to fade.
+     * @param chs Number of channels in the audio samples.
+     * @param dst_volume Destination volume level for fading.
+     * @param src_volume Source volume level for fading.
+     */
+    void (*fade_samples)(uint8_t *dst, const uint8_t *src, int nb_samples, int chs,
+                         int dst_volume, int src_volume);
+
     int samples_align;
+
+    /**
+     * @brief Flag indicating whether fading is in progress.
+     *
+     * If this flag is set to true, it means that the audio samples are currently being faded.
+     */
+    bool voluming;
+
+    /**
+     * @brief Source volume level for fading.
+     *
+     * This variable stores the backup volume level for fading. It is used to calculate the
+     * step size when adjusting the volume.
+     */
+    int volume_isrc;
 } VolumeContext;
 
 void ff_volume_init_x86(VolumeContext *vol);
-- 
2.34.1
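For illustration only (this is not part of the patch, and the helper names
apply_fade_s16 and clip_s16 are invented for the example rather than being
libavfilter APIs), here is a minimal standalone sketch of the fixed-point
gain ramp the commit message describes, mirroring the s16 "small" path:
the gain is expressed in Q8 (256 == 1.0) and the step is kept shifted left
by 8 bits so the per-sample increment survives the integer division by
nb_samples.

    #include <stdint.h>
    #include <stdio.h>

    static int16_t clip_s16(int v)
    {
        if (v < -32768) return -32768;
        if (v >  32767) return  32767;
        return (int16_t)v;
    }

    /* Ramp the gain linearly from src_volume to dst_volume (Q8, 256 == 1.0)
     * over nb_samples interleaved frames of chs channels. Mirrors the
     * patch's arithmetic, including the right shift of a possibly negative
     * intermediate value. */
    static void apply_fade_s16(int16_t *dst, const int16_t *src,
                               int nb_samples, int chs,
                               int dst_volume, int src_volume)
    {
        /* total gain change spread across the frame, pre-shifted by 8 */
        int step = ((dst_volume - src_volume) << 8) / nb_samples;
        int i, j, k = 0;

        for (i = 0; i < nb_samples; i++) {
            int vol = src_volume + (step * i >> 8);   /* current gain, Q8 */
            for (j = 0; j < chs; j++, k++)
                dst[k] = clip_s16((src[k] * vol + 128) >> 8);
        }
    }

    int main(void)
    {
        enum { N = 8 };
        int16_t in[N], out[N];

        for (int i = 0; i < N; i++)
            in[i] = 10000;

        /* mono buffer of 8 samples, fading from gain 1.0 (256) to 0.5 (128) */
        apply_fade_s16(out, in, N, 1, 128, 256);

        for (int i = 0; i < N; i++)
            printf("%d ", out[i]);
        printf("\n");
        return 0;
    }

At runtime the fade path in the patch is reached when a "volume" command is
received while eval is "once" (the default). One way to send such a command
is the asendcmd filter, for example:

    ffmpeg -i in.wav -af "asendcmd=c='2.0 volume volume 0.5',volume=1.0" out.wav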