* [FFmpeg-devel] [PR] [WIP] aarch64/h264pred: fix performance regression in NEON optimizations (PR #21410)
@ 2026-01-08 11:32 Zhao Zhili via ffmpeg-devel
From: Zhao Zhili via ffmpeg-devel @ 2026-01-08 11:32 UTC (permalink / raw)
To: ffmpeg-devel; +Cc: Zhao Zhili
PR #21410 opened by Zhao Zhili (quink)
URL: https://code.ffmpeg.org/FFmpeg/FFmpeg/pulls/21410
Patch URL: https://code.ffmpeg.org/FFmpeg/FFmpeg/pulls/21410.patch
The NEON implementation of pred8x8_dc was slower than the C version on
A76, A510, A715, X3, and Apple M4; only the A55 showed a tiny gain.
                       A55    A76    A510   A715   X3     M4
pred8x8_dc_128_8_neon: 1.04x  0.58x  0.36x  0.44x  0.38x  0.20x
The initial NEON implementation loaded the left column with individual
lane loads (ld1 v1.b[0], ld1 v1.b[1], ..., ld1 v1.b[7]); those loads
alone were already slower than the whole C version, before any
computation, due to the high instruction count and poor
instruction-level parallelism.
This patch rewrites it with scalar AArch64 code:
                       A55    A76    A510   A715   X3     M4
pred8x8_dc_8_armv8:    1.08x  1.48x  1.00x  1.14x  1.05x  1.00x
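
For context, the scalar path computes the same four per-quadrant DC
values as the 8-bit C reference in libavcodec/h264pred_template.c. The
sketch below is only an illustration of that rule (assuming 8-bit
samples with both top and left neighbours available), not the actual
FFmpeg source:

    /* Illustrative sketch of 8x8 chroma DC prediction: each 4x4
     * quadrant gets its own DC computed from the neighbouring top
     * and/or left samples. */
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    static void pred8x8_dc_sketch(uint8_t *src, ptrdiff_t stride)
    {
        int dc0 = 0, dc1 = 0, dc2 = 0;

        for (int i = 0; i < 4; i++) {
            dc0 += src[i - stride] + src[i * stride - 1]; /* top[0..3] + left[0..3] */
            dc1 += src[4 + i - stride];                   /* top[4..7]              */
            dc2 += src[(4 + i) * stride - 1];             /* left[4..7]             */
        }

        const uint8_t v0 = (dc0 + 4) >> 3;       /* top-left quadrant     */
        const uint8_t v1 = (dc1 + 2) >> 2;       /* top-right quadrant    */
        const uint8_t v2 = (dc2 + 2) >> 2;       /* bottom-left quadrant  */
        const uint8_t v3 = (dc1 + dc2 + 4) >> 3; /* bottom-right quadrant */

        for (int y = 0; y < 8; y++) {
            memset(src + y * stride,     y < 4 ? v0 : v2, 4);
            memset(src + y * stride + 4, y < 4 ? v1 : v3, 4);
        }
    }

The sixteen neighbour bytes are independent scalar loads, so
out-of-order cores can issue them in parallel, which is presumably why
the plain loads end up cheaper than gathering the left column into a
vector lane by lane.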
Signed-off-by: Zhao Zhili <zhilizhao@tencent.com>
From 275ab63c720a15f848f9c0f34fbaef45c60be6ee Mon Sep 17 00:00:00 2001
From: Zhao Zhili <zhilizhao@tencent.com>
Date: Thu, 8 Jan 2026 11:54:55 +0800
Subject: [PATCH] aarch64/h264pred: fix performance regression in NEON
optimizations
The NEON implementation of pred8x8_dc was slower than the C version on
A76, A510, A715, X3, and Apple M4; only the A55 showed a tiny gain.
                       A55    A76    A510   A715   X3     M4
pred8x8_dc_128_8_neon: 1.04x  0.58x  0.36x  0.44x  0.38x  0.20x
The initial NEON implementation loaded the left column with individual
lane loads (ld1 v1.b[0], ld1 v1.b[1], ..., ld1 v1.b[7]); those loads
alone were already slower than the whole C version, before any
computation, due to the high instruction count and poor
instruction-level parallelism.
This patch rewrites it with scalar AArch64 code:
                       A55    A76    A510   A715   X3     M4
pred8x8_dc_8_armv8:    1.08x  1.48x  1.00x  1.14x  1.05x  1.00x
Signed-off-by: Zhao Zhili <zhilizhao@tencent.com>
---
libavcodec/aarch64/Makefile | 1 +
libavcodec/aarch64/h264pred.S | 104 +++++++++++++++++++++++++++++
libavcodec/aarch64/h264pred_init.c | 19 +++++-
libavcodec/aarch64/h264pred_neon.S | 22 ------
4 files changed, 122 insertions(+), 24 deletions(-)
create mode 100644 libavcodec/aarch64/h264pred.S
diff --git a/libavcodec/aarch64/Makefile b/libavcodec/aarch64/Makefile
index 2bf48dfa28..6bbc6f2d17 100644
--- a/libavcodec/aarch64/Makefile
+++ b/libavcodec/aarch64/Makefile
@@ -34,6 +34,7 @@ OBJS-$(CONFIG_VP9_DECODER) += aarch64/vp9dsp_init_10bpp_aarch64.o \
# subsystems
ARMV8-OBJS-$(CONFIG_VIDEODSP) += aarch64/videodsp.o
+ARMV8-OBJS-$(CONFIG_H264PRED) += aarch64/h264pred.o
# NEON optimizations
diff --git a/libavcodec/aarch64/h264pred.S b/libavcodec/aarch64/h264pred.S
new file mode 100644
index 0000000000..c6bc95ae19
--- /dev/null
+++ b/libavcodec/aarch64/h264pred.S
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2026 Zhao Zhili <quinkblack@foxmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/aarch64/asm.S"
+
+function ff_pred8x8_dc_aarch64, export=1
+ sub x9, x0, x1 // top pointer
+ lsl x11, x1, #2 // stride * 4
+ add x14, x0, x1 // src + stride
+ add x12, x0, x11 // src + stride*4
+
+ ldurb w8, [x0, #-1] // left[0]
+ add x11, x14, x11 // src + stride*5
+ ldrb w7, [x9] // top[0]
+ lsl x16, x1, #1 // stride * 2
+ ldrb w10, [x9, #4] // top[4], dc1 init
+ ldurb w13, [x12, #-1] // left[4], dc2 init
+
+ add x17, x0, x16 // src + stride*2
+ add x16, x14, x16 // src + stride*3
+
+ add w8, w8, w7 // dc0 = left[0] + top[0]
+
+ ldurb w15, [x14, #-1] // left[1]
+ ldrb w2, [x9, #1] // top[1]
+ ldrb w4, [x9, #5] // top[5]
+ add w15, w15, w2
+ add w10, w10, w4 // dc1 += top[5]
+ add w8, w8, w15 // dc0 += left[1] + top[1]
+
+ ldurb w15, [x11, #-1] // left[5]
+ ldurb w2, [x17, #-1] // left[2]
+ ldrb w4, [x9, #2] // top[2]
+ ldrb w5, [x9, #6] // top[6]
+ add w2, w2, w4
+ add w8, w8, w2 // dc0 += left[2] + top[2]
+
+ mov w3, #6
+ madd x3, x1, x3, x0 // src + stride*6
+ ldurb w6, [x3, #-1] // left[6]
+
+ ldurb w2, [x16, #-1] // left[3]
+ ldrb w4, [x9, #3] // top[3]
+ ldrb w7, [x9, #7] // top[7]
+ add w2, w2, w4
+ add w8, w8, w2 // dc0 += left[3] + top[3]
+
+ add w5, w5, w7
+ add w10, w10, w5 // dc1 += top[6] + top[7]
+
+ add x9, x9, x1, lsl #3 // src + stride*7
+ ldurb w1, [x9, #-1] // left[7]
+
+ add w13, w13, w15 // dc2 += left[5]
+ add w13, w13, w1 // dc2 += left[7]
+ add w13, w13, w6 // dc2 += left[6]
+
+ add w8, w8, #4
+ add w2, w13, w10 // dc3_sum = dc2_sum + dc1_sum
+ lsr w8, w8, #3 // dc0 >> 3
+
+ add w1, w10, #2 // dc1_sum + 2
+ add w2, w2, #4 // dc3_sum + 4
+ mov w15, #0x01010101
+ lsr w1, w1, #2 // dc1 >> 2
+
+ mul w8, w8, w15 // dc0 splat
+ lsr w2, w2, #3 // dc3 >> 3
+ mul w1, w1, w15 // dc1 splat
+
+ add w13, w13, #2 // dc2_sum + 2
+ mul w10, w2, w15 // dc3 splat
+
+ stp w8, w1, [x0]
+ lsr w13, w13, #2 // dc2 >> 2
+ stp w8, w1, [x14]
+ mul w13, w13, w15 // dc2 splat
+ stp w8, w1, [x17]
+ stp w8, w1, [x16]
+ stp w13, w10, [x12]
+ stp w13, w10, [x11]
+ stp w13, w10, [x3]
+ stp w13, w10, [x9]
+
+ ret
+endfunc
+
diff --git a/libavcodec/aarch64/h264pred_init.c b/libavcodec/aarch64/h264pred_init.c
index 0ae8f70d23..ec6f6d9ee7 100644
--- a/libavcodec/aarch64/h264pred_init.c
+++ b/libavcodec/aarch64/h264pred_init.c
@@ -36,7 +36,7 @@ void ff_pred16x16_top_dc_neon(uint8_t *src, ptrdiff_t stride);
void ff_pred8x8_vert_neon(uint8_t *src, ptrdiff_t stride);
void ff_pred8x8_hor_neon(uint8_t *src, ptrdiff_t stride);
void ff_pred8x8_plane_neon(uint8_t *src, ptrdiff_t stride);
-void ff_pred8x8_dc_neon(uint8_t *src, ptrdiff_t stride);
+void ff_pred8x8_dc_aarch64(uint8_t *src, ptrdiff_t stride);
void ff_pred8x8_128_dc_neon(uint8_t *src, ptrdiff_t stride);
void ff_pred8x8_left_dc_neon(uint8_t *src, ptrdiff_t stride);
void ff_pred8x8_top_dc_neon(uint8_t *src, ptrdiff_t stride);
@@ -63,6 +63,20 @@ void ff_pred8x8_0lt_dc_neon_10(uint8_t *src, ptrdiff_t stride);
void ff_pred8x8_l00_dc_neon_10(uint8_t *src, ptrdiff_t stride);
void ff_pred8x8_0l0_dc_neon_10(uint8_t *src, ptrdiff_t stride);
+static av_cold void h264_pred_init_armv8(H264PredContext *h, int codec_id,
+ const int bit_depth,
+ const int chroma_format_idc)
+{
+ if (bit_depth == 8) {
+ if (chroma_format_idc <= 1) {
+ if (codec_id != AV_CODEC_ID_RV40 && codec_id != AV_CODEC_ID_VP7 &&
+ codec_id != AV_CODEC_ID_VP8) {
+ h->pred8x8[DC_PRED8x8 ] = ff_pred8x8_dc_aarch64;
+ }
+ }
+ }
+}
+
static av_cold void h264_pred_init_neon(H264PredContext *h, int codec_id,
const int bit_depth,
const int chroma_format_idc)
@@ -76,7 +90,6 @@ static av_cold void h264_pred_init_neon(H264PredContext *h, int codec_id,
h->pred8x8[DC_128_PRED8x8 ] = ff_pred8x8_128_dc_neon;
if (codec_id != AV_CODEC_ID_RV40 && codec_id != AV_CODEC_ID_VP7 &&
codec_id != AV_CODEC_ID_VP8) {
- h->pred8x8[DC_PRED8x8 ] = ff_pred8x8_dc_neon;
h->pred8x8[LEFT_DC_PRED8x8] = ff_pred8x8_left_dc_neon;
h->pred8x8[TOP_DC_PRED8x8 ] = ff_pred8x8_top_dc_neon;
h->pred8x8[ALZHEIMER_DC_L0T_PRED8x8] = ff_pred8x8_l0t_dc_neon;
@@ -130,6 +143,8 @@ av_cold void ff_h264_pred_init_aarch64(H264PredContext *h, int codec_id,
{
int cpu_flags = av_get_cpu_flags();
+ if (have_armv8(cpu_flags))
+ h264_pred_init_armv8(h, codec_id, bit_depth, chroma_format_idc);
if (have_neon(cpu_flags))
h264_pred_init_neon(h, codec_id, bit_depth, chroma_format_idc);
}
diff --git a/libavcodec/aarch64/h264pred_neon.S b/libavcodec/aarch64/h264pred_neon.S
index 795d2ce540..aad1eca5b8 100644
--- a/libavcodec/aarch64/h264pred_neon.S
+++ b/libavcodec/aarch64/h264pred_neon.S
@@ -265,28 +265,6 @@ function ff_pred8x8_left_dc_neon, export=1
rshrn v2.8b, v0.8h, #2
dup v1.8b, v2.b[1]
dup v0.8b, v2.b[0]
- b .L_pred8x8_dc_end
-endfunc
-
-function ff_pred8x8_dc_neon, export=1
- sub x2, x0, x1
- sub x3, x0, #1
- ld1 {v0.8b}, [x2]
- ldcol.8 v1, x3, x1
- uaddlp v0.4h, v0.8b
- uaddlp v1.4h, v1.8b
- trn1 v2.2s, v0.2s, v1.2s
- trn2 v3.2s, v0.2s, v1.2s
- addp v4.4h, v2.4h, v3.4h
- addp v5.4h, v4.4h, v4.4h
- rshrn v6.8b, v5.8h, #3
- rshrn v7.8b, v4.8h, #2
- dup v0.8b, v6.b[0]
- dup v2.8b, v7.b[2]
- dup v1.8b, v7.b[3]
- dup v3.8b, v6.b[1]
- zip1 v0.2s, v0.2s, v2.2s
- zip1 v1.2s, v1.2s, v3.2s
.L_pred8x8_dc_end:
mov w3, #4
add x2, x0, x1, lsl #2
--
2.49.1