* [FFmpeg-devel] [PATCH 4/6] lavc/aarch64: new optimization for 8-bit hevc_epel_bi_hv
@ 2023-11-18 2:06 Logan.Lyu
  2023-12-01 18:16 ` Martin Storsjö
0 siblings, 1 reply; 2+ messages in thread
From: Logan.Lyu @ 2023-11-18 2:06 UTC (permalink / raw)
To: ffmpeg-devel; +Cc: jdek
[-- Attachment #1: Type: text/plain, Size: 16340 bytes --]
put_hevc_epel_bi_hv4_8_c: 242.9
put_hevc_epel_bi_hv4_8_i8mm: 68.6
put_hevc_epel_bi_hv6_8_c: 402.4
put_hevc_epel_bi_hv6_8_i8mm: 135.9
put_hevc_epel_bi_hv8_8_c: 636.4
put_hevc_epel_bi_hv8_8_i8mm: 145.6
put_hevc_epel_bi_hv12_8_c: 1363.1
put_hevc_epel_bi_hv12_8_i8mm: 324.1
put_hevc_epel_bi_hv16_8_c: 2222.1
put_hevc_epel_bi_hv16_8_i8mm: 509.1
put_hevc_epel_bi_hv24_8_c: 4793.4
put_hevc_epel_bi_hv24_8_i8mm: 1091.9
put_hevc_epel_bi_hv32_8_c: 8393.9
put_hevc_epel_bi_hv32_8_i8mm: 1720.6
put_hevc_epel_bi_hv48_8_c: 19526.6
put_hevc_epel_bi_hv48_8_i8mm: 4285.9
put_hevc_epel_bi_hv64_8_c: 33915.4
put_hevc_epel_bi_hv64_8_i8mm: 6783.6
Co-Authored-By: J. Dekker <jdek@itanimul.li>
Signed-off-by: Logan Lyu <Logan.Lyu@myais.com.cn>
---
libavcodec/aarch64/hevcdsp_epel_neon.S | 331 ++++++++++++++++++++++
libavcodec/aarch64/hevcdsp_init_aarch64.c | 5 +
2 files changed, 336 insertions(+)
diff --git a/libavcodec/aarch64/hevcdsp_epel_neon.S
b/libavcodec/aarch64/hevcdsp_epel_neon.S
index 54e55cd508..b60091bf23 100644
--- a/libavcodec/aarch64/hevcdsp_epel_neon.S
+++ b/libavcodec/aarch64/hevcdsp_epel_neon.S
@@ -3212,6 +3212,337 @@ function
ff_hevc_put_hevc_epel_uni_w_hv64_8_neon_i8mm, export=1
ret
endfunc
+
+function ff_hevc_put_hevc_epel_bi_hv4_8_neon_i8mm, export=1
+ add w10, w5, #3
+ lsl x10, x10, #7
+ sub sp, sp, x10 // tmp_array
+ stp x7, x30, [sp, #-48]!
+ stp x4, x5, [sp, #16]
+ stp x0, x1, [sp, #32]
+ add x0, sp, #48
+ sub x1, x2, x3
+ mov x2, x3
+ add w3, w5, #3
+ mov x4, x6
+ mov x5, x7
+ bl X(ff_hevc_put_hevc_epel_h4_8_neon_i8mm)
+ ldp x4, x5, [sp, #16]
+ ldp x0, x1, [sp, #32]
+ ldp x7, x30, [sp], #48
+ load_epel_filterh x7, x6
+ mov x10, #(MAX_PB_SIZE * 2)
+ ld1 {v16.4h}, [sp], x10
+ ld1 {v17.4h}, [sp], x10
+ ld1 {v18.4h}, [sp], x10
+.macro calc src0, src1, src2, src3
+ ld1 {\src3\().4h}, [sp], x10
+ calc_epelh v4, \src0, \src1, \src2, \src3
+ ld1 {v6.4h}, [x4], x10
+ sqadd v4.4h, v4.4h, v6.4h
+ sqrshrun v4.8b, v4.8h, #7
+ subs w5, w5, #1
+ st1 {v4.s}[0], [x0], x1
+.endm
+1: calc_all4
+.purgem calc
+2: ret
+endfunc
+
+function ff_hevc_put_hevc_epel_bi_hv6_8_neon_i8mm, export=1
+ add w10, w5, #3
+ lsl x10, x10, #7
+ sub sp, sp, x10 // tmp_array
+ stp x7, x30, [sp, #-48]!
+ stp x4, x5, [sp, #16]
+ stp x0, x1, [sp, #32]
+ add x0, sp, #48
+ sub x1, x2, x3
+ mov x2, x3
+ add w3, w5, #3
+ mov x4, x6
+ mov x5, x7
+ bl X(ff_hevc_put_hevc_epel_h6_8_neon_i8mm)
+ ldp x4, x5, [sp, #16]
+ ldp x0, x1, [sp, #32]
+ ldp x7, x30, [sp], #48
+ load_epel_filterh x7, x6
+ sub x1, x1, #4
+ mov x10, #(MAX_PB_SIZE * 2)
+ ld1 {v16.8h}, [sp], x10
+ ld1 {v17.8h}, [sp], x10
+ ld1 {v18.8h}, [sp], x10
+.macro calc src0, src1, src2, src3
+ ld1 {\src3\().8h}, [sp], x10
+ calc_epelh v4, \src0, \src1, \src2, \src3
+ calc_epelh2 v4, v5, \src0, \src1, \src2, \src3
+ ld1 {v6.8h}, [x4], x10
+ sqadd v4.8h, v4.8h, v6.8h
+ sqrshrun v4.8b, v4.8h, #7
+ st1 {v4.s}[0], [x0], #4
+ subs w5, w5, #1
+ st1 {v4.h}[2], [x0], x1
+.endm
+1: calc_all4
+.purgem calc
+2: ret
+endfunc
+
+function ff_hevc_put_hevc_epel_bi_hv8_8_neon_i8mm, export=1
+ add w10, w5, #3
+ lsl x10, x10, #7
+ sub sp, sp, x10 // tmp_array
+ stp x7, x30, [sp, #-48]!
+ stp x4, x5, [sp, #16]
+ stp x0, x1, [sp, #32]
+ add x0, sp, #48
+ sub x1, x2, x3
+ mov x2, x3
+ add w3, w5, #3
+ mov x4, x6
+ mov x5, x7
+ bl X(ff_hevc_put_hevc_epel_h8_8_neon_i8mm)
+ ldp x4, x5, [sp, #16]
+ ldp x0, x1, [sp, #32]
+ ldp x7, x30, [sp], #48
+ load_epel_filterh x7, x6
+ mov x10, #(MAX_PB_SIZE * 2)
+ ld1 {v16.8h}, [sp], x10
+ ld1 {v17.8h}, [sp], x10
+ ld1 {v18.8h}, [sp], x10
+.macro calc src0, src1, src2, src3
+ ld1 {\src3\().8h}, [sp], x10
+ calc_epelh v4, \src0, \src1, \src2, \src3
+ calc_epelh2 v4, v5, \src0, \src1, \src2, \src3
+ ld1 {v6.8h}, [x4], x10
+ sqadd v4.8h, v4.8h, v6.8h
+ sqrshrun v4.8b, v4.8h, #7
+ subs w5, w5, #1
+ st1 {v4.8b}, [x0], x1
+.endm
+1: calc_all4
+.purgem calc
+2: ret
+endfunc
+
+function ff_hevc_put_hevc_epel_bi_hv12_8_neon_i8mm, export=1
+ add w10, w5, #3
+ lsl x10, x10, #7
+ sub sp, sp, x10 // tmp_array
+ stp x7, x30, [sp, #-48]!
+ stp x4, x5, [sp, #16]
+ stp x0, x1, [sp, #32]
+ add x0, sp, #48
+ sub x1, x2, x3
+ mov x2, x3
+ add w3, w5, #3
+ mov x4, x6
+ mov x5, x7
+ bl X(ff_hevc_put_hevc_epel_h12_8_neon_i8mm)
+ ldp x4, x5, [sp, #16]
+ ldp x0, x1, [sp, #32]
+ ldp x7, x30, [sp], #48
+ load_epel_filterh x7, x6
+ sub x1, x1, #8
+ mov x10, #(MAX_PB_SIZE * 2)
+ ld1 {v16.8h, v17.8h}, [sp], x10
+ ld1 {v18.8h, v19.8h}, [sp], x10
+ ld1 {v20.8h, v21.8h}, [sp], x10
+.macro calc src0, src1, src2, src3, src4, src5, src6, src7
+ ld1 {\src6\().8h, \src7\().8h}, [sp], x10
+ calc_epelh v4, \src0, \src2, \src4, \src6
+ calc_epelh2 v4, v5, \src0, \src2, \src4, \src6
+ calc_epelh v5, \src1, \src3, \src5, \src7
+ ld1 {v6.8h, v7.8h}, [x4], x10
+ sqadd v4.8h, v4.8h, v6.8h
+ sqadd v5.8h, v5.8h, v7.8h
+ sqrshrun v4.8b, v4.8h, #7
+ sqrshrun2 v4.16b, v5.8h, #7
+ st1 {v4.8b}, [x0], #8
+ subs w5, w5, #1
+ st1 {v4.s}[2], [x0], x1
+.endm
+1: calc_all8
+.purgem calc
+2: ret
+endfunc
+
+function ff_hevc_put_hevc_epel_bi_hv16_8_neon_i8mm, export=1
+ add w10, w5, #3
+ lsl x10, x10, #7
+ sub sp, sp, x10 // tmp_array
+ stp x7, x30, [sp, #-48]!
+ stp x4, x5, [sp, #16]
+ stp x0, x1, [sp, #32]
+ add x0, sp, #48
+ sub x1, x2, x3
+ mov x2, x3
+ add w3, w5, #3
+ mov x4, x6
+ mov x5, x7
+ bl X(ff_hevc_put_hevc_epel_h16_8_neon_i8mm)
+ ldp x4, x5, [sp, #16]
+ ldp x0, x1, [sp, #32]
+ ldp x7, x30, [sp], #48
+ load_epel_filterh x7, x6
+ mov x10, #(MAX_PB_SIZE * 2)
+ ld1 {v16.8h, v17.8h}, [sp], x10
+ ld1 {v18.8h, v19.8h}, [sp], x10
+ ld1 {v20.8h, v21.8h}, [sp], x10
+.macro calc src0, src1, src2, src3, src4, src5, src6, src7
+ ld1 {\src6\().8h, \src7\().8h}, [sp], x10
+ calc_epelh v4, \src0, \src2, \src4, \src6
+ calc_epelh2 v4, v5, \src0, \src2, \src4, \src6
+ calc_epelh v5, \src1, \src3, \src5, \src7
+ calc_epelh2 v5, v6, \src1, \src3, \src5, \src7
+ ld1 {v6.8h, v7.8h}, [x4], x10
+ sqadd v4.8h, v4.8h, v6.8h
+ sqadd v5.8h, v5.8h, v7.8h
+ sqrshrun v4.8b, v4.8h, #7
+ sqrshrun2 v4.16b, v5.8h, #7
+ st1 {v4.16b}, [x0], x1
+ subs w5, w5, #1
+.endm
+1: calc_all8
+.purgem calc
+2: ret
+endfunc
+
+function ff_hevc_put_hevc_epel_bi_hv24_8_neon_i8mm, export=1
+ add w10, w5, #3
+ lsl x10, x10, #7
+ sub sp, sp, x10 // tmp_array
+ stp x7, x30, [sp, #-48]!
+ stp x4, x5, [sp, #16]
+ stp x0, x1, [sp, #32]
+ add x0, sp, #48
+ sub x1, x2, x3
+ mov x2, x3
+ add w3, w5, #3
+ mov x4, x6
+ mov x5, x7
+ bl X(ff_hevc_put_hevc_epel_h24_8_neon_i8mm)
+ ldp x4, x5, [sp, #16]
+ ldp x0, x1, [sp, #32]
+ ldp x7, x30, [sp], #48
+ load_epel_filterh x7, x6
+ mov x10, #(MAX_PB_SIZE * 2)
+ ld1 {v16.8h, v17.8h, v18.8h}, [sp], x10
+ ld1 {v19.8h, v20.8h, v21.8h}, [sp], x10
+ ld1 {v22.8h, v23.8h, v24.8h}, [sp], x10
+.macro calc src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10, src11
+ ld1 {\src9\().8h, \src10\().8h, \src11\().8h}, [sp], x10
+ calc_epelh v1, \src0, \src3, \src6, \src9
+ calc_epelh2 v1, v2, \src0, \src3, \src6, \src9
+ calc_epelh v2, \src1, \src4, \src7, \src10
+ calc_epelh2 v2, v3, \src1, \src4, \src7, \src10
+ calc_epelh v3, \src2, \src5, \src8, \src11
+ calc_epelh2 v3, v4, \src2, \src5, \src8, \src11
+ ld1 {v4.8h, v5.8h, v6.8h}, [x4], x10
+ sqadd v1.8h, v1.8h, v4.8h
+ sqadd v2.8h, v2.8h, v5.8h
+ sqadd v3.8h, v3.8h, v6.8h
+ sqrshrun v1.8b, v1.8h, #7
+ sqrshrun v2.8b, v2.8h, #7
+ sqrshrun v3.8b, v3.8h, #7
+ subs w5, w5, #1
+ st1 {v1.8b, v2.8b, v3.8b}, [x0], x1
+.endm
+1: calc_all12
+.purgem calc
+2: ret
+endfunc
+
+function ff_hevc_put_hevc_epel_bi_hv32_8_neon_i8mm, export=1
+ sub sp, sp, #16
+ str d8, [sp]
+ add w10, w5, #3
+ lsl x10, x10, #7
+ sub sp, sp, x10 // tmp_array
+ stp x7, x30, [sp, #-48]!
+ stp x4, x5, [sp, #16]
+ stp x0, x1, [sp, #32]
+ add x0, sp, #48
+ sub x1, x2, x3
+ mov x2, x3
+ add w3, w5, #3
+ mov x4, x6
+ mov x5, x7
+ bl X(ff_hevc_put_hevc_epel_h32_8_neon_i8mm)
+ ldp x4, x5, [sp, #16]
+ ldp x0, x1, [sp, #32]
+ ldp x7, x30, [sp], #48
+ load_epel_filterh x7, x6
+ mov x10, #(MAX_PB_SIZE * 2)
+ ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [sp], x10
+ ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [sp], x10
+ ld1 {v24.8h, v25.8h, v26.8h, v27.8h}, [sp], x10
+.macro calc src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10, src11, src12, src13, src14, src15
+        ld1            {\src12\().8h, \src13\().8h, \src14\().8h, \src15\().8h}, [sp], x10
+ calc_epelh v1, \src0, \src4, \src8, \src12
+ calc_epelh2 v1, v2, \src0, \src4, \src8, \src12
+ calc_epelh v2, \src1, \src5, \src9, \src13
+ calc_epelh2 v2, v3, \src1, \src5, \src9, \src13
+ calc_epelh v3, \src2, \src6, \src10, \src14
+ calc_epelh2 v3, v4, \src2, \src6, \src10, \src14
+ calc_epelh v4, \src3, \src7, \src11, \src15
+ calc_epelh2 v4, v5, \src3, \src7, \src11, \src15
+ ld1 {v5.8h, v6.8h, v7.8h, v8.8h}, [x4], x10
+ sqadd v1.8h, v1.8h, v5.8h
+ sqadd v2.8h, v2.8h, v6.8h
+ sqadd v3.8h, v3.8h, v7.8h
+ sqadd v4.8h, v4.8h, v8.8h
+ sqrshrun v1.8b, v1.8h, #7
+ sqrshrun v2.8b, v2.8h, #7
+ sqrshrun v3.8b, v3.8h, #7
+ sqrshrun v4.8b, v4.8h, #7
+ st1 {v1.8b, v2.8b, v3.8b, v4.8b}, [x0], x1
+ subs w5, w5, #1
+.endm
+1: calc_all16
+.purgem calc
+2: ldr d8, [sp], #16
+ ret
+endfunc
+
+function ff_hevc_put_hevc_epel_bi_hv48_8_neon_i8mm, export=1
+ stp x6, x7, [sp, #-80]!
+ stp x4, x5, [sp, #16]
+ stp x2, x3, [sp, #32]
+ stp x0, x1, [sp, #48]
+ str x30, [sp, #64]
+ bl X(ff_hevc_put_hevc_epel_bi_hv24_8_neon_i8mm)
+ ldp x4, x5, [sp, #16]
+ ldp x2, x3, [sp, #32]
+ ldp x0, x1, [sp, #48]
+ ldp x6, x7, [sp], #64
+ add x0, x0, #24
+ add x2, x2, #24
+ add x4, x4, #48
+ bl X(ff_hevc_put_hevc_epel_bi_hv24_8_neon_i8mm)
+ ldr x30, [sp], #16
+ ret
+endfunc
+
+function ff_hevc_put_hevc_epel_bi_hv64_8_neon_i8mm, export=1
+ stp x6, x7, [sp, #-80]!
+ stp x4, x5, [sp, #16]
+ stp x2, x3, [sp, #32]
+ stp x0, x1, [sp, #48]
+ str x30, [sp, #64]
+ bl X(ff_hevc_put_hevc_epel_bi_hv32_8_neon_i8mm)
+ ldp x4, x5, [sp, #16]
+ ldp x2, x3, [sp, #32]
+ ldp x0, x1, [sp, #48]
+ ldp x6, x7, [sp], #64
+ add x0, x0, #32
+ add x2, x2, #32
+ add x4, x4, #64
+ bl X(ff_hevc_put_hevc_epel_bi_hv32_8_neon_i8mm)
+ ldr x30, [sp], #16
+ ret
+endfunc
+
DISABLE_I8MM
#endif
diff --git a/libavcodec/aarch64/hevcdsp_init_aarch64.c
b/libavcodec/aarch64/hevcdsp_init_aarch64.c
index a0f0f072f8..da5e23575d 100644
--- a/libavcodec/aarch64/hevcdsp_init_aarch64.c
+++ b/libavcodec/aarch64/hevcdsp_init_aarch64.c
@@ -168,6 +168,10 @@ NEON8_FNPROTO(epel_bi_v, (uint8_t *dst, ptrdiff_t
dststride,
const uint8_t *src, ptrdiff_t srcstride, const int16_t *src2,
int height, intptr_t mx, intptr_t my, int width),);
+NEON8_FNPROTO(epel_bi_hv, (uint8_t *dst, ptrdiff_t dststride,
+ const uint8_t *src, ptrdiff_t srcstride, const int16_t *src2,
+ int height, intptr_t mx, intptr_t my, int width), _i8mm);
+
NEON8_FNPROTO(epel_v, (uint8_t *dst, ptrdiff_t dststride,
const uint8_t *_src, ptrdiff_t _srcstride, const int16_t *src2,
int height, intptr_t mx, intptr_t my, int width),);
@@ -354,6 +358,7 @@ av_cold void ff_hevc_dsp_init_aarch64(HEVCDSPContext
*c, const int bit_depth)
NEON8_FNASSIGN(c->put_hevc_epel, 1, 1, epel_hv, _i8mm);
NEON8_FNASSIGN(c->put_hevc_epel_uni, 1, 1, epel_uni_hv,
_i8mm);
NEON8_FNASSIGN(c->put_hevc_epel_uni_w, 0, 1, epel_uni_w_h
,_i8mm);
+ NEON8_FNASSIGN(c->put_hevc_epel_bi, 1, 1, epel_bi_hv, _i8mm);
NEON8_FNASSIGN(c->put_hevc_qpel, 0, 1, qpel_h, _i8mm);
NEON8_FNASSIGN(c->put_hevc_qpel, 1, 1, qpel_hv, _i8mm);
NEON8_FNASSIGN(c->put_hevc_qpel_uni, 1, 1, qpel_uni_hv,
_i8mm);
--
2.38.0.windows.1
[-- Attachment #2: 0004-lavc-aarch64-new-optimization-for-8-bit-hevc_epel_bi_hv.patch --]
[-- Type: text/plain, Size: 16491 bytes --]
From ee08ecdba909235233cb545153d538009af9184f Mon Sep 17 00:00:00 2001
From: Logan Lyu <Logan.Lyu@myais.com.cn>
Date: Sat, 11 Nov 2023 19:57:40 +0800
Subject: [PATCH 4/6] lavc/aarch64: new optimization for 8-bit hevc_epel_bi_hv
put_hevc_epel_bi_hv4_8_c: 242.9
put_hevc_epel_bi_hv4_8_i8mm: 68.6
put_hevc_epel_bi_hv6_8_c: 402.4
put_hevc_epel_bi_hv6_8_i8mm: 135.9
put_hevc_epel_bi_hv8_8_c: 636.4
put_hevc_epel_bi_hv8_8_i8mm: 145.6
put_hevc_epel_bi_hv12_8_c: 1363.1
put_hevc_epel_bi_hv12_8_i8mm: 324.1
put_hevc_epel_bi_hv16_8_c: 2222.1
put_hevc_epel_bi_hv16_8_i8mm: 509.1
put_hevc_epel_bi_hv24_8_c: 4793.4
put_hevc_epel_bi_hv24_8_i8mm: 1091.9
put_hevc_epel_bi_hv32_8_c: 8393.9
put_hevc_epel_bi_hv32_8_i8mm: 1720.6
put_hevc_epel_bi_hv48_8_c: 19526.6
put_hevc_epel_bi_hv48_8_i8mm: 4285.9
put_hevc_epel_bi_hv64_8_c: 33915.4
put_hevc_epel_bi_hv64_8_i8mm: 6783.6
Co-Authored-By: J. Dekker <jdek@itanimul.li>
---
libavcodec/aarch64/hevcdsp_epel_neon.S | 331 ++++++++++++++++++++++
libavcodec/aarch64/hevcdsp_init_aarch64.c | 5 +
2 files changed, 336 insertions(+)
diff --git a/libavcodec/aarch64/hevcdsp_epel_neon.S b/libavcodec/aarch64/hevcdsp_epel_neon.S
index 54e55cd508..b60091bf23 100644
--- a/libavcodec/aarch64/hevcdsp_epel_neon.S
+++ b/libavcodec/aarch64/hevcdsp_epel_neon.S
@@ -3212,6 +3212,337 @@ function ff_hevc_put_hevc_epel_uni_w_hv64_8_neon_i8mm, export=1
ret
endfunc
+
+function ff_hevc_put_hevc_epel_bi_hv4_8_neon_i8mm, export=1
+        add             w10, w5, #3                 // rows for H pass = height + 3 (4-tap V filter history)
+        lsl             x10, x10, #7                // * (MAX_PB_SIZE * 2) bytes per intermediate row
+        sub             sp, sp, x10 // tmp_array
+        stp             x7, x30, [sp, #-48]!        // save my and LR across the call
+        stp             x4, x5, [sp, #16]           // save src2, height
+        stp             x0, x1, [sp, #32]           // save dst, dststride
+        add             x0, sp, #48                 // H-pass output -> tmp_array
+        sub             x1, x2, x3                  // H-pass input starts one row above src
+        mov             x2, x3                      // srcstride
+        add             w3, w5, #3                  // H-pass height = height + 3
+        mov             x4, x6                      // mx
+        mov             x5, x7                      // my
+        bl              X(ff_hevc_put_hevc_epel_h4_8_neon_i8mm)
+        ldp             x4, x5, [sp, #16]           // restore src2, height
+        ldp             x0, x1, [sp, #32]           // restore dst, dststride
+        ldp             x7, x30, [sp], #48          // restore my, LR
+        load_epel_filterh x7, x6                    // vertical 4-tap coefficients selected by my
+        mov             x10, #(MAX_PB_SIZE * 2)     // stride of tmp_array rows (bytes)
+        ld1             {v16.4h}, [sp], x10         // preload first 3 intermediate rows
+        ld1             {v17.4h}, [sp], x10
+        ld1             {v18.4h}, [sp], x10
+.macro calc src0, src1, src2, src3
+        ld1             {\src3\().4h}, [sp], x10    // fetch next intermediate row
+        calc_epelh      v4, \src0, \src1, \src2, \src3  // vertical 4-tap filter
+        ld1             {v6.4h}, [x4], x10          // second-reference row from src2
+        sqadd           v4.4h, v4.4h, v6.4h         // bi prediction: saturating add of both refs
+        sqrshrun        v4.8b, v4.8h, #7            // round/shift/narrow to unsigned 8-bit
+        subs            w5, w5, #1                  // --height; loop control tested in calc_all4
+        st1             {v4.s}[0], [x0], x1         // store 4 output pixels
+.endm
+1:      calc_all4
+.purgem calc
+2:      ret
+endfunc
+
+function ff_hevc_put_hevc_epel_bi_hv6_8_neon_i8mm, export=1
+        add             w10, w5, #3                 // rows for H pass = height + 3 (4-tap V filter history)
+        lsl             x10, x10, #7                // * (MAX_PB_SIZE * 2) bytes per intermediate row
+        sub             sp, sp, x10 // tmp_array
+        stp             x7, x30, [sp, #-48]!        // save my and LR across the call
+        stp             x4, x5, [sp, #16]           // save src2, height
+        stp             x0, x1, [sp, #32]           // save dst, dststride
+        add             x0, sp, #48                 // H-pass output -> tmp_array
+        sub             x1, x2, x3                  // H-pass input starts one row above src
+        mov             x2, x3                      // srcstride
+        add             w3, w5, #3                  // H-pass height = height + 3
+        mov             x4, x6                      // mx
+        mov             x5, x7                      // my
+        bl              X(ff_hevc_put_hevc_epel_h6_8_neon_i8mm)
+        ldp             x4, x5, [sp, #16]           // restore src2, height
+        ldp             x0, x1, [sp, #32]           // restore dst, dststride
+        ldp             x7, x30, [sp], #48          // restore my, LR
+        load_epel_filterh x7, x6                    // vertical 4-tap coefficients selected by my
+        sub             x1, x1, #4                  // row stored as 4 + 2 bytes; compensate stride
+        mov             x10, #(MAX_PB_SIZE * 2)     // stride of tmp_array rows (bytes)
+        ld1             {v16.8h}, [sp], x10         // preload first 3 intermediate rows
+        ld1             {v17.8h}, [sp], x10
+        ld1             {v18.8h}, [sp], x10
+.macro calc src0, src1, src2, src3
+        ld1             {\src3\().8h}, [sp], x10    // fetch next intermediate row
+        calc_epelh      v4, \src0, \src1, \src2, \src3      // vertical filter, low half
+        calc_epelh2     v4, v5, \src0, \src1, \src2, \src3  // vertical filter, high half
+        ld1             {v6.8h}, [x4], x10          // second-reference row from src2
+        sqadd           v4.8h, v4.8h, v6.8h         // bi prediction: saturating add of both refs
+        sqrshrun        v4.8b, v4.8h, #7            // round/shift/narrow to unsigned 8-bit
+        st1             {v4.s}[0], [x0], #4         // store first 4 of the 6 pixels
+        subs            w5, w5, #1                  // --height; loop control tested in calc_all4
+        st1             {v4.h}[2], [x0], x1         // store remaining 2 pixels
+.endm
+1:      calc_all4
+.purgem calc
+2:      ret
+endfunc
+
+function ff_hevc_put_hevc_epel_bi_hv8_8_neon_i8mm, export=1
+        add             w10, w5, #3                 // rows for H pass = height + 3 (4-tap V filter history)
+        lsl             x10, x10, #7                // * (MAX_PB_SIZE * 2) bytes per intermediate row
+        sub             sp, sp, x10 // tmp_array
+        stp             x7, x30, [sp, #-48]!        // save my and LR across the call
+        stp             x4, x5, [sp, #16]           // save src2, height
+        stp             x0, x1, [sp, #32]           // save dst, dststride
+        add             x0, sp, #48                 // H-pass output -> tmp_array
+        sub             x1, x2, x3                  // H-pass input starts one row above src
+        mov             x2, x3                      // srcstride
+        add             w3, w5, #3                  // H-pass height = height + 3
+        mov             x4, x6                      // mx
+        mov             x5, x7                      // my
+        bl              X(ff_hevc_put_hevc_epel_h8_8_neon_i8mm)
+        ldp             x4, x5, [sp, #16]           // restore src2, height
+        ldp             x0, x1, [sp, #32]           // restore dst, dststride
+        ldp             x7, x30, [sp], #48          // restore my, LR
+        load_epel_filterh x7, x6                    // vertical 4-tap coefficients selected by my
+        mov             x10, #(MAX_PB_SIZE * 2)     // stride of tmp_array rows (bytes)
+        ld1             {v16.8h}, [sp], x10         // preload first 3 intermediate rows
+        ld1             {v17.8h}, [sp], x10
+        ld1             {v18.8h}, [sp], x10
+.macro calc src0, src1, src2, src3
+        ld1             {\src3\().8h}, [sp], x10    // fetch next intermediate row
+        calc_epelh      v4, \src0, \src1, \src2, \src3      // vertical filter, low half
+        calc_epelh2     v4, v5, \src0, \src1, \src2, \src3  // vertical filter, high half
+        ld1             {v6.8h}, [x4], x10          // second-reference row from src2
+        sqadd           v4.8h, v4.8h, v6.8h         // bi prediction: saturating add of both refs
+        sqrshrun        v4.8b, v4.8h, #7            // round/shift/narrow to unsigned 8-bit
+        subs            w5, w5, #1                  // --height; loop control tested in calc_all4
+        st1             {v4.8b}, [x0], x1           // store 8 output pixels
+.endm
+1:      calc_all4
+.purgem calc
+2:      ret
+endfunc
+
+function ff_hevc_put_hevc_epel_bi_hv12_8_neon_i8mm, export=1
+        add             w10, w5, #3                 // rows for H pass = height + 3 (4-tap V filter history)
+        lsl             x10, x10, #7                // * (MAX_PB_SIZE * 2) bytes per intermediate row
+        sub             sp, sp, x10 // tmp_array
+        stp             x7, x30, [sp, #-48]!        // save my and LR across the call
+        stp             x4, x5, [sp, #16]           // save src2, height
+        stp             x0, x1, [sp, #32]           // save dst, dststride
+        add             x0, sp, #48                 // H-pass output -> tmp_array
+        sub             x1, x2, x3                  // H-pass input starts one row above src
+        mov             x2, x3                      // srcstride
+        add             w3, w5, #3                  // H-pass height = height + 3
+        mov             x4, x6                      // mx
+        mov             x5, x7                      // my
+        bl              X(ff_hevc_put_hevc_epel_h12_8_neon_i8mm)
+        ldp             x4, x5, [sp, #16]           // restore src2, height
+        ldp             x0, x1, [sp, #32]           // restore dst, dststride
+        ldp             x7, x30, [sp], #48          // restore my, LR
+        load_epel_filterh x7, x6                    // vertical 4-tap coefficients selected by my
+        sub             x1, x1, #8                  // row stored as 8 + 4 bytes; compensate stride
+        mov             x10, #(MAX_PB_SIZE * 2)     // stride of tmp_array rows (bytes)
+        ld1             {v16.8h, v17.8h}, [sp], x10 // preload first 3 intermediate rows (2 regs/row)
+        ld1             {v18.8h, v19.8h}, [sp], x10
+        ld1             {v20.8h, v21.8h}, [sp], x10
+.macro calc src0, src1, src2, src3, src4, src5, src6, src7
+        ld1             {\src6\().8h, \src7\().8h}, [sp], x10  // fetch next intermediate row
+        calc_epelh      v4, \src0, \src2, \src4, \src6        // vertical filter, columns 0-7 low
+        calc_epelh2     v4, v5, \src0, \src2, \src4, \src6    // vertical filter, columns 0-7 high
+        calc_epelh      v5, \src1, \src3, \src5, \src7        // vertical filter, columns 8-11
+        ld1             {v6.8h, v7.8h}, [x4], x10   // second-reference row from src2
+        sqadd           v4.8h, v4.8h, v6.8h         // bi prediction: saturating add of both refs
+        sqadd           v5.8h, v5.8h, v7.8h
+        sqrshrun        v4.8b, v4.8h, #7            // round/shift/narrow to unsigned 8-bit
+        sqrshrun2       v4.16b, v5.8h, #7
+        st1             {v4.8b}, [x0], #8           // store first 8 of the 12 pixels
+        subs            w5, w5, #1                  // --height; loop control tested in calc_all8
+        st1             {v4.s}[2], [x0], x1         // store remaining 4 pixels
+.endm
+1:      calc_all8
+.purgem calc
+2:      ret
+endfunc
+
+function ff_hevc_put_hevc_epel_bi_hv16_8_neon_i8mm, export=1
+        add             w10, w5, #3                 // rows for H pass = height + 3 (4-tap V filter history)
+        lsl             x10, x10, #7                // * (MAX_PB_SIZE * 2) bytes per intermediate row
+        sub             sp, sp, x10 // tmp_array
+        stp             x7, x30, [sp, #-48]!        // save my and LR across the call
+        stp             x4, x5, [sp, #16]           // save src2, height
+        stp             x0, x1, [sp, #32]           // save dst, dststride
+        add             x0, sp, #48                 // H-pass output -> tmp_array
+        sub             x1, x2, x3                  // H-pass input starts one row above src
+        mov             x2, x3                      // srcstride
+        add             w3, w5, #3                  // H-pass height = height + 3
+        mov             x4, x6                      // mx
+        mov             x5, x7                      // my
+        bl              X(ff_hevc_put_hevc_epel_h16_8_neon_i8mm)
+        ldp             x4, x5, [sp, #16]           // restore src2, height
+        ldp             x0, x1, [sp, #32]           // restore dst, dststride
+        ldp             x7, x30, [sp], #48          // restore my, LR
+        load_epel_filterh x7, x6                    // vertical 4-tap coefficients selected by my
+        mov             x10, #(MAX_PB_SIZE * 2)     // stride of tmp_array rows (bytes)
+        ld1             {v16.8h, v17.8h}, [sp], x10 // preload first 3 intermediate rows (2 regs/row)
+        ld1             {v18.8h, v19.8h}, [sp], x10
+        ld1             {v20.8h, v21.8h}, [sp], x10
+.macro calc src0, src1, src2, src3, src4, src5, src6, src7
+        ld1             {\src6\().8h, \src7\().8h}, [sp], x10  // fetch next intermediate row
+        calc_epelh      v4, \src0, \src2, \src4, \src6        // vertical filter, columns 0-7
+        calc_epelh2     v4, v5, \src0, \src2, \src4, \src6
+        calc_epelh      v5, \src1, \src3, \src5, \src7        // vertical filter, columns 8-15
+        calc_epelh2     v5, v6, \src1, \src3, \src5, \src7
+        ld1             {v6.8h, v7.8h}, [x4], x10   // second-reference row from src2
+        sqadd           v4.8h, v4.8h, v6.8h         // bi prediction: saturating add of both refs
+        sqadd           v5.8h, v5.8h, v7.8h
+        sqrshrun        v4.8b, v4.8h, #7            // round/shift/narrow to unsigned 8-bit
+        sqrshrun2       v4.16b, v5.8h, #7
+        st1             {v4.16b}, [x0], x1          // store 16 output pixels
+        subs            w5, w5, #1                  // --height; loop control tested in calc_all8
+.endm
+1:      calc_all8
+.purgem calc
+2:      ret
+endfunc
+
+function ff_hevc_put_hevc_epel_bi_hv24_8_neon_i8mm, export=1
+        add             w10, w5, #3                 // rows for H pass = height + 3 (4-tap V filter history)
+        lsl             x10, x10, #7                // * (MAX_PB_SIZE * 2) bytes per intermediate row
+        sub             sp, sp, x10 // tmp_array
+        stp             x7, x30, [sp, #-48]!        // save my and LR across the call
+        stp             x4, x5, [sp, #16]           // save src2, height
+        stp             x0, x1, [sp, #32]           // save dst, dststride
+        add             x0, sp, #48                 // H-pass output -> tmp_array
+        sub             x1, x2, x3                  // H-pass input starts one row above src
+        mov             x2, x3                      // srcstride
+        add             w3, w5, #3                  // H-pass height = height + 3
+        mov             x4, x6                      // mx
+        mov             x5, x7                      // my
+        bl              X(ff_hevc_put_hevc_epel_h24_8_neon_i8mm)
+        ldp             x4, x5, [sp, #16]           // restore src2, height
+        ldp             x0, x1, [sp, #32]           // restore dst, dststride
+        ldp             x7, x30, [sp], #48          // restore my, LR
+        load_epel_filterh x7, x6                    // vertical 4-tap coefficients selected by my
+        mov             x10, #(MAX_PB_SIZE * 2)     // stride of tmp_array rows (bytes)
+        ld1             {v16.8h, v17.8h, v18.8h}, [sp], x10    // preload 3 rows (3 regs/row)
+        ld1             {v19.8h, v20.8h, v21.8h}, [sp], x10
+        ld1             {v22.8h, v23.8h, v24.8h}, [sp], x10
+.macro calc src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10, src11
+        ld1             {\src9\().8h, \src10\().8h, \src11\().8h}, [sp], x10   // fetch next row
+        calc_epelh      v1, \src0, \src3, \src6, \src9        // vertical filter, columns 0-7
+        calc_epelh2     v1, v2, \src0, \src3, \src6, \src9
+        calc_epelh      v2, \src1, \src4, \src7, \src10       // vertical filter, columns 8-15
+        calc_epelh2     v2, v3, \src1, \src4, \src7, \src10
+        calc_epelh      v3, \src2, \src5, \src8, \src11       // vertical filter, columns 16-23
+        calc_epelh2     v3, v4, \src2, \src5, \src8, \src11
+        ld1             {v4.8h, v5.8h, v6.8h}, [x4], x10       // second-reference row from src2
+        sqadd           v1.8h, v1.8h, v4.8h         // bi prediction: saturating add of both refs
+        sqadd           v2.8h, v2.8h, v5.8h
+        sqadd           v3.8h, v3.8h, v6.8h
+        sqrshrun        v1.8b, v1.8h, #7            // round/shift/narrow to unsigned 8-bit
+        sqrshrun        v2.8b, v2.8h, #7
+        sqrshrun        v3.8b, v3.8h, #7
+        subs            w5, w5, #1                  // --height; loop control tested in calc_all12
+        st1             {v1.8b, v2.8b, v3.8b}, [x0], x1        // store 24 output pixels
+.endm
+1:      calc_all12
+.purgem calc
+2:      ret
+endfunc
+
+function ff_hevc_put_hevc_epel_bi_hv32_8_neon_i8mm, export=1
+        // v8 is used as scratch below; d8 is callee-saved per AAPCS64, so spill it.
+        str             d8, [sp, #-16]!             // pre-indexed store keeps sp 16-byte aligned
+        add             w10, w5, #3                 // rows for H pass = height + 3 (4-tap V filter history)
+        lsl             x10, x10, #7                // * (MAX_PB_SIZE * 2) bytes per intermediate row
+        sub             sp, sp, x10 // tmp_array
+        stp             x7, x30, [sp, #-48]!        // save my and LR across the call
+        stp             x4, x5, [sp, #16]           // save src2, height
+        stp             x0, x1, [sp, #32]           // save dst, dststride
+        add             x0, sp, #48                 // H-pass output -> tmp_array
+        sub             x1, x2, x3                  // H-pass input starts one row above src
+        mov             x2, x3                      // srcstride
+        add             w3, w5, #3                  // H-pass height = height + 3
+        mov             x4, x6                      // mx
+        mov             x5, x7                      // my
+        bl              X(ff_hevc_put_hevc_epel_h32_8_neon_i8mm)
+        ldp             x4, x5, [sp, #16]           // restore src2, height
+        ldp             x0, x1, [sp, #32]           // restore dst, dststride
+        ldp             x7, x30, [sp], #48          // restore my, LR
+        load_epel_filterh x7, x6                    // vertical 4-tap coefficients selected by my
+        mov             x10, #(MAX_PB_SIZE * 2)     // stride of tmp_array rows (bytes)
+        ld1             {v16.8h, v17.8h, v18.8h, v19.8h}, [sp], x10    // preload 3 rows (4 regs/row)
+        ld1             {v20.8h, v21.8h, v22.8h, v23.8h}, [sp], x10
+        ld1             {v24.8h, v25.8h, v26.8h, v27.8h}, [sp], x10
+.macro calc src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10, src11, src12, src13, src14, src15
+        ld1             {\src12\().8h, \src13\().8h, \src14\().8h, \src15\().8h}, [sp], x10    // next row
+        calc_epelh      v1, \src0, \src4, \src8, \src12        // vertical filter, columns 0-7
+        calc_epelh2     v1, v2, \src0, \src4, \src8, \src12
+        calc_epelh      v2, \src1, \src5, \src9, \src13        // vertical filter, columns 8-15
+        calc_epelh2     v2, v3, \src1, \src5, \src9, \src13
+        calc_epelh      v3, \src2, \src6, \src10, \src14       // vertical filter, columns 16-23
+        calc_epelh2     v3, v4, \src2, \src6, \src10, \src14
+        calc_epelh      v4, \src3, \src7, \src11, \src15       // vertical filter, columns 24-31
+        calc_epelh2     v4, v5, \src3, \src7, \src11, \src15
+        ld1             {v5.8h, v6.8h, v7.8h, v8.8h}, [x4], x10        // second-reference row from src2
+        sqadd           v1.8h, v1.8h, v5.8h         // bi prediction: saturating add of both refs
+        sqadd           v2.8h, v2.8h, v6.8h
+        sqadd           v3.8h, v3.8h, v7.8h
+        sqadd           v4.8h, v4.8h, v8.8h
+        sqrshrun        v1.8b, v1.8h, #7            // round/shift/narrow to unsigned 8-bit
+        sqrshrun        v2.8b, v2.8h, #7
+        sqrshrun        v3.8b, v3.8h, #7
+        sqrshrun        v4.8b, v4.8h, #7
+        st1             {v1.8b, v2.8b, v3.8b, v4.8b}, [x0], x1 // store 32 output pixels
+        subs            w5, w5, #1                  // --height; loop control tested in calc_all16
+.endm
+1:      calc_all16
+.purgem calc
+2:      ldr             d8, [sp], #16               // restore callee-saved d8
+        ret
+endfunc
+
+function ff_hevc_put_hevc_epel_bi_hv48_8_neon_i8mm, export=1
+        stp             x6, x7, [sp, #-80]!         // save all 8 args + LR across the first call
+        stp             x4, x5, [sp, #16]
+        stp             x2, x3, [sp, #32]
+        stp             x0, x1, [sp, #48]
+        str             x30, [sp, #64]
+        bl              X(ff_hevc_put_hevc_epel_bi_hv24_8_neon_i8mm)    // left 24 columns
+        ldp             x4, x5, [sp, #16]           // restore src2, height
+        ldp             x2, x3, [sp, #32]           // restore src, srcstride
+        ldp             x0, x1, [sp, #48]           // restore dst, dststride
+        ldp             x6, x7, [sp], #64           // restore mx, my; pops 64, x30 stays at [sp]
+        add             x0, x0, #24                 // dst += 24 pixels (1 byte each)
+        add             x2, x2, #24                 // src += 24 pixels
+        add             x4, x4, #48                 // src2 += 24 elements (int16_t, 2 bytes each)
+        bl              X(ff_hevc_put_hevc_epel_bi_hv24_8_neon_i8mm)    // right 24 columns
+        ldr             x30, [sp], #16              // restore LR, release remaining 16 bytes
+        ret
+endfunc
+
+function ff_hevc_put_hevc_epel_bi_hv64_8_neon_i8mm, export=1
+        stp             x6, x7, [sp, #-80]!         // save all 8 args + LR across the first call
+        stp             x4, x5, [sp, #16]
+        stp             x2, x3, [sp, #32]
+        stp             x0, x1, [sp, #48]
+        str             x30, [sp, #64]
+        bl              X(ff_hevc_put_hevc_epel_bi_hv32_8_neon_i8mm)    // left 32 columns
+        ldp             x4, x5, [sp, #16]           // restore src2, height
+        ldp             x2, x3, [sp, #32]           // restore src, srcstride
+        ldp             x0, x1, [sp, #48]           // restore dst, dststride
+        ldp             x6, x7, [sp], #64           // restore mx, my; pops 64, x30 stays at [sp]
+        add             x0, x0, #32                 // dst += 32 pixels (1 byte each)
+        add             x2, x2, #32                 // src += 32 pixels
+        add             x4, x4, #64                 // src2 += 32 elements (int16_t, 2 bytes each)
+        bl              X(ff_hevc_put_hevc_epel_bi_hv32_8_neon_i8mm)    // right 32 columns
+        ldr             x30, [sp], #16              // restore LR, release remaining 16 bytes
+        ret
+endfunc
+
DISABLE_I8MM
#endif
diff --git a/libavcodec/aarch64/hevcdsp_init_aarch64.c b/libavcodec/aarch64/hevcdsp_init_aarch64.c
index a0f0f072f8..da5e23575d 100644
--- a/libavcodec/aarch64/hevcdsp_init_aarch64.c
+++ b/libavcodec/aarch64/hevcdsp_init_aarch64.c
@@ -168,6 +168,10 @@ NEON8_FNPROTO(epel_bi_v, (uint8_t *dst, ptrdiff_t dststride,
const uint8_t *src, ptrdiff_t srcstride, const int16_t *src2,
int height, intptr_t mx, intptr_t my, int width),);
+NEON8_FNPROTO(epel_bi_hv, (uint8_t *dst, ptrdiff_t dststride,
+ const uint8_t *src, ptrdiff_t srcstride, const int16_t *src2,
+ int height, intptr_t mx, intptr_t my, int width), _i8mm);
+
NEON8_FNPROTO(epel_v, (uint8_t *dst, ptrdiff_t dststride,
const uint8_t *_src, ptrdiff_t _srcstride, const int16_t *src2,
int height, intptr_t mx, intptr_t my, int width),);
@@ -354,6 +358,7 @@ av_cold void ff_hevc_dsp_init_aarch64(HEVCDSPContext *c, const int bit_depth)
NEON8_FNASSIGN(c->put_hevc_epel, 1, 1, epel_hv, _i8mm);
NEON8_FNASSIGN(c->put_hevc_epel_uni, 1, 1, epel_uni_hv, _i8mm);
NEON8_FNASSIGN(c->put_hevc_epel_uni_w, 0, 1, epel_uni_w_h ,_i8mm);
+ NEON8_FNASSIGN(c->put_hevc_epel_bi, 1, 1, epel_bi_hv, _i8mm);
NEON8_FNASSIGN(c->put_hevc_qpel, 0, 1, qpel_h, _i8mm);
NEON8_FNASSIGN(c->put_hevc_qpel, 1, 1, qpel_hv, _i8mm);
NEON8_FNASSIGN(c->put_hevc_qpel_uni, 1, 1, qpel_uni_hv, _i8mm);
--
2.38.0.windows.1
[-- Attachment #3: Type: text/plain, Size: 251 bytes --]
_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
^ permalink raw reply [flat|nested] 2+ messages in thread
* Re: [FFmpeg-devel] [PATCH 4/6] lavc/aarch64: new optimization for 8-bit hevc_epel_bi_hv
2023-11-18 2:06 [FFmpeg-devel] [PATCH 4/6] lavc/aarch64: new optimization for 8-bit hevc_epel_bi_hv Logan.Lyu
@ 2023-12-01 18:16 ` Martin Storsjö
0 siblings, 0 replies; 2+ messages in thread
From: Martin Storsjö @ 2023-12-01 18:16 UTC (permalink / raw)
To: FFmpeg development discussions and patches; +Cc: jdek
On Sat, 18 Nov 2023, Logan.Lyu wrote:
> diff --git a/libavcodec/aarch64/hevcdsp_epel_neon.S
> b/libavcodec/aarch64/hevcdsp_epel_neon.S
> index 54e55cd508..b60091bf23 100644
> --- a/libavcodec/aarch64/hevcdsp_epel_neon.S
> +++ b/libavcodec/aarch64/hevcdsp_epel_neon.S
> +function ff_hevc_put_hevc_epel_bi_hv24_8_neon_i8mm, export=1
> + add w10, w5, #3
> + lsl x10, x10, #7
> + sub sp, sp, x10 // tmp_array
> + stp x7, x30, [sp, #-48]!
> + stp x4, x5, [sp, #16]
> + stp x0, x1, [sp, #32]
> + add x0, sp, #48
> + sub x1, x2, x3
> + mov x2, x3
> + add w3, w5, #3
> + mov x4, x6
> + mov x5, x7
> + bl X(ff_hevc_put_hevc_epel_h24_8_neon_i8mm)
> + ldp x4, x5, [sp, #16]
> + ldp x0, x1, [sp, #32]
> + ldp x7, x30, [sp], #48
> + load_epel_filterh x7, x6
> + mov x10, #(MAX_PB_SIZE * 2)
> + ld1 {v16.8h, v17.8h, v18.8h}, [sp], x10
> + ld1 {v19.8h, v20.8h, v21.8h}, [sp], x10
> + ld1 {v22.8h, v23.8h, v24.8h}, [sp], x10
> +.macro calc src0, src1, src2, src3, src4, src5, src6, src7, src8, src9,
> src10, src11
> + ld1 {\src9\().8h, \src10\().8h, \src11\().8h}, [sp], x10
> + calc_epelh v1, \src0, \src3, \src6, \src9
> + calc_epelh2 v1, v2, \src0, \src3, \src6, \src9
The ld1 instructions have misindented the {} parts
> + calc_epelh v2, \src1, \src4, \src7, \src10
> + calc_epelh2 v2, v3, \src1, \src4, \src7, \src10
> + calc_epelh v3, \src2, \src5, \src8, \src11
> + calc_epelh2 v3, v4, \src2, \src5, \src8, \src11
> + ld1 {v4.8h, v5.8h, v6.8h}, [x4], x10
> + sqadd v1.8h, v1.8h, v4.8h
> + sqadd v2.8h, v2.8h, v5.8h
> + sqadd v3.8h, v3.8h, v6.8h
> + sqrshrun v1.8b, v1.8h, #7
> + sqrshrun v2.8b, v2.8h, #7
> + sqrshrun v3.8b, v3.8h, #7
> + subs w5, w5, #1
> + st1 {v1.8b, v2.8b, v3.8b}, [x0], x1
> +.endm
> +1: calc_all12
> +.purgem calc
> +2: ret
> +endfunc
> +
> +function ff_hevc_put_hevc_epel_bi_hv32_8_neon_i8mm, export=1
> + sub sp, sp, #16
> + str d8, [sp]
This could be "str d8, [sp, #-16]!"
// Martin
_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
^ permalink raw reply [flat|nested] 2+ messages in thread
end of thread, other threads:[~2023-12-01 18:16 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-11-18 2:06 [FFmpeg-devel] [PATCH 4/6] lavc/aarch64: new optimization for 8-bit hevc_epel_bi_hv Logan.Lyu
2023-12-01 18:16 ` Martin Storsjö
Git Inbox Mirror of the ffmpeg-devel mailing list - see https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
This inbox may be cloned and mirrored by anyone:
git clone --mirror https://master.gitmailbox.com/ffmpegdev/0 ffmpegdev/git/0.git
# If you have public-inbox 1.1+ installed, you may
# initialize and index your mirror using the following commands:
public-inbox-init -V2 ffmpegdev ffmpegdev/ https://master.gitmailbox.com/ffmpegdev \
ffmpegdev@gitmailbox.com
public-inbox-index ffmpegdev
Example config snippet for mirrors.
AGPL code for this site: git clone https://public-inbox.org/public-inbox.git