* [FFmpeg-devel] [PATCH 6/6] lavc/aarch64: new optimization for 8-bit hevc_qpel_bi_hv
@ 2023-11-18 2:07 Logan.Lyu
0 siblings, 0 replies; only message in thread
From: Logan.Lyu @ 2023-11-18 2:07 UTC (permalink / raw)
To: ffmpeg-devel; +Cc: jdek
[-- Attachment #1: Type: text/plain, Size: 15107 bytes --]
put_hevc_qpel_bi_hv4_8_c: 433.7
put_hevc_qpel_bi_hv4_8_i8mm: 117.9
put_hevc_qpel_bi_hv6_8_c: 803.9
put_hevc_qpel_bi_hv6_8_i8mm: 252.7
put_hevc_qpel_bi_hv8_8_c: 1296.4
put_hevc_qpel_bi_hv8_8_i8mm: 316.2
put_hevc_qpel_bi_hv12_8_c: 2867.4
put_hevc_qpel_bi_hv12_8_i8mm: 669.2
put_hevc_qpel_bi_hv16_8_c: 4709.4
put_hevc_qpel_bi_hv16_8_i8mm: 929.9
put_hevc_qpel_bi_hv24_8_c: 9639.7
put_hevc_qpel_bi_hv24_8_i8mm: 2072.4
put_hevc_qpel_bi_hv32_8_c: 16663.7
put_hevc_qpel_bi_hv32_8_i8mm: 3391.4
put_hevc_qpel_bi_hv48_8_c: 36972.9
put_hevc_qpel_bi_hv48_8_i8mm: 7505.7
put_hevc_qpel_bi_hv64_8_c: 64106.4
put_hevc_qpel_bi_hv64_8_i8mm: 13145.2
Co-Authored-By: J. Dekker <jdek@itanimul.li>
Signed-off-by: Logan Lyu <Logan.Lyu@myais.com.cn>
---
libavcodec/aarch64/hevcdsp_init_aarch64.c | 5 +
libavcodec/aarch64/hevcdsp_qpel_neon.S | 299 ++++++++++++++++++++++
2 files changed, 304 insertions(+)
diff --git a/libavcodec/aarch64/hevcdsp_init_aarch64.c b/libavcodec/aarch64/hevcdsp_init_aarch64.c
index f2cce3d221..cfc2f39a6b 100644
--- a/libavcodec/aarch64/hevcdsp_init_aarch64.c
+++ b/libavcodec/aarch64/hevcdsp_init_aarch64.c
@@ -255,6 +255,10 @@ NEON8_FNPROTO(qpel_bi_v, (uint8_t *dst, ptrdiff_t dststride,
const uint8_t *src, ptrdiff_t srcstride, const int16_t *src2,
int height, intptr_t mx, intptr_t my, int width),);
+NEON8_FNPROTO(qpel_bi_hv, (uint8_t *dst, ptrdiff_t dststride,
+ const uint8_t *src, ptrdiff_t srcstride, const int16_t *src2,
+ int height, intptr_t mx, intptr_t my, int width), _i8mm);
+
#define NEON8_FNASSIGN(member, v, h, fn, ext) \
member[1][v][h] = ff_hevc_put_hevc_##fn##4_8_neon##ext; \
member[2][v][h] = ff_hevc_put_hevc_##fn##6_8_neon##ext; \
@@ -370,6 +374,7 @@ av_cold void ff_hevc_dsp_init_aarch64(HEVCDSPContext *c, const int bit_depth)
NEON8_FNASSIGN(c->put_hevc_qpel_uni_w, 0, 1, qpel_uni_w_h, _i8mm);
NEON8_FNASSIGN(c->put_hevc_epel_uni_w, 1, 1, epel_uni_w_hv, _i8mm);
NEON8_FNASSIGN_PARTIAL_5(c->put_hevc_qpel_uni_w, 1, 1, qpel_uni_w_hv, _i8mm);
+ NEON8_FNASSIGN(c->put_hevc_qpel_bi, 1, 1, qpel_bi_hv, _i8mm);
}
}
diff --git a/libavcodec/aarch64/hevcdsp_qpel_neon.S b/libavcodec/aarch64/hevcdsp_qpel_neon.S
index f9422942d8..56bc352648 100644
--- a/libavcodec/aarch64/hevcdsp_qpel_neon.S
+++ b/libavcodec/aarch64/hevcdsp_qpel_neon.S
@@ -4210,5 +4210,304 @@ function ff_hevc_put_hevc_qpel_uni_w_hv64_8_neon_i8mm, export=1
ret
endfunc
+function ff_hevc_put_hevc_qpel_bi_hv4_8_neon_i8mm, export=1
+ add w10, w5, #7
+ lsl x10, x10, #7
+ sub sp, sp, x10 // tmp_array
+ stp x7, x30, [sp, #-48]!
+ stp x4, x5, [sp, #16]
+ stp x0, x1, [sp, #32]
+ sub x1, x2, x3, lsl #1
+ sub x1, x1, x3
+ add x0, sp, #48
+ mov x2, x3
+ add w3, w5, #7
+ mov x4, x6
+ bl X(ff_hevc_put_hevc_qpel_h4_8_neon_i8mm)
+ ldp x4, x5, [sp, #16]
+ ldp x0, x1, [sp, #32]
+ ldp x7, x30, [sp], #48
+ mov x9, #(MAX_PB_SIZE * 2)
+ load_qpel_filterh x7, x6
+ ld1 {v16.4h}, [sp], x9
+ ld1 {v17.4h}, [sp], x9
+ ld1 {v18.4h}, [sp], x9
+ ld1 {v19.4h}, [sp], x9
+ ld1 {v20.4h}, [sp], x9
+ ld1 {v21.4h}, [sp], x9
+ ld1 {v22.4h}, [sp], x9
+.macro calc tmp, src0, src1, src2, src3, src4, src5, src6, src7
+ ld1 {\tmp\().4h}, [sp], x9
+ calc_qpelh v1, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7, sshr
+ ld1 {v5.4h}, [x4], x9 // src2
+ saddw v1.4s, v1.4s, v5.4h
+ rshrn v1.4h, v1.4s, #7
+ sqxtun v1.8b, v1.8h
+ subs w5, w5, #1
+ st1 {v1.s}[0], [x0], x1
+.endm
+1: calc_all
+.purgem calc
+2: ret
+endfunc
+
+function ff_hevc_put_hevc_qpel_bi_hv6_8_neon_i8mm, export=1
+ add w10, w5, #7
+ lsl x10, x10, #7
+ sub sp, sp, x10 // tmp_array
+ stp x7, x30, [sp, #-48]!
+ stp x4, x5, [sp, #16]
+ stp x0, x1, [sp, #32]
+ sub x1, x2, x3, lsl #1
+ sub x1, x1, x3
+ add x0, sp, #48
+ mov x2, x3
+ add x3, x5, #7
+ mov x4, x6
+ bl X(ff_hevc_put_hevc_qpel_h6_8_neon_i8mm)
+ ldp x4, x5, [sp, #16]
+ ldp x0, x1, [sp, #32]
+ ldp x7, x30, [sp], #48
+ mov x9, #(MAX_PB_SIZE * 2)
+ load_qpel_filterh x7, x6
+ sub x1, x1, #4
+ ld1 {v16.8h}, [sp], x9
+ ld1 {v17.8h}, [sp], x9
+ ld1 {v18.8h}, [sp], x9
+ ld1 {v19.8h}, [sp], x9
+ ld1 {v20.8h}, [sp], x9
+ ld1 {v21.8h}, [sp], x9
+ ld1 {v22.8h}, [sp], x9
+.macro calc tmp, src0, src1, src2, src3, src4, src5, src6, src7
+ ld1 {\tmp\().8h}, [sp], x9
+ calc_qpelh v1, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7, sshr
+ calc_qpelh2 v2, v2, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7, sshr
+ ld1 {v5.8h}, [x4], x9 // src2
+ saddw v1.4s, v1.4s, v5.4h
+ saddw2 v2.4s, v2.4s, v5.8h
+ rshrn v1.4h, v1.4s, #7
+ rshrn2 v1.8h, v2.4s, #7
+ sqxtun v1.8b, v1.8h
+ st1 {v1.s}[0], [x0], #4
+ subs w5, w5, #1
+ st1 {v1.h}[2], [x0], x1
+.endm
+1: calc_all
+.purgem calc
+2: ret
+endfunc
+
+function ff_hevc_put_hevc_qpel_bi_hv8_8_neon_i8mm, export=1
+ add w10, w5, #7
+ lsl x10, x10, #7
+ sub sp, sp, x10 // tmp_array
+ stp x7, x30, [sp, #-48]!
+ stp x4, x5, [sp, #16]
+ stp x0, x1, [sp, #32]
+ sub x1, x2, x3, lsl #1
+ sub x1, x1, x3
+ add x0, sp, #48
+ mov x2, x3
+ add x3, x5, #7
+ mov x4, x6
+ bl X(ff_hevc_put_hevc_qpel_h8_8_neon_i8mm)
+ ldp x4, x5, [sp, #16]
+ ldp x0, x1, [sp, #32]
+ ldp x7, x30, [sp], #48
+ mov x9, #(MAX_PB_SIZE * 2)
+ load_qpel_filterh x7, x6
+ ld1 {v16.8h}, [sp], x9
+ ld1 {v17.8h}, [sp], x9
+ ld1 {v18.8h}, [sp], x9
+ ld1 {v19.8h}, [sp], x9
+ ld1 {v20.8h}, [sp], x9
+ ld1 {v21.8h}, [sp], x9
+ ld1 {v22.8h}, [sp], x9
+.macro calc tmp, src0, src1, src2, src3, src4, src5, src6, src7
+ ld1 {\tmp\().8h}, [sp], x9
+ calc_qpelh v1, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7, sshr
+ calc_qpelh2 v2, v2, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7, sshr
+ ld1 {v5.8h}, [x4], x9 // src2
+ saddw v1.4s, v1.4s, v5.4h
+ saddw2 v2.4s, v2.4s, v5.8h
+ rshrn v1.4h, v1.4s, #7
+ rshrn2 v1.8h, v2.4s, #7
+ sqxtun v1.8b, v1.8h
+ subs w5, w5, #1
+ st1 {v1.8b}, [x0], x1
+.endm
+1: calc_all
+.purgem calc
+2: ret
+endfunc
+
+function ff_hevc_put_hevc_qpel_bi_hv12_8_neon_i8mm, export=1
+ stp x6, x7, [sp, #-80]!
+ stp x4, x5, [sp, #16]
+ stp x2, x3, [sp, #32]
+ stp x0, x1, [sp, #48]
+ str x30, [sp, #64]
+ bl X(ff_hevc_put_hevc_qpel_bi_hv8_8_neon_i8mm)
+ ldp x4, x5, [sp, #16]
+ ldp x2, x3, [sp, #32]
+ ldp x0, x1, [sp, #48]
+ ldp x6, x7, [sp], #64
+ add x4, x4, #16
+ add x2, x2, #8
+ add x0, x0, #8
+ bl X(ff_hevc_put_hevc_qpel_bi_hv4_8_neon_i8mm)
+ ldr x30, [sp], #16
+ ret
+endfunc
+
+function ff_hevc_put_hevc_qpel_bi_hv16_8_neon_i8mm, export=1
+ add w10, w5, #7
+ lsl x10, x10, #7
+ sub sp, sp, x10 // tmp_array
+ stp x7, x30, [sp, #-48]!
+ stp x4, x5, [sp, #16]
+ stp x0, x1, [sp, #32]
+ add x0, sp, #48
+ sub x1, x2, x3, lsl #1
+ sub x1, x1, x3
+ mov x2, x3
+ add w3, w5, #7
+ mov x4, x6
+ bl X(ff_hevc_put_hevc_qpel_h16_8_neon_i8mm)
+ ldp x4, x5, [sp, #16]
+ ldp x0, x1, [sp, #32]
+ ldp x7, x30, [sp], #48
+ mov x6, #16 // width
+.Lqpel_bi_hv16_loop:
+ load_qpel_filterh x7, x8
+ mov x9, #(MAX_PB_SIZE * 2)
+ mov x10, x6
+0: mov x8, sp // src
+ ld1 {v16.8h, v17.8h}, [x8], x9
+ mov w11, w5 // height
+ ld1 {v18.8h, v19.8h}, [x8], x9
+ mov x12, x4 // src2
+ ld1 {v20.8h, v21.8h}, [x8], x9
+ mov x7, x0 // dst
+ ld1 {v22.8h, v23.8h}, [x8], x9
+ ld1 {v24.8h, v25.8h}, [x8], x9
+ ld1 {v26.8h, v27.8h}, [x8], x9
+ ld1 {v28.8h, v29.8h}, [x8], x9
+.macro calc tmp0, tmp1, src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10, src11, src12, src13, src14, src15
+ ld1 {\tmp0\().8h, \tmp1\().8h}, [x8], x9
+ calc_qpelh v1, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7, sshr
+ calc_qpelh2 v2, v2, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7, sshr
+ calc_qpelh v3, \src8, \src9, \src10, \src11, \src12, \src13, \src14, \src15, sshr
+ calc_qpelh2 v4, v4, \src8, \src9, \src10, \src11, \src12, \src13, \src14, \src15, sshr
+ ld1 {v5.8h, v6.8h}, [x12], x9 // src2
+ saddw v1.4s, v1.4s, v5.4h
+ saddw2 v2.4s, v2.4s, v5.8h
+ saddw v3.4s, v3.4s, v6.4h
+ saddw2 v4.4s, v4.4s, v6.8h
+ rshrn v1.4h, v1.4s, #7
+ rshrn2 v1.8h, v2.4s, #7
+ rshrn v2.4h, v3.4s, #7
+ rshrn2 v2.8h, v4.4s, #7
+ sqxtun v1.8b, v1.8h
+ sqxtun2 v1.16b, v2.8h
+ subs x11, x11, #1
+ st1 {v1.16b}, [x7], x1
+.endm
+1: calc_all2
+.purgem calc
+2: add x0, x0, #16
+ add sp, sp, #32
+ subs x10, x10, #16
+ add x4, x4, #32
+ b.ne 0b
+ add w10, w5, #7
+ lsl x10, x10, #7
+ sub x10, x10, x6, lsl #1 // part of first line
+ add sp, sp, x10 // tmp_array without first line
+ ret
+endfunc
+
+function ff_hevc_put_hevc_qpel_bi_hv24_8_neon_i8mm, export=1
+ stp x6, x7, [sp, #-80]!
+ stp x4, x5, [sp, #16]
+ stp x2, x3, [sp, #32]
+ stp x0, x1, [sp, #48]
+ str x30, [sp, #64]
+ bl X(ff_hevc_put_hevc_qpel_bi_hv16_8_neon_i8mm)
+ ldp x4, x5, [sp, #16]
+ ldp x2, x3, [sp, #32]
+ ldp x0, x1, [sp, #48]
+ ldp x6, x7, [sp], #64
+ add x4, x4, #32
+ add x2, x2, #16
+ add x0, x0, #16
+ bl X(ff_hevc_put_hevc_qpel_bi_hv8_8_neon_i8mm)
+ ldr x30, [sp], #16
+ ret
+endfunc
+
+function ff_hevc_put_hevc_qpel_bi_hv32_8_neon_i8mm, export=1
+ add w10, w5, #7
+ lsl x10, x10, #7
+ sub sp, sp, x10 // tmp_array
+ stp x7, x30, [sp, #-48]!
+ stp x4, x5, [sp, #16]
+ stp x0, x1, [sp, #32]
+ add x0, sp, #48
+ sub x1, x2, x3, lsl #1
+ mov x2, x3
+ sub x1, x1, x3
+ add w3, w5, #7
+ mov x4, x6
+ bl X(ff_hevc_put_hevc_qpel_h32_8_neon_i8mm)
+ ldp x4, x5, [sp, #16]
+ ldp x0, x1, [sp, #32]
+ ldp x7, x30, [sp], #48
+ mov x6, #32 // width
+ b .Lqpel_bi_hv16_loop
+endfunc
+
+function ff_hevc_put_hevc_qpel_bi_hv48_8_neon_i8mm, export=1
+ add w10, w5, #7
+ lsl x10, x10, #7
+ sub sp, sp, x10 // tmp_array
+ stp x7, x30, [sp, #-48]!
+ stp x4, x5, [sp, #16]
+ stp x0, x1, [sp, #32]
+ add x0, sp, #48
+ sub x1, x2, x3, lsl #1
+ mov x2, x3
+ sub x1, x1, x3
+ add w3, w5, #7
+ mov x4, x6
+ bl X(ff_hevc_put_hevc_qpel_h48_8_neon_i8mm)
+ ldp x4, x5, [sp, #16]
+ ldp x0, x1, [sp, #32]
+ ldp x7, x30, [sp], #48
+ mov x6, #48 // width
+ b .Lqpel_bi_hv16_loop
+endfunc
+
+function ff_hevc_put_hevc_qpel_bi_hv64_8_neon_i8mm, export=1
+ add w10, w5, #7
+ lsl x10, x10, #7
+ sub sp, sp, x10 // tmp_array
+ stp x7, x30, [sp, #-48]!
+ stp x4, x5, [sp, #16]
+ stp x0, x1, [sp, #32]
+ add x0, sp, #48
+ sub x1, x2, x3, lsl #1
+ mov x2, x3
+ sub x1, x1, x3
+ add w3, w5, #7
+ mov x4, x6
+ bl X(ff_hevc_put_hevc_qpel_h64_8_neon_i8mm)
+ ldp x4, x5, [sp, #16]
+ ldp x0, x1, [sp, #32]
+ ldp x7, x30, [sp], #48
+ mov x6, #64 // width
+ b .Lqpel_bi_hv16_loop
+endfunc
+
DISABLE_I8MM
#endif // HAVE_I8MM
--
2.38.0.windows.1
[-- Attachment #2: 0006-lavc-aarch64-new-optimization-for-8-bit-hevc_qpel_bi_hv.patch --]
[-- Type: text/plain, Size: 15255 bytes --]
From 837103be64549b7b6c919174f880f58c099df8b6 Mon Sep 17 00:00:00 2001
From: Logan Lyu <Logan.Lyu@myais.com.cn>
Date: Sun, 12 Nov 2023 09:03:28 +0800
Subject: [PATCH 6/6] lavc/aarch64: new optimization for 8-bit hevc_qpel_bi_hv
put_hevc_qpel_bi_hv4_8_c: 433.7
put_hevc_qpel_bi_hv4_8_i8mm: 117.9
put_hevc_qpel_bi_hv6_8_c: 803.9
put_hevc_qpel_bi_hv6_8_i8mm: 252.7
put_hevc_qpel_bi_hv8_8_c: 1296.4
put_hevc_qpel_bi_hv8_8_i8mm: 316.2
put_hevc_qpel_bi_hv12_8_c: 2867.4
put_hevc_qpel_bi_hv12_8_i8mm: 669.2
put_hevc_qpel_bi_hv16_8_c: 4709.4
put_hevc_qpel_bi_hv16_8_i8mm: 929.9
put_hevc_qpel_bi_hv24_8_c: 9639.7
put_hevc_qpel_bi_hv24_8_i8mm: 2072.4
put_hevc_qpel_bi_hv32_8_c: 16663.7
put_hevc_qpel_bi_hv32_8_i8mm: 3391.4
put_hevc_qpel_bi_hv48_8_c: 36972.9
put_hevc_qpel_bi_hv48_8_i8mm: 7505.7
put_hevc_qpel_bi_hv64_8_c: 64106.4
put_hevc_qpel_bi_hv64_8_i8mm: 13145.2
Co-Authored-By: J. Dekker <jdek@itanimul.li>
---
libavcodec/aarch64/hevcdsp_init_aarch64.c | 5 +
libavcodec/aarch64/hevcdsp_qpel_neon.S | 299 ++++++++++++++++++++++
2 files changed, 304 insertions(+)
diff --git a/libavcodec/aarch64/hevcdsp_init_aarch64.c b/libavcodec/aarch64/hevcdsp_init_aarch64.c
index f2cce3d221..cfc2f39a6b 100644
--- a/libavcodec/aarch64/hevcdsp_init_aarch64.c
+++ b/libavcodec/aarch64/hevcdsp_init_aarch64.c
@@ -255,6 +255,10 @@ NEON8_FNPROTO(qpel_bi_v, (uint8_t *dst, ptrdiff_t dststride,
const uint8_t *src, ptrdiff_t srcstride, const int16_t *src2,
int height, intptr_t mx, intptr_t my, int width),);
+NEON8_FNPROTO(qpel_bi_hv, (uint8_t *dst, ptrdiff_t dststride,
+ const uint8_t *src, ptrdiff_t srcstride, const int16_t *src2,
+ int height, intptr_t mx, intptr_t my, int width), _i8mm);
+
#define NEON8_FNASSIGN(member, v, h, fn, ext) \
member[1][v][h] = ff_hevc_put_hevc_##fn##4_8_neon##ext; \
member[2][v][h] = ff_hevc_put_hevc_##fn##6_8_neon##ext; \
@@ -370,6 +374,7 @@ av_cold void ff_hevc_dsp_init_aarch64(HEVCDSPContext *c, const int bit_depth)
NEON8_FNASSIGN(c->put_hevc_qpel_uni_w, 0, 1, qpel_uni_w_h, _i8mm);
NEON8_FNASSIGN(c->put_hevc_epel_uni_w, 1, 1, epel_uni_w_hv, _i8mm);
NEON8_FNASSIGN_PARTIAL_5(c->put_hevc_qpel_uni_w, 1, 1, qpel_uni_w_hv, _i8mm);
+ NEON8_FNASSIGN(c->put_hevc_qpel_bi, 1, 1, qpel_bi_hv, _i8mm);
}
}
diff --git a/libavcodec/aarch64/hevcdsp_qpel_neon.S b/libavcodec/aarch64/hevcdsp_qpel_neon.S
index f9422942d8..56bc352648 100644
--- a/libavcodec/aarch64/hevcdsp_qpel_neon.S
+++ b/libavcodec/aarch64/hevcdsp_qpel_neon.S
@@ -4210,5 +4210,304 @@ function ff_hevc_put_hevc_qpel_uni_w_hv64_8_neon_i8mm, export=1
ret
endfunc
+function ff_hevc_put_hevc_qpel_bi_hv4_8_neon_i8mm, export=1
+ add w10, w5, #7
+ lsl x10, x10, #7
+ sub sp, sp, x10 // tmp_array
+ stp x7, x30, [sp, #-48]!
+ stp x4, x5, [sp, #16]
+ stp x0, x1, [sp, #32]
+ sub x1, x2, x3, lsl #1
+ sub x1, x1, x3
+ add x0, sp, #48
+ mov x2, x3
+ add w3, w5, #7
+ mov x4, x6
+ bl X(ff_hevc_put_hevc_qpel_h4_8_neon_i8mm)
+ ldp x4, x5, [sp, #16]
+ ldp x0, x1, [sp, #32]
+ ldp x7, x30, [sp], #48
+ mov x9, #(MAX_PB_SIZE * 2)
+ load_qpel_filterh x7, x6
+ ld1 {v16.4h}, [sp], x9
+ ld1 {v17.4h}, [sp], x9
+ ld1 {v18.4h}, [sp], x9
+ ld1 {v19.4h}, [sp], x9
+ ld1 {v20.4h}, [sp], x9
+ ld1 {v21.4h}, [sp], x9
+ ld1 {v22.4h}, [sp], x9
+.macro calc tmp, src0, src1, src2, src3, src4, src5, src6, src7
+ ld1 {\tmp\().4h}, [sp], x9
+ calc_qpelh v1, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7, sshr
+ ld1 {v5.4h}, [x4], x9 // src2
+ saddw v1.4s, v1.4s, v5.4h
+ rshrn v1.4h, v1.4s, #7
+ sqxtun v1.8b, v1.8h
+ subs w5, w5, #1
+ st1 {v1.s}[0], [x0], x1
+.endm
+1: calc_all
+.purgem calc
+2: ret
+endfunc
+
+function ff_hevc_put_hevc_qpel_bi_hv6_8_neon_i8mm, export=1
+ add w10, w5, #7
+ lsl x10, x10, #7
+ sub sp, sp, x10 // tmp_array
+ stp x7, x30, [sp, #-48]!
+ stp x4, x5, [sp, #16]
+ stp x0, x1, [sp, #32]
+ sub x1, x2, x3, lsl #1
+ sub x1, x1, x3
+ add x0, sp, #48
+ mov x2, x3
+ add x3, x5, #7
+ mov x4, x6
+ bl X(ff_hevc_put_hevc_qpel_h6_8_neon_i8mm)
+ ldp x4, x5, [sp, #16]
+ ldp x0, x1, [sp, #32]
+ ldp x7, x30, [sp], #48
+ mov x9, #(MAX_PB_SIZE * 2)
+ load_qpel_filterh x7, x6
+ sub x1, x1, #4
+ ld1 {v16.8h}, [sp], x9
+ ld1 {v17.8h}, [sp], x9
+ ld1 {v18.8h}, [sp], x9
+ ld1 {v19.8h}, [sp], x9
+ ld1 {v20.8h}, [sp], x9
+ ld1 {v21.8h}, [sp], x9
+ ld1 {v22.8h}, [sp], x9
+.macro calc tmp, src0, src1, src2, src3, src4, src5, src6, src7
+ ld1 {\tmp\().8h}, [sp], x9
+ calc_qpelh v1, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7, sshr
+ calc_qpelh2 v2, v2, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7, sshr
+ ld1 {v5.8h}, [x4], x9 // src2
+ saddw v1.4s, v1.4s, v5.4h
+ saddw2 v2.4s, v2.4s, v5.8h
+ rshrn v1.4h, v1.4s, #7
+ rshrn2 v1.8h, v2.4s, #7
+ sqxtun v1.8b, v1.8h
+ st1 {v1.s}[0], [x0], #4
+ subs w5, w5, #1
+ st1 {v1.h}[2], [x0], x1
+.endm
+1: calc_all
+.purgem calc
+2: ret
+endfunc
+
+function ff_hevc_put_hevc_qpel_bi_hv8_8_neon_i8mm, export=1
+ add w10, w5, #7
+ lsl x10, x10, #7
+ sub sp, sp, x10 // tmp_array
+ stp x7, x30, [sp, #-48]!
+ stp x4, x5, [sp, #16]
+ stp x0, x1, [sp, #32]
+ sub x1, x2, x3, lsl #1
+ sub x1, x1, x3
+ add x0, sp, #48
+ mov x2, x3
+ add x3, x5, #7
+ mov x4, x6
+ bl X(ff_hevc_put_hevc_qpel_h8_8_neon_i8mm)
+ ldp x4, x5, [sp, #16]
+ ldp x0, x1, [sp, #32]
+ ldp x7, x30, [sp], #48
+ mov x9, #(MAX_PB_SIZE * 2)
+ load_qpel_filterh x7, x6
+ ld1 {v16.8h}, [sp], x9
+ ld1 {v17.8h}, [sp], x9
+ ld1 {v18.8h}, [sp], x9
+ ld1 {v19.8h}, [sp], x9
+ ld1 {v20.8h}, [sp], x9
+ ld1 {v21.8h}, [sp], x9
+ ld1 {v22.8h}, [sp], x9
+.macro calc tmp, src0, src1, src2, src3, src4, src5, src6, src7
+ ld1 {\tmp\().8h}, [sp], x9
+ calc_qpelh v1, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7, sshr
+ calc_qpelh2 v2, v2, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7, sshr
+ ld1 {v5.8h}, [x4], x9 // src2
+ saddw v1.4s, v1.4s, v5.4h
+ saddw2 v2.4s, v2.4s, v5.8h
+ rshrn v1.4h, v1.4s, #7
+ rshrn2 v1.8h, v2.4s, #7
+ sqxtun v1.8b, v1.8h
+ subs w5, w5, #1
+ st1 {v1.8b}, [x0], x1
+.endm
+1: calc_all
+.purgem calc
+2: ret
+endfunc
+
+function ff_hevc_put_hevc_qpel_bi_hv12_8_neon_i8mm, export=1
+ stp x6, x7, [sp, #-80]!
+ stp x4, x5, [sp, #16]
+ stp x2, x3, [sp, #32]
+ stp x0, x1, [sp, #48]
+ str x30, [sp, #64]
+ bl X(ff_hevc_put_hevc_qpel_bi_hv8_8_neon_i8mm)
+ ldp x4, x5, [sp, #16]
+ ldp x2, x3, [sp, #32]
+ ldp x0, x1, [sp, #48]
+ ldp x6, x7, [sp], #64
+ add x4, x4, #16
+ add x2, x2, #8
+ add x0, x0, #8
+ bl X(ff_hevc_put_hevc_qpel_bi_hv4_8_neon_i8mm)
+ ldr x30, [sp], #16
+ ret
+endfunc
+
+function ff_hevc_put_hevc_qpel_bi_hv16_8_neon_i8mm, export=1
+ add w10, w5, #7
+ lsl x10, x10, #7
+ sub sp, sp, x10 // tmp_array
+ stp x7, x30, [sp, #-48]!
+ stp x4, x5, [sp, #16]
+ stp x0, x1, [sp, #32]
+ add x0, sp, #48
+ sub x1, x2, x3, lsl #1
+ sub x1, x1, x3
+ mov x2, x3
+ add w3, w5, #7
+ mov x4, x6
+ bl X(ff_hevc_put_hevc_qpel_h16_8_neon_i8mm)
+ ldp x4, x5, [sp, #16]
+ ldp x0, x1, [sp, #32]
+ ldp x7, x30, [sp], #48
+ mov x6, #16 // width
+.Lqpel_bi_hv16_loop:
+ load_qpel_filterh x7, x8
+ mov x9, #(MAX_PB_SIZE * 2)
+ mov x10, x6
+0: mov x8, sp // src
+ ld1 {v16.8h, v17.8h}, [x8], x9
+ mov w11, w5 // height
+ ld1 {v18.8h, v19.8h}, [x8], x9
+ mov x12, x4 // src2
+ ld1 {v20.8h, v21.8h}, [x8], x9
+ mov x7, x0 // dst
+ ld1 {v22.8h, v23.8h}, [x8], x9
+ ld1 {v24.8h, v25.8h}, [x8], x9
+ ld1 {v26.8h, v27.8h}, [x8], x9
+ ld1 {v28.8h, v29.8h}, [x8], x9
+.macro calc tmp0, tmp1, src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10, src11, src12, src13, src14, src15
+ ld1 {\tmp0\().8h, \tmp1\().8h}, [x8], x9
+ calc_qpelh v1, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7, sshr
+ calc_qpelh2 v2, v2, \src0, \src1, \src2, \src3, \src4, \src5, \src6, \src7, sshr
+ calc_qpelh v3, \src8, \src9, \src10, \src11, \src12, \src13, \src14, \src15, sshr
+ calc_qpelh2 v4, v4, \src8, \src9, \src10, \src11, \src12, \src13, \src14, \src15, sshr
+ ld1 {v5.8h, v6.8h}, [x12], x9 // src2
+ saddw v1.4s, v1.4s, v5.4h
+ saddw2 v2.4s, v2.4s, v5.8h
+ saddw v3.4s, v3.4s, v6.4h
+ saddw2 v4.4s, v4.4s, v6.8h
+ rshrn v1.4h, v1.4s, #7
+ rshrn2 v1.8h, v2.4s, #7
+ rshrn v2.4h, v3.4s, #7
+ rshrn2 v2.8h, v4.4s, #7
+ sqxtun v1.8b, v1.8h
+ sqxtun2 v1.16b, v2.8h
+ subs x11, x11, #1
+ st1 {v1.16b}, [x7], x1
+.endm
+1: calc_all2
+.purgem calc
+2: add x0, x0, #16
+ add sp, sp, #32
+ subs x10, x10, #16
+ add x4, x4, #32
+ b.ne 0b
+ add w10, w5, #7
+ lsl x10, x10, #7
+ sub x10, x10, x6, lsl #1 // part of first line
+ add sp, sp, x10 // tmp_array without first line
+ ret
+endfunc
+
+function ff_hevc_put_hevc_qpel_bi_hv24_8_neon_i8mm, export=1
+ stp x6, x7, [sp, #-80]!
+ stp x4, x5, [sp, #16]
+ stp x2, x3, [sp, #32]
+ stp x0, x1, [sp, #48]
+ str x30, [sp, #64]
+ bl X(ff_hevc_put_hevc_qpel_bi_hv16_8_neon_i8mm)
+ ldp x4, x5, [sp, #16]
+ ldp x2, x3, [sp, #32]
+ ldp x0, x1, [sp, #48]
+ ldp x6, x7, [sp], #64
+ add x4, x4, #32
+ add x2, x2, #16
+ add x0, x0, #16
+ bl X(ff_hevc_put_hevc_qpel_bi_hv8_8_neon_i8mm)
+ ldr x30, [sp], #16
+ ret
+endfunc
+
+function ff_hevc_put_hevc_qpel_bi_hv32_8_neon_i8mm, export=1
+ add w10, w5, #7
+ lsl x10, x10, #7
+ sub sp, sp, x10 // tmp_array
+ stp x7, x30, [sp, #-48]!
+ stp x4, x5, [sp, #16]
+ stp x0, x1, [sp, #32]
+ add x0, sp, #48
+ sub x1, x2, x3, lsl #1
+ mov x2, x3
+ sub x1, x1, x3
+ add w3, w5, #7
+ mov x4, x6
+ bl X(ff_hevc_put_hevc_qpel_h32_8_neon_i8mm)
+ ldp x4, x5, [sp, #16]
+ ldp x0, x1, [sp, #32]
+ ldp x7, x30, [sp], #48
+ mov x6, #32 // width
+ b .Lqpel_bi_hv16_loop
+endfunc
+
+function ff_hevc_put_hevc_qpel_bi_hv48_8_neon_i8mm, export=1
+ add w10, w5, #7
+ lsl x10, x10, #7
+ sub sp, sp, x10 // tmp_array
+ stp x7, x30, [sp, #-48]!
+ stp x4, x5, [sp, #16]
+ stp x0, x1, [sp, #32]
+ add x0, sp, #48
+ sub x1, x2, x3, lsl #1
+ mov x2, x3
+ sub x1, x1, x3
+ add w3, w5, #7
+ mov x4, x6
+ bl X(ff_hevc_put_hevc_qpel_h48_8_neon_i8mm)
+ ldp x4, x5, [sp, #16]
+ ldp x0, x1, [sp, #32]
+ ldp x7, x30, [sp], #48
+ mov x6, #48 // width
+ b .Lqpel_bi_hv16_loop
+endfunc
+
+function ff_hevc_put_hevc_qpel_bi_hv64_8_neon_i8mm, export=1
+ add w10, w5, #7
+ lsl x10, x10, #7
+ sub sp, sp, x10 // tmp_array
+ stp x7, x30, [sp, #-48]!
+ stp x4, x5, [sp, #16]
+ stp x0, x1, [sp, #32]
+ add x0, sp, #48
+ sub x1, x2, x3, lsl #1
+ mov x2, x3
+ sub x1, x1, x3
+ add w3, w5, #7
+ mov x4, x6
+ bl X(ff_hevc_put_hevc_qpel_h64_8_neon_i8mm)
+ ldp x4, x5, [sp, #16]
+ ldp x0, x1, [sp, #32]
+ ldp x7, x30, [sp], #48
+ mov x6, #64 // width
+ b .Lqpel_bi_hv16_loop
+endfunc
+
DISABLE_I8MM
#endif // HAVE_I8MM
--
2.38.0.windows.1
[-- Attachment #3: Type: text/plain, Size: 251 bytes --]
_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
^ permalink raw reply [flat|nested] only message in thread
only message in thread, other threads:[~2023-11-18 2:07 UTC | newest]
Thread overview: (only message) (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-11-18 2:07 [FFmpeg-devel] [PATCH 6/6] lavc/aarch64: new optimization for 8-bit hevc_qpel_bi_hv Logan.Lyu
Git Inbox Mirror of the ffmpeg-devel mailing list - see https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
This inbox may be cloned and mirrored by anyone:
git clone --mirror https://master.gitmailbox.com/ffmpegdev/0 ffmpegdev/git/0.git
# If you have public-inbox 1.1+ installed, you may
# initialize and index your mirror using the following commands:
public-inbox-init -V2 ffmpegdev ffmpegdev/ https://master.gitmailbox.com/ffmpegdev \
ffmpegdev@gitmailbox.com
public-inbox-index ffmpegdev
Example config snippet for mirrors.
AGPL code for this site: git clone https://public-inbox.org/public-inbox.git