From b773a2b640ba38a106539da7f3414d6892364c4f Mon Sep 17 00:00:00 2001
From: sunyuechi <sunyuechi@iscas.ac.cn>
Date: Fri, 23 Feb 2024 13:27:42 +0800
Subject: [PATCH 1/3] lavc/vp8dsp: R-V V put_bilin_h

C908:
vp8_put_bilin4_h_c: 373.5
vp8_put_bilin4_h_rvv_i32: 158.7
vp8_put_bilin8_h_c: 1437.7
vp8_put_bilin8_h_rvv_i32: 318.7
vp8_put_bilin16_h_c: 2845.7
vp8_put_bilin16_h_rvv_i32: 374.7
---
 libavcodec/riscv/vp8dsp_init.c | 11 +++++++
 libavcodec/riscv/vp8dsp_rvv.S  | 54 ++++++++++++++++++++++++++++++++++
 2 files changed, 65 insertions(+)

diff --git a/libavcodec/riscv/vp8dsp_init.c b/libavcodec/riscv/vp8dsp_init.c
index c364de3dc9..32cb4893a4 100644
--- a/libavcodec/riscv/vp8dsp_init.c
+++ b/libavcodec/riscv/vp8dsp_init.c
@@ -34,6 +34,10 @@ VP8_EPEL(16, rvv);
 VP8_EPEL(8, rvv);
 VP8_EPEL(4, rvv);
 
+VP8_BILIN(16, rvv);
+VP8_BILIN(8, rvv);
+VP8_BILIN(4, rvv);
+
 av_cold void ff_vp78dsp_init_riscv(VP8DSPContext *c)
 {
 #if HAVE_RVV
@@ -47,6 +51,13 @@ av_cold void ff_vp78dsp_init_riscv(VP8DSPContext *c)
         c->put_vp8_bilinear_pixels_tab[0][0][0] = ff_put_vp8_pixels16_rvv;
         c->put_vp8_bilinear_pixels_tab[1][0][0] = ff_put_vp8_pixels8_rvv;
         c->put_vp8_bilinear_pixels_tab[2][0][0] = ff_put_vp8_pixels4_rvv;
+
+        c->put_vp8_bilinear_pixels_tab[0][0][1] = ff_put_vp8_bilin16_h_rvv;
+        c->put_vp8_bilinear_pixels_tab[0][0][2] = ff_put_vp8_bilin16_h_rvv;
+        c->put_vp8_bilinear_pixels_tab[1][0][1] = ff_put_vp8_bilin8_h_rvv;
+        c->put_vp8_bilinear_pixels_tab[1][0][2] = ff_put_vp8_bilin8_h_rvv;
+        c->put_vp8_bilinear_pixels_tab[2][0][1] = ff_put_vp8_bilin4_h_rvv;
+        c->put_vp8_bilinear_pixels_tab[2][0][2] = ff_put_vp8_bilin4_h_rvv;
     }
 #endif
 }
diff --git a/libavcodec/riscv/vp8dsp_rvv.S b/libavcodec/riscv/vp8dsp_rvv.S
index 063ab7110c..c8d265e516 100644
--- a/libavcodec/riscv/vp8dsp_rvv.S
+++ b/libavcodec/riscv/vp8dsp_rvv.S
@@ -98,3 +98,57 @@ func ff_put_vp8_pixels4_rvv, zve32x
         vsetivli        zero, 4, e8, mf4, ta, ma
         put_vp8_pixels
 endfunc
+
+.macro bilin_h_load dst len
+.ifc \len,4
+        vsetivli        zero, 5, e8, mf2, ta, ma
+.elseif \len == 8
+        vsetivli        zero, 9, e8, m1, ta, ma
+.else
+        vsetivli        zero, 17, e8, m2, ta, ma
+.endif
+
+        vle8.v          \dst, (a2)
+        vslide1down.vx  v2, \dst, t5
+
+.ifc \len,4
+        vsetivli        zero, 4, e8, mf4, ta, ma
+.elseif \len == 8
+        vsetivli        zero, 8, e8, mf2, ta, ma
+.else
+        vsetivli        zero, 16, e8, m1, ta, ma
+.endif
+
+        vwmulu.vx       v28, \dst, t1
+        vwmaccu.vx      v28, a5, v2
+        vwaddu.wx       v24, v28, t4
+        vnsra.wi        \dst, v24, 3
+.endm
+
+.macro put_vp8_bilin_h len
+        li              t1, 8
+        li              t4, 4
+        li              t5, 1
+        sub             t1, t1, a5
+1:
+        addi            a4, a4, -1
+        bilin_h_load    v0, \len
+        vse8.v          v0, (a0)
+        add             a2, a2, a3
+        add             a0, a0, a1
+        bnez            a4, 1b
+
+        ret
+.endm
+
+func ff_put_vp8_bilin16_h_rvv, zve32x
+        put_vp8_bilin_h 16
+endfunc
+
+func ff_put_vp8_bilin8_h_rvv, zve32x
+        put_vp8_bilin_h 8
+endfunc
+
+func ff_put_vp8_bilin4_h_rvv, zve32x
+        put_vp8_bilin_h 4
+endfunc
-- 
2.43.2
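
For context, the operation vectorized above is FFmpeg's two-tap horizontal
bilinear interpolation: with the subpel position mx in 0..7 (passed in a5,
per the vp8_mc_func signature a0=dst, a1=dst stride, a2=src, a3=src stride,
a4=h, a5=mx), each output pixel is (8 - mx) * src[x] + mx * src[x + 1],
rounded with +4 and shifted right by 3. Below is a minimal scalar sketch
modeled on the put_vp8_bilinear*_h reference in libavcodec/vp8dsp.c; the
function name and standalone "len" parameter are illustrative, not the
exact FFmpeg prototype.

#include <stddef.h>
#include <stdint.h>

/* Scalar sketch of the filter the RVV macros implement. */
static void put_bilin_h_ref(uint8_t *dst, ptrdiff_t dststride,
                            const uint8_t *src, ptrdiff_t srcstride,
                            int len, int h, int mx)
{
    const int a = 8 - mx;   /* t1 in the assembly */
    const int b = mx;       /* a5 in the assembly */

    for (int y = 0; y < h; y++) {       /* a4 counts rows down to zero */
        for (int x = 0; x < len; x++)
            /* two-tap blend with round-to-nearest: +4, then >> 3 */
            dst[x] = (a * src[x] + b * src[x + 1] + 4) >> 3;
        dst += dststride;
        src += srcstride;
    }
}

Per row, the vector version loads len + 1 source bytes with a single
vle8.v, forms the src[x + 1] operand with vslide1down.vx (the scalar in t5
only fills the tail element, which is discarded when vl is narrowed back to
len), then computes the blend with widening multiply-accumulates
(vwmulu.vx, vwmaccu.vx), adds the +4 rounding term via vwaddu.wx, and
narrows back to 8 bits with the right shift vnsra.wi.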