[FFmpeg-devel] [PATCH] lavc/vc1dsp: R-V V vc1_inv_trans_8x8
From: Rémi Denis-Courmont @ 2024-06-03 15:30 UTC
  To: ffmpeg-devel

T-Head C908:
vc1dsp.vc1_inv_trans_8x8_c:      14.7
vc1dsp.vc1_inv_trans_8x8_rvv_i32: 4.7
---
 libavcodec/riscv/vc1dsp_init.c |   2 +
 libavcodec/riscv/vc1dsp_rvv.S  | 112 +++++++++++++++++++++++++++++++++
 2 files changed, 114 insertions(+)
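
For review convenience, here is a rough scalar sketch of what the shared
8-point pass (ff_vc1_inv_trans_8_rvv) computes, reconstructed from the
constants visible in the diff (12, 16, 15, 9, 6, 4; the shifts by 4 and 2
are x16 and x4) and from the "+4 or +64" rounding bias passed in t1.  The
helper leaves its results unshifted; the 8x8 function applies >> 3 after
the row pass and >> 7 (with +1 on the last four outputs) after the column
pass.  Names below are illustrative only, not part of the patch:

    /* one 8-point VC-1 inverse-transform pass over one row (or column) */
    static void inv_trans_8_sketch(const int s[8], int d[8], int bias)
    {
        int t1 = 12 * (s[0] + s[4]) + bias;
        int t2 = 12 * (s[0] - s[4]) + bias;
        int t3 = 16 * s[2] +  6 * s[6];
        int t4 =  6 * s[2] - 16 * s[6];
        int t5 = t1 + t3, t6 = t2 + t4, t7 = t2 - t4, t8 = t1 - t3;

        int u1 = 16 * s[1] + 15 * s[3] +  9 * s[5] +  4 * s[7];
        int u2 = 15 * s[1] -  4 * s[3] - 16 * s[5] -  9 * s[7];
        int u3 =  9 * s[1] - 16 * s[3] +  4 * s[5] + 15 * s[7];
        int u4 =  4 * s[1] -  9 * s[3] + 15 * s[5] - 16 * s[7];

        d[0] = t5 + u1;  d[7] = t5 - u1;
        d[1] = t6 + u2;  d[6] = t6 - u2;
        d[2] = t7 + u3;  d[5] = t7 - u3;
        d[3] = t8 + u4;  d[4] = t8 - u4;
    }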

diff --git a/libavcodec/riscv/vc1dsp_init.c b/libavcodec/riscv/vc1dsp_init.c
index e4838fb347..b8a1015ce5 100644
--- a/libavcodec/riscv/vc1dsp_init.c
+++ b/libavcodec/riscv/vc1dsp_init.c
@@ -26,6 +26,7 @@
 #include "libavcodec/vc1.h"
 
 void ff_vc1_inv_trans_8x8_dc_rvv(uint8_t *dest, ptrdiff_t stride, int16_t *block);
+void ff_vc1_inv_trans_8x8_rvv(int16_t block[64]);
 void ff_vc1_inv_trans_4x8_dc_rvv(uint8_t *dest, ptrdiff_t stride, int16_t *block);
 void ff_vc1_inv_trans_8x4_dc_rvv(uint8_t *dest, ptrdiff_t stride, int16_t *block);
 void ff_vc1_inv_trans_4x4_dc_rvv(uint8_t *dest, ptrdiff_t stride, int16_t *block);
@@ -53,6 +54,7 @@ av_cold void ff_vc1dsp_init_riscv(VC1DSPContext *dsp)
 # if HAVE_RVV
     if (flags & AV_CPU_FLAG_RVV_I32) {
         if (ff_rv_vlen_least(128)) {
+            dsp->vc1_inv_trans_8x8 = ff_vc1_inv_trans_8x8_rvv;
             dsp->vc1_inv_trans_4x8_dc = ff_vc1_inv_trans_4x8_dc_rvv;
             dsp->vc1_inv_trans_4x4_dc = ff_vc1_inv_trans_4x4_dc_rvv;
             dsp->avg_vc1_mspel_pixels_tab[0][0] = ff_avg_pixels16x16_rvv;
diff --git a/libavcodec/riscv/vc1dsp_rvv.S b/libavcodec/riscv/vc1dsp_rvv.S
index 8b3a830a4a..7a78241925 100644
--- a/libavcodec/riscv/vc1dsp_rvv.S
+++ b/libavcodec/riscv/vc1dsp_rvv.S
@@ -113,6 +113,118 @@ func ff_vc1_inv_trans_4x4_dc_rvv, zve32x
         ret
 endfunc
 
+        .variant_cc ff_vc1_inv_trans_8_rvv
+func ff_vc1_inv_trans_8_rvv, zve32x
+        li       t4, 12
+        vsll.vi  v18, v6, 4
+        li       t2, 6
+        vmul.vx  v8, v0, t4
+        li       t5, 15
+        vmul.vx  v10, v4, t4
+        li       t3, 9
+        # t[2..5] = [6 9 12 15]
+        vsll.vi  v12, v2, 4
+        vmul.vx  v14, v6, t2
+        vmul.vx  v16, v2, t2
+        vadd.vx  v8, v8, t1    # +4 or +64
+        vadd.vv  v26, v12, v14 # t3
+        vadd.vv  v24, v8, v10  # t1
+        vsub.vv  v25, v8, v10  # t2
+        vsub.vv  v27, v16, v18 # t4
+        vadd.vv  v28, v24, v26 # t5
+        vsub.vv  v31, v24, v26 # t8
+        vadd.vv  v29, v25, v27 # t6
+        vsub.vv  v30, v25, v27 # t7
+        vsll.vi  v20, v1, 4
+        vmul.vx  v21, v3, t5
+        vmul.vx  v22, v5, t3
+        vsll.vi  v23, v7, 2
+        vadd.vv  v20, v20, v21
+        vadd.vv  v22, v22, v23
+        vsll.vi  v21, v3, 2
+        vadd.vv  v16, v20, v22 # t1
+        vmul.vx  v20, v1, t5
+        vsll.vi  v22, v5, 4
+        vmul.vx  v23, v7, t3
+        vsub.vv  v20, v20, v21
+        vadd.vv  v22, v22, v23
+        vsll.vi  v21, v3, 4
+        vsub.vv  v17, v20, v22 # t2
+        vmul.vx  v20, v1, t3
+        vsll.vi  v22, v5, 2
+        vmul.vx  v23, v7, t5
+        vsub.vv  v20, v20, v21
+        vadd.vv  v22, v22, v23
+        vmul.vx  v21, v3, t3
+        vadd.vv  v18, v20, v22 # t3
+        vsll.vi  v20, v1, 2
+        vmul.vx  v22, v5, t5
+        vsll.vi  v23, v7, 4
+        vsub.vv  v20, v20, v21
+        vsub.vv  v22, v22, v23
+        vadd.vv  v0, v28, v16
+        vadd.vv  v19, v20, v22 # t4
+        vadd.vv  v1, v29, v17
+        vadd.vv  v2, v30, v18
+        vadd.vv  v3, v31, v19
+        vsub.vv  v4, v31, v19
+        vsub.vv  v5, v30, v18
+        vsub.vv  v6, v29, v17
+        vsub.vv  v7, v28, v16
+        jr       t0
+endfunc
+
+func ff_vc1_inv_trans_8x8_rvv, zve32x
+        vsetivli zero, 8, e16, m1, ta, ma
+        addi     a1, a0, 1 * 8 * 2
+        vle16.v  v0, (a0)
+        addi     a2, a0, 2 * 8 * 2
+        vle16.v  v1, (a1)
+        addi     a3, a0, 3 * 8 * 2
+        vle16.v  v2, (a2)
+        addi     a4, a0, 4 * 8 * 2
+        vle16.v  v3, (a3)
+        addi     a5, a0, 5 * 8 * 2
+        vle16.v  v4, (a4)
+        addi     a6, a0, 6 * 8 * 2
+        vle16.v  v5, (a5)
+        addi     a7, a0, 7 * 8 * 2
+        vle16.v  v6, (a6)
+        li       t1, 4
+        vle16.v  v7, (a7)
+        jal      t0, ff_vc1_inv_trans_8_rvv
+        .irp n,0,1,2,3,4,5,6,7
+        vsra.vi  v\n, v\n, 3
+        .endr
+        vsseg8e16.v v0, (a0)
+        li       t1, 64
+        .irp n,0,1,2,3,4,5,6,7
+        vle16.v v\n, (a\n)
+        .endr
+        jal      t0, ff_vc1_inv_trans_8_rvv
+        vadd.vi  v4, v4, 1
+        vadd.vi  v5, v5, 1
+        vsra.vi  v4, v4, 7
+        vsra.vi  v5, v5, 7
+        vse16.v  v4, (a4)
+        vadd.vi  v6, v6, 1
+        vse16.v  v5, (a5)
+        vadd.vi  v7, v7, 1
+        vsra.vi  v6, v6, 7
+        vsra.vi  v7, v7, 7
+        vse16.v  v6, (a6)
+        vsra.vi  v0, v0, 7
+        vse16.v  v7, (a7)
+        vsra.vi  v1, v1, 7
+        vse16.v  v0, (a0)
+        vsra.vi  v2, v2, 7
+        vse16.v  v1, (a1)
+        vsra.vi  v3, v3, 7
+        vse16.v  v2, (a2)
+        vse16.v  v3, (a3)
+        ret
+endfunc
+
 .macro mspel_op op pos n1 n2
         add           t1, \pos, a2
         v\op\()e8.v   v\n1, (\pos)
-- 
2.45.1

[FFmpeg-devel] [PATCH 2/2] lavc/vc1dsp: R-V V vc1_inv_trans_8x4
From: Rémi Denis-Courmont @ 2024-06-03 19:06 UTC
  To: ffmpeg-devel

T-Head C908:
vc1dsp.vc1_inv_trans_8x4_c:      10.5
vc1dsp.vc1_inv_trans_8x4_rvv_i32: 3.5
---
 libavcodec/riscv/vc1dsp_init.c |  2 +
 libavcodec/riscv/vc1dsp_rvv.S  | 74 ++++++++++++++++++++++++++++++++++
 2 files changed, 76 insertions(+)
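
As with patch 1/2, a rough scalar sketch of the new 4-point pass
(ff_vc1_inv_trans_4_rvv) and of the store path, reconstructed from the
constants in the diff (17, 22, 10) and from the biased rounding value
64 - (128 << 7) loaded before the column pass.  Because that bias already
subtracts 128 from each shifted result, the unsigned widening add
(vwaddu.wv), the signed narrowing clip (vnclip.wi) and the final XOR with
128 together perform the usual clamped add to the destination pixels.
Names below are illustrative only, not part of the patch:

    #include <stdint.h>

    /* one 4-point VC-1 inverse-transform pass; results left unshifted */
    static void inv_trans_4_sketch(const int s[4], int d[4], int bias)
    {
        int t1 = 17 * (s[0] + s[2]) + bias;
        int t2 = 17 * (s[0] - s[2]) + bias;
        int t3 = 22 * s[1] + 10 * s[3];
        int t4 = 22 * s[3] - 10 * s[1];

        d[0] = t1 + t3;
        d[1] = t2 - t4;
        d[2] = t2 + t4;
        d[3] = t1 - t3;
    }

    /* column result -> destination byte, mirroring the vector sequence */
    static uint8_t add_clamped_sketch(int col_sum, uint8_t dst)
    {
        int r = col_sum >> 7;        /* bias already folded in: r - 128 */
        int v = r + dst;             /* vwaddu.wv: add unsigned byte    */
        if (v < -128) v = -128;      /* vnclip.wi: clip to signed 8 bit */
        if (v >  127) v =  127;
        return (uint8_t)(v ^ 0x80);  /* vxor.vx 128: back to unsigned   */
    }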

diff --git a/libavcodec/riscv/vc1dsp_init.c b/libavcodec/riscv/vc1dsp_init.c
index b8a1015ce5..e63870ad44 100644
--- a/libavcodec/riscv/vc1dsp_init.c
+++ b/libavcodec/riscv/vc1dsp_init.c
@@ -29,6 +29,7 @@ void ff_vc1_inv_trans_8x8_dc_rvv(uint8_t *dest, ptrdiff_t stride, int16_t *block
 void ff_vc1_inv_trans_8x8_rvv(int16_t block[64]);
 void ff_vc1_inv_trans_4x8_dc_rvv(uint8_t *dest, ptrdiff_t stride, int16_t *block);
 void ff_vc1_inv_trans_8x4_dc_rvv(uint8_t *dest, ptrdiff_t stride, int16_t *block);
+void ff_vc1_inv_trans_8x4_rvv(uint8_t *dest, ptrdiff_t stride, int16_t *block);
 void ff_vc1_inv_trans_4x4_dc_rvv(uint8_t *dest, ptrdiff_t stride, int16_t *block);
 void ff_put_pixels16x16_rvi(uint8_t *dst, const uint8_t *src, ptrdiff_t line_size, int rnd);
 void ff_put_pixels8x8_rvi(uint8_t *dst, const uint8_t *src, ptrdiff_t line_size, int rnd);
@@ -55,6 +56,7 @@ av_cold void ff_vc1dsp_init_riscv(VC1DSPContext *dsp)
     if (flags & AV_CPU_FLAG_RVV_I32) {
         if (ff_rv_vlen_least(128)) {
             dsp->vc1_inv_trans_8x8 = ff_vc1_inv_trans_8x8_rvv;
+            dsp->vc1_inv_trans_8x4 = ff_vc1_inv_trans_8x4_rvv;
             dsp->vc1_inv_trans_4x8_dc = ff_vc1_inv_trans_4x8_dc_rvv;
             dsp->vc1_inv_trans_4x4_dc = ff_vc1_inv_trans_4x4_dc_rvv;
             dsp->avg_vc1_mspel_pixels_tab[0][0] = ff_avg_pixels16x16_rvv;
diff --git a/libavcodec/riscv/vc1dsp_rvv.S b/libavcodec/riscv/vc1dsp_rvv.S
index 0af4d26a11..7ac1062dcd 100644
--- a/libavcodec/riscv/vc1dsp_rvv.S
+++ b/libavcodec/riscv/vc1dsp_rvv.S
@@ -180,6 +180,29 @@ func ff_vc1_inv_trans_8_rvv, zve32x
         jr       t0
 endfunc
 
+        .variant_cc ff_vc1_inv_trans_4_rvv
+func ff_vc1_inv_trans_4_rvv, zve32x
+        li       t3, 17
+        vmul.vx  v8, v0, t3
+        li       t4, 22
+        vmul.vx  v10, v2, t3
+        li       t2, 10
+        vadd.vx  v8, v8, t1 # +4 or +64
+        vmul.vx  v14, v1, t4
+        vadd.vv  v24, v8, v10  # t1
+        vsub.vv  v25, v8, v10  # t2
+        vmul.vx  v16, v3, t2
+        vmul.vx  v18, v3, t4
+        vmul.vx  v20, v1, t2
+        vadd.vv  v26, v14, v16 # t3
+        vsub.vv  v27, v18, v20 # t4
+        vadd.vv  v0, v24, v26
+        vsub.vv  v1, v25, v27
+        vadd.vv  v2, v25, v27
+        vsub.vv  v3, v24, v26
+        jr       t0
+endfunc
+
 func ff_vc1_inv_trans_8x8_rvv, zve32x
         vsetivli zero, 8, e16, m1, ta, ma
         addi     a1, a0, 1 * 8 * 2
@@ -231,6 +254,57 @@ func ff_vc1_inv_trans_8x8_rvv, zve32x
         ret
 endfunc
 
+func ff_vc1_inv_trans_8x4_rvv, zve32x
+        vsetivli  zero, 4, e16, mf2, ta, ma
+        vlseg8e16.v v0, (a2)
+        li        t1, 4
+        jal       t0, ff_vc1_inv_trans_8_rvv
+        vsseg8e16.v v0, (a2)
+        addi      a3, a2, 1 * 8 * 2
+        vsetivli  zero, 8, e16, m1, ta, ma
+        vle16.v   v0, (a2)
+        addi      a4, a2, 2 * 8 * 2
+        vle16.v   v1, (a3)
+        addi      a5, a2, 3 * 8 * 2
+        vle16.v   v2, (a4)
+        li        t1, 64 - (128 << 7) # bias for signed vnclip.wi below
+        vle16.v   v3, (a5)
+        .irp n,0,1,2,3
+        # shift 4 vectors of 8 elems after transpose instead of 8 of 4
+        vsra.vi   v\n, v\n, 3
+        .endr
+        jal       t0, ff_vc1_inv_trans_4_rvv
+        add       a3, a1, a0
+        vle8.v    v8, (a0)
+        add       a4, a1, a3
+        vle8.v    v9, (a3)
+        add       a5, a1, a4
+        vle8.v    v10, (a4)
+        li        t1, 128
+        vle8.v    v11, (a5)
+        .irp n,0,1,2,3
+        vsra.vi   v\n, v\n, 7
+        .endr
+        vsetvli   zero, zero, e8, mf2, ta, ma
+        vwaddu.wv v0, v0, v8
+        vwaddu.wv v1, v1, v9
+        vwaddu.wv v2, v2, v10
+        vwaddu.wv v3, v3, v11
+        vnclip.wi v8, v0, 0
+        vnclip.wi v9, v1, 0
+        vnclip.wi v10, v2, 0
+        vnclip.wi v11, v3, 0
+        vxor.vx   v8, v8, t1
+        vxor.vx   v9, v9, t1
+        vse8.v    v8, (a0)
+        vxor.vx   v10, v10, t1
+        vse8.v    v9, (a3)
+        vxor.vx   v11, v11, t1
+        vse8.v    v10, (a4)
+        vse8.v    v11, (a5)
+        ret
+endfunc
+
 .macro mspel_op op pos n1 n2
         add           t1, \pos, a2
         v\op\()e8.v   v\n1, (\pos)
-- 
2.45.1
