* [FFmpeg-devel] [PATCH 2/2] lavc/rv40dsp: R-V V chroma_mc
@ 2024-04-30 15:01 flow gg
From: flow gg @ 2024-04-30 15:01 UTC
  To: FFmpeg development discussions and patches

From 3e66b2bbe257cc91a4c2169362163e92aba6760b Mon Sep 17 00:00:00 2001
From: sunyuechi <sunyuechi@iscas.ac.cn>
Date: Tue, 30 Apr 2024 18:24:00 +0800
Subject: [PATCH 2/2] lavc/rv40dsp: R-V V chroma_mc

This is similar to the h264 version, but here we use manual_avg instead of
vaaddu because rv40's OP differs from h264's. With vaaddu, rv40 would need
to switch repeatedly between vxrm=0 and vxrm=2, and switching vxrm is very
slow.
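
For reference, a scalar sketch of the averaging step (illustrative only; the
helper name is made up): manual_avg performs the round-half-up average that
vaaddu would give under vxrm=0, while the vnclipu narrowing here runs with
vxrm=2.

    #include <stdint.h>

    /* Scalar model of manual_avg (vadd.vv + vadd.vi 1 + vsrl.vi 1):
     * round-half-up average, i.e. what vaaddu computes when vxrm = 0. */
    static inline uint8_t rnd_avg_px(uint8_t a, uint8_t b)
    {
        return (uint8_t)(((unsigned)a + b + 1) >> 1);
    }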

C908:
avg_chroma_mc4_c: 2330.0
avg_chroma_mc4_rvv_i32: 602.7
avg_chroma_mc8_c: 1211.0
avg_chroma_mc8_rvv_i32: 602.7
put_chroma_mc4_c: 1825.0
put_chroma_mc4_rvv_i32: 414.7
put_chroma_mc8_c: 932.0
put_chroma_mc8_rvv_i32: 414.7
---
 libavcodec/riscv/Makefile       |   2 +
 libavcodec/riscv/rv40dsp_init.c |  51 +++++
 libavcodec/riscv/rv40dsp_rvv.S  | 371 ++++++++++++++++++++++++++++++++
 libavcodec/rv34dsp.h            |   1 +
 libavcodec/rv40dsp.c            |   2 +
 5 files changed, 427 insertions(+)
 create mode 100644 libavcodec/riscv/rv40dsp_init.c
 create mode 100644 libavcodec/riscv/rv40dsp_rvv.S

diff --git a/libavcodec/riscv/Makefile b/libavcodec/riscv/Makefile
index dce1236b84..43b5c21cf4 100644
--- a/libavcodec/riscv/Makefile
+++ b/libavcodec/riscv/Makefile
@@ -50,6 +50,8 @@ RV-OBJS-$(CONFIG_PIXBLOCKDSP) += riscv/pixblockdsp_rvi.o
 RVV-OBJS-$(CONFIG_PIXBLOCKDSP) += riscv/pixblockdsp_rvv.o
 OBJS-$(CONFIG_RV34DSP) += riscv/rv34dsp_init.o
 RVV-OBJS-$(CONFIG_RV34DSP) += riscv/rv34dsp_rvv.o
+OBJS-$(CONFIG_RV40_DECODER) += riscv/rv40dsp_init.o
+RVV-OBJS-$(CONFIG_RV40_DECODER) += riscv/rv40dsp_rvv.o
 OBJS-$(CONFIG_SVQ1_ENCODER) += riscv/svqenc_init.o
 RVV-OBJS-$(CONFIG_SVQ1_ENCODER) += riscv/svqenc_rvv.o
 OBJS-$(CONFIG_TAK_DECODER) += riscv/takdsp_init.o
diff --git a/libavcodec/riscv/rv40dsp_init.c b/libavcodec/riscv/rv40dsp_init.c
new file mode 100644
index 0000000000..f5a5510b28
--- /dev/null
+++ b/libavcodec/riscv/rv40dsp_init.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2024 Institute of Software, Chinese Academy of Sciences (ISCAS).
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/riscv/cpu.h"
+#include "libavcodec/rv34dsp.h"
+
+void ff_put_rv40_chroma_mc8_rvv(uint8_t *dst, const uint8_t *src, ptrdiff_t stride,
+                                 int h, int x, int y);
+void ff_put_rv40_chroma_mc4_rvv(uint8_t *dst, const uint8_t *src, ptrdiff_t stride,
+                                 int h, int x, int y);
+
+void ff_avg_rv40_chroma_mc8_rvv(uint8_t *dst, const uint8_t *src, ptrdiff_t stride,
+                                 int h, int x, int y);
+void ff_avg_rv40_chroma_mc4_rvv(uint8_t *dst, const uint8_t *src, ptrdiff_t stride,
+                                 int h, int x, int y);
+
+av_cold void ff_rv40dsp_init_riscv(RV34DSPContext *c)
+{
+#if HAVE_RVV
+    int flags = av_get_cpu_flags();
+
+    if ((flags & AV_CPU_FLAG_RVV_I32) && ff_get_rv_vlenb() >= 16 &&
+        (flags & AV_CPU_FLAG_RVB_ADDR)) {
+        c->put_chroma_pixels_tab[0] = ff_put_rv40_chroma_mc8_rvv;
+        c->put_chroma_pixels_tab[1] = ff_put_rv40_chroma_mc4_rvv;
+        c->avg_chroma_pixels_tab[0] = ff_avg_rv40_chroma_mc8_rvv;
+        c->avg_chroma_pixels_tab[1] = ff_avg_rv40_chroma_mc4_rvv;
+    }
+#endif
+}
diff --git a/libavcodec/riscv/rv40dsp_rvv.S b/libavcodec/riscv/rv40dsp_rvv.S
new file mode 100644
index 0000000000..e49345ef70
--- /dev/null
+++ b/libavcodec/riscv/rv40dsp_rvv.S
@@ -0,0 +1,371 @@
+/*
+ * Copyright (c) 2024 Institute of Software, Chinese Academy of Sciences (ISCAS).
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/riscv/asm.S"
+
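+# Round-half-up average: dst = (src1 + src2 + 1) >> 1.  Done with plain adds
+# so that vxrm can stay at 2 for vnclipu instead of toggling to 0 for vaaddu.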
+.macro manual_avg dst src1 src2
+        vadd.vv         \dst, \src1, \src2
+        vadd.vi         \dst, \dst, 1
+        vsrl.vi         \dst, \dst, 1
+.endm
+
+.macro  do_chroma_mc type unroll
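+        # All vnclipu.wi narrowing shifts below use vxrm = 2 (round-down).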
+        csrwi           vxrm, 2
+        slli            t2, a5, 3
+        mul             t1, a5, a4
+        sh3add          a5, a4, t2
+        slli            a4, a4, 3
+        sub             a5, t1, a5
+        sub             a7, a4, t1
+        addi            a6, a5, 64
+        sub             t0, t2, t1
+        vsetvli         t3, t6, e8, m1, ta, mu
+        beqz            t1, 2f
+        blez            a3, 8f
+        li              t4, 0
+        li              t2, 0
+        li              t5, 1
+        addi            a5, t3, 1
+        slli            t3, a2, (1 + \unroll)
+1:                                # if (xy != 0)
+        add             a4, a1, t4
+        vsetvli         zero, a5, e8, m1, ta, ma
+  .ifc \unroll,1
+        addi            t2, t2, 4
+  .else
+        addi            t2, t2, 2
+  .endif
+        vle8.v          v10, (a4)
+        add             a4, a4, a2
+        vslide1down.vx  v11, v10, t5
+        vsetvli         zero, t6, e8, m1, ta, ma
+        vwmulu.vx       v8, v10, a6
+        vwmaccu.vx      v8, a7, v11
+        vsetvli         zero, a5, e8, m1, ta, ma
+        vle8.v          v12, (a4)
+        vsetvli         zero, t6, e8, m1, ta, ma
+        add             a4, a4, a2
+        vwmaccu.vx      v8, t0, v12
+        vsetvli         zero, a5, e8, m1, ta, ma
+        vslide1down.vx  v13, v12, t5
+        vsetvli         zero, t6, e8, m1, ta, ma
+        vwmulu.vx       v10, v12, a6
+        vwmaccu.vx      v8, t1, v13
+        vwmaccu.vx      v10, a7, v13
+        vsetvli         zero, a5, e8, m1, ta, ma
+        vle8.v          v14, (a4)
+        vsetvli         zero, t6, e8, m1, ta, ma
+        add             a4, a4, a2
+        vwmaccu.vx      v10, t0, v14
+        vsetvli         zero, a5, e8, m1, ta, ma
+        vslide1down.vx  v15, v14, t5
+        vsetvli         zero, t6, e8, m1, ta, ma
+        vwmulu.vx       v12, v14, a6
+        vwmaccu.vx      v10, t1, v15
+        vwmaccu.vx      v12, a7, v15
+        vnclipu.wi      v15, v8, 6
+  .ifc \type,avg
+        vle8.v          v9, (a0)
+        manual_avg      v15, v15, v9
+  .endif
+        vse8.v          v15, (a0)
+        add             a0, a0, a2
+        vnclipu.wi      v8, v10, 6
+  .ifc \type,avg
+        vle8.v          v9, (a0)
+        manual_avg      v8, v8, v9
+  .endif
+        add             t4, t4, t3
+        vse8.v          v8, (a0)
+        add             a0, a0, a2
+  .ifc \unroll,1
+        vsetvli         zero, a5, e8, m1, ta, ma
+        vle8.v          v14, (a4)
+        vsetvli         zero, t6, e8, m1, ta, ma
+        add             a4, a4, a2
+        vwmaccu.vx      v12, t0, v14
+        vsetvli         zero, a5, e8, m1, ta, ma
+        vslide1down.vx  v15, v14, t5
+        vsetvli         zero, t6, e8, m1, ta, ma
+        vwmulu.vx       v16, v14, a6
+        vwmaccu.vx      v12, t1, v15
+        vwmaccu.vx      v16, a7, v15
+        vsetvli         zero, a5, e8, m1, ta, ma
+        vle8.v          v14, (a4)
+        vsetvli         zero, t6, e8, m1, ta, ma
+        vwmaccu.vx      v16, t0, v14
+        vsetvli         zero, a5, e8, m1, ta, ma
+        vslide1down.vx  v14, v14, t5
+        vsetvli         zero, t6, e8, m1, ta, ma
+        vwmaccu.vx      v16, t1, v14
+        vnclipu.wi      v8, v12, 6
+  .ifc \type,avg
+        vle8.v          v9, (a0)
+        manual_avg      v8, v8, v9
+  .endif
+        vse8.v          v8, (a0)
+        add             a0, a0, a2
+        vnclipu.wi      v8, v16, 6
+  .ifc \type,avg
+        vle8.v          v9, (a0)
+        manual_avg      v8, v8, v9
+  .endif
+        vse8.v          v8, (a0)
+        add             a0, a0, a2
+  .endif
+        blt             t2, a3, 1b
+        j               8f
+2:
+        bnez            a4, 4f
+        beqz            t2, 4f
+        blez            a3, 8f
+        li              a4, 0
+        li              t1, 0
+        slli            a7, a2, (1 + \unroll)
+3:                                # if ((x8 - xy) == 0 && (y8 - xy) != 0)
+        add             a5, a1, a4
+        vsetvli         zero, zero, e8, m1, ta, ma
+  .ifc \unroll,1
+        addi            t1, t1, 4
+  .else
+        addi            t1, t1, 2
+  .endif
+        vle8.v          v8, (a5)
+        add             a5, a5, a2
+        add             t2, a5, a2
+        vwmulu.vx       v10, v8, a6
+        vle8.v          v8, (a5)
+        vwmulu.vx       v12, v8, a6
+        vle8.v          v9, (t2)
+        add             t2, t2, a2
+        add             a5, t2, a2
+        vwmaccu.vx      v10, t0, v8
+        add             a4, a4, a7
+        vwmaccu.vx      v12, t0, v9
+        vnclipu.wi      v15, v10, 6
+        vwmulu.vx       v10, v9, a6
+        vnclipu.wi      v9, v12, 6
+  .ifc \type,avg
+        vle8.v          v16, (a0)
+        manual_avg      v15, v15, v16
+  .endif
+        vse8.v          v15, (a0)
+        add             a0, a0, a2
+  .ifc \type,avg
+        vle8.v          v16, (a0)
+        manual_avg      v9, v9, v16
+  .endif
+        vse8.v          v9, (a0)
+        add             a0, a0, a2
+  .ifc \unroll,1
+        vle8.v          v8, (t2)
+        vle8.v          v14, (a5)
+        vwmaccu.vx      v10, t0, v8
+        vwmulu.vx       v12, v8, a6
+        vnclipu.wi      v8, v10, 6
+        vwmaccu.vx      v12, t0, v14
+  .ifc \type,avg
+        vle8.v          v16, (a0)
+        manual_avg      v8, v8, v16
+  .endif
+        vse8.v          v8, (a0)
+        add             a0, a0, a2
+        vnclipu.wi      v8, v12, 6
+  .ifc \type,avg
+        vle8.v          v16, (a0)
+        manual_avg      v8, v8, v16
+  .endif
+        vse8.v          v8, (a0)
+        add             a0, a0, a2
+  .endif
+        blt             t1, a3, 3b
+        j               8f
+4:
+        beqz            a4, 6f
+        bnez            t2, 6f
+        blez            a3, 8f
+        li              a4, 0
+        li              t2, 0
+        addi            t0, t3, 1
+        slli            t1, a2, (1 + \unroll)
+5:                               # if ((x8 - xy) != 0 && (y8 - xy) == 0)
+        add             a5, a1, a4
+        vsetvli         zero, t0, e8, m1, ta, ma
+  .ifc \unroll,1
+        addi            t2, t2, 4
+  .else
+        addi            t2, t2, 2
+  .endif
+        vle8.v          v8, (a5)
+        add             a5, a5, a2
+        vslide1down.vx  v9, v8, t5
+        vsetvli         zero, t6, e8, m1, ta, ma
+        vwmulu.vx       v10, v8, a6
+        vwmaccu.vx      v10, a7, v9
+        vsetvli         zero, t0, e8, m1, ta, ma
+        vle8.v          v8, (a5)
+        add             a5, a5, a2
+        vslide1down.vx  v9, v8, t5
+        vsetvli         zero, t6, e8, m1, ta, ma
+        vwmulu.vx       v12, v8, a6
+        vwmaccu.vx      v12, a7, v9
+        vnclipu.wi      v16, v10, 6
+  .ifc \type,avg
+        vle8.v          v18, (a0)
+        manual_avg      v16, v16, v18
+  .endif
+        vse8.v          v16, (a0)
+        add             a0, a0, a2
+        vnclipu.wi      v10, v12, 6
+  .ifc \type,avg
+        vle8.v          v18, (a0)
+        manual_avg      v10, v10, v18
+  .endif
+        add             a4, a4, t1
+        vse8.v          v10, (a0)
+        add             a0, a0, a2
+  .ifc \unroll,1
+        vsetvli         zero, t0, e8, m1, ta, ma
+        vle8.v          v8, (a5)
+        add             a5, a5, a2
+        vslide1down.vx  v9, v8, t5
+        vsetvli         zero, t6, e8, m1, ta, ma
+        vwmulu.vx       v14, v8, a6
+        vwmaccu.vx      v14, a7, v9
+        vsetvli         zero, t0, e8, m1, ta, ma
+        vle8.v          v8, (a5)
+        vslide1down.vx  v9, v8, t5
+        vsetvli         zero, t6, e8, m1, ta, ma
+        vwmulu.vx       v12, v8, a6
+        vnclipu.wi      v8, v14, 6
+        vwmaccu.vx      v12, a7, v9
+  .ifc \type,avg
+        vle8.v          v18, (a0)
+        manual_avg      v8, v8, v18
+  .endif
+        vse8.v          v8, (a0)
+        add             a0, a0, a2
+        vnclipu.wi      v8, v12, 6
+  .ifc \type,avg
+        vle8.v          v18, (a0)
+        manual_avg      v8, v8, v18
+  .endif
+        vse8.v          v8, (a0)
+        add             a0, a0, a2
+  .endif
+        blt             t2, a3, 5b
+        j               8f
+6:
+        blez            a3, 8f
+        li              a4, 0
+        li              t2, 0
+        slli            a7, a2, (1 + \unroll)
+7:                               # the final else, none of the above conditions are met
+        add             t0, a1, a4
+        vsetvli         zero, zero, e8, m1, ta, ma
+        add             a5, a0, a4
+        add             a4, a4, a7
+  .ifc \unroll,1
+        addi            t2, t2, 4
+  .else
+        addi            t2, t2, 2
+  .endif
+        vle8.v          v8, (t0)
+        add             t0, t0, a2
+        add             t1, t0, a2
+        vwmulu.vx       v10, v8, a6
+        vle8.v          v8, (t0)
+        add             t0, t1, a2
+        vnclipu.wi      v13, v10, 6
+        vwmulu.vx       v10, v8, a6
+  .ifc \type,avg
+        vle8.v          v18, (a5)
+        manual_avg      v13, v13, v18
+  .endif
+        vse8.v          v13, (a5)
+        add             a5, a5, a2
+        vnclipu.wi      v8, v10, 6
+  .ifc \type,avg
+        vle8.v          v18, (a5)
+        manual_avg      v8, v8, v18
+  .endif
+        vse8.v          v8, (a5)
+        add             a5, a5, a2
+  .ifc \unroll,1
+        vle8.v          v9, (t1)
+        vle8.v          v12, (t0)
+        vwmulu.vx       v10, v9, a6
+        vnclipu.wi      v8, v10, 6
+        vwmulu.vx       v10, v12, a6
+  .ifc \type,avg
+        vle8.v          v18, (a5)
+        manual_avg      v8, v8, v18
+  .endif
+        vse8.v          v8, (a5)
+        add             a5, a5, a2
+        vnclipu.wi      v8, v10, 6
+  .ifc \type,avg
+        vle8.v          v18, (a5)
+        manual_avg      v8, v8, v18
+  .endif
+        vse8.v          v8, (a5)
+  .endif
+        blt             t2, a3, 7b
+8:
+        ret
+.endm
+
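+# Shared bodies for the exported mc4/mc8 entry points below.  Arguments follow
+# the C prototype: a0 = dst, a1 = src, a2 = stride, a3 = h, a4 = x, a5 = y,
+# with t6 preset to the block width (4 or 8).  h >= 3 uses the unrolled
+# (4 rows per iteration) variant, smaller h the 2-rows-per-iteration one.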
+func ff_put_rv40_chroma_mc_rvv, zve32x
+11:
+        li      a7, 3
+        blt     a3, a7, 12f
+        do_chroma_mc put 1
+12:
+        do_chroma_mc put 0
+endfunc
+
+func ff_avg_rv40_chroma_mc_rvv, zve32x
+21:
+        li      a7, 3
+        blt     a3, a7, 22f
+        do_chroma_mc avg 1
+22:
+        do_chroma_mc avg 0
+endfunc
+
+func ff_put_rv40_chroma_mc8_rvv, zve32x
+        li      t6, 8
+        j       11b
+endfunc
+
+func ff_put_rv40_chroma_mc4_rvv, zve32x
+        li      t6, 4
+        j       11b
+endfunc
+
+func ff_avg_rv40_chroma_mc8_rvv, zve32x
+        li      t6, 8
+        j       21b
+endfunc
+
+func ff_avg_rv40_chroma_mc4_rvv, zve32x
+        li      t6, 4
+        j       21b
+endfunc
diff --git a/libavcodec/rv34dsp.h b/libavcodec/rv34dsp.h
index b15424d4ae..d59b3c2732 100644
--- a/libavcodec/rv34dsp.h
+++ b/libavcodec/rv34dsp.h
@@ -83,6 +83,7 @@ void ff_rv34dsp_init_riscv(RV34DSPContext *c);
 void ff_rv34dsp_init_x86(RV34DSPContext *c);
 
 void ff_rv40dsp_init_aarch64(RV34DSPContext *c);
+void ff_rv40dsp_init_riscv(RV34DSPContext *c);
 void ff_rv40dsp_init_x86(RV34DSPContext *c);
 void ff_rv40dsp_init_arm(RV34DSPContext *c);
 
diff --git a/libavcodec/rv40dsp.c b/libavcodec/rv40dsp.c
index f0208b16ea..970faec5de 100644
--- a/libavcodec/rv40dsp.c
+++ b/libavcodec/rv40dsp.c
@@ -709,6 +709,8 @@ av_cold void ff_rv40dsp_init(RV34DSPContext *c)
     ff_rv40dsp_init_aarch64(c);
 #elif ARCH_ARM
     ff_rv40dsp_init_arm(c);
+#elif ARCH_RISCV
+    ff_rv40dsp_init_riscv(c);
 #elif ARCH_X86
     ff_rv40dsp_init_x86(c);
 #endif
-- 
2.44.0

