* [FFmpeg-devel] [PATCH 1/4] avcodec: [loongarch] Optimize vp8_lpf/mc with LSX.
From: Hao Chen @ 2021-12-18 14:27 UTC
To: ffmpeg-devel; +Cc: yuanhecai
From: yuanhecai <yuanhecai@loongson.cn>
Decoding speed of a 1080p VP8 clip, measured with:
./ffmpeg -i ../9_vp8_1080p_30fps_2Mbps.webm -f rawvideo -y /dev/null -an
before: 210fps
after : 585fps
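A possible sanity check (not part of this patch): decode the clip once with all
SIMD disabled and once with it enabled and compare the output hashes, which are
expected to match:
# reference (C code only) decode vs. optimized decode
./ffmpeg -cpuflags 0 -i ../9_vp8_1080p_30fps_2Mbps.webm -an -f md5 -
./ffmpeg -i ../9_vp8_1080p_30fps_2Mbps.webm -an -f md5 -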
---
libavcodec/loongarch/Makefile | 3 +
libavcodec/loongarch/vp8_lpf_lsx.c | 591 ++++++++++++
libavcodec/loongarch/vp8_mc_lsx.c | 951 +++++++++++++++++++
libavcodec/loongarch/vp8dsp_init_loongarch.c | 63 ++
libavcodec/loongarch/vp8dsp_loongarch.h | 90 ++
libavcodec/vp8dsp.c | 2 +
libavcodec/vp8dsp.h | 1 +
7 files changed, 1701 insertions(+)
create mode 100644 libavcodec/loongarch/vp8_lpf_lsx.c
create mode 100644 libavcodec/loongarch/vp8_mc_lsx.c
create mode 100644 libavcodec/loongarch/vp8dsp_init_loongarch.c
create mode 100644 libavcodec/loongarch/vp8dsp_loongarch.h
diff --git a/libavcodec/loongarch/Makefile b/libavcodec/loongarch/Makefile
index 30799e4e48..4e1d827e19 100644
--- a/libavcodec/loongarch/Makefile
+++ b/libavcodec/loongarch/Makefile
@@ -2,9 +2,12 @@ OBJS-$(CONFIG_H264CHROMA) += loongarch/h264chroma_init_loongarch.o
OBJS-$(CONFIG_H264QPEL) += loongarch/h264qpel_init_loongarch.o
OBJS-$(CONFIG_H264DSP) += loongarch/h264dsp_init_loongarch.o
OBJS-$(CONFIG_H264PRED) += loongarch/h264_intrapred_init_loongarch.o
+OBJS-$(CONFIG_VP8_DECODER) += loongarch/vp8dsp_init_loongarch.o
LASX-OBJS-$(CONFIG_H264CHROMA) += loongarch/h264chroma_lasx.o
LASX-OBJS-$(CONFIG_H264QPEL) += loongarch/h264qpel_lasx.o
LASX-OBJS-$(CONFIG_H264DSP) += loongarch/h264dsp_lasx.o \
loongarch/h264idct_lasx.o \
loongarch/h264_deblock_lasx.o
LASX-OBJS-$(CONFIG_H264PRED) += loongarch/h264_intrapred_lasx.o
+LSX-OBJS-$(CONFIG_VP8_DECODER) += loongarch/vp8_mc_lsx.o \
+ loongarch/vp8_lpf_lsx.o
diff --git a/libavcodec/loongarch/vp8_lpf_lsx.c b/libavcodec/loongarch/vp8_lpf_lsx.c
new file mode 100644
index 0000000000..f0fc3f3a5b
--- /dev/null
+++ b/libavcodec/loongarch/vp8_lpf_lsx.c
@@ -0,0 +1,591 @@
+/*
+ * Copyright (c) 2021 Loongson Technology Corporation Limited
+ * Contributed by Hecai Yuan <yuanhecai@loongson.cn>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavcodec/vp8dsp.h"
+#include "vp8dsp_loongarch.h"
+#include "libavutil/loongarch/loongson_intrinsics.h"
+
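+/* Normal (inner) loop filter on p1/p0/q0/q1, 16 columns at a time.
+ * Scalar equivalent in the 0x80-biased signed domain:
+ *   w  = clamp(p1 - q1) & hev
+ *   w  = clamp(w + 3 * (q0 - p0)) & mask
+ *   f1 = clamp(w + 4) >> 3,  f2 = clamp(w + 3) >> 3
+ *   q0 = clamp(q0 - f1),     p0 = clamp(p0 + f2)
+ *   a  = (f1 + 1) >> 1;  where !hev: q1 = clamp(q1 - a), p1 = clamp(p1 + a)
+ */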
+#define VP8_LPF_FILTER4_4W(p1_in_out, p0_in_out, q0_in_out, q1_in_out, \
+ mask_in, hev_in) \
+{ \
+ __m128i p1_m, p0_m, q0_m, q1_m, q0_sub_p0, filt_sign; \
+ __m128i filt, filt1, filt2, cnst4b, cnst3b; \
+ __m128i q0_sub_p0_l, q0_sub_p0_h, filt_h, filt_l, cnst3h; \
+ \
+ p1_m = __lsx_vxori_b(p1_in_out, 0x80); \
+ p0_m = __lsx_vxori_b(p0_in_out, 0x80); \
+ q0_m = __lsx_vxori_b(q0_in_out, 0x80); \
+ q1_m = __lsx_vxori_b(q1_in_out, 0x80); \
+ filt = __lsx_vssub_b(p1_m, q1_m); \
+ filt = filt & hev_in; \
+ \
+ q0_sub_p0 = __lsx_vsub_b(q0_m, p0_m); \
+ filt_sign = __lsx_vslti_b(filt, 0); \
+ \
+ cnst3h = __lsx_vreplgr2vr_h(3); \
+ q0_sub_p0_l = __lsx_vilvl_b(q0_sub_p0, q0_sub_p0); \
+ q0_sub_p0_l = __lsx_vdp2_h_b(q0_sub_p0_l, cnst3h); \
+ filt_l = __lsx_vilvl_b(filt_sign, filt); \
+ filt_l = __lsx_vadd_h(filt_l, q0_sub_p0_l); \
+ filt_l = __lsx_vsat_h(filt_l, 7); \
+ \
+ q0_sub_p0_h = __lsx_vilvh_b(q0_sub_p0, q0_sub_p0); \
+ q0_sub_p0_h = __lsx_vdp2_h_b(q0_sub_p0_h, cnst3h); \
+ filt_h = __lsx_vilvh_b(filt_sign, filt); \
+ filt_h = __lsx_vadd_h(filt_h, q0_sub_p0_h); \
+ filt_h = __lsx_vsat_h(filt_h, 7); \
+ \
+ filt = __lsx_vpickev_b(filt_h, filt_l); \
+ filt = filt & mask_in; \
+ cnst4b = __lsx_vreplgr2vr_b(4); \
+ filt1 = __lsx_vsadd_b(filt, cnst4b); \
+ filt1 = __lsx_vsrai_b(filt1, 3); \
+ \
+ cnst3b = __lsx_vreplgr2vr_b(3); \
+ filt2 = __lsx_vsadd_b(filt, cnst3b); \
+ filt2 = __lsx_vsrai_b(filt2, 3); \
+ \
+ q0_m = __lsx_vssub_b(q0_m, filt1); \
+ q0_in_out = __lsx_vxori_b(q0_m, 0x80); \
+ p0_m = __lsx_vsadd_b(p0_m, filt2); \
+ p0_in_out = __lsx_vxori_b(p0_m, 0x80); \
+ \
+ filt = __lsx_vsrari_b(filt1, 1); \
+ hev_in = __lsx_vxori_b(hev_in, 0xff); \
+ filt = filt & hev_in; \
+ \
+ q1_m = __lsx_vssub_b(q1_m, filt); \
+ q1_in_out = __lsx_vxori_b(q1_m, 0x80); \
+ p1_m = __lsx_vsadd_b(p1_m, filt); \
+ p1_in_out = __lsx_vxori_b(p1_m, 0x80); \
+}
+
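+/* Normal macroblock-edge filter. Columns flagged by hev get the same
+ * (w+4)>>3 / (w+3)>>3 adjustment of q0/p0 as the inner filter; the remaining
+ * columns are smoothed more strongly with
+ *   a = clamp((27 * w + 63) >> 7)  applied to p0/q0,
+ *   a = clamp((18 * w + 63) >> 7)  applied to p1/q1,
+ *   a = clamp(( 9 * w + 63) >> 7)  applied to p2/q2,
+ * where w = clamp(clamp(p1 - q1) + 3 * (q0 - p0)) & mask & ~hev.
+ */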
+#define VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev) \
+{ \
+ __m128i p2_m, p1_m, p0_m, q2_m, q1_m, q0_m; \
+ __m128i filt, q0_sub_p0, cnst4b, cnst3b; \
+ __m128i u, filt1, filt2, filt_sign, q0_sub_p0_sign; \
+ __m128i q0_sub_p0_l, q0_sub_p0_h, filt_l, u_l, u_h, filt_h; \
+ __m128i cnst3h, cnst27h, cnst18h, cnst63h; \
+ \
+ cnst3h = __lsx_vreplgr2vr_h(3); \
+ \
+ p2_m = __lsx_vxori_b(p2, 0x80); \
+ p1_m = __lsx_vxori_b(p1, 0x80); \
+ p0_m = __lsx_vxori_b(p0, 0x80); \
+ q0_m = __lsx_vxori_b(q0, 0x80); \
+ q1_m = __lsx_vxori_b(q1, 0x80); \
+ q2_m = __lsx_vxori_b(q2, 0x80); \
+ \
+ filt = __lsx_vssub_b(p1_m, q1_m); \
+ q0_sub_p0 = __lsx_vsub_b(q0_m, p0_m); \
+ q0_sub_p0_sign = __lsx_vslti_b(q0_sub_p0, 0); \
+ filt_sign = __lsx_vslti_b(filt, 0); \
+ \
+ /* right part */ \
+ q0_sub_p0_l = __lsx_vilvl_b(q0_sub_p0_sign, q0_sub_p0); \
+ q0_sub_p0_l = __lsx_vmul_h(q0_sub_p0_l, cnst3h); \
+ filt_l = __lsx_vilvl_b(filt_sign, filt); \
+ filt_l = __lsx_vadd_h(filt_l, q0_sub_p0_l); \
+ filt_l = __lsx_vsat_h(filt_l, 7); \
+ \
+ /* left part */ \
+ q0_sub_p0_h = __lsx_vilvh_b(q0_sub_p0_sign, q0_sub_p0); \
+ q0_sub_p0_h = __lsx_vmul_h(q0_sub_p0_h, cnst3h); \
+ filt_h = __lsx_vilvh_b(filt_sign, filt); \
+ filt_h = __lsx_vadd_h(filt_h, q0_sub_p0_h); \
+ filt_h = __lsx_vsat_h(filt_h, 7); \
+ \
+ /* combine left and right part */ \
+ filt = __lsx_vpickev_b(filt_h, filt_l); \
+ filt = filt & mask; \
+ filt2 = filt & hev; \
+ /* filt_val &= ~hev */ \
+ hev = __lsx_vxori_b(hev, 0xff); \
+ filt = filt & hev; \
+ cnst4b = __lsx_vreplgr2vr_b(4); \
+ filt1 = __lsx_vsadd_b(filt2, cnst4b); \
+ filt1 = __lsx_vsrai_b(filt1, 3); \
+ cnst3b = __lsx_vreplgr2vr_b(3); \
+ filt2 = __lsx_vsadd_b(filt2, cnst3b); \
+ filt2 = __lsx_vsrai_b(filt2, 3); \
+ q0_m = __lsx_vssub_b(q0_m, filt1); \
+ p0_m = __lsx_vsadd_b(p0_m, filt2); \
+ \
+ filt_sign = __lsx_vslti_b(filt, 0); \
+ filt_l = __lsx_vilvl_b(filt_sign, filt); \
+ filt_h = __lsx_vilvh_b(filt_sign, filt); \
+ \
+ cnst27h = __lsx_vreplgr2vr_h(27); \
+ cnst63h = __lsx_vreplgr2vr_h(63); \
+ \
+ /* right part */ \
+ u_l = __lsx_vmul_h(filt_l, cnst27h); \
+ u_l = __lsx_vadd_h(u_l, cnst63h); \
+ u_l = __lsx_vsrai_h(u_l, 7); \
+ u_l = __lsx_vsat_h(u_l, 7); \
+ /* left part */ \
+ u_h = __lsx_vmul_h(filt_h, cnst27h); \
+ u_h = __lsx_vadd_h(u_h, cnst63h); \
+ u_h = __lsx_vsrai_h(u_h, 7); \
+ u_h = __lsx_vsat_h(u_h, 7); \
+ /* combine left and right part */ \
+ u = __lsx_vpickev_b(u_h, u_l); \
+ q0_m = __lsx_vssub_b(q0_m, u); \
+ q0 = __lsx_vxori_b(q0_m, 0x80); \
+ p0_m = __lsx_vsadd_b(p0_m, u); \
+ p0 = __lsx_vxori_b(p0_m, 0x80); \
+ cnst18h = __lsx_vreplgr2vr_h(18); \
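+ /* right part */ \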
+ u_l = __lsx_vmul_h(filt_l, cnst18h); \
+ u_l = __lsx_vadd_h(u_l, cnst63h); \
+ u_l = __lsx_vsrai_h(u_l, 7); \
+ u_l = __lsx_vsat_h(u_l, 7); \
+ \
+ /* left part */ \
+ u_h = __lsx_vmul_h(filt_h, cnst18h); \
+ u_h = __lsx_vadd_h(u_h, cnst63h); \
+ u_h = __lsx_vsrai_h(u_h, 7); \
+ u_h = __lsx_vsat_h(u_h, 7); \
+ /* combine left and right part */ \
+ u = __lsx_vpickev_b(u_h, u_l); \
+ q1_m = __lsx_vssub_b(q1_m, u); \
+ q1 = __lsx_vxori_b(q1_m, 0x80); \
+ p1_m = __lsx_vsadd_b(p1_m, u); \
+ p1 = __lsx_vxori_b(p1_m, 0x80); \
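+ /* right part */ \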
+ u_l = __lsx_vslli_h(filt_l, 3); \
+ u_l = __lsx_vadd_h(u_l, filt_l); \
+ u_l = __lsx_vadd_h(u_l, cnst63h); \
+ u_l = __lsx_vsrai_h(u_l, 7); \
+ u_l = __lsx_vsat_h(u_l, 7); \
+ \
+ /* left part */ \
+ u_h = __lsx_vslli_h(filt_h, 3); \
+ u_h = __lsx_vadd_h(u_h, filt_h); \
+ u_h = __lsx_vadd_h(u_h, cnst63h); \
+ u_h = __lsx_vsrai_h(u_h, 7); \
+ u_h = __lsx_vsat_h(u_h, 7); \
+ /* combine left and right part */ \
+ u = __lsx_vpickev_b(u_h, u_l); \
+ q2_m = __lsx_vssub_b(q2_m, u); \
+ q2 = __lsx_vxori_b(q2_m, 0x80); \
+ p2_m = __lsx_vsadd_b(p2_m, u); \
+ p2 = __lsx_vxori_b(p2_m, 0x80); \
+}
+
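+/* Builds the filter and high-edge-variance masks from the eight edge pixels:
+ *   hev  = (|p1 - p0| > thresh) || (|q1 - q0| > thresh)
+ *   mask = all neighbouring differences <= limit
+ *          && |p0 - q0| * 2 + |p1 - q1| / 2 <= b_limit
+ */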
+#define LPF_MASK_HEV(p3_src, p2_src, p1_src, p0_src, \
+ q0_src, q1_src, q2_src, q3_src, \
+ limit_src, b_limit_src, thresh_src, \
+ hev_dst, mask_dst, flat_dst) \
+{ \
+ __m128i p3_asub_p2_m, p2_asub_p1_m, p1_asub_p0_m, q1_asub_q0_m; \
+ __m128i p1_asub_q1_m, p0_asub_q0_m, q3_asub_q2_m, q2_asub_q1_m; \
+ \
+ /* absolute subtraction of pixel values */ \
+ p3_asub_p2_m = __lsx_vabsd_bu(p3_src, p2_src); \
+ p2_asub_p1_m = __lsx_vabsd_bu(p2_src, p1_src); \
+ p1_asub_p0_m = __lsx_vabsd_bu(p1_src, p0_src); \
+ q1_asub_q0_m = __lsx_vabsd_bu(q1_src, q0_src); \
+ q2_asub_q1_m = __lsx_vabsd_bu(q2_src, q1_src); \
+ q3_asub_q2_m = __lsx_vabsd_bu(q3_src, q2_src); \
+ p0_asub_q0_m = __lsx_vabsd_bu(p0_src, q0_src); \
+ p1_asub_q1_m = __lsx_vabsd_bu(p1_src, q1_src); \
+ \
+ /* calculation of hev */ \
+ flat_dst = __lsx_vmax_bu(p1_asub_p0_m, q1_asub_q0_m); \
+ hev_dst = __lsx_vslt_bu(thresh_src, flat_dst); \
+ /* calculation of mask */ \
+ p0_asub_q0_m = __lsx_vsadd_bu(p0_asub_q0_m, p0_asub_q0_m); \
+ p1_asub_q1_m = __lsx_vsrli_b(p1_asub_q1_m, 1); \
+ p0_asub_q0_m = __lsx_vsadd_bu(p0_asub_q0_m, p1_asub_q1_m); \
+ mask_dst = __lsx_vslt_bu(b_limit_src, p0_asub_q0_m); \
+ mask_dst = __lsx_vmax_bu(flat_dst, mask_dst); \
+ p3_asub_p2_m = __lsx_vmax_bu(p3_asub_p2_m, p2_asub_p1_m); \
+ mask_dst = __lsx_vmax_bu(p3_asub_p2_m, mask_dst); \
+ q2_asub_q1_m = __lsx_vmax_bu(q2_asub_q1_m, q3_asub_q2_m); \
+ mask_dst = __lsx_vmax_bu(q2_asub_q1_m, mask_dst); \
+ mask_dst = __lsx_vslt_bu(limit_src, mask_dst); \
+ mask_dst = __lsx_vxori_b(mask_dst, 0xff); \
+}
+
+#define VP8_ST6x1_UB(in0, in0_idx, in1, in1_idx, pdst, stride) \
+{ \
+ __lsx_vstelm_w(in0, pdst, 0, in0_idx); \
+ __lsx_vstelm_h(in1, pdst + stride, 0, in1_idx); \
+}
+
+#define ST_W4(in, idx0, idx1, idx2, idx3, pdst, stride) \
+{ \
+ __lsx_vstelm_w(in, pdst, 0, idx0); \
+ pdst += stride; \
+ __lsx_vstelm_w(in, pdst, 0, idx1); \
+ pdst += stride; \
+ __lsx_vstelm_w(in, pdst, 0, idx2); \
+ pdst += stride; \
+ __lsx_vstelm_w(in, pdst, 0, idx3); \
+ pdst += stride; \
+}
+
+void ff_vp8_v_loop_filter16_lsx(uint8_t *dst, ptrdiff_t stride, int b_limit_in,
+ int limit_in, int thresh_in)
+{
+ __m128i p3, p2, p1, p0, q3, q2, q1, q0;
+ __m128i mask, hev, flat, thresh, limit, b_limit;
+
+ ptrdiff_t stride2 = stride << 1;
+ ptrdiff_t stride3 = stride2 + stride;
+ ptrdiff_t stride4 = stride2 << 1;
+
+ b_limit = __lsx_vreplgr2vr_b(b_limit_in);
+ limit = __lsx_vreplgr2vr_b(limit_in);
+ thresh = __lsx_vreplgr2vr_b(thresh_in);
+
+ /*load vector elements*/
+ DUP4_ARG2(__lsx_vld, dst - stride4, 0, dst - stride3, 0, dst - stride2, 0,
+ dst - stride, 0, p3, p2, p1, p0);
+ DUP4_ARG2(__lsx_vld, dst, 0, dst + stride, 0, dst + stride2, 0, dst + stride3, 0,
+ q0, q1, q2, q3);
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev, mask, flat);
+ VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev);
+
+ /*store vector elements*/
+ __lsx_vst(p2, dst - stride3, 0);
+ __lsx_vst(p1, dst - stride2, 0);
+ __lsx_vst(p0, dst - stride, 0);
+ __lsx_vst(q0, dst, 0);
+
+ __lsx_vst(q1, dst + stride, 0);
+ __lsx_vst(q2, dst + stride2, 0);
+}
+
+void ff_vp8_v_loop_filter8uv_lsx(uint8_t *dst_u, uint8_t *dst_v,
+ ptrdiff_t stride, int b_limit_in,
+ int limit_in, int thresh_in)
+{
+ __m128i p3, p2, p1, p0, q3, q2, q1, q0;
+ __m128i mask, hev, flat, thresh, limit, b_limit;
+ __m128i p3_u, p2_u, p1_u, p0_u, q3_u, q2_u, q1_u, q0_u;
+ __m128i p3_v, p2_v, p1_v, p0_v, q3_v, q2_v, q1_v, q0_v;
+
+ ptrdiff_t stride2 = stride << 1;
+ ptrdiff_t stride3 = stride2 + stride;
+ ptrdiff_t stride4 = stride2 << 1;
+
+ b_limit = __lsx_vreplgr2vr_b(b_limit_in);
+ limit = __lsx_vreplgr2vr_b(limit_in);
+ thresh = __lsx_vreplgr2vr_b(thresh_in);
+
+ DUP4_ARG2(__lsx_vld, dst_u - stride4, 0, dst_u - stride3, 0, dst_u - stride2, 0,
+ dst_u - stride, 0, p3_u, p2_u, p1_u, p0_u);
+ DUP4_ARG2(__lsx_vld, dst_u, 0, dst_u + stride, 0, dst_u + stride2, 0,
+ dst_u + stride3, 0, q0_u, q1_u, q2_u, q3_u);
+
+ DUP4_ARG2(__lsx_vld, dst_v - stride4, 0, dst_v - stride3, 0, dst_v - stride2, 0,
+ dst_v - stride, 0, p3_v, p2_v, p1_v, p0_v);
+ DUP4_ARG2(__lsx_vld, dst_v, 0, dst_v + stride, 0, dst_v + stride2, 0,
+ dst_v + stride3, 0, q0_v, q1_v, q2_v, q3_v);
+
+ /* right 8 elements of each vector are u pixels, left 8 elements are v pixels */
+ DUP4_ARG2(__lsx_vilvl_d, p3_v, p3_u, p2_v, p2_u, p1_v, p1_u, p0_v, p0_u, p3, p2, p1, p0);
+ DUP4_ARG2(__lsx_vilvl_d, q0_v, q0_u, q1_v, q1_u, q2_v, q2_u, q3_v, q3_u, q0, q1, q2, q3);
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
+ hev, mask, flat);
+ VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev);
+
+ __lsx_vstelm_d(p2, dst_u - stride3, 0, 0);
+ __lsx_vstelm_d(p1, dst_u - stride2, 0, 0);
+ __lsx_vstelm_d(p0, dst_u - stride , 0, 0);
+ __lsx_vstelm_d(q0, dst_u, 0, 0);
+
+ __lsx_vstelm_d(q1, dst_u + stride, 0, 0);
+ __lsx_vstelm_d(q2, dst_u + stride2, 0, 0);
+
+ __lsx_vstelm_d(p2, dst_v - stride3, 0, 1);
+ __lsx_vstelm_d(p1, dst_v - stride2, 0, 1);
+ __lsx_vstelm_d(p0, dst_v - stride , 0, 1);
+ __lsx_vstelm_d(q0, dst_v, 0, 1);
+
+ __lsx_vstelm_d(q1, dst_v + stride, 0, 1);
+ __lsx_vstelm_d(q2, dst_v + stride2, 0, 1);
+}
+
+void ff_vp8_h_loop_filter16_lsx(uint8_t *dst, ptrdiff_t stride, int b_limit_in,
+ int limit_in, int thresh_in)
+{
+ uint8_t *temp_src;
+ __m128i p3, p2, p1, p0, q3, q2, q1, q0;
+ __m128i mask, hev, flat, thresh, limit, b_limit;
+ __m128i row0, row1, row2, row3, row4, row5, row6, row7, row8;
+ __m128i row9, row10, row11, row12, row13, row14, row15;
+ __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+
+ ptrdiff_t stride2 = stride << 1;
+ ptrdiff_t stride3 = stride2 + stride;
+ ptrdiff_t stride4 = stride2 << 1;
+
+ b_limit = __lsx_vreplgr2vr_b(b_limit_in);
+ limit = __lsx_vreplgr2vr_b(limit_in);
+ thresh = __lsx_vreplgr2vr_b(thresh_in);
+
+ temp_src = dst - 4;
+ DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
+ temp_src + stride3, 0, row0, row1, row2, row3);
+ temp_src += stride4;
+ DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
+ temp_src + stride3, 0, row4, row5, row6, row7);
+
+ temp_src += stride4;
+ DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
+ temp_src + stride3, 0, row8, row9, row10, row11);
+ temp_src += stride4;
+ DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
+ temp_src + stride3, 0, row12, row13, row14, row15);
+ LSX_TRANSPOSE16x8_B(row0, row1, row2, row3, row4, row5, row6, row7, row8, row9, row10,
+ row11, row12, row13, row14, row15, p3, p2, p1, p0, q0, q1, q2, q3);
+
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev, mask, flat);
+ VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev);
+
+ tmp0 = __lsx_vilvl_b(p1, p2);
+ tmp1 = __lsx_vilvl_b(q0, p0);
+
+ tmp3 = __lsx_vilvl_h(tmp1, tmp0);
+ tmp4 = __lsx_vilvh_h(tmp1, tmp0);
+
+ tmp0 = __lsx_vilvh_b(p1, p2);
+ tmp1 = __lsx_vilvh_b(q0, p0);
+
+ tmp6 = __lsx_vilvl_h(tmp1, tmp0);
+ tmp7 = __lsx_vilvh_h(tmp1, tmp0);
+
+ tmp2 = __lsx_vilvl_b(q2, q1);
+ tmp5 = __lsx_vilvh_b(q2, q1);
+
+ temp_src = dst - 3;
+ VP8_ST6x1_UB(tmp3, 0, tmp2, 0, temp_src, 4);
+ temp_src += stride;
+ VP8_ST6x1_UB(tmp3, 1, tmp2, 1, temp_src, 4);
+ temp_src += stride;
+ VP8_ST6x1_UB(tmp3, 2, tmp2, 2, temp_src, 4);
+ temp_src += stride;
+ VP8_ST6x1_UB(tmp3, 3, tmp2, 3, temp_src, 4);
+ temp_src += stride;
+ VP8_ST6x1_UB(tmp4, 0, tmp2, 4, temp_src, 4);
+ temp_src += stride;
+ VP8_ST6x1_UB(tmp4, 1, tmp2, 5, temp_src, 4);
+ temp_src += stride;
+ VP8_ST6x1_UB(tmp4, 2, tmp2, 6, temp_src, 4);
+ temp_src += stride;
+ VP8_ST6x1_UB(tmp4, 3, tmp2, 7, temp_src, 4);
+ temp_src += stride;
+ VP8_ST6x1_UB(tmp6, 0, tmp5, 0, temp_src, 4);
+ temp_src += stride;
+ VP8_ST6x1_UB(tmp6, 1, tmp5, 1, temp_src, 4);
+ temp_src += stride;
+ VP8_ST6x1_UB(tmp6, 2, tmp5, 2, temp_src, 4);
+ temp_src += stride;
+ VP8_ST6x1_UB(tmp6, 3, tmp5, 3, temp_src, 4);
+ temp_src += stride;
+ VP8_ST6x1_UB(tmp7, 0, tmp5, 4, temp_src, 4);
+ temp_src += stride;
+ VP8_ST6x1_UB(tmp7, 1, tmp5, 5, temp_src, 4);
+ temp_src += stride;
+ VP8_ST6x1_UB(tmp7, 2, tmp5, 6, temp_src, 4);
+ temp_src += stride;
+ VP8_ST6x1_UB(tmp7, 3, tmp5, 7, temp_src, 4);
+}
+
+void ff_vp8_h_loop_filter8uv_lsx(uint8_t *dst_u, uint8_t *dst_v,
+ ptrdiff_t stride, int b_limit_in,
+ int limit_in, int thresh_in)
+{
+ uint8_t *temp_src;
+ __m128i p3, p2, p1, p0, q3, q2, q1, q0;
+ __m128i mask, hev, flat, thresh, limit, b_limit;
+ __m128i row0, row1, row2, row3, row4, row5, row6, row7, row8;
+ __m128i row9, row10, row11, row12, row13, row14, row15;
+ __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+
+ ptrdiff_t stride2 = stride << 1;
+ ptrdiff_t stride3 = stride2 + stride;
+ ptrdiff_t stride4 = stride2 << 1;
+
+ b_limit = __lsx_vreplgr2vr_b(b_limit_in);
+ limit = __lsx_vreplgr2vr_b(limit_in);
+ thresh = __lsx_vreplgr2vr_b(thresh_in);
+
+ temp_src = dst_u - 4;
+ DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
+ temp_src + stride3, 0, row0, row1, row2, row3);
+ temp_src += stride4;
+ DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
+ temp_src + stride3, 0, row4, row5, row6, row7);
+
+ temp_src = dst_v - 4;
+ DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
+ temp_src + stride3, 0, row8, row9, row10, row11);
+ temp_src += stride4;
+ DUP4_ARG2(__lsx_vld, temp_src, 0, temp_src + stride, 0, temp_src + stride2, 0,
+ temp_src + stride3, 0, row12, row13, row14, row15);
+
+ LSX_TRANSPOSE16x8_B(row0, row1, row2, row3, row4, row5, row6, row7,
+ row8, row9, row10, row11, row12, row13, row14, row15,
+ p3, p2, p1, p0, q0, q1, q2, q3);
+
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev, mask, flat);
+ VP8_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev);
+
+ tmp0 = __lsx_vilvl_b(p1, p2);
+ tmp1 = __lsx_vilvl_b(q0, p0);
+
+ tmp3 = __lsx_vilvl_h(tmp1, tmp0);
+ tmp4 = __lsx_vilvh_h(tmp1, tmp0);
+
+ tmp0 = __lsx_vilvh_b(p1, p2);
+ tmp1 = __lsx_vilvh_b(q0, p0);
+
+ tmp6 = __lsx_vilvl_h(tmp1, tmp0);
+ tmp7 = __lsx_vilvh_h(tmp1, tmp0);
+
+ tmp2 = __lsx_vilvl_b(q2, q1);
+ tmp5 = __lsx_vilvh_b(q2, q1);
+
+ dst_u -= 3;
+ VP8_ST6x1_UB(tmp3, 0, tmp2, 0, dst_u, 4);
+ dst_u += stride;
+ VP8_ST6x1_UB(tmp3, 1, tmp2, 1, dst_u, 4);
+ dst_u += stride;
+ VP8_ST6x1_UB(tmp3, 2, tmp2, 2, dst_u, 4);
+ dst_u += stride;
+ VP8_ST6x1_UB(tmp3, 3, tmp2, 3, dst_u, 4);
+ dst_u += stride;
+ VP8_ST6x1_UB(tmp4, 0, tmp2, 4, dst_u, 4);
+ dst_u += stride;
+ VP8_ST6x1_UB(tmp4, 1, tmp2, 5, dst_u, 4);
+ dst_u += stride;
+ VP8_ST6x1_UB(tmp4, 2, tmp2, 6, dst_u, 4);
+ dst_u += stride;
+ VP8_ST6x1_UB(tmp4, 3, tmp2, 7, dst_u, 4);
+
+ dst_v -= 3;
+ VP8_ST6x1_UB(tmp6, 0, tmp5, 0, dst_v, 4);
+ dst_v += stride;
+ VP8_ST6x1_UB(tmp6, 1, tmp5, 1, dst_v, 4);
+ dst_v += stride;
+ VP8_ST6x1_UB(tmp6, 2, tmp5, 2, dst_v, 4);
+ dst_v += stride;
+ VP8_ST6x1_UB(tmp6, 3, tmp5, 3, dst_v, 4);
+ dst_v += stride;
+ VP8_ST6x1_UB(tmp7, 0, tmp5, 4, dst_v, 4);
+ dst_v += stride;
+ VP8_ST6x1_UB(tmp7, 1, tmp5, 5, dst_v, 4);
+ dst_v += stride;
+ VP8_ST6x1_UB(tmp7, 2, tmp5, 6, dst_v, 4);
+ dst_v += stride;
+ VP8_ST6x1_UB(tmp7, 3, tmp5, 7, dst_v, 4);
+}
+
+void ff_vp8_v_loop_filter16_inner_lsx(uint8_t *src, ptrdiff_t stride,
+ int32_t e, int32_t i, int32_t h)
+{
+ __m128i mask, hev, flat;
+ __m128i thresh, b_limit, limit;
+ __m128i p3, p2, p1, p0, q3, q2, q1, q0;
+
+ ptrdiff_t stride2 = stride << 1;
+ ptrdiff_t stride3 = stride2 + stride;
+ ptrdiff_t stride4 = stride2 << 1;
+
+ /* load vector elements */
+ src -= stride4;
+ DUP4_ARG2(__lsx_vld, src, 0, src + stride, 0, src + stride2, 0,
+ src + stride3, 0, p3, p2, p1, p0);
+ src += stride4;
+ DUP4_ARG2(__lsx_vld, src, 0, src + stride, 0, src + stride2, 0,
+ src + stride3, 0, q0, q1, q2, q3);
+ thresh = __lsx_vreplgr2vr_b(h);
+ b_limit = __lsx_vreplgr2vr_b(e);
+ limit = __lsx_vreplgr2vr_b(i);
+
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
+ hev, mask, flat);
+ VP8_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev);
+
+ __lsx_vst(p1, src - stride2, 0);
+ __lsx_vst(p0, src - stride, 0);
+ __lsx_vst(q0, src, 0);
+ __lsx_vst(q1, src + stride, 0);
+}
+
+void ff_vp8_h_loop_filter16_inner_lsx(uint8_t *src, ptrdiff_t stride,
+ int32_t e, int32_t i, int32_t h)
+{
+ __m128i mask, hev, flat;
+ __m128i thresh, b_limit, limit;
+ __m128i p3, p2, p1, p0, q3, q2, q1, q0;
+ __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ __m128i tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15;
+
+ ptrdiff_t stride2 = stride << 1;
+ ptrdiff_t stride3 = stride2 + stride;
+ ptrdiff_t stride4 = stride2 << 1;
+
+ src -= 4;
+ DUP4_ARG2(__lsx_vld, src, 0, src + stride, 0, src + stride2, 0,
+ src + stride3, 0, tmp0, tmp1, tmp2, tmp3);
+ src += stride4;
+ DUP4_ARG2(__lsx_vld, src, 0, src + stride, 0, src + stride2, 0,
+ src + stride3, 0, tmp4, tmp5, tmp6, tmp7);
+ src += stride4;
+ DUP4_ARG2(__lsx_vld, src, 0, src + stride, 0, src + stride2, 0,
+ src + stride3, 0, tmp8, tmp9, tmp10, tmp11);
+ src += stride4;
+ DUP4_ARG2(__lsx_vld, src, 0, src + stride, 0, src + stride2, 0,
+ src + stride3, 0, tmp12, tmp13, tmp14, tmp15);
+ src -= 3 * stride4;
+
+ LSX_TRANSPOSE16x8_B(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7,
+ tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15,
+ p3, p2, p1, p0, q0, q1, q2, q3);
+
+ thresh = __lsx_vreplgr2vr_b(h);
+ b_limit = __lsx_vreplgr2vr_b(e);
+ limit = __lsx_vreplgr2vr_b(i);
+
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
+ hev, mask, flat);
+ VP8_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev);
+
+ DUP2_ARG2(__lsx_vilvl_b, p0, p1, q1, q0, tmp0, tmp1);
+ tmp2 = __lsx_vilvl_h(tmp1, tmp0);
+ tmp3 = __lsx_vilvh_h(tmp1, tmp0);
+
+ src += 2;
+ ST_W4(tmp2, 0, 1, 2, 3, src, stride);
+ ST_W4(tmp3, 0, 1, 2, 3, src, stride);
+
+ DUP2_ARG2(__lsx_vilvh_b, p0, p1, q1, q0, tmp0, tmp1);
+ tmp2 = __lsx_vilvl_h(tmp1, tmp0);
+ tmp3 = __lsx_vilvh_h(tmp1, tmp0);
+
+ ST_W4(tmp2, 0, 1, 2, 3, src, stride);
+ ST_W4(tmp3, 0, 1, 2, 3, src, stride);
+ src -= 4 * stride4;
+}
diff --git a/libavcodec/loongarch/vp8_mc_lsx.c b/libavcodec/loongarch/vp8_mc_lsx.c
new file mode 100644
index 0000000000..80c4f87e80
--- /dev/null
+++ b/libavcodec/loongarch/vp8_mc_lsx.c
@@ -0,0 +1,951 @@
+/*
+ * Copyright (c) 2021 Loongson Technology Corporation Limited
+ * Contributed by Hecai Yuan <yuanhecai@loongson.cn>
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "libavcodec/vp8dsp.h"
+#include "libavutil/loongarch/loongson_intrinsics.h"
+#include "vp8dsp_loongarch.h"
+
+static const uint8_t mc_filt_mask_arr[16 * 3] = {
+ /* 8 width cases */
+ 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8,
+ /* 4 width cases */
+ 0, 1, 1, 2, 2, 3, 3, 4, 16, 17, 17, 18, 18, 19, 19, 20,
+ /* 4 width cases */
+ 8, 9, 9, 10, 10, 11, 11, 12, 24, 25, 25, 26, 26, 27, 27, 28
+};
+
+static const int8_t subpel_filters_lsx[7][8] = {
+ {-6, 123, 12, -1, 0, 0, 0, 0},
+ {2, -11, 108, 36, -8, 1, 0, 0}, /* New 1/4 pel 6 tap filter */
+ {-9, 93, 50, -6, 0, 0, 0, 0},
+ {3, -16, 77, 77, -16, 3, 0, 0}, /* New 1/2 pel 6 tap filter */
+ {-6, 50, 93, -9, 0, 0, 0, 0},
+ {1, -8, 36, 108, -11, 2, 0, 0}, /* New 1/4 pel 6 tap filter */
+ {-1, 12, 123, -6, 0, 0, 0, 0},
+};
+
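+/* out = in0 * coeff0 + in1 * coeff1 + in2 * coeff2, each product being a
+ * pairwise signed-byte dot product accumulated into 16-bit lanes. */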
+#define DPADD_SH3_SH(in0, in1, in2, coeff0, coeff1, coeff2) \
+( { \
+ __m128i out0_m; \
+ \
+ out0_m = __lsx_vdp2_h_b(in0, coeff0); \
+ out0_m = __lsx_vdp2add_h_b(out0_m, in1, coeff1); \
+ out0_m = __lsx_vdp2add_h_b(out0_m, in2, coeff2); \
+ \
+ out0_m; \
+} )
+
+#define VSHF_B3_SB(in0, in1, in2, in3, in4, in5, mask0, mask1, mask2, \
+ out0, out1, out2) \
+{ \
+ DUP2_ARG3(__lsx_vshuf_b, in1, in0, mask0, in3, in2, mask1, \
+ out0, out1); \
+ out2 = __lsx_vshuf_b(in5, in4, mask2); \
+}
+
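+/* 6-tap horizontal filter of one row: the three shuffles gather the tap
+ * pairs, the sum is rounded (>> 7) and saturated to the signed 8-bit range. */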
+#define HORIZ_6TAP_FILT(src0, src1, mask0, mask1, mask2, \
+ filt_h0, filt_h1, filt_h2) \
+( { \
+ __m128i vec0_m, vec1_m, vec2_m; \
+ __m128i hz_out_m; \
+ \
+ VSHF_B3_SB(src0, src1, src0, src1, src0, src1, mask0, mask1, mask2, \
+ vec0_m, vec1_m, vec2_m); \
+ hz_out_m = DPADD_SH3_SH(vec0_m, vec1_m, vec2_m, \
+ filt_h0, filt_h1, filt_h2); \
+ \
+ hz_out_m = __lsx_vsrari_h(hz_out_m, 7); \
+ hz_out_m = __lsx_vsat_h(hz_out_m, 7); \
+ \
+ hz_out_m; \
+} )
+
+#define HORIZ_6TAP_8WID_4VECS_FILT(src0, src1, src2, src3, \
+ mask0, mask1, mask2, \
+ filt0, filt1, filt2, \
+ out0, out1, out2, out3) \
+{ \
+ __m128i vec0_m, vec1_m, vec2_m, vec3_m, vec4_m, vec5_m, vec6_m, vec7_m; \
+ \
+ DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask0, src1, src1, mask0, src2, src2, \
+ mask0, src3, src3, mask0, vec0_m, vec1_m, vec2_m, vec3_m); \
+ DUP4_ARG2(__lsx_vdp2_h_b, vec0_m, filt0, vec1_m, filt0, vec2_m, filt0, \
+ vec3_m, filt0, out0, out1, out2, out3); \
+ DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask1, src1, src1, mask1, src2, src2, \
+ mask1, src3, src3, mask1, vec0_m, vec1_m, vec2_m, vec3_m); \
+ DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask2, src1, src1, mask2, src2, src2, \
+ mask2, src3, src3, mask2, vec4_m, vec5_m, vec6_m, vec7_m); \
+ DUP4_ARG3(__lsx_vdp2add_h_b, out0, vec0_m, filt1, out1, vec1_m, filt1, \
+ out2, vec2_m, filt1, out3, vec3_m, filt1, out0, out1, out2, out3); \
+ DUP4_ARG3(__lsx_vdp2add_h_b, out0, vec4_m, filt2, out1, vec5_m, filt2, \
+ out2, vec6_m, filt2, out3, vec7_m, filt2, out0, out1, out2, out3); \
+}
+
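+/* 4-tap filter core: vec0 * filt0 + vec1 * filt1 as pairwise signed-byte dot
+ * products accumulated into 16-bit lanes. */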
+#define FILT_4TAP_DPADD_S_H(vec0, vec1, filt0, filt1) \
+( { \
+ __m128i tmp0; \
+ \
+ tmp0 = __lsx_vdp2_h_b(vec0, filt0); \
+ tmp0 = __lsx_vdp2add_h_b(tmp0, vec1, filt1); \
+ \
+ tmp0; \
+} )
+
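+/* 4-tap horizontal filter of one row, rounded (>> 7) and saturated to the
+ * signed 8-bit range. */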
+#define HORIZ_4TAP_FILT(src0, src1, mask0, mask1, filt_h0, filt_h1) \
+( { \
+ __m128i vec0_m, vec1_m; \
+ __m128i hz_out_m; \
+ DUP2_ARG3(__lsx_vshuf_b, src1, src0, mask0, src1, src0, mask1, \
+ vec0_m, vec1_m); \
+ hz_out_m = FILT_4TAP_DPADD_S_H(vec0_m, vec1_m, filt_h0, filt_h1); \
+ \
+ hz_out_m = __lsx_vsrari_h(hz_out_m, 7); \
+ hz_out_m = __lsx_vsat_h(hz_out_m, 7); \
+ \
+ hz_out_m; \
+} )
+
+void ff_put_vp8_epel8_h6_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ uint32_t loop_cnt;
+ const int8_t *filter = subpel_filters_lsx[mx - 1];
+ __m128i src0, src1, src2, src3, filt0, filt1, filt2;
+ __m128i mask0, mask1, mask2;
+ __m128i out0, out1, out2, out3;
+
+ ptrdiff_t src_stride2 = src_stride << 1;
+ ptrdiff_t src_stride3 = src_stride2 + src_stride;
+ ptrdiff_t src_stride4 = src_stride2 << 1;
+
+ mask0 = __lsx_vld(mc_filt_mask_arr, 0);
+ src -= 2;
+
+ /* rearranging filter */
+ DUP2_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filt0, filt1);
+ filt2 = __lsx_vldrepl_h(filter, 4);
+
+ DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
+
+ DUP4_ARG2(__lsx_vld, src, 0, src + src_stride, 0, src + src_stride2, 0,
+ src + src_stride3, 0, src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
+ src0, src1, src2, src3);
+ src += src_stride4;
+ HORIZ_6TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
+ filt0, filt1, filt2, out0, out1, out2, out3);
+
+ DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7, out0, out1);
+ DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
+ __lsx_vstelm_d(out0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(out0, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_d(out1, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(out1, dst, 0, 1);
+ dst += dst_stride;
+
+ for (loop_cnt = (height >> 2) - 1; loop_cnt--;) {
+ DUP4_ARG2(__lsx_vld, src, 0, src + src_stride, 0, src + src_stride2, 0,
+ src + src_stride3, 0, src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
+ src0, src1, src2, src3);
+ src += src_stride4;
+ HORIZ_6TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
+ filt0, filt1, filt2, out0, out1, out2, out3);
+
+ DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7, out0, out1);
+ DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
+
+ __lsx_vstelm_d(out0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(out0, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_d(out1, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(out1, dst, 0, 1);
+ dst += dst_stride;
+ }
+}
+
+void ff_put_vp8_epel16_h6_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ uint32_t loop_cnt;
+ const int8_t *filter = subpel_filters_lsx[mx - 1];
+ __m128i src0, src1, src2, src3, src4, src5, src6, src7, filt0, filt1;
+ __m128i filt2, mask0, mask1, mask2;
+ __m128i out0, out1, out2, out3, out4, out5, out6, out7;
+
+ ptrdiff_t src_stride2 = src_stride << 1;
+ ptrdiff_t src_stride3 = src_stride2 + src_stride;
+ ptrdiff_t src_stride4 = src_stride2 << 1;
+
+ mask0 = __lsx_vld(mc_filt_mask_arr, 0);
+ src -= 2;
+ /* rearranging filter */
+ DUP2_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filt0, filt1);
+ filt2 = __lsx_vldrepl_h(filter, 4);
+
+ DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ DUP4_ARG2(__lsx_vld, src, 0, src + src_stride, 0, src + src_stride2,
0, src + src_stride3, 0, src0, src2, src4, src6);
+ DUP4_ARG2(__lsx_vld, src, 8, src + src_stride, 8, src + src_stride2,
+ 8, src + src_stride3, 8, src1, src3, src5, src7);
+
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
+ src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vxori_b, src4, 128, src5, 128, src6, 128, src7, 128,
+ src4, src5, src6, src7);
+ src += src_stride4;
+
+ HORIZ_6TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
+ filt0, filt1, filt2, out0, out1, out2, out3);
+ HORIZ_6TAP_8WID_4VECS_FILT(src4, src5, src6, src7, mask0, mask1, mask2,
+ filt0, filt1, filt2, out4, out5, out6, out7);
+ DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7, out0, out1);
+ DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
+ __lsx_vst(out0, dst, 0);
+ dst += dst_stride;
+ __lsx_vst(out1, dst, 0);
+ dst += dst_stride;
+
+ DUP2_ARG3(__lsx_vssrarni_b_h, out5, out4, 7, out7, out6, 7, out4, out5);
+ DUP2_ARG2(__lsx_vxori_b, out4, 128, out5, 128, out4, out5);
+ __lsx_vst(out4, dst, 0);
+ dst += dst_stride;
+ __lsx_vst(out5, dst, 0);
+ dst += dst_stride;
+ }
+}
+
+void ff_put_vp8_epel8_v6_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ uint32_t loop_cnt;
+ const int8_t *filter = subpel_filters_lsx[my - 1];
+ __m128i src0, src1, src2, src3, src4, src7, src8, src9, src10;
+ __m128i src10_l, src32_l, src76_l, src98_l, src21_l, src43_l, src87_l;
+ __m128i src109_l, filt0, filt1, filt2;
+ __m128i out0_l, out1_l, out2_l, out3_l;
+
+ ptrdiff_t src_stride2 = src_stride << 1;
+ ptrdiff_t src_stride3 = src_stride2 + src_stride;
+ ptrdiff_t src_stride4 = src_stride2 << 1;
+
+ src -= src_stride2;
+ DUP2_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filt0, filt1);
+ filt2 = __lsx_vldrepl_h(filter, 4);
+
+ DUP4_ARG2(__lsx_vld, src, 0, src + src_stride, 0, src + src_stride2, 0,
+ src + src_stride3, 0, src0, src1, src2, src3);
+ src += src_stride4;
+ src4 = __lsx_vld(src, 0);
+ src += src_stride;
+
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
+ src0, src1, src2, src3);
+ src4 = __lsx_vxori_b(src4, 128);
+
+ DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src2, src1, src4,
+ src3, src10_l, src32_l, src21_l, src43_l);
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ DUP4_ARG2(__lsx_vld, src, 0, src + src_stride, 0, src + src_stride2,
+ 0, src + src_stride3, 0, src7, src8, src9, src10);
+ DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128, src10,
+ 128, src7, src8, src9, src10);
+ src += src_stride4;
+
+ DUP4_ARG2(__lsx_vilvl_b, src7, src4, src8, src7, src9, src8, src10,
+ src9, src76_l, src87_l, src98_l, src109_l);
+
+ out0_l = DPADD_SH3_SH(src10_l, src32_l, src76_l, filt0, filt1, filt2);
+ out1_l = DPADD_SH3_SH(src21_l, src43_l, src87_l, filt0, filt1, filt2);
+ out2_l = DPADD_SH3_SH(src32_l, src76_l, src98_l, filt0, filt1, filt2);
+ out3_l = DPADD_SH3_SH(src43_l, src87_l, src109_l, filt0, filt1, filt2);
+
+ DUP2_ARG3(__lsx_vssrarni_b_h, out1_l, out0_l, 7, out3_l, out2_l, 7,
+ out0_l, out1_l);
+ DUP2_ARG2(__lsx_vxori_b, out0_l, 128, out1_l, 128, out0_l, out1_l);
+
+ __lsx_vstelm_d(out0_l, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(out0_l, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_d(out1_l, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(out1_l, dst, 0, 1);
+ dst += dst_stride;
+
+ src10_l = src76_l;
+ src32_l = src98_l;
+ src21_l = src87_l;
+ src43_l = src109_l;
+ src4 = src10;
+ }
+}
+
+void ff_put_vp8_epel16_v6_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ uint32_t loop_cnt;
+ const int8_t *filter = subpel_filters_lsx[my - 1];
+ __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8;
+ __m128i src10_l, src32_l, src54_l, src76_l, src21_l, src43_l, src65_l, src87_l;
+ __m128i src10_h, src32_h, src54_h, src76_h, src21_h, src43_h, src65_h, src87_h;
+ __m128i filt0, filt1, filt2;
+ __m128i tmp0, tmp1, tmp2, tmp3;
+
+ ptrdiff_t src_stride2 = src_stride << 1;
+ ptrdiff_t src_stride3 = src_stride2 + src_stride;
+ ptrdiff_t src_stride4 = src_stride2 << 1;
+
+ DUP2_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filt0, filt1);
+ filt2 = __lsx_vldrepl_h(filter, 4);
+
+ DUP4_ARG2(__lsx_vld, src - src_stride2, 0, src - src_stride, 0, src, 0,
+ src + src_stride, 0, src0, src1, src2, src3);
+ src4 = __lsx_vld(src + src_stride2, 0);
+ src += src_stride3;
+
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128, src0,
+ src1, src2, src3);
+ src4 = __lsx_vxori_b(src4, 128);
+
+ DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src4, src3, src2, src1,
+ src10_l, src32_l, src43_l, src21_l);
+ DUP4_ARG2(__lsx_vilvh_b, src1, src0, src3, src2, src4, src3, src2, src1,
+ src10_h, src32_h, src43_h, src21_h);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ DUP4_ARG2(__lsx_vld, src, 0, src + src_stride, 0, src + src_stride2, 0,
+ src + src_stride3, 0, src5, src6, src7, src8);
+ src += src_stride4;
+ DUP4_ARG2(__lsx_vxori_b, src5, 128, src6, 128, src7, 128, src8, 128,
+ src5, src6, src7, src8);
+
+ DUP4_ARG2(__lsx_vilvl_b, src5, src4, src6, src5, src7, src6, src8, src7,
+ src54_l, src65_l, src76_l, src87_l);
+ DUP4_ARG2(__lsx_vilvh_b, src5, src4, src6, src5, src7, src6, src8, src7,
+ src54_h, src65_h, src76_h, src87_h);
+
+ tmp0 = DPADD_SH3_SH(src10_l, src32_l, src54_l, filt0, filt1, filt2);
+ tmp1 = DPADD_SH3_SH(src21_l, src43_l, src65_l, filt0, filt1, filt2);
+ tmp2 = DPADD_SH3_SH(src10_h, src32_h, src54_h, filt0, filt1, filt2);
+ tmp3 = DPADD_SH3_SH(src21_h, src43_h, src65_h, filt0, filt1, filt2);
+
+ DUP2_ARG3(__lsx_vssrarni_b_h, tmp2, tmp0, 7, tmp3, tmp1, 7, tmp0, tmp1);
+ DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
+ __lsx_vst(tmp0, dst, 0);
+ dst += dst_stride;
+ __lsx_vst(tmp1, dst, 0);
+ dst += dst_stride;
+
+ tmp0 = DPADD_SH3_SH(src32_l, src54_l, src76_l, filt0, filt1, filt2);
+ tmp1 = DPADD_SH3_SH(src43_l, src65_l, src87_l, filt0, filt1, filt2);
+ tmp2 = DPADD_SH3_SH(src32_h, src54_h, src76_h, filt0, filt1, filt2);
+ tmp3 = DPADD_SH3_SH(src43_h, src65_h, src87_h, filt0, filt1, filt2);
+
+ DUP2_ARG3(__lsx_vssrarni_b_h, tmp2, tmp0, 7, tmp3, tmp1, 7, tmp0, tmp1);
+ DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
+ __lsx_vst(tmp0, dst, 0);
+ dst += dst_stride;
+ __lsx_vst(tmp1, dst, 0);
+ dst += dst_stride;
+
+ src10_l = src54_l;
+ src32_l = src76_l;
+ src21_l = src65_l;
+ src43_l = src87_l;
+ src10_h = src54_h;
+ src32_h = src76_h;
+ src21_h = src65_h;
+ src43_h = src87_h;
+ src4 = src8;
+ }
+}
+
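+/* 8-wide two-dimensional MC: rows are filtered with the 6-tap horizontal
+ * filter (rounded and saturated to signed 8 bits), then the intermediate
+ * rows are filtered with the 6-tap vertical filter. */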
+void ff_put_vp8_epel8_h6v6_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ uint32_t loop_cnt;
+ const int8_t *filter_horiz = subpel_filters_lsx[mx - 1];
+ const int8_t *filter_vert = subpel_filters_lsx[my - 1];
+ __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8;
+ __m128i filt_hz0, filt_hz1, filt_hz2;
+ __m128i mask0, mask1, mask2, filt_vt0, filt_vt1, filt_vt2;
+ __m128i hz_out0, hz_out1, hz_out2, hz_out3, hz_out4, hz_out5, hz_out6;
+ __m128i hz_out7, hz_out8, out0, out1, out2, out3, out4, out5, out6, out7;
+ __m128i tmp0, tmp1, tmp2, tmp3;
+
+ ptrdiff_t src_stride2 = src_stride << 1;
+ ptrdiff_t src_stride3 = src_stride2 + src_stride;
+ ptrdiff_t src_stride4 = src_stride2 << 1;
+
+ mask0 = __lsx_vld(mc_filt_mask_arr, 0);
+ src -= (2 + src_stride2);
+
+ /* rearranging filter */
+ DUP2_ARG2(__lsx_vldrepl_h, filter_horiz, 0, filter_horiz, 2, filt_hz0, filt_hz1);
+ filt_hz2 = __lsx_vldrepl_h(filter_horiz, 4);
+
+ DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
+
+ DUP4_ARG2(__lsx_vld, src, 0, src + src_stride, 0, src + src_stride2, 0,
+ src + src_stride3, 0, src0, src1, src2, src3);
+ src += src_stride4;
+ src4 = __lsx_vld(src, 0);
+ src += src_stride;
+
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
src0, src1, src2, src3);
+ src4 = __lsx_vxori_b(src4, 128);
+
+ hz_out0 = HORIZ_6TAP_FILT(src0, src0, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ hz_out1 = HORIZ_6TAP_FILT(src1, src1, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ hz_out2 = HORIZ_6TAP_FILT(src2, src2, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ hz_out3 = HORIZ_6TAP_FILT(src3, src3, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ hz_out4 = HORIZ_6TAP_FILT(src4, src4, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+
+ DUP2_ARG2(__lsx_vldrepl_h, filter_vert, 0, filter_vert, 2, filt_vt0, filt_vt1);
+ filt_vt2 = __lsx_vldrepl_h(filter_vert, 4);
+
+ DUP2_ARG2(__lsx_vpackev_b, hz_out1, hz_out0, hz_out3, hz_out2, out0, out1);
+ DUP2_ARG2(__lsx_vpackev_b, hz_out2, hz_out1, hz_out4, hz_out3, out3, out4);
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ DUP4_ARG2(__lsx_vld, src, 0, src + src_stride, 0, src + src_stride2, 0,
+ src + src_stride3, 0, src5, src6, src7, src8);
+ src += src_stride4;
+
+ DUP4_ARG2(__lsx_vxori_b, src5, 128, src6, 128, src7, 128, src8, 128,
+ src5, src6, src7, src8);
+
+ hz_out5 = HORIZ_6TAP_FILT(src5, src5, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ out2 = __lsx_vpackev_b(hz_out5, hz_out4);
+ tmp0 = DPADD_SH3_SH(out0, out1, out2, filt_vt0, filt_vt1, filt_vt2);
+
+ hz_out6 = HORIZ_6TAP_FILT(src6, src6, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ out5 = __lsx_vpackev_b(hz_out6, hz_out5);
+ tmp1 = DPADD_SH3_SH(out3, out4, out5, filt_vt0, filt_vt1, filt_vt2);
+
+ hz_out7 = HORIZ_6TAP_FILT(src7, src7, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+
+ out7 = __lsx_vpackev_b(hz_out7, hz_out6);
+ tmp2 = DPADD_SH3_SH(out1, out2, out7, filt_vt0, filt_vt1, filt_vt2);
+
+ hz_out8 = HORIZ_6TAP_FILT(src8, src8, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ out6 = __lsx_vpackev_b(hz_out8, hz_out7);
+ tmp3 = DPADD_SH3_SH(out4, out5, out6, filt_vt0, filt_vt1, filt_vt2);
+
+ DUP2_ARG3(__lsx_vssrarni_b_h, tmp1, tmp0, 7, tmp3, tmp2, 7, tmp0, tmp1);
+ DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
+ __lsx_vstelm_d(tmp0, dst, 0, 0);
+
+ dst += dst_stride;
+ __lsx_vstelm_d(tmp0, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_d(tmp1, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(tmp1, dst, 0, 1);
+ dst += dst_stride;
+
+ hz_out4 = hz_out8;
+ out0 = out2;
+ out1 = out7;
+ out3 = out5;
+ out4 = out6;
+ }
+}
+
+void ff_put_vp8_epel16_h6v6_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ int32_t multiple8_cnt;
+
+ for (multiple8_cnt = 2; multiple8_cnt--;) {
+ ff_put_vp8_epel8_h6v6_lsx(dst, dst_stride, src, src_stride, height, mx, my);
+ src += 8;
+ dst += 8;
+ }
+}
+
+void ff_put_vp8_epel8_v4_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ uint32_t loop_cnt;
+ const int8_t *filter = subpel_filters_lsx[my - 1];
+ __m128i src0, src1, src2, src7, src8, src9, src10;
+ __m128i src10_l, src72_l, src98_l, src21_l, src87_l, src109_l, filt0, filt1;
+ __m128i out0, out1, out2, out3;
+
+ ptrdiff_t src_stride2 = src_stride << 1;
+ ptrdiff_t src_stride3 = src_stride2 + src_stride;
+ ptrdiff_t src_stride4 = src_stride2 << 1;
+
+ src -= src_stride;
+
+ DUP2_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filt0, filt1);
+ DUP2_ARG2(__lsx_vld, src, 0, src + src_stride, 0, src0, src1);
+ src2 = __lsx_vld(src + src_stride2, 0);
+ src += src_stride3;
+
+ DUP2_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src0, src1);
+ src2 = __lsx_vxori_b(src2, 128);
+ DUP2_ARG2(__lsx_vilvl_b, src1, src0, src2, src1, src10_l, src21_l);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ DUP4_ARG2(__lsx_vld, src, 0, src + src_stride, 0, src + src_stride2, 0,
+ src + src_stride3, 0, src7, src8, src9, src10);
+ src += src_stride4;
+
+ DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128, src10, 128,
+ src7, src8, src9, src10);
+ DUP4_ARG2(__lsx_vilvl_b, src7, src2, src8, src7, src9, src8, src10, src9,
+ src72_l, src87_l, src98_l, src109_l);
+
+ out0 = FILT_4TAP_DPADD_S_H(src10_l, src72_l, filt0, filt1);
+ out1 = FILT_4TAP_DPADD_S_H(src21_l, src87_l, filt0, filt1);
+ out2 = FILT_4TAP_DPADD_S_H(src72_l, src98_l, filt0, filt1);
+ out3 = FILT_4TAP_DPADD_S_H(src87_l, src109_l, filt0, filt1);
+ DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7, out0, out1);
+ DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
+
+ __lsx_vstelm_d(out0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(out0, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_d(out1, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(out1, dst, 0, 1);
+ dst += dst_stride;
+
+ src10_l = src98_l;
+ src21_l = src109_l;
+ src2 = src10;
+ }
+}
+
+void ff_put_vp8_epel16_v4_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ uint32_t loop_cnt;
+ const int8_t *filter = subpel_filters_lsx[my - 1];
+ __m128i src0, src1, src2, src3, src4, src5, src6;
+ __m128i src10_l, src32_l, src54_l, src21_l, src43_l, src65_l, src10_h;
+ __m128i src32_h, src54_h, src21_h, src43_h, src65_h, filt0, filt1;
+ __m128i tmp0, tmp1, tmp2, tmp3;
+
+ ptrdiff_t src_stride2 = src_stride << 1;
+ ptrdiff_t src_stride3 = src_stride2 + src_stride;
+ ptrdiff_t src_stride4 = src_stride2 << 1;
+
+ src -= src_stride;
+ DUP2_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filt0, filt1);
+ DUP2_ARG2(__lsx_vld, src, 0, src + src_stride, 0, src0, src1);
+ src2 = __lsx_vld(src + src_stride2, 0);
+ src += src_stride3;
+
+ DUP2_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src0, src1);
+ src2 = __lsx_vxori_b(src2, 128);
+ DUP2_ARG2(__lsx_vilvl_b, src1, src0, src2, src1, src10_l, src21_l);
+ DUP2_ARG2(__lsx_vilvh_b, src1, src0, src2, src1, src10_h, src21_h);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ DUP4_ARG2(__lsx_vld, src, 0, src + src_stride, 0, src + src_stride2,
+ 0, src + src_stride3, 0, src3, src4, src5, src6);
+ src += src_stride4;
+
+ DUP4_ARG2(__lsx_vxori_b, src3, 128, src4, 128, src5, 128, src6, 128,
+ src3, src4, src5, src6);
+ DUP4_ARG2(__lsx_vilvl_b, src3, src2, src4, src3, src5, src4, src6,
+ src5, src32_l, src43_l, src54_l, src65_l);
+ DUP4_ARG2(__lsx_vilvh_b, src3, src2, src4, src3, src5, src4, src6,
+ src5, src32_h, src43_h, src54_h, src65_h);
+
+ tmp0 = FILT_4TAP_DPADD_S_H(src10_l, src32_l, filt0, filt1);
+ tmp1 = FILT_4TAP_DPADD_S_H(src21_l, src43_l, filt0, filt1);
+ tmp2 = FILT_4TAP_DPADD_S_H(src10_h, src32_h, filt0, filt1);
+ tmp3 = FILT_4TAP_DPADD_S_H(src21_h, src43_h, filt0, filt1);
+ DUP2_ARG3(__lsx_vssrarni_b_h, tmp2, tmp0, 7, tmp3, tmp1, 7, tmp0, tmp1);
+ DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
+
+ __lsx_vst(tmp0, dst, 0);
+ dst += dst_stride;
+ __lsx_vst(tmp1, dst, 0);
+ dst += dst_stride;
+
+ tmp0 = FILT_4TAP_DPADD_S_H(src32_l, src54_l, filt0, filt1);
+ tmp1 = FILT_4TAP_DPADD_S_H(src43_l, src65_l, filt0, filt1);
+ tmp2 = FILT_4TAP_DPADD_S_H(src32_h, src54_h, filt0, filt1);
+ tmp3 = FILT_4TAP_DPADD_S_H(src43_h, src65_h, filt0, filt1);
+ DUP2_ARG3(__lsx_vssrarni_b_h, tmp2, tmp0, 7, tmp3, tmp1, 7, tmp0, tmp1);
+ DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
+
+ __lsx_vst(tmp0, dst, 0);
+ dst += dst_stride;
+ __lsx_vst(tmp1, dst, 0);
+ dst += dst_stride;
+
+ src10_l = src54_l;
+ src21_l = src65_l;
+ src10_h = src54_h;
+ src21_h = src65_h;
+ src2 = src6;
+ }
+}
+
+void ff_put_vp8_epel8_h6v4_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ uint32_t loop_cnt;
+ const int8_t *filter_horiz = subpel_filters_lsx[mx - 1];
+ const int8_t *filter_vert = subpel_filters_lsx[my - 1];
+ __m128i src0, src1, src2, src3, src4, src5, src6;
+ __m128i filt_hz0, filt_hz1, filt_hz2, mask0, mask1, mask2;
+ __m128i filt_vt0, filt_vt1, hz_out0, hz_out1, hz_out2, hz_out3;
+ __m128i tmp0, tmp1, tmp2, tmp3, vec0, vec1, vec2, vec3;
+
+ ptrdiff_t src_stride2 = src_stride << 1;
+ ptrdiff_t src_stride3 = src_stride2 + src_stride;
+ ptrdiff_t src_stride4 = src_stride2 << 1;
+
+ mask0 = __lsx_vld(mc_filt_mask_arr, 0);
+ src -= (2 + src_stride);
+
+ /* rearranging filter */
+ DUP2_ARG2(__lsx_vldrepl_h, filter_horiz, 0, filter_horiz, 2, filt_hz0, filt_hz1);
+ filt_hz2 = __lsx_vldrepl_h(filter_horiz, 4);
+
+ DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
+
+ DUP2_ARG2(__lsx_vld, src, 0, src + src_stride, 0, src0, src1);
+ src2 = __lsx_vld(src + src_stride2, 0);
+ src += src_stride3;
+
+ DUP2_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src0, src1);
+ src2 = __lsx_vxori_b(src2, 128);
+ hz_out0 = HORIZ_6TAP_FILT(src0, src0, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ hz_out1 = HORIZ_6TAP_FILT(src1, src1, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ hz_out2 = HORIZ_6TAP_FILT(src2, src2, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ DUP2_ARG2(__lsx_vpackev_b, hz_out1, hz_out0, hz_out2, hz_out1, vec0, vec2);
+
+ DUP2_ARG2(__lsx_vldrepl_h, filter_vert, 0, filter_vert, 2, filt_vt0, filt_vt1);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ DUP4_ARG2(__lsx_vld, src, 0, src + src_stride, 0, src + src_stride2, 0,
+ src + src_stride3, 0, src3, src4, src5, src6);
+ src += src_stride4;
+
+ DUP4_ARG2(__lsx_vxori_b, src3, 128, src4, 128, src5, 128, src6, 128,
+ src3, src4, src5, src6);
+
+ hz_out3 = HORIZ_6TAP_FILT(src3, src3, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ vec1 = __lsx_vpackev_b(hz_out3, hz_out2);
+ tmp0 = FILT_4TAP_DPADD_S_H(vec0, vec1, filt_vt0, filt_vt1);
+
+ hz_out0 = HORIZ_6TAP_FILT(src4, src4, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ vec3 = __lsx_vpackev_b(hz_out0, hz_out3);
+ tmp1 = FILT_4TAP_DPADD_S_H(vec2, vec3, filt_vt0, filt_vt1);
+
+ hz_out1 = HORIZ_6TAP_FILT(src5, src5, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ vec0 = __lsx_vpackev_b(hz_out1, hz_out0);
+ tmp2 = FILT_4TAP_DPADD_S_H(vec1, vec0, filt_vt0, filt_vt1);
+
+ hz_out2 = HORIZ_6TAP_FILT(src6, src6, mask0, mask1, mask2, filt_hz0,
+ filt_hz1, filt_hz2);
+ DUP2_ARG2(__lsx_vpackev_b, hz_out0, hz_out3, hz_out2, hz_out1, vec1, vec2);
+ tmp3 = FILT_4TAP_DPADD_S_H(vec1, vec2, filt_vt0, filt_vt1);
+
+ DUP2_ARG3(__lsx_vssrarni_b_h, tmp1, tmp0, 7, tmp3, tmp2, 7, tmp0, tmp1);
+ DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
+
+ __lsx_vstelm_d(tmp0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(tmp0, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_d(tmp1, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(tmp1, dst, 0, 1);
+ dst += dst_stride;
+ }
+}
+
+void ff_put_vp8_epel16_h6v4_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ int32_t multiple8_cnt;
+
+ for (multiple8_cnt = 2; multiple8_cnt--;) {
+ ff_put_vp8_epel8_h6v4_lsx(dst, dst_stride, src, src_stride, height,
+ mx, my);
+ src += 8;
+ dst += 8;
+ }
+}
+
+void ff_put_vp8_epel8_h4v6_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ uint32_t loop_cnt;
+ const int8_t *filter_horiz = subpel_filters_lsx[mx - 1];
+ const int8_t *filter_vert = subpel_filters_lsx[my - 1];
+ __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8;
+ __m128i filt_hz0, filt_hz1, mask0, mask1;
+ __m128i filt_vt0, filt_vt1, filt_vt2;
+ __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8;
+ __m128i out0, out1, out2, out3, out4, out5, out6, out7;
+
+ ptrdiff_t src_stride2 = src_stride << 1;
+ ptrdiff_t src_stride3 = src_stride2 + src_stride;
+ ptrdiff_t src_stride4 = src_stride2 << 1;
+
+ mask0 = __lsx_vld(mc_filt_mask_arr, 0);
+ src -= (1 + src_stride2);
+
+ /* rearranging filter */
+ DUP2_ARG2(__lsx_vldrepl_h, filter_horiz, 0, filter_horiz, 2, filt_hz0, filt_hz1);
+ mask1 = __lsx_vaddi_bu(mask0, 2);
+
+ DUP4_ARG2(__lsx_vld, src, 0, src + src_stride, 0, src + src_stride2, 0,
+ src + src_stride3, 0, src0, src1, src2, src3);
+ src += src_stride4;
+ src4 = __lsx_vld(src, 0);
+ src += src_stride;
+
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
+ src0, src1, src2, src3);
+ src4 = __lsx_vxori_b(src4, 128);
+
+ tmp0 = HORIZ_4TAP_FILT(src0, src0, mask0, mask1, filt_hz0, filt_hz1);
+ tmp1 = HORIZ_4TAP_FILT(src1, src1, mask0, mask1, filt_hz0, filt_hz1);
+ tmp2 = HORIZ_4TAP_FILT(src2, src2, mask0, mask1, filt_hz0, filt_hz1);
+ tmp3 = HORIZ_4TAP_FILT(src3, src3, mask0, mask1, filt_hz0, filt_hz1);
+ tmp4 = HORIZ_4TAP_FILT(src4, src4, mask0, mask1, filt_hz0, filt_hz1);
+
+ DUP4_ARG2(__lsx_vpackev_b, tmp1, tmp0, tmp3, tmp2, tmp2, tmp1,
+ tmp4, tmp3, out0, out1, out3, out4);
+
+ DUP2_ARG2(__lsx_vldrepl_h, filter_vert, 0, filter_vert, 2, filt_vt0, filt_vt1);
+ filt_vt2 = __lsx_vldrepl_h(filter_vert, 4);
+
+ for (loop_cnt = (height >> 2); loop_cnt--;) {
+ DUP4_ARG2(__lsx_vld, src, 0, src + src_stride, 0, src + src_stride2, 0,
+ src + src_stride3, 0, src5, src6, src7, src8);
+ src += src_stride4;
+
+ DUP4_ARG2(__lsx_vxori_b, src5, 128, src6, 128, src7, 128, src8, 128,
+ src5, src6, src7, src8);
+
+ tmp5 = HORIZ_4TAP_FILT(src5, src5, mask0, mask1, filt_hz0, filt_hz1);
+ out2 = __lsx_vpackev_b(tmp5, tmp4);
+ tmp0 = DPADD_SH3_SH(out0, out1, out2, filt_vt0, filt_vt1, filt_vt2);
+
+ tmp6 = HORIZ_4TAP_FILT(src6, src6, mask0, mask1, filt_hz0, filt_hz1);
+ out5 = __lsx_vpackev_b(tmp6, tmp5);
+ tmp1 = DPADD_SH3_SH(out3, out4, out5, filt_vt0, filt_vt1, filt_vt2);
+
+ tmp7 = HORIZ_4TAP_FILT(src7, src7, mask0, mask1, filt_hz0, filt_hz1);
+ out6 = __lsx_vpackev_b(tmp7, tmp6);
+ tmp2 = DPADD_SH3_SH(out1, out2, out6, filt_vt0, filt_vt1, filt_vt2);
+
+ tmp8 = HORIZ_4TAP_FILT(src8, src8, mask0, mask1, filt_hz0, filt_hz1);
+ out7 = __lsx_vpackev_b(tmp8, tmp7);
+ tmp3 = DPADD_SH3_SH(out4, out5, out7, filt_vt0, filt_vt1, filt_vt2);
+
+ DUP2_ARG3(__lsx_vssrarni_b_h, tmp1, tmp0, 7, tmp3, tmp2, 7, tmp0, tmp1);
+ DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
+
+ __lsx_vstelm_d(tmp0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(tmp0, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_d(tmp1, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(tmp1, dst, 0, 1);
+ dst += dst_stride;
+
+ tmp4 = tmp8;
+ out0 = out2;
+ out1 = out6;
+ out3 = out5;
+ out4 = out7;
+ }
+}
+
+void ff_put_vp8_epel16_h4v6_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ int32_t multiple8_cnt;
+
+ for (multiple8_cnt = 2; multiple8_cnt--;) {
+ ff_put_vp8_epel8_h4v6_lsx(dst, dst_stride, src, src_stride, height,
+ mx, my);
+ src += 8;
+ dst += 8;
+ }
+}
+
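+/* Full-pel copy (no interpolation); used for the [0][0] entries of the epel
+ * and bilinear tables. */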
+void ff_put_vp8_pixels8_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ int32_t cnt;
+ __m128i src0, src1, src2, src3;
+
+ ptrdiff_t src_stride2 = src_stride << 1;
+ ptrdiff_t src_stride3 = src_stride2 + src_stride;
+ ptrdiff_t src_stride4 = src_stride2 << 1;
+
+ if (0 == height % 8) {
+ for (cnt = height >> 3; cnt--;) {
+ DUP4_ARG2(__lsx_vld, src, 0, src + src_stride, 0, src + src_stride2, 0,
+ src + src_stride3, 0, src0, src1, src2, src3);
+ src += src_stride4;
+
+ __lsx_vstelm_d(src0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(src1, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(src2, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(src3, dst, 0, 0);
+ dst += dst_stride;
+
+ DUP4_ARG2(__lsx_vld, src, 0, src + src_stride, 0, src + src_stride2, 0,
+ src + src_stride3, 0, src0, src1, src2, src3);
+ src += src_stride4;
+
+ __lsx_vstelm_d(src0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(src1, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(src2, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(src3, dst, 0, 0);
+ dst += dst_stride;
+ }
+ } else if (0 == height % 4) {
+ for (cnt = (height >> 2); cnt--;) {
+ DUP4_ARG2(__lsx_vld, src, 0, src + src_stride, 0, src + src_stride2, 0,
+ src + src_stride3, 0, src0, src1, src2, src3);
+ src += src_stride4;
+
+ __lsx_vstelm_d(src0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(src1, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(src2, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(src3, dst, 0, 0);
+ dst += dst_stride;
+ }
+ }
+}
+
+void ff_put_vp8_pixels16_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int height, int mx, int my)
+{
+ int32_t width = 16;
+ int32_t cnt, loop_cnt;
+ uint8_t *src_tmp, *dst_tmp;
+ __m128i src0, src1, src2, src3, src4, src5, src6, src7;
+
+ ptrdiff_t src_stride2 = src_stride << 1;
+ ptrdiff_t src_stride3 = src_stride2 + src_stride;
+ ptrdiff_t src_stride4 = src_stride2 << 1;
+
+ ptrdiff_t dst_stride2 = dst_stride << 1;
+ ptrdiff_t dst_stride3 = dst_stride2 + dst_stride;
+ ptrdiff_t dst_stride4 = dst_stride2 << 1;
+
+ if (0 == height % 8) {
+ for (cnt = (width >> 4); cnt--;) {
+ src_tmp = src;
+ dst_tmp = dst;
+ for (loop_cnt = (height >> 3); loop_cnt--;) {
+ DUP4_ARG2(__lsx_vld, src_tmp, 0, src_tmp + src_stride, 0,
+ src_tmp + src_stride2, 0, src_tmp + src_stride3, 0,
+ src4, src5, src6, src7);
+ src_tmp += src_stride4;
+
+ __lsx_vst(src4, dst_tmp, 0);
+ __lsx_vst(src5, dst_tmp + dst_stride, 0);
+ __lsx_vst(src6, dst_tmp + dst_stride2, 0);
+ __lsx_vst(src7, dst_tmp + dst_stride3, 0);
+ dst_tmp += dst_stride4;
+
+ DUP4_ARG2(__lsx_vld, src_tmp, 0, src_tmp + src_stride, 0,
+ src_tmp + src_stride2, 0, src_tmp + src_stride3, 0,
+ src4, src5, src6, src7);
+ src_tmp += src_stride4;
+
+ __lsx_vst(src4, dst_tmp, 0);
+ __lsx_vst(src5, dst_tmp + dst_stride, 0);
+ __lsx_vst(src6, dst_tmp + dst_stride2, 0);
+ __lsx_vst(src7, dst_tmp + dst_stride3, 0);
+ dst_tmp += dst_stride4;
+ }
+ src += 16;
+ dst += 16;
+ }
+ } else if (0 == height % 4) {
+ for (cnt = (height >> 2); cnt--;) {
+ DUP4_ARG2(__lsx_vld, src, 0, src + src_stride, 0, src + src_stride2, 0,
+ src + src_stride3, 0, src0, src1, src2, src3);
+ src += src_stride4;
+
+ __lsx_vst(src0, dst, 0);
+ __lsx_vst(src1, dst + dst_stride, 0);
+ __lsx_vst(src2, dst + dst_stride2, 0);
+ __lsx_vst(src3, dst + dst_stride3, 0);
+ dst += dst_stride4;
+ }
+ }
+}
diff --git a/libavcodec/loongarch/vp8dsp_init_loongarch.c b/libavcodec/loongarch/vp8dsp_init_loongarch.c
new file mode 100644
index 0000000000..63da15b198
--- /dev/null
+++ b/libavcodec/loongarch/vp8dsp_init_loongarch.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2021 Loongson Technology Corporation Limited
+ * Contributed by Hecai Yuan <yuanhecai@loongson.cn>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * VP8 compatible video decoder
+ */
+
+#include "libavutil/loongarch/cpu.h"
+#include "libavcodec/vp8dsp.h"
+#include "libavutil/attributes.h"
+#include "vp8dsp_loongarch.h"
+
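+/* put_vp8_epel_pixels_tab[4 - log2(width)][v][h]: v/h select the vertical/horizontal
+ * filter (0 = none, 1 = 4-tap, 2 = 6-tap), see vp8dsp.h. */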
+#define VP8_MC_LOONGARCH_FUNC(IDX, SIZE) \
+ dsp->put_vp8_epel_pixels_tab[IDX][0][2] = ff_put_vp8_epel##SIZE##_h6_lsx; \
+ dsp->put_vp8_epel_pixels_tab[IDX][1][0] = ff_put_vp8_epel##SIZE##_v4_lsx; \
+ dsp->put_vp8_epel_pixels_tab[IDX][1][2] = ff_put_vp8_epel##SIZE##_h6v4_lsx; \
+ dsp->put_vp8_epel_pixels_tab[IDX][2][0] = ff_put_vp8_epel##SIZE##_v6_lsx; \
+ dsp->put_vp8_epel_pixels_tab[IDX][2][1] = ff_put_vp8_epel##SIZE##_h4v6_lsx; \
+ dsp->put_vp8_epel_pixels_tab[IDX][2][2] = ff_put_vp8_epel##SIZE##_h6v6_lsx;
+
+#define VP8_MC_LOONGARCH_COPY(IDX, SIZE) \
+ dsp->put_vp8_epel_pixels_tab[IDX][0][0] = ff_put_vp8_pixels##SIZE##_lsx; \
+ dsp->put_vp8_bilinear_pixels_tab[IDX][0][0] = ff_put_vp8_pixels##SIZE##_lsx;
+
+av_cold void ff_vp8dsp_init_loongarch(VP8DSPContext *dsp)
+{
+ int cpu_flags = av_get_cpu_flags();
+
+ if (have_lsx(cpu_flags)) {
+ VP8_MC_LOONGARCH_FUNC(0, 16);
+ VP8_MC_LOONGARCH_FUNC(1, 8);
+
+ VP8_MC_LOONGARCH_COPY(0, 16);
+ VP8_MC_LOONGARCH_COPY(1, 8);
+
+ dsp->vp8_v_loop_filter16y = ff_vp8_v_loop_filter16_lsx;
+ dsp->vp8_h_loop_filter16y = ff_vp8_h_loop_filter16_lsx;
+ dsp->vp8_v_loop_filter8uv = ff_vp8_v_loop_filter8uv_lsx;
+ dsp->vp8_h_loop_filter8uv = ff_vp8_h_loop_filter8uv_lsx;
+
+ dsp->vp8_v_loop_filter16y_inner = ff_vp8_v_loop_filter16_inner_lsx;
+ dsp->vp8_h_loop_filter16y_inner = ff_vp8_h_loop_filter16_inner_lsx;
+ }
+}
diff --git a/libavcodec/loongarch/vp8dsp_loongarch.h b/libavcodec/loongarch/vp8dsp_loongarch.h
new file mode 100644
index 0000000000..87e9509db9
--- /dev/null
+++ b/libavcodec/loongarch/vp8dsp_loongarch.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2021 Loongson Technology Corporation Limited
+ * Contributed by Hecai Yuan <yuanhecai@loongson.cn>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_LOONGARCH_VP8DSP_LOONGARCH_H
+#define AVCODEC_LOONGARCH_VP8DSP_LOONGARCH_H
+
+#include "libavcodec/vp8dsp.h"
+
+void ff_put_vp8_pixels8_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int h, int x, int y);
+void ff_put_vp8_pixels16_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int h, int x, int y);
+
+void ff_put_vp8_epel16_h6_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int h, int mx, int my);
+void ff_put_vp8_epel16_v4_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int h, int mx, int my);
+void ff_put_vp8_epel16_v6_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int h, int mx, int my);
+void ff_put_vp8_epel16_h6v4_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int h, int mx, int my);
+void ff_put_vp8_epel16_h4v6_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int h, int mx, int my);
+void ff_put_vp8_epel16_h6v6_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int h, int mx, int my);
+
+void ff_put_vp8_epel8_v4_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int h, int mx, int my);
+void ff_put_vp8_epel8_v6_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int h, int mx, int my);
+void ff_put_vp8_epel8_h6v4_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int h, int mx, int my);
+void ff_put_vp8_epel8_h4v6_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int h, int mx, int my);
+void ff_put_vp8_epel8_h6v6_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int h, int mx, int my);
+
+void ff_put_vp8_epel8_h6_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ uint8_t *src, ptrdiff_t src_stride,
+ int h, int mx, int my);
+
+/* loop filter */
+void ff_vp8_v_loop_filter16_inner_lsx(uint8_t *dst, ptrdiff_t stride,
+ int32_t e, int32_t i, int32_t h);
+void ff_vp8_h_loop_filter16_inner_lsx(uint8_t *src, ptrdiff_t stride,
+ int32_t e, int32_t i, int32_t h);
+
+void ff_vp8_v_loop_filter16_lsx(uint8_t *dst, ptrdiff_t stride,
+ int flim_e, int flim_i, int hev_thresh);
+void ff_vp8_h_loop_filter16_lsx(uint8_t *dst, ptrdiff_t stride,
+ int flim_e, int flim_i, int hev_thresh);
+void ff_vp8_h_loop_filter8uv_lsx(uint8_t *dst_u, uint8_t *dst_v,
+ ptrdiff_t stride,
+ int flim_e, int flim_i, int hev_thresh);
+void ff_vp8_v_loop_filter8uv_lsx(uint8_t *dst_u, uint8_t *dst_v,
+ ptrdiff_t stride,
+ int flim_e, int flim_i, int hev_thresh);
+
+#endif // #ifndef AVCODEC_LOONGARCH_VP8DSP_LOONGARCH_H
diff --git a/libavcodec/vp8dsp.c b/libavcodec/vp8dsp.c
index 4ff63d0784..732a483b62 100644
--- a/libavcodec/vp8dsp.c
+++ b/libavcodec/vp8dsp.c
@@ -743,5 +743,7 @@ av_cold void ff_vp8dsp_init(VP8DSPContext *dsp)
ff_vp8dsp_init_x86(dsp);
if (ARCH_MIPS)
ff_vp8dsp_init_mips(dsp);
+ if (ARCH_LOONGARCH)
+ ff_vp8dsp_init_loongarch(dsp);
}
#endif /* CONFIG_VP8_DECODER */
diff --git a/libavcodec/vp8dsp.h b/libavcodec/vp8dsp.h
index cfe1524b0b..7c6208df39 100644
--- a/libavcodec/vp8dsp.h
+++ b/libavcodec/vp8dsp.h
@@ -101,6 +101,7 @@ void ff_vp8dsp_init_aarch64(VP8DSPContext *c);
void ff_vp8dsp_init_arm(VP8DSPContext *c);
void ff_vp8dsp_init_x86(VP8DSPContext *c);
void ff_vp8dsp_init_mips(VP8DSPContext *c);
+void ff_vp8dsp_init_loongarch(VP8DSPContext *c);
#define IS_VP7 1
#define IS_VP8 0
--
2.20.1
* [FFmpeg-devel] [PATCH 2/4] avcodec: [loongarch] Optimize vp9_mc/intra with LSX.
2021-12-18 14:27 [FFmpeg-devel] Optimize VP8,VP9,WMV3 decoding for loongarch Hao Chen
2021-12-18 14:27 ` [FFmpeg-devel] [PATCH 1/4] avcodec: [loongarch] Optimize vp8_lpf/mc with LSX Hao Chen
@ 2021-12-18 14:27 ` Hao Chen
2021-12-18 18:47 ` Jean-Baptiste Kempf
2021-12-18 14:27 ` [FFmpeg-devel] [PATCH 3/4] avcodec: [loongarch] Optimize vp9_lpf/idct " Hao Chen
` (2 subsequent siblings)
4 siblings, 1 reply; 10+ messages in thread
From: Hao Chen @ 2021-12-18 14:27 UTC (permalink / raw)
To: ffmpeg-devel
ffmpeg -i ../10_vp9_1080p_30fps_3Mbps.webm -f rawvideo -y /dev/null -an
before: 170fps
after : 294fps
---
libavcodec/loongarch/Makefile | 3 +
libavcodec/loongarch/vp9_intra_lsx.c | 653 +++++
libavcodec/loongarch/vp9_mc_lsx.c | 2480 ++++++++++++++++++
libavcodec/loongarch/vp9dsp_init_loongarch.c | 97 +
libavcodec/loongarch/vp9dsp_loongarch.h | 144 +
libavcodec/vp9dsp.c | 1 +
libavcodec/vp9dsp.h | 1 +
7 files changed, 3379 insertions(+)
create mode 100644 libavcodec/loongarch/vp9_intra_lsx.c
create mode 100644 libavcodec/loongarch/vp9_mc_lsx.c
create mode 100644 libavcodec/loongarch/vp9dsp_init_loongarch.c
create mode 100644 libavcodec/loongarch/vp9dsp_loongarch.h
diff --git a/libavcodec/loongarch/Makefile b/libavcodec/loongarch/Makefile
index 4e1d827e19..6fcebe40a3 100644
--- a/libavcodec/loongarch/Makefile
+++ b/libavcodec/loongarch/Makefile
@@ -3,6 +3,7 @@ OBJS-$(CONFIG_H264QPEL) += loongarch/h264qpel_init_loongarch.o
OBJS-$(CONFIG_H264DSP) += loongarch/h264dsp_init_loongarch.o
OBJS-$(CONFIG_H264PRED) += loongarch/h264_intrapred_init_loongarch.o
OBJS-$(CONFIG_VP8_DECODER) += loongarch/vp8dsp_init_loongarch.o
+OBJS-$(CONFIG_VP9_DECODER) += loongarch/vp9dsp_init_loongarch.o
LASX-OBJS-$(CONFIG_H264CHROMA) += loongarch/h264chroma_lasx.o
LASX-OBJS-$(CONFIG_H264QPEL) += loongarch/h264qpel_lasx.o
LASX-OBJS-$(CONFIG_H264DSP) += loongarch/h264dsp_lasx.o \
@@ -11,3 +12,5 @@ LASX-OBJS-$(CONFIG_H264DSP) += loongarch/h264dsp_lasx.o \
LASX-OBJS-$(CONFIG_H264PRED) += loongarch/h264_intrapred_lasx.o
LSX-OBJS-$(CONFIG_VP8_DECODER) += loongarch/vp8_mc_lsx.o \
loongarch/vp8_lpf_lsx.o
+LSX-OBJS-$(CONFIG_VP9_DECODER) += loongarch/vp9_mc_lsx.o \
+ loongarch/vp9_intra_lsx.o
diff --git a/libavcodec/loongarch/vp9_intra_lsx.c b/libavcodec/loongarch/vp9_intra_lsx.c
new file mode 100644
index 0000000000..d3f32646f3
--- /dev/null
+++ b/libavcodec/loongarch/vp9_intra_lsx.c
@@ -0,0 +1,653 @@
+/*
+ * Copyright (c) 2021 Loongson Technology Corporation Limited
+ * Contributed by Hao Chen <chenhao@loongson.cn>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavcodec/vp9dsp.h"
+#include "libavutil/loongarch/loongson_intrinsics.h"
+#include "vp9dsp_loongarch.h"
+
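+/* Store eight 16-byte rows: four at _dst via the precomputed strides, then,
+ * after advancing _dst by _stride4, the remaining four. */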
+#define LSX_ST_8(_dst0, _dst1, _dst2, _dst3, _dst4, \
+ _dst5, _dst6, _dst7, _dst, _stride, \
+ _stride2, _stride3, _stride4) \
+{ \
+ __lsx_vst(_dst0, _dst, 0); \
+ __lsx_vstx(_dst1, _dst, _stride); \
+ __lsx_vstx(_dst2, _dst, _stride2); \
+ __lsx_vstx(_dst3, _dst, _stride3); \
+ _dst += _stride4; \
+ __lsx_vst(_dst4, _dst, 0); \
+ __lsx_vstx(_dst5, _dst, _stride); \
+ __lsx_vstx(_dst6, _dst, _stride2); \
+ __lsx_vstx(_dst7, _dst, _stride3); \
+}
+
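+/* Store eight 32-byte rows: each vector is written twice per row (offsets 0
+ * and 16), advancing _dst by one stride per row. */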
+#define LSX_ST_8X16(_dst0, _dst1, _dst2, _dst3, _dst4, \
+ _dst5, _dst6, _dst7, _dst, _stride) \
+{ \
+ __lsx_vst(_dst0, _dst, 0); \
+ __lsx_vst(_dst0, _dst, 16); \
+ _dst += _stride; \
+ __lsx_vst(_dst1, _dst, 0); \
+ __lsx_vst(_dst1, _dst, 16); \
+ _dst += _stride; \
+ __lsx_vst(_dst2, _dst, 0); \
+ __lsx_vst(_dst2, _dst, 16); \
+ _dst += _stride; \
+ __lsx_vst(_dst3, _dst, 0); \
+ __lsx_vst(_dst3, _dst, 16); \
+ _dst += _stride; \
+ __lsx_vst(_dst4, _dst, 0); \
+ __lsx_vst(_dst4, _dst, 16); \
+ _dst += _stride; \
+ __lsx_vst(_dst5, _dst, 0); \
+ __lsx_vst(_dst5, _dst, 16); \
+ _dst += _stride; \
+ __lsx_vst(_dst6, _dst, 0); \
+ __lsx_vst(_dst6, _dst, 16); \
+ _dst += _stride; \
+ __lsx_vst(_dst7, _dst, 0); \
+ __lsx_vst(_dst7, _dst, 16); \
+ _dst += _stride; \
+}
+
+void ff_vert_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *left,
+ const uint8_t *src)
+{
+ __m128i src0;
+ ptrdiff_t stride2 = dst_stride << 1;
+ ptrdiff_t stride3 = stride2 + dst_stride;
+ ptrdiff_t stride4 = stride2 << 1;
+ src0 = __lsx_vld(src, 0);
+ LSX_ST_8(src0, src0, src0, src0, src0, src0, src0, src0, dst,
+ dst_stride, stride2, stride3, stride4);
+ dst += stride4;
+ LSX_ST_8(src0, src0, src0, src0, src0, src0, src0, src0, dst,
+ dst_stride, stride2, stride3, stride4);
+}
+
+void ff_vert_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *left,
+ const uint8_t *src)
+{
+ uint32_t row;
+ __m128i src0, src1;
+
+ DUP2_ARG2(__lsx_vld, src, 0, src, 16, src0, src1);
+ for (row = 32; row--;) {
+ __lsx_vst(src0, dst, 0);
+ __lsx_vst(src1, dst, 16);
+ dst += dst_stride;
+ }
+}
+
+void ff_hor_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *src,
+ const uint8_t *top)
+{
+ __m128i src0, src1, src2, src3, src4, src5, src6, src7;
+ __m128i src8, src9, src10, src11, src12, src13, src14, src15;
+ ptrdiff_t stride2 = dst_stride << 1;
+ ptrdiff_t stride3 = stride2 + dst_stride;
+ ptrdiff_t stride4 = stride2 << 1;
+
+ src15 = __lsx_vldrepl_b(src, 0);
+ src14 = __lsx_vldrepl_b(src, 1);
+ src13 = __lsx_vldrepl_b(src, 2);
+ src12 = __lsx_vldrepl_b(src, 3);
+ src11 = __lsx_vldrepl_b(src, 4);
+ src10 = __lsx_vldrepl_b(src, 5);
+ src9 = __lsx_vldrepl_b(src, 6);
+ src8 = __lsx_vldrepl_b(src, 7);
+ src7 = __lsx_vldrepl_b(src, 8);
+ src6 = __lsx_vldrepl_b(src, 9);
+ src5 = __lsx_vldrepl_b(src, 10);
+ src4 = __lsx_vldrepl_b(src, 11);
+ src3 = __lsx_vldrepl_b(src, 12);
+ src2 = __lsx_vldrepl_b(src, 13);
+ src1 = __lsx_vldrepl_b(src, 14);
+ src0 = __lsx_vldrepl_b(src, 15);
+ LSX_ST_8(src0, src1, src2, src3, src4, src5, src6, src7, dst,
+ dst_stride, stride2, stride3, stride4);
+ dst += stride4;
+ LSX_ST_8(src8, src9, src10, src11, src12, src13, src14, src15, dst,
+ dst_stride, stride2, stride3, stride4);
+}
+
+void ff_hor_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *src,
+ const uint8_t *top)
+{
+ __m128i src0, src1, src2, src3, src4, src5, src6, src7;
+ __m128i src8, src9, src10, src11, src12, src13, src14, src15;
+ __m128i src16, src17, src18, src19, src20, src21, src22, src23;
+ __m128i src24, src25, src26, src27, src28, src29, src30, src31;
+
+ src31 = __lsx_vldrepl_b(src, 0);
+ src30 = __lsx_vldrepl_b(src, 1);
+ src29 = __lsx_vldrepl_b(src, 2);
+ src28 = __lsx_vldrepl_b(src, 3);
+ src27 = __lsx_vldrepl_b(src, 4);
+ src26 = __lsx_vldrepl_b(src, 5);
+ src25 = __lsx_vldrepl_b(src, 6);
+ src24 = __lsx_vldrepl_b(src, 7);
+ src23 = __lsx_vldrepl_b(src, 8);
+ src22 = __lsx_vldrepl_b(src, 9);
+ src21 = __lsx_vldrepl_b(src, 10);
+ src20 = __lsx_vldrepl_b(src, 11);
+ src19 = __lsx_vldrepl_b(src, 12);
+ src18 = __lsx_vldrepl_b(src, 13);
+ src17 = __lsx_vldrepl_b(src, 14);
+ src16 = __lsx_vldrepl_b(src, 15);
+ src15 = __lsx_vldrepl_b(src, 16);
+ src14 = __lsx_vldrepl_b(src, 17);
+ src13 = __lsx_vldrepl_b(src, 18);
+ src12 = __lsx_vldrepl_b(src, 19);
+ src11 = __lsx_vldrepl_b(src, 20);
+ src10 = __lsx_vldrepl_b(src, 21);
+ src9 = __lsx_vldrepl_b(src, 22);
+ src8 = __lsx_vldrepl_b(src, 23);
+ src7 = __lsx_vldrepl_b(src, 24);
+ src6 = __lsx_vldrepl_b(src, 25);
+ src5 = __lsx_vldrepl_b(src, 26);
+ src4 = __lsx_vldrepl_b(src, 27);
+ src3 = __lsx_vldrepl_b(src, 28);
+ src2 = __lsx_vldrepl_b(src, 29);
+ src1 = __lsx_vldrepl_b(src, 30);
+ src0 = __lsx_vldrepl_b(src, 31);
+ LSX_ST_8X16(src0, src1, src2, src3, src4, src5, src6, src7,
+ dst, dst_stride);
+ LSX_ST_8X16(src8, src9, src10, src11, src12, src13, src14, src15,
+ dst, dst_stride);
+ LSX_ST_8X16(src16, src17, src18, src19, src20, src21, src22, src23,
+ dst, dst_stride);
+ LSX_ST_8X16(src24, src25, src26, src27, src28, src29, src30, src31,
+ dst, dst_stride);
+}
+
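+/* DC prediction: fill the block with the rounded average of the top and left
+ * edge pixels. */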
+void ff_dc_4x4_lsx(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *src_left,
+ const uint8_t *src_top)
+{
+ __m128i tmp0, tmp1, dst0;
+
+ tmp0 = __lsx_vldrepl_w(src_top, 0);
+ tmp1 = __lsx_vldrepl_w(src_left, 0);
+ dst0 = __lsx_vilvl_w(tmp1, tmp0);
+ dst0 = __lsx_vhaddw_hu_bu(dst0, dst0);
+ dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);
+ dst0 = __lsx_vhaddw_du_wu(dst0, dst0);
+ dst0 = __lsx_vsrari_w(dst0, 3);
+ dst0 = __lsx_vshuf4i_b(dst0, 0);
+ __lsx_vstelm_w(dst0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_w(dst0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_w(dst0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_w(dst0, dst, 0, 0);
+}
+
+#define INTRA_DC_TL_4X4(dir) \
+void ff_dc_##dir##_4x4_lsx(uint8_t *dst, ptrdiff_t dst_stride, \
+ const uint8_t *left, \
+ const uint8_t *top) \
+{ \
+ __m128i tmp0, dst0; \
+ \
+ tmp0 = __lsx_vldrepl_w(dir, 0); \
+ dst0 = __lsx_vhaddw_hu_bu(tmp0, tmp0); \
+ dst0 = __lsx_vhaddw_wu_hu(dst0, dst0); \
+ dst0 = __lsx_vsrari_w(dst0, 2); \
+ dst0 = __lsx_vshuf4i_b(dst0, 0); \
+ __lsx_vstelm_w(dst0, dst, 0, 0); \
+ dst += dst_stride; \
+ __lsx_vstelm_w(dst0, dst, 0, 0); \
+ dst += dst_stride; \
+ __lsx_vstelm_w(dst0, dst, 0, 0); \
+ dst += dst_stride; \
+ __lsx_vstelm_w(dst0, dst, 0, 0); \
+}
+INTRA_DC_TL_4X4(top);
+INTRA_DC_TL_4X4(left);
+
+void ff_dc_8x8_lsx(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *src_left,
+ const uint8_t *src_top)
+{
+ __m128i tmp0, tmp1, dst0;
+
+ tmp0 = __lsx_vldrepl_d(src_top, 0);
+ tmp1 = __lsx_vldrepl_d(src_left, 0);
+ dst0 = __lsx_vilvl_d(tmp1, tmp0);
+ dst0 = __lsx_vhaddw_hu_bu(dst0, dst0);
+ dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);
+ dst0 = __lsx_vhaddw_du_wu(dst0, dst0);
+ dst0 = __lsx_vhaddw_qu_du(dst0, dst0);
+ dst0 = __lsx_vsrari_w(dst0, 4);
+ dst0 = __lsx_vreplvei_b(dst0, 0);
+ __lsx_vstelm_d(dst0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(dst0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(dst0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(dst0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(dst0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(dst0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(dst0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(dst0, dst, 0, 0);
+}
+
+#define INTRA_DC_TL_8X8(dir) \
+void ff_dc_##dir##_8x8_lsx(uint8_t *dst, ptrdiff_t dst_stride, \
+ const uint8_t *left, \
+ const uint8_t *top) \
+{ \
+ __m128i tmp0, dst0; \
+ \
+ tmp0 = __lsx_vldrepl_d(dir, 0); \
+ dst0 = __lsx_vhaddw_hu_bu(tmp0, tmp0); \
+ dst0 = __lsx_vhaddw_wu_hu(dst0, dst0); \
+ dst0 = __lsx_vhaddw_du_wu(dst0, dst0); \
+ dst0 = __lsx_vsrari_w(dst0, 3); \
+ dst0 = __lsx_vreplvei_b(dst0, 0); \
+ __lsx_vstelm_d(dst0, dst, 0, 0); \
+ dst += dst_stride; \
+ __lsx_vstelm_d(dst0, dst, 0, 0); \
+ dst += dst_stride; \
+ __lsx_vstelm_d(dst0, dst, 0, 0); \
+ dst += dst_stride; \
+ __lsx_vstelm_d(dst0, dst, 0, 0); \
+ dst += dst_stride; \
+ __lsx_vstelm_d(dst0, dst, 0, 0); \
+ dst += dst_stride; \
+ __lsx_vstelm_d(dst0, dst, 0, 0); \
+ dst += dst_stride; \
+ __lsx_vstelm_d(dst0, dst, 0, 0); \
+ dst += dst_stride; \
+ __lsx_vstelm_d(dst0, dst, 0, 0); \
+}
+
+INTRA_DC_TL_8X8(top);
+INTRA_DC_TL_8X8(left);
+
+void ff_dc_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ const uint8_t *src_left, const uint8_t *src_top)
+{
+ __m128i tmp0, tmp1, dst0;
+ ptrdiff_t stride2 = dst_stride << 1;
+ ptrdiff_t stride3 = stride2 + dst_stride;
+ ptrdiff_t stride4 = stride2 << 1;
+
+ tmp0 = __lsx_vld(src_top, 0);
+ tmp1 = __lsx_vld(src_left, 0);
+ DUP2_ARG2(__lsx_vhaddw_hu_bu, tmp0, tmp0, tmp1, tmp1, tmp0, tmp1);
+ dst0 = __lsx_vadd_h(tmp0, tmp1);
+ dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);
+ dst0 = __lsx_vhaddw_du_wu(dst0, dst0);
+ dst0 = __lsx_vhaddw_qu_du(dst0, dst0);
+ dst0 = __lsx_vsrari_w(dst0, 5);
+ dst0 = __lsx_vreplvei_b(dst0, 0);
+ LSX_ST_8(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst,
+ dst_stride, stride2, stride3, stride4);
+ dst += stride4;
+ LSX_ST_8(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst,
+ dst_stride, stride2, stride3, stride4);
+}
+
+#define INTRA_DC_TL_16X16(dir) \
+void ff_dc_##dir##_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride, \
+ const uint8_t *left, \
+ const uint8_t *top) \
+{ \
+ __m128i tmp0, dst0; \
+ ptrdiff_t stride2 = dst_stride << 1; \
+ ptrdiff_t stride3 = stride2 + dst_stride; \
+ ptrdiff_t stride4 = stride2 << 1; \
+ \
+ tmp0 = __lsx_vld(dir, 0); \
+ dst0 = __lsx_vhaddw_hu_bu(tmp0, tmp0); \
+ dst0 = __lsx_vhaddw_wu_hu(dst0, dst0); \
+ dst0 = __lsx_vhaddw_du_wu(dst0, dst0); \
+ dst0 = __lsx_vhaddw_qu_du(dst0, dst0); \
+ dst0 = __lsx_vsrari_w(dst0, 4); \
+ dst0 = __lsx_vreplvei_b(dst0, 0); \
+ LSX_ST_8(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst, \
+ dst_stride, stride2, stride3, stride4); \
+ dst += stride4; \
+ LSX_ST_8(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst, \
+ dst_stride, stride2, stride3, stride4); \
+}
+
+INTRA_DC_TL_16X16(top);
+INTRA_DC_TL_16X16(left);
+
+void ff_dc_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ const uint8_t *src_left, const uint8_t *src_top)
+{
+ __m128i tmp0, tmp1, tmp2, tmp3, dst0;
+
+ DUP2_ARG2(__lsx_vld, src_top, 0, src_top, 16, tmp0, tmp1);
+ DUP2_ARG2(__lsx_vld, src_left, 0, src_left, 16, tmp2, tmp3);
+ DUP4_ARG2(__lsx_vhaddw_hu_bu, tmp0, tmp0, tmp1, tmp1, tmp2, tmp2,
+ tmp3, tmp3, tmp0, tmp1, tmp2, tmp3);
+ DUP2_ARG2(__lsx_vadd_h, tmp0, tmp1, tmp2, tmp3, tmp0, tmp1);
+ dst0 = __lsx_vadd_h(tmp0, tmp1);
+ dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);
+ dst0 = __lsx_vhaddw_du_wu(dst0, dst0);
+ dst0 = __lsx_vhaddw_qu_du(dst0, dst0);
+ dst0 = __lsx_vsrari_w(dst0, 6);
+ dst0 = __lsx_vreplvei_b(dst0, 0);
+ LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,
+ dst, dst_stride);
+ LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,
+ dst, dst_stride);
+ LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,
+ dst, dst_stride);
+ LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,
+ dst, dst_stride);
+}
+
+#define INTRA_DC_TL_32X32(dir) \
+void ff_dc_##dir##_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride, \
+ const uint8_t *left, \
+ const uint8_t *top) \
+{ \
+ __m128i tmp0, tmp1, dst0; \
+ \
+ DUP2_ARG2(__lsx_vld, dir, 0, dir, 16, tmp0, tmp1); \
+ DUP2_ARG2(__lsx_vhaddw_hu_bu, tmp0, tmp0, tmp1, tmp1, tmp0, tmp1); \
+ dst0 = __lsx_vadd_h(tmp0, tmp1); \
+ dst0 = __lsx_vhaddw_wu_hu(dst0, dst0); \
+ dst0 = __lsx_vhaddw_du_wu(dst0, dst0); \
+ dst0 = __lsx_vhaddw_qu_du(dst0, dst0); \
+ dst0 = __lsx_vsrari_w(dst0, 5); \
+ dst0 = __lsx_vreplvei_b(dst0, 0); \
+ LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, \
+ dst, dst_stride); \
+ LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, \
+ dst, dst_stride); \
+ LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, \
+ dst, dst_stride); \
+ LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, \
+ dst, dst_stride); \
+}
+
+INTRA_DC_TL_32X32(top);
+INTRA_DC_TL_32X32(left);
+
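+/* Constant-value DC predictors (127/128/129), used when the required edge
+ * pixels are not available (see vp9dsp.c). */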
+#define INTRA_PREDICT_VALDC_16X16_LSX(val) \
+void ff_dc_##val##_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride, \
+ const uint8_t *left, const uint8_t *top) \
+{ \
+ __m128i out = __lsx_vldi(val); \
+ ptrdiff_t stride2 = dst_stride << 1; \
+ ptrdiff_t stride3 = stride2 + dst_stride; \
+ ptrdiff_t stride4 = stride2 << 1; \
+ \
+ LSX_ST_8(out, out, out, out, out, out, out, out, dst, \
+ dst_stride, stride2, stride3, stride4); \
+ dst += stride4; \
+ LSX_ST_8(out, out, out, out, out, out, out, out, dst, \
+ dst_stride, stride2, stride3, stride4); \
+}
+
+INTRA_PREDICT_VALDC_16X16_LSX(127);
+INTRA_PREDICT_VALDC_16X16_LSX(128);
+INTRA_PREDICT_VALDC_16X16_LSX(129);
+
+#define INTRA_PREDICT_VALDC_32X32_LSX(val) \
+void ff_dc_##val##_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride, \
+ const uint8_t *left, const uint8_t *top) \
+{ \
+ __m128i out = __lsx_vldi(val); \
+ \
+ LSX_ST_8X16(out, out, out, out, out, out, out, out, dst, dst_stride);\
+ LSX_ST_8X16(out, out, out, out, out, out, out, out, dst, dst_stride);\
+ LSX_ST_8X16(out, out, out, out, out, out, out, out, dst, dst_stride);\
+ LSX_ST_8X16(out, out, out, out, out, out, out, out, dst, dst_stride);\
+}
+
+INTRA_PREDICT_VALDC_32X32_LSX(127);
+INTRA_PREDICT_VALDC_32X32_LSX(128);
+INTRA_PREDICT_VALDC_32X32_LSX(129);
+
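+/* TM (TrueMotion) prediction: pred(x, y) = clip(left[y] + top[x] - top_left). */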
+void ff_tm_4x4_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ const uint8_t *src_left, const uint8_t *src_top_ptr)
+{
+ uint8_t top_left = src_top_ptr[-1];
+ __m128i tmp0, tmp1, tmp2, tmp3, reg0, reg1;
+ __m128i src0, src1, src2, src3;
+ __m128i dst0, dst1, dst2, dst3;
+
+ reg0 = __lsx_vreplgr2vr_h(top_left);
+ reg1 = __lsx_vld(src_top_ptr, 0);
+ DUP4_ARG2(__lsx_vldrepl_b, src_left, 0, src_left, 1, src_left, 2, src_left,
+ 3, tmp3, tmp2, tmp1, tmp0);
+ DUP4_ARG2(__lsx_vilvl_b, tmp0, reg1, tmp1, reg1, tmp2, reg1, tmp3, reg1,
+ src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vhaddw_hu_bu, src0, src0, src1, src1, src2, src2, src3,
+ src3, dst0, dst1, dst2, dst3);
+ DUP4_ARG2(__lsx_vssub_hu, dst0, reg0, dst1, reg0, dst2, reg0, dst3, reg0,
+ dst0, dst1, dst2, dst3);
+ DUP4_ARG2(__lsx_vsat_hu, dst0, 7, dst1, 7, dst2, 7, dst3, 7,
+ dst0, dst1, dst2, dst3);
+ DUP2_ARG2(__lsx_vpickev_b, dst1, dst0, dst3, dst2, dst0, dst1);
+ __lsx_vstelm_w(dst0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_w(dst0, dst, 0, 2);
+ dst += dst_stride;
+ __lsx_vstelm_w(dst1, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_w(dst1, dst, 0, 2);
+}
+
+void ff_tm_8x8_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ const uint8_t *src_left, const uint8_t *src_top_ptr)
+{
+ uint8_t top_left = src_top_ptr[-1];
+ __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ __m128i src0, src1, src2, src3, src4, src5, src6, src7;
+ __m128i reg0, reg1;
+
+ reg0 = __lsx_vreplgr2vr_h(top_left);
+ reg1 = __lsx_vld(src_top_ptr, 0);
+ DUP4_ARG2(__lsx_vldrepl_b, src_left, 0, src_left, 1, src_left, 2, src_left,
+ 3, tmp7, tmp6, tmp5, tmp4);
+ DUP4_ARG2(__lsx_vldrepl_b, src_left, 4, src_left, 5, src_left, 6, src_left,
+ 7, tmp3, tmp2, tmp1, tmp0);
+ DUP4_ARG2(__lsx_vilvl_b, tmp0, reg1, tmp1, reg1, tmp2, reg1, tmp3, reg1,
+ src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vilvl_b, tmp4, reg1, tmp5, reg1, tmp6, reg1, tmp7, reg1,
+ src4, src5, src6, src7);
+ DUP4_ARG2(__lsx_vhaddw_hu_bu, src0, src0, src1, src1, src2, src2, src3,
+ src3, src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vhaddw_hu_bu, src4, src4, src5, src5, src6, src6, src7,
+ src7, src4, src5, src6, src7);
+ DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0, src3, reg0,
+ src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0, src7, reg0,
+ src4, src5, src6, src7);
+ DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7,
+ src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7,
+ src4, src5, src6, src7);
+ DUP4_ARG2(__lsx_vpickev_b, src1, src0, src3, src2, src5, src4, src7, src6,
+ src0, src1, src2, src3);
+ __lsx_vstelm_d(src0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(src0, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_d(src1, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(src1, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_d(src2, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(src2, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_d(src3, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(src3, dst, 0, 1);
+}
+
+void ff_tm_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ const uint8_t *src_left, const uint8_t *src_top_ptr)
+{
+ uint8_t top_left = src_top_ptr[-1];
+ __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ __m128i tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15;
+ __m128i src0, src1, src2, src3, src4, src5, src6, src7;
+ __m128i reg0, reg1;
+ ptrdiff_t stride2 = dst_stride << 1;
+ ptrdiff_t stride3 = stride2 + dst_stride;
+ ptrdiff_t stride4 = stride2 << 1;
+
+ reg0 = __lsx_vreplgr2vr_h(top_left);
+ reg1 = __lsx_vld(src_top_ptr, 0);
+ DUP4_ARG2(__lsx_vldrepl_b, src_left, 0, src_left, 1, src_left, 2, src_left,
+ 3, tmp15, tmp14, tmp13, tmp12);
+ DUP4_ARG2(__lsx_vldrepl_b, src_left, 4, src_left, 5, src_left, 6, src_left,
+ 7, tmp11, tmp10, tmp9, tmp8);
+ DUP4_ARG2(__lsx_vldrepl_b, src_left, 8, src_left, 9, src_left, 10,
+ src_left, 11, tmp7, tmp6, tmp5, tmp4);
+ DUP4_ARG2(__lsx_vldrepl_b, src_left, 12, src_left, 13, src_left, 14,
+ src_left, 15, tmp3, tmp2, tmp1, tmp0);
+ DUP4_ARG2(__lsx_vaddwev_h_bu, tmp0, reg1, tmp1, reg1, tmp2, reg1, tmp3,
+ reg1, src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vaddwod_h_bu, tmp0, reg1, tmp1, reg1, tmp2, reg1, tmp3,
+ reg1, src4, src5, src6, src7);
+ DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0, src3, reg0,
+ src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0, src7, reg0,
+ src4, src5, src6, src7);
+ DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7,
+ src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7,
+ src4, src5, src6, src7);
+ DUP4_ARG2(__lsx_vpackev_b, src4, src0, src5, src1, src6, src2, src7, src3,
+ tmp0, tmp1, tmp2, tmp3);
+ DUP4_ARG2(__lsx_vaddwev_h_bu, tmp4, reg1, tmp5, reg1, tmp6, reg1, tmp7,
+ reg1, src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vaddwod_h_bu, tmp4, reg1, tmp5, reg1, tmp6, reg1, tmp7,
+ reg1, src4, src5, src6, src7);
+ DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0, src3, reg0,
+ src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0, src7, reg0,
+ src4, src5, src6, src7);
+ DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7,
+ src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7,
+ src4, src5, src6, src7);
+ DUP4_ARG2(__lsx_vpackev_b, src4, src0, src5, src1, src6, src2, src7, src3,
+ tmp4, tmp5, tmp6, tmp7);
+ DUP4_ARG2(__lsx_vaddwev_h_bu, tmp8, reg1, tmp9, reg1, tmp10, reg1, tmp11,
+ reg1, src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vaddwod_h_bu, tmp8, reg1, tmp9, reg1, tmp10, reg1, tmp11,
+ reg1, src4, src5, src6, src7);
+ DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0, src3, reg0,
+ src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0, src7, reg0,
+ src4, src5, src6, src7);
+ DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7,
+ src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7,
+ src4, src5, src6, src7);
+ DUP4_ARG2(__lsx_vpackev_b, src4, src0, src5, src1, src6, src2, src7, src3,
+ tmp8, tmp9, tmp10, tmp11);
+ DUP4_ARG2(__lsx_vaddwev_h_bu, tmp12, reg1, tmp13, reg1, tmp14, reg1,
+ tmp15, reg1, src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vaddwod_h_bu, tmp12, reg1, tmp13, reg1, tmp14, reg1,
+ tmp15, reg1, src4, src5, src6, src7);
+ DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0, src3, reg0,
+ src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0, src7, reg0,
+ src4, src5, src6, src7);
+ DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7,
+ src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7,
+ src4, src5, src6, src7);
+ DUP4_ARG2(__lsx_vpackev_b, src4, src0, src5, src1, src6, src2, src7, src3,
+ tmp12, tmp13, tmp14, tmp15);
+ LSX_ST_8(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, dst,
+ dst_stride, stride2, stride3, stride4);
+ dst += stride4;
+ LSX_ST_8(tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, dst,
+ dst_stride, stride2, stride3, stride4);
+}
+
+void ff_tm_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride,
+ const uint8_t *src_left, const uint8_t *src_top_ptr)
+{
+ uint8_t top_left = src_top_ptr[-1];
+ uint32_t loop_cnt;
+ __m128i tmp0, tmp1, tmp2, tmp3, reg0, reg1, reg2;
+ __m128i src0, src1, src2, src3, src4, src5, src6, src7;
+ __m128i dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
+
+ reg0 = __lsx_vreplgr2vr_h(top_left);
+ DUP2_ARG2(__lsx_vld, src_top_ptr, 0, src_top_ptr, 16, reg1, reg2);
+
+ src_left += 28;
+ for (loop_cnt = 8; loop_cnt--;) {
+ DUP4_ARG2(__lsx_vldrepl_b, src_left, 0, src_left, 1, src_left, 2,
+ src_left, 3, tmp3, tmp2, tmp1, tmp0);
+ src_left -= 4;
+ DUP4_ARG2(__lsx_vaddwev_h_bu, tmp0, reg1, tmp1, reg1, tmp2, reg1,
+ tmp3, reg1, src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vaddwod_h_bu, tmp0, reg1, tmp1, reg1, tmp2, reg1,
+ tmp3, reg1, src4, src5, src6, src7);
+ DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0, src3,
+ reg0, src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0, src7,
+ reg0, src4, src5, src6, src7);
+ DUP4_ARG2(__lsx_vaddwev_h_bu, tmp0, reg2, tmp1, reg2, tmp2, reg2,
+ tmp3, reg2, dst0, dst1, dst2, dst3);
+ DUP4_ARG2(__lsx_vaddwod_h_bu, tmp0, reg2, tmp1, reg2, tmp2, reg2,
+ tmp3, reg2, dst4, dst5, dst6, dst7);
+ DUP4_ARG2(__lsx_vssub_hu, dst0, reg0, dst1, reg0, dst2, reg0, dst3,
+ reg0, dst0, dst1, dst2, dst3);
+ DUP4_ARG2(__lsx_vssub_hu, dst4, reg0, dst5, reg0, dst6, reg0, dst7,
+ reg0, dst4, dst5, dst6, dst7);
+ DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7,
+ src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7,
+ src4, src5, src6, src7);
+ DUP4_ARG2(__lsx_vsat_hu, dst0, 7, dst1, 7, dst2, 7, dst3, 7,
+ dst0, dst1, dst2, dst3);
+ DUP4_ARG2(__lsx_vsat_hu, dst4, 7, dst5, 7, dst6, 7, dst7, 7,
+ dst4, dst5, dst6, dst7);
+ DUP4_ARG2(__lsx_vpackev_b, src4, src0, src5, src1, src6, src2, src7,
+ src3, src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vpackev_b, dst4, dst0, dst5, dst1, dst6, dst2, dst7,
+ dst3, dst0, dst1, dst2, dst3);
+ __lsx_vst(src0, dst, 0);
+ __lsx_vst(dst0, dst, 16);
+ dst += dst_stride;
+ __lsx_vst(src1, dst, 0);
+ __lsx_vst(dst1, dst, 16);
+ dst += dst_stride;
+ __lsx_vst(src2, dst, 0);
+ __lsx_vst(dst2, dst, 16);
+ dst += dst_stride;
+ __lsx_vst(src3, dst, 0);
+ __lsx_vst(dst3, dst, 16);
+ dst += dst_stride;
+ }
+}
diff --git a/libavcodec/loongarch/vp9_mc_lsx.c b/libavcodec/loongarch/vp9_mc_lsx.c
new file mode 100644
index 0000000000..c6746fd87f
--- /dev/null
+++ b/libavcodec/loongarch/vp9_mc_lsx.c
@@ -0,0 +1,2480 @@
+/*
+ * Copyright (c) 2021 Loongson Technology Corporation Limited
+ * Contributed by Hao Chen <chenhao@loongson.cn>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavcodec/vp9dsp.h"
+#include "libavutil/loongarch/loongson_intrinsics.h"
+#include "vp9dsp_loongarch.h"
+
+static const uint8_t mc_filt_mask_arr[16 * 3] = {
+ /* 8 width cases */
+ 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8,
+ /* 4 width cases */
+ 0, 1, 1, 2, 2, 3, 3, 4, 16, 17, 17, 18, 18, 19, 19, 20,
+ /* 4 width cases */
+ 8, 9, 9, 10, 10, 11, 11, 12, 24, 25, 25, 26, 26, 27, 27, 28
+};
+
+
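+/* Horizontal 8-tap filter producing 4-wide rows (two rows per output vector):
+ * each mask gathers the pixels for one pair of taps, the pairwise dot products
+ * are accumulated and the two partial sums combined with a saturating add. */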
+#define HORIZ_8TAP_4WID_4VECS_FILT(_src0, _src1, _src2, _src3, \
+ _mask0, _mask1, _mask2, _mask3, \
+ _filter0, _filter1, _filter2, _filter3, \
+ _out0, _out1) \
+{ \
+ __m128i _tmp0, _tmp1, _tmp2, _tmp3, _tmp4, _tmp5, _tmp6, _tmp7; \
+ __m128i _reg0, _reg1, _reg2, _reg3; \
+ \
+ DUP2_ARG3(__lsx_vshuf_b, _src1, _src0, _mask0, _src3, _src2, _mask0, \
+ _tmp0, _tmp1); \
+ DUP2_ARG2(__lsx_vdp2_h_b, _tmp0, _filter0, _tmp1, _filter0, _reg0, _reg1); \
+ DUP2_ARG3(__lsx_vshuf_b, _src1, _src0, _mask1, _src3, _src2, _mask1, \
+ _tmp2, _tmp3); \
+ DUP2_ARG3(__lsx_vdp2add_h_b, _reg0, _tmp2, _filter1, _reg1, _tmp3, \
+ _filter1, _reg0, _reg1); \
+ DUP2_ARG3(__lsx_vshuf_b, _src1, _src0, _mask2, _src3, _src2, _mask2, \
+ _tmp4, _tmp5); \
+ DUP2_ARG2(__lsx_vdp2_h_b, _tmp4, _filter2, _tmp5, _filter2, _reg2, _reg3); \
+ DUP2_ARG3(__lsx_vshuf_b, _src1, _src0, _mask3, _src3, _src2, _mask3, \
+ _tmp6, _tmp7); \
+ DUP2_ARG3(__lsx_vdp2add_h_b, _reg2, _tmp6, _filter3, _reg3, _tmp7, \
+ _filter3, _reg2, _reg3); \
+ DUP2_ARG2(__lsx_vsadd_h, _reg0, _reg2, _reg1, _reg3, _out0, _out1); \
+}
+
+#define HORIZ_8TAP_8WID_4VECS_FILT(_src0, _src1, _src2, _src3, \
+ _mask0, _mask1, _mask2, _mask3, \
+ _filter0, _filter1, _filter2, _filter3, \
+ _out0, _out1, _out2, _out3) \
+{ \
+ __m128i _tmp0, _tmp1, _tmp2, _tmp3, _tmp4, _tmp5, _tmp6, _tmp7; \
+ __m128i _reg0, _reg1, _reg2, _reg3, _reg4, _reg5, _reg6, _reg7; \
+ \
+ DUP4_ARG3(__lsx_vshuf_b, _src0, _src0, _mask0, _src1, _src1, _mask0, _src2,\
+ _src2, _mask0, _src3, _src3, _mask0, _tmp0, _tmp1, _tmp2, _tmp3);\
+ DUP4_ARG2(__lsx_vdp2_h_b, _tmp0, _filter0, _tmp1, _filter0, _tmp2, \
+ _filter0, _tmp3, _filter0, _reg0, _reg1, _reg2, _reg3); \
+ DUP4_ARG3(__lsx_vshuf_b, _src0, _src0, _mask2, _src1, _src1, _mask2, _src2,\
+ _src2, _mask2, _src3, _src3, _mask2, _tmp0, _tmp1, _tmp2, _tmp3);\
+ DUP4_ARG2(__lsx_vdp2_h_b, _tmp0, _filter2, _tmp1, _filter2, _tmp2, \
+ _filter2, _tmp3, _filter2, _reg4, _reg5, _reg6, _reg7); \
+ DUP4_ARG3(__lsx_vshuf_b, _src0, _src0, _mask1, _src1, _src1, _mask1, _src2,\
+ _src2, _mask1, _src3, _src3, _mask1, _tmp4, _tmp5, _tmp6, _tmp7);\
+ DUP4_ARG3(__lsx_vdp2add_h_b, _reg0, _tmp4, _filter1, _reg1, _tmp5, \
+ _filter1, _reg2, _tmp6, _filter1, _reg3, _tmp7, _filter1, _reg0, \
+ _reg1, _reg2, _reg3); \
+ DUP4_ARG3(__lsx_vshuf_b, _src0, _src0, _mask3, _src1, _src1, _mask3, _src2,\
+ _src2, _mask3, _src3, _src3, _mask3, _tmp4, _tmp5, _tmp6, _tmp7);\
+ DUP4_ARG3(__lsx_vdp2add_h_b, _reg4, _tmp4, _filter3, _reg5, _tmp5, \
+ _filter3, _reg6, _tmp6, _filter3, _reg7, _tmp7, _filter3, _reg4, \
+ _reg5, _reg6, _reg7); \
+ DUP4_ARG2(__lsx_vsadd_h, _reg0, _reg4, _reg1, _reg5, _reg2, _reg6, _reg3, \
+ _reg7, _out0, _out1, _out2, _out3); \
+}
+
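+/* Sum of four pairwise dot products (the 8-tap filter split into tap pairs),
+ * combined with a saturating add. */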
+#define FILT_8TAP_DPADD_S_H(_reg0, _reg1, _reg2, _reg3, \
+ _filter0, _filter1, _filter2, _filter3) \
+( { \
+ __m128i _vec0, _vec1; \
+ \
+ _vec0 = __lsx_vdp2_h_b(_reg0, _filter0); \
+ _vec0 = __lsx_vdp2add_h_b(_vec0, _reg1, _filter1); \
+ _vec1 = __lsx_vdp2_h_b(_reg2, _filter2); \
+ _vec1 = __lsx_vdp2add_h_b(_vec1, _reg3, _filter3); \
+ _vec0 = __lsx_vsadd_h(_vec0, _vec1); \
+ \
+ _vec0; \
+} )
+
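+/* Filter one row: gather the tap inputs via the four masks, apply the 8-tap
+ * filter, then round (>> 7) and saturate. */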
+#define HORIZ_8TAP_FILT(_src0, _src1, _mask0, _mask1, _mask2, _mask3, \
+ _filt_h0, _filt_h1, _filt_h2, _filt_h3) \
+( { \
+ __m128i _tmp0, _tmp1, _tmp2, _tmp3; \
+ __m128i _out; \
+ \
+ DUP4_ARG3(__lsx_vshuf_b, _src1, _src0, _mask0, _src1, _src0, _mask1, _src1,\
+ _src0, _mask2, _src1, _src0, _mask3, _tmp0, _tmp1, _tmp2, _tmp3);\
+ _out = FILT_8TAP_DPADD_S_H(_tmp0, _tmp1, _tmp2, _tmp3, _filt_h0, _filt_h1, \
+ _filt_h2, _filt_h3); \
+ _out = __lsx_vsrari_h(_out, 7); \
+ _out = __lsx_vsat_h(_out, 7); \
+ \
+ _out; \
+} )
+
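+/* Load four consecutive rows; _src is left advanced past the first three. */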
+#define LSX_LD_4(_src, _stride, _src0, _src1, _src2, _src3) \
+{ \
+ _src0 = __lsx_vld(_src, 0); \
+ _src += _stride; \
+ _src1 = __lsx_vld(_src, 0); \
+ _src += _stride; \
+ _src2 = __lsx_vld(_src, 0); \
+ _src += _stride; \
+ _src3 = __lsx_vld(_src, 0); \
+}
+
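+/* 8-tap sub-pel MC helpers: common_hz_* filter horizontally, common_vt_*
+ * vertically; the <N>w suffix is the block width in pixels. */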
+static void common_hz_8t_4x4_lsx(const uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter)
+{
+ __m128i src0, src1, src2, src3;
+ __m128i filter0, filter1, filter2, filter3;
+ __m128i mask0, mask1, mask2, mask3;
+ __m128i out, out0, out1;
+
+ mask0 = __lsx_vld(mc_filt_mask_arr, 16);
+ src -= 3;
+ DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
+ filter0, filter1, filter2, filter3);
+ DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
+ mask3 = __lsx_vaddi_bu(mask0, 6);
+
+ LSX_LD_4(src, src_stride, src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
+ src0, src1, src2, src3);
+ HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
+ mask3, filter0, filter1, filter2, filter3, out0, out1);
+ out = __lsx_vssrarni_b_h(out1, out0, 7);
+ out = __lsx_vxori_b(out, 128);
+ __lsx_vstelm_w(out, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_w(out, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_w(out, dst, 0, 2);
+ dst += dst_stride;
+ __lsx_vstelm_w(out, dst, 0, 3);
+}
+
+static void common_hz_8t_4x8_lsx(const uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter)
+{
+ int32_t src_stride2 = src_stride << 1;
+ int32_t src_stride3 = src_stride + src_stride2;
+ int32_t src_stride4 = src_stride2 << 1;
+ __m128i src0, src1, src2, src3;
+ __m128i filter0, filter1, filter2, filter3;
+ __m128i mask0, mask1, mask2, mask3;
+ __m128i out0, out1, out2, out3;
+ uint8_t *_src = (uint8_t*)src - 3;
+
+ mask0 = __lsx_vld(mc_filt_mask_arr, 16);
+ DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
+ mask3 = __lsx_vaddi_bu(mask0, 6);
+ DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
+ filter0, filter1, filter2, filter3);
+
+ src0 = __lsx_vld(_src, 0);
+ DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1, src2);
+ src3 = __lsx_vldx(_src, src_stride3);
+ _src += src_stride4;
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
+ src0, src1, src2, src3);
+ HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
+ mask3, filter0, filter1, filter2, filter3, out0, out1);
+ src0 = __lsx_vld(_src, 0);
+ DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1, src2);
+ src3 = __lsx_vldx(_src, src_stride3);
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
+ src0, src1, src2, src3);
+ HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
+ mask3, filter0, filter1, filter2, filter3, out2, out3);
+ DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7, out0, out1);
+ DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
+ __lsx_vstelm_w(out0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_w(out0, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_w(out0, dst, 0, 2);
+ dst += dst_stride;
+ __lsx_vstelm_w(out0, dst, 0, 3);
+ dst += dst_stride;
+ __lsx_vstelm_w(out1, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_w(out1, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_w(out1, dst, 0, 2);
+ dst += dst_stride;
+ __lsx_vstelm_w(out1, dst, 0, 3);
+}
+
+static void common_hz_8t_4w_lsx(const uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height)
+{
+ if (height == 4) {
+ common_hz_8t_4x4_lsx(src, src_stride, dst, dst_stride, filter);
+ } else if (height == 8) {
+ common_hz_8t_4x8_lsx(src, src_stride, dst, dst_stride, filter);
+ }
+}
+
+static void common_hz_8t_8x4_lsx(const uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter)
+{
+ __m128i src0, src1, src2, src3;
+ __m128i filter0, filter1, filter2, filter3;
+ __m128i mask0, mask1, mask2, mask3;
+ __m128i out0, out1, out2, out3;
+
+ mask0 = __lsx_vld(mc_filt_mask_arr, 0);
+ src -= 3;
+ DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
+ mask3 = __lsx_vaddi_bu(mask0, 6);
+ DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
+ filter0, filter1, filter2, filter3);
+
+ LSX_LD_4(src, src_stride, src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
+ src0, src1, src2, src3);
+ HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
+ mask3, filter0, filter1, filter2, filter3, out0, out1, out2, out3);
+ DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7, out0, out1);
+ DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
+ __lsx_vstelm_d(out0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(out0, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_d(out1, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(out1, dst, 0, 1);
+}
+
+static void common_hz_8t_8x8mult_lsx(const uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height)
+{
+ uint32_t loop_cnt = height >> 2;
+ int32_t src_stride2 = src_stride << 1;
+ int32_t src_stride3 = src_stride + src_stride2;
+ int32_t src_stride4 = src_stride2 << 1;
+ __m128i src0, src1, src2, src3;
+ __m128i filter0, filter1, filter2, filter3;
+ __m128i mask0, mask1, mask2, mask3;
+ __m128i out0, out1, out2, out3;
+ uint8_t* _src = (uint8_t*)src - 3;
+
+ mask0 = __lsx_vld(mc_filt_mask_arr, 0);
+ DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
+ mask3 = __lsx_vaddi_bu(mask0, 6);
+ DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
+ filter0, filter1, filter2, filter3);
+
+ for (; loop_cnt--;) {
+ src0 = __lsx_vld(_src, 0);
+ DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1, src2);
+ src3 = __lsx_vldx(_src, src_stride3);
+ _src += src_stride4;
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
+ src0, src1, src2, src3);
+ HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
+ mask3, filter0, filter1, filter2, filter3, out0, out1, out2, out3);
+ DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7, out0, out1);
+ DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
+ __lsx_vstelm_d(out0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(out0, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_d(out1, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(out1, dst, 0, 1);
+ dst += dst_stride;
+ }
+}
+
+static void common_hz_8t_8w_lsx(const uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height)
+{
+ if (height == 4) {
+ common_hz_8t_8x4_lsx(src, src_stride, dst, dst_stride, filter);
+ } else {
+ common_hz_8t_8x8mult_lsx(src, src_stride, dst, dst_stride,
+ filter, height);
+ }
+}
+
+static void common_hz_8t_16w_lsx(const uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height)
+{
+ uint32_t loop_cnt = height >> 1;
+ int32_t stride = src_stride << 1;
+ __m128i src0, src1, src2, src3;
+ __m128i filter0, filter1, filter2, filter3;
+ __m128i mask0, mask1, mask2, mask3;
+ __m128i out0, out1, out2, out3;
+
+ mask0 = __lsx_vld(mc_filt_mask_arr, 0);
+ src -= 3;
+ DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
+ mask3 = __lsx_vaddi_bu(mask0, 6);
+ DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
+ filter0, filter1, filter2, filter3);
+
+ for (; loop_cnt--;) {
+ const uint8_t* _src = src + src_stride;
+ DUP2_ARG2(__lsx_vld, src, 0, _src, 0, src0, src2);
+ DUP2_ARG2(__lsx_vld, src, 8, _src, 8, src1, src3);
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
+ src0, src1, src2, src3);
+ HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
+ mask3, filter0, filter1, filter2, filter3, out0, out1, out2, out3);
+ DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7, out0, out1);
+ DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
+ __lsx_vst(out0, dst, 0);
+ dst += dst_stride;
+ __lsx_vst(out1, dst, 0);
+ dst += dst_stride;
+ src += stride;
+ }
+}
+
+static void common_hz_8t_32w_lsx(const uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height)
+{
+ uint32_t loop_cnt = height >> 1;
+ __m128i src0, src1, src2, src3;
+ __m128i filter0, filter1, filter2, filter3;
+ __m128i mask0, mask1, mask2, mask3;
+ __m128i out0, out1, out2, out3;
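+ /* shuff selects the unaligned 16 bytes at offset 8 (src0 bytes 8..15, src2 bytes 0..7). */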
+ __m128i shuff = {0x0F0E0D0C0B0A0908, 0x1716151413121110};
+
+ mask0 = __lsx_vld(mc_filt_mask_arr, 0);
+ src -= 3;
+ DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
+ mask3 = __lsx_vaddi_bu(mask0, 6);
+ DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
+ filter0, filter1, filter2, filter3);
+
+ for (; loop_cnt--;) {
+ DUP2_ARG2(__lsx_vld, src, 0, src, 16, src0, src2);
+ src3 = __lsx_vld(src, 24);
+ src1 = __lsx_vshuf_b(src2, src0, shuff);
+ src += src_stride;
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
+ src0, src1, src2, src3);
+ HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
+ mask3, filter0, filter1, filter2, filter3, out0, out1, out2, out3);
+ DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7, out0, out1);
+ DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
+ __lsx_vst(out0, dst, 0);
+ __lsx_vst(out1, dst, 16);
+
+ DUP2_ARG2(__lsx_vld, src, 0, src, 16, src0, src2);
+ src3 = __lsx_vld(src, 24);
+ src1 = __lsx_vshuf_b(src2, src0, shuff);
+ src += src_stride;
+
+ dst += dst_stride;
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
+ src0, src1, src2, src3);
+ HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
+ mask3, filter0, filter1, filter2, filter3, out0, out1, out2, out3);
+ DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7, out0, out1);
+ DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
+ __lsx_vst(out0, dst, 0);
+ __lsx_vst(out1, dst, 16);
+ dst += dst_stride;
+ }
+}
+
+static void common_hz_8t_64w_lsx(const uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height)
+{
+ int32_t loop_cnt = height;
+ __m128i src0, src1, src2, src3;
+ __m128i filter0, filter1, filter2, filter3;
+ __m128i mask0, mask1, mask2, mask3;
+ __m128i out0, out1, out2, out3;
+ __m128i shuff = {0x0F0E0D0C0B0A0908, 0x1716151413121110};
+
+ mask0 = __lsx_vld(mc_filt_mask_arr, 0);
+ src -= 3;
+ DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
+ mask3 = __lsx_vaddi_bu(mask0, 6);
+ DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
+ filter0, filter1, filter2, filter3);
+
+ for (; loop_cnt--;) {
+ DUP2_ARG2(__lsx_vld, src, 0, src, 16, src0, src2);
+ src3 = __lsx_vld(src, 24);
+ src1 = __lsx_vshuf_b(src2, src0, shuff);
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
+ src0, src1, src2, src3);
+ HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
+ mask3, filter0, filter1, filter2, filter3, out0, out1, out2, out3);
+ DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7, out0, out1);
+ DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
+ __lsx_vst(out0, dst, 0);
+ __lsx_vst(out1, dst, 16);
+
+ DUP2_ARG2(__lsx_vld, src, 32, src, 48, src0, src2);
+ src3 = __lsx_vld(src, 56);
+ src1 = __lsx_vshuf_b(src2, src0, shuff);
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
+ src0, src1, src2, src3);
+ HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
+ mask3, filter0, filter1, filter2, filter3, out0, out1, out2, out3);
+ DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7, out0, out1);
+ DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
+ __lsx_vst(out0, dst, 32);
+ __lsx_vst(out1, dst, 48);
+ src += src_stride;
+ dst += dst_stride;
+ }
+}
+
+static void common_vt_8t_4w_lsx(const uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height)
+{
+ uint32_t loop_cnt = height >> 2;
+ int32_t src_stride2 = src_stride << 1;
+ int32_t src_stride3 = src_stride + src_stride2;
+ int32_t src_stride4 = src_stride2 << 1;
+ __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
+ __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
+ __m128i reg0, reg1, reg2, reg3, reg4;
+ __m128i filter0, filter1, filter2, filter3;
+ __m128i out0, out1;
+ uint8_t* _src = (uint8_t*)src - src_stride3;
+
+ DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
+ filter0, filter1, filter2, filter3);
+ src0 = __lsx_vld(_src, 0);
+ DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1, src2);
+ src3 = __lsx_vldx(_src, src_stride3);
+ _src += src_stride4;
+ src4 = __lsx_vld(_src, 0);
+ DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src5, src6);
+ _src += src_stride3;
+ DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src5, src4, src2, src1, tmp0,
+ tmp1, tmp2, tmp3);
+ DUP2_ARG2(__lsx_vilvl_b, src4, src3, src6, src5, tmp4, tmp5);
+ DUP2_ARG2(__lsx_vilvl_d, tmp3, tmp0, tmp4, tmp1, reg0, reg1);
+ reg2 = __lsx_vilvl_d(tmp5, tmp2);
+ DUP2_ARG2(__lsx_vxori_b, reg0, 128, reg1, 128, reg0, reg1);
+ reg2 = __lsx_vxori_b(reg2, 128);
+
+ for (; loop_cnt--;) {
+ src7 = __lsx_vld(_src, 0);
+ DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src8, src9);
+ src10 = __lsx_vldx(_src, src_stride3);
+ _src += src_stride4;
+ DUP4_ARG2(__lsx_vilvl_b, src7, src6, src8, src7, src9, src8, src10,
+ src9, tmp0, tmp1, tmp2, tmp3);
+ DUP2_ARG2(__lsx_vilvl_d, tmp1, tmp0, tmp3, tmp2, reg3, reg4);
+ DUP2_ARG2(__lsx_vxori_b, reg3, 128, reg4, 128, reg3, reg4);
+ out0 = FILT_8TAP_DPADD_S_H(reg0, reg1, reg2, reg3, filter0, filter1,
+ filter2, filter3);
+ out1 = FILT_8TAP_DPADD_S_H(reg1, reg2, reg3, reg4, filter0, filter1,
+ filter2, filter3);
+ out0 = __lsx_vssrarni_b_h(out1, out0, 7);
+ out0 = __lsx_vxori_b(out0, 128);
+ __lsx_vstelm_w(out0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_w(out0, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_w(out0, dst, 0, 2);
+ dst += dst_stride;
+ __lsx_vstelm_w(out0, dst, 0, 3);
+ dst += dst_stride;
+
+ reg0 = reg2;
+ reg1 = reg3;
+ reg2 = reg4;
+ src6 = src10;
+ }
+}
+
+static void common_vt_8t_8w_lsx(const uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height)
+{
+ uint32_t loop_cnt = height >> 2;
+ __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
+ __m128i tmp0, tmp1, tmp2, tmp3;
+ __m128i reg0, reg1, reg2, reg3, reg4, reg5;
+ __m128i filter0, filter1, filter2, filter3;
+ __m128i out0, out1, out2, out3;
+ int32_t src_stride2 = src_stride << 1;
+ int32_t src_stride3 = src_stride + src_stride2;
+ int32_t src_stride4 = src_stride2 << 1;
+ uint8_t* _src = (uint8_t*)src - src_stride3;
+
+ DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
+ filter0, filter1, filter2, filter3);
+
+ src0 = __lsx_vld(_src, 0);
+ DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1, src2);
+ src3 = __lsx_vldx(_src, src_stride3);
+ _src += src_stride4;
+ src4 = __lsx_vld(_src, 0);
+ DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src5, src6);
+ _src += src_stride3;
+
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
+ src0, src1, src2, src3);
+ DUP2_ARG2(__lsx_vxori_b, src4, 128, src5, 128, src4, src5);
+ src6 = __lsx_vxori_b(src6, 128);
+ DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src5, src4, src2, src1,
+ reg0, reg1, reg2, reg3);
+ DUP2_ARG2(__lsx_vilvl_b, src4, src3, src6, src5, reg4, reg5);
+
+ for (; loop_cnt--;) {
+ src7 = __lsx_vld(_src, 0);
+ DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src8, src9);
+ src10 = __lsx_vldx(_src, src_stride3);
+ _src += src_stride4;
+ DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128, src10, 128,
+ src7, src8, src9, src10);
+ DUP4_ARG2(__lsx_vilvl_b, src7, src6, src8, src7, src9, src8, src10,
+ src9, tmp0, tmp1, tmp2, tmp3);
+ out0 = FILT_8TAP_DPADD_S_H(reg0, reg1, reg2, tmp0, filter0, filter1,
+ filter2, filter3);
+ out1 = FILT_8TAP_DPADD_S_H(reg3, reg4, reg5, tmp1, filter0, filter1,
+ filter2, filter3);
+ out2 = FILT_8TAP_DPADD_S_H(reg1, reg2, tmp0, tmp2, filter0, filter1,
+ filter2, filter3);
+ out3 = FILT_8TAP_DPADD_S_H(reg4, reg5, tmp1, tmp3, filter0, filter1,
+ filter2, filter3);
+ DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7, out0, out1);
+ DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
+ __lsx_vstelm_d(out0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(out0, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_d(out1, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(out1, dst, 0, 1);
+ dst += dst_stride;
+
+ reg0 = reg2;
+ reg1 = tmp0;
+ reg2 = tmp2;
+ reg3 = reg5;
+ reg4 = tmp1;
+ reg5 = tmp3;
+ src6 = src10;
+ }
+}
+
+static void common_vt_8t_16w_lsx(const uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height)
+{
+ uint32_t loop_cnt = height >> 2;
+ __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
+ __m128i filter0, filter1, filter2, filter3;
+ __m128i reg0, reg1, reg2, reg3, reg4, reg5;
+ __m128i reg6, reg7, reg8, reg9, reg10, reg11;
+ __m128i tmp0, tmp1, tmp2, tmp3;
+ int32_t src_stride2 = src_stride << 1;
+ int32_t src_stride3 = src_stride + src_stride2;
+ int32_t src_stride4 = src_stride2 << 1;
+ uint8_t* _src = (uint8_t*)src - src_stride3;
+
+ DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
+ filter0, filter1, filter2, filter3);
+ src0 = __lsx_vld(_src, 0);
+ DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1, src2);
+ src3 = __lsx_vldx(_src, src_stride3);
+ _src += src_stride4;
+ src4 = __lsx_vld(_src, 0);
+ DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src5, src6);
+ _src += src_stride3;
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
+ src0, src1, src2, src3);
+ DUP2_ARG2(__lsx_vxori_b, src4, 128, src5, 128, src4, src5);
+ src6 = __lsx_vxori_b(src6, 128);
+ DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src5, src4, src2, src1,
+ reg0, reg1, reg2, reg3);
+ DUP2_ARG2(__lsx_vilvl_b, src4, src3, src6, src5, reg4, reg5);
+ DUP4_ARG2(__lsx_vilvh_b, src1, src0, src3, src2, src5, src4, src2, src1,
+ reg6, reg7, reg8, reg9);
+ DUP2_ARG2(__lsx_vilvh_b, src4, src3, src6, src5, reg10, reg11);
+
+ for (; loop_cnt--;) {
+ src7 = __lsx_vld(_src, 0);
+ DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src8, src9);
+ src10 = __lsx_vldx(_src, src_stride3);
+ _src += src_stride4;
+ DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128, src10, 128,
+ src7, src8, src9, src10);
+ DUP4_ARG2(__lsx_vilvl_b, src7, src6, src8, src7, src9, src8, src10, src9,
+ src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vilvh_b, src7, src6, src8, src7, src9, src8, src10, src9,
+ src4, src5, src7, src8);
+ tmp0 = FILT_8TAP_DPADD_S_H(reg0, reg1, reg2, src0, filter0, filter1,
+ filter2, filter3);
+ tmp1 = FILT_8TAP_DPADD_S_H(reg3, reg4, reg5, src1, filter0, filter1,
+ filter2, filter3);
+ tmp2 = FILT_8TAP_DPADD_S_H(reg6, reg7, reg8, src4, filter0, filter1,
+ filter2, filter3);
+ tmp3 = FILT_8TAP_DPADD_S_H(reg9, reg10, reg11, src5, filter0, filter1,
+ filter2, filter3);
+ DUP2_ARG3(__lsx_vssrarni_b_h, tmp2, tmp0, 7, tmp3, tmp1, 7, tmp0, tmp1);
+ DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
+ __lsx_vst(tmp0, dst, 0);
+ dst += dst_stride;
+ __lsx_vst(tmp1, dst, 0);
+ dst += dst_stride;
+ tmp0 = FILT_8TAP_DPADD_S_H(reg1, reg2, src0, src2, filter0, filter1,
+ filter2, filter3);
+ tmp1 = FILT_8TAP_DPADD_S_H(reg4, reg5, src1, src3, filter0, filter1,
+ filter2, filter3);
+ tmp2 = FILT_8TAP_DPADD_S_H(reg7, reg8, src4, src7, filter0, filter1,
+ filter2, filter3);
+ tmp3 = FILT_8TAP_DPADD_S_H(reg10, reg11, src5, src8, filter0, filter1,
+ filter2, filter3);
+ DUP2_ARG3(__lsx_vssrarni_b_h, tmp2, tmp0, 7, tmp3, tmp1, 7, tmp0, tmp1);
+ DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
+ __lsx_vst(tmp0, dst, 0);
+ dst += dst_stride;
+ __lsx_vst(tmp1, dst, 0);
+ dst += dst_stride;
+
+ reg0 = reg2;
+ reg1 = src0;
+ reg2 = src2;
+ reg3 = reg5;
+ reg4 = src1;
+ reg5 = src3;
+ reg6 = reg8;
+ reg7 = src4;
+ reg8 = src7;
+ reg9 = reg11;
+ reg10 = src5;
+ reg11 = src8;
+ src6 = src10;
+ }
+}
+
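+/* Same vertical 8-tap filter applied strip by strip: the outer loop walks
+ * 16-pixel-wide columns so any width that is a multiple of 16 can be served. */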
+static void common_vt_8t_16w_mult_lsx(const uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height,
+ int32_t width)
+{
+ uint8_t *src_tmp;
+ uint8_t *dst_tmp;
+ uint32_t cnt = width >> 4;
+ __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
+ __m128i filter0, filter1, filter2, filter3;
+ __m128i reg0, reg1, reg2, reg3, reg4, reg5;
+ __m128i reg6, reg7, reg8, reg9, reg10, reg11;
+ __m128i tmp0, tmp1, tmp2, tmp3;
+ int32_t src_stride2 = src_stride << 1;
+ int32_t src_stride3 = src_stride + src_stride2;
+ int32_t src_stride4 = src_stride2 << 1;
+ int32_t dst_stride2 = dst_stride << 1;
+ int32_t dst_stride3 = dst_stride2 + dst_stride;
+ int32_t dst_stride4 = dst_stride2 << 1;
+ uint8_t* _src = (uint8_t*)src - src_stride3;
+
+ DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
+ filter0, filter1, filter2, filter3);
+ for (;cnt--;) {
+ uint32_t loop_cnt = height >> 2;
+
+ src_tmp = _src;
+ dst_tmp = dst;
+
+ src0 = __lsx_vld(src_tmp, 0);
+ DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride2,
+ src1, src2);
+ src3 = __lsx_vldx(src_tmp, src_stride3);
+ src_tmp += src_stride4;
+ src4 = __lsx_vld(src_tmp, 0);
+ DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride2,
+ src5, src6);
+ src_tmp += src_stride3;
+
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
+ src0, src1, src2, src3);
+ DUP2_ARG2(__lsx_vxori_b, src4, 128, src5, 128, src4, src5);
+ src6 = __lsx_vxori_b(src6, 128);
+ DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src5, src4, src2, src1,
+ reg0, reg1, reg2, reg3);
+ DUP2_ARG2(__lsx_vilvl_b, src4, src3, src6, src5, reg4, reg5);
+ DUP4_ARG2(__lsx_vilvh_b, src1, src0, src3, src2, src5, src4, src2, src1,
+ reg6, reg7, reg8, reg9);
+ DUP2_ARG2(__lsx_vilvh_b, src4, src3, src6, src5, reg10, reg11);
+
+ for (;loop_cnt--;) {
+ src7 = __lsx_vld(src_tmp, 0);
+ DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride2,
+ src8, src9);
+ src10 = __lsx_vldx(src_tmp, src_stride3);
+ src_tmp += src_stride4;
+ DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128, src10,
+ 128, src7, src8, src9, src10);
+ DUP4_ARG2(__lsx_vilvl_b, src7, src6, src8, src7, src9, src8,
+ src10, src9, src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vilvh_b, src7, src6, src8, src7, src9, src8,
+ src10, src9, src4, src5, src7, src8);
+ tmp0 = FILT_8TAP_DPADD_S_H(reg0, reg1, reg2, src0, filter0,
+ filter1, filter2, filter3);
+ tmp1 = FILT_8TAP_DPADD_S_H(reg3, reg4, reg5, src1, filter0,
+ filter1, filter2, filter3);
+ tmp2 = FILT_8TAP_DPADD_S_H(reg6, reg7, reg8, src4, filter0,
+ filter1, filter2, filter3);
+ tmp3 = FILT_8TAP_DPADD_S_H(reg9, reg10, reg11, src5, filter0,
+ filter1, filter2, filter3);
+ DUP2_ARG3(__lsx_vssrarni_b_h, tmp2, tmp0, 7, tmp3, tmp1, 7,
+ tmp0, tmp1);
+ DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
+ __lsx_vst(tmp0, dst_tmp, 0);
+ __lsx_vstx(tmp1, dst_tmp, dst_stride);
+ tmp0 = FILT_8TAP_DPADD_S_H(reg1, reg2, src0, src2, filter0,
+ filter1, filter2, filter3);
+ tmp1 = FILT_8TAP_DPADD_S_H(reg4, reg5, src1, src3, filter0,
+ filter1, filter2, filter3);
+ tmp2 = FILT_8TAP_DPADD_S_H(reg7, reg8, src4, src7, filter0,
+ filter1, filter2, filter3);
+ tmp3 = FILT_8TAP_DPADD_S_H(reg10, reg11, src5, src8, filter0,
+ filter1, filter2, filter3);
+ DUP2_ARG3(__lsx_vssrarni_b_h, tmp2, tmp0, 7, tmp3, tmp1, 7,
+ tmp0, tmp1);
+ DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
+ __lsx_vstx(tmp0, dst_tmp, dst_stride2);
+ __lsx_vstx(tmp1, dst_tmp, dst_stride3);
+ dst_tmp += dst_stride4;
+
+ reg0 = reg2;
+ reg1 = src0;
+ reg2 = src2;
+ reg3 = reg5;
+ reg4 = src1;
+ reg5 = src3;
+ reg6 = reg8;
+ reg7 = src4;
+ reg8 = src7;
+ reg9 = reg11;
+ reg10 = src5;
+ reg11 = src8;
+ src6 = src10;
+ }
+ _src += 16;
+ dst += 16;
+ }
+}
+
+static void common_vt_8t_32w_lsx(const uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height)
+{
+ common_vt_8t_16w_mult_lsx(src, src_stride, dst, dst_stride,
+ filter, height, 32);
+}
+
+static void common_vt_8t_64w_lsx(const uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter, int32_t height)
+{
+ common_vt_8t_16w_mult_lsx(src, src_stride, dst, dst_stride,
+ filter, height, 64);
+}
+
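+/* Combined 8-tap horizontal + 8-tap vertical filtering for 4-pixel-wide
+ * blocks: the horizontal pass produces intermediate rows which are shuffled
+ * and packed into the tap layout consumed by the vertical dot-product. */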
+static void common_hv_8ht_8vt_4w_lsx(const uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter_horiz,
+ const int8_t *filter_vert,
+ int32_t height)
+{
+ uint32_t loop_cnt = height >> 2;
+ __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
+ __m128i filt_hz0, filt_hz1, filt_hz2, filt_hz3;
+ __m128i filt_vt0, filt_vt1, filt_vt2, filt_vt3;
+ __m128i mask0, mask1, mask2, mask3;
+ __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
+ __m128i out0, out1;
+ __m128i shuff = {0x0F0E0D0C0B0A0908, 0x1716151413121110};
+ int32_t src_stride2 = src_stride << 1;
+ int32_t src_stride3 = src_stride + src_stride2;
+ int32_t src_stride4 = src_stride2 << 1;
+ uint8_t* _src = (uint8_t*)src - src_stride3 - 3;
+
+ mask0 = __lsx_vld(mc_filt_mask_arr, 16);
+ DUP4_ARG2(__lsx_vldrepl_h, filter_horiz, 0, filter_horiz, 2, filter_horiz, 4,
+ filter_horiz, 6, filt_hz0, filt_hz1, filt_hz2, filt_hz3);
+ DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
+ mask3 = __lsx_vaddi_bu(mask0, 6);
+
+ src0 = __lsx_vld(_src, 0);
+ DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1, src2);
+ src3 = __lsx_vldx(_src, src_stride3);
+ _src += src_stride4;
+ src4 = __lsx_vld(_src, 0);
+ DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src5, src6);
+ _src += src_stride3;
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
+ src0, src1, src2, src3);
+ DUP2_ARG2(__lsx_vxori_b, src4, 128, src5, 128, src4, src5);
+ src6 = __lsx_vxori_b(src6, 128);
+
+ tmp0 = HORIZ_8TAP_FILT(src0, src1, mask0, mask1, mask2, mask3, filt_hz0,
+ filt_hz1, filt_hz2, filt_hz3);
+ tmp2 = HORIZ_8TAP_FILT(src2, src3, mask0, mask1, mask2, mask3, filt_hz0,
+ filt_hz1, filt_hz2, filt_hz3);
+ tmp4 = HORIZ_8TAP_FILT(src4, src5, mask0, mask1, mask2, mask3, filt_hz0,
+ filt_hz1, filt_hz2, filt_hz3);
+ tmp5 = HORIZ_8TAP_FILT(src5, src6, mask0, mask1, mask2, mask3, filt_hz0,
+ filt_hz1, filt_hz2, filt_hz3);
+ DUP2_ARG3(__lsx_vshuf_b, tmp2, tmp0, shuff, tmp4, tmp2, shuff, tmp1, tmp3);
+ DUP4_ARG2(__lsx_vldrepl_h, filter_vert, 0, filter_vert, 2, filter_vert, 4,
+ filter_vert, 6, filt_vt0, filt_vt1, filt_vt2, filt_vt3);
+ DUP2_ARG2(__lsx_vpackev_b, tmp1, tmp0, tmp3, tmp2, tmp0, tmp1);
+ tmp2 = __lsx_vpackev_b(tmp5, tmp4);
+
+ for (;loop_cnt--;) {
+ src7 = __lsx_vld(_src, 0);
+ DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src8, src9);
+ src10 = __lsx_vldx(_src, src_stride3);
+ _src += src_stride4;
+ DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128, src10, 128,
+ src7, src8, src9, src10);
+ tmp3 = HORIZ_8TAP_FILT(src7, src8, mask0, mask1, mask2, mask3, filt_hz0,
+ filt_hz1, filt_hz2, filt_hz3);
+ tmp4 = __lsx_vshuf_b(tmp3, tmp5, shuff);
+ tmp4 = __lsx_vpackev_b(tmp3, tmp4);
+ out0 = FILT_8TAP_DPADD_S_H(tmp0, tmp1, tmp2, tmp4, filt_vt0, filt_vt1,
+ filt_vt2, filt_vt3);
+ src1 = HORIZ_8TAP_FILT(src9, src10, mask0, mask1, mask2, mask3,
+ filt_hz0, filt_hz1, filt_hz2, filt_hz3);
+ src0 = __lsx_vshuf_b(src1, tmp3, shuff);
+ src0 = __lsx_vpackev_b(src1, src0);
+ out1 = FILT_8TAP_DPADD_S_H(tmp1, tmp2, tmp4, src0, filt_vt0, filt_vt1,
+ filt_vt2, filt_vt3);
+ out0 = __lsx_vssrarni_b_h(out1, out0, 7);
+ out0 = __lsx_vxori_b(out0, 128);
+ __lsx_vstelm_w(out0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_w(out0, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_w(out0, dst, 0, 2);
+ dst += dst_stride;
+ __lsx_vstelm_w(out0, dst, 0, 3);
+ dst += dst_stride;
+
+ tmp5 = src1;
+ tmp0 = tmp2;
+ tmp1 = tmp4;
+ tmp2 = src0;
+ }
+}
+
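+/* 8-pixel-wide variant of the combined filter: each source row is filtered
+ * horizontally on its own, and the results are packed pairwise before the
+ * vertical 8-tap pass. */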
+static void common_hv_8ht_8vt_8w_lsx(const uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter_horiz,
+ const int8_t *filter_vert,
+ int32_t height)
+{
+ uint32_t loop_cnt = height >> 2;
+ __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
+ __m128i filt_hz0, filt_hz1, filt_hz2, filt_hz3;
+ __m128i filt_vt0, filt_vt1, filt_vt2, filt_vt3;
+ __m128i mask0, mask1, mask2, mask3;
+ __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
+ __m128i out0, out1;
+ int32_t src_stride2 = src_stride << 1;
+ int32_t src_stride3 = src_stride + src_stride2;
+ int32_t src_stride4 = src_stride2 << 1;
+ uint8_t* _src = (uint8_t*)src - src_stride3 - 3;
+
+ mask0 = __lsx_vld(mc_filt_mask_arr, 0);
+ DUP4_ARG2(__lsx_vldrepl_h, filter_horiz, 0, filter_horiz, 2, filter_horiz,
+ 4, filter_horiz, 6, filt_hz0, filt_hz1, filt_hz2, filt_hz3);
+ DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
+ mask3 = __lsx_vaddi_bu(mask0, 6);
+
+ src0 = __lsx_vld(_src, 0);
+ DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1, src2);
+ src3 = __lsx_vldx(_src, src_stride3);
+ _src += src_stride4;
+ src4 = __lsx_vld(_src, 0);
+ DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src5, src6);
+ _src += src_stride3;
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
+ src0, src1, src2, src3);
+ DUP2_ARG2(__lsx_vxori_b, src4, 128, src5, 128, src4, src5);
+ src6 = __lsx_vxori_b(src6, 128);
+
+ src0 = HORIZ_8TAP_FILT(src0, src0, mask0, mask1, mask2, mask3, filt_hz0,
+ filt_hz1, filt_hz2, filt_hz3);
+ src1 = HORIZ_8TAP_FILT(src1, src1, mask0, mask1, mask2, mask3, filt_hz0,
+ filt_hz1, filt_hz2, filt_hz3);
+ src2 = HORIZ_8TAP_FILT(src2, src2, mask0, mask1, mask2, mask3, filt_hz0,
+ filt_hz1, filt_hz2, filt_hz3);
+ src3 = HORIZ_8TAP_FILT(src3, src3, mask0, mask1, mask2, mask3, filt_hz0,
+ filt_hz1, filt_hz2, filt_hz3);
+ src4 = HORIZ_8TAP_FILT(src4, src4, mask0, mask1, mask2, mask3, filt_hz0,
+ filt_hz1, filt_hz2, filt_hz3);
+ src5 = HORIZ_8TAP_FILT(src5, src5, mask0, mask1, mask2, mask3, filt_hz0,
+ filt_hz1, filt_hz2, filt_hz3);
+ src6 = HORIZ_8TAP_FILT(src6, src6, mask0, mask1, mask2, mask3, filt_hz0,
+ filt_hz1, filt_hz2, filt_hz3);
+
+ DUP4_ARG2(__lsx_vldrepl_h, filter_vert, 0, filter_vert, 2, filter_vert, 4,
+ filter_vert, 6, filt_vt0, filt_vt1, filt_vt2, filt_vt3);
+ DUP4_ARG2(__lsx_vpackev_b, src1, src0, src3, src2, src5, src4,
+ src2, src1, tmp0, tmp1, tmp2, tmp4);
+ DUP2_ARG2(__lsx_vpackev_b, src4, src3, src6, src5, tmp5, tmp6);
+
+ for (;loop_cnt--;) {
+ src7 = __lsx_vld(_src, 0);
+ DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src8, src9);
+ src10 = __lsx_vldx(_src, src_stride3);
+ _src += src_stride4;
+
+ DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128, src10, 128,
+ src7, src8, src9, src10);
+ src7 = HORIZ_8TAP_FILT(src7, src7, mask0, mask1, mask2, mask3, filt_hz0,
+ filt_hz1, filt_hz2, filt_hz3);
+ tmp3 = __lsx_vpackev_b(src7, src6);
+ out0 = FILT_8TAP_DPADD_S_H(tmp0, tmp1, tmp2, tmp3, filt_vt0, filt_vt1,
+ filt_vt2, filt_vt3);
+ src8 = HORIZ_8TAP_FILT(src8, src8, mask0, mask1, mask2, mask3, filt_hz0,
+ filt_hz1, filt_hz2, filt_hz3);
+ src0 = __lsx_vpackev_b(src8, src7);
+ out1 = FILT_8TAP_DPADD_S_H(tmp4, tmp5, tmp6, src0, filt_vt0, filt_vt1,
+ filt_vt2, filt_vt3);
+ src9 = HORIZ_8TAP_FILT(src9, src9, mask0, mask1, mask2, mask3, filt_hz0,
+ filt_hz1, filt_hz2, filt_hz3);
+ src1 = __lsx_vpackev_b(src9, src8);
+ src3 = FILT_8TAP_DPADD_S_H(tmp1, tmp2, tmp3, src1, filt_vt0, filt_vt1,
+ filt_vt2, filt_vt3);
+ src10 = HORIZ_8TAP_FILT(src10, src10, mask0, mask1, mask2, mask3,
+ filt_hz0, filt_hz1, filt_hz2, filt_hz3);
+ src2 = __lsx_vpackev_b(src10, src9);
+ src4 = FILT_8TAP_DPADD_S_H(tmp5, tmp6, src0, src2, filt_vt0, filt_vt1,
+ filt_vt2, filt_vt3);
+ DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, src4, src3, 7, out0, out1);
+ DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
+ __lsx_vstelm_d(out0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(out0, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_d(out1, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(out1, dst, 0, 1);
+ dst += dst_stride;
+
+ src6 = src10;
+ tmp0 = tmp2;
+ tmp1 = tmp3;
+ tmp2 = src1;
+ tmp4 = tmp6;
+ tmp5 = src0;
+ tmp6 = src2;
+ }
+}
+
+static void common_hv_8ht_8vt_16w_lsx(const uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter_horiz,
+ const int8_t *filter_vert,
+ int32_t height)
+{
+ int32_t multiple8_cnt;
+
+ for (multiple8_cnt = 2; multiple8_cnt--;) {
+ common_hv_8ht_8vt_8w_lsx(src, src_stride, dst, dst_stride, filter_horiz,
+ filter_vert, height);
+ src += 8;
+ dst += 8;
+ }
+}
+
+static void common_hv_8ht_8vt_32w_lsx(const uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter_horiz,
+ const int8_t *filter_vert,
+ int32_t height)
+{
+ int32_t multiple8_cnt;
+
+ for (multiple8_cnt = 4; multiple8_cnt--;) {
+ common_hv_8ht_8vt_8w_lsx(src, src_stride, dst, dst_stride, filter_horiz,
+ filter_vert, height);
+ src += 8;
+ dst += 8;
+ }
+}
+
+static void common_hv_8ht_8vt_64w_lsx(const uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter_horiz,
+ const int8_t *filter_vert,
+ int32_t height)
+{
+ int32_t multiple8_cnt;
+
+ for (multiple8_cnt = 8; multiple8_cnt--;) {
+ common_hv_8ht_8vt_8w_lsx(src, src_stride, dst, dst_stride, filter_horiz,
+ filter_vert, height);
+ src += 8;
+ dst += 8;
+ }
+}
+
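+/* Plain block-copy helpers for the unfiltered (full-pel) case. */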
+static void copy_width8_lsx(const uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ int32_t height)
+{
+ int32_t cnt = height >> 2;
+ __m128i src0, src1, src2, src3;
+
+ for (;cnt--;) {
+ src0 = __lsx_vldrepl_d(src, 0);
+ src += src_stride;
+ src1 = __lsx_vldrepl_d(src, 0);
+ src += src_stride;
+ src2 = __lsx_vldrepl_d(src, 0);
+ src += src_stride;
+ src3 = __lsx_vldrepl_d(src, 0);
+ src += src_stride;
+ __lsx_vstelm_d(src0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(src1, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(src2, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(src3, dst, 0, 0);
+ dst += dst_stride;
+ }
+}
+
+static void copy_width16_lsx(const uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ int32_t height)
+{
+ int32_t cnt = height >> 2;
+ __m128i src0, src1, src2, src3;
+ int32_t src_stride2 = src_stride << 1;
+ int32_t src_stride3 = src_stride + src_stride2;
+ int32_t src_stride4 = src_stride2 << 1;
+ int32_t dst_stride2 = dst_stride << 1;
+ int32_t dst_stride3 = dst_stride2 + dst_stride;
+ int32_t dst_stride4 = dst_stride2 << 1;
+ uint8_t *_src = (uint8_t*)src;
+
+ for (;cnt--;) {
+ src0 = __lsx_vld(_src, 0);
+ DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1, src2);
+ src3 = __lsx_vldx(_src, src_stride3);
+ _src += src_stride4;
+ __lsx_vst(src0, dst, 0);
+ __lsx_vstx(src1, dst, dst_stride);
+ __lsx_vstx(src2, dst, dst_stride2);
+ __lsx_vstx(src3, dst, dst_stride3);
+ dst += dst_stride4;
+ }
+}
+
+static void copy_width32_lsx(const uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ int32_t height)
+{
+ int32_t cnt = height >> 2;
+ uint8_t *src_tmp1 = (uint8_t*)src;
+ uint8_t *dst_tmp1 = dst;
+ uint8_t *src_tmp2 = src_tmp1 + 16;
+ uint8_t *dst_tmp2 = dst_tmp1 + 16;
+ int32_t src_stride2 = src_stride << 1;
+ int32_t src_stride3 = src_stride + src_stride2;
+ int32_t src_stride4 = src_stride2 << 1;
+ int32_t dst_stride2 = dst_stride << 1;
+ int32_t dst_stride3 = dst_stride2 + dst_stride;
+ int32_t dst_stride4 = dst_stride2 << 1;
+ __m128i src0, src1, src2, src3, src4, src5, src6, src7;
+
+ for (;cnt--;) {
+ src0 = __lsx_vld(src_tmp1, 0);
+ DUP2_ARG2(__lsx_vldx, src_tmp1, src_stride, src_tmp1, src_stride2,
+ src1, src2);
+ src3 = __lsx_vldx(src_tmp1, src_stride3);
+ src_tmp1 += src_stride4;
+
+ src4 = __lsx_vld(src_tmp2, 0);
+ DUP2_ARG2(__lsx_vldx, src_tmp2, src_stride, src_tmp2, src_stride2,
+ src5, src6);
+ src7 = __lsx_vldx(src_tmp2, src_stride3);
+ src_tmp2 += src_stride4;
+
+ __lsx_vst(src0, dst_tmp1, 0);
+ __lsx_vstx(src1, dst_tmp1, dst_stride);
+ __lsx_vstx(src2, dst_tmp1, dst_stride2);
+ __lsx_vstx(src3, dst_tmp1, dst_stride3);
+ dst_tmp1 += dst_stride4;
+ __lsx_vst(src4, dst_tmp2, 0);
+ __lsx_vstx(src5, dst_tmp2, dst_stride);
+ __lsx_vstx(src6, dst_tmp2, dst_stride2);
+ __lsx_vstx(src7, dst_tmp2, dst_stride3);
+ dst_tmp2 += dst_stride4;
+ }
+}
+
+static void copy_width64_lsx(const uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ int32_t height)
+{
+ int32_t cnt = height >> 2;
+ __m128i src0, src1, src2, src3, src4, src5, src6, src7;
+ __m128i src8, src9, src10, src11, src12, src13, src14, src15;
+
+ for (;cnt--;) {
+ DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48,
+ src0, src1, src2, src3);
+ src += src_stride;
+ DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48,
+ src4, src5, src6, src7);
+ src += src_stride;
+ DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48,
+ src8, src9, src10, src11);
+ src += src_stride;
+ DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48,
+ src12, src13, src14, src15);
+ src += src_stride;
+ __lsx_vst(src0, dst, 0);
+ __lsx_vst(src1, dst, 16);
+ __lsx_vst(src2, dst, 32);
+ __lsx_vst(src3, dst, 48);
+ dst += dst_stride;
+ __lsx_vst(src4, dst, 0);
+ __lsx_vst(src5, dst, 16);
+ __lsx_vst(src6, dst, 32);
+ __lsx_vst(src7, dst, 48);
+ dst += dst_stride;
+ __lsx_vst(src8, dst, 0);
+ __lsx_vst(src9, dst, 16);
+ __lsx_vst(src10, dst, 32);
+ __lsx_vst(src11, dst, 48);
+ dst += dst_stride;
+ __lsx_vst(src12, dst, 0);
+ __lsx_vst(src13, dst, 16);
+ __lsx_vst(src14, dst, 32);
+ __lsx_vst(src15, dst, 48);
+ dst += dst_stride;
+ }
+}
+
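+/* Horizontal 8-tap filters that also average the result with the existing
+ * destination pixels (__lsx_vavgr_bu) before storing. */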
+static void common_hz_8t_and_aver_dst_4x4_lsx(const uint8_t *src,
+ int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter)
+{
+ uint8_t *dst_tmp = dst;
+ __m128i src0, src1, src2, src3;
+ __m128i filter0, filter1, filter2, filter3;
+ __m128i mask0, mask1, mask2, mask3;
+ __m128i tmp0, tmp1;
+ __m128i dst0, dst1, dst2, dst3;
+
+ mask0 = __lsx_vld(mc_filt_mask_arr, 16);
+ src -= 3;
+ DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
+ mask3 = __lsx_vaddi_bu(mask0, 6);
+ DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
+ filter0, filter1, filter2, filter3);
+ LSX_LD_4(src, src_stride, src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
+ src0, src1, src2, src3);
+ HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3,
+ filter0, filter1, filter2, filter3, tmp0, tmp1);
+ dst0 = __lsx_vldrepl_w(dst_tmp, 0);
+ dst_tmp += dst_stride;
+ dst1 = __lsx_vldrepl_w(dst_tmp, 0);
+ dst_tmp += dst_stride;
+ dst2 = __lsx_vldrepl_w(dst_tmp, 0);
+ dst_tmp += dst_stride;
+ dst3 = __lsx_vldrepl_w(dst_tmp, 0);
+ dst0 = __lsx_vilvl_w(dst1, dst0);
+ dst1 = __lsx_vilvl_w(dst3, dst2);
+ dst0 = __lsx_vilvl_d(dst1, dst0);
+ tmp0 = __lsx_vssrarni_b_h(tmp1, tmp0, 7);
+ tmp0 = __lsx_vxori_b(tmp0, 128);
+ dst0 = __lsx_vavgr_bu(tmp0, dst0);
+ __lsx_vstelm_w(dst0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_w(dst0, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_w(dst0, dst, 0, 2);
+ dst += dst_stride;
+ __lsx_vstelm_w(dst0, dst, 0, 3);
+}
+
+static void common_hz_8t_and_aver_dst_4x8_lsx(const uint8_t *src,
+ int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter)
+{
+ uint8_t *dst_tmp = dst;
+ __m128i src0, src1, src2, src3, filter0, filter1, filter2, filter3;
+ __m128i mask0, mask1, mask2, mask3, tmp0, tmp1, tmp2, tmp3;
+ __m128i dst0, dst1;
+
+ mask0 = __lsx_vld(mc_filt_mask_arr, 16);
+ src -= 3;
+ DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
+ mask3 = __lsx_vaddi_bu(mask0, 6);
+ DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
+ filter0, filter1, filter2, filter3);
+
+ LSX_LD_4(src, src_stride, src0, src1, src2, src3);
+ src += src_stride;
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
+ src0, src1, src2, src3);
+ tmp0 = __lsx_vldrepl_w(dst_tmp, 0);
+ dst_tmp += dst_stride;
+ tmp1 = __lsx_vldrepl_w(dst_tmp, 0);
+ dst_tmp += dst_stride;
+ tmp2 = __lsx_vldrepl_w(dst_tmp, 0);
+ dst_tmp += dst_stride;
+ tmp3 = __lsx_vldrepl_w(dst_tmp, 0);
+ dst_tmp += dst_stride;
+ tmp0 = __lsx_vilvl_w(tmp1, tmp0);
+ tmp1 = __lsx_vilvl_w(tmp3, tmp2);
+ dst0 = __lsx_vilvl_d(tmp1, tmp0);
+
+ tmp0 = __lsx_vldrepl_w(dst_tmp, 0);
+ dst_tmp += dst_stride;
+ tmp1 = __lsx_vldrepl_w(dst_tmp, 0);
+ dst_tmp += dst_stride;
+ tmp2 = __lsx_vldrepl_w(dst_tmp, 0);
+ dst_tmp += dst_stride;
+ tmp3 = __lsx_vldrepl_w(dst_tmp, 0);
+ tmp0 = __lsx_vilvl_w(tmp1, tmp0);
+ tmp1 = __lsx_vilvl_w(tmp3, tmp2);
+ dst1 = __lsx_vilvl_d(tmp1, tmp0);
+ HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3,
+ filter0, filter1, filter2, filter3, tmp0, tmp1);
+ LSX_LD_4(src, src_stride, src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
+ src0, src1, src2, src3);
+ HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3,
+ filter0, filter1, filter2, filter3, tmp2, tmp3);
+ DUP4_ARG3(__lsx_vssrarni_b_h, tmp0, tmp0, 7, tmp1, tmp1, 7, tmp2, tmp2, 7,
+ tmp3, tmp3, 7, tmp0, tmp1, tmp2, tmp3);
+ DUP2_ARG2(__lsx_vilvl_d, tmp1, tmp0, tmp3, tmp2, tmp0, tmp1);
+ DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
+ DUP2_ARG2(__lsx_vavgr_bu, tmp0, dst0, tmp1, dst1, dst0, dst1);
+ __lsx_vstelm_w(dst0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_w(dst0, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_w(dst0, dst, 0, 2);
+ dst += dst_stride;
+ __lsx_vstelm_w(dst0, dst, 0, 3);
+ dst += dst_stride;
+ __lsx_vstelm_w(dst1, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_w(dst1, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_w(dst1, dst, 0, 2);
+ dst += dst_stride;
+ __lsx_vstelm_w(dst1, dst, 0, 3);
+}
+
+static void common_hz_8t_and_aver_dst_4w_lsx(const uint8_t *src,
+ int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter,
+ int32_t height)
+{
+ if (height == 4) {
+ common_hz_8t_and_aver_dst_4x4_lsx(src, src_stride, dst, dst_stride, filter);
+ } else if (height == 8) {
+ common_hz_8t_and_aver_dst_4x8_lsx(src, src_stride, dst, dst_stride, filter);
+ }
+}
+
+static void common_hz_8t_and_aver_dst_8w_lsx(const uint8_t *src,
+ int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter,
+ int32_t height)
+{
+ int32_t loop_cnt = height >> 2;
+ uint8_t *dst_tmp = dst;
+ __m128i src0, src1, src2, src3, filter0, filter1, filter2, filter3;
+ __m128i mask0, mask1, mask2, mask3;
+ __m128i tmp0, tmp1, tmp2, tmp3;
+ __m128i dst0, dst1, dst2, dst3;
+ int32_t src_stride2 = src_stride << 1;
+ int32_t src_stride3 = src_stride2 + src_stride;
+ int32_t src_stride4 = src_stride2 << 1;
+ uint8_t *_src = (uint8_t*)src - 3;
+
+ mask0 = __lsx_vld(mc_filt_mask_arr, 0);
+ DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
+ mask3 = __lsx_vaddi_bu(mask0, 6);
+ DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
+ filter0, filter1, filter2, filter3);
+
+ for (;loop_cnt--;) {
+ src0 = __lsx_vld(_src, 0);
+ DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1, src2);
+ src3 = __lsx_vldx(_src, src_stride3);
+ _src += src_stride4;
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
+ src0, src1, src2, src3);
+ HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
+ mask3, filter0, filter1, filter2, filter3, tmp0, tmp1, tmp2, tmp3);
+ dst0 = __lsx_vldrepl_d(dst_tmp, 0);
+ dst_tmp += dst_stride;
+ dst1 = __lsx_vldrepl_d(dst_tmp, 0);
+ dst_tmp += dst_stride;
+ dst2 = __lsx_vldrepl_d(dst_tmp, 0);
+ dst_tmp += dst_stride;
+ dst3 = __lsx_vldrepl_d(dst_tmp, 0);
+ dst_tmp += dst_stride;
+ DUP2_ARG2(__lsx_vilvl_d, dst1, dst0, dst3, dst2, dst0, dst1);
+ DUP2_ARG3(__lsx_vssrarni_b_h, tmp1, tmp0, 7, tmp3, tmp2, 7, tmp0, tmp1);
+ DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
+ DUP2_ARG2(__lsx_vavgr_bu, tmp0, dst0, tmp1, dst1, dst0, dst1);
+ __lsx_vstelm_d(dst0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(dst0, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_d(dst1, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(dst1, dst, 0, 1);
+ dst += dst_stride;
+ }
+}
+
+static void common_hz_8t_and_aver_dst_16w_lsx(const uint8_t *src,
+ int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter,
+ int32_t height)
+{
+ int32_t loop_cnt = height >> 1;
+ int32_t dst_stride2 = dst_stride << 1;
+ uint8_t *dst_tmp = dst;
+ __m128i src0, src1, src2, src3, filter0, filter1, filter2, filter3;
+ __m128i mask0, mask1, mask2, mask3, dst0, dst1, dst2, dst3;
+ __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ __m128i tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15;
+
+ mask0 = __lsx_vld(mc_filt_mask_arr, 0);
+ src -= 3;
+ DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
+ mask3 = __lsx_vaddi_bu(mask0, 6);
+ DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
+ filter0, filter1, filter2, filter3);
+
+ for (;loop_cnt--;) {
+ DUP2_ARG2(__lsx_vld, src, 0, src, 8, src0, src1);
+ src += src_stride;
+ DUP2_ARG2(__lsx_vld, src, 0, src, 8, src2, src3);
+ src += src_stride;
+ dst0 = __lsx_vld(dst_tmp, 0);
+ dst1 = __lsx_vldx(dst_tmp, dst_stride);
+ dst_tmp += dst_stride2;
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
+ src0, src1, src2, src3);
+ DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask0, src1, src1, mask0, src2, src2,
+ mask0, src3, src3, mask0, tmp0, tmp1, tmp2, tmp3);
+ DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask1, src1, src1, mask1, src2, src2,
+ mask1, src3, src3, mask1, tmp4, tmp5, tmp6, tmp7);
+ DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask2, src1, src1, mask2, src2, src2,
+ mask2, src3, src3, mask2, tmp8, tmp9, tmp10, tmp11);
+ DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask3, src1, src1, mask3, src2, src2,
+ mask3, src3, src3, mask3, tmp12, tmp13, tmp14, tmp15);
+ DUP4_ARG2(__lsx_vdp2_h_b, tmp0, filter0, tmp1, filter0, tmp2, filter0, tmp3,
+ filter0, tmp0, tmp1, tmp2, tmp3);
+ DUP4_ARG2(__lsx_vdp2_h_b, tmp8, filter2, tmp9, filter2, tmp10, filter2, tmp11,
+ filter2, tmp8, tmp9, tmp10, tmp11);
+ DUP4_ARG3(__lsx_vdp2add_h_b, tmp0, tmp4, filter1, tmp1, tmp5, filter1, tmp2,
+ tmp6, filter1, tmp3, tmp7, filter1, tmp0, tmp1, tmp2, tmp3);
+ DUP4_ARG3(__lsx_vdp2add_h_b, tmp8, tmp12, filter3, tmp9, tmp13, filter3, tmp10,
+ tmp14, filter3, tmp11, tmp15, filter3, tmp4, tmp5, tmp6, tmp7);
+ DUP4_ARG2(__lsx_vsadd_h, tmp0, tmp4, tmp1, tmp5, tmp2, tmp6, tmp3, tmp7,
+ tmp0, tmp1, tmp2, tmp3);
+ DUP2_ARG3(__lsx_vssrarni_b_h, tmp1, tmp0, 7, tmp3, tmp2, 7, dst2, dst3);
+ DUP2_ARG2(__lsx_vxori_b, dst2, 128, dst3, 128, dst2, dst3);
+ DUP2_ARG2(__lsx_vavgr_bu, dst0, dst2, dst1, dst3, dst0, dst1);
+ __lsx_vst(dst0, dst, 0);
+ __lsx_vstx(dst1, dst, dst_stride);
+ dst += dst_stride2;
+ }
+}
+
+static void common_hz_8t_and_aver_dst_32w_lsx(const uint8_t *src,
+ int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter,
+ int32_t height)
+{
+ uint32_t loop_cnt = height;
+ uint8_t *dst_tmp = dst;
+ __m128i src0, src1, src2, src3, filter0, filter1, filter2, filter3;
+ __m128i mask0, mask1, mask2, mask3, dst0, dst1;
+ __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ __m128i tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15;
+ __m128i shuff = {0x0F0E0D0C0B0A0908, 0x1716151413121110};
+
+ mask0 = __lsx_vld(mc_filt_mask_arr, 0);
+ src -= 3;
+ DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
+ mask3 = __lsx_vaddi_bu(mask0, 6);
+ DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
+ filter0, filter1, filter2, filter3);
+
+ for (;loop_cnt--;) {
+ DUP2_ARG2(__lsx_vld, src, 0, src, 16, src0, src2);
+ src3 = __lsx_vld(src, 24);
+ src1 = __lsx_vshuf_b(src2, src0, shuff);
+ src += src_stride;
+ DUP2_ARG2(__lsx_vld, dst_tmp, 0, dst_tmp, 16, dst0, dst1);
+ dst_tmp += dst_stride;
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
+ src0, src1, src2, src3);
+ DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask0, src1, src1, mask0, src2,
+ src2, mask0, src3, src3, mask0, tmp0, tmp1, tmp2, tmp3);
+ DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask1, src1, src1, mask1, src2,
+ src2, mask1, src3, src3, mask1, tmp4, tmp5, tmp6, tmp7);
+ DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask2, src1, src1, mask2, src2,
+ src2, mask2, src3, src3, mask2, tmp8, tmp9, tmp10, tmp11);
+ DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask3, src1, src1, mask3, src2,
+ src2, mask3, src3, src3, mask3, tmp12, tmp13, tmp14, tmp15);
+ DUP4_ARG2(__lsx_vdp2_h_b, tmp0, filter0, tmp1, filter0, tmp2, filter0,
+ tmp3, filter0, tmp0, tmp1, tmp2, tmp3);
+ DUP4_ARG2(__lsx_vdp2_h_b, tmp8, filter2, tmp9, filter2, tmp10, filter2,
+ tmp11, filter2, tmp8, tmp9, tmp10, tmp11);
+ DUP4_ARG3(__lsx_vdp2add_h_b, tmp0, tmp4, filter1, tmp1, tmp5, filter1,
+ tmp2, tmp6, filter1, tmp3, tmp7, filter1, tmp0, tmp1, tmp2, tmp3);
+ DUP4_ARG3(__lsx_vdp2add_h_b, tmp8, tmp12, filter3, tmp9, tmp13, filter3,
+ tmp10, tmp14, filter3, tmp11, tmp15, filter3, tmp4, tmp5, tmp6, tmp7);
+ DUP4_ARG2(__lsx_vsadd_h, tmp0, tmp4, tmp1, tmp5, tmp2, tmp6, tmp3, tmp7,
+ tmp0, tmp1, tmp2, tmp3);
+ DUP2_ARG3(__lsx_vssrarni_b_h, tmp1, tmp0, 7, tmp3, tmp2, 7, tmp0, tmp1);
+ DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
+ DUP2_ARG2(__lsx_vavgr_bu, dst0, tmp0, dst1, tmp1, dst0, dst1);
+ __lsx_vst(dst0, dst, 0);
+ __lsx_vst(dst1, dst, 16);
+ dst += dst_stride;
+ }
+}
+
+static void common_hz_8t_and_aver_dst_64w_lsx(const uint8_t *src,
+ int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter,
+ int32_t height)
+{
+ int32_t loop_cnt = height;
+ __m128i src0, src1, src2, src3;
+ __m128i filter0, filter1, filter2, filter3;
+ __m128i mask0, mask1, mask2, mask3;
+ __m128i out0, out1, out2, out3, dst0, dst1;
+ __m128i shuff = {0x0F0E0D0C0B0A0908, 0x1716151413121110};
+
+ mask0 = __lsx_vld(mc_filt_mask_arr, 0);
+ src -= 3;
+ DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
+ mask3 = __lsx_vaddi_bu(mask0, 6);
+ DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
+ filter0, filter1, filter2, filter3);
+
+ for (;loop_cnt--;) {
+ DUP2_ARG2(__lsx_vld, src, 0, src, 16, src0, src2);
+ src3 = __lsx_vld(src, 24);
+ src1 = __lsx_vshuf_b(src2, src0, shuff);
+ DUP2_ARG2(__lsx_vld, dst, 0, dst, 16, dst0, dst1);
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
+ src0, src1, src2, src3);
+ HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
+ mask3, filter0, filter1, filter2, filter3, out0, out1, out2, out3);
+ DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7, out0, out1);
+ DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
+ DUP2_ARG2(__lsx_vavgr_bu, out0, dst0, out1, dst1, out0, out1);
+ __lsx_vst(out0, dst, 0);
+ __lsx_vst(out1, dst, 16);
+
+ DUP2_ARG2(__lsx_vld, src, 32, src, 48, src0, src2);
+ src3 = __lsx_vld(src, 56);
+ src1 = __lsx_vshuf_b(src2, src0, shuff);
+ DUP2_ARG2(__lsx_vld, dst, 32, dst, 48, dst0, dst1);
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
+ src0, src1, src2, src3);
+ HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
+ mask3, filter0, filter1, filter2, filter3, out0, out1, out2, out3);
+ DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7, out0, out1);
+ DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
+ DUP2_ARG2(__lsx_vavgr_bu, out0, dst0, out1, dst1, out0, out1);
+ __lsx_vst(out0, dst, 32);
+ __lsx_vst(out1, dst, 48);
+ src += src_stride;
+ dst += dst_stride;
+ }
+}
+
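+/* Vertical 8-tap filters with averaging against the existing destination. */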
+static void common_vt_8t_and_aver_dst_4w_lsx(const uint8_t *src,
+ int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter,
+ int32_t height)
+{
+ uint32_t loop_cnt = height >> 2;
+ uint8_t *dst_tmp = dst;
+ __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
+ __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
+ __m128i reg0, reg1, reg2, reg3, reg4;
+ __m128i filter0, filter1, filter2, filter3;
+ __m128i out0, out1;
+ int32_t src_stride2 = src_stride << 1;
+ int32_t src_stride3 = src_stride + src_stride2;
+ int32_t src_stride4 = src_stride2 << 1;
+ uint8_t* _src = (uint8_t*)src - src_stride3;
+
+ DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
+ filter0, filter1, filter2, filter3);
+ src0 = __lsx_vld(_src, 0);
+ DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1, src2);
+ src3 = __lsx_vldx(_src, src_stride3);
+ _src += src_stride4;
+ src4 = __lsx_vld(_src, 0);
+ DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src5, src6);
+ _src += src_stride3;
+ DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src5, src4, src2, src1,
+ tmp0, tmp1, tmp2, tmp3);
+ DUP2_ARG2(__lsx_vilvl_b, src4, src3, src6, src5, tmp4, tmp5);
+ DUP2_ARG2(__lsx_vilvl_d, tmp3, tmp0, tmp4, tmp1, reg0, reg1);
+ reg2 = __lsx_vilvl_d(tmp5, tmp2);
+ DUP2_ARG2(__lsx_vxori_b, reg0, 128, reg1, 128, reg0, reg1);
+ reg2 = __lsx_vxori_b(reg2, 128);
+
+ for (;loop_cnt--;) {
+ src7 = __lsx_vld(_src, 0);
+ DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src8, src9);
+ src10 = __lsx_vldx(_src, src_stride3);
+ _src += src_stride4;
+ src0 = __lsx_vldrepl_w(dst_tmp, 0);
+ dst_tmp += dst_stride;
+ src1 = __lsx_vldrepl_w(dst_tmp, 0);
+ dst_tmp += dst_stride;
+ src2 = __lsx_vldrepl_w(dst_tmp, 0);
+ dst_tmp += dst_stride;
+ src3 = __lsx_vldrepl_w(dst_tmp, 0);
+ dst_tmp += dst_stride;
+ DUP2_ARG2(__lsx_vilvl_w, src1, src0, src3, src2, src0, src1);
+ src0 = __lsx_vilvl_d(src1, src0);
+ DUP4_ARG2(__lsx_vilvl_b, src7, src6, src8, src7, src9, src8, src10,
+ src9, tmp0, tmp1, tmp2, tmp3);
+ DUP2_ARG2(__lsx_vilvl_d, tmp1, tmp0, tmp3, tmp2, reg3, reg4);
+ DUP2_ARG2(__lsx_vxori_b, reg3, 128, reg4, 128, reg3, reg4);
+ out0 = FILT_8TAP_DPADD_S_H(reg0, reg1, reg2, reg3, filter0,
+ filter1, filter2, filter3);
+ out1 = FILT_8TAP_DPADD_S_H(reg1, reg2, reg3, reg4, filter0,
+ filter1, filter2, filter3);
+ out0 = __lsx_vssrarni_b_h(out1, out0, 7);
+ out0 = __lsx_vxori_b(out0, 128);
+ out0 = __lsx_vavgr_bu(out0, src0);
+ __lsx_vstelm_w(out0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_w(out0, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_w(out0, dst, 0, 2);
+ dst += dst_stride;
+ __lsx_vstelm_w(out0, dst, 0, 3);
+ dst += dst_stride;
+ reg0 = reg2;
+ reg1 = reg3;
+ reg2 = reg4;
+ src6 = src10;
+ }
+}
+
+static void common_vt_8t_and_aver_dst_8w_lsx(const uint8_t *src,
+ int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter,
+ int32_t height)
+{
+ uint32_t loop_cnt = height >> 2;
+ uint8_t *dst_tmp = dst;
+ __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
+ __m128i tmp0, tmp1, tmp2, tmp3;
+ __m128i reg0, reg1, reg2, reg3, reg4, reg5;
+ __m128i filter0, filter1, filter2, filter3;
+ __m128i out0, out1, out2, out3;
+ int32_t src_stride2 = src_stride << 1;
+ int32_t src_stride3 = src_stride + src_stride2;
+ int32_t src_stride4 = src_stride2 << 1;
+ uint8_t* _src = (uint8_t*)src - src_stride3;
+
+ DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
+ filter0, filter1, filter2, filter3);
+
+ src0 = __lsx_vld(_src, 0);
+ DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1, src2);
+ src3 = __lsx_vldx(_src, src_stride3);
+ _src += src_stride4;
+ src4 = __lsx_vld(_src, 0);
+ DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src5, src6);
+ _src += src_stride3;
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
+ src0, src1, src2, src3);
+ DUP2_ARG2(__lsx_vxori_b, src4, 128, src5, 128, src4, src5);
+ src6 = __lsx_vxori_b(src6, 128);
+ DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src5, src4, src2,
+ src1, reg0, reg1, reg2, reg3);
+ DUP2_ARG2(__lsx_vilvl_b, src4, src3, src6, src5, reg4, reg5);
+
+ for (;loop_cnt--;) {
+ src7 = __lsx_vld(_src, 0);
+ DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src8, src9);
+ src10 = __lsx_vldx(_src, src_stride3);
+ _src += src_stride4;
+ src0 = __lsx_vldrepl_d(dst_tmp, 0);
+ dst_tmp += dst_stride;
+ src1 = __lsx_vldrepl_d(dst_tmp, 0);
+ dst_tmp += dst_stride;
+ src2 = __lsx_vldrepl_d(dst_tmp, 0);
+ dst_tmp += dst_stride;
+ src3 = __lsx_vldrepl_d(dst_tmp, 0);
+ dst_tmp += dst_stride;
+ DUP2_ARG2(__lsx_vilvl_d, src1, src0, src3, src2, src0, src1);
+ DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128, src10, 128,
+ src7, src8, src9, src10);
+ DUP4_ARG2(__lsx_vilvl_b, src7, src6, src8, src7, src9, src8, src10,
+ src9, tmp0, tmp1, tmp2, tmp3);
+ out0 = FILT_8TAP_DPADD_S_H(reg0, reg1, reg2, tmp0, filter0,
+ filter1, filter2, filter3);
+ out1 = FILT_8TAP_DPADD_S_H(reg3, reg4, reg5, tmp1, filter0,
+ filter1, filter2, filter3);
+ out2 = FILT_8TAP_DPADD_S_H(reg1, reg2, tmp0, tmp2, filter0,
+ filter1, filter2, filter3);
+ out3 = FILT_8TAP_DPADD_S_H(reg4, reg5, tmp1, tmp3, filter0,
+ filter1, filter2, filter3);
+ DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7, out0, out1);
+ DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
+ DUP2_ARG2(__lsx_vavgr_bu, out0, src0, out1, src1, out0, out1);
+ __lsx_vstelm_d(out0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(out0, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_d(out1, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(out1, dst, 0, 1);
+ dst += dst_stride;
+
+ reg0 = reg2;
+ reg1 = tmp0;
+ reg2 = tmp2;
+ reg3 = reg5;
+ reg4 = tmp1;
+ reg5 = tmp3;
+ src6 = src10;
+ }
+}
+
+static void common_vt_8t_and_aver_dst_16w_mult_lsx(const uint8_t *src,
+ int32_t src_stride,
+ uint8_t *dst,
+ int32_t dst_stride,
+ const int8_t *filter,
+ int32_t height,
+ int32_t width)
+{
+ uint8_t *src_tmp;
+ uint32_t cnt = width >> 4;
+ __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
+ __m128i filter0, filter1, filter2, filter3;
+ __m128i reg0, reg1, reg2, reg3, reg4, reg5;
+ __m128i reg6, reg7, reg8, reg9, reg10, reg11;
+ __m128i tmp0, tmp1, tmp2, tmp3;
+ int32_t src_stride2 = src_stride << 1;
+ int32_t src_stride3 = src_stride + src_stride2;
+ int32_t src_stride4 = src_stride2 << 1;
+ int32_t dst_stride2 = dst_stride << 1;
+ int32_t dst_stride3 = dst_stride2 + dst_stride;
+ int32_t dst_stride4 = dst_stride2 << 1;
+ uint8_t *_src = (uint8_t*)src - src_stride3;
+
+ DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
+ filter0, filter1, filter2, filter3);
+ for (;cnt--;) {
+ uint32_t loop_cnt = height >> 2;
+ uint8_t *dst_reg = dst;
+
+ src_tmp = _src;
+ src0 = __lsx_vld(src_tmp, 0);
+ DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride2,
+ src1, src2);
+ src3 = __lsx_vldx(src_tmp, src_stride3);
+ src_tmp += src_stride4;
+ src4 = __lsx_vld(src_tmp, 0);
+ DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride2,
+ src5, src6);
+ src_tmp += src_stride3;
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
+ src0, src1, src2, src3);
+ DUP2_ARG2(__lsx_vxori_b, src4, 128, src5, 128, src4, src5);
+ src6 = __lsx_vxori_b(src6, 128);
+ DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src5, src4, src2, src1,
+ reg0, reg1, reg2, reg3);
+ DUP2_ARG2(__lsx_vilvl_b, src4, src3, src6, src5, reg4, reg5);
+ DUP4_ARG2(__lsx_vilvh_b, src1, src0, src3, src2, src5, src4, src2, src1,
+ reg6, reg7, reg8, reg9);
+ DUP2_ARG2(__lsx_vilvh_b, src4, src3, src6, src5, reg10, reg11);
+
+ for (;loop_cnt--;) {
+ src7 = __lsx_vld(src_tmp, 0);
+ DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp, src_stride2,
+ src8, src9);
+ src10 = __lsx_vldx(src_tmp, src_stride3);
+ src_tmp += src_stride4;
+ DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128, src10,
+ 128, src7, src8, src9, src10);
+ DUP4_ARG2(__lsx_vilvl_b, src7, src6, src8, src7, src9, src8,
+ src10, src9, src0, src1, src2, src3);
+ DUP4_ARG2(__lsx_vilvh_b, src7, src6, src8, src7, src9, src8,
+ src10, src9, src4, src5, src7, src8);
+ tmp0 = FILT_8TAP_DPADD_S_H(reg0, reg1, reg2, src0, filter0,
+ filter1, filter2, filter3);
+ tmp1 = FILT_8TAP_DPADD_S_H(reg3, reg4, reg5, src1, filter0,
+ filter1, filter2, filter3);
+ tmp2 = FILT_8TAP_DPADD_S_H(reg6, reg7, reg8, src4, filter0,
+ filter1, filter2, filter3);
+ tmp3 = FILT_8TAP_DPADD_S_H(reg9, reg10, reg11, src5, filter0,
+ filter1, filter2, filter3);
+ DUP2_ARG3(__lsx_vssrarni_b_h, tmp2, tmp0, 7, tmp3, tmp1, 7,
+ tmp0, tmp1);
+ DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
+ tmp2 = __lsx_vld(dst_reg, 0);
+ tmp3 = __lsx_vldx(dst_reg, dst_stride);
+ DUP2_ARG2(__lsx_vavgr_bu, tmp0, tmp2, tmp1, tmp3, tmp0, tmp1);
+ __lsx_vst(tmp0, dst_reg, 0);
+ __lsx_vstx(tmp1, dst_reg, dst_stride);
+ tmp0 = FILT_8TAP_DPADD_S_H(reg1, reg2, src0, src2, filter0,
+ filter1, filter2, filter3);
+ tmp1 = FILT_8TAP_DPADD_S_H(reg4, reg5, src1, src3, filter0,
+ filter1, filter2, filter3);
+ tmp2 = FILT_8TAP_DPADD_S_H(reg7, reg8, src4, src7, filter0,
+ filter1, filter2, filter3);
+ tmp3 = FILT_8TAP_DPADD_S_H(reg10, reg11, src5, src8, filter0,
+ filter1, filter2, filter3);
+ DUP2_ARG3(__lsx_vssrarni_b_h, tmp2, tmp0, 7, tmp3, tmp1, 7,
+ tmp0, tmp1);
+ DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
+ tmp2 = __lsx_vldx(dst_reg, dst_stride2);
+ tmp3 = __lsx_vldx(dst_reg, dst_stride3);
+ DUP2_ARG2(__lsx_vavgr_bu, tmp0, tmp2, tmp1, tmp3, tmp0, tmp1);
+ __lsx_vstx(tmp0, dst_reg, dst_stride2);
+ __lsx_vstx(tmp1, dst_reg, dst_stride3);
+ dst_reg += dst_stride4;
+
+ reg0 = reg2;
+ reg1 = src0;
+ reg2 = src2;
+ reg3 = reg5;
+ reg4 = src1;
+ reg5 = src3;
+ reg6 = reg8;
+ reg7 = src4;
+ reg8 = src7;
+ reg9 = reg11;
+ reg10 = src5;
+ reg11 = src8;
+ src6 = src10;
+ }
+ _src += 16;
+ dst += 16;
+ }
+}
+
+static void common_vt_8t_and_aver_dst_16w_lsx(const uint8_t *src,
+ int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter,
+ int32_t height)
+{
+ common_vt_8t_and_aver_dst_16w_mult_lsx(src, src_stride, dst, dst_stride,
+ filter, height, 16);
+}
+
+static void common_vt_8t_and_aver_dst_32w_lsx(const uint8_t *src,
+ int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter,
+ int32_t height)
+{
+ common_vt_8t_and_aver_dst_16w_mult_lsx(src, src_stride, dst, dst_stride,
+ filter, height, 32);
+}
+
+static void common_vt_8t_and_aver_dst_64w_lsx(const uint8_t *src,
+ int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ const int8_t *filter,
+ int32_t height)
+{
+ common_vt_8t_and_aver_dst_16w_mult_lsx(src, src_stride, dst, dst_stride,
+ filter, height, 64);
+}
+
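+/* Combined horizontal/vertical 8-tap filters with averaging against the
+ * existing destination. */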
+static void common_hv_8ht_8vt_and_aver_dst_4w_lsx(const uint8_t *src,
+ int32_t src_stride,
+ uint8_t *dst,
+ int32_t dst_stride,
+ const int8_t *filter_horiz,
+ const int8_t *filter_vert,
+ int32_t height)
+{
+ uint32_t loop_cnt = height >> 2;
+ uint8_t *dst_tmp = dst;
+ __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
+ __m128i filt_hz0, filt_hz1, filt_hz2, filt_hz3;
+ __m128i filt_vt0, filt_vt1, filt_vt2, filt_vt3;
+ __m128i mask0, mask1, mask2, mask3;
+ __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
+ __m128i out0, out1;
+ __m128i shuff = {0x0F0E0D0C0B0A0908, 0x1716151413121110};
+ int32_t src_stride2 = src_stride << 1;
+ int32_t src_stride3 = src_stride + src_stride2;
+ int32_t src_stride4 = src_stride2 << 1;
+ uint8_t* _src = (uint8_t*)src - 3 - src_stride3;
+
+ mask0 = __lsx_vld(mc_filt_mask_arr, 16);
+ DUP4_ARG2(__lsx_vldrepl_h, filter_horiz, 0, filter_horiz, 2, filter_horiz,
+ 4, filter_horiz, 6, filt_hz0, filt_hz1, filt_hz2, filt_hz3);
+ DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
+ mask3 = __lsx_vaddi_bu(mask0, 6);
+
+ src0 = __lsx_vld(_src, 0);
+ DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1, src2);
+ src3 = __lsx_vldx(_src, src_stride3);
+ _src += src_stride4;
+ src4 = __lsx_vld(_src, 0);
+ DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src5, src6);
+ _src += src_stride3;
+
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
+ src0, src1, src2, src3);
+ DUP2_ARG2(__lsx_vxori_b, src4, 128, src5, 128, src4, src5);
+ src6 = __lsx_vxori_b(src6, 128);
+
+ tmp0 = HORIZ_8TAP_FILT(src0, src1, mask0, mask1, mask2, mask3, filt_hz0,
+ filt_hz1, filt_hz2, filt_hz3);
+ tmp2 = HORIZ_8TAP_FILT(src2, src3, mask0, mask1, mask2, mask3, filt_hz0,
+ filt_hz1, filt_hz2, filt_hz3);
+ tmp4 = HORIZ_8TAP_FILT(src4, src5, mask0, mask1, mask2, mask3, filt_hz0,
+ filt_hz1, filt_hz2, filt_hz3);
+ tmp5 = HORIZ_8TAP_FILT(src5, src6, mask0, mask1, mask2, mask3, filt_hz0,
+ filt_hz1, filt_hz2, filt_hz3);
+ DUP2_ARG3(__lsx_vshuf_b, tmp2, tmp0, shuff, tmp4, tmp2, shuff, tmp1, tmp3);
+ DUP4_ARG2(__lsx_vldrepl_h, filter_vert, 0, filter_vert, 2, filter_vert, 4,
+ filter_vert, 6, filt_vt0, filt_vt1, filt_vt2, filt_vt3);
+ DUP2_ARG2(__lsx_vpackev_b, tmp1, tmp0, tmp3, tmp2, tmp0, tmp1);
+ tmp2 = __lsx_vpackev_b(tmp5, tmp4);
+
+ for (;loop_cnt--;) {
+ src7 = __lsx_vld(_src, 0);
+ DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src8, src9);
+ src10 = __lsx_vldx(_src, src_stride3);
+ _src += src_stride4;
+ src2 = __lsx_vldrepl_w(dst_tmp, 0);
+ dst_tmp += dst_stride;
+ src3 = __lsx_vldrepl_w(dst_tmp, 0);
+ dst_tmp += dst_stride;
+ src4 = __lsx_vldrepl_w(dst_tmp, 0);
+ dst_tmp += dst_stride;
+ src5 = __lsx_vldrepl_w(dst_tmp, 0);
+ dst_tmp += dst_stride;
+ DUP2_ARG2(__lsx_vilvl_w, src3, src2, src5, src4, src2, src3);
+ src2 = __lsx_vilvl_d(src3, src2);
+ DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128, src10, 128,
+ src7, src8, src9, src10);
+ tmp3 = HORIZ_8TAP_FILT(src7, src8, mask0, mask1, mask2, mask3, filt_hz0,
+ filt_hz1, filt_hz2, filt_hz3);
+ tmp4 = __lsx_vshuf_b(tmp3, tmp5, shuff);
+ tmp4 = __lsx_vpackev_b(tmp3, tmp4);
+ out0 = FILT_8TAP_DPADD_S_H(tmp0, tmp1, tmp2, tmp4, filt_vt0, filt_vt1,
+ filt_vt2, filt_vt3);
+ src1 = HORIZ_8TAP_FILT(src9, src10, mask0, mask1, mask2, mask3,
+ filt_hz0, filt_hz1, filt_hz2, filt_hz3);
+ src0 = __lsx_vshuf_b(src1, tmp3, shuff);
+ src0 = __lsx_vpackev_b(src1, src0);
+ out1 = FILT_8TAP_DPADD_S_H(tmp1, tmp2, tmp4, src0, filt_vt0, filt_vt1,
+ filt_vt2, filt_vt3);
+ out0 = __lsx_vssrarni_b_h(out1, out0, 7);
+ out0 = __lsx_vxori_b(out0, 128);
+ out0 = __lsx_vavgr_bu(out0, src2);
+ __lsx_vstelm_w(out0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_w(out0, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_w(out0, dst, 0, 2);
+ dst += dst_stride;
+ __lsx_vstelm_w(out0, dst, 0, 3);
+ dst += dst_stride;
+
+ tmp5 = src1;
+ tmp0 = tmp2;
+ tmp1 = tmp4;
+ tmp2 = src0;
+ }
+}
+
+static void common_hv_8ht_8vt_and_aver_dst_8w_lsx(const uint8_t *src,
+ int32_t src_stride,
+ uint8_t *dst,
+ int32_t dst_stride,
+ const int8_t *filter_horiz,
+ const int8_t *filter_vert,
+ int32_t height)
+{
+ uint32_t loop_cnt = height >> 2;
+ uint8_t *dst_tmp = dst;
+ __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;
+ __m128i filt_hz0, filt_hz1, filt_hz2, filt_hz3;
+ __m128i filt_vt0, filt_vt1, filt_vt2, filt_vt3;
+ __m128i mask0, mask1, mask2, mask3;
+ __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
+ __m128i out0, out1;
+ int32_t src_stride2 = src_stride << 1;
+ int32_t src_stride3 = src_stride + src_stride2;
+ int32_t src_stride4 = src_stride2 << 1;
+ uint8_t* _src = (uint8_t*)src - 3 - src_stride3;
+
+ mask0 = __lsx_vld(mc_filt_mask_arr, 0);
+ DUP4_ARG2(__lsx_vldrepl_h, filter_horiz, 0, filter_horiz, 2, filter_horiz,
+ 4, filter_horiz, 6, filt_hz0, filt_hz1, filt_hz2, filt_hz3);
+ DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
+ mask3 = __lsx_vaddi_bu(mask0, 6);
+
+ src0 = __lsx_vld(_src, 0);
+ DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1, src2);
+ src3 = __lsx_vldx(_src, src_stride3);
+ _src += src_stride4;
+ src4 = __lsx_vld(_src, 0);
+ DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src5, src6);
+ _src += src_stride3;
+ DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
+ src0, src1, src2, src3);
+ DUP2_ARG2(__lsx_vxori_b, src4, 128, src5, 128, src4, src5);
+ src6 = __lsx_vxori_b(src6, 128);
+
+ src0 = HORIZ_8TAP_FILT(src0, src0, mask0, mask1, mask2, mask3, filt_hz0,
+ filt_hz1, filt_hz2, filt_hz3);
+ src1 = HORIZ_8TAP_FILT(src1, src1, mask0, mask1, mask2, mask3, filt_hz0,
+ filt_hz1, filt_hz2, filt_hz3);
+ src2 = HORIZ_8TAP_FILT(src2, src2, mask0, mask1, mask2, mask3, filt_hz0,
+ filt_hz1, filt_hz2, filt_hz3);
+ src3 = HORIZ_8TAP_FILT(src3, src3, mask0, mask1, mask2, mask3, filt_hz0,
+ filt_hz1, filt_hz2, filt_hz3);
+ src4 = HORIZ_8TAP_FILT(src4, src4, mask0, mask1, mask2, mask3, filt_hz0,
+ filt_hz1, filt_hz2, filt_hz3);
+ src5 = HORIZ_8TAP_FILT(src5, src5, mask0, mask1, mask2, mask3, filt_hz0,
+ filt_hz1, filt_hz2, filt_hz3);
+ src6 = HORIZ_8TAP_FILT(src6, src6, mask0, mask1, mask2, mask3, filt_hz0,
+ filt_hz1, filt_hz2, filt_hz3);
+
+ DUP4_ARG2(__lsx_vldrepl_h, filter_vert, 0, filter_vert, 2, filter_vert, 4,
+ filter_vert, 6, filt_vt0, filt_vt1, filt_vt2, filt_vt3);
+ DUP4_ARG2(__lsx_vpackev_b, src1, src0, src3, src2, src5, src4,
+ src2, src1, tmp0, tmp1, tmp2, tmp4);
+ DUP2_ARG2(__lsx_vpackev_b, src4, src3, src6, src5, tmp5, tmp6);
+
+ for (;loop_cnt--;) {
+ src7 = __lsx_vld(_src, 0);
+ DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src8, src9);
+ src10 = __lsx_vldx(_src, src_stride3);
+ _src += src_stride4;
+
+ DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128, src10, 128,
+ src7, src8, src9, src10);
+ src7 = HORIZ_8TAP_FILT(src7, src7, mask0, mask1, mask2, mask3, filt_hz0,
+ filt_hz1, filt_hz2, filt_hz3);
+ tmp3 = __lsx_vpackev_b(src7, src6);
+ out0 = FILT_8TAP_DPADD_S_H(tmp0, tmp1, tmp2, tmp3, filt_vt0, filt_vt1,
+ filt_vt2, filt_vt3);
+ src8 = HORIZ_8TAP_FILT(src8, src8, mask0, mask1, mask2, mask3, filt_hz0,
+ filt_hz1, filt_hz2, filt_hz3);
+ src0 = __lsx_vpackev_b(src8, src7);
+ out1 = FILT_8TAP_DPADD_S_H(tmp4, tmp5, tmp6, src0, filt_vt0, filt_vt1,
+ filt_vt2, filt_vt3);
+ src9 = HORIZ_8TAP_FILT(src9, src9, mask0, mask1, mask2, mask3, filt_hz0,
+ filt_hz1, filt_hz2, filt_hz3);
+ src1 = __lsx_vpackev_b(src9, src8);
+ src3 = FILT_8TAP_DPADD_S_H(tmp1, tmp2, tmp3, src1, filt_vt0, filt_vt1,
+ filt_vt2, filt_vt3);
+ src10 = HORIZ_8TAP_FILT(src10, src10, mask0, mask1, mask2, mask3, filt_hz0,
+ filt_hz1, filt_hz2, filt_hz3);
+ src2 = __lsx_vpackev_b(src10, src9);
+ src4 = FILT_8TAP_DPADD_S_H(tmp5, tmp6, src0, src2, filt_vt0, filt_vt1,
+ filt_vt2, filt_vt3);
+ DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, src4, src3, 7, out0, out1);
+ DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
+ src5 = __lsx_vldrepl_d(dst_tmp, 0);
+ dst_tmp += dst_stride;
+ src7 = __lsx_vldrepl_d(dst_tmp, 0);
+ dst_tmp += dst_stride;
+ src8 = __lsx_vldrepl_d(dst_tmp, 0);
+ dst_tmp += dst_stride;
+ src9 = __lsx_vldrepl_d(dst_tmp, 0);
+ dst_tmp += dst_stride;
+ DUP2_ARG2(__lsx_vilvl_d, src7, src5, src9, src8, src5, src7);
+ DUP2_ARG2(__lsx_vavgr_bu, out0, src5, out1, src7, out0, out1);
+ __lsx_vstelm_d(out0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(out0, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_d(out1, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(out1, dst, 0, 1);
+ dst += dst_stride;
+
+ src6 = src10;
+ tmp0 = tmp2;
+ tmp1 = tmp3;
+ tmp2 = src1;
+ tmp4 = tmp6;
+ tmp5 = src0;
+ tmp6 = src2;
+ }
+}
+
+static void common_hv_8ht_8vt_and_aver_dst_16w_lsx(const uint8_t *src,
+ int32_t src_stride,
+ uint8_t *dst,
+ int32_t dst_stride,
+ const int8_t *filter_horiz,
+ const int8_t *filter_vert,
+ int32_t height)
+{
+ int32_t multiple8_cnt;
+
+ for (multiple8_cnt = 2; multiple8_cnt--;) {
+ common_hv_8ht_8vt_and_aver_dst_8w_lsx(src, src_stride, dst, dst_stride,
+ filter_horiz, filter_vert,
+ height);
+
+ src += 8;
+ dst += 8;
+ }
+}
+
+static void common_hv_8ht_8vt_and_aver_dst_32w_lsx(const uint8_t *src,
+ int32_t src_stride,
+ uint8_t *dst,
+ int32_t dst_stride,
+ const int8_t *filter_horiz,
+ const int8_t *filter_vert,
+ int32_t height)
+{
+ int32_t multiple8_cnt;
+
+ for (multiple8_cnt = 4; multiple8_cnt--;) {
+ common_hv_8ht_8vt_and_aver_dst_8w_lsx(src, src_stride, dst, dst_stride,
+ filter_horiz, filter_vert,
+ height);
+
+ src += 8;
+ dst += 8;
+ }
+}
+
+static void common_hv_8ht_8vt_and_aver_dst_64w_lsx(const uint8_t *src,
+ int32_t src_stride,
+ uint8_t *dst,
+ int32_t dst_stride,
+ const int8_t *filter_horiz,
+ const int8_t *filter_vert,
+ int32_t height)
+{
+ int32_t multiple8_cnt;
+
+ for (multiple8_cnt = 8; multiple8_cnt--;) {
+ common_hv_8ht_8vt_and_aver_dst_8w_lsx(src, src_stride, dst, dst_stride,
+ filter_horiz, filter_vert,
+ height);
+
+ src += 8;
+ dst += 8;
+ }
+}
+
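+/* Full-pel averaging helpers: the rounded average of src and dst is written
+ * back to dst. */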
+static void avg_width8_lsx(const uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ int32_t height)
+{
+ int32_t cnt = height >> 2;
+ uint8_t *dst_tmp = dst;
+ __m128i src0, src1, dst0, dst1;
+ __m128i tmp0, tmp1, tmp2, tmp3;
+
+ for (;cnt--;) {
+ tmp0 = __lsx_vldrepl_d(src, 0);
+ src += src_stride;
+ tmp1 = __lsx_vldrepl_d(src, 0);
+ src += src_stride;
+ tmp2 = __lsx_vldrepl_d(src, 0);
+ src += src_stride;
+ tmp3 = __lsx_vldrepl_d(src, 0);
+ src += src_stride;
+ DUP2_ARG2(__lsx_vilvl_d, tmp1, tmp0, tmp3, tmp2, src0, src1);
+ tmp0 = __lsx_vldrepl_d(dst_tmp, 0);
+ dst_tmp += dst_stride;
+ tmp1 = __lsx_vldrepl_d(dst_tmp, 0);
+ dst_tmp += dst_stride;
+ tmp2 = __lsx_vldrepl_d(dst_tmp, 0);
+ dst_tmp += dst_stride;
+ tmp3 = __lsx_vldrepl_d(dst_tmp, 0);
+ dst_tmp += dst_stride;
+ DUP2_ARG2(__lsx_vilvl_d, tmp1, tmp0, tmp3, tmp2, dst0, dst1);
+ DUP2_ARG2(__lsx_vavgr_bu, src0, dst0, src1, dst1, dst0, dst1);
+ __lsx_vstelm_d(dst0, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(dst0, dst, 0, 1);
+ dst += dst_stride;
+ __lsx_vstelm_d(dst1, dst, 0, 0);
+ dst += dst_stride;
+ __lsx_vstelm_d(dst1, dst, 0, 1);
+ dst += dst_stride;
+ }
+}
+
+static void avg_width16_lsx(const uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ int32_t height)
+{
+ int32_t cnt = height >> 2;
+ __m128i src0, src1, src2, src3;
+ __m128i dst0, dst1, dst2, dst3;
+ int32_t src_stride2 = src_stride << 1;
+ int32_t src_stride3 = src_stride + src_stride2;
+ int32_t src_stride4 = src_stride2 << 1;
+ int32_t dst_stride2 = dst_stride << 1;
+ int32_t dst_stride3 = dst_stride2 + dst_stride;
+ int32_t dst_stride4 = dst_stride2 << 1;
+ uint8_t* _src = (uint8_t*)src;
+
+ for (;cnt--;) {
+ src0 = __lsx_vld(_src, 0);
+ DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1, src2);
+ src3 = __lsx_vldx(_src, src_stride3);
+ _src += src_stride4;
+
+ dst0 = __lsx_vld(dst, 0);
+ DUP2_ARG2(__lsx_vldx, dst, dst_stride, dst, dst_stride2,
+ dst1, dst2);
+ dst3 = __lsx_vldx(dst, dst_stride3);
+ DUP4_ARG2(__lsx_vavgr_bu, src0, dst0, src1, dst1,
+ src2, dst2, src3, dst3, dst0, dst1, dst2, dst3);
+ __lsx_vst(dst0, dst, 0);
+ __lsx_vstx(dst1, dst, dst_stride);
+ __lsx_vstx(dst2, dst, dst_stride2);
+ __lsx_vstx(dst3, dst, dst_stride3);
+ dst += dst_stride4;
+ }
+}
+
+static void avg_width32_lsx(const uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ int32_t height)
+{
+ int32_t cnt = height >> 2;
+ uint8_t *src_tmp1 = (uint8_t*)src;
+ uint8_t *src_tmp2 = src_tmp1 + 16;
+ uint8_t *dst_tmp1, *dst_tmp2;
+ __m128i src0, src1, src2, src3, src4, src5, src6, src7;
+ __m128i dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
+ int32_t src_stride2 = src_stride << 1;
+ int32_t src_stride3 = src_stride + src_stride2;
+ int32_t src_stride4 = src_stride2 << 1;
+ int32_t dst_stride2 = dst_stride << 1;
+ int32_t dst_stride3 = dst_stride2 + dst_stride;
+ int32_t dst_stride4 = dst_stride2 << 1;
+
+ dst_tmp1 = dst;
+ dst_tmp2 = dst + 16;
+ for (;cnt--;) {
+ src0 = __lsx_vld(src_tmp1, 0);
+ DUP2_ARG2(__lsx_vldx, src_tmp1, src_stride, src_tmp1, src_stride2,
+ src2, src4);
+ src6 = __lsx_vldx(src_tmp1, src_stride3);
+ src_tmp1 += src_stride4;
+
+ src1 = __lsx_vld(src_tmp2, 0);
+ DUP2_ARG2(__lsx_vldx, src_tmp2, src_stride, src_tmp2, src_stride2,
+ src3, src5);
+ src7 = __lsx_vldx(src_tmp2, src_stride3);
+ src_tmp2 += src_stride4;
+
+ dst0 = __lsx_vld(dst_tmp1, 0);
+ DUP2_ARG2(__lsx_vldx, dst_tmp1, dst_stride, dst_tmp1, dst_stride2,
+ dst2, dst4);
+ dst6 = __lsx_vldx(dst_tmp1, dst_stride3);
+ dst1 = __lsx_vld(dst_tmp2, 0);
+ DUP2_ARG2(__lsx_vldx, dst_tmp2, dst_stride, dst_tmp2, dst_stride2,
+ dst3, dst5);
+ dst7 = __lsx_vldx(dst_tmp2, dst_stride3);
+
+ DUP4_ARG2(__lsx_vavgr_bu, src0, dst0, src1, dst1,
+ src2, dst2, src3, dst3, dst0, dst1, dst2, dst3);
+ DUP4_ARG2(__lsx_vavgr_bu, src4, dst4, src5, dst5,
+ src6, dst6, src7, dst7, dst4, dst5, dst6, dst7);
+ __lsx_vst(dst0, dst_tmp1, 0);
+ __lsx_vstx(dst2, dst_tmp1, dst_stride);
+ __lsx_vstx(dst4, dst_tmp1, dst_stride2);
+ __lsx_vstx(dst6, dst_tmp1, dst_stride3);
+ dst_tmp1 += dst_stride4;
+ __lsx_vst(dst1, dst_tmp2, 0);
+ __lsx_vstx(dst3, dst_tmp2, dst_stride);
+ __lsx_vstx(dst5, dst_tmp2, dst_stride2);
+ __lsx_vstx(dst7, dst_tmp2, dst_stride3);
+ dst_tmp2 += dst_stride4;
+ }
+}
+
+static void avg_width64_lsx(const uint8_t *src, int32_t src_stride,
+ uint8_t *dst, int32_t dst_stride,
+ int32_t height)
+{
+ int32_t cnt = height >> 2;
+ uint8_t *dst_tmp = dst;
+ __m128i src0, src1, src2, src3, src4, src5, src6, src7;
+ __m128i src8, src9, src10, src11, src12, src13, src14, src15;
+ __m128i dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
+ __m128i dst8, dst9, dst10, dst11, dst12, dst13, dst14, dst15;
+
+ for (;cnt--;) {
+ DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48,
+ src0, src1, src2, src3);
+ src += src_stride;
+ DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48,
+ src4, src5, src6, src7);
+ src += src_stride;
+ DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48,
+ src8, src9, src10, src11);
+ src += src_stride;
+ DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48,
+ src12, src13, src14, src15);
+ src += src_stride;
+ DUP4_ARG2(__lsx_vld, dst_tmp, 0, dst_tmp, 16, dst_tmp, 32, dst_tmp, 48,
+ dst0, dst1, dst2, dst3);
+ dst_tmp += dst_stride;
+ DUP4_ARG2(__lsx_vld, dst_tmp, 0, dst_tmp, 16, dst_tmp, 32, dst_tmp, 48,
+ dst4, dst5, dst6, dst7);
+ dst_tmp += dst_stride;
+ DUP4_ARG2(__lsx_vld, dst_tmp, 0, dst_tmp, 16, dst_tmp, 32, dst_tmp, 48,
+ dst8, dst9, dst10, dst11);
+ dst_tmp += dst_stride;
+ DUP4_ARG2(__lsx_vld, dst_tmp, 0, dst_tmp, 16, dst_tmp, 32, dst_tmp, 48,
+ dst12, dst13, dst14, dst15);
+ dst_tmp += dst_stride;
+ DUP4_ARG2(__lsx_vavgr_bu, src0, dst0, src1, dst1,
+ src2, dst2, src3, dst3, dst0, dst1, dst2, dst3);
+ DUP4_ARG2(__lsx_vavgr_bu, src4, dst4, src5, dst5,
+ src6, dst6, src7, dst7, dst4, dst5, dst6, dst7);
+ DUP4_ARG2(__lsx_vavgr_bu, src8, dst8, src9, dst9, src10,
+ dst10, src11, dst11, dst8, dst9, dst10, dst11);
+ DUP4_ARG2(__lsx_vavgr_bu, src12, dst12, src13, dst13, src14,
+ dst14, src15, dst15, dst12, dst13, dst14, dst15);
+ __lsx_vst(dst0, dst, 0);
+ __lsx_vst(dst1, dst, 16);
+ __lsx_vst(dst2, dst, 32);
+ __lsx_vst(dst3, dst, 48);
+ dst += dst_stride;
+ __lsx_vst(dst4, dst, 0);
+ __lsx_vst(dst5, dst, 16);
+ __lsx_vst(dst6, dst, 32);
+ __lsx_vst(dst7, dst, 48);
+ dst += dst_stride;
+ __lsx_vst(dst8, dst, 0);
+ __lsx_vst(dst9, dst, 16);
+ __lsx_vst(dst10, dst, 32);
+ __lsx_vst(dst11, dst, 48);
+ dst += dst_stride;
+ __lsx_vst(dst12, dst, 0);
+ __lsx_vst(dst13, dst, 16);
+ __lsx_vst(dst14, dst, 32);
+ __lsx_vst(dst15, dst, 48);
+ dst += dst_stride;
+ }
+}
+
+static const int8_t vp9_subpel_filters_lsx[3][15][8] = {
+ [FILTER_8TAP_REGULAR] = {
+ {0, 1, -5, 126, 8, -3, 1, 0},
+ {-1, 3, -10, 122, 18, -6, 2, 0},
+ {-1, 4, -13, 118, 27, -9, 3, -1},
+ {-1, 4, -16, 112, 37, -11, 4, -1},
+ {-1, 5, -18, 105, 48, -14, 4, -1},
+ {-1, 5, -19, 97, 58, -16, 5, -1},
+ {-1, 6, -19, 88, 68, -18, 5, -1},
+ {-1, 6, -19, 78, 78, -19, 6, -1},
+ {-1, 5, -18, 68, 88, -19, 6, -1},
+ {-1, 5, -16, 58, 97, -19, 5, -1},
+ {-1, 4, -14, 48, 105, -18, 5, -1},
+ {-1, 4, -11, 37, 112, -16, 4, -1},
+ {-1, 3, -9, 27, 118, -13, 4, -1},
+ {0, 2, -6, 18, 122, -10, 3, -1},
+ {0, 1, -3, 8, 126, -5, 1, 0},
+ }, [FILTER_8TAP_SHARP] = {
+ {-1, 3, -7, 127, 8, -3, 1, 0},
+ {-2, 5, -13, 125, 17, -6, 3, -1},
+ {-3, 7, -17, 121, 27, -10, 5, -2},
+ {-4, 9, -20, 115, 37, -13, 6, -2},
+ {-4, 10, -23, 108, 48, -16, 8, -3},
+ {-4, 10, -24, 100, 59, -19, 9, -3},
+ {-4, 11, -24, 90, 70, -21, 10, -4},
+ {-4, 11, -23, 80, 80, -23, 11, -4},
+ {-4, 10, -21, 70, 90, -24, 11, -4},
+ {-3, 9, -19, 59, 100, -24, 10, -4},
+ {-3, 8, -16, 48, 108, -23, 10, -4},
+ {-2, 6, -13, 37, 115, -20, 9, -4},
+ {-2, 5, -10, 27, 121, -17, 7, -3},
+ {-1, 3, -6, 17, 125, -13, 5, -2},
+ {0, 1, -3, 8, 127, -7, 3, -1},
+ }, [FILTER_8TAP_SMOOTH] = {
+ {-3, -1, 32, 64, 38, 1, -3, 0},
+ {-2, -2, 29, 63, 41, 2, -3, 0},
+ {-2, -2, 26, 63, 43, 4, -4, 0},
+ {-2, -3, 24, 62, 46, 5, -4, 0},
+ {-2, -3, 21, 60, 49, 7, -4, 0},
+ {-1, -4, 18, 59, 51, 9, -4, 0},
+ {-1, -4, 16, 57, 53, 12, -4, -1},
+ {-1, -4, 14, 55, 55, 14, -4, -1},
+ {-1, -4, 12, 53, 57, 16, -4, -1},
+ {0, -4, 9, 51, 59, 18, -4, -1},
+ {0, -4, 7, 49, 60, 21, -3, -2},
+ {0, -4, 5, 46, 62, 24, -3, -2},
+ {0, -4, 4, 43, 63, 26, -2, -2},
+ {0, -3, 2, 41, 63, 29, -2, -2},
+ {0, -3, 1, 38, 64, 32, -1, -3},
+ }
+};
+
+#define VP9_8TAP_LOONGARCH_LSX_FUNC(SIZE, type, type_idx) \
+void ff_put_8tap_##type##_##SIZE##h_lsx(uint8_t *dst, ptrdiff_t dststride, \
+ const uint8_t *src, \
+ ptrdiff_t srcstride, \
+ int h, int mx, int my) \
+{ \
+ const int8_t *filter = vp9_subpel_filters_lsx[type_idx][mx-1]; \
+ \
+ common_hz_8t_##SIZE##w_lsx(src, srcstride, dst, dststride, filter, h); \
+} \
+ \
+void ff_put_8tap_##type##_##SIZE##v_lsx(uint8_t *dst, ptrdiff_t dststride, \
+ const uint8_t *src, \
+ ptrdiff_t srcstride, \
+ int h, int mx, int my) \
+{ \
+ const int8_t *filter = vp9_subpel_filters_lsx[type_idx][my-1]; \
+ \
+ common_vt_8t_##SIZE##w_lsx(src, srcstride, dst, dststride, filter, h); \
+} \
+ \
+void ff_put_8tap_##type##_##SIZE##hv_lsx(uint8_t *dst, ptrdiff_t dststride, \
+ const uint8_t *src, \
+ ptrdiff_t srcstride, \
+ int h, int mx, int my) \
+{ \
+ const int8_t *hfilter = vp9_subpel_filters_lsx[type_idx][mx-1]; \
+ const int8_t *vfilter = vp9_subpel_filters_lsx[type_idx][my-1]; \
+ \
+ common_hv_8ht_8vt_##SIZE##w_lsx(src, srcstride, dst, dststride, hfilter, \
+ vfilter, h); \
+} \
+ \
+void ff_avg_8tap_##type##_##SIZE##h_lsx(uint8_t *dst, ptrdiff_t dststride, \
+ const uint8_t *src, \
+ ptrdiff_t srcstride, \
+ int h, int mx, int my) \
+{ \
+ const int8_t *filter = vp9_subpel_filters_lsx[type_idx][mx-1]; \
+ \
+ common_hz_8t_and_aver_dst_##SIZE##w_lsx(src, srcstride, dst, \
+ dststride, filter, h); \
+} \
+ \
+void ff_avg_8tap_##type##_##SIZE##v_lsx(uint8_t *dst, ptrdiff_t dststride, \
+ const uint8_t *src, \
+ ptrdiff_t srcstride, \
+ int h, int mx, int my) \
+{ \
+ const int8_t *filter = vp9_subpel_filters_lsx[type_idx][my-1]; \
+ \
+ common_vt_8t_and_aver_dst_##SIZE##w_lsx(src, srcstride, dst, dststride, \
+ filter, h); \
+} \
+ \
+void ff_avg_8tap_##type##_##SIZE##hv_lsx(uint8_t *dst, ptrdiff_t dststride, \
+ const uint8_t *src, \
+ ptrdiff_t srcstride, \
+ int h, int mx, int my) \
+{ \
+ const int8_t *hfilter = vp9_subpel_filters_lsx[type_idx][mx-1]; \
+ const int8_t *vfilter = vp9_subpel_filters_lsx[type_idx][my-1]; \
+ \
+ common_hv_8ht_8vt_and_aver_dst_##SIZE##w_lsx(src, srcstride, dst, \
+ dststride, hfilter, \
+ vfilter, h); \
+}
+
+#define VP9_COPY_LOONGARCH_LSX_FUNC(SIZE) \
+void ff_copy##SIZE##_lsx(uint8_t *dst, ptrdiff_t dststride, \
+ const uint8_t *src, ptrdiff_t srcstride, \
+ int h, int mx, int my) \
+{ \
+ \
+ copy_width##SIZE##_lsx(src, srcstride, dst, dststride, h); \
+} \
+void ff_avg##SIZE##_lsx(uint8_t *dst, ptrdiff_t dststride, \
+ const uint8_t *src, ptrdiff_t srcstride, \
+ int h, int mx, int my) \
+{ \
+ \
+ avg_width##SIZE##_lsx(src, srcstride, dst, dststride, h); \
+}
+
+VP9_8TAP_LOONGARCH_LSX_FUNC(64, regular, FILTER_8TAP_REGULAR);
+VP9_8TAP_LOONGARCH_LSX_FUNC(32, regular, FILTER_8TAP_REGULAR);
+VP9_8TAP_LOONGARCH_LSX_FUNC(16, regular, FILTER_8TAP_REGULAR);
+VP9_8TAP_LOONGARCH_LSX_FUNC(8, regular, FILTER_8TAP_REGULAR);
+VP9_8TAP_LOONGARCH_LSX_FUNC(4, regular, FILTER_8TAP_REGULAR);
+
+VP9_8TAP_LOONGARCH_LSX_FUNC(64, sharp, FILTER_8TAP_SHARP);
+VP9_8TAP_LOONGARCH_LSX_FUNC(32, sharp, FILTER_8TAP_SHARP);
+VP9_8TAP_LOONGARCH_LSX_FUNC(16, sharp, FILTER_8TAP_SHARP);
+VP9_8TAP_LOONGARCH_LSX_FUNC(8, sharp, FILTER_8TAP_SHARP);
+VP9_8TAP_LOONGARCH_LSX_FUNC(4, sharp, FILTER_8TAP_SHARP);
+
+VP9_8TAP_LOONGARCH_LSX_FUNC(64, smooth, FILTER_8TAP_SMOOTH);
+VP9_8TAP_LOONGARCH_LSX_FUNC(32, smooth, FILTER_8TAP_SMOOTH);
+VP9_8TAP_LOONGARCH_LSX_FUNC(16, smooth, FILTER_8TAP_SMOOTH);
+VP9_8TAP_LOONGARCH_LSX_FUNC(8, smooth, FILTER_8TAP_SMOOTH);
+VP9_8TAP_LOONGARCH_LSX_FUNC(4, smooth, FILTER_8TAP_SMOOTH);
+
+VP9_COPY_LOONGARCH_LSX_FUNC(64);
+VP9_COPY_LOONGARCH_LSX_FUNC(32);
+VP9_COPY_LOONGARCH_LSX_FUNC(16);
+VP9_COPY_LOONGARCH_LSX_FUNC(8);
+
+#undef VP9_8TAP_LOONGARCH_LSX_FUNC
+#undef VP9_COPY_LOONGARCH_LSX_FUNC
diff --git a/libavcodec/loongarch/vp9dsp_init_loongarch.c b/libavcodec/loongarch/vp9dsp_init_loongarch.c
new file mode 100644
index 0000000000..c1e01b4558
--- /dev/null
+++ b/libavcodec/loongarch/vp9dsp_init_loongarch.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2021 Loongson Technology Corporation Limited
+ * Contributed by Hao Chen <chenhao@loongson.cn>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/loongarch/cpu.h"
+#include "libavutil/attributes.h"
+#include "libavcodec/vp9dsp.h"
+#include "vp9dsp_loongarch.h"
+
+#define init_subpel1(idx1, idx2, idxh, idxv, sz, dir, type) \
+ dsp->mc[idx1][FILTER_8TAP_SMOOTH ][idx2][idxh][idxv] = \
+ ff_##type##_8tap_smooth_##sz##dir##_lsx; \
+ dsp->mc[idx1][FILTER_8TAP_REGULAR][idx2][idxh][idxv] = \
+ ff_##type##_8tap_regular_##sz##dir##_lsx; \
+ dsp->mc[idx1][FILTER_8TAP_SHARP ][idx2][idxh][idxv] = \
+ ff_##type##_8tap_sharp_##sz##dir##_lsx;
+
+#define init_subpel2(idx, idxh, idxv, dir, type) \
+ init_subpel1(0, idx, idxh, idxv, 64, dir, type); \
+ init_subpel1(1, idx, idxh, idxv, 32, dir, type); \
+ init_subpel1(2, idx, idxh, idxv, 16, dir, type); \
+ init_subpel1(3, idx, idxh, idxv, 8, dir, type); \
+ init_subpel1(4, idx, idxh, idxv, 4, dir, type);
+
+#define init_subpel3(idx, type) \
+ init_subpel2(idx, 1, 0, h, type); \
+ init_subpel2(idx, 0, 1, v, type); \
+ init_subpel2(idx, 1, 1, hv, type);
+
+#define init_fpel(idx1, idx2, sz, type) \
+ dsp->mc[idx1][FILTER_8TAP_SMOOTH ][idx2][0][0] = ff_##type##sz##_lsx; \
+ dsp->mc[idx1][FILTER_8TAP_REGULAR][idx2][0][0] = ff_##type##sz##_lsx; \
+ dsp->mc[idx1][FILTER_8TAP_SHARP ][idx2][0][0] = ff_##type##sz##_lsx; \
+ dsp->mc[idx1][FILTER_BILINEAR ][idx2][0][0] = ff_##type##sz##_lsx;
+
+#define init_copy(idx, sz) \
+ init_fpel(idx, 0, sz, copy); \
+ init_fpel(idx, 1, sz, avg);
+
+#define init_intra_pred1_lsx(tx, sz) \
+ dsp->intra_pred[tx][VERT_PRED] = ff_vert_##sz##_lsx; \
+ dsp->intra_pred[tx][HOR_PRED] = ff_hor_##sz##_lsx; \
+ dsp->intra_pred[tx][DC_PRED] = ff_dc_##sz##_lsx; \
+ dsp->intra_pred[tx][LEFT_DC_PRED] = ff_dc_left_##sz##_lsx; \
+ dsp->intra_pred[tx][TOP_DC_PRED] = ff_dc_top_##sz##_lsx; \
+ dsp->intra_pred[tx][DC_128_PRED] = ff_dc_128_##sz##_lsx; \
+ dsp->intra_pred[tx][DC_127_PRED] = ff_dc_127_##sz##_lsx; \
+ dsp->intra_pred[tx][DC_129_PRED] = ff_dc_129_##sz##_lsx; \
+ dsp->intra_pred[tx][TM_VP8_PRED] = ff_tm_##sz##_lsx; \
+
+#define init_intra_pred2_lsx(tx, sz) \
+ dsp->intra_pred[tx][DC_PRED] = ff_dc_##sz##_lsx; \
+ dsp->intra_pred[tx][LEFT_DC_PRED] = ff_dc_left_##sz##_lsx; \
+ dsp->intra_pred[tx][TOP_DC_PRED] = ff_dc_top_##sz##_lsx; \
+ dsp->intra_pred[tx][TM_VP8_PRED] = ff_tm_##sz##_lsx; \
+
+av_cold void ff_vp9dsp_init_loongarch(VP9DSPContext *dsp, int bpp)
+{
+ int cpu_flags = av_get_cpu_flags();
+ if (have_lsx(cpu_flags))
+ if (bpp == 8) {
+ init_subpel3(0, put);
+ init_subpel3(1, avg);
+ init_copy(0, 64);
+ init_copy(1, 32);
+ init_copy(2, 16);
+ init_copy(3, 8);
+ init_intra_pred1_lsx(TX_16X16, 16x16);
+ init_intra_pred1_lsx(TX_32X32, 32x32);
+ init_intra_pred2_lsx(TX_4X4, 4x4);
+ init_intra_pred2_lsx(TX_8X8, 8x8);
+ }
+}
+#undef init_subpel1
+#undef init_subpel2
+#undef init_subpel3
+#undef init_copy
+#undef init_fpel
+#undef init_intra_pred1_lsx
+#undef init_intra_pred2_lsx
diff --git a/libavcodec/loongarch/vp9dsp_loongarch.h b/libavcodec/loongarch/vp9dsp_loongarch.h
new file mode 100644
index 0000000000..b469326fdc
--- /dev/null
+++ b/libavcodec/loongarch/vp9dsp_loongarch.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2021 Loongson Technology Corporation Limited
+ * Contributed by Hao Chen <chenhao@loongson.cn>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_LOONGARCH_VP9DSP_LOONGARCH_H
+#define AVCODEC_LOONGARCH_VP9DSP_LOONGARCH_H
+
+#define VP9_8TAP_LOONGARCH_LSX_FUNC(SIZE, type, type_idx) \
+void ff_put_8tap_##type##_##SIZE##h_lsx(uint8_t *dst, ptrdiff_t dststride, \
+ const uint8_t *src, \
+ ptrdiff_t srcstride, \
+ int h, int mx, int my); \
+ \
+void ff_put_8tap_##type##_##SIZE##v_lsx(uint8_t *dst, ptrdiff_t dststride, \
+ const uint8_t *src, \
+ ptrdiff_t srcstride, \
+ int h, int mx, int my); \
+ \
+void ff_put_8tap_##type##_##SIZE##hv_lsx(uint8_t *dst, ptrdiff_t dststride, \
+ const uint8_t *src, \
+ ptrdiff_t srcstride, \
+ int h, int mx, int my); \
+ \
+void ff_avg_8tap_##type##_##SIZE##h_lsx(uint8_t *dst, ptrdiff_t dststride, \
+ const uint8_t *src, \
+ ptrdiff_t srcstride, \
+ int h, int mx, int my); \
+ \
+void ff_avg_8tap_##type##_##SIZE##v_lsx(uint8_t *dst, ptrdiff_t dststride, \
+ const uint8_t *src, \
+ ptrdiff_t srcstride, \
+ int h, int mx, int my); \
+ \
+void ff_avg_8tap_##type##_##SIZE##hv_lsx(uint8_t *dst, ptrdiff_t dststride, \
+ const uint8_t *src, \
+ ptrdiff_t srcstride, \
+ int h, int mx, int my);
+
+#define VP9_COPY_LOONGARCH_LSX_FUNC(SIZE) \
+void ff_copy##SIZE##_lsx(uint8_t *dst, ptrdiff_t dststride, \
+ const uint8_t *src, ptrdiff_t srcstride, \
+ int h, int mx, int my); \
+ \
+void ff_avg##SIZE##_lsx(uint8_t *dst, ptrdiff_t dststride, \
+ const uint8_t *src, ptrdiff_t srcstride, \
+ int h, int mx, int my);
+
+VP9_8TAP_LOONGARCH_LSX_FUNC(64, regular, FILTER_8TAP_REGULAR);
+VP9_8TAP_LOONGARCH_LSX_FUNC(32, regular, FILTER_8TAP_REGULAR);
+VP9_8TAP_LOONGARCH_LSX_FUNC(16, regular, FILTER_8TAP_REGULAR);
+VP9_8TAP_LOONGARCH_LSX_FUNC(8, regular, FILTER_8TAP_REGULAR);
+VP9_8TAP_LOONGARCH_LSX_FUNC(4, regular, FILTER_8TAP_REGULAR);
+
+VP9_8TAP_LOONGARCH_LSX_FUNC(64, sharp, FILTER_8TAP_SHARP);
+VP9_8TAP_LOONGARCH_LSX_FUNC(32, sharp, FILTER_8TAP_SHARP);
+VP9_8TAP_LOONGARCH_LSX_FUNC(16, sharp, FILTER_8TAP_SHARP);
+VP9_8TAP_LOONGARCH_LSX_FUNC(8, sharp, FILTER_8TAP_SHARP);
+VP9_8TAP_LOONGARCH_LSX_FUNC(4, sharp, FILTER_8TAP_SHARP);
+
+VP9_8TAP_LOONGARCH_LSX_FUNC(64, smooth, FILTER_8TAP_SMOOTH);
+VP9_8TAP_LOONGARCH_LSX_FUNC(32, smooth, FILTER_8TAP_SMOOTH);
+VP9_8TAP_LOONGARCH_LSX_FUNC(16, smooth, FILTER_8TAP_SMOOTH);
+VP9_8TAP_LOONGARCH_LSX_FUNC(8, smooth, FILTER_8TAP_SMOOTH);
+VP9_8TAP_LOONGARCH_LSX_FUNC(4, smooth, FILTER_8TAP_SMOOTH);
+
+VP9_COPY_LOONGARCH_LSX_FUNC(64);
+VP9_COPY_LOONGARCH_LSX_FUNC(32);
+VP9_COPY_LOONGARCH_LSX_FUNC(16);
+VP9_COPY_LOONGARCH_LSX_FUNC(8);
+
+#undef VP9_8TAP_LOONGARCH_LSX_FUNC
+#undef VP9_COPY_LOONGARCH_LSX_FUNC
+
+void ff_vert_16x16_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
+ const uint8_t *top);
+void ff_vert_32x32_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
+ const uint8_t *top);
+void ff_hor_16x16_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
+ const uint8_t *top);
+void ff_hor_32x32_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
+ const uint8_t *top);
+void ff_dc_4x4_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
+ const uint8_t *top);
+void ff_dc_8x8_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
+ const uint8_t *top);
+void ff_dc_16x16_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
+ const uint8_t *top);
+void ff_dc_32x32_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
+ const uint8_t *top);
+void ff_dc_left_4x4_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
+ const uint8_t *top);
+void ff_dc_left_8x8_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
+ const uint8_t *top);
+void ff_dc_left_16x16_lsx(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *left, const uint8_t *top);
+void ff_dc_left_32x32_lsx(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *left, const uint8_t *top);
+void ff_dc_top_4x4_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
+ const uint8_t *top);
+void ff_dc_top_8x8_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
+ const uint8_t *top);
+void ff_dc_top_16x16_lsx(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *left, const uint8_t *top);
+void ff_dc_top_32x32_lsx(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *left, const uint8_t *top);
+void ff_dc_128_16x16_lsx(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *left, const uint8_t *top);
+void ff_dc_128_32x32_lsx(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *left, const uint8_t *top);
+void ff_dc_127_16x16_lsx(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *left, const uint8_t *top);
+void ff_dc_127_32x32_lsx(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *left, const uint8_t *top);
+void ff_dc_129_16x16_lsx(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *left, const uint8_t *top);
+void ff_dc_129_32x32_lsx(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *left, const uint8_t *top);
+void ff_tm_4x4_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
+ const uint8_t *top);
+void ff_tm_8x8_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
+ const uint8_t *top);
+void ff_tm_16x16_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
+ const uint8_t *top);
+void ff_tm_32x32_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
+ const uint8_t *top);
+
+#endif /* AVCODEC_LOONGARCH_VP9DSP_LOONGARCH_H */
diff --git a/libavcodec/vp9dsp.c b/libavcodec/vp9dsp.c
index 41b8ad1ad1..82bfe394d1 100644
--- a/libavcodec/vp9dsp.c
+++ b/libavcodec/vp9dsp.c
@@ -98,4 +98,5 @@ av_cold void ff_vp9dsp_init(VP9DSPContext *dsp, int bpp, int bitexact)
if (ARCH_ARM) ff_vp9dsp_init_arm(dsp, bpp);
if (ARCH_X86) ff_vp9dsp_init_x86(dsp, bpp, bitexact);
if (ARCH_MIPS) ff_vp9dsp_init_mips(dsp, bpp);
+ if (ARCH_LOONGARCH) ff_vp9dsp_init_loongarch(dsp, bpp);
}
diff --git a/libavcodec/vp9dsp.h b/libavcodec/vp9dsp.h
index e2256316a8..700dd72de8 100644
--- a/libavcodec/vp9dsp.h
+++ b/libavcodec/vp9dsp.h
@@ -132,5 +132,6 @@ void ff_vp9dsp_init_aarch64(VP9DSPContext *dsp, int bpp);
void ff_vp9dsp_init_arm(VP9DSPContext *dsp, int bpp);
void ff_vp9dsp_init_x86(VP9DSPContext *dsp, int bpp, int bitexact);
void ff_vp9dsp_init_mips(VP9DSPContext *dsp, int bpp);
+void ff_vp9dsp_init_loongarch(VP9DSPContext *dsp, int bpp);
#endif /* AVCODEC_VP9DSP_H */
--
2.20.1
_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
^ permalink raw reply [flat|nested] 10+ messages in thread
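A minimal sketch of how the init_subpel macros in vp9dsp_init_loongarch.c
above end up being used. The expansion follows mechanically from the macros
in the patch; the call site is hypothetical (the real callers live inside the
VP9 decoder) and assumes ff_vp9dsp_init() has already run on an LSX-capable
CPU with bpp == 8.

    /* init_subpel3(1, avg) -> init_subpel2(1, 1, 0, h, avg)
     *                      -> init_subpel1(0, 1, 1, 0, 64, h, avg),
     * which assigns, among others: */
    dsp->mc[0][FILTER_8TAP_SHARP][1][1][0] = ff_avg_8tap_sharp_64h_lsx;

    /* Hypothetical use of that slot: 64-wide, horizontal-only, averaging
     * MC with the 8-tap sharp filter; mx (1..15) selects the row of
     * vp9_subpel_filters_lsx, my is not used by the h variants. */
    dsp->mc[0][FILTER_8TAP_SHARP][1][1][0](dst, dst_stride,
                                           src, src_stride,
                                           h, mx, 0);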
* Re: [FFmpeg-devel] [PATCH 2/4] avcodec: [loongarch] Optimize vp9_mc/intra with LSX.
2021-12-18 14:27 ` [FFmpeg-devel] [PATCH 2/4] avcodec: [loongarch] Optimize vp9_mc/intra " Hao Chen
@ 2021-12-18 18:47 ` Jean-Baptiste Kempf
2021-12-20 6:07 ` Hao Chen
2021-12-20 6:20 ` Shiyou Yin
0 siblings, 2 replies; 10+ messages in thread
From: Jean-Baptiste Kempf @ 2021-12-18 18:47 UTC (permalink / raw)
To: Hao Chen, FFmpeg development discussions and patches
Sorry to ask, but don't you have an ASM format, instead of intrinsics?
Best,
On Sat, 18 Dec 2021, at 15:27, Hao Chen wrote:
> ffmpeg -i ../10_vp9_1080p_30fps_3Mbps.webm -f rawvideo -y /dev/null -an
> before:170fps
> after :294fps
> ---
> libavcodec/loongarch/Makefile | 3 +
> libavcodec/loongarch/vp9_intra_lsx.c | 653 +++++
> libavcodec/loongarch/vp9_mc_lsx.c | 2480 ++++++++++++++++++
> libavcodec/loongarch/vp9dsp_init_loongarch.c | 97 +
> libavcodec/loongarch/vp9dsp_loongarch.h | 144 +
> libavcodec/vp9dsp.c | 1 +
> libavcodec/vp9dsp.h | 1 +
> 7 files changed, 3379 insertions(+)
> create mode 100644 libavcodec/loongarch/vp9_intra_lsx.c
> create mode 100644 libavcodec/loongarch/vp9_mc_lsx.c
> create mode 100644 libavcodec/loongarch/vp9dsp_init_loongarch.c
> create mode 100644 libavcodec/loongarch/vp9dsp_loongarch.h
>
> diff --git a/libavcodec/loongarch/Makefile b/libavcodec/loongarch/Makefile
> index 4e1d827e19..6fcebe40a3 100644
> --- a/libavcodec/loongarch/Makefile
> +++ b/libavcodec/loongarch/Makefile
> @@ -3,6 +3,7 @@ OBJS-$(CONFIG_H264QPEL) += loongarch/h264qpel_init_loongarch.o
>  OBJS-$(CONFIG_H264DSP) += loongarch/h264dsp_init_loongarch.o
>  OBJS-$(CONFIG_H264PRED) += loongarch/h264_intrapred_init_loongarch.o
>  OBJS-$(CONFIG_VP8_DECODER) += loongarch/vp8dsp_init_loongarch.o
> +OBJS-$(CONFIG_VP9_DECODER) += loongarch/vp9dsp_init_loongarch.o
>  LASX-OBJS-$(CONFIG_H264CHROMA) += loongarch/h264chroma_lasx.o
>  LASX-OBJS-$(CONFIG_H264QPEL) += loongarch/h264qpel_lasx.o
>  LASX-OBJS-$(CONFIG_H264DSP) += loongarch/h264dsp_lasx.o \
> @@ -11,3 +12,5 @@ LASX-OBJS-$(CONFIG_H264DSP) += loongarch/h264dsp_lasx.o \
>  LASX-OBJS-$(CONFIG_H264PRED) += loongarch/h264_intrapred_lasx.o
>  LSX-OBJS-$(CONFIG_VP8_DECODER) += loongarch/vp8_mc_lsx.o \
>                                    loongarch/vp8_lpf_lsx.o
> +LSX-OBJS-$(CONFIG_VP9_DECODER) += loongarch/vp9_mc_lsx.o \
> +                                  loongarch/vp9_intra_lsx.o
> diff --git a/libavcodec/loongarch/vp9_intra_lsx.c
> b/libavcodec/loongarch/vp9_intra_lsx.c
> new file mode 100644
> index 0000000000..d3f32646f3
> --- /dev/null
> +++ b/libavcodec/loongarch/vp9_intra_lsx.c
> @@ -0,0 +1,653 @@
> +/*
> + * Copyright (c) 2021 Loongson Technology Corporation Limited
> + * Contributed by Hao Chen <chenhao@loongson.cn>
> + *
> + * This file is part of FFmpeg.
> + *
> + * FFmpeg is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU Lesser General Public
> + * License as published by the Free Software Foundation; either
> + * version 2.1 of the License, or (at your option) any later version.
> + *
> + * FFmpeg is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
> + * Lesser General Public License for more details.
> + *
> + * You should have received a copy of the GNU Lesser General Public
> + * License along with FFmpeg; if not, write to the Free Software
> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
> + */
> +
> +#include "libavcodec/vp9dsp.h"
> +#include "libavutil/loongarch/loongson_intrinsics.h"
> +#include "vp9dsp_loongarch.h"
> +
> +#define LSX_ST_8(_dst0, _dst1, _dst2, _dst3, _dst4, \
> + _dst5, _dst6, _dst7, _dst, _stride, \
> + _stride2, _stride3, _stride4) \
> +{ \
> + __lsx_vst(_dst0, _dst, 0); \
> + __lsx_vstx(_dst1, _dst, _stride); \
> + __lsx_vstx(_dst2, _dst, _stride2); \
> + __lsx_vstx(_dst3, _dst, _stride3); \
> + _dst += _stride4; \
> + __lsx_vst(_dst4, _dst, 0); \
> + __lsx_vstx(_dst5, _dst, _stride); \
> + __lsx_vstx(_dst6, _dst, _stride2); \
> + __lsx_vstx(_dst7, _dst, _stride3); \
> +}
> +
> +#define LSX_ST_8X16(_dst0, _dst1, _dst2, _dst3, _dst4, \
> + _dst5, _dst6, _dst7, _dst, _stride) \
> +{ \
> + __lsx_vst(_dst0, _dst, 0); \
> + __lsx_vst(_dst0, _dst, 16); \
> + _dst += _stride; \
> + __lsx_vst(_dst1, _dst, 0); \
> + __lsx_vst(_dst1, _dst, 16); \
> + _dst += _stride; \
> + __lsx_vst(_dst2, _dst, 0); \
> + __lsx_vst(_dst2, _dst, 16); \
> + _dst += _stride; \
> + __lsx_vst(_dst3, _dst, 0); \
> + __lsx_vst(_dst3, _dst, 16); \
> + _dst += _stride; \
> + __lsx_vst(_dst4, _dst, 0); \
> + __lsx_vst(_dst4, _dst, 16); \
> + _dst += _stride; \
> + __lsx_vst(_dst5, _dst, 0); \
> + __lsx_vst(_dst5, _dst, 16); \
> + _dst += _stride; \
> + __lsx_vst(_dst6, _dst, 0); \
> + __lsx_vst(_dst6, _dst, 16); \
> + _dst += _stride; \
> + __lsx_vst(_dst7, _dst, 0); \
> + __lsx_vst(_dst7, _dst, 16); \
> + _dst += _stride; \
> +}
> +
> +void ff_vert_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *left,
> +                       const uint8_t *src)
> +{
> + __m128i src0;
> + ptrdiff_t stride2 = dst_stride << 1;
> + ptrdiff_t stride3 = stride2 + dst_stride;
> + ptrdiff_t stride4 = stride2 << 1;
> + src0 = __lsx_vld(src, 0);
> + LSX_ST_8(src0, src0, src0, src0, src0, src0, src0, src0, dst,
> + dst_stride, stride2, stride3, stride4);
> + dst += stride4;
> + LSX_ST_8(src0, src0, src0, src0, src0, src0, src0, src0, dst,
> + dst_stride, stride2, stride3, stride4);
> +}
> +
> +void ff_vert_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *left,
> +                       const uint8_t *src)
> +{
> + uint32_t row;
> + __m128i src0, src1;
> +
> + DUP2_ARG2(__lsx_vld, src, 0, src, 16, src0, src1);
> + for (row = 32; row--;) {
> + __lsx_vst(src0, dst, 0);
> + __lsx_vst(src1, dst, 16);
> + dst += dst_stride;
> + }
> +}
> +
> +void ff_hor_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *src,
> +                      const uint8_t *top)
> +{
> + __m128i src0, src1, src2, src3, src4, src5, src6, src7;
> + __m128i src8, src9, src10, src11, src12, src13, src14, src15;
> + ptrdiff_t stride2 = dst_stride << 1;
> + ptrdiff_t stride3 = stride2 + dst_stride;
> + ptrdiff_t stride4 = stride2 << 1;
> +
> + src15 = __lsx_vldrepl_b(src, 0);
> + src14 = __lsx_vldrepl_b(src, 1);
> + src13 = __lsx_vldrepl_b(src, 2);
> + src12 = __lsx_vldrepl_b(src, 3);
> + src11 = __lsx_vldrepl_b(src, 4);
> + src10 = __lsx_vldrepl_b(src, 5);
> + src9 = __lsx_vldrepl_b(src, 6);
> + src8 = __lsx_vldrepl_b(src, 7);
> + src7 = __lsx_vldrepl_b(src, 8);
> + src6 = __lsx_vldrepl_b(src, 9);
> + src5 = __lsx_vldrepl_b(src, 10);
> + src4 = __lsx_vldrepl_b(src, 11);
> + src3 = __lsx_vldrepl_b(src, 12);
> + src2 = __lsx_vldrepl_b(src, 13);
> + src1 = __lsx_vldrepl_b(src, 14);
> + src0 = __lsx_vldrepl_b(src, 15);
> + LSX_ST_8(src0, src1, src2, src3, src4, src5, src6, src7, dst,
> + dst_stride, stride2, stride3, stride4);
> + dst += stride4;
> + LSX_ST_8(src8, src9, src10, src11, src12, src13, src14, src15, dst,
> + dst_stride, stride2, stride3, stride4);
> +}
> +
> +void ff_hor_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *src,
> +                      const uint8_t *top)
> +{
> + __m128i src0, src1, src2, src3, src4, src5, src6, src7;
> + __m128i src8, src9, src10, src11, src12, src13, src14, src15;
> + __m128i src16, src17, src18, src19, src20, src21, src22, src23;
> + __m128i src24, src25, src26, src27, src28, src29, src30, src31;
> +
> + src31 = __lsx_vldrepl_b(src, 0);
> + src30 = __lsx_vldrepl_b(src, 1);
> + src29 = __lsx_vldrepl_b(src, 2);
> + src28 = __lsx_vldrepl_b(src, 3);
> + src27 = __lsx_vldrepl_b(src, 4);
> + src26 = __lsx_vldrepl_b(src, 5);
> + src25 = __lsx_vldrepl_b(src, 6);
> + src24 = __lsx_vldrepl_b(src, 7);
> + src23 = __lsx_vldrepl_b(src, 8);
> + src22 = __lsx_vldrepl_b(src, 9);
> + src21 = __lsx_vldrepl_b(src, 10);
> + src20 = __lsx_vldrepl_b(src, 11);
> + src19 = __lsx_vldrepl_b(src, 12);
> + src18 = __lsx_vldrepl_b(src, 13);
> + src17 = __lsx_vldrepl_b(src, 14);
> + src16 = __lsx_vldrepl_b(src, 15);
> + src15 = __lsx_vldrepl_b(src, 16);
> + src14 = __lsx_vldrepl_b(src, 17);
> + src13 = __lsx_vldrepl_b(src, 18);
> + src12 = __lsx_vldrepl_b(src, 19);
> + src11 = __lsx_vldrepl_b(src, 20);
> + src10 = __lsx_vldrepl_b(src, 21);
> + src9 = __lsx_vldrepl_b(src, 22);
> + src8 = __lsx_vldrepl_b(src, 23);
> + src7 = __lsx_vldrepl_b(src, 24);
> + src6 = __lsx_vldrepl_b(src, 25);
> + src5 = __lsx_vldrepl_b(src, 26);
> + src4 = __lsx_vldrepl_b(src, 27);
> + src3 = __lsx_vldrepl_b(src, 28);
> + src2 = __lsx_vldrepl_b(src, 29);
> + src1 = __lsx_vldrepl_b(src, 30);
> + src0 = __lsx_vldrepl_b(src, 31);
> + LSX_ST_8X16(src0, src1, src2, src3, src4, src5, src6, src7,
> + dst, dst_stride);
> + LSX_ST_8X16(src8, src9, src10, src11, src12, src13, src14, src15,
> + dst, dst_stride);
> + LSX_ST_8X16(src16, src17, src18, src19, src20, src21, src22, src23,
> + dst, dst_stride);
> + LSX_ST_8X16(src24, src25, src26, src27, src28, src29, src30, src31,
> + dst, dst_stride);
> +}
> +
> +void ff_dc_4x4_lsx(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *src_left,
> +                   const uint8_t *src_top)
> +{
> + __m128i tmp0, tmp1, dst0;
> +
> + tmp0 = __lsx_vldrepl_w(src_top, 0);
> + tmp1 = __lsx_vldrepl_w(src_left, 0);
> + dst0 = __lsx_vilvl_w(tmp1, tmp0);
> + dst0 = __lsx_vhaddw_hu_bu(dst0, dst0);
> + dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);
> + dst0 = __lsx_vhaddw_du_wu(dst0, dst0);
> + dst0 = __lsx_vsrari_w(dst0, 3);
> + dst0 = __lsx_vshuf4i_b(dst0, 0);
> + __lsx_vstelm_w(dst0, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_w(dst0, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_w(dst0, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_w(dst0, dst, 0, 0);
> +}
> +
> +#define INTRA_DC_TL_4X4(dir) \
> +void ff_dc_##dir##_4x4_lsx(uint8_t *dst, ptrdiff_t dst_stride, \
> +                           const uint8_t *left, \
> +                           const uint8_t *top) \
> +{ \
> +    __m128i tmp0, dst0; \
> + \
> +    tmp0 = __lsx_vldrepl_w(dir, 0); \
> +    dst0 = __lsx_vhaddw_hu_bu(tmp0, tmp0); \
> +    dst0 = __lsx_vhaddw_wu_hu(dst0, dst0); \
> +    dst0 = __lsx_vsrari_w(dst0, 2); \
> +    dst0 = __lsx_vshuf4i_b(dst0, 0); \
> +    __lsx_vstelm_w(dst0, dst, 0, 0); \
> +    dst += dst_stride; \
> +    __lsx_vstelm_w(dst0, dst, 0, 0); \
> +    dst += dst_stride; \
> +    __lsx_vstelm_w(dst0, dst, 0, 0); \
> +    dst += dst_stride; \
> +    __lsx_vstelm_w(dst0, dst, 0, 0); \
> +}
> +INTRA_DC_TL_4X4(top);
> +INTRA_DC_TL_4X4(left);
> +
> +void ff_dc_8x8_lsx(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *src_left,
> +                   const uint8_t *src_top)
> +{
> + __m128i tmp0, tmp1, dst0;
> +
> + tmp0 = __lsx_vldrepl_d(src_top, 0);
> + tmp1 = __lsx_vldrepl_d(src_left, 0);
> + dst0 = __lsx_vilvl_d(tmp1, tmp0);
> + dst0 = __lsx_vhaddw_hu_bu(dst0, dst0);
> + dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);
> + dst0 = __lsx_vhaddw_du_wu(dst0, dst0);
> + dst0 = __lsx_vhaddw_qu_du(dst0, dst0);
> + dst0 = __lsx_vsrari_w(dst0, 4);
> + dst0 = __lsx_vreplvei_b(dst0, 0);
> + __lsx_vstelm_d(dst0, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_d(dst0, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_d(dst0, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_d(dst0, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_d(dst0, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_d(dst0, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_d(dst0, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_d(dst0, dst, 0, 0);
> +}
> +
> +#define INTRA_DC_TL_8X8(dir) \
> +void ff_dc_##dir##_8x8_lsx(uint8_t *dst, ptrdiff_t dst_stride, \
> +                           const uint8_t *left, \
> +                           const uint8_t *top) \
> +{ \
> +    __m128i tmp0, dst0; \
> + \
> +    tmp0 = __lsx_vldrepl_d(dir, 0); \
> +    dst0 = __lsx_vhaddw_hu_bu(tmp0, tmp0); \
> +    dst0 = __lsx_vhaddw_wu_hu(dst0, dst0); \
> +    dst0 = __lsx_vhaddw_du_wu(dst0, dst0); \
> +    dst0 = __lsx_vsrari_w(dst0, 3); \
> +    dst0 = __lsx_vreplvei_b(dst0, 0); \
> +    __lsx_vstelm_d(dst0, dst, 0, 0); \
> +    dst += dst_stride; \
> +    __lsx_vstelm_d(dst0, dst, 0, 0); \
> +    dst += dst_stride; \
> +    __lsx_vstelm_d(dst0, dst, 0, 0); \
> +    dst += dst_stride; \
> +    __lsx_vstelm_d(dst0, dst, 0, 0); \
> +    dst += dst_stride; \
> +    __lsx_vstelm_d(dst0, dst, 0, 0); \
> +    dst += dst_stride; \
> +    __lsx_vstelm_d(dst0, dst, 0, 0); \
> +    dst += dst_stride; \
> +    __lsx_vstelm_d(dst0, dst, 0, 0); \
> +    dst += dst_stride; \
> +    __lsx_vstelm_d(dst0, dst, 0, 0); \
> +}
> +
> +INTRA_DC_TL_8X8(top);
> +INTRA_DC_TL_8X8(left);
> +
> +void ff_dc_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride,
> + const uint8_t *src_left, const uint8_t *src_top)
> +{
> + __m128i tmp0, tmp1, dst0;
> + ptrdiff_t stride2 = dst_stride << 1;
> + ptrdiff_t stride3 = stride2 + dst_stride;
> + ptrdiff_t stride4 = stride2 << 1;
> +
> + tmp0 = __lsx_vld(src_top, 0);
> + tmp1 = __lsx_vld(src_left, 0);
> + DUP2_ARG2(__lsx_vhaddw_hu_bu, tmp0, tmp0, tmp1, tmp1, tmp0, tmp1);
> + dst0 = __lsx_vadd_h(tmp0, tmp1);
> + dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);
> + dst0 = __lsx_vhaddw_du_wu(dst0, dst0);
> + dst0 = __lsx_vhaddw_qu_du(dst0, dst0);
> + dst0 = __lsx_vsrari_w(dst0, 5);
> + dst0 = __lsx_vreplvei_b(dst0, 0);
> + LSX_ST_8(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst,
> + dst_stride, stride2, stride3, stride4);
> + dst += stride4;
> + LSX_ST_8(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst,
> + dst_stride, stride2, stride3, stride4);
> +}
> +
> +#define INTRA_DC_TL_16X16(dir) \
> +void ff_dc_##dir##_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride, \
> +                             const uint8_t *left, \
> +                             const uint8_t *top) \
> +{ \
> +    __m128i tmp0, dst0; \
> +    ptrdiff_t stride2 = dst_stride << 1; \
> +    ptrdiff_t stride3 = stride2 + dst_stride; \
> +    ptrdiff_t stride4 = stride2 << 1; \
> + \
> +    tmp0 = __lsx_vld(dir, 0); \
> +    dst0 = __lsx_vhaddw_hu_bu(tmp0, tmp0); \
> +    dst0 = __lsx_vhaddw_wu_hu(dst0, dst0); \
> +    dst0 = __lsx_vhaddw_du_wu(dst0, dst0); \
> +    dst0 = __lsx_vhaddw_qu_du(dst0, dst0); \
> +    dst0 = __lsx_vsrari_w(dst0, 4); \
> +    dst0 = __lsx_vreplvei_b(dst0, 0); \
> +    LSX_ST_8(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst, \
> +             dst_stride, stride2, stride3, stride4); \
> +    dst += stride4; \
> +    LSX_ST_8(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst, \
> +             dst_stride, stride2, stride3, stride4); \
> +}
> +
> +INTRA_DC_TL_16X16(top);
> +INTRA_DC_TL_16X16(left);
> +
> +void ff_dc_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride,
> + const uint8_t *src_left, const uint8_t *src_top)
> +{
> + __m128i tmp0, tmp1, tmp2, tmp3, dst0;
> +
> + DUP2_ARG2(__lsx_vld, src_top, 0, src_top, 16, tmp0, tmp1);
> + DUP2_ARG2(__lsx_vld, src_left, 0, src_left, 16, tmp2, tmp3);
> + DUP4_ARG2(__lsx_vhaddw_hu_bu, tmp0, tmp0, tmp1, tmp1, tmp2, tmp2,
> + tmp3, tmp3, tmp0, tmp1, tmp2, tmp3);
> + DUP2_ARG2(__lsx_vadd_h, tmp0, tmp1, tmp2, tmp3, tmp0, tmp1);
> + dst0 = __lsx_vadd_h(tmp0, tmp1);
> + dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);
> + dst0 = __lsx_vhaddw_du_wu(dst0, dst0);
> + dst0 = __lsx_vhaddw_qu_du(dst0, dst0);
> + dst0 = __lsx_vsrari_w(dst0, 6);
> + dst0 = __lsx_vreplvei_b(dst0, 0);
> + LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,
> + dst, dst_stride);
> + LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,
> + dst, dst_stride);
> + LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,
> + dst, dst_stride);
> + LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,
> + dst, dst_stride);
> +}
> +
> +#define INTRA_DC_TL_32X32(dir) \
> +void ff_dc_##dir##_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride, \
> +                             const uint8_t *left, \
> +                             const uint8_t *top) \
> +{ \
> +    __m128i tmp0, tmp1, dst0; \
> + \
> +    DUP2_ARG2(__lsx_vld, dir, 0, dir, 16, tmp0, tmp1); \
> +    DUP2_ARG2(__lsx_vhaddw_hu_bu, tmp0, tmp0, tmp1, tmp1, tmp0, tmp1); \
> +    dst0 = __lsx_vadd_h(tmp0, tmp1); \
> +    dst0 = __lsx_vhaddw_wu_hu(dst0, dst0); \
> +    dst0 = __lsx_vhaddw_du_wu(dst0, dst0); \
> +    dst0 = __lsx_vhaddw_qu_du(dst0, dst0); \
> +    dst0 = __lsx_vsrari_w(dst0, 5); \
> +    dst0 = __lsx_vreplvei_b(dst0, 0); \
> +    LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, \
> +                dst, dst_stride); \
> +    LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, \
> +                dst, dst_stride); \
> +    LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, \
> +                dst, dst_stride); \
> +    LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, \
> +                dst, dst_stride); \
> +}
> +
> +INTRA_DC_TL_32X32(top);
> +INTRA_DC_TL_32X32(left);
> +
> +#define INTRA_PREDICT_VALDC_16X16_LSX(val) \
> +void ff_dc_##val##_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride, \
> +                             const uint8_t *left, const uint8_t *top) \
> +{ \
> +    __m128i out = __lsx_vldi(val); \
> +    ptrdiff_t stride2 = dst_stride << 1; \
> +    ptrdiff_t stride3 = stride2 + dst_stride; \
> +    ptrdiff_t stride4 = stride2 << 1; \
> + \
> +    LSX_ST_8(out, out, out, out, out, out, out, out, dst, \
> +             dst_stride, stride2, stride3, stride4); \
> +    dst += stride4; \
> +    LSX_ST_8(out, out, out, out, out, out, out, out, dst, \
> +             dst_stride, stride2, stride3, stride4); \
> +}
> +
> +INTRA_PREDICT_VALDC_16X16_LSX(127);
> +INTRA_PREDICT_VALDC_16X16_LSX(128);
> +INTRA_PREDICT_VALDC_16X16_LSX(129);
> +
> +#define INTRA_PREDICT_VALDC_32X32_LSX(val) \
> +void ff_dc_##val##_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride, \
> +                             const uint8_t *left, const uint8_t *top) \
> +{ \
> +    __m128i out = __lsx_vldi(val); \
> + \
> +    LSX_ST_8X16(out, out, out, out, out, out, out, out, dst, dst_stride); \
> +    LSX_ST_8X16(out, out, out, out, out, out, out, out, dst, dst_stride); \
> +    LSX_ST_8X16(out, out, out, out, out, out, out, out, dst, dst_stride); \
> +    LSX_ST_8X16(out, out, out, out, out, out, out, out, dst, dst_stride); \
> +}
> +
> +INTRA_PREDICT_VALDC_32X32_LSX(127);
> +INTRA_PREDICT_VALDC_32X32_LSX(128);
> +INTRA_PREDICT_VALDC_32X32_LSX(129);
> +
> +void ff_tm_4x4_lsx(uint8_t *dst, ptrdiff_t dst_stride,
> + const uint8_t *src_left, const uint8_t *src_top_ptr)
> +{
> + uint8_t top_left = src_top_ptr[-1];
> + __m128i tmp0, tmp1, tmp2, tmp3, reg0, reg1;
> + __m128i src0, src1, src2, src3;
> + __m128i dst0, dst1, dst2, dst3;
> +
> + reg0 = __lsx_vreplgr2vr_h(top_left);
> + reg1 = __lsx_vld(src_top_ptr, 0);
> + DUP4_ARG2(__lsx_vldrepl_b, src_left, 0, src_left, 1, src_left, 2,
> src_left,
> + 3, tmp3, tmp2, tmp1, tmp0);
> + DUP4_ARG2(__lsx_vilvl_b, tmp0, reg1, tmp1, reg1, tmp2, reg1, tmp3,
> reg1,
> + src0, src1, src2, src3);
> + DUP4_ARG2(__lsx_vhaddw_hu_bu, src0, src0, src1, src1, src2, src2,
> src3,
> + src3, dst0, dst1, dst2, dst3);
> + DUP4_ARG2(__lsx_vssub_hu, dst0, reg0, dst1, reg0, dst2, reg0,
> dst3, reg0,
> + dst0, dst1, dst2, dst3);
> + DUP4_ARG2(__lsx_vsat_hu, dst0, 7, dst1, 7, dst2, 7, dst3, 7,
> + dst0, dst1, dst2, dst3);
> + DUP2_ARG2(__lsx_vpickev_b, dst1, dst0, dst3, dst2, dst0, dst1);
> + __lsx_vstelm_w(dst0, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_w(dst0, dst, 0, 2);
> + dst += dst_stride;
> + __lsx_vstelm_w(dst1, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_w(dst1, dst, 0, 2);
> +}
> +
> +void ff_tm_8x8_lsx(uint8_t *dst, ptrdiff_t dst_stride,
> + const uint8_t *src_left, const uint8_t *src_top_ptr)
> +{
> + uint8_t top_left = src_top_ptr[-1];
> + __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
> + __m128i src0, src1, src2, src3, src4, src5, src6, src7;
> + __m128i reg0, reg1;
> +
> + reg0 = __lsx_vreplgr2vr_h(top_left);
> + reg1 = __lsx_vld(src_top_ptr, 0);
> + DUP4_ARG2(__lsx_vldrepl_b, src_left, 0, src_left, 1, src_left, 2,
> src_left,
> + 3, tmp7, tmp6, tmp5, tmp4);
> + DUP4_ARG2(__lsx_vldrepl_b, src_left, 4, src_left, 5, src_left, 6,
> src_left,
> + 7, tmp3, tmp2, tmp1, tmp0);
> + DUP4_ARG2(__lsx_vilvl_b, tmp0, reg1, tmp1, reg1, tmp2, reg1, tmp3,
> reg1,
> + src0, src1, src2, src3);
> + DUP4_ARG2(__lsx_vilvl_b, tmp4, reg1, tmp5, reg1, tmp6, reg1, tmp7,
> reg1,
> + src4, src5, src6, src7);
> + DUP4_ARG2(__lsx_vhaddw_hu_bu, src0, src0, src1, src1, src2, src2,
> src3,
> + src3, src0, src1, src2, src3);
> + DUP4_ARG2(__lsx_vhaddw_hu_bu, src4, src4, src5, src5, src6, src6,
> src7,
> + src7, src4, src5, src6, src7);
> + DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0,
> src3, reg0,
> + src0, src1, src2, src3);
> + DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0,
> src7, reg0,
> + src4, src5, src6, src7);
> + DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7,
> + src0, src1, src2, src3);
> + DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7,
> + src4, src5, src6, src7);
> + DUP4_ARG2(__lsx_vpickev_b, src1, src0, src3, src2, src5, src4,
> src7, src6,
> + src0, src1, src2, src3);
> + __lsx_vstelm_d(src0, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_d(src0, dst, 0, 1);
> + dst += dst_stride;
> + __lsx_vstelm_d(src1, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_d(src1, dst, 0, 1);
> + dst += dst_stride;
> + __lsx_vstelm_d(src2, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_d(src2, dst, 0, 1);
> + dst += dst_stride;
> + __lsx_vstelm_d(src3, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_d(src3, dst, 0, 1);
> +}
> +
> +void ff_tm_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride,
> +                     const uint8_t *src_left, const uint8_t *src_top_ptr)
> +{
> + uint8_t top_left = src_top_ptr[-1];
> + __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
> + __m128i tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15;
> + __m128i src0, src1, src2, src3, src4, src5, src6, src7;
> + __m128i reg0, reg1;
> + ptrdiff_t stride2 = dst_stride << 1;
> + ptrdiff_t stride3 = stride2 + dst_stride;
> + ptrdiff_t stride4 = stride2 << 1;
> +
> + reg0 = __lsx_vreplgr2vr_h(top_left);
> + reg1 = __lsx_vld(src_top_ptr, 0);
> + DUP4_ARG2(__lsx_vldrepl_b, src_left, 0, src_left, 1, src_left, 2,
> src_left,
> + 3, tmp15, tmp14, tmp13, tmp12);
> + DUP4_ARG2(__lsx_vldrepl_b, src_left, 4, src_left, 5, src_left, 6,
> src_left,
> + 7, tmp11, tmp10, tmp9, tmp8);
> + DUP4_ARG2(__lsx_vldrepl_b, src_left, 8, src_left, 9, src_left, 10,
> + src_left, 11, tmp7, tmp6, tmp5, tmp4);
> + DUP4_ARG2(__lsx_vldrepl_b, src_left, 12, src_left, 13, src_left,
> 14,
> + src_left, 15, tmp3, tmp2, tmp1, tmp0);
> + DUP4_ARG2(__lsx_vaddwev_h_bu, tmp0, reg1, tmp1, reg1, tmp2, reg1,
> tmp3,
> + reg1, src0, src1, src2, src3);
> + DUP4_ARG2(__lsx_vaddwod_h_bu, tmp0, reg1, tmp1, reg1, tmp2, reg1,
> tmp3,
> + reg1, src4, src5, src6, src7);
> + DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0,
> src3, reg0,
> + src0, src1, src2, src3);
> + DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0,
> src7, reg0,
> + src4, src5, src6, src7);
> + DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7,
> + src0, src1, src2, src3);
> + DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7,
> + src4, src5, src6, src7);
> + DUP4_ARG2(__lsx_vpackev_b, src4, src0, src5, src1, src6, src2,
> src7, src3,
> + tmp0, tmp1, tmp2, tmp3);
> + DUP4_ARG2(__lsx_vaddwev_h_bu, tmp4, reg1, tmp5, reg1, tmp6, reg1,
> tmp7,
> + reg1, src0, src1, src2, src3);
> + DUP4_ARG2(__lsx_vaddwod_h_bu, tmp4, reg1, tmp5, reg1, tmp6, reg1,
> tmp7,
> + reg1, src4, src5, src6, src7);
> + DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0,
> src3, reg0,
> + src0, src1, src2, src3);
> + DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0,
> src7, reg0,
> + src4, src5, src6, src7);
> + DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7,
> + src0, src1, src2, src3);
> + DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7,
> + src4, src5, src6, src7);
> + DUP4_ARG2(__lsx_vpackev_b, src4, src0, src5, src1, src6, src2,
> src7, src3,
> + tmp4, tmp5, tmp6, tmp7);
> + DUP4_ARG2(__lsx_vaddwev_h_bu, tmp8, reg1, tmp9, reg1, tmp10, reg1,
> tmp11,
> + reg1, src0, src1, src2, src3);
> + DUP4_ARG2(__lsx_vaddwod_h_bu, tmp8, reg1, tmp9, reg1, tmp10, reg1,
> tmp11,
> + reg1, src4, src5, src6, src7);
> + DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0,
> src3, reg0,
> + src0, src1, src2, src3);
> + DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0,
> src7, reg0,
> + src4, src5, src6, src7);
> + DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7,
> + src0, src1, src2, src3);
> + DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7,
> + src4, src5, src6, src7);
> + DUP4_ARG2(__lsx_vpackev_b, src4, src0, src5, src1, src6, src2,
> src7, src3,
> + tmp8, tmp9, tmp10, tmp11);
> + DUP4_ARG2(__lsx_vaddwev_h_bu, tmp12, reg1, tmp13, reg1, tmp14,
> reg1,
> + tmp15, reg1, src0, src1, src2, src3);
> + DUP4_ARG2(__lsx_vaddwod_h_bu, tmp12, reg1, tmp13, reg1, tmp14,
> reg1,
> + tmp15, reg1, src4, src5, src6, src7);
> + DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0,
> src3, reg0,
> + src0, src1, src2, src3);
> + DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0,
> src7, reg0,
> + src4, src5, src6, src7);
> + DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7,
> + src0, src1, src2, src3);
> + DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7,
> + src4, src5, src6, src7);
> + DUP4_ARG2(__lsx_vpackev_b, src4, src0, src5, src1, src6, src2,
> src7, src3,
> + tmp12, tmp13, tmp14, tmp15);
> + LSX_ST_8(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, dst,
> + dst_stride, stride2, stride3, stride4);
> + dst += stride4;
> + LSX_ST_8(tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, dst,
> + dst_stride, stride2, stride3, stride4);
> +}
> +
> +void ff_tm_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride,
> +                     const uint8_t *src_left, const uint8_t *src_top_ptr)
> +{
> + uint8_t top_left = src_top_ptr[-1];
> + uint32_t loop_cnt;
> + __m128i tmp0, tmp1, tmp2, tmp3, reg0, reg1, reg2;
> + __m128i src0, src1, src2, src3, src4, src5, src6, src7;
> + __m128i dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
> +
> + reg0 = __lsx_vreplgr2vr_h(top_left);
> + DUP2_ARG2(__lsx_vld, src_top_ptr, 0, src_top_ptr, 16, reg1, reg2);
> +
> + src_left += 28;
> + for (loop_cnt = 8; loop_cnt--;) {
> + DUP4_ARG2(__lsx_vldrepl_b, src_left, 0, src_left, 1, src_left,
> 2,
> + src_left, 3, tmp3, tmp2, tmp1, tmp0);
> + src_left -= 4;
> + DUP4_ARG2(__lsx_vaddwev_h_bu, tmp0, reg1, tmp1, reg1, tmp2,
> reg1,
> + tmp3, reg1, src0, src1, src2, src3);
> + DUP4_ARG2(__lsx_vaddwod_h_bu, tmp0, reg1, tmp1, reg1, tmp2,
> reg1,
> + tmp3, reg1, src4, src5, src6, src7);
> + DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0,
> src3,
> + reg0, src0, src1, src2, src3);
> + DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0,
> src7,
> + reg0, src4, src5, src6, src7);
> + DUP4_ARG2(__lsx_vaddwev_h_bu, tmp0, reg2, tmp1, reg2, tmp2,
> reg2,
> + tmp3, reg2, dst0, dst1, dst2, dst3);
> + DUP4_ARG2(__lsx_vaddwod_h_bu, tmp0, reg2, tmp1, reg2, tmp2,
> reg2,
> + tmp3, reg2, dst4, dst5, dst6, dst7);
> + DUP4_ARG2(__lsx_vssub_hu, dst0, reg0, dst1, reg0, dst2, reg0,
> dst3,
> + reg0, dst0, dst1, dst2, dst3);
> + DUP4_ARG2(__lsx_vssub_hu, dst4, reg0, dst5, reg0, dst6, reg0,
> dst7,
> + reg0, dst4, dst5, dst6, dst7);
> + DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7,
> + src0, src1, src2, src3);
> + DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7,
> + src4, src5, src6, src7);
> + DUP4_ARG2(__lsx_vsat_hu, dst0, 7, dst1, 7, dst2, 7, dst3, 7,
> + dst0, dst1, dst2, dst3);
> + DUP4_ARG2(__lsx_vsat_hu, dst4, 7, dst5, 7, dst6, 7, dst7, 7,
> + dst4, dst5, dst6, dst7);
> + DUP4_ARG2(__lsx_vpackev_b, src4, src0, src5, src1, src6, src2,
> src7,
> + src3, src0, src1, src2, src3);
> + DUP4_ARG2(__lsx_vpackev_b, dst4, dst0, dst5, dst1, dst6, dst2,
> dst7,
> + dst3, dst0, dst1, dst2, dst3);
> + __lsx_vst(src0, dst, 0);
> + __lsx_vst(dst0, dst, 16);
> + dst += dst_stride;
> + __lsx_vst(src1, dst, 0);
> + __lsx_vst(dst1, dst, 16);
> + dst += dst_stride;
> + __lsx_vst(src2, dst, 0);
> + __lsx_vst(dst2, dst, 16);
> + dst += dst_stride;
> + __lsx_vst(src3, dst, 0);
> + __lsx_vst(dst3, dst, 16);
> + dst += dst_stride;
> + }
> +}
> diff --git a/libavcodec/loongarch/vp9_mc_lsx.c
> b/libavcodec/loongarch/vp9_mc_lsx.c
> new file mode 100644
> index 0000000000..c6746fd87f
> --- /dev/null
> +++ b/libavcodec/loongarch/vp9_mc_lsx.c
> @@ -0,0 +1,2480 @@
> +/*
> + * Copyright (c) 2021 Loongson Technology Corporation Limited
> + * Contributed by Hao Chen <chenhao@loongson.cn>
> + *
> + * This file is part of FFmpeg.
> + *
> + * FFmpeg is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU Lesser General Public
> + * License as published by the Free Software Foundation; either
> + * version 2.1 of the License, or (at your option) any later version.
> + *
> + * FFmpeg is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
> + * Lesser General Public License for more details.
> + *
> + * You should have received a copy of the GNU Lesser General Public
> + * License along with FFmpeg; if not, write to the Free Software
> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
> + */
> +
> +#include "libavcodec/vp9dsp.h"
> +#include "libavutil/loongarch/loongson_intrinsics.h"
> +#include "vp9dsp_loongarch.h"
> +
> +static const uint8_t mc_filt_mask_arr[16 * 3] = {
> + /* 8 width cases */
> + 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8,
> + /* 4 width cases */
> + 0, 1, 1, 2, 2, 3, 3, 4, 16, 17, 17, 18, 18, 19, 19, 20,
> + /* 4 width cases */
> + 8, 9, 9, 10, 10, 11, 11, 12, 24, 25, 25, 26, 26, 27, 27, 28
> +};
> +
> +
> +#define HORIZ_8TAP_4WID_4VECS_FILT(_src0, _src1, _src2, _src3, \
> +                                   _mask0, _mask1, _mask2, _mask3, \
> +                                   _filter0, _filter1, _filter2, _filter3, \
> +                                   _out0, _out1) \
> +{ \
> +    __m128i _tmp0, _tmp1, _tmp2, _tmp3, _tmp4, _tmp5, _tmp6, _tmp7; \
> +    __m128i _reg0, _reg1, _reg2, _reg3; \
> + \
> +    DUP2_ARG3(__lsx_vshuf_b, _src1, _src0, _mask0, _src3, _src2, _mask0, \
> +              _tmp0, _tmp1); \
> +    DUP2_ARG2(__lsx_vdp2_h_b, _tmp0, _filter0, _tmp1, _filter0, _reg0, _reg1); \
> +    DUP2_ARG3(__lsx_vshuf_b, _src1, _src0, _mask1, _src3, _src2, _mask1, \
> +              _tmp2, _tmp3); \
> +    DUP2_ARG3(__lsx_vdp2add_h_b, _reg0, _tmp2, _filter1, _reg1, _tmp3, \
> +              _filter1, _reg0, _reg1); \
> +    DUP2_ARG3(__lsx_vshuf_b, _src1, _src0, _mask2, _src3, _src2, _mask2, \
> +              _tmp4, _tmp5); \
> +    DUP2_ARG2(__lsx_vdp2_h_b, _tmp4, _filter2, _tmp5, _filter2, _reg2, _reg3); \
> +    DUP2_ARG3(__lsx_vshuf_b, _src1, _src0, _mask3, _src3, _src2, _mask3, \
> +              _tmp6, _tmp7); \
> +    DUP2_ARG3(__lsx_vdp2add_h_b, _reg2, _tmp6, _filter3, _reg3, _tmp7, \
> +              _filter3, _reg2, _reg3); \
> +    DUP2_ARG2(__lsx_vsadd_h, _reg0, _reg2, _reg1, _reg3, _out0, _out1); \
> +}
> +
> +#define HORIZ_8TAP_8WID_4VECS_FILT(_src0, _src1, _src2, _src3, \
> +                                   _mask0, _mask1, _mask2, _mask3, \
> +                                   _filter0, _filter1, _filter2, _filter3, \
> +                                   _out0, _out1, _out2, _out3) \
> +{ \
> +    __m128i _tmp0, _tmp1, _tmp2, _tmp3, _tmp4, _tmp5, _tmp6, _tmp7; \
> +    __m128i _reg0, _reg1, _reg2, _reg3, _reg4, _reg5, _reg6, _reg7; \
> + \
> +    DUP4_ARG3(__lsx_vshuf_b, _src0, _src0, _mask0, _src1, _src1, _mask0, _src2, \
> +              _src2, _mask0, _src3, _src3, _mask0, _tmp0, _tmp1, _tmp2, _tmp3); \
> +    DUP4_ARG2(__lsx_vdp2_h_b, _tmp0, _filter0, _tmp1, _filter0, _tmp2, \
> +              _filter0, _tmp3, _filter0, _reg0, _reg1, _reg2, _reg3); \
> +    DUP4_ARG3(__lsx_vshuf_b, _src0, _src0, _mask2, _src1, _src1, _mask2, _src2, \
> +              _src2, _mask2, _src3, _src3, _mask2, _tmp0, _tmp1, _tmp2, _tmp3); \
> +    DUP4_ARG2(__lsx_vdp2_h_b, _tmp0, _filter2, _tmp1, _filter2, _tmp2, \
> +              _filter2, _tmp3, _filter2, _reg4, _reg5, _reg6, _reg7); \
> +    DUP4_ARG3(__lsx_vshuf_b, _src0, _src0, _mask1, _src1, _src1, _mask1, _src2, \
> +              _src2, _mask1, _src3, _src3, _mask1, _tmp4, _tmp5, _tmp6, _tmp7); \
> +    DUP4_ARG3(__lsx_vdp2add_h_b, _reg0, _tmp4, _filter1, _reg1, _tmp5, \
> +              _filter1, _reg2, _tmp6, _filter1, _reg3, _tmp7, _filter1, _reg0, \
> +              _reg1, _reg2, _reg3); \
> +    DUP4_ARG3(__lsx_vshuf_b, _src0, _src0, _mask3, _src1, _src1, _mask3, _src2, \
> +              _src2, _mask3, _src3, _src3, _mask3, _tmp4, _tmp5, _tmp6, _tmp7); \
> +    DUP4_ARG3(__lsx_vdp2add_h_b, _reg4, _tmp4, _filter3, _reg5, _tmp5, \
> +              _filter3, _reg6, _tmp6, _filter3, _reg7, _tmp7, _filter3, _reg4, \
> +              _reg5, _reg6, _reg7); \
> +    DUP4_ARG2(__lsx_vsadd_h, _reg0, _reg4, _reg1, _reg5, _reg2, _reg6, _reg3, \
> +              _reg7, _out0, _out1, _out2, _out3); \
> +}
> +
> +#define FILT_8TAP_DPADD_S_H(_reg0, _reg1, _reg2, _reg3, \
> +                            _filter0, _filter1, _filter2, _filter3) \
> +( { \
> +    __m128i _vec0, _vec1; \
> + \
> +    _vec0 = __lsx_vdp2_h_b(_reg0, _filter0); \
> +    _vec0 = __lsx_vdp2add_h_b(_vec0, _reg1, _filter1); \
> +    _vec1 = __lsx_vdp2_h_b(_reg2, _filter2); \
> +    _vec1 = __lsx_vdp2add_h_b(_vec1, _reg3, _filter3); \
> +    _vec0 = __lsx_vsadd_h(_vec0, _vec1); \
> + \
> +    _vec0; \
> +} )
> +
> +#define HORIZ_8TAP_FILT(_src0, _src1, _mask0, _mask1, _mask2, _mask3, \
> +                        _filt_h0, _filt_h1, _filt_h2, _filt_h3) \
> +( { \
> +    __m128i _tmp0, _tmp1, _tmp2, _tmp3; \
> +    __m128i _out; \
> + \
> +    DUP4_ARG3(__lsx_vshuf_b, _src1, _src0, _mask0, _src1, _src0, _mask1, _src1, \
> +              _src0, _mask2, _src1, _src0, _mask3, _tmp0, _tmp1, _tmp2, _tmp3); \
> +    _out = FILT_8TAP_DPADD_S_H(_tmp0, _tmp1, _tmp2, _tmp3, _filt_h0, _filt_h1, \
> +                               _filt_h2, _filt_h3); \
> +    _out = __lsx_vsrari_h(_out, 7); \
> +    _out = __lsx_vsat_h(_out, 7); \
> + \
> +    _out; \
> +} )
> +
> +#define LSX_LD_4(_src, _stride, _src0, _src1, _src2, _src3) \
> +{ \
> +    _src0 = __lsx_vld(_src, 0); \
> +    _src += _stride; \
> +    _src1 = __lsx_vld(_src, 0); \
> +    _src += _stride; \
> +    _src2 = __lsx_vld(_src, 0); \
> +    _src += _stride; \
> +    _src3 = __lsx_vld(_src, 0); \
> +}
> +
> +static void common_hz_8t_4x4_lsx(const uint8_t *src, int32_t src_stride,
> +                                 uint8_t *dst, int32_t dst_stride,
> +                                 const int8_t *filter)
> +{
> + __m128i src0, src1, src2, src3;
> + __m128i filter0, filter1, filter2, filter3;
> + __m128i mask0, mask1, mask2, mask3;
> + __m128i out, out0, out1;
> +
> + mask0 = __lsx_vld(mc_filt_mask_arr, 16);
> + src -= 3;
> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
> filter, 6,
> + filter0, filter1, filter2, filter3);
> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
> + mask3 = __lsx_vaddi_bu(mask0, 6);
> +
> + LSX_LD_4(src, src_stride, src0, src1, src2, src3);
> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3,
> 128,
> + src0, src1, src2, src3);
> + HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1,
> mask2,
> + mask3, filter0, filter1, filter2, filter3, out0,
> out1);
> + out = __lsx_vssrarni_b_h(out1, out0, 7);
> + out = __lsx_vxori_b(out, 128);
> + __lsx_vstelm_w(out, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_w(out, dst, 0, 1);
> + dst += dst_stride;
> + __lsx_vstelm_w(out, dst, 0, 2);
> + dst += dst_stride;
> + __lsx_vstelm_w(out, dst, 0, 3);
> +}
> +
> +static void common_hz_8t_4x8_lsx(const uint8_t *src, int32_t src_stride,
> +                                 uint8_t *dst, int32_t dst_stride,
> +                                 const int8_t *filter)
> +{
> + int32_t src_stride2 = src_stride << 1;
> + int32_t src_stride3 = src_stride + src_stride2;
> + int32_t src_stride4 = src_stride2 << 1;
> + __m128i src0, src1, src2, src3;
> + __m128i filter0, filter1, filter2, filter3;
> + __m128i mask0, mask1, mask2, mask3;
> + __m128i out0, out1, out2, out3;
> + uint8_t *_src = (uint8_t*)src - 3;
> +
> + mask0 = __lsx_vld(mc_filt_mask_arr, 16);
> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
> + mask3 = __lsx_vaddi_bu(mask0, 6);
> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
> filter, 6,
> + filter0, filter1, filter2, filter3);
> +
> + src0 = __lsx_vld(_src, 0);
> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1,
> src2);
> + src3 = __lsx_vldx(_src, src_stride3);
> + _src += src_stride4;
> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3,
> 128,
> + src0, src1, src2, src3);
> + HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1,
> mask2,
> + mask3, filter0, filter1, filter2, filter3, out0,
> out1);
> + src0 = __lsx_vld(_src, 0);
> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1,
> src2);
> + src3 = __lsx_vldx(_src, src_stride3);
> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3,
> 128,
> + src0, src1, src2, src3);
> + HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1,
> mask2,
> + mask3, filter0, filter1, filter2, filter3, out2,
> out3);
> + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7, out0,
> out1);
> + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
> + __lsx_vstelm_w(out0, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_w(out0, dst, 0, 1);
> + dst += dst_stride;
> + __lsx_vstelm_w(out0, dst, 0, 2);
> + dst += dst_stride;
> + __lsx_vstelm_w(out0, dst, 0, 3);
> + dst += dst_stride;
> + __lsx_vstelm_w(out1, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_w(out1, dst, 0, 1);
> + dst += dst_stride;
> + __lsx_vstelm_w(out1, dst, 0, 2);
> + dst += dst_stride;
> + __lsx_vstelm_w(out1, dst, 0, 3);
> +}
> +
> +static void common_hz_8t_4w_lsx(const uint8_t *src, int32_t src_stride,
> + uint8_t *dst, int32_t dst_stride,
> + const int8_t *filter, int32_t height)
> +{
> + if (height == 4) {
> + common_hz_8t_4x4_lsx(src, src_stride, dst, dst_stride, filter);
> + } else if (height == 8) {
> + common_hz_8t_4x8_lsx(src, src_stride, dst, dst_stride, filter);
> + }
> +}
> +
> +static void common_hz_8t_8x4_lsx(const uint8_t *src, int32_t src_stride,
> +                                 uint8_t *dst, int32_t dst_stride,
> +                                 const int8_t *filter)
> +{
> + __m128i src0, src1, src2, src3;
> + __m128i filter0, filter1, filter2, filter3;
> + __m128i mask0, mask1, mask2, mask3;
> + __m128i out0, out1, out2, out3;
> +
> + mask0 = __lsx_vld(mc_filt_mask_arr, 0);
> + src -= 3;
> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
> + mask3 = __lsx_vaddi_bu(mask0, 6);
> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
> filter, 6,
> + filter0, filter1, filter2, filter3);
> +
> + LSX_LD_4(src, src_stride, src0, src1, src2, src3);
> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3,
> 128,
> + src0, src1, src2, src3);
> + HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1,
> mask2,
> + mask3, filter0, filter1, filter2, filter3, out0, out1, out2,
> out3);
> + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7, out0,
> out1);
> + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
> + __lsx_vstelm_d(out0, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_d(out0, dst, 0, 1);
> + dst += dst_stride;
> + __lsx_vstelm_d(out1, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_d(out1, dst, 0, 1);
> +}
> +
> +static void common_hz_8t_8x8mult_lsx(const uint8_t *src, int32_t src_stride,
> +                                     uint8_t *dst, int32_t dst_stride,
> +                                     const int8_t *filter, int32_t height)
> +{
> + uint32_t loop_cnt = height >> 2;
> + int32_t src_stride2 = src_stride << 1;
> + int32_t src_stride3 = src_stride + src_stride2;
> + int32_t src_stride4 = src_stride2 << 1;
> + __m128i src0, src1, src2, src3;
> + __m128i filter0, filter1, filter2, filter3;
> + __m128i mask0, mask1, mask2, mask3;
> + __m128i out0, out1, out2, out3;
> + uint8_t* _src = (uint8_t*)src - 3;
> +
> + mask0 = __lsx_vld(mc_filt_mask_arr, 0);
> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
> + mask3 = __lsx_vaddi_bu(mask0, 6);
> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
> filter, 6,
> + filter0, filter1, filter2, filter3);
> +
> + for (; loop_cnt--;) {
> + src0 = __lsx_vld(_src, 0);
> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2,
> src1, src2);
> + src3 = __lsx_vldx(_src, src_stride3);
> + _src += src_stride4;
> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128,
> src3, 128,
> + src0, src1, src2, src3);
> + HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0,
> mask1, mask2,
> + mask3, filter0, filter1, filter2, filter3, out0, out1,
> out2, out3);
> + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7,
> out0, out1);
> + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
> + __lsx_vstelm_d(out0, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_d(out0, dst, 0, 1);
> + dst += dst_stride;
> + __lsx_vstelm_d(out1, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_d(out1, dst, 0, 1);
> + dst += dst_stride;
> + }
> +}
> +
> +static void common_hz_8t_8w_lsx(const uint8_t *src, int32_t src_stride,
> + uint8_t *dst, int32_t dst_stride,
> + const int8_t *filter, int32_t height)
> +{
> + if (height == 4) {
> + common_hz_8t_8x4_lsx(src, src_stride, dst, dst_stride, filter);
> + } else {
> + common_hz_8t_8x8mult_lsx(src, src_stride, dst, dst_stride,
> + filter, height);
> + }
> +}
> +
> +static void common_hz_8t_16w_lsx(const uint8_t *src, int32_t src_stride,
> +                                 uint8_t *dst, int32_t dst_stride,
> +                                 const int8_t *filter, int32_t height)
> +{
> + uint32_t loop_cnt = height >> 1;
> + int32_t stride = src_stride << 1;
> + __m128i src0, src1, src2, src3;
> + __m128i filter0, filter1, filter2, filter3;
> + __m128i mask0, mask1, mask2, mask3;
> + __m128i out0, out1, out2, out3;
> +
> + mask0 = __lsx_vld(mc_filt_mask_arr, 0);
> + src -= 3;
> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
> + mask3 = __lsx_vaddi_bu(mask0, 6);
> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
> filter, 6,
> + filter0, filter1, filter2, filter3);
> +
> + for (; loop_cnt--;) {
> + const uint8_t* _src = src + src_stride;
> + DUP2_ARG2(__lsx_vld, src, 0, _src, 0, src0, src2);
> + DUP2_ARG2(__lsx_vld, src, 8, _src, 8, src1, src3);
> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128,
> src3, 128,
> + src0, src1, src2, src3);
> + HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0,
> mask1, mask2,
> + mask3, filter0, filter1, filter2, filter3, out0, out1,
> out2, out3);
> + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7,
> out0, out1);
> + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
> + __lsx_vst(out0, dst, 0);
> + dst += dst_stride;
> + __lsx_vst(out1, dst, 0);
> + dst += dst_stride;
> + src += stride;
> + }
> +}
> +
> +static void common_hz_8t_32w_lsx(const uint8_t *src, int32_t src_stride,
> +                                 uint8_t *dst, int32_t dst_stride,
> +                                 const int8_t *filter, int32_t height)
> +{
> + uint32_t loop_cnt = height >> 1;
> + __m128i src0, src1, src2, src3;
> + __m128i filter0, filter1, filter2, filter3;
> + __m128i mask0, mask1, mask2, mask3;
> + __m128i out0, out1, out2, out3;
> + __m128i shuff = {0x0F0E0D0C0B0A0908, 0x1716151413121110};
> +
> + mask0 = __lsx_vld(mc_filt_mask_arr, 0);
> + src -= 3;
> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
> + mask3 = __lsx_vaddi_bu(mask0, 6);
> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
> filter, 6,
> + filter0, filter1, filter2, filter3);
> +
> + for (; loop_cnt--;) {
> + DUP2_ARG2(__lsx_vld, src, 0, src, 16, src0, src2);
> + src3 = __lsx_vld(src, 24);
> + src1 = __lsx_vshuf_b(src2, src0, shuff);
> + src += src_stride;
> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128,
> src3, 128,
> + src0, src1, src2, src3);
> + HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0,
> mask1, mask2,
> + mask3, filter0, filter1, filter2, filter3, out0, out1,
> out2, out3);
> + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7,
> out0, out1);
> + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
> + __lsx_vst(out0, dst, 0);
> + __lsx_vst(out1, dst, 16);
> +
> + DUP2_ARG2(__lsx_vld, src, 0, src, 16, src0, src2);
> + src3 = __lsx_vld(src, 24);
> + src1 = __lsx_vshuf_b(src2, src0, shuff);
> + src += src_stride;
> +
> + dst += dst_stride;
> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128,
> src3, 128,
> + src0, src1, src2, src3);
> + HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0,
> mask1, mask2,
> + mask3, filter0, filter1, filter2, filter3, out0, out1,
> out2, out3);
> + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7,
> out0, out1);
> + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
> + __lsx_vst(out0, dst, 0);
> + __lsx_vst(out1, dst, 16);
> + dst += dst_stride;
> + }
> +}
> +
> +static void common_hz_8t_64w_lsx(const uint8_t *src, int32_t src_stride,
> +                                 uint8_t *dst, int32_t dst_stride,
> +                                 const int8_t *filter, int32_t height)
> +{
> + int32_t loop_cnt = height;
> + __m128i src0, src1, src2, src3;
> + __m128i filter0, filter1, filter2, filter3;
> + __m128i mask0, mask1, mask2, mask3;
> + __m128i out0, out1, out2, out3;
> + __m128i shuff = {0x0F0E0D0C0B0A0908, 0x1716151413121110};
> +
> + mask0 = __lsx_vld(mc_filt_mask_arr, 0);
> + src -= 3;
> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
> + mask3 = __lsx_vaddi_bu(mask0, 6);
> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
> filter, 6,
> + filter0, filter1, filter2, filter3);
> +
> + for (; loop_cnt--;) {
> + DUP2_ARG2(__lsx_vld, src, 0, src, 16, src0, src2);
> + src3 = __lsx_vld(src, 24);
> + src1 = __lsx_vshuf_b(src2, src0, shuff);
> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128,
> src3, 128,
> + src0, src1, src2, src3);
> + HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0,
> mask1, mask2,
> + mask3, filter0, filter1, filter2, filter3, out0, out1,
> out2, out3);
> + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7,
> out0, out1);
> + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
> + __lsx_vst(out0, dst, 0);
> + __lsx_vst(out1, dst, 16);
> +
> + DUP2_ARG2(__lsx_vld, src, 32, src, 48, src0, src2);
> + src3 = __lsx_vld(src, 56);
> + src1 = __lsx_vshuf_b(src2, src0, shuff);
> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128,
> src3, 128,
> + src0, src1, src2, src3);
> + HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0,
> mask1, mask2,
> + mask3, filter0, filter1, filter2, filter3, out0, out1,
> out2, out3);
> + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7,
> out0, out1);
> + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
> + __lsx_vst(out0, dst, 32);
> + __lsx_vst(out1, dst, 48);
> + src += src_stride;
> + dst += dst_stride;
> + }
> +}
> +
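> +/* Vertical-only 8-tap MC.  Seven rows of context are loaded up front
> + * (starting at src - 3 * src_stride); each loop iteration filters four
> + * new rows and carries the interleaved row pairs over in the reg*
> + * vectors. */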
> +static void common_vt_8t_4w_lsx(const uint8_t *src, int32_t src_stride,
> + uint8_t *dst, int32_t dst_stride,
> + const int8_t *filter, int32_t height)
> +{
> + uint32_t loop_cnt = height >> 2;
> + int32_t src_stride2 = src_stride << 1;
> + int32_t src_stride3 = src_stride + src_stride2;
> + int32_t src_stride4 = src_stride2 << 1;
> + __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8,
> src9, src10;
> + __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
> + __m128i reg0, reg1, reg2, reg3, reg4;
> + __m128i filter0, filter1, filter2, filter3;
> + __m128i out0, out1;
> + uint8_t* _src = (uint8_t*)src - src_stride3;
> +
> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
> filter, 6,
> + filter0, filter1, filter2, filter3);
> + src0 = __lsx_vld(_src, 0);
> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1,
> src2);
> + src3 = __lsx_vldx(_src, src_stride3);
> + _src += src_stride4;
> + src4 = __lsx_vld(_src, 0);
> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src5,
> src6);
> + _src += src_stride3;
> + DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src5, src4, src2,
> src1, tmp0,
> + tmp1, tmp2, tmp3);
> + DUP2_ARG2(__lsx_vilvl_b, src4, src3, src6, src5, tmp4, tmp5);
> + DUP2_ARG2(__lsx_vilvl_d, tmp3, tmp0, tmp4, tmp1, reg0, reg1);
> + reg2 = __lsx_vilvl_d(tmp5, tmp2);
> + DUP2_ARG2(__lsx_vxori_b, reg0, 128, reg1, 128, reg0, reg1);
> + reg2 = __lsx_vxori_b(reg2, 128);
> +
> + for (;loop_cnt--;) {
> + src7 = __lsx_vld(_src, 0);
> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2,
> src8, src9);
> + src10 = __lsx_vldx(_src, src_stride3);
> + _src += src_stride4;
> + DUP4_ARG2(__lsx_vilvl_b, src7, src6, src8, src7, src9, src8,
> src10,
> + src9, tmp0, tmp1, tmp2, tmp3);
> + DUP2_ARG2(__lsx_vilvl_d, tmp1, tmp0, tmp3, tmp2, reg3, reg4);
> + DUP2_ARG2(__lsx_vxori_b, reg3, 128, reg4, 128, reg3, reg4);
> + out0 = FILT_8TAP_DPADD_S_H(reg0, reg1, reg2, reg3, filter0,
> filter1,
> + filter2, filter3);
> + out1 = FILT_8TAP_DPADD_S_H(reg1, reg2, reg3, reg4, filter0,
> filter1,
> + filter2, filter3);
> + out0 = __lsx_vssrarni_b_h(out1, out0, 7);
> + out0 = __lsx_vxori_b(out0, 128);
> + __lsx_vstelm_w(out0, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_w(out0, dst, 0, 1);
> + dst += dst_stride;
> + __lsx_vstelm_w(out0, dst, 0, 2);
> + dst += dst_stride;
> + __lsx_vstelm_w(out0, dst, 0, 3);
> + dst += dst_stride;
> +
> + reg0 = reg2;
> + reg1 = reg3;
> + reg2 = reg4;
> + src6 = src10;
> + }
> +}
> +
> +static void common_vt_8t_8w_lsx(const uint8_t *src, int32_t src_stride,
> + uint8_t *dst, int32_t dst_stride,
> + const int8_t *filter, int32_t height)
> +{
> + uint32_t loop_cnt = height >> 2;
> + __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8,
> src9, src10;
> + __m128i tmp0, tmp1, tmp2, tmp3;
> + __m128i reg0, reg1, reg2, reg3, reg4, reg5;
> + __m128i filter0, filter1, filter2, filter3;
> + __m128i out0, out1, out2, out3;
> + int32_t src_stride2 = src_stride << 1;
> + int32_t src_stride3 = src_stride + src_stride2;
> + int32_t src_stride4 = src_stride2 << 1;
> + uint8_t* _src = (uint8_t*)src - src_stride3;
> +
> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
> filter, 6,
> + filter0, filter1, filter2, filter3);
> +
> + src0 = __lsx_vld(_src, 0);
> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1,
> src2);
> + src3 = __lsx_vldx(_src, src_stride3);
> + _src += src_stride4;
> + src4 = __lsx_vld(_src, 0);
> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src5,
> src6);
> + _src += src_stride3;
> +
> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3,
> 128,
> + src0, src1, src2, src3);
> + DUP2_ARG2(__lsx_vxori_b, src4, 128, src5, 128, src4, src5);
> + src6 = __lsx_vxori_b(src6, 128);
> + DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src5, src4, src2,
> src1,
> + reg0, reg1, reg2, reg3);
> + DUP2_ARG2(__lsx_vilvl_b, src4, src3, src6, src5, reg4, reg5);
> +
> + for (;loop_cnt--;) {
> + src7 = __lsx_vld(_src, 0);
> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2,
> src8, src9);
> + src10 = __lsx_vldx(_src, src_stride3);
> + _src += src_stride4;
> + DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128,
> src10, 128,
> + src7, src8, src9, src10);
> + DUP4_ARG2(__lsx_vilvl_b, src7, src6, src8, src7, src9, src8,
> src10,
> + src9, tmp0, tmp1, tmp2, tmp3);
> + out0 = FILT_8TAP_DPADD_S_H(reg0, reg1, reg2, tmp0, filter0,
> filter1,
> + filter2, filter3);
> + out1 = FILT_8TAP_DPADD_S_H(reg3, reg4, reg5, tmp1, filter0,
> filter1,
> + filter2, filter3);
> + out2 = FILT_8TAP_DPADD_S_H(reg1, reg2, tmp0, tmp2, filter0,
> filter1,
> + filter2, filter3);
> + out3 = FILT_8TAP_DPADD_S_H(reg4, reg5, tmp1, tmp3, filter0,
> filter1,
> + filter2, filter3);
> + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7,
> out0, out1);
> + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
> + __lsx_vstelm_d(out0, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_d(out0, dst, 0, 1);
> + dst += dst_stride;
> + __lsx_vstelm_d(out1, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_d(out1, dst, 0, 1);
> + dst += dst_stride;
> +
> + reg0 = reg2;
> + reg1 = tmp0;
> + reg2 = tmp2;
> + reg3 = reg5;
> + reg4 = tmp1;
> + reg5 = tmp3;
> + src6 = src10;
> + }
> +}
> +
> +static void common_vt_8t_16w_lsx(const uint8_t *src, int32_t src_stride,
> +                                 uint8_t *dst, int32_t dst_stride,
> +                                 const int8_t *filter, int32_t height)
> +{
> + uint32_t loop_cnt = height >> 2;
> + __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8,
> src9, src10;
> + __m128i filter0, filter1, filter2, filter3;
> + __m128i reg0, reg1, reg2, reg3, reg4, reg5;
> + __m128i reg6, reg7, reg8, reg9, reg10, reg11;
> + __m128i tmp0, tmp1, tmp2, tmp3;
> + int32_t src_stride2 = src_stride << 1;
> + int32_t src_stride3 = src_stride + src_stride2;
> + int32_t src_stride4 = src_stride2 << 1;
> + uint8_t* _src = (uint8_t*)src - src_stride3;
> +
> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
> filter, 6,
> + filter0, filter1, filter2, filter3);
> + src0 = __lsx_vld(_src, 0);
> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1,
> src2);
> + src3 = __lsx_vldx(_src, src_stride3);
> + _src += src_stride4;
> + src4 = __lsx_vld(_src, 0);
> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src5,
> src6);
> + _src += src_stride3;
> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3,
> 128,
> + src0, src1, src2, src3);
> + DUP2_ARG2(__lsx_vxori_b, src4, 128, src5, 128, src4, src5);
> + src6 = __lsx_vxori_b(src6, 128);
> + DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src5, src4, src2,
> src1,
> + reg0, reg1, reg2, reg3);
> + DUP2_ARG2(__lsx_vilvl_b, src4, src3, src6, src5, reg4, reg5);
> + DUP4_ARG2(__lsx_vilvh_b, src1, src0, src3, src2, src5, src4, src2,
> src1,
> + reg6, reg7, reg8, reg9);
> + DUP2_ARG2(__lsx_vilvh_b, src4, src3, src6, src5, reg10, reg11);
> +
> + for (;loop_cnt--;) {
> + src7 = __lsx_vld(_src, 0);
> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2,
> src8, src9);
> + src10 = __lsx_vldx(_src, src_stride3);
> + _src += src_stride4;
> + DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128,
> src10, 128,
> + src7, src8, src9, src10);
> + DUP4_ARG2(__lsx_vilvl_b, src7, src6, src8, src7, src9, src8,
> src10, src9,
> + src0, src1, src2, src3);
> + DUP4_ARG2(__lsx_vilvh_b, src7, src6, src8, src7, src9, src8,
> src10, src9,
> + src4, src5, src7, src8);
> + tmp0 = FILT_8TAP_DPADD_S_H(reg0, reg1, reg2, src0, filter0,
> filter1,
> + filter2, filter3);
> + tmp1 = FILT_8TAP_DPADD_S_H(reg3, reg4, reg5, src1, filter0,
> filter1,
> + filter2, filter3);
> + tmp2 = FILT_8TAP_DPADD_S_H(reg6, reg7, reg8, src4, filter0,
> filter1,
> + filter2, filter3);
> + tmp3 = FILT_8TAP_DPADD_S_H(reg9, reg10, reg11, src5, filter0,
> filter1,
> + filter2, filter3);
> + DUP2_ARG3(__lsx_vssrarni_b_h, tmp2, tmp0, 7, tmp3, tmp1, 7,
> tmp0, tmp1);
> + DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
> + __lsx_vst(tmp0, dst, 0);
> + dst += dst_stride;
> + __lsx_vst(tmp1, dst, 0);
> + dst += dst_stride;
> + tmp0 = FILT_8TAP_DPADD_S_H(reg1, reg2, src0, src2, filter0,
> filter1,
> + filter2, filter3);
> + tmp1 = FILT_8TAP_DPADD_S_H(reg4, reg5, src1, src3, filter0,
> filter1,
> + filter2, filter3);
> + tmp2 = FILT_8TAP_DPADD_S_H(reg7, reg8, src4, src7, filter0,
> filter1,
> + filter2, filter3);
> + tmp3 = FILT_8TAP_DPADD_S_H(reg10, reg11, src5, src8, filter0,
> filter1,
> + filter2, filter3);
> + DUP2_ARG3(__lsx_vssrarni_b_h, tmp2, tmp0, 7, tmp3, tmp1, 7,
> tmp0, tmp1);
> + DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
> + __lsx_vst(tmp0, dst, 0);
> + dst += dst_stride;
> + __lsx_vst(tmp1, dst, 0);
> + dst += dst_stride;
> +
> + reg0 = reg2;
> + reg1 = src0;
> + reg2 = src2;
> + reg3 = reg5;
> + reg4 = src1;
> + reg5 = src3;
> + reg6 = reg8;
> + reg7 = src4;
> + reg8 = src7;
> + reg9 = reg11;
> + reg10 = src5;
> + reg11 = src8;
> + src6 = src10;
> + }
> +}
> +
> +static void common_vt_8t_16w_mult_lsx(const uint8_t *src, int32_t src_stride,
> +                                      uint8_t *dst, int32_t dst_stride,
> +                                      const int8_t *filter, int32_t height,
> +                                      int32_t width)
> +{
> + uint8_t *src_tmp;
> + uint8_t *dst_tmp;
> + uint32_t cnt = width >> 4;
> + __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8,
> src9, src10;
> + __m128i filter0, filter1, filter2, filter3;
> + __m128i reg0, reg1, reg2, reg3, reg4, reg5;
> + __m128i reg6, reg7, reg8, reg9, reg10, reg11;
> + __m128i tmp0, tmp1, tmp2, tmp3;
> + int32_t src_stride2 = src_stride << 1;
> + int32_t src_stride3 = src_stride + src_stride2;
> + int32_t src_stride4 = src_stride2 << 1;
> + int32_t dst_stride2 = dst_stride << 1;
> + int32_t dst_stride3 = dst_stride2 + dst_stride;
> + int32_t dst_stride4 = dst_stride2 << 1;
> + uint8_t* _src = (uint8_t*)src - src_stride3;
> +
> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
> filter, 6,
> + filter0, filter1, filter2, filter3);
> + for (;cnt--;) {
> + uint32_t loop_cnt = height >> 2;
> +
> + src_tmp = _src;
> + dst_tmp = dst;
> +
> + src0 = __lsx_vld(src_tmp, 0);
> + DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp,
> src_stride2,
> + src1, src2);
> + src3 = __lsx_vldx(src_tmp, src_stride3);
> + src_tmp += src_stride4;
> + src4 = __lsx_vld(src_tmp, 0);
> + DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp,
> src_stride2,
> + src5, src6);
> + src_tmp += src_stride3;
> +
> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128,
> src3, 128,
> + src0, src1, src2, src3);
> + DUP2_ARG2(__lsx_vxori_b, src4, 128, src5, 128, src4, src5);
> + src6 = __lsx_vxori_b(src6, 128);
> + DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src5, src4,
> src2, src1,
> + reg0, reg1, reg2, reg3);
> + DUP2_ARG2(__lsx_vilvl_b, src4, src3, src6, src5, reg4, reg5);
> + DUP4_ARG2(__lsx_vilvh_b, src1, src0, src3, src2, src5, src4,
> src2, src1,
> + reg6, reg7, reg8, reg9);
> + DUP2_ARG2(__lsx_vilvh_b, src4, src3, src6, src5, reg10, reg11);
> +
> + for (;loop_cnt--;) {
> + src7 = __lsx_vld(src_tmp, 0);
> + DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp,
> src_stride2,
> + src8, src9);
> + src10 = __lsx_vldx(src_tmp, src_stride3);
> + src_tmp += src_stride4;
> + DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128,
> src10,
> + 128, src7, src8, src9, src10);
> + DUP4_ARG2(__lsx_vilvl_b, src7, src6, src8, src7, src9,
> src8,
> + src10, src9, src0, src1, src2, src3);
> + DUP4_ARG2(__lsx_vilvh_b, src7, src6, src8, src7, src9,
> src8,
> + src10, src9, src4, src5, src7, src8);
> + tmp0 = FILT_8TAP_DPADD_S_H(reg0, reg1, reg2, src0, filter0,
> + filter1, filter2, filter3);
> + tmp1 = FILT_8TAP_DPADD_S_H(reg3, reg4, reg5, src1, filter0,
> + filter1, filter2, filter3);
> + tmp2 = FILT_8TAP_DPADD_S_H(reg6, reg7, reg8, src4, filter0,
> + filter1, filter2, filter3);
> + tmp3 = FILT_8TAP_DPADD_S_H(reg9, reg10, reg11, src5,
> filter0,
> + filter1, filter2, filter3);
> + DUP2_ARG3(__lsx_vssrarni_b_h, tmp2, tmp0, 7, tmp3, tmp1, 7,
> + tmp0, tmp1);
> + DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
> + __lsx_vst(tmp0, dst_tmp, 0);
> + __lsx_vstx(tmp1, dst_tmp, dst_stride);
> + tmp0 = FILT_8TAP_DPADD_S_H(reg1, reg2, src0, src2, filter0,
> + filter1, filter2, filter3);
> + tmp1 = FILT_8TAP_DPADD_S_H(reg4, reg5, src1, src3, filter0,
> + filter1, filter2, filter3);
> + tmp2 = FILT_8TAP_DPADD_S_H(reg7, reg8, src4, src7, filter0,
> + filter1, filter2, filter3);
> + tmp3 = FILT_8TAP_DPADD_S_H(reg10, reg11, src5, src8,
> filter0,
> + filter1, filter2, filter3);
> + DUP2_ARG3(__lsx_vssrarni_b_h, tmp2, tmp0, 7, tmp3, tmp1, 7,
> + tmp0, tmp1);
> + DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
> + __lsx_vstx(tmp0, dst_tmp, dst_stride2);
> + __lsx_vstx(tmp1, dst_tmp, dst_stride3);
> + dst_tmp += dst_stride4;
> +
> + reg0 = reg2;
> + reg1 = src0;
> + reg2 = src2;
> + reg3 = reg5;
> + reg4 = src1;
> + reg5 = src3;
> + reg6 = reg8;
> + reg7 = src4;
> + reg8 = src7;
> + reg9 = reg11;
> + reg10 = src5;
> + reg11 = src8;
> + src6 = src10;
> + }
> + _src += 16;
> + dst += 16;
> + }
> +}
> +
> +static void common_vt_8t_32w_lsx(const uint8_t *src, int32_t src_stride,
> +                                 uint8_t *dst, int32_t dst_stride,
> +                                 const int8_t *filter, int32_t height)
> +{
> +    common_vt_8t_16w_mult_lsx(src, src_stride, dst, dst_stride,
> +                              filter, height, 32);
> +}
> +
> +static void common_vt_8t_64w_lsx(const uint8_t *src, int32_t src_stride,
> +                                 uint8_t *dst, int32_t dst_stride,
> +                                 const int8_t *filter, int32_t height)
> +{
> + common_vt_8t_16w_mult_lsx(src, src_stride, dst, dst_stride,
> + filter, height, 64);
> +}
> +
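> +/* Combined horizontal + vertical 8-tap MC (2-D case): each row is first
> + * filtered horizontally with HORIZ_8TAP_FILT, and the intermediate rows
> + * are then filtered vertically with FILT_8TAP_DPADD_S_H. */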
> +static void common_hv_8ht_8vt_4w_lsx(const uint8_t *src, int32_t src_stride,
> +                                     uint8_t *dst, int32_t dst_stride,
> +                                     const int8_t *filter_horiz,
> +                                     const int8_t *filter_vert,
> +                                     int32_t height)
> +{
> + uint32_t loop_cnt = height >> 2;
> + __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8,
> src9, src10;
> + __m128i filt_hz0, filt_hz1, filt_hz2, filt_hz3;
> + __m128i filt_vt0, filt_vt1, filt_vt2, filt_vt3;
> + __m128i mask0, mask1, mask2, mask3;
> + __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
> + __m128i out0, out1;
> + __m128i shuff = {0x0F0E0D0C0B0A0908, 0x1716151413121110};
> + int32_t src_stride2 = src_stride << 1;
> + int32_t src_stride3 = src_stride + src_stride2;
> + int32_t src_stride4 = src_stride2 << 1;
> + uint8_t* _src = (uint8_t*)src - src_stride3 - 3;
> +
> + mask0 = __lsx_vld(mc_filt_mask_arr, 16);
> + DUP4_ARG2(__lsx_vldrepl_h, filter_horiz, 0, filter_horiz, 2,
> filter_horiz, 4,
> + filter_horiz, 6, filt_hz0, filt_hz1, filt_hz2, filt_hz3);
> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
> + mask3 = __lsx_vaddi_bu(mask0, 6);
> +
> + src0 = __lsx_vld(_src, 0);
> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1,
> src2);
> + src3 = __lsx_vldx(_src, src_stride3);
> + _src += src_stride4;
> + src4 = __lsx_vld(_src, 0);
> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src5,
> src6);
> + _src += src_stride3;
> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3,
> 128,
> + src0, src1, src2, src3);
> + DUP2_ARG2(__lsx_vxori_b, src4, 128, src5, 128, src4, src5);
> + src6 = __lsx_vxori_b(src6, 128);
> +
> + tmp0 = HORIZ_8TAP_FILT(src0, src1, mask0, mask1, mask2, mask3,
> filt_hz0,
> + filt_hz1, filt_hz2, filt_hz3);
> + tmp2 = HORIZ_8TAP_FILT(src2, src3, mask0, mask1, mask2, mask3,
> filt_hz0,
> + filt_hz1, filt_hz2, filt_hz3);
> + tmp4 = HORIZ_8TAP_FILT(src4, src5, mask0, mask1, mask2, mask3,
> filt_hz0,
> + filt_hz1, filt_hz2, filt_hz3);
> + tmp5 = HORIZ_8TAP_FILT(src5, src6, mask0, mask1, mask2, mask3,
> filt_hz0,
> + filt_hz1, filt_hz2, filt_hz3);
> + DUP2_ARG3(__lsx_vshuf_b, tmp2, tmp0, shuff, tmp4, tmp2, shuff,
> tmp1, tmp3);
> + DUP4_ARG2(__lsx_vldrepl_h, filter_vert, 0, filter_vert, 2,
> filter_vert, 4,
> + filter_vert, 6, filt_vt0, filt_vt1, filt_vt2, filt_vt3);
> + DUP2_ARG2(__lsx_vpackev_b, tmp1, tmp0, tmp3, tmp2, tmp0, tmp1);
> + tmp2 = __lsx_vpackev_b(tmp5, tmp4);
> +
> + for (;loop_cnt--;) {
> + src7 = __lsx_vld(_src, 0);
> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2,
> src8, src9);
> + src10 = __lsx_vldx(_src, src_stride3);
> + _src += src_stride4;
> + DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128,
> src10, 128,
> + src7, src8, src9, src10);
> + tmp3 = HORIZ_8TAP_FILT(src7, src8, mask0, mask1, mask2, mask3,
> filt_hz0,
> + filt_hz1, filt_hz2, filt_hz3);
> + tmp4 = __lsx_vshuf_b(tmp3, tmp5, shuff);
> + tmp4 = __lsx_vpackev_b(tmp3, tmp4);
> + out0 = FILT_8TAP_DPADD_S_H(tmp0, tmp1, tmp2, tmp4, filt_vt0,
> filt_vt1,
> + filt_vt2, filt_vt3);
> + src1 = HORIZ_8TAP_FILT(src9, src10, mask0, mask1, mask2, mask3,
> + filt_hz0, filt_hz1, filt_hz2, filt_hz3);
> + src0 = __lsx_vshuf_b(src1, tmp3, shuff);
> + src0 = __lsx_vpackev_b(src1, src0);
> + out1 = FILT_8TAP_DPADD_S_H(tmp1, tmp2, tmp4, src0, filt_vt0,
> filt_vt1,
> + filt_vt2, filt_vt3);
> + out0 = __lsx_vssrarni_b_h(out1, out0, 7);
> + out0 = __lsx_vxori_b(out0, 128);
> + __lsx_vstelm_w(out0, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_w(out0, dst, 0, 1);
> + dst += dst_stride;
> + __lsx_vstelm_w(out0, dst, 0, 2);
> + dst += dst_stride;
> + __lsx_vstelm_w(out0, dst, 0, 3);
> + dst += dst_stride;
> +
> + tmp5 = src1;
> + tmp0 = tmp2;
> + tmp1 = tmp4;
> + tmp2 = src0;
> + }
> +}
> +
> +static void common_hv_8ht_8vt_8w_lsx(const uint8_t *src, int32_t src_stride,
> +                                     uint8_t *dst, int32_t dst_stride,
> +                                     const int8_t *filter_horiz,
> +                                     const int8_t *filter_vert,
> +                                     int32_t height)
> +{
> + uint32_t loop_cnt = height >> 2;
> + __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8,
> src9, src10;
> + __m128i filt_hz0, filt_hz1, filt_hz2, filt_hz3;
> + __m128i filt_vt0, filt_vt1, filt_vt2, filt_vt3;
> + __m128i mask0, mask1, mask2, mask3;
> + __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
> + __m128i out0, out1;
> + int32_t src_stride2 = src_stride << 1;
> + int32_t src_stride3 = src_stride + src_stride2;
> + int32_t src_stride4 = src_stride2 << 1;
> + uint8_t* _src = (uint8_t*)src - src_stride3 - 3;
> +
> + mask0 = __lsx_vld(mc_filt_mask_arr, 0);
> + DUP4_ARG2(__lsx_vldrepl_h, filter_horiz, 0, filter_horiz, 2,
> filter_horiz,
> + 4, filter_horiz, 6, filt_hz0, filt_hz1, filt_hz2,
> filt_hz3);
> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
> + mask3 = __lsx_vaddi_bu(mask0, 6);
> +
> + src0 = __lsx_vld(_src, 0);
> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1,
> src2);
> + src3 = __lsx_vldx(_src, src_stride3);
> + _src += src_stride4;
> + src4 = __lsx_vld(_src, 0);
> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src5,
> src6);
> + _src += src_stride3;
> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3,
> 128,
> + src0, src1, src2, src3);
> + DUP2_ARG2(__lsx_vxori_b, src4, 128, src5, 128, src4, src5);
> + src6 = __lsx_vxori_b(src6, 128);
> +
> + src0 = HORIZ_8TAP_FILT(src0, src0, mask0, mask1, mask2, mask3,
> filt_hz0,
> + filt_hz1, filt_hz2, filt_hz3);
> + src1 = HORIZ_8TAP_FILT(src1, src1, mask0, mask1, mask2, mask3,
> filt_hz0,
> + filt_hz1, filt_hz2, filt_hz3);
> + src2 = HORIZ_8TAP_FILT(src2, src2, mask0, mask1, mask2, mask3,
> filt_hz0,
> + filt_hz1, filt_hz2, filt_hz3);
> + src3 = HORIZ_8TAP_FILT(src3, src3, mask0, mask1, mask2, mask3,
> filt_hz0,
> + filt_hz1, filt_hz2, filt_hz3);
> + src4 = HORIZ_8TAP_FILT(src4, src4, mask0, mask1, mask2, mask3,
> filt_hz0,
> + filt_hz1, filt_hz2, filt_hz3);
> + src5 = HORIZ_8TAP_FILT(src5, src5, mask0, mask1, mask2, mask3,
> filt_hz0,
> + filt_hz1, filt_hz2, filt_hz3);
> + src6 = HORIZ_8TAP_FILT(src6, src6, mask0, mask1, mask2, mask3,
> filt_hz0,
> + filt_hz1, filt_hz2, filt_hz3);
> +
> + DUP4_ARG2(__lsx_vldrepl_h, filter_vert, 0, filter_vert, 2,
> filter_vert, 4,
> + filter_vert, 6, filt_vt0, filt_vt1, filt_vt2, filt_vt3);
> + DUP4_ARG2(__lsx_vpackev_b, src1, src0, src3, src2, src5, src4,
> + src2, src1, tmp0, tmp1, tmp2, tmp4);
> + DUP2_ARG2(__lsx_vpackev_b, src4, src3, src6, src5, tmp5, tmp6);
> +
> + for (;loop_cnt--;) {
> + src7 = __lsx_vld(_src, 0);
> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2,
> src8, src9);
> + src10 = __lsx_vldx(_src, src_stride3);
> + _src += src_stride4;
> +
> + DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128,
> src10, 128,
> + src7, src8, src9, src10);
> + src7 = HORIZ_8TAP_FILT(src7, src7, mask0, mask1, mask2, mask3,
> filt_hz0,
> + filt_hz1, filt_hz2, filt_hz3);
> + tmp3 = __lsx_vpackev_b(src7, src6);
> + out0 = FILT_8TAP_DPADD_S_H(tmp0, tmp1, tmp2, tmp3, filt_vt0,
> filt_vt1,
> + filt_vt2, filt_vt3);
> + src8 = HORIZ_8TAP_FILT(src8, src8, mask0, mask1, mask2, mask3,
> filt_hz0,
> + filt_hz1, filt_hz2, filt_hz3);
> + src0 = __lsx_vpackev_b(src8, src7);
> + out1 = FILT_8TAP_DPADD_S_H(tmp4, tmp5, tmp6, src0, filt_vt0,
> filt_vt1,
> + filt_vt2, filt_vt3);
> + src9 = HORIZ_8TAP_FILT(src9, src9, mask0, mask1, mask2, mask3,
> filt_hz0,
> + filt_hz1, filt_hz2, filt_hz3);
> + src1 = __lsx_vpackev_b(src9, src8);
> + src3 = FILT_8TAP_DPADD_S_H(tmp1, tmp2, tmp3, src1, filt_vt0,
> filt_vt1,
> + filt_vt2, filt_vt3);
> + src10 = HORIZ_8TAP_FILT(src10, src10, mask0, mask1, mask2,
> mask3,
> + filt_hz0, filt_hz1, filt_hz2,
> filt_hz3);
> + src2 = __lsx_vpackev_b(src10, src9);
> + src4 = FILT_8TAP_DPADD_S_H(tmp5, tmp6, src0, src2, filt_vt0,
> filt_vt1,
> + filt_vt2, filt_vt3);
> + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, src4, src3, 7,
> out0, out1);
> + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
> + __lsx_vstelm_d(out0, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_d(out0, dst, 0, 1);
> + dst += dst_stride;
> + __lsx_vstelm_d(out1, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_d(out1, dst, 0, 1);
> + dst += dst_stride;
> +
> + src6 = src10;
> + tmp0 = tmp2;
> + tmp1 = tmp3;
> + tmp2 = src1;
> + tmp4 = tmp6;
> + tmp5 = src0;
> + tmp6 = src2;
> + }
> +}
> +
> +static void common_hv_8ht_8vt_16w_lsx(const uint8_t *src, int32_t src_stride,
> +                                      uint8_t *dst, int32_t dst_stride,
> +                                      const int8_t *filter_horiz,
> +                                      const int8_t *filter_vert,
> +                                      int32_t height)
> +{
> + int32_t multiple8_cnt;
> +
> + for (multiple8_cnt = 2; multiple8_cnt--;) {
> + common_hv_8ht_8vt_8w_lsx(src, src_stride, dst, dst_stride,
> filter_horiz,
> + filter_vert, height);
> + src += 8;
> + dst += 8;
> + }
> +}
> +
> +static void common_hv_8ht_8vt_32w_lsx(const uint8_t *src, int32_t src_stride,
> +                                      uint8_t *dst, int32_t dst_stride,
> +                                      const int8_t *filter_horiz,
> +                                      const int8_t *filter_vert,
> +                                      int32_t height)
> +{
> + int32_t multiple8_cnt;
> +
> + for (multiple8_cnt = 4; multiple8_cnt--;) {
> + common_hv_8ht_8vt_8w_lsx(src, src_stride, dst, dst_stride,
> filter_horiz,
> + filter_vert, height);
> + src += 8;
> + dst += 8;
> + }
> +}
> +
> +static void common_hv_8ht_8vt_64w_lsx(const uint8_t *src, int32_t src_stride,
> +                                      uint8_t *dst, int32_t dst_stride,
> +                                      const int8_t *filter_horiz,
> +                                      const int8_t *filter_vert,
> +                                      int32_t height)
> +{
> + int32_t multiple8_cnt;
> +
> + for (multiple8_cnt = 8; multiple8_cnt--;) {
> + common_hv_8ht_8vt_8w_lsx(src, src_stride, dst, dst_stride,
> filter_horiz,
> + filter_vert, height);
> + src += 8;
> + dst += 8;
> + }
> +}
> +
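> +/* Plain copy helpers for the unfiltered (full-pel) case, four rows per
> + * loop iteration at widths 8/16/32/64. */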
> +static void copy_width8_lsx(const uint8_t *src, int32_t src_stride,
> + uint8_t *dst, int32_t dst_stride,
> + int32_t height)
> +{
> + int32_t cnt = height >> 2;
> + __m128i src0, src1, src2, src3;
> +
> + for (;cnt--;) {
> + src0 = __lsx_vldrepl_d(src, 0);
> + src += src_stride;
> + src1 = __lsx_vldrepl_d(src, 0);
> + src += src_stride;
> + src2 = __lsx_vldrepl_d(src, 0);
> + src += src_stride;
> + src3 = __lsx_vldrepl_d(src, 0);
> + src += src_stride;
> + __lsx_vstelm_d(src0, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_d(src1, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_d(src2, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_d(src3, dst, 0, 0);
> + dst += dst_stride;
> + }
> +}
> +
> +static void copy_width16_lsx(const uint8_t *src, int32_t src_stride,
> + uint8_t *dst, int32_t dst_stride,
> + int32_t height)
> +{
> + int32_t cnt = height >> 2;
> + __m128i src0, src1, src2, src3;
> + int32_t src_stride2 = src_stride << 1;
> + int32_t src_stride3 = src_stride + src_stride2;
> + int32_t src_stride4 = src_stride2 << 1;
> + int32_t dst_stride2 = dst_stride << 1;
> + int32_t dst_stride3 = dst_stride2 + dst_stride;
> + int32_t dst_stride4 = dst_stride2 << 1;
> + uint8_t *_src = (uint8_t*)src;
> +
> + for (;cnt--;) {
> + src0 = __lsx_vld(_src, 0);
> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2,
> src1, src2);
> + src3 = __lsx_vldx(_src, src_stride3);
> + _src += src_stride4;
> + __lsx_vst(src0, dst, 0);
> + __lsx_vstx(src1, dst, dst_stride);
> + __lsx_vstx(src2, dst, dst_stride2);
> + __lsx_vstx(src3, dst, dst_stride3);
> + dst += dst_stride4;
> + }
> +}
> +
> +static void copy_width32_lsx(const uint8_t *src, int32_t src_stride,
> + uint8_t *dst, int32_t dst_stride,
> + int32_t height)
> +{
> + int32_t cnt = height >> 2;
> + uint8_t *src_tmp1 = (uint8_t*)src;
> + uint8_t *dst_tmp1 = dst;
> + uint8_t *src_tmp2 = src_tmp1 + 16;
> + uint8_t *dst_tmp2 = dst_tmp1 + 16;
> + int32_t src_stride2 = src_stride << 1;
> + int32_t src_stride3 = src_stride + src_stride2;
> + int32_t src_stride4 = src_stride2 << 1;
> + int32_t dst_stride2 = dst_stride << 1;
> + int32_t dst_stride3 = dst_stride2 + dst_stride;
> + int32_t dst_stride4 = dst_stride2 << 1;
> + __m128i src0, src1, src2, src3, src4, src5, src6, src7;
> +
> + for (;cnt--;) {
> + src0 = __lsx_vld(src_tmp1, 0);
> + DUP2_ARG2(__lsx_vldx, src_tmp1, src_stride, src_tmp1,
> src_stride2,
> + src1, src2);
> + src3 = __lsx_vldx(src_tmp1, src_stride3);
> + src_tmp1 += src_stride4;
> +
> + src4 = __lsx_vld(src_tmp2, 0);
> + DUP2_ARG2(__lsx_vldx, src_tmp2, src_stride, src_tmp2,
> src_stride2,
> + src5, src6);
> + src7 = __lsx_vldx(src_tmp2, src_stride3);
> + src_tmp2 += src_stride4;
> +
> + __lsx_vst(src0, dst_tmp1, 0);
> + __lsx_vstx(src1, dst_tmp1, dst_stride);
> + __lsx_vstx(src2, dst_tmp1, dst_stride2);
> + __lsx_vstx(src3, dst_tmp1, dst_stride3);
> + dst_tmp1 += dst_stride4;
> + __lsx_vst(src4, dst_tmp2, 0);
> + __lsx_vstx(src5, dst_tmp2, dst_stride);
> + __lsx_vstx(src6, dst_tmp2, dst_stride2);
> + __lsx_vstx(src7, dst_tmp2, dst_stride3);
> + dst_tmp2 += dst_stride4;
> + }
> +}
> +
> +static void copy_width64_lsx(const uint8_t *src, int32_t src_stride,
> + uint8_t *dst, int32_t dst_stride,
> + int32_t height)
> +{
> + int32_t cnt = height >> 2;
> + __m128i src0, src1, src2, src3, src4, src5, src6, src7;
> + __m128i src8, src9, src10, src11, src12, src13, src14, src15;
> +
> + for (;cnt--;) {
> + DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48,
> + src0, src1, src2, src3);
> + src += src_stride;
> + DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48,
> + src4, src5, src6, src7);
> + src += src_stride;
> + DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48,
> + src8, src9, src10, src11);
> + src += src_stride;
> + DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48,
> + src12, src13, src14, src15);
> + src += src_stride;
> + __lsx_vst(src0, dst, 0);
> + __lsx_vst(src1, dst, 16);
> + __lsx_vst(src2, dst, 32);
> + __lsx_vst(src3, dst, 48);
> + dst += dst_stride;
> + __lsx_vst(src4, dst, 0);
> + __lsx_vst(src5, dst, 16);
> + __lsx_vst(src6, dst, 32);
> + __lsx_vst(src7, dst, 48);
> + dst += dst_stride;
> + __lsx_vst(src8, dst, 0);
> + __lsx_vst(src9, dst, 16);
> + __lsx_vst(src10, dst, 32);
> + __lsx_vst(src11, dst, 48);
> + dst += dst_stride;
> + __lsx_vst(src12, dst, 0);
> + __lsx_vst(src13, dst, 16);
> + __lsx_vst(src14, dst, 32);
> + __lsx_vst(src15, dst, 48);
> + dst += dst_stride;
> + }
> +}
> +
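> +/* "and_aver_dst" variants: same 8-tap filtering as above, but the result
> + * is averaged with the existing destination pixels (__lsx_vavgr_bu)
> + * before being stored (the averaging MC path). */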
> +static void common_hz_8t_and_aver_dst_4x4_lsx(const uint8_t *src,
> +                                              int32_t src_stride,
> +                                              uint8_t *dst,
> +                                              int32_t dst_stride,
> +                                              const int8_t *filter)
> +{
> + uint8_t *dst_tmp = dst;
> + __m128i src0, src1, src2, src3;
> + __m128i filter0, filter1, filter2, filter3;
> + __m128i mask0, mask1, mask2, mask3;
> + __m128i tmp0, tmp1;
> + __m128i dst0, dst1, dst2, dst3;
> +
> + mask0 = __lsx_vld(mc_filt_mask_arr, 16);
> + src -= 3;
> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
> + mask3 = __lsx_vaddi_bu(mask0, 6);
> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
> filter, 6,
> + filter0, filter1, filter2, filter3);
> + LSX_LD_4(src, src_stride, src0, src1, src2, src3);
> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3,
> 128,
> + src0, src1, src2, src3);
> + HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1,
> mask2, mask3,
> + filter0, filter1, filter2, filter3,
> tmp0, tmp1);
> + dst0 = __lsx_vldrepl_w(dst_tmp, 0);
> + dst_tmp += dst_stride;
> + dst1 = __lsx_vldrepl_w(dst_tmp, 0);
> + dst_tmp += dst_stride;
> + dst2 = __lsx_vldrepl_w(dst_tmp, 0);
> + dst_tmp += dst_stride;
> + dst3 = __lsx_vldrepl_w(dst_tmp, 0);
> + dst0 = __lsx_vilvl_w(dst1, dst0);
> + dst1 = __lsx_vilvl_w(dst3, dst2);
> + dst0 = __lsx_vilvl_d(dst1, dst0);
> + tmp0 = __lsx_vssrarni_b_h(tmp1, tmp0, 7);
> + tmp0 = __lsx_vxori_b(tmp0, 128);
> + dst0 = __lsx_vavgr_bu(tmp0, dst0);
> + __lsx_vstelm_w(dst0, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_w(dst0, dst, 0, 1);
> + dst += dst_stride;
> + __lsx_vstelm_w(dst0, dst, 0, 2);
> + dst += dst_stride;
> + __lsx_vstelm_w(dst0, dst, 0, 3);
> +}
> +
> +static void common_hz_8t_and_aver_dst_4x8_lsx(const uint8_t *src,
> +                                              int32_t src_stride,
> +                                              uint8_t *dst,
> +                                              int32_t dst_stride,
> +                                              const int8_t *filter)
> +{
> + uint8_t *dst_tmp = dst;
> + __m128i src0, src1, src2, src3, filter0, filter1, filter2, filter3;
> + __m128i mask0, mask1, mask2, mask3, tmp0, tmp1, tmp2, tmp3;
> + __m128i dst0, dst1;
> +
> + mask0 = __lsx_vld(mc_filt_mask_arr, 16);
> + src -= 3;
> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
> + mask3 = __lsx_vaddi_bu(mask0, 6);
> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
> filter, 6,
> + filter0, filter1, filter2, filter3);
> +
> + LSX_LD_4(src, src_stride, src0, src1, src2, src3);
> + src += src_stride;
> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3,
> 128,
> + src0, src1, src2, src3);
> + tmp0 = __lsx_vldrepl_w(dst_tmp, 0);
> + dst_tmp += dst_stride;
> + tmp1 = __lsx_vldrepl_w(dst_tmp, 0);
> + dst_tmp += dst_stride;
> + tmp2 = __lsx_vldrepl_w(dst_tmp, 0);
> + dst_tmp += dst_stride;
> + tmp3 = __lsx_vldrepl_w(dst_tmp, 0);
> + dst_tmp += dst_stride;
> + tmp0 = __lsx_vilvl_w(tmp1, tmp0);
> + tmp1 = __lsx_vilvl_w(tmp3, tmp2);
> + dst0 = __lsx_vilvl_d(tmp1, tmp0);
> +
> + tmp0 = __lsx_vldrepl_w(dst_tmp, 0);
> + dst_tmp += dst_stride;
> + tmp1 = __lsx_vldrepl_w(dst_tmp, 0);
> + dst_tmp += dst_stride;
> + tmp2 = __lsx_vldrepl_w(dst_tmp, 0);
> + dst_tmp += dst_stride;
> + tmp3 = __lsx_vldrepl_w(dst_tmp, 0);
> + tmp0 = __lsx_vilvl_w(tmp1, tmp0);
> + tmp1 = __lsx_vilvl_w(tmp3, tmp2);
> + dst1 = __lsx_vilvl_d(tmp1, tmp0);
> + HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1,
> mask2, mask3,
> + filter0, filter1, filter2, filter3,
> tmp0, tmp1);
> + LSX_LD_4(src, src_stride, src0, src1, src2, src3);
> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3,
> 128,
> + src0, src1, src2, src3);
> + HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1,
> mask2, mask3,
> + filter0, filter1, filter2, filter3,
> tmp2, tmp3);
> + DUP4_ARG3(__lsx_vssrarni_b_h, tmp0, tmp0, 7, tmp1, tmp1, 7, tmp2,
> tmp2, 7,
> + tmp3, tmp3, 7, tmp0, tmp1, tmp2, tmp3);
> + DUP2_ARG2(__lsx_vilvl_d, tmp1, tmp0, tmp3, tmp2, tmp0, tmp1);
> + DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
> + DUP2_ARG2(__lsx_vavgr_bu, tmp0, dst0, tmp1, dst1, dst0, dst1);
> + __lsx_vstelm_w(dst0, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_w(dst0, dst, 0, 1);
> + dst += dst_stride;
> + __lsx_vstelm_w(dst0, dst, 0, 2);
> + dst += dst_stride;
> + __lsx_vstelm_w(dst0, dst, 0, 3);
> + dst += dst_stride;
> + __lsx_vstelm_w(dst1, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_w(dst1, dst, 0, 1);
> + dst += dst_stride;
> + __lsx_vstelm_w(dst1, dst, 0, 2);
> + dst += dst_stride;
> + __lsx_vstelm_w(dst1, dst, 0, 3);
> +}
> +
> +static void common_hz_8t_and_aver_dst_4w_lsx(const uint8_t *src,
> +                                             int32_t src_stride,
> +                                             uint8_t *dst,
> +                                             int32_t dst_stride,
> +                                             const int8_t *filter,
> +                                             int32_t height)
> +{
> + if (height == 4) {
> + common_hz_8t_and_aver_dst_4x4_lsx(src, src_stride, dst,
> dst_stride, filter);
> + } else if (height == 8) {
> + common_hz_8t_and_aver_dst_4x8_lsx(src, src_stride, dst,
> dst_stride, filter);
> + }
> +}
> +
> +static void common_hz_8t_and_aver_dst_8w_lsx(const uint8_t *src,
> +                                             int32_t src_stride,
> +                                             uint8_t *dst,
> +                                             int32_t dst_stride,
> +                                             const int8_t *filter,
> +                                             int32_t height)
> +{
> + int32_t loop_cnt = height >> 2;
> + uint8_t *dst_tmp = dst;
> + __m128i src0, src1, src2, src3, filter0, filter1, filter2, filter3;
> + __m128i mask0, mask1, mask2, mask3;
> + __m128i tmp0, tmp1, tmp2, tmp3;
> + __m128i dst0, dst1, dst2, dst3;
> + int32_t src_stride2 = src_stride << 1;
> + int32_t src_stride3 = src_stride2 + src_stride;
> + int32_t src_stride4 = src_stride2 << 1;
> + uint8_t *_src = (uint8_t*)src - 3;
> +
> + mask0 = __lsx_vld(mc_filt_mask_arr, 0);
> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
> + mask3 = __lsx_vaddi_bu(mask0, 6);
> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
> filter, 6,
> + filter0, filter1, filter2, filter3);
> +
> + for (;loop_cnt--;) {
> + src0 = __lsx_vld(_src, 0);
> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2,
> src1, src2);
> + src3 = __lsx_vldx(_src, src_stride3);
> + _src += src_stride4;
> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128,
> src3, 128,
> + src0, src1, src2, src3);
> + HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0,
> mask1, mask2,
> + mask3,filter0, filter1, filter2, filter3, tmp0, tmp1,
> tmp2, tmp3);
> + dst0 = __lsx_vldrepl_d(dst_tmp, 0);
> + dst_tmp += dst_stride;
> + dst1 = __lsx_vldrepl_d(dst_tmp, 0);
> + dst_tmp += dst_stride;
> + dst2 = __lsx_vldrepl_d(dst_tmp, 0);
> + dst_tmp += dst_stride;
> + dst3 = __lsx_vldrepl_d(dst_tmp, 0);
> + dst_tmp += dst_stride;
> + DUP2_ARG2(__lsx_vilvl_d, dst1, dst0, dst3, dst2, dst0, dst1);
> + DUP2_ARG3(__lsx_vssrarni_b_h, tmp1, tmp0, 7, tmp3, tmp2, 7,
> tmp0, tmp1);
> + DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
> + DUP2_ARG2(__lsx_vavgr_bu, tmp0, dst0, tmp1, dst1, dst0, dst1);
> + __lsx_vstelm_d(dst0, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_d(dst0, dst, 0, 1);
> + dst += dst_stride;
> + __lsx_vstelm_d(dst1, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_d(dst1, dst, 0, 1);
> + dst += dst_stride;
> + }
> +}
> +
> +static void common_hz_8t_and_aver_dst_16w_lsx(const uint8_t *src,
> +                                              int32_t src_stride,
> +                                              uint8_t *dst,
> +                                              int32_t dst_stride,
> +                                              const int8_t *filter,
> +                                              int32_t height)
> +{
> + int32_t loop_cnt = height >> 1;
> + int32_t dst_stride2 = dst_stride << 1;
> + uint8_t *dst_tmp = dst;
> + __m128i src0, src1, src2, src3, filter0, filter1, filter2, filter3;
> + __m128i mask0, mask1, mask2, mask3, dst0, dst1, dst2, dst3;
> + __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
> + __m128i tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15;
> +
> + mask0 = __lsx_vld(mc_filt_mask_arr, 0);
> + src -= 3;
> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
> + mask3 = __lsx_vaddi_bu(mask0, 6);
> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
> filter, 6,
> + filter0, filter1, filter2, filter3);
> +
> + for (;loop_cnt--;) {
> + DUP2_ARG2(__lsx_vld, src, 0, src, 8, src0, src1);
> + src += src_stride;
> + DUP2_ARG2(__lsx_vld, src, 0, src, 8, src2, src3);
> + src += src_stride;
> + dst0 = __lsx_vld(dst_tmp, 0);
> + dst1 = __lsx_vldx(dst_tmp, dst_stride);
> + dst_tmp += dst_stride2;
> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128,
> src3, 128,
> + src0, src1, src2, src3);
> + DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask0, src1, src1, mask0,
> src2, src2,
> + mask0, src3, src3, mask0, tmp0, tmp1, tmp2, tmp3);
> + DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask1, src1, src1, mask1,
> src2, src2,
> + mask1, src3, src3, mask1, tmp4, tmp5, tmp6, tmp7);
> + DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask2, src1, src1, mask2,
> src2, src2,
> + mask2, src3, src3, mask2, tmp8, tmp9, tmp10, tmp11);
> + DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask3, src1, src1, mask3,
> src2, src2,
> + mask3, src3, src3, mask3, tmp12, tmp13, tmp14,
> tmp15);
> + DUP4_ARG2(__lsx_vdp2_h_b, tmp0, filter0, tmp1, filter0, tmp2,
> filter0, tmp3,
> + filter0, tmp0, tmp1, tmp2, tmp3);
> + DUP4_ARG2(__lsx_vdp2_h_b, tmp8, filter2, tmp9, filter2, tmp10,
> filter2, tmp11,
> + filter2, tmp8, tmp9, tmp10, tmp11);
> + DUP4_ARG3(__lsx_vdp2add_h_b, tmp0, tmp4, filter1, tmp1, tmp5,
> filter1, tmp2,
> + tmp6, filter1, tmp3, tmp7, filter1, tmp0, tmp1,
> tmp2, tmp3);
> + DUP4_ARG3(__lsx_vdp2add_h_b, tmp8, tmp12, filter3, tmp9,
> tmp13, filter3, tmp10,
> + tmp14, filter3, tmp11, tmp15, filter3, tmp4, tmp5,
> tmp6, tmp7);
> + DUP4_ARG2(__lsx_vsadd_h, tmp0, tmp4, tmp1, tmp5, tmp2, tmp6,
> tmp3, tmp7,
> + tmp0, tmp1, tmp2, tmp3);
> + DUP2_ARG3(__lsx_vssrarni_b_h, tmp1, tmp0, 7, tmp3, tmp2, 7,
> dst2, dst3);
> + DUP2_ARG2(__lsx_vxori_b, dst2, 128, dst3, 128, dst2, dst3);
> + DUP2_ARG2(__lsx_vavgr_bu, dst0, dst2, dst1, dst3, dst0, dst1);
> + __lsx_vst(dst0, dst, 0);
> + __lsx_vstx(dst1, dst, dst_stride);
> + dst += dst_stride2;
> + }
> +}
> +
> +static void common_hz_8t_and_aver_dst_32w_lsx(const uint8_t *src,
> +                                              int32_t src_stride,
> +                                              uint8_t *dst,
> +                                              int32_t dst_stride,
> +                                              const int8_t *filter,
> +                                              int32_t height)
> +{
> + uint32_t loop_cnt = height;
> + uint8_t *dst_tmp = dst;
> + __m128i src0, src1, src2, src3, filter0, filter1, filter2, filter3;
> + __m128i mask0, mask1, mask2, mask3, dst0, dst1;
> + __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
> + __m128i tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15;
> + __m128i shuff = {0x0F0E0D0C0B0A0908, 0x1716151413121110};
> +
> + mask0 = __lsx_vld(mc_filt_mask_arr, 0);
> + src -= 3;
> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
> + mask3 = __lsx_vaddi_bu(mask0, 6);
> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
> filter, 6,
> + filter0, filter1, filter2, filter3);
> +
> + for (;loop_cnt--;) {
> + DUP2_ARG2(__lsx_vld, src, 0, src, 16, src0, src2);
> + src3 = __lsx_vld(src, 24);
> + src1 = __lsx_vshuf_b(src2, src0, shuff);
> + src += src_stride;
> + DUP2_ARG2(__lsx_vld, dst_tmp, 0, dst, 16, dst0, dst1);
> + dst_tmp += dst_stride;
> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128,
> src3, 128,
> + src0, src1, src2, src3);
> + DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask0, src1, src1, mask0,
> src2,
> + src2, mask0, src3, src3, mask0, tmp0, tmp1, tmp2,
> tmp3);
> + DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask1, src1, src1, mask1,
> src2,
> + src2, mask1, src3, src3, mask1, tmp4, tmp5, tmp6,
> tmp7);
> + DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask2, src1, src1, mask2,
> src2,
> + src2, mask2, src3, src3, mask2, tmp8, tmp9, tmp10,
> tmp11);
> + DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask3, src1, src1, mask3,
> src2,
> + src2, mask3, src3, src3, mask3, tmp12, tmp13, tmp14,
> tmp15);
> + DUP4_ARG2(__lsx_vdp2_h_b, tmp0, filter0, tmp1, filter0, tmp2,
> filter0,
> + tmp3, filter0, tmp0, tmp1, tmp2, tmp3);
> + DUP4_ARG2(__lsx_vdp2_h_b, tmp8, filter2, tmp9, filter2, tmp10,
> filter2,
> + tmp11, filter2, tmp8, tmp9, tmp10, tmp11);
> + DUP4_ARG3(__lsx_vdp2add_h_b, tmp0, tmp4, filter1, tmp1, tmp5,
> filter1,
> + tmp2, tmp6, filter1, tmp3, tmp7, filter1, tmp0, tmp1,
> tmp2, tmp3);
> + DUP4_ARG3(__lsx_vdp2add_h_b, tmp8, tmp12, filter3, tmp9,
> tmp13, filter3,
> + tmp10, tmp14, filter3, tmp11, tmp15, filter3, tmp4, tmp5,
> tmp6, tmp7);
> + DUP4_ARG2(__lsx_vsadd_h, tmp0, tmp4, tmp1, tmp5, tmp2, tmp6,
> tmp3, tmp7,
> + tmp0, tmp1, tmp2, tmp3);
> + DUP2_ARG3(__lsx_vssrarni_b_h, tmp1, tmp0, 7, tmp3, tmp2, 7,
> tmp0, tmp1);
> + DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
> + DUP2_ARG2(__lsx_vavgr_bu, dst0, tmp0, dst1, tmp1, dst0, dst1);
> + __lsx_vst(dst0, dst, 0);
> + __lsx_vst(dst1, dst, 16);
> + dst += dst_stride;
> + }
> +}
> +
> +static void common_hz_8t_and_aver_dst_64w_lsx(const uint8_t *src,
> +                                              int32_t src_stride,
> +                                              uint8_t *dst,
> +                                              int32_t dst_stride,
> +                                              const int8_t *filter,
> +                                              int32_t height)
> +{
> + int32_t loop_cnt = height;
> + __m128i src0, src1, src2, src3;
> + __m128i filter0, filter1, filter2, filter3;
> + __m128i mask0, mask1, mask2, mask3;
> + __m128i out0, out1, out2, out3, dst0, dst1;
> + __m128i shuff = {0x0F0E0D0C0B0A0908, 0x1716151413121110};
> +
> + mask0 = __lsx_vld(mc_filt_mask_arr, 0);
> + src -= 3;
> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
> + mask3 = __lsx_vaddi_bu(mask0, 6);
> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
> filter, 6,
> + filter0, filter1, filter2, filter3);
> +
> + for (;loop_cnt--;) {
> + DUP2_ARG2(__lsx_vld, src, 0, src, 16, src0, src2);
> + src3 = __lsx_vld(src, 24);
> + src1 = __lsx_vshuf_b(src2, src0, shuff);
> + DUP2_ARG2(__lsx_vld, dst, 0, dst, 16, dst0, dst1);
> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128,
> src3, 128,
> + src0, src1, src2, src3);
> + HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0,
> mask1, mask2,
> + mask3, filter0, filter1, filter2, filter3, out0, out1,
> out2, out3);
> + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7,
> out0, out1);
> + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
> + DUP2_ARG2(__lsx_vavgr_bu, out0, dst0, out1, dst1, out0, out1);
> + __lsx_vst(out0, dst, 0);
> + __lsx_vst(out1, dst, 16);
> +
> + DUP2_ARG2(__lsx_vld, src, 32, src, 48, src0, src2);
> + src3 = __lsx_vld(src, 56);
> + src1 = __lsx_vshuf_b(src2, src0, shuff);
> + DUP2_ARG2(__lsx_vld, dst, 32, dst, 48, dst0, dst1);
> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128,
> src3, 128,
> + src0, src1, src2, src3);
> + HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0,
> mask1, mask2,
> + mask3, filter0, filter1, filter2, filter3, out0, out1,
> out2, out3);
> + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7,
> out0, out1);
> + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
> + DUP2_ARG2(__lsx_vavgr_bu, out0, dst0, out1, dst1, out0, out1);
> + __lsx_vst(out0, dst, 32);
> + __lsx_vst(out1, dst, 48);
> + src += src_stride;
> + dst += dst_stride;
> + }
> +}
> +
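> +/* Vertical 8-tap filtering with destination averaging, following the same
> + * structure as the common_vt_8t_*_lsx functions above. */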
> +static void common_vt_8t_and_aver_dst_4w_lsx(const uint8_t *src,
> +                                             int32_t src_stride,
> +                                             uint8_t *dst,
> +                                             int32_t dst_stride,
> +                                             const int8_t *filter,
> +                                             int32_t height)
> +{
> + uint32_t loop_cnt = height >> 2;
> + uint8_t *dst_tmp = dst;
> + __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8,
> src9, src10;
> + __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
> + __m128i reg0, reg1, reg2, reg3, reg4;
> + __m128i filter0, filter1, filter2, filter3;
> + __m128i out0, out1;
> + int32_t src_stride2 = src_stride << 1;
> + int32_t src_stride3 = src_stride + src_stride2;
> + int32_t src_stride4 = src_stride2 << 1;
> + uint8_t* _src = (uint8_t*)src - src_stride3;
> +
> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
> filter, 6,
> + filter0, filter1, filter2, filter3);
> + src0 = __lsx_vld(_src, 0);
> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1,
> src2);
> + src3 = __lsx_vldx(_src, src_stride3);
> + _src += src_stride4;
> + src4 = __lsx_vld(_src, 0);
> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src5,
> src6);
> + _src += src_stride3;
> + DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src5, src4, src2,
> src1,
> + tmp0, tmp1, tmp2, tmp3);
> + DUP2_ARG2(__lsx_vilvl_b, src4, src3, src6, src5, tmp4, tmp5);
> + DUP2_ARG2(__lsx_vilvl_d, tmp3, tmp0, tmp4, tmp1, reg0, reg1);
> + reg2 = __lsx_vilvl_d(tmp5, tmp2);
> + DUP2_ARG2(__lsx_vxori_b, reg0, 128, reg1, 128, reg0, reg1);
> + reg2 = __lsx_vxori_b(reg2, 128);
> +
> + for (;loop_cnt--;) {
> + src7 = __lsx_vld(_src, 0);
> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2,
> src8, src9);
> + src10 = __lsx_vldx(_src, src_stride3);
> + _src += src_stride4;
> + src0 = __lsx_vldrepl_w(dst_tmp, 0);
> + dst_tmp += dst_stride;
> + src1 = __lsx_vldrepl_w(dst_tmp, 0);
> + dst_tmp += dst_stride;
> + src2 = __lsx_vldrepl_w(dst_tmp, 0);
> + dst_tmp += dst_stride;
> + src3 = __lsx_vldrepl_w(dst_tmp, 0);
> + dst_tmp += dst_stride;
> + DUP2_ARG2(__lsx_vilvl_w, src1, src0, src3, src2, src0, src1);
> + src0 = __lsx_vilvl_d(src1, src0);
> + DUP4_ARG2(__lsx_vilvl_b, src7, src6, src8, src7, src9, src8,
> src10,
> + src9, tmp0, tmp1, tmp2, tmp3);
> + DUP2_ARG2(__lsx_vilvl_d, tmp1, tmp0, tmp3, tmp2, reg3, reg4);
> + DUP2_ARG2(__lsx_vxori_b, reg3, 128, reg4, 128, reg3, reg4);
> + out0 = FILT_8TAP_DPADD_S_H(reg0, reg1, reg2, reg3, filter0,
> + filter1, filter2, filter3);
> + out1 = FILT_8TAP_DPADD_S_H(reg1, reg2, reg3, reg4, filter0,
> + filter1, filter2, filter3);
> + out0 = __lsx_vssrarni_b_h(out1, out0, 7);
> + out0 = __lsx_vxori_b(out0, 128);
> + out0 = __lsx_vavgr_bu(out0, src0);
> + __lsx_vstelm_w(out0, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_w(out0, dst, 0, 1);
> + dst += dst_stride;
> + __lsx_vstelm_w(out0, dst, 0, 2);
> + dst += dst_stride;
> + __lsx_vstelm_w(out0, dst, 0, 3);
> + dst += dst_stride;
> + reg0 = reg2;
> + reg1 = reg3;
> + reg2 = reg4;
> + src6 = src10;
> + }
> +}
> +
> +static void common_vt_8t_and_aver_dst_8w_lsx(const uint8_t *src,
> +                                             int32_t src_stride,
> +                                             uint8_t *dst,
> +                                             int32_t dst_stride,
> +                                             const int8_t *filter,
> +                                             int32_t height)
> +{
> + uint32_t loop_cnt = height >> 2;
> + uint8_t *dst_tmp = dst;
> + __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8,
> src9, src10;
> + __m128i tmp0, tmp1, tmp2, tmp3;
> + __m128i reg0, reg1, reg2, reg3, reg4, reg5;
> + __m128i filter0, filter1, filter2, filter3;
> + __m128i out0, out1, out2, out3;
> + int32_t src_stride2 = src_stride << 1;
> + int32_t src_stride3 = src_stride + src_stride2;
> + int32_t src_stride4 = src_stride2 << 1;
> + uint8_t* _src = (uint8_t*)src - src_stride3;
> +
> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
> filter, 6,
> + filter0, filter1, filter2, filter3);
> +
> + src0 = __lsx_vld(_src, 0);
> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1,
> src2);
> + src3 = __lsx_vldx(_src, src_stride3);
> + _src += src_stride4;
> + src4 = __lsx_vld(_src, 0);
> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src5,
> src6);
> + _src += src_stride3;
> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3,
> 128,
> + src0, src1, src2, src3);
> + DUP2_ARG2(__lsx_vxori_b, src4, 128, src5, 128, src4, src5);
> + src6 = __lsx_vxori_b(src6, 128);
> + DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src5, src4, src2,
> + src1, reg0, reg1, reg2, reg3);
> + DUP2_ARG2(__lsx_vilvl_b, src4, src3, src6, src5, reg4, reg5);
> +
> + for (;loop_cnt--;) {
> + src7 = __lsx_vld(_src, 0);
> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2,
> src8, src9);
> + src10 = __lsx_vldx(_src, src_stride3);
> + _src += src_stride4;
> + src0 = __lsx_vldrepl_d(dst_tmp, 0);
> + dst_tmp += dst_stride;
> + src1 = __lsx_vldrepl_d(dst_tmp, 0);
> + dst_tmp += dst_stride;
> + src2 = __lsx_vldrepl_d(dst_tmp, 0);
> + dst_tmp += dst_stride;
> + src3 = __lsx_vldrepl_d(dst_tmp, 0);
> + dst_tmp += dst_stride;
> + DUP2_ARG2(__lsx_vilvl_d, src1, src0, src3, src2, src0, src1);
> + DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128,
> src10, 128,
> + src7, src8, src9, src10);
> + DUP4_ARG2(__lsx_vilvl_b, src7, src6, src8, src7, src9, src8,
> src10,
> + src9, tmp0, tmp1, tmp2, tmp3);
> + out0 = FILT_8TAP_DPADD_S_H(reg0, reg1, reg2, tmp0, filter0,
> + filter1, filter2, filter3);
> + out1 = FILT_8TAP_DPADD_S_H(reg3, reg4, reg5, tmp1, filter0,
> + filter1, filter2, filter3);
> + out2 = FILT_8TAP_DPADD_S_H(reg1, reg2, tmp0, tmp2, filter0,
> + filter1, filter2, filter3);
> + out3 = FILT_8TAP_DPADD_S_H(reg4, reg5, tmp1, tmp3, filter0,
> + filter1, filter2, filter3);
> + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7,
> out0, out1);
> + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
> + DUP2_ARG2(__lsx_vavgr_bu, out0, src0, out1, src1, out0, out1);
> + __lsx_vstelm_d(out0, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_d(out0, dst, 0, 1);
> + dst += dst_stride;
> + __lsx_vstelm_d(out1, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_d(out1, dst, 0, 1);
> + dst += dst_stride;
> +
> + reg0 = reg2;
> + reg1 = tmp0;
> + reg2 = tmp2;
> + reg3 = reg5;
> + reg4 = tmp1;
> + reg5 = tmp3;
> + src6 = src10;
> + }
> +}
> +
> +static void common_vt_8t_and_aver_dst_16w_mult_lsx(const uint8_t *src,
> + int32_t src_stride,
> + uint8_t *dst,
> + int32_t dst_stride,
> + const int8_t *filter,
> + int32_t height,
> + int32_t width)
> +{
> + uint8_t *src_tmp;
> + uint32_t cnt = width >> 4;
> + __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8,
> src9, src10;
> + __m128i filter0, filter1, filter2, filter3;
> + __m128i reg0, reg1, reg2, reg3, reg4, reg5;
> + __m128i reg6, reg7, reg8, reg9, reg10, reg11;
> + __m128i tmp0, tmp1, tmp2, tmp3;
> + int32_t src_stride2 = src_stride << 1;
> + int32_t src_stride3 = src_stride + src_stride2;
> + int32_t src_stride4 = src_stride2 << 1;
> + int32_t dst_stride2 = dst_stride << 1;
> + int32_t dst_stride3 = dst_stride2 + dst_stride;
> + int32_t dst_stride4 = dst_stride2 << 1;
> + uint8_t *_src = (uint8_t*)src - src_stride3;
> +
> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
> filter, 6,
> + filter0, filter1, filter2, filter3);
> + for (;cnt--;) {
> + uint32_t loop_cnt = height >> 2;
> + uint8_t *dst_reg = dst;
> +
> + src_tmp = _src;
> + src0 = __lsx_vld(src_tmp, 0);
> + DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp,
> src_stride2,
> + src1, src2);
> + src3 = __lsx_vldx(src_tmp, src_stride3);
> + src_tmp += src_stride4;
> + src4 = __lsx_vld(src_tmp, 0);
> + DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp,
> src_stride2,
> + src5, src6);
> + src_tmp += src_stride3;
> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128,
> src3, 128,
> + src0, src1, src2, src3);
> + DUP2_ARG2(__lsx_vxori_b, src4, 128, src5, 128, src4, src5);
> + src6 = __lsx_vxori_b(src6, 128);
> + DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src5, src4,
> src2, src1,
> + reg0, reg1, reg2, reg3);
> + DUP2_ARG2(__lsx_vilvl_b, src4, src3, src6, src5, reg4, reg5);
> + DUP4_ARG2(__lsx_vilvh_b, src1, src0, src3, src2, src5, src4,
> src2, src1,
> + reg6, reg7, reg8, reg9);
> + DUP2_ARG2(__lsx_vilvh_b, src4, src3, src6, src5, reg10, reg11);
> +
> + for (;loop_cnt--;) {
> + src7 = __lsx_vld(src_tmp, 0);
> + DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp,
> src_stride2,
> + src8, src9);
> + src10 = __lsx_vldx(src_tmp, src_stride3);
> + src_tmp += src_stride4;
> + DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128,
> src10,
> + 128, src7, src8, src9, src10);
> + DUP4_ARG2(__lsx_vilvl_b, src7, src6, src8, src7, src9,
> src8,
> + src10, src9, src0, src1, src2, src3);
> + DUP4_ARG2(__lsx_vilvh_b, src7, src6, src8, src7, src9,
> src8,
> + src10, src9, src4, src5, src7, src8);
> + tmp0 = FILT_8TAP_DPADD_S_H(reg0, reg1, reg2, src0, filter0,
> + filter1, filter2, filter3);
> + tmp1 = FILT_8TAP_DPADD_S_H(reg3, reg4, reg5, src1, filter0,
> + filter1, filter2, filter3);
> + tmp2 = FILT_8TAP_DPADD_S_H(reg6, reg7, reg8, src4, filter0,
> + filter1, filter2, filter3);
> + tmp3 = FILT_8TAP_DPADD_S_H(reg9, reg10, reg11, src5,
> filter0,
> + filter1, filter2, filter3);
> + DUP2_ARG3(__lsx_vssrarni_b_h, tmp2, tmp0, 7, tmp3, tmp1, 7,
> + tmp0, tmp1);
> + DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
> + tmp2 = __lsx_vld(dst_reg, 0);
> + tmp3 = __lsx_vldx(dst_reg, dst_stride);
> + DUP2_ARG2(__lsx_vavgr_bu, tmp0, tmp2, tmp1, tmp3, tmp0,
> tmp1);
> + __lsx_vst(tmp0, dst_reg, 0);
> + __lsx_vstx(tmp1, dst_reg, dst_stride);
> + tmp0 = FILT_8TAP_DPADD_S_H(reg1, reg2, src0, src2, filter0,
> + filter1, filter2, filter3);
> + tmp1 = FILT_8TAP_DPADD_S_H(reg4, reg5, src1, src3, filter0,
> + filter1, filter2, filter3);
> + tmp2 = FILT_8TAP_DPADD_S_H(reg7, reg8, src4, src7, filter0,
> + filter1, filter2, filter3);
> + tmp3 = FILT_8TAP_DPADD_S_H(reg10, reg11, src5, src8,
> filter0,
> + filter1, filter2, filter3);
> + DUP2_ARG3(__lsx_vssrarni_b_h, tmp2, tmp0, 7, tmp3, tmp1, 7,
> + tmp0, tmp1);
> + DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
> + tmp2 = __lsx_vldx(dst_reg, dst_stride2);
> + tmp3 = __lsx_vldx(dst_reg, dst_stride3);
> + DUP2_ARG2(__lsx_vavgr_bu, tmp0, tmp2, tmp1, tmp3, tmp0,
> tmp1);
> + __lsx_vstx(tmp0, dst_reg, dst_stride2);
> + __lsx_vstx(tmp1, dst_reg, dst_stride3);
> + dst_reg += dst_stride4;
> +
> + reg0 = reg2;
> + reg1 = src0;
> + reg2 = src2;
> + reg3 = reg5;
> + reg4 = src1;
> + reg5 = src3;
> + reg6 = reg8;
> + reg7 = src4;
> + reg8 = src7;
> + reg9 = reg11;
> + reg10 = src5;
> + reg11 = src8;
> + src6 = src10;
> + }
> + _src += 16;
> + dst += 16;
> + }
> +}
> +
> +static void common_vt_8t_and_aver_dst_16w_lsx(const uint8_t *src,
> + int32_t src_stride,
> + uint8_t *dst, int32_t dst_stride,
> + const int8_t *filter,
> + int32_t height)
> +{
> + common_vt_8t_and_aver_dst_16w_mult_lsx(src, src_stride, dst, dst_stride,
> + filter, height, 16);
> +}
> +
> +static void common_vt_8t_and_aver_dst_32w_lsx(const uint8_t *src,
> + int32_t src_stride,
> + uint8_t *dst, int32_t dst_stride,
> + const int8_t *filter,
> + int32_t height)
> +{
> + common_vt_8t_and_aver_dst_16w_mult_lsx(src, src_stride, dst, dst_stride,
> + filter, height, 32);
> +}
> +
> +static void common_vt_8t_and_aver_dst_64w_lsx(const uint8_t *src,
> + int32_t src_stride,
> + uint8_t *dst, int32_t dst_stride,
> + const int8_t *filter,
> + int32_t height)
> +{
> + common_vt_8t_and_aver_dst_16w_mult_lsx(src, src_stride, dst, dst_stride,
> + filter, height, 64);
> +}
> +
> +static void common_hv_8ht_8vt_and_aver_dst_4w_lsx(const uint8_t *src,
> + int32_t src_stride,
> + uint8_t *dst,
> + int32_t dst_stride,
> + const int8_t *filter_horiz,
> + const int8_t *filter_vert,
> + int32_t height)
> +{
> + uint32_t loop_cnt = height >> 2;
> + uint8_t *dst_tmp = dst;
> + __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8,
> src9, src10;
> + __m128i filt_hz0, filt_hz1, filt_hz2, filt_hz3;
> + __m128i filt_vt0, filt_vt1, filt_vt2, filt_vt3;
> + __m128i mask0, mask1, mask2, mask3;
> + __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
> + __m128i out0, out1;
> + __m128i shuff = {0x0F0E0D0C0B0A0908, 0x1716151413121110};
> + int32_t src_stride2 = src_stride << 1;
> + int32_t src_stride3 = src_stride + src_stride2;
> + int32_t src_stride4 = src_stride2 << 1;
> + uint8_t* _src = (uint8_t*)src - 3 - src_stride3;
> +
> + mask0 = __lsx_vld(mc_filt_mask_arr, 16);
> + DUP4_ARG2(__lsx_vldrepl_h, filter_horiz, 0, filter_horiz, 2,
> filter_horiz,
> + 4, filter_horiz, 6, filt_hz0, filt_hz1, filt_hz2,
> filt_hz3);
> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
> + mask3 = __lsx_vaddi_bu(mask0, 6);
> +
> + src0 = __lsx_vld(_src, 0);
> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1,
> src2);
> + src3 = __lsx_vldx(_src, src_stride3);
> + _src += src_stride4;
> + src4 = __lsx_vld(_src, 0);
> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src5,
> src6);
> + _src += src_stride3;
> +
> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3,
> 128,
> + src0, src1, src2, src3);
> + DUP2_ARG2(__lsx_vxori_b, src4, 128, src5, 128, src4, src5);
> + src6 = __lsx_vxori_b(src6, 128);
> +
> + tmp0 = HORIZ_8TAP_FILT(src0, src1, mask0, mask1, mask2, mask3,
> filt_hz0,
> + filt_hz1, filt_hz2, filt_hz3);
> + tmp2 = HORIZ_8TAP_FILT(src2, src3, mask0, mask1, mask2, mask3,
> filt_hz0,
> + filt_hz1, filt_hz2, filt_hz3);
> + tmp4 = HORIZ_8TAP_FILT(src4, src5, mask0, mask1, mask2, mask3,
> filt_hz0,
> + filt_hz1, filt_hz2, filt_hz3);
> + tmp5 = HORIZ_8TAP_FILT(src5, src6, mask0, mask1, mask2, mask3,
> filt_hz0,
> + filt_hz1, filt_hz2, filt_hz3);
> + DUP2_ARG3(__lsx_vshuf_b, tmp2, tmp0, shuff, tmp4, tmp2, shuff,
> tmp1, tmp3);
> + DUP4_ARG2(__lsx_vldrepl_h, filter_vert, 0, filter_vert, 2,
> filter_vert, 4,
> + filter_vert, 6, filt_vt0, filt_vt1, filt_vt2, filt_vt3);
> + DUP2_ARG2(__lsx_vpackev_b, tmp1, tmp0, tmp3, tmp2, tmp0, tmp1);
> + tmp2 = __lsx_vpackev_b(tmp5, tmp4);
> +
> + for (;loop_cnt--;) {
> + src7 = __lsx_vld(_src, 0);
> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2,
> src8, src9);
> + src10 = __lsx_vldx(_src, src_stride3);
> + _src += src_stride4;
> + src2 = __lsx_vldrepl_w(dst_tmp, 0);
> + dst_tmp += dst_stride;
> + src3 = __lsx_vldrepl_w(dst_tmp, 0);
> + dst_tmp += dst_stride;
> + src4 = __lsx_vldrepl_w(dst_tmp, 0);
> + dst_tmp += dst_stride;
> + src5 = __lsx_vldrepl_w(dst_tmp, 0);
> + dst_tmp += dst_stride;
> + DUP2_ARG2(__lsx_vilvl_w, src3, src2, src5, src4, src2, src3);
> + src2 = __lsx_vilvl_d(src3, src2);
> + DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128,
> src10, 128,
> + src7, src8, src9, src10);
> + tmp3 = HORIZ_8TAP_FILT(src7, src8, mask0, mask1, mask2, mask3,
> filt_hz0,
> + filt_hz1, filt_hz2, filt_hz3);
> + tmp4 = __lsx_vshuf_b(tmp3, tmp5, shuff);
> + tmp4 = __lsx_vpackev_b(tmp3, tmp4);
> + out0 = FILT_8TAP_DPADD_S_H(tmp0, tmp1, tmp2, tmp4, filt_vt0,
> filt_vt1,
> + filt_vt2, filt_vt3);
> + src1 = HORIZ_8TAP_FILT(src9, src10, mask0, mask1, mask2, mask3,
> + filt_hz0, filt_hz1, filt_hz2, filt_hz3);
> + src0 = __lsx_vshuf_b(src1, tmp3, shuff);
> + src0 = __lsx_vpackev_b(src1, src0);
> + out1 = FILT_8TAP_DPADD_S_H(tmp1, tmp2, tmp4, src0, filt_vt0,
> filt_vt1,
> + filt_vt2, filt_vt3);
> + out0 = __lsx_vssrarni_b_h(out1, out0, 7);
> + out0 = __lsx_vxori_b(out0, 128);
> + out0 = __lsx_vavgr_bu(out0, src2);
> + __lsx_vstelm_w(out0, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_w(out0, dst, 0, 1);
> + dst += dst_stride;
> + __lsx_vstelm_w(out0, dst, 0, 2);
> + dst += dst_stride;
> + __lsx_vstelm_w(out0, dst, 0, 3);
> + dst += dst_stride;
> +
> + tmp5 = src1;
> + tmp0 = tmp2;
> + tmp1 = tmp4;
> + tmp2 = src0;
> + }
> +}
> +
> +static void common_hv_8ht_8vt_and_aver_dst_8w_lsx(const uint8_t *src,
> + int32_t src_stride,
> + uint8_t *dst,
> + int32_t dst_stride,
> + const int8_t *filter_horiz,
> + const int8_t *filter_vert,
> + int32_t height)
> +{
> + uint32_t loop_cnt = height >> 2;
> + uint8_t *dst_tmp = dst;
> + __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8,
> src9, src10;
> + __m128i filt_hz0, filt_hz1, filt_hz2, filt_hz3;
> + __m128i filt_vt0, filt_vt1, filt_vt2, filt_vt3;
> + __m128i mask0, mask1, mask2, mask3;
> + __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
> + __m128i out0, out1;
> + int32_t src_stride2 = src_stride << 1;
> + int32_t src_stride3 = src_stride + src_stride2;
> + int32_t src_stride4 = src_stride2 << 1;
> + uint8_t* _src = (uint8_t*)src - 3 - src_stride3;
> +
> + mask0 = __lsx_vld(mc_filt_mask_arr, 0);
> + DUP4_ARG2(__lsx_vldrepl_h, filter_horiz, 0, filter_horiz, 2,
> filter_horiz,
> + 4, filter_horiz, 6, filt_hz0, filt_hz1, filt_hz2,
> filt_hz3);
> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
> + mask3 = __lsx_vaddi_bu(mask0, 6);
> +
> + src0 = __lsx_vld(_src, 0);
> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1,
> src2);
> + src3 = __lsx_vldx(_src, src_stride3);
> + _src += src_stride4;
> + src4 = __lsx_vld(_src, 0);
> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src5,
> src6);
> + _src += src_stride3;
> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3,
> 128,
> + src0, src1, src2, src3);
> + DUP2_ARG2(__lsx_vxori_b, src4, 128, src5, 128, src4, src5);
> + src6 = __lsx_vxori_b(src6, 128);
> +
> + src0 = HORIZ_8TAP_FILT(src0, src0, mask0, mask1, mask2, mask3,
> filt_hz0,
> + filt_hz1, filt_hz2, filt_hz3);
> + src1 = HORIZ_8TAP_FILT(src1, src1, mask0, mask1, mask2, mask3,
> filt_hz0,
> + filt_hz1, filt_hz2, filt_hz3);
> + src2 = HORIZ_8TAP_FILT(src2, src2, mask0, mask1, mask2, mask3,
> filt_hz0,
> + filt_hz1, filt_hz2, filt_hz3);
> + src3 = HORIZ_8TAP_FILT(src3, src3, mask0, mask1, mask2, mask3,
> filt_hz0,
> + filt_hz1, filt_hz2, filt_hz3);
> + src4 = HORIZ_8TAP_FILT(src4, src4, mask0, mask1, mask2, mask3,
> filt_hz0,
> + filt_hz1, filt_hz2, filt_hz3);
> + src5 = HORIZ_8TAP_FILT(src5, src5, mask0, mask1, mask2, mask3,
> filt_hz0,
> + filt_hz1, filt_hz2, filt_hz3);
> + src6 = HORIZ_8TAP_FILT(src6, src6, mask0, mask1, mask2, mask3,
> filt_hz0,
> + filt_hz1, filt_hz2, filt_hz3);
> +
> + DUP4_ARG2(__lsx_vldrepl_h, filter_vert, 0, filter_vert, 2,
> filter_vert, 4,
> + filter_vert, 6, filt_vt0, filt_vt1, filt_vt2, filt_vt3);
> + DUP4_ARG2(__lsx_vpackev_b, src1, src0, src3, src2, src5, src4,
> + src2, src1, tmp0, tmp1, tmp2, tmp4);
> + DUP2_ARG2(__lsx_vpackev_b, src4, src3, src6, src5, tmp5, tmp6);
> +
> + for (;loop_cnt--;) {
> + src7 = __lsx_vld(_src, 0);
> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2,
> src8, src9);
> + src10 = __lsx_vldx(_src, src_stride3);
> + _src += src_stride4;
> +
> + DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128,
> src10, 128,
> + src7, src8, src9, src10);
> + src7 = HORIZ_8TAP_FILT(src7, src7, mask0, mask1, mask2, mask3,
> filt_hz0,
> + filt_hz1, filt_hz2, filt_hz3);
> + tmp3 = __lsx_vpackev_b(src7, src6);
> + out0 = FILT_8TAP_DPADD_S_H(tmp0, tmp1, tmp2, tmp3, filt_vt0,
> filt_vt1,
> + filt_vt2, filt_vt3);
> + src8 = HORIZ_8TAP_FILT(src8, src8, mask0, mask1, mask2, mask3,
> filt_hz0,
> + filt_hz1, filt_hz2, filt_hz3);
> + src0 = __lsx_vpackev_b(src8, src7);
> + out1 = FILT_8TAP_DPADD_S_H(tmp4, tmp5, tmp6, src0, filt_vt0,
> filt_vt1,
> + filt_vt2, filt_vt3);
> + src9 = HORIZ_8TAP_FILT(src9, src9, mask0, mask1, mask2, mask3,
> filt_hz0,
> + filt_hz1, filt_hz2, filt_hz3);
> + src1 = __lsx_vpackev_b(src9, src8);
> + src3 = FILT_8TAP_DPADD_S_H(tmp1, tmp2, tmp3, src1, filt_vt0,
> filt_vt1,
> + filt_vt2, filt_vt3);
> + src10 = HORIZ_8TAP_FILT(src10, src10, mask0, mask1, mask2,
> mask3, filt_hz0,
> + filt_hz1, filt_hz2, filt_hz3);
> + src2 = __lsx_vpackev_b(src10, src9);
> + src4 = FILT_8TAP_DPADD_S_H(tmp5, tmp6, src0, src2, filt_vt0,
> filt_vt1,
> + filt_vt2, filt_vt3);
> + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, src4, src3, 7,
> out0, out1);
> + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
> + src5 = __lsx_vldrepl_d(dst_tmp, 0);
> + dst_tmp += dst_stride;
> + src7 = __lsx_vldrepl_d(dst_tmp, 0);
> + dst_tmp += dst_stride;
> + src8 = __lsx_vldrepl_d(dst_tmp, 0);
> + dst_tmp += dst_stride;
> + src9 = __lsx_vldrepl_d(dst_tmp, 0);
> + dst_tmp += dst_stride;
> + DUP2_ARG2(__lsx_vilvl_d, src7, src5, src9, src8, src5, src7);
> + DUP2_ARG2(__lsx_vavgr_bu, out0, src5, out1, src7, out0, out1);
> + __lsx_vstelm_d(out0, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_d(out0, dst, 0, 1);
> + dst += dst_stride;
> + __lsx_vstelm_d(out1, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_d(out1, dst, 0, 1);
> + dst += dst_stride;
> +
> + src6 = src10;
> + tmp0 = tmp2;
> + tmp1 = tmp3;
> + tmp2 = src1;
> + tmp4 = tmp6;
> + tmp5 = src0;
> + tmp6 = src2;
> + }
> +}
> +
> +static void common_hv_8ht_8vt_and_aver_dst_16w_lsx(const uint8_t *src,
> + int32_t src_stride,
> + uint8_t *dst,
> + int32_t dst_stride,
> + const int8_t *filter_horiz,
> + const int8_t *filter_vert,
> + int32_t height)
> +{
> + int32_t multiple8_cnt;
> +
> + for (multiple8_cnt = 2; multiple8_cnt--;) {
> + common_hv_8ht_8vt_and_aver_dst_8w_lsx(src, src_stride, dst, dst_stride,
> + filter_horiz, filter_vert,
> + height);
> +
> + src += 8;
> + dst += 8;
> + }
> +}
> +
> +static void common_hv_8ht_8vt_and_aver_dst_32w_lsx(const uint8_t *src,
> + int32_t src_stride,
> + uint8_t *dst,
> + int32_t dst_stride,
> + const int8_t *filter_horiz,
> + const int8_t *filter_vert,
> + int32_t height)
> +{
> + int32_t multiple8_cnt;
> +
> + for (multiple8_cnt = 4; multiple8_cnt--;) {
> + common_hv_8ht_8vt_and_aver_dst_8w_lsx(src, src_stride, dst, dst_stride,
> + filter_horiz, filter_vert,
> + height);
> +
> + src += 8;
> + dst += 8;
> + }
> +}
> +
> +static void common_hv_8ht_8vt_and_aver_dst_64w_lsx(const uint8_t *src,
> + int32_t src_stride,
> + uint8_t *dst,
> + int32_t dst_stride,
> + const int8_t *filter_horiz,
> + const int8_t *filter_vert,
> + int32_t height)
> +{
> + int32_t multiple8_cnt;
> +
> + for (multiple8_cnt = 8; multiple8_cnt--;) {
> + common_hv_8ht_8vt_and_aver_dst_8w_lsx(src, src_stride, dst, dst_stride,
> + filter_horiz, filter_vert,
> + height);
> +
> + src += 8;
> + dst += 8;
> + }
> +}
> +
> +static void avg_width8_lsx(const uint8_t *src, int32_t src_stride,
> + uint8_t *dst, int32_t dst_stride,
> + int32_t height)
> +{
> + int32_t cnt = height >> 2;
> + uint8_t *dst_tmp = dst;
> + __m128i src0, src1, dst0, dst1;
> + __m128i tmp0, tmp1, tmp2, tmp3;
> +
> + for (;cnt--;) {
> + tmp0 = __lsx_vldrepl_d(src, 0);
> + src += src_stride;
> + tmp1 = __lsx_vldrepl_d(src, 0);
> + src += src_stride;
> + tmp2 = __lsx_vldrepl_d(src, 0);
> + src += src_stride;
> + tmp3 = __lsx_vldrepl_d(src, 0);
> + src += src_stride;
> + DUP2_ARG2(__lsx_vilvl_d, tmp1, tmp0, tmp3, tmp2, src0, src1);
> + tmp0 = __lsx_vldrepl_d(dst_tmp, 0);
> + dst_tmp += dst_stride;
> + tmp1 = __lsx_vldrepl_d(dst_tmp, 0);
> + dst_tmp += dst_stride;
> + tmp2 = __lsx_vldrepl_d(dst_tmp, 0);
> + dst_tmp += dst_stride;
> + tmp3 = __lsx_vldrepl_d(dst_tmp, 0);
> + dst_tmp += dst_stride;
> + DUP2_ARG2(__lsx_vilvl_d, tmp1, tmp0, tmp3, tmp2, dst0, dst1);
> + DUP2_ARG2(__lsx_vavgr_bu, src0, dst0, src1, dst1, dst0, dst1);
> + __lsx_vstelm_d(dst0, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_d(dst0, dst, 0, 1);
> + dst += dst_stride;
> + __lsx_vstelm_d(dst1, dst, 0, 0);
> + dst += dst_stride;
> + __lsx_vstelm_d(dst1, dst, 0, 1);
> + dst += dst_stride;
> + }
> +}
> +
> +static void avg_width16_lsx(const uint8_t *src, int32_t src_stride,
> + uint8_t *dst, int32_t dst_stride,
> + int32_t height)
> +{
> + int32_t cnt = height >> 2;
> + __m128i src0, src1, src2, src3;
> + __m128i dst0, dst1, dst2, dst3;
> + int32_t src_stride2 = src_stride << 1;
> + int32_t src_stride3 = src_stride + src_stride2;
> + int32_t src_stride4 = src_stride2 << 1;
> + int32_t dst_stride2 = dst_stride << 1;
> + int32_t dst_stride3 = dst_stride2 + dst_stride;
> + int32_t dst_stride4 = dst_stride2 << 1;
> + uint8_t* _src = (uint8_t*)src;
> +
> + for (;cnt--;) {
> + src0 = __lsx_vld(_src, 0);
> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2,
> src1, src2);
> + src3 = __lsx_vldx(_src, src_stride3);
> + _src += src_stride4;
> +
> + dst0 = __lsx_vld(dst, 0);
> + DUP2_ARG2(__lsx_vldx, dst, dst_stride, dst, dst_stride2,
> + dst1, dst2);
> + dst3 = __lsx_vldx(dst, dst_stride3);
> + DUP4_ARG2(__lsx_vavgr_bu, src0, dst0, src1, dst1,
> + src2, dst2, src3, dst3, dst0, dst1, dst2, dst3);
> + __lsx_vst(dst0, dst, 0);
> + __lsx_vstx(dst1, dst, dst_stride);
> + __lsx_vstx(dst2, dst, dst_stride2);
> + __lsx_vstx(dst3, dst, dst_stride3);
> + dst += dst_stride4;
> + }
> +}
> +
> +static void avg_width32_lsx(const uint8_t *src, int32_t src_stride,
> + uint8_t *dst, int32_t dst_stride,
> + int32_t height)
> +{
> + int32_t cnt = height >> 2;
> + uint8_t *src_tmp1 = (uint8_t*)src;
> + uint8_t *src_tmp2 = src_tmp1 + 16;
> + uint8_t *dst_tmp1, *dst_tmp2;
> + __m128i src0, src1, src2, src3, src4, src5, src6, src7;
> + __m128i dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
> + int32_t src_stride2 = src_stride << 1;
> + int32_t src_stride3 = src_stride + src_stride2;
> + int32_t src_stride4 = src_stride2 << 1;
> + int32_t dst_stride2 = dst_stride << 1;
> + int32_t dst_stride3 = dst_stride2 + dst_stride;
> + int32_t dst_stride4 = dst_stride2 << 1;
> +
> + dst_tmp1 = dst;
> + dst_tmp2 = dst + 16;
> + for (;cnt--;) {
> + src0 = __lsx_vld(src_tmp1, 0);
> + DUP2_ARG2(__lsx_vldx, src_tmp1, src_stride, src_tmp1,
> src_stride2,
> + src2, src4);
> + src6 = __lsx_vldx(src_tmp1, src_stride3);
> + src_tmp1 += src_stride4;
> +
> + src1 = __lsx_vld(src_tmp2, 0);
> + DUP2_ARG2(__lsx_vldx, src_tmp2, src_stride, src_tmp2,
> src_stride2,
> + src3, src5);
> + src7 = __lsx_vldx(src_tmp2, src_stride3);
> + src_tmp2 += src_stride4;
> +
> + dst0 = __lsx_vld(dst_tmp1, 0);
> + DUP2_ARG2(__lsx_vldx, dst_tmp1, dst_stride, dst_tmp1,
> dst_stride2,
> + dst2, dst4);
> + dst6 = __lsx_vldx(dst_tmp1, dst_stride3);
> + dst1 = __lsx_vld(dst_tmp2, 0);
> + DUP2_ARG2(__lsx_vldx, dst_tmp2, dst_stride, dst_tmp2,
> dst_stride2,
> + dst3, dst5);
> + dst7 = __lsx_vldx(dst_tmp2, dst_stride3);
> +
> + DUP4_ARG2(__lsx_vavgr_bu, src0, dst0, src1, dst1,
> + src2, dst2, src3, dst3, dst0, dst1, dst2, dst3);
> + DUP4_ARG2(__lsx_vavgr_bu, src4, dst4, src5, dst5,
> + src6, dst6, src7, dst7, dst4, dst5, dst6, dst7);
> + __lsx_vst(dst0, dst_tmp1, 0);
> + __lsx_vstx(dst2, dst_tmp1, dst_stride);
> + __lsx_vstx(dst4, dst_tmp1, dst_stride2);
> + __lsx_vstx(dst6, dst_tmp1, dst_stride3);
> + dst_tmp1 += dst_stride4;
> + __lsx_vst(dst1, dst_tmp2, 0);
> + __lsx_vstx(dst3, dst_tmp2, dst_stride);
> + __lsx_vstx(dst5, dst_tmp2, dst_stride2);
> + __lsx_vstx(dst7, dst_tmp2, dst_stride3);
> + dst_tmp2 += dst_stride4;
> + }
> +}
> +
> +static void avg_width64_lsx(const uint8_t *src, int32_t src_stride,
> + uint8_t *dst, int32_t dst_stride,
> + int32_t height)
> +{
> + int32_t cnt = height >> 2;
> + uint8_t *dst_tmp = dst;
> + __m128i src0, src1, src2, src3, src4, src5, src6, src7;
> + __m128i src8, src9, src10, src11, src12, src13, src14, src15;
> + __m128i dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
> + __m128i dst8, dst9, dst10, dst11, dst12, dst13, dst14, dst15;
> +
> + for (;cnt--;) {
> + DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48,
> + src0, src1, src2, src3);
> + src += src_stride;
> + DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48,
> + src4, src5, src6, src7);
> + src += src_stride;
> + DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48,
> + src8, src9, src10, src11);
> + src += src_stride;
> + DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48,
> + src12, src13, src14, src15);
> + src += src_stride;
> + DUP4_ARG2(__lsx_vld, dst_tmp, 0, dst_tmp, 16, dst_tmp, 32,
> dst_tmp, 48,
> + dst0, dst1, dst2, dst3);
> + dst_tmp += dst_stride;
> + DUP4_ARG2(__lsx_vld, dst_tmp, 0, dst_tmp, 16, dst_tmp, 32,
> dst_tmp, 48,
> + dst4, dst5, dst6, dst7);
> + dst_tmp += dst_stride;
> + DUP4_ARG2(__lsx_vld, dst_tmp, 0, dst_tmp, 16, dst_tmp, 32,
> dst_tmp, 48,
> + dst8, dst9, dst10, dst11);
> + dst_tmp += dst_stride;
> + DUP4_ARG2(__lsx_vld, dst_tmp, 0, dst_tmp, 16, dst_tmp, 32,
> dst_tmp, 48,
> + dst12, dst13, dst14, dst15);
> + dst_tmp += dst_stride;
> + DUP4_ARG2(__lsx_vavgr_bu, src0, dst0, src1, dst1,
> + src2, dst2, src3, dst3, dst0, dst1, dst2, dst3);
> + DUP4_ARG2(__lsx_vavgr_bu, src4, dst4, src5, dst5,
> + src6, dst6, src7, dst7, dst4, dst5, dst6, dst7);
> + DUP4_ARG2(__lsx_vavgr_bu, src8, dst8, src9, dst9, src10,
> + dst10, src11, dst11, dst8, dst9, dst10, dst11);
> + DUP4_ARG2(__lsx_vavgr_bu, src12, dst12, src13, dst13, src14,
> + dst14, src15, dst15, dst12, dst13, dst14, dst15);
> + __lsx_vst(dst0, dst, 0);
> + __lsx_vst(dst1, dst, 16);
> + __lsx_vst(dst2, dst, 32);
> + __lsx_vst(dst3, dst, 48);
> + dst += dst_stride;
> + __lsx_vst(dst4, dst, 0);
> + __lsx_vst(dst5, dst, 16);
> + __lsx_vst(dst6, dst, 32);
> + __lsx_vst(dst7, dst, 48);
> + dst += dst_stride;
> + __lsx_vst(dst8, dst, 0);
> + __lsx_vst(dst9, dst, 16);
> + __lsx_vst(dst10, dst, 32);
> + __lsx_vst(dst11, dst, 48);
> + dst += dst_stride;
> + __lsx_vst(dst12, dst, 0);
> + __lsx_vst(dst13, dst, 16);
> + __lsx_vst(dst14, dst, 32);
> + __lsx_vst(dst15, dst, 48);
> + dst += dst_stride;
> + }
> +}
> +
> +static const int8_t vp9_subpel_filters_lsx[3][15][8] = {
> + [FILTER_8TAP_REGULAR] = {
> + {0, 1, -5, 126, 8, -3, 1, 0},
> + {-1, 3, -10, 122, 18, -6, 2, 0},
> + {-1, 4, -13, 118, 27, -9, 3, -1},
> + {-1, 4, -16, 112, 37, -11, 4, -1},
> + {-1, 5, -18, 105, 48, -14, 4, -1},
> + {-1, 5, -19, 97, 58, -16, 5, -1},
> + {-1, 6, -19, 88, 68, -18, 5, -1},
> + {-1, 6, -19, 78, 78, -19, 6, -1},
> + {-1, 5, -18, 68, 88, -19, 6, -1},
> + {-1, 5, -16, 58, 97, -19, 5, -1},
> + {-1, 4, -14, 48, 105, -18, 5, -1},
> + {-1, 4, -11, 37, 112, -16, 4, -1},
> + {-1, 3, -9, 27, 118, -13, 4, -1},
> + {0, 2, -6, 18, 122, -10, 3, -1},
> + {0, 1, -3, 8, 126, -5, 1, 0},
> + }, [FILTER_8TAP_SHARP] = {
> + {-1, 3, -7, 127, 8, -3, 1, 0},
> + {-2, 5, -13, 125, 17, -6, 3, -1},
> + {-3, 7, -17, 121, 27, -10, 5, -2},
> + {-4, 9, -20, 115, 37, -13, 6, -2},
> + {-4, 10, -23, 108, 48, -16, 8, -3},
> + {-4, 10, -24, 100, 59, -19, 9, -3},
> + {-4, 11, -24, 90, 70, -21, 10, -4},
> + {-4, 11, -23, 80, 80, -23, 11, -4},
> + {-4, 10, -21, 70, 90, -24, 11, -4},
> + {-3, 9, -19, 59, 100, -24, 10, -4},
> + {-3, 8, -16, 48, 108, -23, 10, -4},
> + {-2, 6, -13, 37, 115, -20, 9, -4},
> + {-2, 5, -10, 27, 121, -17, 7, -3},
> + {-1, 3, -6, 17, 125, -13, 5, -2},
> + {0, 1, -3, 8, 127, -7, 3, -1},
> + }, [FILTER_8TAP_SMOOTH] = {
> + {-3, -1, 32, 64, 38, 1, -3, 0},
> + {-2, -2, 29, 63, 41, 2, -3, 0},
> + {-2, -2, 26, 63, 43, 4, -4, 0},
> + {-2, -3, 24, 62, 46, 5, -4, 0},
> + {-2, -3, 21, 60, 49, 7, -4, 0},
> + {-1, -4, 18, 59, 51, 9, -4, 0},
> + {-1, -4, 16, 57, 53, 12, -4, -1},
> + {-1, -4, 14, 55, 55, 14, -4, -1},
> + {-1, -4, 12, 53, 57, 16, -4, -1},
> + {0, -4, 9, 51, 59, 18, -4, -1},
> + {0, -4, 7, 49, 60, 21, -3, -2},
> + {0, -4, 5, 46, 62, 24, -3, -2},
> + {0, -4, 4, 43, 63, 26, -2, -2},
> + {0, -3, 2, 41, 63, 29, -2, -2},
> + {0, -3, 1, 38, 64, 32, -1, -3},
> + }
> +};
> +
> +#define VP9_8TAP_LOONGARCH_LSX_FUNC(SIZE, type, type_idx)
> \
> +void ff_put_8tap_##type##_##SIZE##h_lsx(uint8_t *dst, ptrdiff_t
> dststride, \
> + const uint8_t *src,
> \
> + ptrdiff_t srcstride,
> \
> + int h, int mx, int my)
> \
> +{
> \
> + const int8_t *filter = vp9_subpel_filters_lsx[type_idx][mx-1];
> \
> +
> \
> + common_hz_8t_##SIZE##w_lsx(src, srcstride, dst, dststride, filter,
> h); \
> +}
> \
> +
> \
> +void ff_put_8tap_##type##_##SIZE##v_lsx(uint8_t *dst, ptrdiff_t
> dststride, \
> + const uint8_t *src,
> \
> + ptrdiff_t srcstride,
> \
> + int h, int mx, int my)
> \
> +{
> \
> + const int8_t *filter = vp9_subpel_filters_lsx[type_idx][my-1];
> \
> +
> \
> + common_vt_8t_##SIZE##w_lsx(src, srcstride, dst, dststride, filter,
> h); \
> +}
> \
> +
> \
> +void ff_put_8tap_##type##_##SIZE##hv_lsx(uint8_t *dst, ptrdiff_t
> dststride, \
> + const uint8_t *src,
> \
> + ptrdiff_t srcstride,
> \
> + int h, int mx, int my)
> \
> +{
> \
> + const int8_t *hfilter = vp9_subpel_filters_lsx[type_idx][mx-1];
> \
> + const int8_t *vfilter = vp9_subpel_filters_lsx[type_idx][my-1];
> \
> +
> \
> + common_hv_8ht_8vt_##SIZE##w_lsx(src, srcstride, dst, dststride,
> hfilter, \
> + vfilter, h);
> \
> +}
> \
> +
> \
> +void ff_avg_8tap_##type##_##SIZE##h_lsx(uint8_t *dst, ptrdiff_t
> dststride, \
> + const uint8_t *src,
> \
> + ptrdiff_t srcstride,
> \
> + int h, int mx, int my)
> \
> +{
> \
> + const int8_t *filter = vp9_subpel_filters_lsx[type_idx][mx-1];
> \
> +
> \
> + common_hz_8t_and_aver_dst_##SIZE##w_lsx(src, srcstride, dst,
> \
> + dststride, filter, h);
> \
> +}
> \
> +
> \
> +void ff_avg_8tap_##type##_##SIZE##v_lsx(uint8_t *dst, ptrdiff_t
> dststride, \
> + const uint8_t *src,
> \
> + ptrdiff_t srcstride,
> \
> + int h, int mx, int my)
> \
> +{
> \
> + const int8_t *filter = vp9_subpel_filters_lsx[type_idx][my-1];
> \
> +
> \
> + common_vt_8t_and_aver_dst_##SIZE##w_lsx(src, srcstride, dst,
> dststride, \
> + filter, h);
> \
> +}
> \
> +
> \
> +void ff_avg_8tap_##type##_##SIZE##hv_lsx(uint8_t *dst, ptrdiff_t
> dststride, \
> + const uint8_t *src,
> \
> + ptrdiff_t srcstride,
> \
> + int h, int mx, int my)
> \
> +{
> \
> + const int8_t *hfilter = vp9_subpel_filters_lsx[type_idx][mx-1];
> \
> + const int8_t *vfilter = vp9_subpel_filters_lsx[type_idx][my-1];
> \
> +
> \
> + common_hv_8ht_8vt_and_aver_dst_##SIZE##w_lsx(src, srcstride, dst,
> \
> + dststride, hfilter,
> \
> + vfilter, h);
> \
> +}
> +
> +#define VP9_COPY_LOONGARCH_LSX_FUNC(SIZE) \
> +void ff_copy##SIZE##_lsx(uint8_t *dst, ptrdiff_t dststride, \
> + const uint8_t *src, ptrdiff_t srcstride, \
> + int h, int mx, int my) \
> +{ \
> + \
> + copy_width##SIZE##_lsx(src, srcstride, dst, dststride, h); \
> +} \
> +void ff_avg##SIZE##_lsx(uint8_t *dst, ptrdiff_t dststride, \
> + const uint8_t *src, ptrdiff_t srcstride, \
> + int h, int mx, int my) \
> +{ \
> + \
> + avg_width##SIZE##_lsx(src, srcstride, dst, dststride, h); \
> +}
> +
> +VP9_8TAP_LOONGARCH_LSX_FUNC(64, regular, FILTER_8TAP_REGULAR);
> +VP9_8TAP_LOONGARCH_LSX_FUNC(32, regular, FILTER_8TAP_REGULAR);
> +VP9_8TAP_LOONGARCH_LSX_FUNC(16, regular, FILTER_8TAP_REGULAR);
> +VP9_8TAP_LOONGARCH_LSX_FUNC(8, regular, FILTER_8TAP_REGULAR);
> +VP9_8TAP_LOONGARCH_LSX_FUNC(4, regular, FILTER_8TAP_REGULAR);
> +
> +VP9_8TAP_LOONGARCH_LSX_FUNC(64, sharp, FILTER_8TAP_SHARP);
> +VP9_8TAP_LOONGARCH_LSX_FUNC(32, sharp, FILTER_8TAP_SHARP);
> +VP9_8TAP_LOONGARCH_LSX_FUNC(16, sharp, FILTER_8TAP_SHARP);
> +VP9_8TAP_LOONGARCH_LSX_FUNC(8, sharp, FILTER_8TAP_SHARP);
> +VP9_8TAP_LOONGARCH_LSX_FUNC(4, sharp, FILTER_8TAP_SHARP);
> +
> +VP9_8TAP_LOONGARCH_LSX_FUNC(64, smooth, FILTER_8TAP_SMOOTH);
> +VP9_8TAP_LOONGARCH_LSX_FUNC(32, smooth, FILTER_8TAP_SMOOTH);
> +VP9_8TAP_LOONGARCH_LSX_FUNC(16, smooth, FILTER_8TAP_SMOOTH);
> +VP9_8TAP_LOONGARCH_LSX_FUNC(8, smooth, FILTER_8TAP_SMOOTH);
> +VP9_8TAP_LOONGARCH_LSX_FUNC(4, smooth, FILTER_8TAP_SMOOTH);
> +
> +VP9_COPY_LOONGARCH_LSX_FUNC(64);
> +VP9_COPY_LOONGARCH_LSX_FUNC(32);
> +VP9_COPY_LOONGARCH_LSX_FUNC(16);
> +VP9_COPY_LOONGARCH_LSX_FUNC(8);
> +
> +#undef VP9_8TAP_LOONGARCH_LSX_FUNC
> +#undef VP9_COPY_LOONGARCH_LSX_FUNC
> diff --git a/libavcodec/loongarch/vp9dsp_init_loongarch.c b/libavcodec/loongarch/vp9dsp_init_loongarch.c
> new file mode 100644
> index 0000000000..c1e01b4558
> --- /dev/null
> +++ b/libavcodec/loongarch/vp9dsp_init_loongarch.c
> @@ -0,0 +1,97 @@
> +/*
> + * Copyright (c) 2021 Loongson Technology Corporation Limited
> + * Contributed by Hao Chen <chenhao@loongson.cn>
> + *
> + * This file is part of FFmpeg.
> + *
> + * FFmpeg is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU Lesser General Public
> + * License as published by the Free Software Foundation; either
> + * version 2.1 of the License, or (at your option) any later version.
> + *
> + * FFmpeg is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
> + * Lesser General Public License for more details.
> + *
> + * You should have received a copy of the GNU Lesser General Public
> + * License along with FFmpeg; if not, write to the Free Software
> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
> + */
> +
> +#include "libavutil/loongarch/cpu.h"
> +#include "libavutil/attributes.h"
> +#include "libavcodec/vp9dsp.h"
> +#include "vp9dsp_loongarch.h"
> +
> +#define init_subpel1(idx1, idx2, idxh, idxv, sz, dir, type) \
> + dsp->mc[idx1][FILTER_8TAP_SMOOTH ][idx2][idxh][idxv] = \
> + ff_##type##_8tap_smooth_##sz##dir##_lsx; \
> + dsp->mc[idx1][FILTER_8TAP_REGULAR][idx2][idxh][idxv] = \
> + ff_##type##_8tap_regular_##sz##dir##_lsx; \
> + dsp->mc[idx1][FILTER_8TAP_SHARP ][idx2][idxh][idxv] = \
> + ff_##type##_8tap_sharp_##sz##dir##_lsx;
> +
> +#define init_subpel2(idx, idxh, idxv, dir, type) \
> + init_subpel1(0, idx, idxh, idxv, 64, dir, type); \
> + init_subpel1(1, idx, idxh, idxv, 32, dir, type); \
> + init_subpel1(2, idx, idxh, idxv, 16, dir, type); \
> + init_subpel1(3, idx, idxh, idxv, 8, dir, type); \
> + init_subpel1(4, idx, idxh, idxv, 4, dir, type);
> +
> +#define init_subpel3(idx, type) \
> + init_subpel2(idx, 1, 0, h, type); \
> + init_subpel2(idx, 0, 1, v, type); \
> + init_subpel2(idx, 1, 1, hv, type);
> +
> +#define init_fpel(idx1, idx2, sz, type) \
> + dsp->mc[idx1][FILTER_8TAP_SMOOTH ][idx2][0][0] = ff_##type##sz##_lsx; \
> + dsp->mc[idx1][FILTER_8TAP_REGULAR][idx2][0][0] = ff_##type##sz##_lsx; \
> + dsp->mc[idx1][FILTER_8TAP_SHARP ][idx2][0][0] = ff_##type##sz##_lsx; \
> + dsp->mc[idx1][FILTER_BILINEAR ][idx2][0][0] = ff_##type##sz##_lsx;
> +
> +#define init_copy(idx, sz) \
> + init_fpel(idx, 0, sz, copy); \
> + init_fpel(idx, 1, sz, avg);
> +
> +#define init_intra_pred1_lsx(tx, sz) \
> + dsp->intra_pred[tx][VERT_PRED] = ff_vert_##sz##_lsx; \
> + dsp->intra_pred[tx][HOR_PRED] = ff_hor_##sz##_lsx; \
> + dsp->intra_pred[tx][DC_PRED] = ff_dc_##sz##_lsx; \
> + dsp->intra_pred[tx][LEFT_DC_PRED] = ff_dc_left_##sz##_lsx; \
> + dsp->intra_pred[tx][TOP_DC_PRED] = ff_dc_top_##sz##_lsx; \
> + dsp->intra_pred[tx][DC_128_PRED] = ff_dc_128_##sz##_lsx; \
> + dsp->intra_pred[tx][DC_127_PRED] = ff_dc_127_##sz##_lsx; \
> + dsp->intra_pred[tx][DC_129_PRED] = ff_dc_129_##sz##_lsx; \
> + dsp->intra_pred[tx][TM_VP8_PRED] = ff_tm_##sz##_lsx; \
> +
> +#define init_intra_pred2_lsx(tx, sz) \
> + dsp->intra_pred[tx][DC_PRED] = ff_dc_##sz##_lsx; \
> + dsp->intra_pred[tx][LEFT_DC_PRED] = ff_dc_left_##sz##_lsx; \
> + dsp->intra_pred[tx][TOP_DC_PRED] = ff_dc_top_##sz##_lsx; \
> + dsp->intra_pred[tx][TM_VP8_PRED] = ff_tm_##sz##_lsx; \
> +
> +av_cold void ff_vp9dsp_init_loongarch(VP9DSPContext *dsp, int bpp)
> +{
> + int cpu_flags = av_get_cpu_flags();
> + if (have_lsx(cpu_flags))
> + if (bpp == 8) {
> + init_subpel3(0, put);
> + init_subpel3(1, avg);
> + init_copy(0, 64);
> + init_copy(1, 32);
> + init_copy(2, 16);
> + init_copy(3, 8);
> + init_intra_pred1_lsx(TX_16X16, 16x16);
> + init_intra_pred1_lsx(TX_32X32, 32x32);
> + init_intra_pred2_lsx(TX_4X4, 4x4);
> + init_intra_pred2_lsx(TX_8X8, 8x8);
> + }
> +}
> +#undef init_subpel1
> +#undef init_subpel2
> +#undef init_subpel3
> +#undef init_copy
> +#undef init_fpel
> +#undef init_intra_pred1_lsx
> +#undef init_intra_pred2_lsx
> diff --git a/libavcodec/loongarch/vp9dsp_loongarch.h b/libavcodec/loongarch/vp9dsp_loongarch.h
> new file mode 100644
> index 0000000000..b469326fdc
> --- /dev/null
> +++ b/libavcodec/loongarch/vp9dsp_loongarch.h
> @@ -0,0 +1,144 @@
> +/*
> + * Copyright (c) 2021 Loongson Technology Corporation Limited
> + * Contributed by Hao Chen <chenhao@loongson.cn>
> + *
> + * This file is part of FFmpeg.
> + *
> + * FFmpeg is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU Lesser General Public
> + * License as published by the Free Software Foundation; either
> + * version 2.1 of the License, or (at your option) any later version.
> + *
> + * FFmpeg is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
> + * Lesser General Public License for more details.
> + *
> + * You should have received a copy of the GNU Lesser General Public
> + * License along with FFmpeg; if not, write to the Free Software
> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
> + */
> +
> +#ifndef AVCODEC_LOONGARCH_VP9DSP_LOONGARCH_H
> +#define AVCODEC_LOONGARCH_VP9DSP_LOONGARCH_H
> +
> +#define VP9_8TAP_LOONGARCH_LSX_FUNC(SIZE, type, type_idx)
> \
> +void ff_put_8tap_##type##_##SIZE##h_lsx(uint8_t *dst, ptrdiff_t
> dststride, \
> + const uint8_t *src,
> \
> + ptrdiff_t srcstride,
> \
> + int h, int mx, int my);
> \
> +
> \
> +void ff_put_8tap_##type##_##SIZE##v_lsx(uint8_t *dst, ptrdiff_t
> dststride, \
> + const uint8_t *src,
> \
> + ptrdiff_t srcstride,
> \
> + int h, int mx, int my);
> \
> +
> \
> +void ff_put_8tap_##type##_##SIZE##hv_lsx(uint8_t *dst, ptrdiff_t
> dststride, \
> + const uint8_t *src,
> \
> + ptrdiff_t srcstride,
> \
> + int h, int mx, int my);
> \
> +
> \
> +void ff_avg_8tap_##type##_##SIZE##h_lsx(uint8_t *dst, ptrdiff_t
> dststride, \
> + const uint8_t *src,
> \
> + ptrdiff_t srcstride,
> \
> + int h, int mx, int my);
> \
> +
> \
> +void ff_avg_8tap_##type##_##SIZE##v_lsx(uint8_t *dst, ptrdiff_t
> dststride, \
> + const uint8_t *src,
> \
> + ptrdiff_t srcstride,
> \
> + int h, int mx, int my);
> \
> +
> \
> +void ff_avg_8tap_##type##_##SIZE##hv_lsx(uint8_t *dst, ptrdiff_t
> dststride, \
> + const uint8_t *src,
> \
> + ptrdiff_t srcstride,
> \
> + int h, int mx, int my);
> +
> +#define VP9_COPY_LOONGARCH_LSX_FUNC(SIZE) \
> +void ff_copy##SIZE##_lsx(uint8_t *dst, ptrdiff_t dststride, \
> + const uint8_t *src, ptrdiff_t srcstride, \
> + int h, int mx, int my); \
> + \
> +void ff_avg##SIZE##_lsx(uint8_t *dst, ptrdiff_t dststride, \
> + const uint8_t *src, ptrdiff_t srcstride, \
> + int h, int mx, int my);
> +
> +VP9_8TAP_LOONGARCH_LSX_FUNC(64, regular, FILTER_8TAP_REGULAR);
> +VP9_8TAP_LOONGARCH_LSX_FUNC(32, regular, FILTER_8TAP_REGULAR);
> +VP9_8TAP_LOONGARCH_LSX_FUNC(16, regular, FILTER_8TAP_REGULAR);
> +VP9_8TAP_LOONGARCH_LSX_FUNC(8, regular, FILTER_8TAP_REGULAR);
> +VP9_8TAP_LOONGARCH_LSX_FUNC(4, regular, FILTER_8TAP_REGULAR);
> +
> +VP9_8TAP_LOONGARCH_LSX_FUNC(64, sharp, FILTER_8TAP_SHARP);
> +VP9_8TAP_LOONGARCH_LSX_FUNC(32, sharp, FILTER_8TAP_SHARP);
> +VP9_8TAP_LOONGARCH_LSX_FUNC(16, sharp, FILTER_8TAP_SHARP);
> +VP9_8TAP_LOONGARCH_LSX_FUNC(8, sharp, FILTER_8TAP_SHARP);
> +VP9_8TAP_LOONGARCH_LSX_FUNC(4, sharp, FILTER_8TAP_SHARP);
> +
> +VP9_8TAP_LOONGARCH_LSX_FUNC(64, smooth, FILTER_8TAP_SMOOTH);
> +VP9_8TAP_LOONGARCH_LSX_FUNC(32, smooth, FILTER_8TAP_SMOOTH);
> +VP9_8TAP_LOONGARCH_LSX_FUNC(16, smooth, FILTER_8TAP_SMOOTH);
> +VP9_8TAP_LOONGARCH_LSX_FUNC(8, smooth, FILTER_8TAP_SMOOTH);
> +VP9_8TAP_LOONGARCH_LSX_FUNC(4, smooth, FILTER_8TAP_SMOOTH);
> +
> +VP9_COPY_LOONGARCH_LSX_FUNC(64);
> +VP9_COPY_LOONGARCH_LSX_FUNC(32);
> +VP9_COPY_LOONGARCH_LSX_FUNC(16);
> +VP9_COPY_LOONGARCH_LSX_FUNC(8);
> +
> +#undef VP9_8TAP_LOONGARCH_LSX_FUNC
> +#undef VP9_COPY_LOONGARCH_LSX_FUNC
> +
> +void ff_vert_16x16_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
> + const uint8_t *top);
> +void ff_vert_32x32_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
> + const uint8_t *top);
> +void ff_hor_16x16_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
> + const uint8_t *top);
> +void ff_hor_32x32_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
> + const uint8_t *top);
> +void ff_dc_4x4_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
> + const uint8_t *top);
> +void ff_dc_8x8_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
> + const uint8_t *top);
> +void ff_dc_16x16_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
> + const uint8_t *top);
> +void ff_dc_32x32_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
> + const uint8_t *top);
> +void ff_dc_left_4x4_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
> + const uint8_t *top);
> +void ff_dc_left_8x8_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
> + const uint8_t *top);
> +void ff_dc_left_16x16_lsx(uint8_t *dst, ptrdiff_t stride,
> + const uint8_t *left, const uint8_t *top);
> +void ff_dc_left_32x32_lsx(uint8_t *dst, ptrdiff_t stride,
> + const uint8_t *left, const uint8_t *top);
> +void ff_dc_top_4x4_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
> + const uint8_t *top);
> +void ff_dc_top_8x8_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
> + const uint8_t *top);
> +void ff_dc_top_16x16_lsx(uint8_t *dst, ptrdiff_t stride,
> + const uint8_t *left, const uint8_t *top);
> +void ff_dc_top_32x32_lsx(uint8_t *dst, ptrdiff_t stride,
> + const uint8_t *left, const uint8_t *top);
> +void ff_dc_128_16x16_lsx(uint8_t *dst, ptrdiff_t stride,
> + const uint8_t *left, const uint8_t *top);
> +void ff_dc_128_32x32_lsx(uint8_t *dst, ptrdiff_t stride,
> + const uint8_t *left, const uint8_t *top);
> +void ff_dc_127_16x16_lsx(uint8_t *dst, ptrdiff_t stride,
> + const uint8_t *left, const uint8_t *top);
> +void ff_dc_127_32x32_lsx(uint8_t *dst, ptrdiff_t stride,
> + const uint8_t *left, const uint8_t *top);
> +void ff_dc_129_16x16_lsx(uint8_t *dst, ptrdiff_t stride,
> + const uint8_t *left, const uint8_t *top);
> +void ff_dc_129_32x32_lsx(uint8_t *dst, ptrdiff_t stride,
> + const uint8_t *left, const uint8_t *top);
> +void ff_tm_4x4_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
> + const uint8_t *top);
> +void ff_tm_8x8_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
> + const uint8_t *top);
> +void ff_tm_16x16_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
> + const uint8_t *top);
> +void ff_tm_32x32_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
> + const uint8_t *top);
> +
> +#endif /* AVCODEC_LOONGARCH_VP9DSP_LOONGARCH_H */
> diff --git a/libavcodec/vp9dsp.c b/libavcodec/vp9dsp.c
> index 41b8ad1ad1..82bfe394d1 100644
> --- a/libavcodec/vp9dsp.c
> +++ b/libavcodec/vp9dsp.c
> @@ -98,4 +98,5 @@ av_cold void ff_vp9dsp_init(VP9DSPContext *dsp, int bpp, int bitexact)
> if (ARCH_ARM) ff_vp9dsp_init_arm(dsp, bpp);
> if (ARCH_X86) ff_vp9dsp_init_x86(dsp, bpp, bitexact);
> if (ARCH_MIPS) ff_vp9dsp_init_mips(dsp, bpp);
> + if (ARCH_LOONGARCH) ff_vp9dsp_init_loongarch(dsp, bpp);
> }
> diff --git a/libavcodec/vp9dsp.h b/libavcodec/vp9dsp.h
> index e2256316a8..700dd72de8 100644
> --- a/libavcodec/vp9dsp.h
> +++ b/libavcodec/vp9dsp.h
> @@ -132,5 +132,6 @@ void ff_vp9dsp_init_aarch64(VP9DSPContext *dsp, int bpp);
> void ff_vp9dsp_init_arm(VP9DSPContext *dsp, int bpp);
> void ff_vp9dsp_init_x86(VP9DSPContext *dsp, int bpp, int bitexact);
> void ff_vp9dsp_init_mips(VP9DSPContext *dsp, int bpp);
> +void ff_vp9dsp_init_loongarch(VP9DSPContext *dsp, int bpp);
>
> #endif /* AVCODEC_VP9DSP_H */
> --
> 2.20.1
>
--
Jean-Baptiste Kempf - President
+33 672 704 734
_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
* Re: [FFmpeg-devel] [PATCH 2/4] avcodec: [loongarch] Optimize vp9_mc/intra with LSX.
2021-12-18 18:47 ` Jean-Baptiste Kempf
@ 2021-12-20 6:07 ` Hao Chen
2021-12-20 6:20 ` Shiyou Yin
1 sibling, 0 replies; 10+ messages in thread
From: Hao Chen @ 2021-12-20 6:07 UTC (permalink / raw)
To: FFmpeg development discussions and patches, Jean-Baptiste Kempf
Thanks for your question. At present we do not have an ASM version of this
code, but we plan to rewrite the existing intrinsics in ASM; that will take
some time.
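For anyone comparing the intrinsics against a future ASM version: most of the
loads, stores and arithmetic in these patches go through the DUP2_ARG2()/
DUP4_ARG2() helpers from libavutil/loongarch/loongson_intrinsics.h. As I read
that header, they simply fan one intrinsic out over several operand/result
pairs; a minimal sketch (illustration only, the header is authoritative):

#define DUP2_ARG2(_INS, _IN0, _IN1, _IN2, _IN3, _OUT0, _OUT1) \
{                                                             \
    _OUT0 = _INS(_IN0, _IN1); /* first result  */             \
    _OUT1 = _INS(_IN2, _IN3); /* second result */             \
}

so a load pair such as

    DUP2_ARG2(__lsx_vldx, src, stride, src, stride2, src1, src2);

stands for

    src1 = __lsx_vldx(src, stride);
    src2 = __lsx_vldx(src, stride2);

and DUP4_ARG2() does the same for four operand/result pairs.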
Best,
On 2021/12/19 at 2:47 AM, Jean-Baptiste Kempf wrote:
> Sorry to ask, but don't you have an ASM format, instead of intrinsics?
>
> Best,
>
> On Sat, 18 Dec 2021, at 15:27, Hao Chen wrote:
>> ffmpeg -i ../10_vp9_1080p_30fps_3Mbps.webm -f rawvideo -y /dev/null -an
>> before: 170fps
>> after: 294fps
>> ---
>> libavcodec/loongarch/Makefile | 3 +
>> libavcodec/loongarch/vp9_intra_lsx.c | 653 +++++
>> libavcodec/loongarch/vp9_mc_lsx.c | 2480 ++++++++++++++++++
>> libavcodec/loongarch/vp9dsp_init_loongarch.c | 97 +
>> libavcodec/loongarch/vp9dsp_loongarch.h | 144 +
>> libavcodec/vp9dsp.c | 1 +
>> libavcodec/vp9dsp.h | 1 +
>> 7 files changed, 3379 insertions(+)
>> create mode 100644 libavcodec/loongarch/vp9_intra_lsx.c
>> create mode 100644 libavcodec/loongarch/vp9_mc_lsx.c
>> create mode 100644 libavcodec/loongarch/vp9dsp_init_loongarch.c
>> create mode 100644 libavcodec/loongarch/vp9dsp_loongarch.h
>>
>> diff --git a/libavcodec/loongarch/Makefile b/libavcodec/loongarch/Makefile
>> index 4e1d827e19..6fcebe40a3 100644
>> --- a/libavcodec/loongarch/Makefile
>> +++ b/libavcodec/loongarch/Makefile
>> @@ -3,6 +3,7 @@ OBJS-$(CONFIG_H264QPEL) += loongarch/h264qpel_init_loongarch.o
>> OBJS-$(CONFIG_H264DSP) += loongarch/h264dsp_init_loongarch.o
>> OBJS-$(CONFIG_H264PRED) += loongarch/h264_intrapred_init_loongarch.o
>> OBJS-$(CONFIG_VP8_DECODER) += loongarch/vp8dsp_init_loongarch.o
>> +OBJS-$(CONFIG_VP9_DECODER) += loongarch/vp9dsp_init_loongarch.o
>> LASX-OBJS-$(CONFIG_H264CHROMA) += loongarch/h264chroma_lasx.o
>> LASX-OBJS-$(CONFIG_H264QPEL) += loongarch/h264qpel_lasx.o
>> LASX-OBJS-$(CONFIG_H264DSP) += loongarch/h264dsp_lasx.o \
>> @@ -11,3 +12,5 @@ LASX-OBJS-$(CONFIG_H264DSP) += loongarch/h264dsp_lasx.o \
>> LASX-OBJS-$(CONFIG_H264PRED) += loongarch/h264_intrapred_lasx.o
>> LSX-OBJS-$(CONFIG_VP8_DECODER) += loongarch/vp8_mc_lsx.o \
>> loongarch/vp8_lpf_lsx.o
>> +LSX-OBJS-$(CONFIG_VP9_DECODER) += loongarch/vp9_mc_lsx.o \
>> + loongarch/vp9_intra_lsx.o
>> diff --git a/libavcodec/loongarch/vp9_intra_lsx.c b/libavcodec/loongarch/vp9_intra_lsx.c
>> new file mode 100644
>> index 0000000000..d3f32646f3
>> --- /dev/null
>> +++ b/libavcodec/loongarch/vp9_intra_lsx.c
>> @@ -0,0 +1,653 @@
>> +/*
>> + * Copyright (c) 2021 Loongson Technology Corporation Limited
>> + * Contributed by Hao Chen <chenhao@loongson.cn>
>> + *
>> + * This file is part of FFmpeg.
>> + *
>> + * FFmpeg is free software; you can redistribute it and/or
>> + * modify it under the terms of the GNU Lesser General Public
>> + * License as published by the Free Software Foundation; either
>> + * version 2.1 of the License, or (at your option) any later version.
>> + *
>> + * FFmpeg is distributed in the hope that it will be useful,
>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
>> + * Lesser General Public License for more details.
>> + *
>> + * You should have received a copy of the GNU Lesser General Public
>> + * License along with FFmpeg; if not, write to the Free Software
>> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
>> + */
>> +
>> +#include "libavcodec/vp9dsp.h"
>> +#include "libavutil/loongarch/loongson_intrinsics.h"
>> +#include "vp9dsp_loongarch.h"
>> +
>> +#define LSX_ST_8(_dst0, _dst1, _dst2, _dst3, _dst4, \
>> + _dst5, _dst6, _dst7, _dst, _stride, \
>> + _stride2, _stride3, _stride4) \
>> +{ \
>> + __lsx_vst(_dst0, _dst, 0); \
>> + __lsx_vstx(_dst1, _dst, _stride); \
>> + __lsx_vstx(_dst2, _dst, _stride2); \
>> + __lsx_vstx(_dst3, _dst, _stride3); \
>> + _dst += _stride4; \
>> + __lsx_vst(_dst4, _dst, 0); \
>> + __lsx_vstx(_dst5, _dst, _stride); \
>> + __lsx_vstx(_dst6, _dst, _stride2); \
>> + __lsx_vstx(_dst7, _dst, _stride3); \
>> +}
>> +
>> +#define LSX_ST_8X16(_dst0, _dst1, _dst2, _dst3, _dst4, \
>> + _dst5, _dst6, _dst7, _dst, _stride) \
>> +{ \
>> + __lsx_vst(_dst0, _dst, 0); \
>> + __lsx_vst(_dst0, _dst, 16); \
>> + _dst += _stride; \
>> + __lsx_vst(_dst1, _dst, 0); \
>> + __lsx_vst(_dst1, _dst, 16); \
>> + _dst += _stride; \
>> + __lsx_vst(_dst2, _dst, 0); \
>> + __lsx_vst(_dst2, _dst, 16); \
>> + _dst += _stride; \
>> + __lsx_vst(_dst3, _dst, 0); \
>> + __lsx_vst(_dst3, _dst, 16); \
>> + _dst += _stride; \
>> + __lsx_vst(_dst4, _dst, 0); \
>> + __lsx_vst(_dst4, _dst, 16); \
>> + _dst += _stride; \
>> + __lsx_vst(_dst5, _dst, 0); \
>> + __lsx_vst(_dst5, _dst, 16); \
>> + _dst += _stride; \
>> + __lsx_vst(_dst6, _dst, 0); \
>> + __lsx_vst(_dst6, _dst, 16); \
>> + _dst += _stride; \
>> + __lsx_vst(_dst7, _dst, 0); \
>> + __lsx_vst(_dst7, _dst, 16); \
>> + _dst += _stride; \
>> +}
>> +
>> +void ff_vert_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *left,
>> + const uint8_t *src)
>> +{
>> + __m128i src0;
>> + ptrdiff_t stride2 = dst_stride << 1;
>> + ptrdiff_t stride3 = stride2 + dst_stride;
>> + ptrdiff_t stride4 = stride2 << 1;
>> + src0 = __lsx_vld(src, 0);
>> + LSX_ST_8(src0, src0, src0, src0, src0, src0, src0, src0, dst,
>> + dst_stride, stride2, stride3, stride4);
>> + dst += stride4;
>> + LSX_ST_8(src0, src0, src0, src0, src0, src0, src0, src0, dst,
>> + dst_stride, stride2, stride3, stride4);
>> +}
>> +
>> +void ff_vert_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *left,
>> + const uint8_t *src)
>> +{
>> + uint32_t row;
>> + __m128i src0, src1;
>> +
>> + DUP2_ARG2(__lsx_vld, src, 0, src, 16, src0, src1);
>> + for (row = 32; row--;) {
>> + __lsx_vst(src0, dst, 0);
>> + __lsx_vst(src1, dst, 16);
>> + dst += dst_stride;
>> + }
>> +}
>> +
>> +void ff_hor_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *src,
>> + const uint8_t *top)
>> +{
>> + __m128i src0, src1, src2, src3, src4, src5, src6, src7;
>> + __m128i src8, src9, src10, src11, src12, src13, src14, src15;
>> + ptrdiff_t stride2 = dst_stride << 1;
>> + ptrdiff_t stride3 = stride2 + dst_stride;
>> + ptrdiff_t stride4 = stride2 << 1;
>> +
>> + src15 = __lsx_vldrepl_b(src, 0);
>> + src14 = __lsx_vldrepl_b(src, 1);
>> + src13 = __lsx_vldrepl_b(src, 2);
>> + src12 = __lsx_vldrepl_b(src, 3);
>> + src11 = __lsx_vldrepl_b(src, 4);
>> + src10 = __lsx_vldrepl_b(src, 5);
>> + src9 = __lsx_vldrepl_b(src, 6);
>> + src8 = __lsx_vldrepl_b(src, 7);
>> + src7 = __lsx_vldrepl_b(src, 8);
>> + src6 = __lsx_vldrepl_b(src, 9);
>> + src5 = __lsx_vldrepl_b(src, 10);
>> + src4 = __lsx_vldrepl_b(src, 11);
>> + src3 = __lsx_vldrepl_b(src, 12);
>> + src2 = __lsx_vldrepl_b(src, 13);
>> + src1 = __lsx_vldrepl_b(src, 14);
>> + src0 = __lsx_vldrepl_b(src, 15);
>> + LSX_ST_8(src0, src1, src2, src3, src4, src5, src6, src7, dst,
>> + dst_stride, stride2, stride3, stride4);
>> + dst += stride4;
>> + LSX_ST_8(src8, src9, src10, src11, src12, src13, src14, src15, dst,
>> + dst_stride, stride2, stride3, stride4);
>> +}
>> +
>> +void ff_hor_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *src,
>> + const uint8_t *top)
>> +{
>> + __m128i src0, src1, src2, src3, src4, src5, src6, src7;
>> + __m128i src8, src9, src10, src11, src12, src13, src14, src15;
>> + __m128i src16, src17, src18, src19, src20, src21, src22, src23;
>> + __m128i src24, src25, src26, src27, src28, src29, src30, src31;
>> +
>> + src31 = __lsx_vldrepl_b(src, 0);
>> + src30 = __lsx_vldrepl_b(src, 1);
>> + src29 = __lsx_vldrepl_b(src, 2);
>> + src28 = __lsx_vldrepl_b(src, 3);
>> + src27 = __lsx_vldrepl_b(src, 4);
>> + src26 = __lsx_vldrepl_b(src, 5);
>> + src25 = __lsx_vldrepl_b(src, 6);
>> + src24 = __lsx_vldrepl_b(src, 7);
>> + src23 = __lsx_vldrepl_b(src, 8);
>> + src22 = __lsx_vldrepl_b(src, 9);
>> + src21 = __lsx_vldrepl_b(src, 10);
>> + src20 = __lsx_vldrepl_b(src, 11);
>> + src19 = __lsx_vldrepl_b(src, 12);
>> + src18 = __lsx_vldrepl_b(src, 13);
>> + src17 = __lsx_vldrepl_b(src, 14);
>> + src16 = __lsx_vldrepl_b(src, 15);
>> + src15 = __lsx_vldrepl_b(src, 16);
>> + src14 = __lsx_vldrepl_b(src, 17);
>> + src13 = __lsx_vldrepl_b(src, 18);
>> + src12 = __lsx_vldrepl_b(src, 19);
>> + src11 = __lsx_vldrepl_b(src, 20);
>> + src10 = __lsx_vldrepl_b(src, 21);
>> + src9 = __lsx_vldrepl_b(src, 22);
>> + src8 = __lsx_vldrepl_b(src, 23);
>> + src7 = __lsx_vldrepl_b(src, 24);
>> + src6 = __lsx_vldrepl_b(src, 25);
>> + src5 = __lsx_vldrepl_b(src, 26);
>> + src4 = __lsx_vldrepl_b(src, 27);
>> + src3 = __lsx_vldrepl_b(src, 28);
>> + src2 = __lsx_vldrepl_b(src, 29);
>> + src1 = __lsx_vldrepl_b(src, 30);
>> + src0 = __lsx_vldrepl_b(src, 31);
>> + LSX_ST_8X16(src0, src1, src2, src3, src4, src5, src6, src7,
>> + dst, dst_stride);
>> + LSX_ST_8X16(src8, src9, src10, src11, src12, src13, src14, src15,
>> + dst, dst_stride);
>> + LSX_ST_8X16(src16, src17, src18, src19, src20, src21, src22, src23,
>> + dst, dst_stride);
>> + LSX_ST_8X16(src24, src25, src26, src27, src28, src29, src30, src31,
>> + dst, dst_stride);
>> +}
>> +
>> +void ff_dc_4x4_lsx(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *src_left,
>> + const uint8_t *src_top)
>> +{
>> + __m128i tmp0, tmp1, dst0;
>> +
>> + tmp0 = __lsx_vldrepl_w(src_top, 0);
>> + tmp1 = __lsx_vldrepl_w(src_left, 0);
>> + dst0 = __lsx_vilvl_w(tmp1, tmp0);
>> + dst0 = __lsx_vhaddw_hu_bu(dst0, dst0);
>> + dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);
>> + dst0 = __lsx_vhaddw_du_wu(dst0, dst0);
>> + dst0 = __lsx_vsrari_w(dst0, 3);
>> + dst0 = __lsx_vshuf4i_b(dst0, 0);
>> + __lsx_vstelm_w(dst0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(dst0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(dst0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(dst0, dst, 0, 0);
>> +}
>> +
>> +#define INTRA_DC_TL_4X4(dir)
>> \
>> +void ff_dc_##dir##_4x4_lsx(uint8_t *dst, ptrdiff_t dst_stride,
>> \
>> + const uint8_t *left,
>> \
>> + const uint8_t *top)
>> \
>> +{
>> \
>> + __m128i tmp0, dst0;
>> \
>> +
>> \
>> + tmp0 = __lsx_vldrepl_w(dir, 0);
>> \
>> + dst0 = __lsx_vhaddw_hu_bu(tmp0, tmp0);
>> \
>> + dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);
>> \
>> + dst0 = __lsx_vsrari_w(dst0, 2);
>> \
>> + dst0 = __lsx_vshuf4i_b(dst0, 0);
>> \
>> + __lsx_vstelm_w(dst0, dst, 0, 0);
>> \
>> + dst += dst_stride;
>> \
>> + __lsx_vstelm_w(dst0, dst, 0, 0);
>> \
>> + dst += dst_stride;
>> \
>> + __lsx_vstelm_w(dst0, dst, 0, 0);
>> \
>> + dst += dst_stride;
>> \
>> + __lsx_vstelm_w(dst0, dst, 0, 0);
>> \
>> +}
>> +INTRA_DC_TL_4X4(top);
>> +INTRA_DC_TL_4X4(left);
>> +
>> +void ff_dc_8x8_lsx(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *src_left,
>> + const uint8_t *src_top)
>> +{
>> + __m128i tmp0, tmp1, dst0;
>> +
>> + tmp0 = __lsx_vldrepl_d(src_top, 0);
>> + tmp1 = __lsx_vldrepl_d(src_left, 0);
>> + dst0 = __lsx_vilvl_d(tmp1, tmp0);
>> + dst0 = __lsx_vhaddw_hu_bu(dst0, dst0);
>> + dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);
>> + dst0 = __lsx_vhaddw_du_wu(dst0, dst0);
>> + dst0 = __lsx_vhaddw_qu_du(dst0, dst0);
>> + dst0 = __lsx_vsrari_w(dst0, 4);
>> + dst0 = __lsx_vreplvei_b(dst0, 0);
>> + __lsx_vstelm_d(dst0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(dst0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(dst0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(dst0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(dst0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(dst0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(dst0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(dst0, dst, 0, 0);
>> +}
>> +
>> +#define INTRA_DC_TL_8X8(dir)
>> \
>> +void ff_dc_##dir##_8x8_lsx(uint8_t *dst, ptrdiff_t dst_stride,
>> \
>> + const uint8_t *left,
>> \
>> + const uint8_t *top)
>> \
>> +{
>> \
>> + __m128i tmp0, dst0;
>> \
>> +
>> \
>> + tmp0 = __lsx_vldrepl_d(dir, 0);
>> \
>> + dst0 = __lsx_vhaddw_hu_bu(tmp0, tmp0);
>> \
>> + dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);
>> \
>> + dst0 = __lsx_vhaddw_du_wu(dst0, dst0);
>> \
>> + dst0 = __lsx_vsrari_w(dst0, 3);
>> \
>> + dst0 = __lsx_vreplvei_b(dst0, 0);
>> \
>> + __lsx_vstelm_d(dst0, dst, 0, 0);
>> \
>> + dst += dst_stride;
>> \
>> + __lsx_vstelm_d(dst0, dst, 0, 0);
>> \
>> + dst += dst_stride;
>> \
>> + __lsx_vstelm_d(dst0, dst, 0, 0);
>> \
>> + dst += dst_stride;
>> \
>> + __lsx_vstelm_d(dst0, dst, 0, 0);
>> \
>> + dst += dst_stride;
>> \
>> + __lsx_vstelm_d(dst0, dst, 0, 0);
>> \
>> + dst += dst_stride;
>> \
>> + __lsx_vstelm_d(dst0, dst, 0, 0);
>> \
>> + dst += dst_stride;
>> \
>> + __lsx_vstelm_d(dst0, dst, 0, 0);
>> \
>> + dst += dst_stride;
>> \
>> + __lsx_vstelm_d(dst0, dst, 0, 0);
>> \
>> +}
>> +
>> +INTRA_DC_TL_8X8(top);
>> +INTRA_DC_TL_8X8(left);
>> +
>> +void ff_dc_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride,
>> + const uint8_t *src_left, const uint8_t *src_top)
>> +{
>> + __m128i tmp0, tmp1, dst0;
>> + ptrdiff_t stride2 = dst_stride << 1;
>> + ptrdiff_t stride3 = stride2 + dst_stride;
>> + ptrdiff_t stride4 = stride2 << 1;
>> +
>> + tmp0 = __lsx_vld(src_top, 0);
>> + tmp1 = __lsx_vld(src_left, 0);
>> + DUP2_ARG2(__lsx_vhaddw_hu_bu, tmp0, tmp0, tmp1, tmp1, tmp0, tmp1);
>> + dst0 = __lsx_vadd_h(tmp0, tmp1);
>> + dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);
>> + dst0 = __lsx_vhaddw_du_wu(dst0, dst0);
>> + dst0 = __lsx_vhaddw_qu_du(dst0, dst0);
>> + dst0 = __lsx_vsrari_w(dst0, 5);
>> + dst0 = __lsx_vreplvei_b(dst0, 0);
>> + LSX_ST_8(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst,
>> + dst_stride, stride2, stride3, stride4);
>> + dst += stride4;
>> + LSX_ST_8(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst,
>> + dst_stride, stride2, stride3, stride4);
>> +}
>> +
>> +#define INTRA_DC_TL_16X16(dir)                                          \
>> +void ff_dc_##dir##_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride,        \
>> +                             const uint8_t *left,                       \
>> +                             const uint8_t *top)                        \
>> +{                                                                       \
>> +    __m128i tmp0, dst0;                                                 \
>> +    ptrdiff_t stride2 = dst_stride << 1;                                \
>> +    ptrdiff_t stride3 = stride2 + dst_stride;                           \
>> +    ptrdiff_t stride4 = stride2 << 1;                                   \
>> +                                                                        \
>> +    tmp0 = __lsx_vld(dir, 0);                                           \
>> +    dst0 = __lsx_vhaddw_hu_bu(tmp0, tmp0);                              \
>> +    dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);                              \
>> +    dst0 = __lsx_vhaddw_du_wu(dst0, dst0);                              \
>> +    dst0 = __lsx_vhaddw_qu_du(dst0, dst0);                              \
>> +    dst0 = __lsx_vsrari_w(dst0, 4);                                     \
>> +    dst0 = __lsx_vreplvei_b(dst0, 0);                                   \
>> +    LSX_ST_8(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst,       \
>> +             dst_stride, stride2, stride3, stride4);                    \
>> +    dst += stride4;                                                     \
>> +    LSX_ST_8(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst,       \
>> +             dst_stride, stride2, stride3, stride4);                    \
>> +}
>> +
>> +INTRA_DC_TL_16X16(top);
>> +INTRA_DC_TL_16X16(left);
>> +
>> +void ff_dc_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride,
>> + const uint8_t *src_left, const uint8_t *src_top)
>> +{
>> + __m128i tmp0, tmp1, tmp2, tmp3, dst0;
>> +
>> + DUP2_ARG2(__lsx_vld, src_top, 0, src_top, 16, tmp0, tmp1);
>> + DUP2_ARG2(__lsx_vld, src_left, 0, src_left, 16, tmp2, tmp3);
>> + DUP4_ARG2(__lsx_vhaddw_hu_bu, tmp0, tmp0, tmp1, tmp1, tmp2, tmp2,
>> + tmp3, tmp3, tmp0, tmp1, tmp2, tmp3);
>> + DUP2_ARG2(__lsx_vadd_h, tmp0, tmp1, tmp2, tmp3, tmp0, tmp1);
>> + dst0 = __lsx_vadd_h(tmp0, tmp1);
>> + dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);
>> + dst0 = __lsx_vhaddw_du_wu(dst0, dst0);
>> + dst0 = __lsx_vhaddw_qu_du(dst0, dst0);
>> + dst0 = __lsx_vsrari_w(dst0, 6);
>> + dst0 = __lsx_vreplvei_b(dst0, 0);
>> + LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,
>> + dst, dst_stride);
>> + LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,
>> + dst, dst_stride);
>> + LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,
>> + dst, dst_stride);
>> + LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,
>> + dst, dst_stride);
>> +}
>> +
>> +#define INTRA_DC_TL_32X32(dir)                                          \
>> +void ff_dc_##dir##_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride,        \
>> +                             const uint8_t *left,                       \
>> +                             const uint8_t *top)                        \
>> +{                                                                       \
>> +    __m128i tmp0, tmp1, dst0;                                           \
>> +                                                                        \
>> +    DUP2_ARG2(__lsx_vld, dir, 0, dir, 16, tmp0, tmp1);                  \
>> +    DUP2_ARG2(__lsx_vhaddw_hu_bu, tmp0, tmp0, tmp1, tmp1, tmp0, tmp1);  \
>> +    dst0 = __lsx_vadd_h(tmp0, tmp1);                                    \
>> +    dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);                              \
>> +    dst0 = __lsx_vhaddw_du_wu(dst0, dst0);                              \
>> +    dst0 = __lsx_vhaddw_qu_du(dst0, dst0);                              \
>> +    dst0 = __lsx_vsrari_w(dst0, 5);                                     \
>> +    dst0 = __lsx_vreplvei_b(dst0, 0);                                   \
>> +    LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,         \
>> +                dst, dst_stride);                                       \
>> +    LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,         \
>> +                dst, dst_stride);                                       \
>> +    LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,         \
>> +                dst, dst_stride);                                       \
>> +    LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,         \
>> +                dst, dst_stride);                                       \
>> +}
>> +
>> +INTRA_DC_TL_32X32(top);
>> +INTRA_DC_TL_32X32(left);
>> +
>> +#define INTRA_PREDICT_VALDC_16X16_LSX(val)                              \
>> +void ff_dc_##val##_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride,        \
>> +                             const uint8_t *left, const uint8_t *top)   \
>> +{                                                                       \
>> +    __m128i out = __lsx_vldi(val);                                      \
>> +    ptrdiff_t stride2 = dst_stride << 1;                                \
>> +    ptrdiff_t stride3 = stride2 + dst_stride;                           \
>> +    ptrdiff_t stride4 = stride2 << 1;                                   \
>> +                                                                        \
>> +    LSX_ST_8(out, out, out, out, out, out, out, out, dst,               \
>> +             dst_stride, stride2, stride3, stride4);                    \
>> +    dst += stride4;                                                     \
>> +    LSX_ST_8(out, out, out, out, out, out, out, out, dst,               \
>> +             dst_stride, stride2, stride3, stride4);                    \
>> +}
>> +
>> +INTRA_PREDICT_VALDC_16X16_LSX(127);
>> +INTRA_PREDICT_VALDC_16X16_LSX(128);
>> +INTRA_PREDICT_VALDC_16X16_LSX(129);
>> +
>> +#define INTRA_PREDICT_VALDC_32X32_LSX(val)                              \
>> +void ff_dc_##val##_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride,        \
>> +                             const uint8_t *left, const uint8_t *top)   \
>> +{                                                                       \
>> +    __m128i out = __lsx_vldi(val);                                      \
>> +                                                                        \
>> +    LSX_ST_8X16(out, out, out, out, out, out, out, out, dst, dst_stride);\
>> +    LSX_ST_8X16(out, out, out, out, out, out, out, out, dst, dst_stride);\
>> +    LSX_ST_8X16(out, out, out, out, out, out, out, out, dst, dst_stride);\
>> +    LSX_ST_8X16(out, out, out, out, out, out, out, out, dst, dst_stride);\
>> +}
>> +
>> +INTRA_PREDICT_VALDC_32X32_LSX(127);
>> +INTRA_PREDICT_VALDC_32X32_LSX(128);
>> +INTRA_PREDICT_VALDC_32X32_LSX(129);
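
As a reading aid: these fixed-value instantiations cover the cases where one or
both prediction edges fall outside the picture, so the predictor degenerates to
a constant fill. Roughly, in scalar form (illustrative names, not from the
patch):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* ff_dc_{127,128,129}_NxN amount to filling the block with a constant. */
    static void dc_fill_ref(uint8_t *dst, ptrdiff_t stride, int size, int val)
    {
        for (int y = 0; y < size; y++, dst += stride)
            memset(dst, val, size);
    }
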
>> +
>> +void ff_tm_4x4_lsx(uint8_t *dst, ptrdiff_t dst_stride,
>> + const uint8_t *src_left, const uint8_t *src_top_ptr)
>> +{
>> + uint8_t top_left = src_top_ptr[-1];
>> + __m128i tmp0, tmp1, tmp2, tmp3, reg0, reg1;
>> + __m128i src0, src1, src2, src3;
>> + __m128i dst0, dst1, dst2, dst3;
>> +
>> + reg0 = __lsx_vreplgr2vr_h(top_left);
>> + reg1 = __lsx_vld(src_top_ptr, 0);
>> +    DUP4_ARG2(__lsx_vldrepl_b, src_left, 0, src_left, 1, src_left, 2,
>> +              src_left, 3, tmp3, tmp2, tmp1, tmp0);
>> +    DUP4_ARG2(__lsx_vilvl_b, tmp0, reg1, tmp1, reg1, tmp2, reg1, tmp3,
>> +              reg1, src0, src1, src2, src3);
>> +    DUP4_ARG2(__lsx_vhaddw_hu_bu, src0, src0, src1, src1, src2, src2,
>> +              src3, src3, dst0, dst1, dst2, dst3);
>> +    DUP4_ARG2(__lsx_vssub_hu, dst0, reg0, dst1, reg0, dst2, reg0,
>> +              dst3, reg0, dst0, dst1, dst2, dst3);
>> +    DUP4_ARG2(__lsx_vsat_hu, dst0, 7, dst1, 7, dst2, 7, dst3, 7,
>> +              dst0, dst1, dst2, dst3);
>> +    DUP2_ARG2(__lsx_vpickev_b, dst1, dst0, dst3, dst2, dst0, dst1);
>> + __lsx_vstelm_w(dst0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(dst0, dst, 0, 2);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(dst1, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(dst1, dst, 0, 2);
>> +}
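
As a reading aid: TM ("TrueMotion") prediction is per-pixel
left + top - top_left, clipped to 8 bits; the vssub_hu/vsat_hu pair above does
the clipping in the unsigned saturating domain, and the reversed vldrepl_b
loads match the bottom-to-top storage of the left[] edge used by the VP9 C
code. A scalar sketch under that assumption (names are illustrative, not from
the patch):

    #include <stddef.h>
    #include <stdint.h>

    static inline uint8_t clip_u8(int v) { return v < 0 ? 0 : v > 255 ? 255 : v; }

    static void tm_4x4_ref(uint8_t *dst, ptrdiff_t stride,
                           const uint8_t *left, const uint8_t *top)
    {
        int tl = top[-1];
        for (int y = 0; y < 4; y++, dst += stride)
            for (int x = 0; x < 4; x++)
                dst[x] = clip_u8(left[3 - y] + top[x] - tl);
    }
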
>> +
>> +void ff_tm_8x8_lsx(uint8_t *dst, ptrdiff_t dst_stride,
>> + const uint8_t *src_left, const uint8_t *src_top_ptr)
>> +{
>> + uint8_t top_left = src_top_ptr[-1];
>> + __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
>> + __m128i src0, src1, src2, src3, src4, src5, src6, src7;
>> + __m128i reg0, reg1;
>> +
>> + reg0 = __lsx_vreplgr2vr_h(top_left);
>> + reg1 = __lsx_vld(src_top_ptr, 0);
>> +    DUP4_ARG2(__lsx_vldrepl_b, src_left, 0, src_left, 1, src_left, 2,
>> +              src_left, 3, tmp7, tmp6, tmp5, tmp4);
>> +    DUP4_ARG2(__lsx_vldrepl_b, src_left, 4, src_left, 5, src_left, 6,
>> +              src_left, 7, tmp3, tmp2, tmp1, tmp0);
>> +    DUP4_ARG2(__lsx_vilvl_b, tmp0, reg1, tmp1, reg1, tmp2, reg1, tmp3,
>> +              reg1, src0, src1, src2, src3);
>> +    DUP4_ARG2(__lsx_vilvl_b, tmp4, reg1, tmp5, reg1, tmp6, reg1, tmp7,
>> +              reg1, src4, src5, src6, src7);
>> +    DUP4_ARG2(__lsx_vhaddw_hu_bu, src0, src0, src1, src1, src2, src2,
>> +              src3, src3, src0, src1, src2, src3);
>> +    DUP4_ARG2(__lsx_vhaddw_hu_bu, src4, src4, src5, src5, src6, src6,
>> +              src7, src7, src4, src5, src6, src7);
>> +    DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0,
>> +              src3, reg0, src0, src1, src2, src3);
>> +    DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0,
>> +              src7, reg0, src4, src5, src6, src7);
>> +    DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7,
>> +              src0, src1, src2, src3);
>> +    DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7,
>> +              src4, src5, src6, src7);
>> +    DUP4_ARG2(__lsx_vpickev_b, src1, src0, src3, src2, src5, src4,
>> +              src7, src6, src0, src1, src2, src3);
>> + __lsx_vstelm_d(src0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(src0, dst, 0, 1);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(src1, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(src1, dst, 0, 1);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(src2, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(src2, dst, 0, 1);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(src3, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(src3, dst, 0, 1);
>> +}
>> +
>> +void ff_tm_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride,
>> +                     const uint8_t *src_left, const uint8_t *src_top_ptr)
>> +{
>> + uint8_t top_left = src_top_ptr[-1];
>> + __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
>> + __m128i tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15;
>> + __m128i src0, src1, src2, src3, src4, src5, src6, src7;
>> + __m128i reg0, reg1;
>> + ptrdiff_t stride2 = dst_stride << 1;
>> + ptrdiff_t stride3 = stride2 + dst_stride;
>> + ptrdiff_t stride4 = stride2 << 1;
>> +
>> + reg0 = __lsx_vreplgr2vr_h(top_left);
>> + reg1 = __lsx_vld(src_top_ptr, 0);
>> + DUP4_ARG2(__lsx_vldrepl_b, src_left, 0, src_left, 1, src_left, 2,
>> src_left,
>> + 3, tmp15, tmp14, tmp13, tmp12);
>> + DUP4_ARG2(__lsx_vldrepl_b, src_left, 4, src_left, 5, src_left, 6,
>> src_left,
>> + 7, tmp11, tmp10, tmp9, tmp8);
>> + DUP4_ARG2(__lsx_vldrepl_b, src_left, 8, src_left, 9, src_left, 10,
>> + src_left, 11, tmp7, tmp6, tmp5, tmp4);
>> + DUP4_ARG2(__lsx_vldrepl_b, src_left, 12, src_left, 13, src_left,
>> 14,
>> + src_left, 15, tmp3, tmp2, tmp1, tmp0);
>> + DUP4_ARG2(__lsx_vaddwev_h_bu, tmp0, reg1, tmp1, reg1, tmp2, reg1,
>> tmp3,
>> + reg1, src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vaddwod_h_bu, tmp0, reg1, tmp1, reg1, tmp2, reg1,
>> tmp3,
>> + reg1, src4, src5, src6, src7);
>> + DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0,
>> src3, reg0,
>> + src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0,
>> src7, reg0,
>> + src4, src5, src6, src7);
>> + DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7,
>> + src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7,
>> + src4, src5, src6, src7);
>> + DUP4_ARG2(__lsx_vpackev_b, src4, src0, src5, src1, src6, src2,
>> src7, src3,
>> + tmp0, tmp1, tmp2, tmp3);
>> + DUP4_ARG2(__lsx_vaddwev_h_bu, tmp4, reg1, tmp5, reg1, tmp6, reg1,
>> tmp7,
>> + reg1, src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vaddwod_h_bu, tmp4, reg1, tmp5, reg1, tmp6, reg1,
>> tmp7,
>> + reg1, src4, src5, src6, src7);
>> + DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0,
>> src3, reg0,
>> + src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0,
>> src7, reg0,
>> + src4, src5, src6, src7);
>> + DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7,
>> + src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7,
>> + src4, src5, src6, src7);
>> + DUP4_ARG2(__lsx_vpackev_b, src4, src0, src5, src1, src6, src2,
>> src7, src3,
>> + tmp4, tmp5, tmp6, tmp7);
>> + DUP4_ARG2(__lsx_vaddwev_h_bu, tmp8, reg1, tmp9, reg1, tmp10, reg1,
>> tmp11,
>> + reg1, src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vaddwod_h_bu, tmp8, reg1, tmp9, reg1, tmp10, reg1,
>> tmp11,
>> + reg1, src4, src5, src6, src7);
>> + DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0,
>> src3, reg0,
>> + src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0,
>> src7, reg0,
>> + src4, src5, src6, src7);
>> + DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7,
>> + src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7,
>> + src4, src5, src6, src7);
>> + DUP4_ARG2(__lsx_vpackev_b, src4, src0, src5, src1, src6, src2,
>> src7, src3,
>> + tmp8, tmp9, tmp10, tmp11);
>> + DUP4_ARG2(__lsx_vaddwev_h_bu, tmp12, reg1, tmp13, reg1, tmp14,
>> reg1,
>> + tmp15, reg1, src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vaddwod_h_bu, tmp12, reg1, tmp13, reg1, tmp14,
>> reg1,
>> + tmp15, reg1, src4, src5, src6, src7);
>> + DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0,
>> src3, reg0,
>> + src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0,
>> src7, reg0,
>> + src4, src5, src6, src7);
>> + DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7,
>> + src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7,
>> + src4, src5, src6, src7);
>> + DUP4_ARG2(__lsx_vpackev_b, src4, src0, src5, src1, src6, src2,
>> src7, src3,
>> + tmp12, tmp13, tmp14, tmp15);
>> + LSX_ST_8(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, dst,
>> + dst_stride, stride2, stride3, stride4);
>> + dst += stride4;
>> + LSX_ST_8(tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, dst,
>> + dst_stride, stride2, stride3, stride4);
>> +}
>> +
>> +void ff_tm_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride,
>> +                     const uint8_t *src_left, const uint8_t *src_top_ptr)
>> +{
>> + uint8_t top_left = src_top_ptr[-1];
>> + uint32_t loop_cnt;
>> + __m128i tmp0, tmp1, tmp2, tmp3, reg0, reg1, reg2;
>> + __m128i src0, src1, src2, src3, src4, src5, src6, src7;
>> + __m128i dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
>> +
>> + reg0 = __lsx_vreplgr2vr_h(top_left);
>> + DUP2_ARG2(__lsx_vld, src_top_ptr, 0, src_top_ptr, 16, reg1, reg2);
>> +
>> + src_left += 28;
>> + for (loop_cnt = 8; loop_cnt--;) {
>> + DUP4_ARG2(__lsx_vldrepl_b, src_left, 0, src_left, 1, src_left,
>> 2,
>> + src_left, 3, tmp3, tmp2, tmp1, tmp0);
>> + src_left -= 4;
>> + DUP4_ARG2(__lsx_vaddwev_h_bu, tmp0, reg1, tmp1, reg1, tmp2,
>> reg1,
>> + tmp3, reg1, src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vaddwod_h_bu, tmp0, reg1, tmp1, reg1, tmp2,
>> reg1,
>> + tmp3, reg1, src4, src5, src6, src7);
>> + DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0,
>> src3,
>> + reg0, src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0,
>> src7,
>> + reg0, src4, src5, src6, src7);
>> + DUP4_ARG2(__lsx_vaddwev_h_bu, tmp0, reg2, tmp1, reg2, tmp2,
>> reg2,
>> + tmp3, reg2, dst0, dst1, dst2, dst3);
>> + DUP4_ARG2(__lsx_vaddwod_h_bu, tmp0, reg2, tmp1, reg2, tmp2,
>> reg2,
>> + tmp3, reg2, dst4, dst5, dst6, dst7);
>> + DUP4_ARG2(__lsx_vssub_hu, dst0, reg0, dst1, reg0, dst2, reg0,
>> dst3,
>> + reg0, dst0, dst1, dst2, dst3);
>> + DUP4_ARG2(__lsx_vssub_hu, dst4, reg0, dst5, reg0, dst6, reg0,
>> dst7,
>> + reg0, dst4, dst5, dst6, dst7);
>> + DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7,
>> + src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7,
>> + src4, src5, src6, src7);
>> + DUP4_ARG2(__lsx_vsat_hu, dst0, 7, dst1, 7, dst2, 7, dst3, 7,
>> + dst0, dst1, dst2, dst3);
>> + DUP4_ARG2(__lsx_vsat_hu, dst4, 7, dst5, 7, dst6, 7, dst7, 7,
>> + dst4, dst5, dst6, dst7);
>> + DUP4_ARG2(__lsx_vpackev_b, src4, src0, src5, src1, src6, src2,
>> src7,
>> + src3, src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vpackev_b, dst4, dst0, dst5, dst1, dst6, dst2,
>> dst7,
>> + dst3, dst0, dst1, dst2, dst3);
>> + __lsx_vst(src0, dst, 0);
>> + __lsx_vst(dst0, dst, 16);
>> + dst += dst_stride;
>> + __lsx_vst(src1, dst, 0);
>> + __lsx_vst(dst1, dst, 16);
>> + dst += dst_stride;
>> + __lsx_vst(src2, dst, 0);
>> + __lsx_vst(dst2, dst, 16);
>> + dst += dst_stride;
>> + __lsx_vst(src3, dst, 0);
>> + __lsx_vst(dst3, dst, 16);
>> + dst += dst_stride;
>> + }
>> +}
>> diff --git a/libavcodec/loongarch/vp9_mc_lsx.c b/libavcodec/loongarch/vp9_mc_lsx.c
>> new file mode 100644
>> index 0000000000..c6746fd87f
>> --- /dev/null
>> +++ b/libavcodec/loongarch/vp9_mc_lsx.c
>> @@ -0,0 +1,2480 @@
>> +/*
>> + * Copyright (c) 2021 Loongson Technology Corporation Limited
>> + * Contributed by Hao Chen <chenhao@loongson.cn>
>> + *
>> + * This file is part of FFmpeg.
>> + *
>> + * FFmpeg is free software; you can redistribute it and/or
>> + * modify it under the terms of the GNU Lesser General Public
>> + * License as published by the Free Software Foundation; either
>> + * version 2.1 of the License, or (at your option) any later version.
>> + *
>> + * FFmpeg is distributed in the hope that it will be useful,
>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
>> + * Lesser General Public License for more details.
>> + *
>> + * You should have received a copy of the GNU Lesser General Public
>> + * License along with FFmpeg; if not, write to the Free Software
>> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
>> 02110-1301 USA
>> + */
>> +
>> +#include "libavcodec/vp9dsp.h"
>> +#include "libavutil/loongarch/loongson_intrinsics.h"
>> +#include "vp9dsp_loongarch.h"
>> +
>> +static const uint8_t mc_filt_mask_arr[16 * 3] = {
>> + /* 8 width cases */
>> + 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8,
>> + /* 4 width cases */
>> + 0, 1, 1, 2, 2, 3, 3, 4, 16, 17, 17, 18, 18, 19, 19, 20,
>> + /* 4 width cases */
>> + 8, 9, 9, 10, 10, 11, 11, 12, 24, 25, 25, 26, 26, 27, 27, 28
>> +};
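
A note on mc_filt_mask_arr: each 16-byte row feeds __lsx_vshuf_b so that one
shuffle gathers eight overlapping two-pixel windows, letting __lsx_vdp2_h_b
apply a pair of filter taps per lane; the +2/+4/+6 offsets added to mask0
below then cover tap pairs (2,3), (4,5) and (6,7). In the 4-width rows the
indices >= 16 select bytes from the second shuffle operand, so two 4-pixel
rows are filtered in one vector. Per output pixel this reduces to the usual
8-tap convolution; a scalar sketch, not claimed bit-exact at the saturation
corner cases of the vector code (names are illustrative):

    #include <stdint.h>

    static inline uint8_t clip_u8(int v) { return v < 0 ? 0 : v > 255 ? 255 : v; }

    /* filter[] is an 8-tap subpel filter whose taps sum to 128. */
    static void hz_8tap_row_ref(uint8_t *dst, const uint8_t *src,
                                const int8_t *filter, int width)
    {
        for (int x = 0; x < width; x++) {
            int sum = 64;                      /* rounding term for >> 7 */
            for (int k = 0; k < 8; k++)
                sum += src[x + k - 3] * filter[k];
            dst[x] = clip_u8(sum >> 7);
        }
    }
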
>> +
>> +
>> +#define HORIZ_8TAP_4WID_4VECS_FILT(_src0, _src1, _src2, _src3,              \
>> +                                   _mask0, _mask1, _mask2, _mask3,          \
>> +                                   _filter0, _filter1, _filter2, _filter3,  \
>> +                                   _out0, _out1)                            \
>> +{                                                                           \
>> +    __m128i _tmp0, _tmp1, _tmp2, _tmp3, _tmp4, _tmp5, _tmp6, _tmp7;         \
>> +    __m128i _reg0, _reg1, _reg2, _reg3;                                     \
>> +                                                                            \
>> +    DUP2_ARG3(__lsx_vshuf_b, _src1, _src0, _mask0, _src3, _src2, _mask0,    \
>> +              _tmp0, _tmp1);                                                \
>> +    DUP2_ARG2(__lsx_vdp2_h_b, _tmp0, _filter0, _tmp1, _filter0, _reg0, _reg1); \
>> +    DUP2_ARG3(__lsx_vshuf_b, _src1, _src0, _mask1, _src3, _src2, _mask1,    \
>> +              _tmp2, _tmp3);                                                \
>> +    DUP2_ARG3(__lsx_vdp2add_h_b, _reg0, _tmp2, _filter1, _reg1, _tmp3,      \
>> +              _filter1, _reg0, _reg1);                                      \
>> +    DUP2_ARG3(__lsx_vshuf_b, _src1, _src0, _mask2, _src3, _src2, _mask2,    \
>> +              _tmp4, _tmp5);                                                \
>> +    DUP2_ARG2(__lsx_vdp2_h_b, _tmp4, _filter2, _tmp5, _filter2, _reg2, _reg3); \
>> +    DUP2_ARG3(__lsx_vshuf_b, _src1, _src0, _mask3, _src3, _src2, _mask3,    \
>> +              _tmp6, _tmp7);                                                \
>> +    DUP2_ARG3(__lsx_vdp2add_h_b, _reg2, _tmp6, _filter3, _reg3, _tmp7,      \
>> +              _filter3, _reg2, _reg3);                                      \
>> +    DUP2_ARG2(__lsx_vsadd_h, _reg0, _reg2, _reg1, _reg3, _out0, _out1);     \
>> +}
>> +
>> +#define HORIZ_8TAP_8WID_4VECS_FILT(_src0, _src1, _src2, _src3,              \
>> +                                   _mask0, _mask1, _mask2, _mask3,          \
>> +                                   _filter0, _filter1, _filter2, _filter3,  \
>> +                                   _out0, _out1, _out2, _out3)              \
>> +{                                                                           \
>> +    __m128i _tmp0, _tmp1, _tmp2, _tmp3, _tmp4, _tmp5, _tmp6, _tmp7;         \
>> +    __m128i _reg0, _reg1, _reg2, _reg3, _reg4, _reg5, _reg6, _reg7;         \
>> +                                                                            \
>> +    DUP4_ARG3(__lsx_vshuf_b, _src0, _src0, _mask0, _src1, _src1, _mask0, _src2,\
>> +              _src2, _mask0, _src3, _src3, _mask0, _tmp0, _tmp1, _tmp2, _tmp3);\
>> +    DUP4_ARG2(__lsx_vdp2_h_b, _tmp0, _filter0, _tmp1, _filter0, _tmp2,      \
>> +              _filter0, _tmp3, _filter0, _reg0, _reg1, _reg2, _reg3);       \
>> +    DUP4_ARG3(__lsx_vshuf_b, _src0, _src0, _mask2, _src1, _src1, _mask2, _src2,\
>> +              _src2, _mask2, _src3, _src3, _mask2, _tmp0, _tmp1, _tmp2, _tmp3);\
>> +    DUP4_ARG2(__lsx_vdp2_h_b, _tmp0, _filter2, _tmp1, _filter2, _tmp2,      \
>> +              _filter2, _tmp3, _filter2, _reg4, _reg5, _reg6, _reg7);       \
>> +    DUP4_ARG3(__lsx_vshuf_b, _src0, _src0, _mask1, _src1, _src1, _mask1, _src2,\
>> +              _src2, _mask1, _src3, _src3, _mask1, _tmp4, _tmp5, _tmp6, _tmp7);\
>> +    DUP4_ARG3(__lsx_vdp2add_h_b, _reg0, _tmp4, _filter1, _reg1, _tmp5,      \
>> +              _filter1, _reg2, _tmp6, _filter1, _reg3, _tmp7, _filter1, _reg0, \
>> +              _reg1, _reg2, _reg3);                                         \
>> +    DUP4_ARG3(__lsx_vshuf_b, _src0, _src0, _mask3, _src1, _src1, _mask3, _src2,\
>> +              _src2, _mask3, _src3, _src3, _mask3, _tmp4, _tmp5, _tmp6, _tmp7);\
>> +    DUP4_ARG3(__lsx_vdp2add_h_b, _reg4, _tmp4, _filter3, _reg5, _tmp5,      \
>> +              _filter3, _reg6, _tmp6, _filter3, _reg7, _tmp7, _filter3, _reg4, \
>> +              _reg5, _reg6, _reg7);                                         \
>> +    DUP4_ARG2(__lsx_vsadd_h, _reg0, _reg4, _reg1, _reg5, _reg2, _reg6, _reg3, \
>> +              _reg7, _out0, _out1, _out2, _out3);                           \
>> +}
>> +
>> +#define FILT_8TAP_DPADD_S_H(_reg0, _reg1, _reg2, _reg3,                     \
>> +                            _filter0, _filter1, _filter2, _filter3)         \
>> +( {                                                                         \
>> +    __m128i _vec0, _vec1;                                                   \
>> +                                                                            \
>> +    _vec0 = __lsx_vdp2_h_b(_reg0, _filter0);                                \
>> +    _vec0 = __lsx_vdp2add_h_b(_vec0, _reg1, _filter1);                      \
>> +    _vec1 = __lsx_vdp2_h_b(_reg2, _filter2);                                \
>> +    _vec1 = __lsx_vdp2add_h_b(_vec1, _reg3, _filter3);                      \
>> +    _vec0 = __lsx_vsadd_h(_vec0, _vec1);                                    \
>> +                                                                            \
>> +    _vec0;                                                                  \
>> +} )
>> +
>> +#define HORIZ_8TAP_FILT(_src0, _src1, _mask0, _mask1, _mask2, _mask3,       \
>> +                        _filt_h0, _filt_h1, _filt_h2, _filt_h3)             \
>> +( {                                                                         \
>> +    __m128i _tmp0, _tmp1, _tmp2, _tmp3;                                     \
>> +    __m128i _out;                                                           \
>> +                                                                            \
>> +    DUP4_ARG3(__lsx_vshuf_b, _src1, _src0, _mask0, _src1, _src0, _mask1, _src1,\
>> +              _src0, _mask2, _src1, _src0, _mask3, _tmp0, _tmp1, _tmp2, _tmp3);\
>> +    _out = FILT_8TAP_DPADD_S_H(_tmp0, _tmp1, _tmp2, _tmp3, _filt_h0, _filt_h1, \
>> +                               _filt_h2, _filt_h3);                         \
>> +    _out = __lsx_vsrari_h(_out, 7);                                         \
>> +    _out = __lsx_vsat_h(_out, 7);                                           \
>> +                                                                            \
>> +    _out;                                                                   \
>> +} )
>> +
>> +#define LSX_LD_4(_src, _stride, _src0, _src1, _src2, _src3)                 \
>> +{                                                                           \
>> +    _src0 = __lsx_vld(_src, 0);                                             \
>> +    _src += _stride;                                                        \
>> +    _src1 = __lsx_vld(_src, 0);                                             \
>> +    _src += _stride;                                                        \
>> +    _src2 = __lsx_vld(_src, 0);                                             \
>> +    _src += _stride;                                                        \
>> +    _src3 = __lsx_vld(_src, 0);                                             \
>> +}
>> +
>> +static void common_hz_8t_4x4_lsx(const uint8_t *src, int32_t src_stride,
>> +                                 uint8_t *dst, int32_t dst_stride,
>> +                                 const int8_t *filter)
>> +{
>> + __m128i src0, src1, src2, src3;
>> + __m128i filter0, filter1, filter2, filter3;
>> + __m128i mask0, mask1, mask2, mask3;
>> + __m128i out, out0, out1;
>> +
>> + mask0 = __lsx_vld(mc_filt_mask_arr, 16);
>> + src -= 3;
>> +    DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
>> +              filter0, filter1, filter2, filter3);
>> +    DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
>> +    mask3 = __lsx_vaddi_bu(mask0, 6);
>> +
>> +    LSX_LD_4(src, src_stride, src0, src1, src2, src3);
>> +    DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
>> +              src0, src1, src2, src3);
>> +    HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
>> +                               mask3, filter0, filter1, filter2, filter3,
>> +                               out0, out1);
>> + out = __lsx_vssrarni_b_h(out1, out0, 7);
>> + out = __lsx_vxori_b(out, 128);
>> + __lsx_vstelm_w(out, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(out, dst, 0, 1);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(out, dst, 0, 2);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(out, dst, 0, 3);
>> +}
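
On the recurring __lsx_vxori_b(..., 128): the LSX dot-product instructions take
signed bytes, so pixels are biased into signed range before filtering and the
bias is undone after the rounding shift; because the taps sum to 128 this
round-trip is value-preserving (up to saturation). A small self-check of that
identity, with made-up taps that sum to 128 (not actual VP9 coefficients) and
assuming arithmetic right shift as the hardware instruction performs:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        static const int8_t  f[8] = { -2, 6, -12, 36, 110, -16, 8, -2 };
        static const uint8_t p[8] = { 0, 17, 80, 255, 3, 128, 200, 64 };
        int biased = 64, direct = 64;          /* 64 = rounding for >> 7 */

        for (int k = 0; k < 8; k++) {
            biased += (p[k] - 128) * f[k];     /* what the vector code sums */
            direct += p[k] * f[k];             /* plain unsigned filtering  */
        }
        assert(((biased >> 7) + 128) == (direct >> 7));
        return 0;
    }
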
>> +
>> +static void common_hz_8t_4x8_lsx(const uint8_t *src, int32_t src_stride,
>> +                                 uint8_t *dst, int32_t dst_stride,
>> +                                 const int8_t *filter)
>> +{
>> + int32_t src_stride2 = src_stride << 1;
>> + int32_t src_stride3 = src_stride + src_stride2;
>> + int32_t src_stride4 = src_stride2 << 1;
>> + __m128i src0, src1, src2, src3;
>> + __m128i filter0, filter1, filter2, filter3;
>> + __m128i mask0, mask1, mask2, mask3;
>> + __m128i out0, out1, out2, out3;
>> + uint8_t *_src = (uint8_t*)src - 3;
>> +
>> + mask0 = __lsx_vld(mc_filt_mask_arr, 16);
>> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
>> + mask3 = __lsx_vaddi_bu(mask0, 6);
>> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
>> filter, 6,
>> + filter0, filter1, filter2, filter3);
>> +
>> + src0 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1,
>> src2);
>> + src3 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3,
>> 128,
>> + src0, src1, src2, src3);
>> + HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1,
>> mask2,
>> + mask3, filter0, filter1, filter2, filter3, out0,
>> out1);
>> + src0 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1,
>> src2);
>> + src3 = __lsx_vldx(_src, src_stride3);
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3,
>> 128,
>> + src0, src1, src2, src3);
>> + HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1,
>> mask2,
>> + mask3, filter0, filter1, filter2, filter3, out2,
>> out3);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7, out0,
>> out1);
>> + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
>> + __lsx_vstelm_w(out0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(out0, dst, 0, 1);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(out0, dst, 0, 2);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(out0, dst, 0, 3);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(out1, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(out1, dst, 0, 1);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(out1, dst, 0, 2);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(out1, dst, 0, 3);
>> +}
>> +
>> +static void common_hz_8t_4w_lsx(const uint8_t *src, int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter, int32_t height)
>> +{
>> + if (height == 4) {
>> + common_hz_8t_4x4_lsx(src, src_stride, dst, dst_stride, filter);
>> + } else if (height == 8) {
>> + common_hz_8t_4x8_lsx(src, src_stride, dst, dst_stride, filter);
>> + }
>> +}
>> +
>> +static void common_hz_8t_8x4_lsx(const uint8_t *src, int32_t src_stride,
>> +                                 uint8_t *dst, int32_t dst_stride,
>> +                                 const int8_t *filter)
>> +{
>> + __m128i src0, src1, src2, src3;
>> + __m128i filter0, filter1, filter2, filter3;
>> + __m128i mask0, mask1, mask2, mask3;
>> + __m128i out0, out1, out2, out3;
>> +
>> + mask0 = __lsx_vld(mc_filt_mask_arr, 0);
>> + src -= 3;
>> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
>> + mask3 = __lsx_vaddi_bu(mask0, 6);
>> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
>> filter, 6,
>> + filter0, filter1, filter2, filter3);
>> +
>> + LSX_LD_4(src, src_stride, src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3,
>> 128,
>> + src0, src1, src2, src3);
>> + HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1,
>> mask2,
>> + mask3, filter0, filter1, filter2, filter3, out0, out1, out2,
>> out3);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7, out0,
>> out1);
>> + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
>> + __lsx_vstelm_d(out0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(out0, dst, 0, 1);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(out1, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(out1, dst, 0, 1);
>> +}
>> +
>> +static void common_hz_8t_8x8mult_lsx(const uint8_t *src, int32_t src_stride,
>> +                                     uint8_t *dst, int32_t dst_stride,
>> +                                     const int8_t *filter, int32_t height)
>> +{
>> + uint32_t loop_cnt = height >> 2;
>> + int32_t src_stride2 = src_stride << 1;
>> + int32_t src_stride3 = src_stride + src_stride2;
>> + int32_t src_stride4 = src_stride2 << 1;
>> + __m128i src0, src1, src2, src3;
>> + __m128i filter0, filter1, filter2, filter3;
>> + __m128i mask0, mask1, mask2, mask3;
>> + __m128i out0, out1, out2, out3;
>> + uint8_t* _src = (uint8_t*)src - 3;
>> +
>> + mask0 = __lsx_vld(mc_filt_mask_arr, 0);
>> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
>> + mask3 = __lsx_vaddi_bu(mask0, 6);
>> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
>> filter, 6,
>> + filter0, filter1, filter2, filter3);
>> +
>> + for (; loop_cnt--;) {
>> + src0 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2,
>> src1, src2);
>> + src3 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128,
>> src3, 128,
>> + src0, src1, src2, src3);
>> + HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0,
>> mask1, mask2,
>> + mask3, filter0, filter1, filter2, filter3, out0, out1,
>> out2, out3);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7,
>> out0, out1);
>> + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
>> + __lsx_vstelm_d(out0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(out0, dst, 0, 1);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(out1, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(out1, dst, 0, 1);
>> + dst += dst_stride;
>> + }
>> +}
>> +
>> +static void common_hz_8t_8w_lsx(const uint8_t *src, int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter, int32_t height)
>> +{
>> + if (height == 4) {
>> + common_hz_8t_8x4_lsx(src, src_stride, dst, dst_stride, filter);
>> + } else {
>> + common_hz_8t_8x8mult_lsx(src, src_stride, dst, dst_stride,
>> + filter, height);
>> + }
>> +}
>> +
>> +static void common_hz_8t_16w_lsx(const uint8_t *src, int32_t src_stride,
>> +                                 uint8_t *dst, int32_t dst_stride,
>> +                                 const int8_t *filter, int32_t height)
>> +{
>> + uint32_t loop_cnt = height >> 1;
>> + int32_t stride = src_stride << 1;
>> + __m128i src0, src1, src2, src3;
>> + __m128i filter0, filter1, filter2, filter3;
>> + __m128i mask0, mask1, mask2, mask3;
>> + __m128i out0, out1, out2, out3;
>> +
>> + mask0 = __lsx_vld(mc_filt_mask_arr, 0);
>> + src -= 3;
>> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
>> + mask3 = __lsx_vaddi_bu(mask0, 6);
>> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
>> filter, 6,
>> + filter0, filter1, filter2, filter3);
>> +
>> + for (; loop_cnt--;) {
>> + const uint8_t* _src = src + src_stride;
>> + DUP2_ARG2(__lsx_vld, src, 0, _src, 0, src0, src2);
>> + DUP2_ARG2(__lsx_vld, src, 8, _src, 8, src1, src3);
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128,
>> src3, 128,
>> + src0, src1, src2, src3);
>> + HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0,
>> mask1, mask2,
>> + mask3, filter0, filter1, filter2, filter3, out0, out1,
>> out2, out3);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7,
>> out0, out1);
>> + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
>> + __lsx_vst(out0, dst, 0);
>> + dst += dst_stride;
>> + __lsx_vst(out1, dst, 0);
>> + dst += dst_stride;
>> + src += stride;
>> + }
>> +}
>> +
>> +static void common_hz_8t_32w_lsx(const uint8_t *src, int32_t src_stride,
>> +                                 uint8_t *dst, int32_t dst_stride,
>> +                                 const int8_t *filter, int32_t height)
>> +{
>> + uint32_t loop_cnt = height >> 1;
>> + __m128i src0, src1, src2, src3;
>> + __m128i filter0, filter1, filter2, filter3;
>> + __m128i mask0, mask1, mask2, mask3;
>> + __m128i out0, out1, out2, out3;
>> + __m128i shuff = {0x0F0E0D0C0B0A0908, 0x1716151413121110};
>> +
>> + mask0 = __lsx_vld(mc_filt_mask_arr, 0);
>> + src -= 3;
>> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
>> + mask3 = __lsx_vaddi_bu(mask0, 6);
>> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
>> filter, 6,
>> + filter0, filter1, filter2, filter3);
>> +
>> + for (; loop_cnt--;) {
>> + DUP2_ARG2(__lsx_vld, src, 0, src, 16, src0, src2);
>> + src3 = __lsx_vld(src, 24);
>> + src1 = __lsx_vshuf_b(src2, src0, shuff);
>> + src += src_stride;
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128,
>> src3, 128,
>> + src0, src1, src2, src3);
>> + HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0,
>> mask1, mask2,
>> + mask3, filter0, filter1, filter2, filter3, out0, out1,
>> out2, out3);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7,
>> out0, out1);
>> + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
>> + __lsx_vst(out0, dst, 0);
>> + __lsx_vst(out1, dst, 16);
>> +
>> + DUP2_ARG2(__lsx_vld, src, 0, src, 16, src0, src2);
>> + src3 = __lsx_vld(src, 24);
>> + src1 = __lsx_vshuf_b(src2, src0, shuff);
>> + src += src_stride;
>> +
>> + dst += dst_stride;
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128,
>> src3, 128,
>> + src0, src1, src2, src3);
>> + HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0,
>> mask1, mask2,
>> + mask3, filter0, filter1, filter2, filter3, out0, out1,
>> out2, out3);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7,
>> out0, out1);
>> + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
>> + __lsx_vst(out0, dst, 0);
>> + __lsx_vst(out1, dst, 16);
>> + dst += dst_stride;
>> + }
>> +}
>> +
>> +static void common_hz_8t_64w_lsx(const uint8_t *src, int32_t src_stride,
>> +                                 uint8_t *dst, int32_t dst_stride,
>> +                                 const int8_t *filter, int32_t height)
>> +{
>> + int32_t loop_cnt = height;
>> + __m128i src0, src1, src2, src3;
>> + __m128i filter0, filter1, filter2, filter3;
>> + __m128i mask0, mask1, mask2, mask3;
>> + __m128i out0, out1, out2, out3;
>> + __m128i shuff = {0x0F0E0D0C0B0A0908, 0x1716151413121110};
>> +
>> + mask0 = __lsx_vld(mc_filt_mask_arr, 0);
>> + src -= 3;
>> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
>> + mask3 = __lsx_vaddi_bu(mask0, 6);
>> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
>> filter, 6,
>> + filter0, filter1, filter2, filter3);
>> +
>> + for (; loop_cnt--;) {
>> + DUP2_ARG2(__lsx_vld, src, 0, src, 16, src0, src2);
>> + src3 = __lsx_vld(src, 24);
>> + src1 = __lsx_vshuf_b(src2, src0, shuff);
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128,
>> src3, 128,
>> + src0, src1, src2, src3);
>> + HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0,
>> mask1, mask2,
>> + mask3, filter0, filter1, filter2, filter3, out0, out1,
>> out2, out3);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7,
>> out0, out1);
>> + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
>> + __lsx_vst(out0, dst, 0);
>> + __lsx_vst(out1, dst, 16);
>> +
>> + DUP2_ARG2(__lsx_vld, src, 32, src, 48, src0, src2);
>> + src3 = __lsx_vld(src, 56);
>> + src1 = __lsx_vshuf_b(src2, src0, shuff);
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128,
>> src3, 128,
>> + src0, src1, src2, src3);
>> + HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0,
>> mask1, mask2,
>> + mask3, filter0, filter1, filter2, filter3, out0, out1,
>> out2, out3);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7,
>> out0, out1);
>> + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
>> + __lsx_vst(out0, dst, 32);
>> + __lsx_vst(out1, dst, 48);
>> + src += src_stride;
>> + dst += dst_stride;
>> + }
>> +}
>> +
>> +static void common_vt_8t_4w_lsx(const uint8_t *src, int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter, int32_t height)
>> +{
>> + uint32_t loop_cnt = height >> 2;
>> + int32_t src_stride2 = src_stride << 1;
>> + int32_t src_stride3 = src_stride + src_stride2;
>> + int32_t src_stride4 = src_stride2 << 1;
>> + __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8,
>> src9, src10;
>> + __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
>> + __m128i reg0, reg1, reg2, reg3, reg4;
>> + __m128i filter0, filter1, filter2, filter3;
>> + __m128i out0, out1;
>> + uint8_t* _src = (uint8_t*)src - src_stride3;
>> +
>> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
>> filter, 6,
>> + filter0, filter1, filter2, filter3);
>> + src0 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1,
>> src2);
>> + src3 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> + src4 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src5,
>> src6);
>> + _src += src_stride3;
>> + DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src5, src4, src2,
>> src1, tmp0,
>> + tmp1, tmp2, tmp3);
>> + DUP2_ARG2(__lsx_vilvl_b, src4, src3, src6, src5, tmp4, tmp5);
>> + DUP2_ARG2(__lsx_vilvl_d, tmp3, tmp0, tmp4, tmp1, reg0, reg1);
>> + reg2 = __lsx_vilvl_d(tmp5, tmp2);
>> + DUP2_ARG2(__lsx_vxori_b, reg0, 128, reg1, 128, reg0, reg1);
>> + reg2 = __lsx_vxori_b(reg2, 128);
>> +
>> + for (;loop_cnt--;) {
>> + src7 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2,
>> src8, src9);
>> + src10 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> + DUP4_ARG2(__lsx_vilvl_b, src7, src6, src8, src7, src9, src8,
>> src10,
>> + src9, tmp0, tmp1, tmp2, tmp3);
>> + DUP2_ARG2(__lsx_vilvl_d, tmp1, tmp0, tmp3, tmp2, reg3, reg4);
>> + DUP2_ARG2(__lsx_vxori_b, reg3, 128, reg4, 128, reg3, reg4);
>> + out0 = FILT_8TAP_DPADD_S_H(reg0, reg1, reg2, reg3, filter0,
>> filter1,
>> + filter2, filter3);
>> + out1 = FILT_8TAP_DPADD_S_H(reg1, reg2, reg3, reg4, filter0,
>> filter1,
>> + filter2, filter3);
>> + out0 = __lsx_vssrarni_b_h(out1, out0, 7);
>> + out0 = __lsx_vxori_b(out0, 128);
>> + __lsx_vstelm_w(out0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(out0, dst, 0, 1);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(out0, dst, 0, 2);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(out0, dst, 0, 3);
>> + dst += dst_stride;
>> +
>> + reg0 = reg2;
>> + reg1 = reg3;
>> + reg2 = reg4;
>> + src6 = src10;
>> + }
>> +}
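
The vertical paths above keep a seven-row history in registers and rotate it
(reg0..reg2 at the bottom of the loop), so each source row is loaded only once
while four output rows are produced per iteration. The per-pixel arithmetic is
the same 8-tap convolution as the horizontal case, taken down a column; a
rough scalar model, ignoring the xor-128 bias and intermediate saturation of
the vector code (names are illustrative, not from the patch):

    #include <stddef.h>
    #include <stdint.h>

    static inline uint8_t clip_u8(int v) { return v < 0 ? 0 : v > 255 ? 255 : v; }

    /* src points at the first output row; rows -3..+4 around it are read. */
    static void vt_8tap_ref(uint8_t *dst, ptrdiff_t dst_stride,
                            const uint8_t *src, ptrdiff_t src_stride,
                            const int8_t *filter, int width, int height)
    {
        for (int y = 0; y < height; y++)
            for (int x = 0; x < width; x++) {
                int sum = 64;
                for (int k = 0; k < 8; k++)
                    sum += src[(y + k - 3) * src_stride + x] * filter[k];
                dst[y * dst_stride + x] = clip_u8(sum >> 7);
            }
    }
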
>> +
>> +static void common_vt_8t_8w_lsx(const uint8_t *src, int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter, int32_t height)
>> +{
>> + uint32_t loop_cnt = height >> 2;
>> + __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8,
>> src9, src10;
>> + __m128i tmp0, tmp1, tmp2, tmp3;
>> + __m128i reg0, reg1, reg2, reg3, reg4, reg5;
>> + __m128i filter0, filter1, filter2, filter3;
>> + __m128i out0, out1, out2, out3;
>> + int32_t src_stride2 = src_stride << 1;
>> + int32_t src_stride3 = src_stride + src_stride2;
>> + int32_t src_stride4 = src_stride2 << 1;
>> + uint8_t* _src = (uint8_t*)src - src_stride3;
>> +
>> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
>> filter, 6,
>> + filter0, filter1, filter2, filter3);
>> +
>> + src0 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1,
>> src2);
>> + src3 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> + src4 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src5,
>> src6);
>> + _src += src_stride3;
>> +
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3,
>> 128,
>> + src0, src1, src2, src3);
>> + DUP2_ARG2(__lsx_vxori_b, src4, 128, src5, 128, src4, src5);
>> + src6 = __lsx_vxori_b(src6, 128);
>> + DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src5, src4, src2,
>> src1,
>> + reg0, reg1, reg2, reg3);
>> + DUP2_ARG2(__lsx_vilvl_b, src4, src3, src6, src5, reg4, reg5);
>> +
>> + for (;loop_cnt--;) {
>> + src7 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2,
>> src8, src9);
>> + src10 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> + DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128,
>> src10, 128,
>> + src7, src8, src9, src10);
>> + DUP4_ARG2(__lsx_vilvl_b, src7, src6, src8, src7, src9, src8,
>> src10,
>> + src9, tmp0, tmp1, tmp2, tmp3);
>> + out0 = FILT_8TAP_DPADD_S_H(reg0, reg1, reg2, tmp0, filter0,
>> filter1,
>> + filter2, filter3);
>> + out1 = FILT_8TAP_DPADD_S_H(reg3, reg4, reg5, tmp1, filter0,
>> filter1,
>> + filter2, filter3);
>> + out2 = FILT_8TAP_DPADD_S_H(reg1, reg2, tmp0, tmp2, filter0,
>> filter1,
>> + filter2, filter3);
>> + out3 = FILT_8TAP_DPADD_S_H(reg4, reg5, tmp1, tmp3, filter0,
>> filter1,
>> + filter2, filter3);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7,
>> out0, out1);
>> + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
>> + __lsx_vstelm_d(out0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(out0, dst, 0, 1);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(out1, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(out1, dst, 0, 1);
>> + dst += dst_stride;
>> +
>> + reg0 = reg2;
>> + reg1 = tmp0;
>> + reg2 = tmp2;
>> + reg3 = reg5;
>> + reg4 = tmp1;
>> + reg5 = tmp3;
>> + src6 = src10;
>> + }
>> +}
>> +
>> +static void common_vt_8t_16w_lsx(const uint8_t *src, int32_t src_stride,
>> +                                 uint8_t *dst, int32_t dst_stride,
>> +                                 const int8_t *filter, int32_t height)
>> +{
>> + uint32_t loop_cnt = height >> 2;
>> + __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8,
>> src9, src10;
>> + __m128i filter0, filter1, filter2, filter3;
>> + __m128i reg0, reg1, reg2, reg3, reg4, reg5;
>> + __m128i reg6, reg7, reg8, reg9, reg10, reg11;
>> + __m128i tmp0, tmp1, tmp2, tmp3;
>> + int32_t src_stride2 = src_stride << 1;
>> + int32_t src_stride3 = src_stride + src_stride2;
>> + int32_t src_stride4 = src_stride2 << 1;
>> + uint8_t* _src = (uint8_t*)src - src_stride3;
>> +
>> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
>> filter, 6,
>> + filter0, filter1, filter2, filter3);
>> + src0 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1,
>> src2);
>> + src3 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> + src4 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src5,
>> src6);
>> + _src += src_stride3;
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3,
>> 128,
>> + src0, src1, src2, src3);
>> + DUP2_ARG2(__lsx_vxori_b, src4, 128, src5, 128, src4, src5);
>> + src6 = __lsx_vxori_b(src6, 128);
>> + DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src5, src4, src2,
>> src1,
>> + reg0, reg1, reg2, reg3);
>> + DUP2_ARG2(__lsx_vilvl_b, src4, src3, src6, src5, reg4, reg5);
>> + DUP4_ARG2(__lsx_vilvh_b, src1, src0, src3, src2, src5, src4, src2,
>> src1,
>> + reg6, reg7, reg8, reg9);
>> + DUP2_ARG2(__lsx_vilvh_b, src4, src3, src6, src5, reg10, reg11);
>> +
>> + for (;loop_cnt--;) {
>> + src7 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2,
>> src8, src9);
>> + src10 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> + DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128,
>> src10, 128,
>> + src7, src8, src9, src10);
>> + DUP4_ARG2(__lsx_vilvl_b, src7, src6, src8, src7, src9, src8,
>> src10, src9,
>> + src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vilvh_b, src7, src6, src8, src7, src9, src8,
>> src10, src9,
>> + src4, src5, src7, src8);
>> + tmp0 = FILT_8TAP_DPADD_S_H(reg0, reg1, reg2, src0, filter0,
>> filter1,
>> + filter2, filter3);
>> + tmp1 = FILT_8TAP_DPADD_S_H(reg3, reg4, reg5, src1, filter0,
>> filter1,
>> + filter2, filter3);
>> + tmp2 = FILT_8TAP_DPADD_S_H(reg6, reg7, reg8, src4, filter0,
>> filter1,
>> + filter2, filter3);
>> + tmp3 = FILT_8TAP_DPADD_S_H(reg9, reg10, reg11, src5, filter0,
>> filter1,
>> + filter2, filter3);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, tmp2, tmp0, 7, tmp3, tmp1, 7,
>> tmp0, tmp1);
>> + DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
>> + __lsx_vst(tmp0, dst, 0);
>> + dst += dst_stride;
>> + __lsx_vst(tmp1, dst, 0);
>> + dst += dst_stride;
>> + tmp0 = FILT_8TAP_DPADD_S_H(reg1, reg2, src0, src2, filter0,
>> filter1,
>> + filter2, filter3);
>> + tmp1 = FILT_8TAP_DPADD_S_H(reg4, reg5, src1, src3, filter0,
>> filter1,
>> + filter2, filter3);
>> + tmp2 = FILT_8TAP_DPADD_S_H(reg7, reg8, src4, src7, filter0,
>> filter1,
>> + filter2, filter3);
>> + tmp3 = FILT_8TAP_DPADD_S_H(reg10, reg11, src5, src8, filter0,
>> filter1,
>> + filter2, filter3);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, tmp2, tmp0, 7, tmp3, tmp1, 7,
>> tmp0, tmp1);
>> + DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
>> + __lsx_vst(tmp0, dst, 0);
>> + dst += dst_stride;
>> + __lsx_vst(tmp1, dst, 0);
>> + dst += dst_stride;
>> +
>> + reg0 = reg2;
>> + reg1 = src0;
>> + reg2 = src2;
>> + reg3 = reg5;
>> + reg4 = src1;
>> + reg5 = src3;
>> + reg6 = reg8;
>> + reg7 = src4;
>> + reg8 = src7;
>> + reg9 = reg11;
>> + reg10 = src5;
>> + reg11 = src8;
>> + src6 = src10;
>> + }
>> +}
>> +
>> +static void common_vt_8t_16w_mult_lsx(const uint8_t *src, int32_t src_stride,
>> +                                      uint8_t *dst, int32_t dst_stride,
>> +                                      const int8_t *filter, int32_t height,
>> +                                      int32_t width)
>> +{
>> + uint8_t *src_tmp;
>> + uint8_t *dst_tmp;
>> + uint32_t cnt = width >> 4;
>> + __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8,
>> src9, src10;
>> + __m128i filter0, filter1, filter2, filter3;
>> + __m128i reg0, reg1, reg2, reg3, reg4, reg5;
>> + __m128i reg6, reg7, reg8, reg9, reg10, reg11;
>> + __m128i tmp0, tmp1, tmp2, tmp3;
>> + int32_t src_stride2 = src_stride << 1;
>> + int32_t src_stride3 = src_stride + src_stride2;
>> + int32_t src_stride4 = src_stride2 << 1;
>> + int32_t dst_stride2 = dst_stride << 1;
>> + int32_t dst_stride3 = dst_stride2 + dst_stride;
>> + int32_t dst_stride4 = dst_stride2 << 1;
>> + uint8_t* _src = (uint8_t*)src - src_stride3;
>> +
>> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
>> filter, 6,
>> + filter0, filter1, filter2, filter3);
>> + for (;cnt--;) {
>> + uint32_t loop_cnt = height >> 2;
>> +
>> + src_tmp = _src;
>> + dst_tmp = dst;
>> +
>> + src0 = __lsx_vld(src_tmp, 0);
>> + DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp,
>> src_stride2,
>> + src1, src2);
>> + src3 = __lsx_vldx(src_tmp, src_stride3);
>> + src_tmp += src_stride4;
>> + src4 = __lsx_vld(src_tmp, 0);
>> + DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp,
>> src_stride2,
>> + src5, src6);
>> + src_tmp += src_stride3;
>> +
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128,
>> src3, 128,
>> + src0, src1, src2, src3);
>> + DUP2_ARG2(__lsx_vxori_b, src4, 128, src5, 128, src4, src5);
>> + src6 = __lsx_vxori_b(src6, 128);
>> + DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src5, src4,
>> src2, src1,
>> + reg0, reg1, reg2, reg3);
>> + DUP2_ARG2(__lsx_vilvl_b, src4, src3, src6, src5, reg4, reg5);
>> + DUP4_ARG2(__lsx_vilvh_b, src1, src0, src3, src2, src5, src4,
>> src2, src1,
>> + reg6, reg7, reg8, reg9);
>> + DUP2_ARG2(__lsx_vilvh_b, src4, src3, src6, src5, reg10, reg11);
>> +
>> + for (;loop_cnt--;) {
>> + src7 = __lsx_vld(src_tmp, 0);
>> + DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp,
>> src_stride2,
>> + src8, src9);
>> + src10 = __lsx_vldx(src_tmp, src_stride3);
>> + src_tmp += src_stride4;
>> + DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128,
>> src10,
>> + 128, src7, src8, src9, src10);
>> + DUP4_ARG2(__lsx_vilvl_b, src7, src6, src8, src7, src9,
>> src8,
>> + src10, src9, src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vilvh_b, src7, src6, src8, src7, src9,
>> src8,
>> + src10, src9, src4, src5, src7, src8);
>> + tmp0 = FILT_8TAP_DPADD_S_H(reg0, reg1, reg2, src0, filter0,
>> + filter1, filter2, filter3);
>> + tmp1 = FILT_8TAP_DPADD_S_H(reg3, reg4, reg5, src1, filter0,
>> + filter1, filter2, filter3);
>> + tmp2 = FILT_8TAP_DPADD_S_H(reg6, reg7, reg8, src4, filter0,
>> + filter1, filter2, filter3);
>> + tmp3 = FILT_8TAP_DPADD_S_H(reg9, reg10, reg11, src5,
>> filter0,
>> + filter1, filter2, filter3);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, tmp2, tmp0, 7, tmp3, tmp1, 7,
>> + tmp0, tmp1);
>> + DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
>> + __lsx_vst(tmp0, dst_tmp, 0);
>> + __lsx_vstx(tmp1, dst_tmp, dst_stride);
>> + tmp0 = FILT_8TAP_DPADD_S_H(reg1, reg2, src0, src2, filter0,
>> + filter1, filter2, filter3);
>> + tmp1 = FILT_8TAP_DPADD_S_H(reg4, reg5, src1, src3, filter0,
>> + filter1, filter2, filter3);
>> + tmp2 = FILT_8TAP_DPADD_S_H(reg7, reg8, src4, src7, filter0,
>> + filter1, filter2, filter3);
>> + tmp3 = FILT_8TAP_DPADD_S_H(reg10, reg11, src5, src8,
>> filter0,
>> + filter1, filter2, filter3);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, tmp2, tmp0, 7, tmp3, tmp1, 7,
>> + tmp0, tmp1);
>> + DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
>> + __lsx_vstx(tmp0, dst_tmp, dst_stride2);
>> + __lsx_vstx(tmp1, dst_tmp, dst_stride3);
>> + dst_tmp += dst_stride4;
>> +
>> + reg0 = reg2;
>> + reg1 = src0;
>> + reg2 = src2;
>> + reg3 = reg5;
>> + reg4 = src1;
>> + reg5 = src3;
>> + reg6 = reg8;
>> + reg7 = src4;
>> + reg8 = src7;
>> + reg9 = reg11;
>> + reg10 = src5;
>> + reg11 = src8;
>> + src6 = src10;
>> + }
>> + _src += 16;
>> + dst += 16;
>> + }
>> +}
>> +
>> +static void common_vt_8t_32w_lsx(const uint8_t *src, int32_t src_stride,
>> +                                 uint8_t *dst, int32_t dst_stride,
>> +                                 const int8_t *filter, int32_t height)
>> +{
>> +    common_vt_8t_16w_mult_lsx(src, src_stride, dst, dst_stride,
>> +                              filter, height, 32);
>> +}
>> +
>> +static void common_vt_8t_64w_lsx(const uint8_t *src, int32_t src_stride,
>> +                                 uint8_t *dst, int32_t dst_stride,
>> +                                 const int8_t *filter, int32_t height)
>> +{
>> + common_vt_8t_16w_mult_lsx(src, src_stride, dst, dst_stride,
>> + filter, height, 64);
>> +}
>> +
>> +static void common_hv_8ht_8vt_4w_lsx(const uint8_t *src, int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter_horiz,
>> + const int8_t *filter_vert,
>> + int32_t height)
>> +{
>> + uint32_t loop_cnt = height >> 2;
>> + __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8,
>> src9, src10;
>> + __m128i filt_hz0, filt_hz1, filt_hz2, filt_hz3;
>> + __m128i filt_vt0, filt_vt1, filt_vt2, filt_vt3;
>> + __m128i mask0, mask1, mask2, mask3;
>> + __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
>> + __m128i out0, out1;
>> + __m128i shuff = {0x0F0E0D0C0B0A0908, 0x1716151413121110};
>> + int32_t src_stride2 = src_stride << 1;
>> + int32_t src_stride3 = src_stride + src_stride2;
>> + int32_t src_stride4 = src_stride2 << 1;
>> + uint8_t* _src = (uint8_t*)src - src_stride3 - 3;
>> +
>> + mask0 = __lsx_vld(mc_filt_mask_arr, 16);
>> + DUP4_ARG2(__lsx_vldrepl_h, filter_horiz, 0, filter_horiz, 2,
>> filter_horiz, 4,
>> + filter_horiz, 6, filt_hz0, filt_hz1, filt_hz2, filt_hz3);
>> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
>> + mask3 = __lsx_vaddi_bu(mask0, 6);
>> +
>> + src0 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1,
>> src2);
>> + src3 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> + src4 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src5,
>> src6);
>> + _src += src_stride3;
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3,
>> 128,
>> + src0, src1, src2, src3);
>> + DUP2_ARG2(__lsx_vxori_b, src4, 128, src5, 128, src4, src5);
>> + src6 = __lsx_vxori_b(src6, 128);
>> +
>> + tmp0 = HORIZ_8TAP_FILT(src0, src1, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + tmp2 = HORIZ_8TAP_FILT(src2, src3, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + tmp4 = HORIZ_8TAP_FILT(src4, src5, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + tmp5 = HORIZ_8TAP_FILT(src5, src6, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + DUP2_ARG3(__lsx_vshuf_b, tmp2, tmp0, shuff, tmp4, tmp2, shuff,
>> tmp1, tmp3);
>> + DUP4_ARG2(__lsx_vldrepl_h, filter_vert, 0, filter_vert, 2,
>> filter_vert, 4,
>> + filter_vert, 6, filt_vt0, filt_vt1, filt_vt2, filt_vt3);
>> + DUP2_ARG2(__lsx_vpackev_b, tmp1, tmp0, tmp3, tmp2, tmp0, tmp1);
>> + tmp2 = __lsx_vpackev_b(tmp5, tmp4);
>> +
>> + for (;loop_cnt--;) {
>> + src7 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2,
>> src8, src9);
>> + src10 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> + DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128,
>> src10, 128,
>> + src7, src8, src9, src10);
>> + tmp3 = HORIZ_8TAP_FILT(src7, src8, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + tmp4 = __lsx_vshuf_b(tmp3, tmp5, shuff);
>> + tmp4 = __lsx_vpackev_b(tmp3, tmp4);
>> + out0 = FILT_8TAP_DPADD_S_H(tmp0, tmp1, tmp2, tmp4, filt_vt0,
>> filt_vt1,
>> + filt_vt2, filt_vt3);
>> + src1 = HORIZ_8TAP_FILT(src9, src10, mask0, mask1, mask2, mask3,
>> + filt_hz0, filt_hz1, filt_hz2, filt_hz3);
>> + src0 = __lsx_vshuf_b(src1, tmp3, shuff);
>> + src0 = __lsx_vpackev_b(src1, src0);
>> + out1 = FILT_8TAP_DPADD_S_H(tmp1, tmp2, tmp4, src0, filt_vt0,
>> filt_vt1,
>> + filt_vt2, filt_vt3);
>> + out0 = __lsx_vssrarni_b_h(out1, out0, 7);
>> + out0 = __lsx_vxori_b(out0, 128);
>> + __lsx_vstelm_w(out0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(out0, dst, 0, 1);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(out0, dst, 0, 2);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(out0, dst, 0, 3);
>> + dst += dst_stride;
>> +
>> + tmp5 = src1;
>> + tmp0 = tmp2;
>> + tmp1 = tmp4;
>> + tmp2 = src0;
>> + }
>> +}
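
One point worth flagging for review: the hv paths are separable, but the
horizontal result is rounded back to 8 bits (vsrari_h 7 followed by vsat_h 7)
before the vertical taps are applied, rather than being carried at higher
precision, so bit-exactness should be checked against the C functions this
patch replaces. A two-pass sketch of that structure for a 4x4 output, under
the same assumption and not guaranteed exact at saturation corners
(illustrative names only):

    #include <stddef.h>
    #include <stdint.h>

    static inline int clip(int v, int lo, int hi)
    {
        return v < lo ? lo : v > hi ? hi : v;
    }

    static void hv_8tap_4x4_ref(uint8_t *dst, ptrdiff_t dst_stride,
                                const uint8_t *src, ptrdiff_t src_stride,
                                const int8_t *fh, const int8_t *fv)
    {
        uint8_t mid[11][4];           /* 4 output rows + 7 rows of context */

        for (int y = 0; y < 11; y++)  /* horizontal pass, rounded to 8 bits */
            for (int x = 0; x < 4; x++) {
                int sum = 64;
                for (int k = 0; k < 8; k++)
                    sum += src[(y - 3) * src_stride + x + k - 3] * fh[k];
                mid[y][x] = clip(sum >> 7, 0, 255);
            }
        for (int y = 0; y < 4; y++)   /* vertical pass on the rounded rows */
            for (int x = 0; x < 4; x++) {
                int sum = 64;
                for (int k = 0; k < 8; k++)
                    sum += mid[y + k][x] * fv[k];
                dst[y * dst_stride + x] = clip(sum >> 7, 0, 255);
            }
    }
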
>> +
>> +static void common_hv_8ht_8vt_8w_lsx(const uint8_t *src, int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter_horiz,
>> + const int8_t *filter_vert,
>> + int32_t height)
>> +{
>> + uint32_t loop_cnt = height >> 2;
>> + __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8,
>> src9, src10;
>> + __m128i filt_hz0, filt_hz1, filt_hz2, filt_hz3;
>> + __m128i filt_vt0, filt_vt1, filt_vt2, filt_vt3;
>> + __m128i mask0, mask1, mask2, mask3;
>> + __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
>> + __m128i out0, out1;
>> + int32_t src_stride2 = src_stride << 1;
>> + int32_t src_stride3 = src_stride + src_stride2;
>> + int32_t src_stride4 = src_stride2 << 1;
>> + uint8_t* _src = (uint8_t*)src - src_stride3 - 3;
>> +
>> + mask0 = __lsx_vld(mc_filt_mask_arr, 0);
>> + DUP4_ARG2(__lsx_vldrepl_h, filter_horiz, 0, filter_horiz, 2,
>> filter_horiz,
>> + 4, filter_horiz, 6, filt_hz0, filt_hz1, filt_hz2,
>> filt_hz3);
>> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
>> + mask3 = __lsx_vaddi_bu(mask0, 6);
>> +
>> + src0 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1,
>> src2);
>> + src3 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> + src4 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src5,
>> src6);
>> + _src += src_stride3;
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3,
>> 128,
>> + src0, src1, src2, src3);
>> + DUP2_ARG2(__lsx_vxori_b, src4, 128, src5, 128, src4, src5);
>> + src6 = __lsx_vxori_b(src6, 128);
>> +
>> + src0 = HORIZ_8TAP_FILT(src0, src0, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + src1 = HORIZ_8TAP_FILT(src1, src1, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + src2 = HORIZ_8TAP_FILT(src2, src2, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + src3 = HORIZ_8TAP_FILT(src3, src3, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + src4 = HORIZ_8TAP_FILT(src4, src4, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + src5 = HORIZ_8TAP_FILT(src5, src5, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + src6 = HORIZ_8TAP_FILT(src6, src6, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> +
>> + DUP4_ARG2(__lsx_vldrepl_h, filter_vert, 0, filter_vert, 2,
>> filter_vert, 4,
>> + filter_vert, 6, filt_vt0, filt_vt1, filt_vt2, filt_vt3);
>> + DUP4_ARG2(__lsx_vpackev_b, src1, src0, src3, src2, src5, src4,
>> + src2, src1, tmp0, tmp1, tmp2, tmp4);
>> + DUP2_ARG2(__lsx_vpackev_b, src4, src3, src6, src5, tmp5, tmp6);
>> +
>> + for (;loop_cnt--;) {
>> + src7 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2,
>> src8, src9);
>> + src10 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> +
>> + DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128,
>> src10, 128,
>> + src7, src8, src9, src10);
>> + src7 = HORIZ_8TAP_FILT(src7, src7, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + tmp3 = __lsx_vpackev_b(src7, src6);
>> + out0 = FILT_8TAP_DPADD_S_H(tmp0, tmp1, tmp2, tmp3, filt_vt0,
>> filt_vt1,
>> + filt_vt2, filt_vt3);
>> + src8 = HORIZ_8TAP_FILT(src8, src8, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + src0 = __lsx_vpackev_b(src8, src7);
>> + out1 = FILT_8TAP_DPADD_S_H(tmp4, tmp5, tmp6, src0, filt_vt0,
>> filt_vt1,
>> + filt_vt2, filt_vt3);
>> + src9 = HORIZ_8TAP_FILT(src9, src9, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + src1 = __lsx_vpackev_b(src9, src8);
>> + src3 = FILT_8TAP_DPADD_S_H(tmp1, tmp2, tmp3, src1, filt_vt0,
>> filt_vt1,
>> + filt_vt2, filt_vt3);
>> + src10 = HORIZ_8TAP_FILT(src10, src10, mask0, mask1, mask2,
>> mask3,
>> + filt_hz0, filt_hz1, filt_hz2,
>> filt_hz3);
>> + src2 = __lsx_vpackev_b(src10, src9);
>> + src4 = FILT_8TAP_DPADD_S_H(tmp5, tmp6, src0, src2, filt_vt0,
>> filt_vt1,
>> + filt_vt2, filt_vt3);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, src4, src3, 7,
>> out0, out1);
>> + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
>> + __lsx_vstelm_d(out0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(out0, dst, 0, 1);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(out1, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(out1, dst, 0, 1);
>> + dst += dst_stride;
>> +
>> + src6 = src10;
>> + tmp0 = tmp2;
>> + tmp1 = tmp3;
>> + tmp2 = src1;
>> + tmp4 = tmp6;
>> + tmp5 = src0;
>> + tmp6 = src2;
>> + }
>> +}
>> +
>> +static void common_hv_8ht_8vt_16w_lsx(const uint8_t *src, int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter_horiz,
>> + const int8_t *filter_vert,
>> + int32_t height)
>> +{
>> + int32_t multiple8_cnt;
>> +
>> + for (multiple8_cnt = 2; multiple8_cnt--;) {
>> + common_hv_8ht_8vt_8w_lsx(src, src_stride, dst, dst_stride, filter_horiz,
>> + filter_vert, height);
>> + src += 8;
>> + dst += 8;
>> + }
>> +}
>> +
>> +static void common_hv_8ht_8vt_32w_lsx(const uint8_t *src, int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter_horiz,
>> + const int8_t *filter_vert,
>> + int32_t height)
>> +{
>> + int32_t multiple8_cnt;
>> +
>> + for (multiple8_cnt = 4; multiple8_cnt--;) {
>> + common_hv_8ht_8vt_8w_lsx(src, src_stride, dst, dst_stride, filter_horiz,
>> + filter_vert, height);
>> + src += 8;
>> + dst += 8;
>> + }
>> +}
>> +
>> +static void common_hv_8ht_8vt_64w_lsx(const uint8_t *src, int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter_horiz,
>> + const int8_t *filter_vert,
>> + int32_t height)
>> +{
>> + int32_t multiple8_cnt;
>> +
>> + for (multiple8_cnt = 8; multiple8_cnt--;) {
>> + common_hv_8ht_8vt_8w_lsx(src, src_stride, dst, dst_stride, filter_horiz,
>> + filter_vert, height);
>> + src += 8;
>> + dst += 8;
>> + }
>> +}
>> +
>> +static void copy_width8_lsx(const uint8_t *src, int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + int32_t height)
>> +{
>> + int32_t cnt = height >> 2;
>> + __m128i src0, src1, src2, src3;
>> +
>> + for (;cnt--;) {
>> + src0 = __lsx_vldrepl_d(src, 0);
>> + src += src_stride;
>> + src1 = __lsx_vldrepl_d(src, 0);
>> + src += src_stride;
>> + src2 = __lsx_vldrepl_d(src, 0);
>> + src += src_stride;
>> + src3 = __lsx_vldrepl_d(src, 0);
>> + src += src_stride;
>> + __lsx_vstelm_d(src0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(src1, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(src2, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(src3, dst, 0, 0);
>> + dst += dst_stride;
>> + }
>> +}
>> +
>> +static void copy_width16_lsx(const uint8_t *src, int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + int32_t height)
>> +{
>> + int32_t cnt = height >> 2;
>> + __m128i src0, src1, src2, src3;
>> + int32_t src_stride2 = src_stride << 1;
>> + int32_t src_stride3 = src_stride + src_stride2;
>> + int32_t src_stride4 = src_stride2 << 1;
>> + int32_t dst_stride2 = dst_stride << 1;
>> + int32_t dst_stride3 = dst_stride2 + dst_stride;
>> + int32_t dst_stride4 = dst_stride2 << 1;
>> + uint8_t *_src = (uint8_t*)src;
>> +
>> + for (;cnt--;) {
>> + src0 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1, src2);
>> + src3 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> + __lsx_vst(src0, dst, 0);
>> + __lsx_vstx(src1, dst, dst_stride);
>> + __lsx_vstx(src2, dst, dst_stride2);
>> + __lsx_vstx(src3, dst, dst_stride3);
>> + dst += dst_stride4;
>> + }
>> +}
>> +
>> +static void copy_width32_lsx(const uint8_t *src, int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + int32_t height)
>> +{
>> + int32_t cnt = height >> 2;
>> + uint8_t *src_tmp1 = (uint8_t*)src;
>> + uint8_t *dst_tmp1 = dst;
>> + uint8_t *src_tmp2 = src_tmp1 + 16;
>> + uint8_t *dst_tmp2 = dst_tmp1 + 16;
>> + int32_t src_stride2 = src_stride << 1;
>> + int32_t src_stride3 = src_stride + src_stride2;
>> + int32_t src_stride4 = src_stride2 << 1;
>> + int32_t dst_stride2 = dst_stride << 1;
>> + int32_t dst_stride3 = dst_stride2 + dst_stride;
>> + int32_t dst_stride4 = dst_stride2 << 1;
>> + __m128i src0, src1, src2, src3, src4, src5, src6, src7;
>> +
>> + for (;cnt--;) {
>> + src0 = __lsx_vld(src_tmp1, 0);
>> + DUP2_ARG2(__lsx_vldx, src_tmp1, src_stride, src_tmp1, src_stride2,
>> + src1, src2);
>> + src3 = __lsx_vldx(src_tmp1, src_stride3);
>> + src_tmp1 += src_stride4;
>> +
>> + src4 = __lsx_vld(src_tmp2, 0);
>> + DUP2_ARG2(__lsx_vldx, src_tmp2, src_stride, src_tmp2, src_stride2,
>> + src5, src6);
>> + src7 = __lsx_vldx(src_tmp2, src_stride3);
>> + src_tmp2 += src_stride4;
>> +
>> + __lsx_vst(src0, dst_tmp1, 0);
>> + __lsx_vstx(src1, dst_tmp1, dst_stride);
>> + __lsx_vstx(src2, dst_tmp1, dst_stride2);
>> + __lsx_vstx(src3, dst_tmp1, dst_stride3);
>> + dst_tmp1 += dst_stride4;
>> + __lsx_vst(src4, dst_tmp2, 0);
>> + __lsx_vstx(src5, dst_tmp2, dst_stride);
>> + __lsx_vstx(src6, dst_tmp2, dst_stride2);
>> + __lsx_vstx(src7, dst_tmp2, dst_stride3);
>> + dst_tmp2 += dst_stride4;
>> + }
>> +}
>> +
>> +static void copy_width64_lsx(const uint8_t *src, int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + int32_t height)
>> +{
>> + int32_t cnt = height >> 2;
>> + __m128i src0, src1, src2, src3, src4, src5, src6, src7;
>> + __m128i src8, src9, src10, src11, src12, src13, src14, src15;
>> +
>> + for (;cnt--;) {
>> + DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48,
>> + src0, src1, src2, src3);
>> + src += src_stride;
>> + DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48,
>> + src4, src5, src6, src7);
>> + src += src_stride;
>> + DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48,
>> + src8, src9, src10, src11);
>> + src += src_stride;
>> + DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48,
>> + src12, src13, src14, src15);
>> + src += src_stride;
>> + __lsx_vst(src0, dst, 0);
>> + __lsx_vst(src1, dst, 16);
>> + __lsx_vst(src2, dst, 32);
>> + __lsx_vst(src3, dst, 48);
>> + dst += dst_stride;
>> + __lsx_vst(src4, dst, 0);
>> + __lsx_vst(src5, dst, 16);
>> + __lsx_vst(src6, dst, 32);
>> + __lsx_vst(src7, dst, 48);
>> + dst += dst_stride;
>> + __lsx_vst(src8, dst, 0);
>> + __lsx_vst(src9, dst, 16);
>> + __lsx_vst(src10, dst, 32);
>> + __lsx_vst(src11, dst, 48);
>> + dst += dst_stride;
>> + __lsx_vst(src12, dst, 0);
>> + __lsx_vst(src13, dst, 16);
>> + __lsx_vst(src14, dst, 32);
>> + __lsx_vst(src15, dst, 48);
>> + dst += dst_stride;
>> + }
>> +}
>> +
>> +static void common_hz_8t_and_aver_dst_4x4_lsx(const uint8_t *src,
>> + int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter)
>> +{
>> + uint8_t *dst_tmp = dst;
>> + __m128i src0, src1, src2, src3;
>> + __m128i filter0, filter1, filter2, filter3;
>> + __m128i mask0, mask1, mask2, mask3;
>> + __m128i tmp0, tmp1;
>> + __m128i dst0, dst1, dst2, dst3;
>> +
>> + mask0 = __lsx_vld(mc_filt_mask_arr, 16);
>> + src -= 3;
>> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
>> + mask3 = __lsx_vaddi_bu(mask0, 6);
>> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
>> + filter0, filter1, filter2, filter3);
>> + LSX_LD_4(src, src_stride, src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
>> + src0, src1, src2, src3);
>> + HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3,
>> + filter0, filter1, filter2, filter3, tmp0, tmp1);
>> + dst0 = __lsx_vldrepl_w(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + dst1 = __lsx_vldrepl_w(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + dst2 = __lsx_vldrepl_w(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + dst3 = __lsx_vldrepl_w(dst_tmp, 0);
>> + dst0 = __lsx_vilvl_w(dst1, dst0);
>> + dst1 = __lsx_vilvl_w(dst3, dst2);
>> + dst0 = __lsx_vilvl_d(dst1, dst0);
>> + tmp0 = __lsx_vssrarni_b_h(tmp1, tmp0, 7);
>> + tmp0 = __lsx_vxori_b(tmp0, 128);
>> + dst0 = __lsx_vavgr_bu(tmp0, dst0);
>> + __lsx_vstelm_w(dst0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(dst0, dst, 0, 1);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(dst0, dst, 0, 2);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(dst0, dst, 0, 3);
>> +}
>> +
>> +static void common_hz_8t_and_aver_dst_4x8_lsx(const uint8_t *src,
>> + int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter)
>> +{
>> + uint8_t *dst_tmp = dst;
>> + __m128i src0, src1, src2, src3, filter0, filter1, filter2, filter3;
>> + __m128i mask0, mask1, mask2, mask3, tmp0, tmp1, tmp2, tmp3;
>> + __m128i dst0, dst1;
>> +
>> + mask0 = __lsx_vld(mc_filt_mask_arr, 16);
>> + src -= 3;
>> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
>> + mask3 = __lsx_vaddi_bu(mask0, 6);
>> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
>> + filter0, filter1, filter2, filter3);
>> +
>> + LSX_LD_4(src, src_stride, src0, src1, src2, src3);
>> + src += src_stride;
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
>> + src0, src1, src2, src3);
>> + tmp0 = __lsx_vldrepl_w(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + tmp1 = __lsx_vldrepl_w(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + tmp2 = __lsx_vldrepl_w(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + tmp3 = __lsx_vldrepl_w(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + tmp0 = __lsx_vilvl_w(tmp1, tmp0);
>> + tmp1 = __lsx_vilvl_w(tmp3, tmp2);
>> + dst0 = __lsx_vilvl_d(tmp1, tmp0);
>> +
>> + tmp0 = __lsx_vldrepl_w(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + tmp1 = __lsx_vldrepl_w(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + tmp2 = __lsx_vldrepl_w(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + tmp3 = __lsx_vldrepl_w(dst_tmp, 0);
>> + tmp0 = __lsx_vilvl_w(tmp1, tmp0);
>> + tmp1 = __lsx_vilvl_w(tmp3, tmp2);
>> + dst1 = __lsx_vilvl_d(tmp1, tmp0);
>> + HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3,
>> + filter0, filter1, filter2, filter3, tmp0, tmp1);
>> + LSX_LD_4(src, src_stride, src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
>> + src0, src1, src2, src3);
>> + HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3,
>> + filter0, filter1, filter2, filter3, tmp2, tmp3);
>> + DUP4_ARG3(__lsx_vssrarni_b_h, tmp0, tmp0, 7, tmp1, tmp1, 7, tmp2, tmp2, 7,
>> + tmp3, tmp3, 7, tmp0, tmp1, tmp2, tmp3);
>> + DUP2_ARG2(__lsx_vilvl_d, tmp1, tmp0, tmp3, tmp2, tmp0, tmp1);
>> + DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
>> + DUP2_ARG2(__lsx_vavgr_bu, tmp0, dst0, tmp1, dst1, dst0, dst1);
>> + __lsx_vstelm_w(dst0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(dst0, dst, 0, 1);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(dst0, dst, 0, 2);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(dst0, dst, 0, 3);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(dst1, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(dst1, dst, 0, 1);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(dst1, dst, 0, 2);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(dst1, dst, 0, 3);
>> +}
>> +
>> +static void common_hz_8t_and_aver_dst_4w_lsx(const uint8_t *src,
>> + int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter,
>> + int32_t height)
>> +{
>> + if (height == 4) {
>> + common_hz_8t_and_aver_dst_4x4_lsx(src, src_stride, dst, dst_stride, filter);
>> + } else if (height == 8) {
>> + common_hz_8t_and_aver_dst_4x8_lsx(src, src_stride, dst, dst_stride, filter);
>> + }
>> +}
>> +
>> +static void common_hz_8t_and_aver_dst_8w_lsx(const uint8_t *src,
>> + int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter,
>> + int32_t height)
>> +{
>> + int32_t loop_cnt = height >> 2;
>> + uint8_t *dst_tmp = dst;
>> + __m128i src0, src1, src2, src3, filter0, filter1, filter2, filter3;
>> + __m128i mask0, mask1, mask2, mask3;
>> + __m128i tmp0, tmp1, tmp2, tmp3;
>> + __m128i dst0, dst1, dst2, dst3;
>> + int32_t src_stride2 = src_stride << 1;
>> + int32_t src_stride3 = src_stride2 + src_stride;
>> + int32_t src_stride4 = src_stride2 << 1;
>> + uint8_t *_src = (uint8_t*)src - 3;
>> +
>> + mask0 = __lsx_vld(mc_filt_mask_arr, 0);
>> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
>> + mask3 = __lsx_vaddi_bu(mask0, 6);
>> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
>> + filter0, filter1, filter2, filter3);
>> +
>> + for (;loop_cnt--;) {
>> + src0 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1, src2);
>> + src3 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
>> + src0, src1, src2, src3);
>> + HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2,
>> + mask3,filter0, filter1, filter2, filter3, tmp0, tmp1, tmp2, tmp3);
>> + dst0 = __lsx_vldrepl_d(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + dst1 = __lsx_vldrepl_d(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + dst2 = __lsx_vldrepl_d(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + dst3 = __lsx_vldrepl_d(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + DUP2_ARG2(__lsx_vilvl_d, dst1, dst0, dst3, dst2, dst0, dst1);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, tmp1, tmp0, 7, tmp3, tmp2, 7, tmp0, tmp1);
>> + DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
>> + DUP2_ARG2(__lsx_vavgr_bu, tmp0, dst0, tmp1, dst1, dst0, dst1);
>> + __lsx_vstelm_d(dst0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(dst0, dst, 0, 1);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(dst1, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(dst1, dst, 0, 1);
>> + dst += dst_stride;
>> + }
>> +}
>> +
>> +static void common_hz_8t_and_aver_dst_16w_lsx(const uint8_t *src,
>> + int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter,
>> + int32_t height)
>> +{
>> + int32_t loop_cnt = height >> 1;
>> + int32_t dst_stride2 = dst_stride << 1;
>> + uint8_t *dst_tmp = dst;
>> + __m128i src0, src1, src2, src3, filter0, filter1, filter2, filter3;
>> + __m128i mask0, mask1, mask2, mask3, dst0, dst1, dst2, dst3;
>> + __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
>> + __m128i tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15;
>> +
>> + mask0 = __lsx_vld(mc_filt_mask_arr, 0);
>> + src -= 3;
>> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
>> + mask3 = __lsx_vaddi_bu(mask0, 6);
>> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4, filter, 6,
>> + filter0, filter1, filter2, filter3);
>> +
>> + for (;loop_cnt--;) {
>> + DUP2_ARG2(__lsx_vld, src, 0, src, 8, src0, src1);
>> + src += src_stride;
>> + DUP2_ARG2(__lsx_vld, src, 0, src, 8, src2, src3);
>> + src += src_stride;
>> + dst0 = __lsx_vld(dst_tmp, 0);
>> + dst1 = __lsx_vldx(dst_tmp, dst_stride);
>> + dst_tmp += dst_stride2;
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3, 128,
>> + src0, src1, src2, src3);
>> + DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask0, src1, src1, mask0, src2, src2,
>> + mask0, src3, src3, mask0, tmp0, tmp1, tmp2, tmp3);
>> + DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask1, src1, src1, mask1, src2, src2,
>> + mask1, src3, src3, mask1, tmp4, tmp5, tmp6, tmp7);
>> + DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask2, src1, src1, mask2, src2, src2,
>> + mask2, src3, src3, mask2, tmp8, tmp9, tmp10, tmp11);
>> + DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask3, src1, src1, mask3, src2, src2,
>> + mask3, src3, src3, mask3, tmp12, tmp13, tmp14, tmp15);
>> + DUP4_ARG2(__lsx_vdp2_h_b, tmp0, filter0, tmp1, filter0, tmp2, filter0, tmp3,
>> + filter0, tmp0, tmp1, tmp2, tmp3);
>> + DUP4_ARG2(__lsx_vdp2_h_b, tmp8, filter2, tmp9, filter2, tmp10, filter2, tmp11,
>> + filter2, tmp8, tmp9, tmp10, tmp11);
>> + DUP4_ARG3(__lsx_vdp2add_h_b, tmp0, tmp4, filter1, tmp1, tmp5, filter1, tmp2,
>> + tmp6, filter1, tmp3, tmp7, filter1, tmp0, tmp1, tmp2, tmp3);
>> + DUP4_ARG3(__lsx_vdp2add_h_b, tmp8, tmp12, filter3, tmp9, tmp13, filter3, tmp10,
>> + tmp14, filter3, tmp11, tmp15, filter3, tmp4, tmp5, tmp6, tmp7);
>> + DUP4_ARG2(__lsx_vsadd_h, tmp0, tmp4, tmp1, tmp5, tmp2, tmp6, tmp3, tmp7,
>> + tmp0, tmp1, tmp2, tmp3);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, tmp1, tmp0, 7, tmp3, tmp2, 7, dst2, dst3);
>> + DUP2_ARG2(__lsx_vxori_b, dst2, 128, dst3, 128, dst2, dst3);
>> + DUP2_ARG2(__lsx_vavgr_bu, dst0, dst2, dst1, dst3, dst0, dst1);
>> + __lsx_vst(dst0, dst, 0);
>> + __lsx_vstx(dst1, dst, dst_stride);
>> + dst += dst_stride2;
>> + }
>> +}
>> +
>> +static void common_hz_8t_and_aver_dst_32w_lsx(const uint8_t *src,
>> + int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter,
>> + int32_t height)
>> +{
>> + uint32_t loop_cnt = height;
>> + uint8_t *dst_tmp = dst;
>> + __m128i src0, src1, src2, src3, filter0, filter1, filter2, filter3;
>> + __m128i mask0, mask1, mask2, mask3, dst0, dst1;
>> + __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
>> + __m128i tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15;
>> + __m128i shuff = {0x0F0E0D0C0B0A0908, 0x1716151413121110};
>> +
>> + mask0 = __lsx_vld(mc_filt_mask_arr, 0);
>> + src -= 3;
>> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
>> + mask3 = __lsx_vaddi_bu(mask0, 6);
>> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
>> filter, 6,
>> + filter0, filter1, filter2, filter3);
>> +
>> + for (;loop_cnt--;) {
>> + DUP2_ARG2(__lsx_vld, src, 0, src, 16, src0, src2);
>> + src3 = __lsx_vld(src, 24);
>> + src1 = __lsx_vshuf_b(src2, src0, shuff);
>> + src += src_stride;
>> + DUP2_ARG2(__lsx_vld, dst_tmp, 0, dst, 16, dst0, dst1);
>> + dst_tmp += dst_stride;
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128,
>> src3, 128,
>> + src0, src1, src2, src3);
>> + DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask0, src1, src1, mask0,
>> src2,
>> + src2, mask0, src3, src3, mask0, tmp0, tmp1, tmp2,
>> tmp3);
>> + DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask1, src1, src1, mask1,
>> src2,
>> + src2, mask1, src3, src3, mask1, tmp4, tmp5, tmp6,
>> tmp7);
>> + DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask2, src1, src1, mask2,
>> src2,
>> + src2, mask2, src3, src3, mask2, tmp8, tmp9, tmp10,
>> tmp11);
>> + DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask3, src1, src1, mask3,
>> src2,
>> + src2, mask3, src3, src3, mask3, tmp12, tmp13, tmp14,
>> tmp15);
>> + DUP4_ARG2(__lsx_vdp2_h_b, tmp0, filter0, tmp1, filter0, tmp2,
>> filter0,
>> + tmp3, filter0, tmp0, tmp1, tmp2, tmp3);
>> + DUP4_ARG2(__lsx_vdp2_h_b, tmp8, filter2, tmp9, filter2, tmp10,
>> filter2,
>> + tmp11, filter2, tmp8, tmp9, tmp10, tmp11);
>> + DUP4_ARG3(__lsx_vdp2add_h_b, tmp0, tmp4, filter1, tmp1, tmp5,
>> filter1,
>> + tmp2, tmp6, filter1, tmp3, tmp7, filter1, tmp0, tmp1,
>> tmp2, tmp3);
>> + DUP4_ARG3(__lsx_vdp2add_h_b, tmp8, tmp12, filter3, tmp9,
>> tmp13, filter3,
>> + tmp10, tmp14, filter3, tmp11, tmp15, filter3, tmp4, tmp5,
>> tmp6, tmp7);
>> + DUP4_ARG2(__lsx_vsadd_h, tmp0, tmp4, tmp1, tmp5, tmp2, tmp6,
>> tmp3, tmp7,
>> + tmp0, tmp1, tmp2, tmp3);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, tmp1, tmp0, 7, tmp3, tmp2, 7,
>> tmp0, tmp1);
>> + DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
>> + DUP2_ARG2(__lsx_vavgr_bu, dst0, tmp0, dst1, tmp1, dst0, dst1);
>> + __lsx_vst(dst0, dst, 0);
>> + __lsx_vst(dst1, dst, 16);
>> + dst += dst_stride;
>> + }
>> +}
>> +
>> +static void common_hz_8t_and_aver_dst_64w_lsx(const uint8_t *src,
>> + int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter,
>> + int32_t height)
>> +{
>> + int32_t loop_cnt = height;
>> + __m128i src0, src1, src2, src3;
>> + __m128i filter0, filter1, filter2, filter3;
>> + __m128i mask0, mask1, mask2, mask3;
>> + __m128i out0, out1, out2, out3, dst0, dst1;
>> + __m128i shuff = {0x0F0E0D0C0B0A0908, 0x1716151413121110};
>> +
>> + mask0 = __lsx_vld(mc_filt_mask_arr, 0);
>> + src -= 3;
>> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
>> + mask3 = __lsx_vaddi_bu(mask0, 6);
>> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
>> filter, 6,
>> + filter0, filter1, filter2, filter3);
>> +
>> + for (;loop_cnt--;) {
>> + DUP2_ARG2(__lsx_vld, src, 0, src, 16, src0, src2);
>> + src3 = __lsx_vld(src, 24);
>> + src1 = __lsx_vshuf_b(src2, src0, shuff);
>> + DUP2_ARG2(__lsx_vld, dst, 0, dst, 16, dst0, dst1);
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128,
>> src3, 128,
>> + src0, src1, src2, src3);
>> + HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0,
>> mask1, mask2,
>> + mask3, filter0, filter1, filter2, filter3, out0, out1,
>> out2, out3);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7,
>> out0, out1);
>> + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
>> + DUP2_ARG2(__lsx_vavgr_bu, out0, dst0, out1, dst1, out0, out1);
>> + __lsx_vst(out0, dst, 0);
>> + __lsx_vst(out1, dst, 16);
>> +
>> + DUP2_ARG2(__lsx_vld, src, 32, src, 48, src0, src2);
>> + src3 = __lsx_vld(src, 56);
>> + src1 = __lsx_vshuf_b(src2, src0, shuff);
>> + DUP2_ARG2(__lsx_vld, dst, 32, dst, 48, dst0, dst1);
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128,
>> src3, 128,
>> + src0, src1, src2, src3);
>> + HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0,
>> mask1, mask2,
>> + mask3, filter0, filter1, filter2, filter3, out0, out1,
>> out2, out3);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7,
>> out0, out1);
>> + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
>> + DUP2_ARG2(__lsx_vavgr_bu, out0, dst0, out1, dst1, out0, out1);
>> + __lsx_vst(out0, dst, 32);
>> + __lsx_vst(out1, dst, 48);
>> + src += src_stride;
>> + dst += dst_stride;
>> + }
>> +}
>> +
>> +static void common_vt_8t_and_aver_dst_4w_lsx(const uint8_t *src,
>> + int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter,
>> + int32_t height)
>> +{
>> + uint32_t loop_cnt = height >> 2;
>> + uint8_t *dst_tmp = dst;
>> + __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8,
>> src9, src10;
>> + __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
>> + __m128i reg0, reg1, reg2, reg3, reg4;
>> + __m128i filter0, filter1, filter2, filter3;
>> + __m128i out0, out1;
>> + int32_t src_stride2 = src_stride << 1;
>> + int32_t src_stride3 = src_stride + src_stride2;
>> + int32_t src_stride4 = src_stride2 << 1;
>> + uint8_t* _src = (uint8_t*)src - src_stride3;
>> +
>> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
>> filter, 6,
>> + filter0, filter1, filter2, filter3);
>> + src0 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1,
>> src2);
>> + src3 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> + src4 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src5,
>> src6);
>> + _src += src_stride3;
>> + DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src5, src4, src2,
>> src1,
>> + tmp0, tmp1, tmp2, tmp3);
>> + DUP2_ARG2(__lsx_vilvl_b, src4, src3, src6, src5, tmp4, tmp5);
>> + DUP2_ARG2(__lsx_vilvl_d, tmp3, tmp0, tmp4, tmp1, reg0, reg1);
>> + reg2 = __lsx_vilvl_d(tmp5, tmp2);
>> + DUP2_ARG2(__lsx_vxori_b, reg0, 128, reg1, 128, reg0, reg1);
>> + reg2 = __lsx_vxori_b(reg2, 128);
>> +
>> + for (;loop_cnt--;) {
>> + src7 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2,
>> src8, src9);
>> + src10 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> + src0 = __lsx_vldrepl_w(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + src1 = __lsx_vldrepl_w(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + src2 = __lsx_vldrepl_w(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + src3 = __lsx_vldrepl_w(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + DUP2_ARG2(__lsx_vilvl_w, src1, src0, src3, src2, src0, src1);
>> + src0 = __lsx_vilvl_d(src1, src0);
>> + DUP4_ARG2(__lsx_vilvl_b, src7, src6, src8, src7, src9, src8,
>> src10,
>> + src9, tmp0, tmp1, tmp2, tmp3);
>> + DUP2_ARG2(__lsx_vilvl_d, tmp1, tmp0, tmp3, tmp2, reg3, reg4);
>> + DUP2_ARG2(__lsx_vxori_b, reg3, 128, reg4, 128, reg3, reg4);
>> + out0 = FILT_8TAP_DPADD_S_H(reg0, reg1, reg2, reg3, filter0,
>> + filter1, filter2, filter3);
>> + out1 = FILT_8TAP_DPADD_S_H(reg1, reg2, reg3, reg4, filter0,
>> + filter1, filter2, filter3);
>> + out0 = __lsx_vssrarni_b_h(out1, out0, 7);
>> + out0 = __lsx_vxori_b(out0, 128);
>> + out0 = __lsx_vavgr_bu(out0, src0);
>> + __lsx_vstelm_w(out0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(out0, dst, 0, 1);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(out0, dst, 0, 2);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(out0, dst, 0, 3);
>> + dst += dst_stride;
>> + reg0 = reg2;
>> + reg1 = reg3;
>> + reg2 = reg4;
>> + src6 = src10;
>> + }
>> +}
>> +
>> +static void common_vt_8t_and_aver_dst_8w_lsx(const uint8_t *src,
>> + int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter,
>> + int32_t height)
>> +{
>> + uint32_t loop_cnt = height >> 2;
>> + uint8_t *dst_tmp = dst;
>> + __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8,
>> src9, src10;
>> + __m128i tmp0, tmp1, tmp2, tmp3;
>> + __m128i reg0, reg1, reg2, reg3, reg4, reg5;
>> + __m128i filter0, filter1, filter2, filter3;
>> + __m128i out0, out1, out2, out3;
>> + int32_t src_stride2 = src_stride << 1;
>> + int32_t src_stride3 = src_stride + src_stride2;
>> + int32_t src_stride4 = src_stride2 << 1;
>> + uint8_t* _src = (uint8_t*)src - src_stride3;
>> +
>> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
>> filter, 6,
>> + filter0, filter1, filter2, filter3);
>> +
>> + src0 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1,
>> src2);
>> + src3 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> + src4 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src5,
>> src6);
>> + _src += src_stride3;
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3,
>> 128,
>> + src0, src1, src2, src3);
>> + DUP2_ARG2(__lsx_vxori_b, src4, 128, src5, 128, src4, src5);
>> + src6 = __lsx_vxori_b(src6, 128);
>> + DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src5, src4, src2,
>> + src1, reg0, reg1, reg2, reg3);
>> + DUP2_ARG2(__lsx_vilvl_b, src4, src3, src6, src5, reg4, reg5);
>> +
>> + for (;loop_cnt--;) {
>> + src7 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2,
>> src8, src9);
>> + src10 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> + src0 = __lsx_vldrepl_d(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + src1 = __lsx_vldrepl_d(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + src2 = __lsx_vldrepl_d(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + src3 = __lsx_vldrepl_d(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + DUP2_ARG2(__lsx_vilvl_d, src1, src0, src3, src2, src0, src1);
>> + DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128,
>> src10, 128,
>> + src7, src8, src9, src10);
>> + DUP4_ARG2(__lsx_vilvl_b, src7, src6, src8, src7, src9, src8,
>> src10,
>> + src9, tmp0, tmp1, tmp2, tmp3);
>> + out0 = FILT_8TAP_DPADD_S_H(reg0, reg1, reg2, tmp0, filter0,
>> + filter1, filter2, filter3);
>> + out1 = FILT_8TAP_DPADD_S_H(reg3, reg4, reg5, tmp1, filter0,
>> + filter1, filter2, filter3);
>> + out2 = FILT_8TAP_DPADD_S_H(reg1, reg2, tmp0, tmp2, filter0,
>> + filter1, filter2, filter3);
>> + out3 = FILT_8TAP_DPADD_S_H(reg4, reg5, tmp1, tmp3, filter0,
>> + filter1, filter2, filter3);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7,
>> out0, out1);
>> + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
>> + DUP2_ARG2(__lsx_vavgr_bu, out0, src0, out1, src1, out0, out1);
>> + __lsx_vstelm_d(out0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(out0, dst, 0, 1);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(out1, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(out1, dst, 0, 1);
>> + dst += dst_stride;
>> +
>> + reg0 = reg2;
>> + reg1 = tmp0;
>> + reg2 = tmp2;
>> + reg3 = reg5;
>> + reg4 = tmp1;
>> + reg5 = tmp3;
>> + src6 = src10;
>> + }
>> +}
>> +
>> +static void common_vt_8t_and_aver_dst_16w_mult_lsx(const uint8_t *src,
>> + int32_t src_stride,
>> + uint8_t *dst,
>> + int32_t dst_stride,
>> + const int8_t *filter,
>> + int32_t height,
>> + int32_t width)
>> +{
>> + uint8_t *src_tmp;
>> + uint32_t cnt = width >> 4;
>> + __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8,
>> src9, src10;
>> + __m128i filter0, filter1, filter2, filter3;
>> + __m128i reg0, reg1, reg2, reg3, reg4, reg5;
>> + __m128i reg6, reg7, reg8, reg9, reg10, reg11;
>> + __m128i tmp0, tmp1, tmp2, tmp3;
>> + int32_t src_stride2 = src_stride << 1;
>> + int32_t src_stride3 = src_stride + src_stride2;
>> + int32_t src_stride4 = src_stride2 << 1;
>> + int32_t dst_stride2 = dst_stride << 1;
>> + int32_t dst_stride3 = dst_stride2 + dst_stride;
>> + int32_t dst_stride4 = dst_stride2 << 1;
>> + uint8_t *_src = (uint8_t*)src - src_stride3;
>> +
>> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
>> filter, 6,
>> + filter0, filter1, filter2, filter3);
>> + for (;cnt--;) {
>> + uint32_t loop_cnt = height >> 2;
>> + uint8_t *dst_reg = dst;
>> +
>> + src_tmp = _src;
>> + src0 = __lsx_vld(src_tmp, 0);
>> + DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp,
>> src_stride2,
>> + src1, src2);
>> + src3 = __lsx_vldx(src_tmp, src_stride3);
>> + src_tmp += src_stride4;
>> + src4 = __lsx_vld(src_tmp, 0);
>> + DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp,
>> src_stride2,
>> + src5, src6);
>> + src_tmp += src_stride3;
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128,
>> src3, 128,
>> + src0, src1, src2, src3);
>> + DUP2_ARG2(__lsx_vxori_b, src4, 128, src5, 128, src4, src5);
>> + src6 = __lsx_vxori_b(src6, 128);
>> + DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src5, src4,
>> src2, src1,
>> + reg0, reg1, reg2, reg3);
>> + DUP2_ARG2(__lsx_vilvl_b, src4, src3, src6, src5, reg4, reg5);
>> + DUP4_ARG2(__lsx_vilvh_b, src1, src0, src3, src2, src5, src4,
>> src2, src1,
>> + reg6, reg7, reg8, reg9);
>> + DUP2_ARG2(__lsx_vilvh_b, src4, src3, src6, src5, reg10, reg11);
>> +
>> + for (;loop_cnt--;) {
>> + src7 = __lsx_vld(src_tmp, 0);
>> + DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp,
>> src_stride2,
>> + src8, src9);
>> + src10 = __lsx_vldx(src_tmp, src_stride3);
>> + src_tmp += src_stride4;
>> + DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128,
>> src10,
>> + 128, src7, src8, src9, src10);
>> + DUP4_ARG2(__lsx_vilvl_b, src7, src6, src8, src7, src9,
>> src8,
>> + src10, src9, src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vilvh_b, src7, src6, src8, src7, src9,
>> src8,
>> + src10, src9, src4, src5, src7, src8);
>> + tmp0 = FILT_8TAP_DPADD_S_H(reg0, reg1, reg2, src0, filter0,
>> + filter1, filter2, filter3);
>> + tmp1 = FILT_8TAP_DPADD_S_H(reg3, reg4, reg5, src1, filter0,
>> + filter1, filter2, filter3);
>> + tmp2 = FILT_8TAP_DPADD_S_H(reg6, reg7, reg8, src4, filter0,
>> + filter1, filter2, filter3);
>> + tmp3 = FILT_8TAP_DPADD_S_H(reg9, reg10, reg11, src5,
>> filter0,
>> + filter1, filter2, filter3);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, tmp2, tmp0, 7, tmp3, tmp1, 7,
>> + tmp0, tmp1);
>> + DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
>> + tmp2 = __lsx_vld(dst_reg, 0);
>> + tmp3 = __lsx_vldx(dst_reg, dst_stride);
>> + DUP2_ARG2(__lsx_vavgr_bu, tmp0, tmp2, tmp1, tmp3, tmp0,
>> tmp1);
>> + __lsx_vst(tmp0, dst_reg, 0);
>> + __lsx_vstx(tmp1, dst_reg, dst_stride);
>> + tmp0 = FILT_8TAP_DPADD_S_H(reg1, reg2, src0, src2, filter0,
>> + filter1, filter2, filter3);
>> + tmp1 = FILT_8TAP_DPADD_S_H(reg4, reg5, src1, src3, filter0,
>> + filter1, filter2, filter3);
>> + tmp2 = FILT_8TAP_DPADD_S_H(reg7, reg8, src4, src7, filter0,
>> + filter1, filter2, filter3);
>> + tmp3 = FILT_8TAP_DPADD_S_H(reg10, reg11, src5, src8,
>> filter0,
>> + filter1, filter2, filter3);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, tmp2, tmp0, 7, tmp3, tmp1, 7,
>> + tmp0, tmp1);
>> + DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
>> + tmp2 = __lsx_vldx(dst_reg, dst_stride2);
>> + tmp3 = __lsx_vldx(dst_reg, dst_stride3);
>> + DUP2_ARG2(__lsx_vavgr_bu, tmp0, tmp2, tmp1, tmp3, tmp0,
>> tmp1);
>> + __lsx_vstx(tmp0, dst_reg, dst_stride2);
>> + __lsx_vstx(tmp1, dst_reg, dst_stride3);
>> + dst_reg += dst_stride4;
>> +
>> + reg0 = reg2;
>> + reg1 = src0;
>> + reg2 = src2;
>> + reg3 = reg5;
>> + reg4 = src1;
>> + reg5 = src3;
>> + reg6 = reg8;
>> + reg7 = src4;
>> + reg8 = src7;
>> + reg9 = reg11;
>> + reg10 = src5;
>> + reg11 = src8;
>> + src6 = src10;
>> + }
>> + _src += 16;
>> + dst += 16;
>> + }
>> +}
>> +
>> +static void common_vt_8t_and_aver_dst_16w_lsx(const uint8_t *src,
>> + int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter,
>> + int32_t height)
>> +{
>> + common_vt_8t_and_aver_dst_16w_mult_lsx(src, src_stride, dst, dst_stride,
>> + filter, height, 16);
>> +}
>> +
>> +static void common_vt_8t_and_aver_dst_32w_lsx(const uint8_t *src,
>> + int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter,
>> + int32_t height)
>> +{
>> + common_vt_8t_and_aver_dst_16w_mult_lsx(src, src_stride, dst, dst_stride,
>> + filter, height, 32);
>> +}
>> +
>> +static void common_vt_8t_and_aver_dst_64w_lsx(const uint8_t *src,
>> + int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter,
>> + int32_t height)
>> +{
>> + common_vt_8t_and_aver_dst_16w_mult_lsx(src, src_stride, dst, dst_stride,
>> + filter, height, 64);
>> +}
>> +
>> +static void common_hv_8ht_8vt_and_aver_dst_4w_lsx(const uint8_t *src,
>> + int32_t src_stride,
>> + uint8_t *dst,
>> + int32_t dst_stride,
>> + const int8_t *filter_horiz,
>> + const int8_t *filter_vert,
>> + int32_t height)
>> +{
>> + uint32_t loop_cnt = height >> 2;
>> + uint8_t *dst_tmp = dst;
>> + __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8,
>> src9, src10;
>> + __m128i filt_hz0, filt_hz1, filt_hz2, filt_hz3;
>> + __m128i filt_vt0, filt_vt1, filt_vt2, filt_vt3;
>> + __m128i mask0, mask1, mask2, mask3;
>> + __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
>> + __m128i out0, out1;
>> + __m128i shuff = {0x0F0E0D0C0B0A0908, 0x1716151413121110};
>> + int32_t src_stride2 = src_stride << 1;
>> + int32_t src_stride3 = src_stride + src_stride2;
>> + int32_t src_stride4 = src_stride2 << 1;
>> + uint8_t* _src = (uint8_t*)src - 3 - src_stride3;
>> +
>> + mask0 = __lsx_vld(mc_filt_mask_arr, 16);
>> + DUP4_ARG2(__lsx_vldrepl_h, filter_horiz, 0, filter_horiz, 2,
>> filter_horiz,
>> + 4, filter_horiz, 6, filt_hz0, filt_hz1, filt_hz2,
>> filt_hz3);
>> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
>> + mask3 = __lsx_vaddi_bu(mask0, 6);
>> +
>> + src0 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1,
>> src2);
>> + src3 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> + src4 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src5,
>> src6);
>> + _src += src_stride3;
>> +
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3,
>> 128,
>> + src0, src1, src2, src3);
>> + DUP2_ARG2(__lsx_vxori_b, src4, 128, src5, 128, src4, src5);
>> + src6 = __lsx_vxori_b(src6, 128);
>> +
>> + tmp0 = HORIZ_8TAP_FILT(src0, src1, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + tmp2 = HORIZ_8TAP_FILT(src2, src3, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + tmp4 = HORIZ_8TAP_FILT(src4, src5, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + tmp5 = HORIZ_8TAP_FILT(src5, src6, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + DUP2_ARG3(__lsx_vshuf_b, tmp2, tmp0, shuff, tmp4, tmp2, shuff,
>> tmp1, tmp3);
>> + DUP4_ARG2(__lsx_vldrepl_h, filter_vert, 0, filter_vert, 2,
>> filter_vert, 4,
>> + filter_vert, 6, filt_vt0, filt_vt1, filt_vt2, filt_vt3);
>> + DUP2_ARG2(__lsx_vpackev_b, tmp1, tmp0, tmp3, tmp2, tmp0, tmp1);
>> + tmp2 = __lsx_vpackev_b(tmp5, tmp4);
>> +
>> + for (;loop_cnt--;) {
>> + src7 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2,
>> src8, src9);
>> + src10 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> + src2 = __lsx_vldrepl_w(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + src3 = __lsx_vldrepl_w(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + src4 = __lsx_vldrepl_w(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + src5 = __lsx_vldrepl_w(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + DUP2_ARG2(__lsx_vilvl_w, src3, src2, src5, src4, src2, src3);
>> + src2 = __lsx_vilvl_d(src3, src2);
>> + DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128,
>> src10, 128,
>> + src7, src8, src9, src10);
>> + tmp3 = HORIZ_8TAP_FILT(src7, src8, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + tmp4 = __lsx_vshuf_b(tmp3, tmp5, shuff);
>> + tmp4 = __lsx_vpackev_b(tmp3, tmp4);
>> + out0 = FILT_8TAP_DPADD_S_H(tmp0, tmp1, tmp2, tmp4, filt_vt0,
>> filt_vt1,
>> + filt_vt2, filt_vt3);
>> + src1 = HORIZ_8TAP_FILT(src9, src10, mask0, mask1, mask2, mask3,
>> + filt_hz0, filt_hz1, filt_hz2, filt_hz3);
>> + src0 = __lsx_vshuf_b(src1, tmp3, shuff);
>> + src0 = __lsx_vpackev_b(src1, src0);
>> + out1 = FILT_8TAP_DPADD_S_H(tmp1, tmp2, tmp4, src0, filt_vt0,
>> filt_vt1,
>> + filt_vt2, filt_vt3);
>> + out0 = __lsx_vssrarni_b_h(out1, out0, 7);
>> + out0 = __lsx_vxori_b(out0, 128);
>> + out0 = __lsx_vavgr_bu(out0, src2);
>> + __lsx_vstelm_w(out0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(out0, dst, 0, 1);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(out0, dst, 0, 2);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(out0, dst, 0, 3);
>> + dst += dst_stride;
>> +
>> + tmp5 = src1;
>> + tmp0 = tmp2;
>> + tmp1 = tmp4;
>> + tmp2 = src0;
>> + }
>> +}
>> +
>> +static void common_hv_8ht_8vt_and_aver_dst_8w_lsx(const uint8_t *src,
>> + int32_t src_stride,
>> + uint8_t *dst,
>> + int32_t dst_stride,
>> + const int8_t *filter_horiz,
>> + const int8_t *filter_vert,
>> + int32_t height)
>> +{
>> + uint32_t loop_cnt = height >> 2;
>> + uint8_t *dst_tmp = dst;
>> + __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8,
>> src9, src10;
>> + __m128i filt_hz0, filt_hz1, filt_hz2, filt_hz3;
>> + __m128i filt_vt0, filt_vt1, filt_vt2, filt_vt3;
>> + __m128i mask0, mask1, mask2, mask3;
>> + __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
>> + __m128i out0, out1;
>> + int32_t src_stride2 = src_stride << 1;
>> + int32_t src_stride3 = src_stride + src_stride2;
>> + int32_t src_stride4 = src_stride2 << 1;
>> + uint8_t* _src = (uint8_t*)src - 3 - src_stride3;
>> +
>> + mask0 = __lsx_vld(mc_filt_mask_arr, 0);
>> + DUP4_ARG2(__lsx_vldrepl_h, filter_horiz, 0, filter_horiz, 2,
>> filter_horiz,
>> + 4, filter_horiz, 6, filt_hz0, filt_hz1, filt_hz2,
>> filt_hz3);
>> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
>> + mask3 = __lsx_vaddi_bu(mask0, 6);
>> +
>> + src0 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1,
>> src2);
>> + src3 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> + src4 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src5,
>> src6);
>> + _src += src_stride3;
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3,
>> 128,
>> + src0, src1, src2, src3);
>> + DUP2_ARG2(__lsx_vxori_b, src4, 128, src5, 128, src4, src5);
>> + src6 = __lsx_vxori_b(src6, 128);
>> +
>> + src0 = HORIZ_8TAP_FILT(src0, src0, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + src1 = HORIZ_8TAP_FILT(src1, src1, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + src2 = HORIZ_8TAP_FILT(src2, src2, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + src3 = HORIZ_8TAP_FILT(src3, src3, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + src4 = HORIZ_8TAP_FILT(src4, src4, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + src5 = HORIZ_8TAP_FILT(src5, src5, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + src6 = HORIZ_8TAP_FILT(src6, src6, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> +
>> + DUP4_ARG2(__lsx_vldrepl_h, filter_vert, 0, filter_vert, 2,
>> filter_vert, 4,
>> + filter_vert, 6, filt_vt0, filt_vt1, filt_vt2, filt_vt3);
>> + DUP4_ARG2(__lsx_vpackev_b, src1, src0, src3, src2, src5, src4,
>> + src2, src1, tmp0, tmp1, tmp2, tmp4);
>> + DUP2_ARG2(__lsx_vpackev_b, src4, src3, src6, src5, tmp5, tmp6);
>> +
>> + for (;loop_cnt--;) {
>> + src7 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2,
>> src8, src9);
>> + src10 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> +
>> + DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128,
>> src10, 128,
>> + src7, src8, src9, src10);
>> + src7 = HORIZ_8TAP_FILT(src7, src7, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + tmp3 = __lsx_vpackev_b(src7, src6);
>> + out0 = FILT_8TAP_DPADD_S_H(tmp0, tmp1, tmp2, tmp3, filt_vt0,
>> filt_vt1,
>> + filt_vt2, filt_vt3);
>> + src8 = HORIZ_8TAP_FILT(src8, src8, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + src0 = __lsx_vpackev_b(src8, src7);
>> + out1 = FILT_8TAP_DPADD_S_H(tmp4, tmp5, tmp6, src0, filt_vt0,
>> filt_vt1,
>> + filt_vt2, filt_vt3);
>> + src9 = HORIZ_8TAP_FILT(src9, src9, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + src1 = __lsx_vpackev_b(src9, src8);
>> + src3 = FILT_8TAP_DPADD_S_H(tmp1, tmp2, tmp3, src1, filt_vt0,
>> filt_vt1,
>> + filt_vt2, filt_vt3);
>> + src10 = HORIZ_8TAP_FILT(src10, src10, mask0, mask1, mask2,
>> mask3, filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + src2 = __lsx_vpackev_b(src10, src9);
>> + src4 = FILT_8TAP_DPADD_S_H(tmp5, tmp6, src0, src2, filt_vt0,
>> filt_vt1,
>> + filt_vt2, filt_vt3);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, src4, src3, 7,
>> out0, out1);
>> + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
>> + src5 = __lsx_vldrepl_d(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + src7 = __lsx_vldrepl_d(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + src8 = __lsx_vldrepl_d(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + src9 = __lsx_vldrepl_d(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + DUP2_ARG2(__lsx_vilvl_d, src7, src5, src9, src8, src5, src7);
>> + DUP2_ARG2(__lsx_vavgr_bu, out0, src5, out1, src7, out0, out1);
>> + __lsx_vstelm_d(out0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(out0, dst, 0, 1);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(out1, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(out1, dst, 0, 1);
>> + dst += dst_stride;
>> +
>> + src6 = src10;
>> + tmp0 = tmp2;
>> + tmp1 = tmp3;
>> + tmp2 = src1;
>> + tmp4 = tmp6;
>> + tmp5 = src0;
>> + tmp6 = src2;
>> + }
>> +}
>> +
>> +static void common_hv_8ht_8vt_and_aver_dst_16w_lsx(const uint8_t *src,
>> + int32_t src_stride,
>> + uint8_t *dst,
>> + int32_t dst_stride,
>> + const int8_t *filter_horiz,
>> + const int8_t *filter_vert,
>> + int32_t height)
>> +{
>> + int32_t multiple8_cnt;
>> +
>> + for (multiple8_cnt = 2; multiple8_cnt--;) {
>> + common_hv_8ht_8vt_and_aver_dst_8w_lsx(src, src_stride, dst, dst_stride,
>> + filter_horiz, filter_vert,
>> + height);
>> +
>> + src += 8;
>> + dst += 8;
>> + }
>> +}
>> +
>> +static void common_hv_8ht_8vt_and_aver_dst_32w_lsx(const uint8_t *src,
>> + int32_t src_stride,
>> + uint8_t *dst,
>> + int32_t dst_stride,
>> + const int8_t *filter_horiz,
>> + const int8_t *filter_vert,
>> + int32_t height)
>> +{
>> + int32_t multiple8_cnt;
>> +
>> + for (multiple8_cnt = 4; multiple8_cnt--;) {
>> + common_hv_8ht_8vt_and_aver_dst_8w_lsx(src, src_stride, dst, dst_stride,
>> + filter_horiz, filter_vert,
>> + height);
>> +
>> + src += 8;
>> + dst += 8;
>> + }
>> +}
>> +
>> +static void common_hv_8ht_8vt_and_aver_dst_64w_lsx(const uint8_t *src,
>> + int32_t src_stride,
>> + uint8_t *dst,
>> + int32_t dst_stride,
>> + const int8_t *filter_horiz,
>> + const int8_t *filter_vert,
>> + int32_t height)
>> +{
>> + int32_t multiple8_cnt;
>> +
>> + for (multiple8_cnt = 8; multiple8_cnt--;) {
>> + common_hv_8ht_8vt_and_aver_dst_8w_lsx(src, src_stride, dst, dst_stride,
>> + filter_horiz, filter_vert,
>> + height);
>> +
>> + src += 8;
>> + dst += 8;
>> + }
>> +}
>> +
>> +static void avg_width8_lsx(const uint8_t *src, int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + int32_t height)
>> +{
>> + int32_t cnt = height >> 2;
>> + uint8_t *dst_tmp = dst;
>> + __m128i src0, src1, dst0, dst1;
>> + __m128i tmp0, tmp1, tmp2, tmp3;
>> +
>> + for (;cnt--;) {
>> + tmp0 = __lsx_vldrepl_d(src, 0);
>> + src += src_stride;
>> + tmp1 = __lsx_vldrepl_d(src, 0);
>> + src += src_stride;
>> + tmp2 = __lsx_vldrepl_d(src, 0);
>> + src += src_stride;
>> + tmp3 = __lsx_vldrepl_d(src, 0);
>> + src += src_stride;
>> + DUP2_ARG2(__lsx_vilvl_d, tmp1, tmp0, tmp3, tmp2, src0, src1);
>> + tmp0 = __lsx_vldrepl_d(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + tmp1 = __lsx_vldrepl_d(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + tmp2 = __lsx_vldrepl_d(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + tmp3 = __lsx_vldrepl_d(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + DUP2_ARG2(__lsx_vilvl_d, tmp1, tmp0, tmp3, tmp2, dst0, dst1);
>> + DUP2_ARG2(__lsx_vavgr_bu, src0, dst0, src1, dst1, dst0, dst1);
>> + __lsx_vstelm_d(dst0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(dst0, dst, 0, 1);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(dst1, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(dst1, dst, 0, 1);
>> + dst += dst_stride;
>> + }
>> +}
>> +
>> +static void avg_width16_lsx(const uint8_t *src, int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + int32_t height)
>> +{
>> + int32_t cnt = height >> 2;
>> + __m128i src0, src1, src2, src3;
>> + __m128i dst0, dst1, dst2, dst3;
>> + int32_t src_stride2 = src_stride << 1;
>> + int32_t src_stride3 = src_stride + src_stride2;
>> + int32_t src_stride4 = src_stride2 << 1;
>> + int32_t dst_stride2 = dst_stride << 1;
>> + int32_t dst_stride3 = dst_stride2 + dst_stride;
>> + int32_t dst_stride4 = dst_stride2 << 1;
>> + uint8_t* _src = (uint8_t*)src;
>> +
>> + for (;cnt--;) {
>> + src0 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2,
>> src1, src2);
>> + src3 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> +
>> + dst0 = __lsx_vld(dst, 0);
>> + DUP2_ARG2(__lsx_vldx, dst, dst_stride, dst, dst_stride2,
>> + dst1, dst2);
>> + dst3 = __lsx_vldx(dst, dst_stride3);
>> + DUP4_ARG2(__lsx_vavgr_bu, src0, dst0, src1, dst1,
>> + src2, dst2, src3, dst3, dst0, dst1, dst2, dst3);
>> + __lsx_vst(dst0, dst, 0);
>> + __lsx_vstx(dst1, dst, dst_stride);
>> + __lsx_vstx(dst2, dst, dst_stride2);
>> + __lsx_vstx(dst3, dst, dst_stride3);
>> + dst += dst_stride4;
>> + }
>> +}
>> +
>> +static void avg_width32_lsx(const uint8_t *src, int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + int32_t height)
>> +{
>> + int32_t cnt = height >> 2;
>> + uint8_t *src_tmp1 = (uint8_t*)src;
>> + uint8_t *src_tmp2 = src_tmp1 + 16;
>> + uint8_t *dst_tmp1, *dst_tmp2;
>> + __m128i src0, src1, src2, src3, src4, src5, src6, src7;
>> + __m128i dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
>> + int32_t src_stride2 = src_stride << 1;
>> + int32_t src_stride3 = src_stride + src_stride2;
>> + int32_t src_stride4 = src_stride2 << 1;
>> + int32_t dst_stride2 = dst_stride << 1;
>> + int32_t dst_stride3 = dst_stride2 + dst_stride;
>> + int32_t dst_stride4 = dst_stride2 << 1;
>> +
>> + dst_tmp1 = dst;
>> + dst_tmp2 = dst + 16;
>> + for (;cnt--;) {
>> + src0 = __lsx_vld(src_tmp1, 0);
>> + DUP2_ARG2(__lsx_vldx, src_tmp1, src_stride, src_tmp1, src_stride2,
>> + src2, src4);
>> + src6 = __lsx_vldx(src_tmp1, src_stride3);
>> + src_tmp1 += src_stride4;
>> +
>> + src1 = __lsx_vld(src_tmp2, 0);
>> + DUP2_ARG2(__lsx_vldx, src_tmp2, src_stride, src_tmp2, src_stride2,
>> + src3, src5);
>> + src7 = __lsx_vldx(src_tmp2, src_stride3);
>> + src_tmp2 += src_stride4;
>> +
>> + dst0 = __lsx_vld(dst_tmp1, 0);
>> + DUP2_ARG2(__lsx_vldx, dst_tmp1, dst_stride, dst_tmp1, dst_stride2,
>> + dst2, dst4);
>> + dst6 = __lsx_vldx(dst_tmp1, dst_stride3);
>> + dst1 = __lsx_vld(dst_tmp2, 0);
>> + DUP2_ARG2(__lsx_vldx, dst_tmp2, dst_stride, dst_tmp2, dst_stride2,
>> + dst3, dst5);
>> + dst7 = __lsx_vldx(dst_tmp2, dst_stride3);
>> +
>> + DUP4_ARG2(__lsx_vavgr_bu, src0, dst0, src1, dst1,
>> + src2, dst2, src3, dst3, dst0, dst1, dst2, dst3);
>> + DUP4_ARG2(__lsx_vavgr_bu, src4, dst4, src5, dst5,
>> + src6, dst6, src7, dst7, dst4, dst5, dst6, dst7);
>> + __lsx_vst(dst0, dst_tmp1, 0);
>> + __lsx_vstx(dst2, dst_tmp1, dst_stride);
>> + __lsx_vstx(dst4, dst_tmp1, dst_stride2);
>> + __lsx_vstx(dst6, dst_tmp1, dst_stride3);
>> + dst_tmp1 += dst_stride4;
>> + __lsx_vst(dst1, dst_tmp2, 0);
>> + __lsx_vstx(dst3, dst_tmp2, dst_stride);
>> + __lsx_vstx(dst5, dst_tmp2, dst_stride2);
>> + __lsx_vstx(dst7, dst_tmp2, dst_stride3);
>> + dst_tmp2 += dst_stride4;
>> + }
>> +}
>> +
>> +static void avg_width64_lsx(const uint8_t *src, int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + int32_t height)
>> +{
>> + int32_t cnt = height >> 2;
>> + uint8_t *dst_tmp = dst;
>> + __m128i src0, src1, src2, src3, src4, src5, src6, src7;
>> + __m128i src8, src9, src10, src11, src12, src13, src14, src15;
>> + __m128i dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
>> + __m128i dst8, dst9, dst10, dst11, dst12, dst13, dst14, dst15;
>> +
>> + for (;cnt--;) {
>> + DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48,
>> + src0, src1, src2, src3);
>> + src += src_stride;
>> + DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48,
>> + src4, src5, src6, src7);
>> + src += src_stride;
>> + DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48,
>> + src8, src9, src10, src11);
>> + src += src_stride;
>> + DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48,
>> + src12, src13, src14, src15);
>> + src += src_stride;
>> + DUP4_ARG2(__lsx_vld, dst_tmp, 0, dst_tmp, 16, dst_tmp, 32, dst_tmp, 48,
>> + dst0, dst1, dst2, dst3);
>> + dst_tmp += dst_stride;
>> + DUP4_ARG2(__lsx_vld, dst_tmp, 0, dst_tmp, 16, dst_tmp, 32, dst_tmp, 48,
>> + dst4, dst5, dst6, dst7);
>> + dst_tmp += dst_stride;
>> + DUP4_ARG2(__lsx_vld, dst_tmp, 0, dst_tmp, 16, dst_tmp, 32, dst_tmp, 48,
>> + dst8, dst9, dst10, dst11);
>> + dst_tmp += dst_stride;
>> + DUP4_ARG2(__lsx_vld, dst_tmp, 0, dst_tmp, 16, dst_tmp, 32, dst_tmp, 48,
>> + dst12, dst13, dst14, dst15);
>> + dst_tmp += dst_stride;
>> + DUP4_ARG2(__lsx_vavgr_bu, src0, dst0, src1, dst1,
>> + src2, dst2, src3, dst3, dst0, dst1, dst2, dst3);
>> + DUP4_ARG2(__lsx_vavgr_bu, src4, dst4, src5, dst5,
>> + src6, dst6, src7, dst7, dst4, dst5, dst6, dst7);
>> + DUP4_ARG2(__lsx_vavgr_bu, src8, dst8, src9, dst9, src10,
>> + dst10, src11, dst11, dst8, dst9, dst10, dst11);
>> + DUP4_ARG2(__lsx_vavgr_bu, src12, dst12, src13, dst13, src14,
>> + dst14, src15, dst15, dst12, dst13, dst14, dst15);
>> + __lsx_vst(dst0, dst, 0);
>> + __lsx_vst(dst1, dst, 16);
>> + __lsx_vst(dst2, dst, 32);
>> + __lsx_vst(dst3, dst, 48);
>> + dst += dst_stride;
>> + __lsx_vst(dst4, dst, 0);
>> + __lsx_vst(dst5, dst, 16);
>> + __lsx_vst(dst6, dst, 32);
>> + __lsx_vst(dst7, dst, 48);
>> + dst += dst_stride;
>> + __lsx_vst(dst8, dst, 0);
>> + __lsx_vst(dst9, dst, 16);
>> + __lsx_vst(dst10, dst, 32);
>> + __lsx_vst(dst11, dst, 48);
>> + dst += dst_stride;
>> + __lsx_vst(dst12, dst, 0);
>> + __lsx_vst(dst13, dst, 16);
>> + __lsx_vst(dst14, dst, 32);
>> + __lsx_vst(dst15, dst, 48);
>> + dst += dst_stride;
>> + }
>> +}
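
(One note for readers less familiar with the intrinsics: __lsx_vavgr_bu is a
rounding average of unsigned bytes, i.e. per byte dst[i] = (dst[i] + out[i] + 1) >> 1,
so the avg_width*_lsx helpers above and the *_and_aver_dst_* paths are the usual
avg counterparts of the put/copy functions.)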
>> +
>> +static const int8_t vp9_subpel_filters_lsx[3][15][8] = {
>> + [FILTER_8TAP_REGULAR] = {
>> + {0, 1, -5, 126, 8, -3, 1, 0},
>> + {-1, 3, -10, 122, 18, -6, 2, 0},
>> + {-1, 4, -13, 118, 27, -9, 3, -1},
>> + {-1, 4, -16, 112, 37, -11, 4, -1},
>> + {-1, 5, -18, 105, 48, -14, 4, -1},
>> + {-1, 5, -19, 97, 58, -16, 5, -1},
>> + {-1, 6, -19, 88, 68, -18, 5, -1},
>> + {-1, 6, -19, 78, 78, -19, 6, -1},
>> + {-1, 5, -18, 68, 88, -19, 6, -1},
>> + {-1, 5, -16, 58, 97, -19, 5, -1},
>> + {-1, 4, -14, 48, 105, -18, 5, -1},
>> + {-1, 4, -11, 37, 112, -16, 4, -1},
>> + {-1, 3, -9, 27, 118, -13, 4, -1},
>> + {0, 2, -6, 18, 122, -10, 3, -1},
>> + {0, 1, -3, 8, 126, -5, 1, 0},
>> + }, [FILTER_8TAP_SHARP] = {
>> + {-1, 3, -7, 127, 8, -3, 1, 0},
>> + {-2, 5, -13, 125, 17, -6, 3, -1},
>> + {-3, 7, -17, 121, 27, -10, 5, -2},
>> + {-4, 9, -20, 115, 37, -13, 6, -2},
>> + {-4, 10, -23, 108, 48, -16, 8, -3},
>> + {-4, 10, -24, 100, 59, -19, 9, -3},
>> + {-4, 11, -24, 90, 70, -21, 10, -4},
>> + {-4, 11, -23, 80, 80, -23, 11, -4},
>> + {-4, 10, -21, 70, 90, -24, 11, -4},
>> + {-3, 9, -19, 59, 100, -24, 10, -4},
>> + {-3, 8, -16, 48, 108, -23, 10, -4},
>> + {-2, 6, -13, 37, 115, -20, 9, -4},
>> + {-2, 5, -10, 27, 121, -17, 7, -3},
>> + {-1, 3, -6, 17, 125, -13, 5, -2},
>> + {0, 1, -3, 8, 127, -7, 3, -1},
>> + }, [FILTER_8TAP_SMOOTH] = {
>> + {-3, -1, 32, 64, 38, 1, -3, 0},
>> + {-2, -2, 29, 63, 41, 2, -3, 0},
>> + {-2, -2, 26, 63, 43, 4, -4, 0},
>> + {-2, -3, 24, 62, 46, 5, -4, 0},
>> + {-2, -3, 21, 60, 49, 7, -4, 0},
>> + {-1, -4, 18, 59, 51, 9, -4, 0},
>> + {-1, -4, 16, 57, 53, 12, -4, -1},
>> + {-1, -4, 14, 55, 55, 14, -4, -1},
>> + {-1, -4, 12, 53, 57, 16, -4, -1},
>> + {0, -4, 9, 51, 59, 18, -4, -1},
>> + {0, -4, 7, 49, 60, 21, -3, -2},
>> + {0, -4, 5, 46, 62, 24, -3, -2},
>> + {0, -4, 4, 43, 63, 26, -2, -2},
>> + {0, -3, 2, 41, 63, 29, -2, -2},
>> + {0, -3, 1, 38, 64, 32, -1, -3},
>> + }
>> +};
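
A quick cross-check on the table above: every row sums to 128, so an interpolated
sample is a rounded, clipped 8-tap sum. A minimal scalar sketch of what the LSX
code computes per output byte (not part of the patch, the helper name is mine;
px must point at the centre sample with three valid samples before it and four
after):

    static unsigned char ref_8tap(const unsigned char *px, const signed char *f)
    {
        int i, sum = 0;

        for (i = 0; i < 8; i++)
            sum += f[i] * px[i - 3];   /* f is one row of vp9_subpel_filters_lsx */
        sum = (sum + 64) >> 7;         /* taps sum to 128, so >>7 with rounding   */
        return sum < 0 ? 0 : sum > 255 ? 255 : (unsigned char) sum;
    }

The vector paths reach the same value through the xori-by-128 sign trick,
vdp2/vdp2add accumulation and the closing __lsx_vssrarni_b_h(..., 7) plus
vxori 128.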
>> +
>> +#define VP9_8TAP_LOONGARCH_LSX_FUNC(SIZE, type, type_idx) \
>> +void ff_put_8tap_##type##_##SIZE##h_lsx(uint8_t *dst, ptrdiff_t dststride, \
>> + const uint8_t *src, \
>> + ptrdiff_t srcstride, \
>> + int h, int mx, int my) \
>> +{ \
>> + const int8_t *filter = vp9_subpel_filters_lsx[type_idx][mx-1]; \
>> + \
>> + common_hz_8t_##SIZE##w_lsx(src, srcstride, dst, dststride, filter, h); \
>> +} \
>> + \
>> +void ff_put_8tap_##type##_##SIZE##v_lsx(uint8_t *dst, ptrdiff_t dststride, \
>> + const uint8_t *src, \
>> + ptrdiff_t srcstride, \
>> + int h, int mx, int my) \
>> +{ \
>> + const int8_t *filter = vp9_subpel_filters_lsx[type_idx][my-1]; \
>> + \
>> + common_vt_8t_##SIZE##w_lsx(src, srcstride, dst, dststride, filter, h); \
>> +} \
>> + \
>> +void ff_put_8tap_##type##_##SIZE##hv_lsx(uint8_t *dst, ptrdiff_t dststride, \
>> + const uint8_t *src, \
>> + ptrdiff_t srcstride, \
>> + int h, int mx, int my) \
>> +{ \
>> + const int8_t *hfilter = vp9_subpel_filters_lsx[type_idx][mx-1]; \
>> + const int8_t *vfilter = vp9_subpel_filters_lsx[type_idx][my-1]; \
>> + \
>> + common_hv_8ht_8vt_##SIZE##w_lsx(src, srcstride, dst, dststride, hfilter, \
>> + vfilter, h); \
>> +} \
>> + \
>> +void ff_avg_8tap_##type##_##SIZE##h_lsx(uint8_t *dst, ptrdiff_t dststride, \
>> + const uint8_t *src, \
>> + ptrdiff_t srcstride, \
>> + int h, int mx, int my) \
>> +{ \
>> + const int8_t *filter = vp9_subpel_filters_lsx[type_idx][mx-1]; \
>> + \
>> + common_hz_8t_and_aver_dst_##SIZE##w_lsx(src, srcstride, dst, \
>> + dststride, filter, h); \
>> +} \
>> + \
>> +void ff_avg_8tap_##type##_##SIZE##v_lsx(uint8_t *dst, ptrdiff_t dststride, \
>> + const uint8_t *src, \
>> + ptrdiff_t srcstride, \
>> + int h, int mx, int my) \
>> +{ \
>> + const int8_t *filter = vp9_subpel_filters_lsx[type_idx][my-1]; \
>> + \
>> + common_vt_8t_and_aver_dst_##SIZE##w_lsx(src, srcstride, dst, dststride, \
>> + filter, h); \
>> +} \
>> + \
>> +void ff_avg_8tap_##type##_##SIZE##hv_lsx(uint8_t *dst, ptrdiff_t dststride, \
>> + const uint8_t *src, \
>> + ptrdiff_t srcstride, \
>> + int h, int mx, int my) \
>> +{ \
>> + const int8_t *hfilter = vp9_subpel_filters_lsx[type_idx][mx-1]; \
>> + const int8_t *vfilter = vp9_subpel_filters_lsx[type_idx][my-1]; \
>> + \
>> + common_hv_8ht_8vt_and_aver_dst_##SIZE##w_lsx(src, srcstride, dst, \
>> + dststride, hfilter, \
>> + vfilter, h); \
>> +}
>> +
>> +#define VP9_COPY_LOONGARCH_LSX_FUNC(SIZE) \
>> +void ff_copy##SIZE##_lsx(uint8_t *dst, ptrdiff_t dststride, \
>> + const uint8_t *src, ptrdiff_t srcstride, \
>> + int h, int mx, int my) \
>> +{ \
>> + \
>> + copy_width##SIZE##_lsx(src, srcstride, dst, dststride, h); \
>> +} \
>> +void ff_avg##SIZE##_lsx(uint8_t *dst, ptrdiff_t dststride, \
>> + const uint8_t *src, ptrdiff_t srcstride, \
>> + int h, int mx, int my) \
>> +{ \
>> + \
>> + avg_width##SIZE##_lsx(src, srcstride, dst, dststride, h); \
>> +}
>> +
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(64, regular, FILTER_8TAP_REGULAR);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(32, regular, FILTER_8TAP_REGULAR);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(16, regular, FILTER_8TAP_REGULAR);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(8, regular, FILTER_8TAP_REGULAR);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(4, regular, FILTER_8TAP_REGULAR);
>> +
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(64, sharp, FILTER_8TAP_SHARP);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(32, sharp, FILTER_8TAP_SHARP);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(16, sharp, FILTER_8TAP_SHARP);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(8, sharp, FILTER_8TAP_SHARP);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(4, sharp, FILTER_8TAP_SHARP);
>> +
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(64, smooth, FILTER_8TAP_SMOOTH);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(32, smooth, FILTER_8TAP_SMOOTH);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(16, smooth, FILTER_8TAP_SMOOTH);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(8, smooth, FILTER_8TAP_SMOOTH);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(4, smooth, FILTER_8TAP_SMOOTH);
>> +
>> +VP9_COPY_LOONGARCH_LSX_FUNC(64);
>> +VP9_COPY_LOONGARCH_LSX_FUNC(32);
>> +VP9_COPY_LOONGARCH_LSX_FUNC(16);
>> +VP9_COPY_LOONGARCH_LSX_FUNC(8);
>> +
>> +#undef VP9_8TAP_LOONGARCH_LSX_FUNC
>> +#undef VP9_COPY_LOONGARCH_LSX_FUNC
>> diff --git a/libavcodec/loongarch/vp9dsp_init_loongarch.c b/libavcodec/loongarch/vp9dsp_init_loongarch.c
>> new file mode 100644
>> index 0000000000..c1e01b4558
>> --- /dev/null
>> +++ b/libavcodec/loongarch/vp9dsp_init_loongarch.c
>> @@ -0,0 +1,97 @@
>> +/*
>> + * Copyright (c) 2021 Loongson Technology Corporation Limited
>> + * Contributed by Hao Chen <chenhao@loongson.cn>
>> + *
>> + * This file is part of FFmpeg.
>> + *
>> + * FFmpeg is free software; you can redistribute it and/or
>> + * modify it under the terms of the GNU Lesser General Public
>> + * License as published by the Free Software Foundation; either
>> + * version 2.1 of the License, or (at your option) any later version.
>> + *
>> + * FFmpeg is distributed in the hope that it will be useful,
>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
>> + * Lesser General Public License for more details.
>> + *
>> + * You should have received a copy of the GNU Lesser General Public
>> + * License along with FFmpeg; if not, write to the Free Software
>> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
>> + */
>> +
>> +#include "libavutil/loongarch/cpu.h"
>> +#include "libavutil/attributes.h"
>> +#include "libavcodec/vp9dsp.h"
>> +#include "vp9dsp_loongarch.h"
>> +
>> +#define init_subpel1(idx1, idx2, idxh, idxv, sz, dir, type) \
>> + dsp->mc[idx1][FILTER_8TAP_SMOOTH ][idx2][idxh][idxv] = \
>> + ff_##type##_8tap_smooth_##sz##dir##_lsx; \
>> + dsp->mc[idx1][FILTER_8TAP_REGULAR][idx2][idxh][idxv] = \
>> + ff_##type##_8tap_regular_##sz##dir##_lsx; \
>> + dsp->mc[idx1][FILTER_8TAP_SHARP ][idx2][idxh][idxv] = \
>> + ff_##type##_8tap_sharp_##sz##dir##_lsx;
>> +
>> +#define init_subpel2(idx, idxh, idxv, dir, type) \
>> + init_subpel1(0, idx, idxh, idxv, 64, dir, type); \
>> + init_subpel1(1, idx, idxh, idxv, 32, dir, type); \
>> + init_subpel1(2, idx, idxh, idxv, 16, dir, type); \
>> + init_subpel1(3, idx, idxh, idxv, 8, dir, type); \
>> + init_subpel1(4, idx, idxh, idxv, 4, dir, type);
>> +
>> +#define init_subpel3(idx, type) \
>> + init_subpel2(idx, 1, 0, h, type); \
>> + init_subpel2(idx, 0, 1, v, type); \
>> + init_subpel2(idx, 1, 1, hv, type);
>> +
>> +#define init_fpel(idx1, idx2, sz, type)                                    \
>> +    dsp->mc[idx1][FILTER_8TAP_SMOOTH ][idx2][0][0] = ff_##type##sz##_lsx;  \
>> +    dsp->mc[idx1][FILTER_8TAP_REGULAR][idx2][0][0] = ff_##type##sz##_lsx;  \
>> +    dsp->mc[idx1][FILTER_8TAP_SHARP ][idx2][0][0] = ff_##type##sz##_lsx;   \
>> +    dsp->mc[idx1][FILTER_BILINEAR ][idx2][0][0] = ff_##type##sz##_lsx;
>> +
>> +#define init_copy(idx, sz) \
>> + init_fpel(idx, 0, sz, copy); \
>> + init_fpel(idx, 1, sz, avg);
>> +
>> +#define init_intra_pred1_lsx(tx, sz) \
>> + dsp->intra_pred[tx][VERT_PRED] = ff_vert_##sz##_lsx; \
>> + dsp->intra_pred[tx][HOR_PRED] = ff_hor_##sz##_lsx; \
>> + dsp->intra_pred[tx][DC_PRED] = ff_dc_##sz##_lsx; \
>> + dsp->intra_pred[tx][LEFT_DC_PRED] = ff_dc_left_##sz##_lsx; \
>> + dsp->intra_pred[tx][TOP_DC_PRED] = ff_dc_top_##sz##_lsx; \
>> + dsp->intra_pred[tx][DC_128_PRED] = ff_dc_128_##sz##_lsx; \
>> + dsp->intra_pred[tx][DC_127_PRED] = ff_dc_127_##sz##_lsx; \
>> + dsp->intra_pred[tx][DC_129_PRED] = ff_dc_129_##sz##_lsx; \
>> + dsp->intra_pred[tx][TM_VP8_PRED] = ff_tm_##sz##_lsx; \
>> +
>> +#define init_intra_pred2_lsx(tx, sz) \
>> + dsp->intra_pred[tx][DC_PRED] = ff_dc_##sz##_lsx; \
>> + dsp->intra_pred[tx][LEFT_DC_PRED] = ff_dc_left_##sz##_lsx; \
>> + dsp->intra_pred[tx][TOP_DC_PRED] = ff_dc_top_##sz##_lsx; \
>> + dsp->intra_pred[tx][TM_VP8_PRED] = ff_tm_##sz##_lsx; \
>> +
>> +av_cold void ff_vp9dsp_init_loongarch(VP9DSPContext *dsp, int bpp)
>> +{
>> + int cpu_flags = av_get_cpu_flags();
>> + if (have_lsx(cpu_flags))
>> + if (bpp == 8) {
>> + init_subpel3(0, put);
>> + init_subpel3(1, avg);
>> + init_copy(0, 64);
>> + init_copy(1, 32);
>> + init_copy(2, 16);
>> + init_copy(3, 8);
>> + init_intra_pred1_lsx(TX_16X16, 16x16);
>> + init_intra_pred1_lsx(TX_32X32, 32x32);
>> + init_intra_pred2_lsx(TX_4X4, 4x4);
>> + init_intra_pred2_lsx(TX_8X8, 8x8);
>> + }
>> +}
>> +#undef init_subpel1
>> +#undef init_subpel2
>> +#undef init_subpel3
>> +#undef init_copy
>> +#undef init_fpel
>> +#undef init_intra_pred1_lsx
>> +#undef init_intra_pred2_lsx
>> diff --git a/libavcodec/loongarch/vp9dsp_loongarch.h b/libavcodec/loongarch/vp9dsp_loongarch.h
>> new file mode 100644
>> index 0000000000..b469326fdc
>> --- /dev/null
>> +++ b/libavcodec/loongarch/vp9dsp_loongarch.h
>> @@ -0,0 +1,144 @@
>> +/*
>> + * Copyright (c) 2021 Loongson Technology Corporation Limited
>> + * Contributed by Hao Chen <chenhao@loongson.cn>
>> + *
>> + * This file is part of FFmpeg.
>> + *
>> + * FFmpeg is free software; you can redistribute it and/or
>> + * modify it under the terms of the GNU Lesser General Public
>> + * License as published by the Free Software Foundation; either
>> + * version 2.1 of the License, or (at your option) any later version.
>> + *
>> + * FFmpeg is distributed in the hope that it will be useful,
>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
>> + * Lesser General Public License for more details.
>> + *
>> + * You should have received a copy of the GNU Lesser General Public
>> + * License along with FFmpeg; if not, write to the Free Software
>> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
>> + */
>> +
>> +#ifndef AVCODEC_LOONGARCH_VP9DSP_LOONGARCH_H
>> +#define AVCODEC_LOONGARCH_VP9DSP_LOONGARCH_H
>> +
>> +#define VP9_8TAP_LOONGARCH_LSX_FUNC(SIZE, type, type_idx)                   \
>> +void ff_put_8tap_##type##_##SIZE##h_lsx(uint8_t *dst, ptrdiff_t dststride,   \
>> +                                        const uint8_t *src,                  \
>> +                                        ptrdiff_t srcstride,                 \
>> +                                        int h, int mx, int my);              \
>> +                                                                             \
>> +void ff_put_8tap_##type##_##SIZE##v_lsx(uint8_t *dst, ptrdiff_t dststride,   \
>> +                                        const uint8_t *src,                  \
>> +                                        ptrdiff_t srcstride,                 \
>> +                                        int h, int mx, int my);              \
>> +                                                                             \
>> +void ff_put_8tap_##type##_##SIZE##hv_lsx(uint8_t *dst, ptrdiff_t dststride,  \
>> +                                         const uint8_t *src,                 \
>> +                                         ptrdiff_t srcstride,                \
>> +                                         int h, int mx, int my);             \
>> +                                                                             \
>> +void ff_avg_8tap_##type##_##SIZE##h_lsx(uint8_t *dst, ptrdiff_t dststride,   \
>> +                                        const uint8_t *src,                  \
>> +                                        ptrdiff_t srcstride,                 \
>> +                                        int h, int mx, int my);              \
>> +                                                                             \
>> +void ff_avg_8tap_##type##_##SIZE##v_lsx(uint8_t *dst, ptrdiff_t dststride,   \
>> +                                        const uint8_t *src,                  \
>> +                                        ptrdiff_t srcstride,                 \
>> +                                        int h, int mx, int my);              \
>> +                                                                             \
>> +void ff_avg_8tap_##type##_##SIZE##hv_lsx(uint8_t *dst, ptrdiff_t dststride,  \
>> +                                         const uint8_t *src,                 \
>> +                                         ptrdiff_t srcstride,                \
>> +                                         int h, int mx, int my);
>> +
>> +#define VP9_COPY_LOONGARCH_LSX_FUNC(SIZE) \
>> +void ff_copy##SIZE##_lsx(uint8_t *dst, ptrdiff_t dststride, \
>> + const uint8_t *src, ptrdiff_t srcstride, \
>> + int h, int mx, int my); \
>> + \
>> +void ff_avg##SIZE##_lsx(uint8_t *dst, ptrdiff_t dststride, \
>> + const uint8_t *src, ptrdiff_t srcstride, \
>> + int h, int mx, int my);
>> +
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(64, regular, FILTER_8TAP_REGULAR);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(32, regular, FILTER_8TAP_REGULAR);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(16, regular, FILTER_8TAP_REGULAR);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(8, regular, FILTER_8TAP_REGULAR);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(4, regular, FILTER_8TAP_REGULAR);
>> +
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(64, sharp, FILTER_8TAP_SHARP);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(32, sharp, FILTER_8TAP_SHARP);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(16, sharp, FILTER_8TAP_SHARP);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(8, sharp, FILTER_8TAP_SHARP);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(4, sharp, FILTER_8TAP_SHARP);
>> +
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(64, smooth, FILTER_8TAP_SMOOTH);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(32, smooth, FILTER_8TAP_SMOOTH);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(16, smooth, FILTER_8TAP_SMOOTH);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(8, smooth, FILTER_8TAP_SMOOTH);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(4, smooth, FILTER_8TAP_SMOOTH);
>> +
>> +VP9_COPY_LOONGARCH_LSX_FUNC(64);
>> +VP9_COPY_LOONGARCH_LSX_FUNC(32);
>> +VP9_COPY_LOONGARCH_LSX_FUNC(16);
>> +VP9_COPY_LOONGARCH_LSX_FUNC(8);
>> +
>> +#undef VP9_8TAP_LOONGARCH_LSX_FUNC
>> +#undef VP9_COPY_LOONGARCH_LSX_FUNC
>> +
>> +void ff_vert_16x16_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
>> +                       const uint8_t *top);
>> +void ff_vert_32x32_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
>> +                       const uint8_t *top);
>> +void ff_hor_16x16_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
>> +                      const uint8_t *top);
>> +void ff_hor_32x32_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
>> +                      const uint8_t *top);
>> +void ff_dc_4x4_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
>> +                   const uint8_t *top);
>> +void ff_dc_8x8_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
>> +                   const uint8_t *top);
>> +void ff_dc_16x16_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
>> +                     const uint8_t *top);
>> +void ff_dc_32x32_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
>> +                     const uint8_t *top);
>> +void ff_dc_left_4x4_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
>> +                        const uint8_t *top);
>> +void ff_dc_left_8x8_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
>> +                        const uint8_t *top);
>> +void ff_dc_left_16x16_lsx(uint8_t *dst, ptrdiff_t stride,
>> +                          const uint8_t *left, const uint8_t *top);
>> +void ff_dc_left_32x32_lsx(uint8_t *dst, ptrdiff_t stride,
>> +                          const uint8_t *left, const uint8_t *top);
>> +void ff_dc_top_4x4_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
>> +                       const uint8_t *top);
>> +void ff_dc_top_8x8_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
>> +                       const uint8_t *top);
>> +void ff_dc_top_16x16_lsx(uint8_t *dst, ptrdiff_t stride,
>> +                         const uint8_t *left, const uint8_t *top);
>> +void ff_dc_top_32x32_lsx(uint8_t *dst, ptrdiff_t stride,
>> +                         const uint8_t *left, const uint8_t *top);
>> +void ff_dc_128_16x16_lsx(uint8_t *dst, ptrdiff_t stride,
>> +                         const uint8_t *left, const uint8_t *top);
>> +void ff_dc_128_32x32_lsx(uint8_t *dst, ptrdiff_t stride,
>> +                         const uint8_t *left, const uint8_t *top);
>> +void ff_dc_127_16x16_lsx(uint8_t *dst, ptrdiff_t stride,
>> +                         const uint8_t *left, const uint8_t *top);
>> +void ff_dc_127_32x32_lsx(uint8_t *dst, ptrdiff_t stride,
>> +                         const uint8_t *left, const uint8_t *top);
>> +void ff_dc_129_16x16_lsx(uint8_t *dst, ptrdiff_t stride,
>> +                         const uint8_t *left, const uint8_t *top);
>> +void ff_dc_129_32x32_lsx(uint8_t *dst, ptrdiff_t stride,
>> +                         const uint8_t *left, const uint8_t *top);
>> +void ff_tm_4x4_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
>> +                   const uint8_t *top);
>> +void ff_tm_8x8_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
>> +                   const uint8_t *top);
>> +void ff_tm_16x16_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
>> +                     const uint8_t *top);
>> +void ff_tm_32x32_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
>> +                     const uint8_t *top);
>> +
>> +#endif /* AVCODEC_LOONGARCH_VP9DSP_LOONGARCH_H */
>> diff --git a/libavcodec/vp9dsp.c b/libavcodec/vp9dsp.c
>> index 41b8ad1ad1..82bfe394d1 100644
>> --- a/libavcodec/vp9dsp.c
>> +++ b/libavcodec/vp9dsp.c
>> @@ -98,4 +98,5 @@ av_cold void ff_vp9dsp_init(VP9DSPContext *dsp, int bpp, int bitexact)
>> if (ARCH_ARM) ff_vp9dsp_init_arm(dsp, bpp);
>> if (ARCH_X86) ff_vp9dsp_init_x86(dsp, bpp, bitexact);
>> if (ARCH_MIPS) ff_vp9dsp_init_mips(dsp, bpp);
>> + if (ARCH_LOONGARCH) ff_vp9dsp_init_loongarch(dsp, bpp);
>> }
>> diff --git a/libavcodec/vp9dsp.h b/libavcodec/vp9dsp.h
>> index e2256316a8..700dd72de8 100644
>> --- a/libavcodec/vp9dsp.h
>> +++ b/libavcodec/vp9dsp.h
>> @@ -132,5 +132,6 @@ void ff_vp9dsp_init_aarch64(VP9DSPContext *dsp, int bpp);
>> void ff_vp9dsp_init_arm(VP9DSPContext *dsp, int bpp);
>> void ff_vp9dsp_init_x86(VP9DSPContext *dsp, int bpp, int bitexact);
>> void ff_vp9dsp_init_mips(VP9DSPContext *dsp, int bpp);
>> +void ff_vp9dsp_init_loongarch(VP9DSPContext *dsp, int bpp);
>>
>> #endif /* AVCODEC_VP9DSP_H */
>> --
>> 2.20.1
>>
_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
* Re: [FFmpeg-devel] [PATCH 2/4] avcodec: [loongarch] Optimize vp9_mc/intra with LSX.
2021-12-18 18:47 ` Jean-Baptiste Kempf
2021-12-20 6:07 ` Hao Chen
@ 2021-12-20 6:20 ` Shiyou Yin
1 sibling, 0 replies; 10+ messages in thread
From: Shiyou Yin @ 2021-12-20 6:20 UTC (permalink / raw)
To: jb; +Cc: Hao Chen, FFmpeg development discussions and patches
We are glad to see people paying attention to LoongArch; any questions or contributions are greatly appreciated.
About an ASM version: we do plan one for H264, but it will take some time.
Loongson has a LoongArch hardware donation program, and cloud hosts are available directly.
Please feel free to contact me if you would like to try one.
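
For context on the intrinsics-versus-assembly question in this thread: the code in this series is written with LSX C intrinsics (the __lsx_* calls seen in the patches) rather than hand-written .S files. The following is only a minimal illustrative sketch of that style, not part of the patch set; add_bytes_lsx is a made-up example function, and it assumes GCC's <lsxintrin.h> intrinsics header is available.

    #include <stddef.h>
    #include <stdint.h>
    #include <lsxintrin.h>          /* assumed: GCC's LSX intrinsics header */

    /* Hypothetical example: add two byte buffers 16 bytes at a time.
     * __lsx_vld/__lsx_vadd_b/__lsx_vst correspond closely to the vld/vadd.b/vst
     * LSX instructions, so a hand-written .S version would look very similar. */
    static void add_bytes_lsx(uint8_t *dst, const uint8_t *src, size_t len)
    {
        size_t i;

        for (i = 0; i + 16 <= len; i += 16) {
            __m128i a = __lsx_vld(dst + i, 0);
            __m128i b = __lsx_vld(src + i, 0);

            __lsx_vst(__lsx_vadd_b(a, b), dst + i, 0);
        }
        for (; i < len; i++)        /* scalar tail for the remaining bytes */
            dst[i] += src[i];
    }

The practical difference is mainly one of control: intrinsics leave register allocation and scheduling to the compiler, while a .S implementation pins both down but has to be written and maintained separately (and typically validated with checkasm) for each ISA level.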
> On 19 Dec 2021, at 02:47, Jean-Baptiste Kempf <jb@videolan.org> wrote:
>
> Sorry to ask, but don't you have an ASM format, instead of intrinsics?
>
> Best,
>
> On Sat, 18 Dec 2021, at 15:27, Hao Chen wrote:
>> ffmpeg -i ../10_vp9_1080p_30fps_3Mbps.webm -f rawvideo -y /dev/null -an
>> before: 170fps
>> after : 294fps
>> ---
>> libavcodec/loongarch/Makefile | 3 +
>> libavcodec/loongarch/vp9_intra_lsx.c | 653 +++++
>> libavcodec/loongarch/vp9_mc_lsx.c | 2480 ++++++++++++++++++
>> libavcodec/loongarch/vp9dsp_init_loongarch.c | 97 +
>> libavcodec/loongarch/vp9dsp_loongarch.h | 144 +
>> libavcodec/vp9dsp.c | 1 +
>> libavcodec/vp9dsp.h | 1 +
>> 7 files changed, 3379 insertions(+)
>> create mode 100644 libavcodec/loongarch/vp9_intra_lsx.c
>> create mode 100644 libavcodec/loongarch/vp9_mc_lsx.c
>> create mode 100644 libavcodec/loongarch/vp9dsp_init_loongarch.c
>> create mode 100644 libavcodec/loongarch/vp9dsp_loongarch.h
>>
>> diff --git a/libavcodec/loongarch/Makefile b/libavcodec/loongarch/Makefile
>> index 4e1d827e19..6fcebe40a3 100644
>> --- a/libavcodec/loongarch/Makefile
>> +++ b/libavcodec/loongarch/Makefile
>> @@ -3,6 +3,7 @@ OBJS-$(CONFIG_H264QPEL) += loongarch/h264qpel_init_loongarch.o
>> OBJS-$(CONFIG_H264DSP) += loongarch/h264dsp_init_loongarch.o
>> OBJS-$(CONFIG_H264PRED) += loongarch/h264_intrapred_init_loongarch.o
>> OBJS-$(CONFIG_VP8_DECODER) += loongarch/vp8dsp_init_loongarch.o
>> +OBJS-$(CONFIG_VP9_DECODER) += loongarch/vp9dsp_init_loongarch.o
>> LASX-OBJS-$(CONFIG_H264CHROMA) += loongarch/h264chroma_lasx.o
>> LASX-OBJS-$(CONFIG_H264QPEL) += loongarch/h264qpel_lasx.o
>> LASX-OBJS-$(CONFIG_H264DSP) += loongarch/h264dsp_lasx.o \
>> @@ -11,3 +12,5 @@ LASX-OBJS-$(CONFIG_H264DSP) += loongarch/h264dsp_lasx.o \
>> LASX-OBJS-$(CONFIG_H264PRED) += loongarch/h264_intrapred_lasx.o
>> LSX-OBJS-$(CONFIG_VP8_DECODER) += loongarch/vp8_mc_lsx.o \
>>                                   loongarch/vp8_lpf_lsx.o
>> +LSX-OBJS-$(CONFIG_VP9_DECODER) += loongarch/vp9_mc_lsx.o \
>> +                                  loongarch/vp9_intra_lsx.o
>> diff --git a/libavcodec/loongarch/vp9_intra_lsx.c b/libavcodec/loongarch/vp9_intra_lsx.c
>> new file mode 100644
>> index 0000000000..d3f32646f3
>> --- /dev/null
>> +++ b/libavcodec/loongarch/vp9_intra_lsx.c
>> @@ -0,0 +1,653 @@
>> +/*
>> + * Copyright (c) 2021 Loongson Technology Corporation Limited
>> + * Contributed by Hao Chen <chenhao@loongson.cn>
>> + *
>> + * This file is part of FFmpeg.
>> + *
>> + * FFmpeg is free software; you can redistribute it and/or
>> + * modify it under the terms of the GNU Lesser General Public
>> + * License as published by the Free Software Foundation; either
>> + * version 2.1 of the License, or (at your option) any later version.
>> + *
>> + * FFmpeg is distributed in the hope that it will be useful,
>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
>> + * Lesser General Public License for more details.
>> + *
>> + * You should have received a copy of the GNU Lesser General Public
>> + * License along with FFmpeg; if not, write to the Free Software
>> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
>> + */
>> +
>> +#include "libavcodec/vp9dsp.h"
>> +#include "libavutil/loongarch/loongson_intrinsics.h"
>> +#include "vp9dsp_loongarch.h"
>> +
>> +#define LSX_ST_8(_dst0, _dst1, _dst2, _dst3, _dst4, \
>> + _dst5, _dst6, _dst7, _dst, _stride, \
>> + _stride2, _stride3, _stride4) \
>> +{ \
>> + __lsx_vst(_dst0, _dst, 0); \
>> + __lsx_vstx(_dst1, _dst, _stride); \
>> + __lsx_vstx(_dst2, _dst, _stride2); \
>> + __lsx_vstx(_dst3, _dst, _stride3); \
>> + _dst += _stride4; \
>> + __lsx_vst(_dst4, _dst, 0); \
>> + __lsx_vstx(_dst5, _dst, _stride); \
>> + __lsx_vstx(_dst6, _dst, _stride2); \
>> + __lsx_vstx(_dst7, _dst, _stride3); \
>> +}
>> +
>> +#define LSX_ST_8X16(_dst0, _dst1, _dst2, _dst3, _dst4, \
>> + _dst5, _dst6, _dst7, _dst, _stride) \
>> +{ \
>> + __lsx_vst(_dst0, _dst, 0); \
>> + __lsx_vst(_dst0, _dst, 16); \
>> + _dst += _stride; \
>> + __lsx_vst(_dst1, _dst, 0); \
>> + __lsx_vst(_dst1, _dst, 16); \
>> + _dst += _stride; \
>> + __lsx_vst(_dst2, _dst, 0); \
>> + __lsx_vst(_dst2, _dst, 16); \
>> + _dst += _stride; \
>> + __lsx_vst(_dst3, _dst, 0); \
>> + __lsx_vst(_dst3, _dst, 16); \
>> + _dst += _stride; \
>> + __lsx_vst(_dst4, _dst, 0); \
>> + __lsx_vst(_dst4, _dst, 16); \
>> + _dst += _stride; \
>> + __lsx_vst(_dst5, _dst, 0); \
>> + __lsx_vst(_dst5, _dst, 16); \
>> + _dst += _stride; \
>> + __lsx_vst(_dst6, _dst, 0); \
>> + __lsx_vst(_dst6, _dst, 16); \
>> + _dst += _stride; \
>> + __lsx_vst(_dst7, _dst, 0); \
>> + __lsx_vst(_dst7, _dst, 16); \
>> + _dst += _stride; \
>> +}
>> +
>> +void ff_vert_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *left,
>> +                       const uint8_t *src)
>> +{
>> + __m128i src0;
>> + ptrdiff_t stride2 = dst_stride << 1;
>> + ptrdiff_t stride3 = stride2 + dst_stride;
>> + ptrdiff_t stride4 = stride2 << 1;
>> + src0 = __lsx_vld(src, 0);
>> + LSX_ST_8(src0, src0, src0, src0, src0, src0, src0, src0, dst,
>> + dst_stride, stride2, stride3, stride4);
>> + dst += stride4;
>> + LSX_ST_8(src0, src0, src0, src0, src0, src0, src0, src0, dst,
>> + dst_stride, stride2, stride3, stride4);
>> +}
>> +
>> +void ff_vert_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *left,
>> +                       const uint8_t *src)
>> +{
>> + uint32_t row;
>> + __m128i src0, src1;
>> +
>> + DUP2_ARG2(__lsx_vld, src, 0, src, 16, src0, src1);
>> + for (row = 32; row--;) {
>> + __lsx_vst(src0, dst, 0);
>> + __lsx_vst(src1, dst, 16);
>> + dst += dst_stride;
>> + }
>> +}
>> +
>> +void ff_hor_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *src,
>> +                      const uint8_t *top)
>> +{
>> + __m128i src0, src1, src2, src3, src4, src5, src6, src7;
>> + __m128i src8, src9, src10, src11, src12, src13, src14, src15;
>> + ptrdiff_t stride2 = dst_stride << 1;
>> + ptrdiff_t stride3 = stride2 + dst_stride;
>> + ptrdiff_t stride4 = stride2 << 1;
>> +
>> + src15 = __lsx_vldrepl_b(src, 0);
>> + src14 = __lsx_vldrepl_b(src, 1);
>> + src13 = __lsx_vldrepl_b(src, 2);
>> + src12 = __lsx_vldrepl_b(src, 3);
>> + src11 = __lsx_vldrepl_b(src, 4);
>> + src10 = __lsx_vldrepl_b(src, 5);
>> + src9 = __lsx_vldrepl_b(src, 6);
>> + src8 = __lsx_vldrepl_b(src, 7);
>> + src7 = __lsx_vldrepl_b(src, 8);
>> + src6 = __lsx_vldrepl_b(src, 9);
>> + src5 = __lsx_vldrepl_b(src, 10);
>> + src4 = __lsx_vldrepl_b(src, 11);
>> + src3 = __lsx_vldrepl_b(src, 12);
>> + src2 = __lsx_vldrepl_b(src, 13);
>> + src1 = __lsx_vldrepl_b(src, 14);
>> + src0 = __lsx_vldrepl_b(src, 15);
>> + LSX_ST_8(src0, src1, src2, src3, src4, src5, src6, src7, dst,
>> + dst_stride, stride2, stride3, stride4);
>> + dst += stride4;
>> + LSX_ST_8(src8, src9, src10, src11, src12, src13, src14, src15, dst,
>> + dst_stride, stride2, stride3, stride4);
>> +}
>> +
>> +void ff_hor_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *src,
>> +                      const uint8_t *top)
>> +{
>> + __m128i src0, src1, src2, src3, src4, src5, src6, src7;
>> + __m128i src8, src9, src10, src11, src12, src13, src14, src15;
>> + __m128i src16, src17, src18, src19, src20, src21, src22, src23;
>> + __m128i src24, src25, src26, src27, src28, src29, src30, src31;
>> +
>> + src31 = __lsx_vldrepl_b(src, 0);
>> + src30 = __lsx_vldrepl_b(src, 1);
>> + src29 = __lsx_vldrepl_b(src, 2);
>> + src28 = __lsx_vldrepl_b(src, 3);
>> + src27 = __lsx_vldrepl_b(src, 4);
>> + src26 = __lsx_vldrepl_b(src, 5);
>> + src25 = __lsx_vldrepl_b(src, 6);
>> + src24 = __lsx_vldrepl_b(src, 7);
>> + src23 = __lsx_vldrepl_b(src, 8);
>> + src22 = __lsx_vldrepl_b(src, 9);
>> + src21 = __lsx_vldrepl_b(src, 10);
>> + src20 = __lsx_vldrepl_b(src, 11);
>> + src19 = __lsx_vldrepl_b(src, 12);
>> + src18 = __lsx_vldrepl_b(src, 13);
>> + src17 = __lsx_vldrepl_b(src, 14);
>> + src16 = __lsx_vldrepl_b(src, 15);
>> + src15 = __lsx_vldrepl_b(src, 16);
>> + src14 = __lsx_vldrepl_b(src, 17);
>> + src13 = __lsx_vldrepl_b(src, 18);
>> + src12 = __lsx_vldrepl_b(src, 19);
>> + src11 = __lsx_vldrepl_b(src, 20);
>> + src10 = __lsx_vldrepl_b(src, 21);
>> + src9 = __lsx_vldrepl_b(src, 22);
>> + src8 = __lsx_vldrepl_b(src, 23);
>> + src7 = __lsx_vldrepl_b(src, 24);
>> + src6 = __lsx_vldrepl_b(src, 25);
>> + src5 = __lsx_vldrepl_b(src, 26);
>> + src4 = __lsx_vldrepl_b(src, 27);
>> + src3 = __lsx_vldrepl_b(src, 28);
>> + src2 = __lsx_vldrepl_b(src, 29);
>> + src1 = __lsx_vldrepl_b(src, 30);
>> + src0 = __lsx_vldrepl_b(src, 31);
>> + LSX_ST_8X16(src0, src1, src2, src3, src4, src5, src6, src7,
>> + dst, dst_stride);
>> + LSX_ST_8X16(src8, src9, src10, src11, src12, src13, src14, src15,
>> + dst, dst_stride);
>> + LSX_ST_8X16(src16, src17, src18, src19, src20, src21, src22, src23,
>> + dst, dst_stride);
>> + LSX_ST_8X16(src24, src25, src26, src27, src28, src29, src30, src31,
>> + dst, dst_stride);
>> +}
>> +
>> +void ff_dc_4x4_lsx(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *src_left,
>> +                   const uint8_t *src_top)
>> +{
>> + __m128i tmp0, tmp1, dst0;
>> +
>> + tmp0 = __lsx_vldrepl_w(src_top, 0);
>> + tmp1 = __lsx_vldrepl_w(src_left, 0);
>> + dst0 = __lsx_vilvl_w(tmp1, tmp0);
>> + dst0 = __lsx_vhaddw_hu_bu(dst0, dst0);
>> + dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);
>> + dst0 = __lsx_vhaddw_du_wu(dst0, dst0);
>> + dst0 = __lsx_vsrari_w(dst0, 3);
>> + dst0 = __lsx_vshuf4i_b(dst0, 0);
>> + __lsx_vstelm_w(dst0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(dst0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(dst0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(dst0, dst, 0, 0);
>> +}
>> +
>> +#define INTRA_DC_TL_4X4(dir)                                            \
>> +void ff_dc_##dir##_4x4_lsx(uint8_t *dst, ptrdiff_t dst_stride,          \
>> +                           const uint8_t *left,                         \
>> +                           const uint8_t *top)                          \
>> +{                                                                       \
>> +    __m128i tmp0, dst0;                                                 \
>> +                                                                        \
>> +    tmp0 = __lsx_vldrepl_w(dir, 0);                                     \
>> +    dst0 = __lsx_vhaddw_hu_bu(tmp0, tmp0);                              \
>> +    dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);                              \
>> +    dst0 = __lsx_vsrari_w(dst0, 2);                                     \
>> +    dst0 = __lsx_vshuf4i_b(dst0, 0);                                    \
>> +    __lsx_vstelm_w(dst0, dst, 0, 0);                                    \
>> +    dst += dst_stride;                                                  \
>> +    __lsx_vstelm_w(dst0, dst, 0, 0);                                    \
>> +    dst += dst_stride;                                                  \
>> +    __lsx_vstelm_w(dst0, dst, 0, 0);                                    \
>> +    dst += dst_stride;                                                  \
>> +    __lsx_vstelm_w(dst0, dst, 0, 0);                                    \
>> +}
>> +INTRA_DC_TL_4X4(top);
>> +INTRA_DC_TL_4X4(left);
>> +
>> +void ff_dc_8x8_lsx(uint8_t *dst, ptrdiff_t dst_stride, const uint8_t *src_left,
>> +                   const uint8_t *src_top)
>> +{
>> + __m128i tmp0, tmp1, dst0;
>> +
>> + tmp0 = __lsx_vldrepl_d(src_top, 0);
>> + tmp1 = __lsx_vldrepl_d(src_left, 0);
>> + dst0 = __lsx_vilvl_d(tmp1, tmp0);
>> + dst0 = __lsx_vhaddw_hu_bu(dst0, dst0);
>> + dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);
>> + dst0 = __lsx_vhaddw_du_wu(dst0, dst0);
>> + dst0 = __lsx_vhaddw_qu_du(dst0, dst0);
>> + dst0 = __lsx_vsrari_w(dst0, 4);
>> + dst0 = __lsx_vreplvei_b(dst0, 0);
>> + __lsx_vstelm_d(dst0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(dst0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(dst0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(dst0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(dst0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(dst0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(dst0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(dst0, dst, 0, 0);
>> +}
>> +
>> +#define INTRA_DC_TL_8X8(dir)                                            \
>> +void ff_dc_##dir##_8x8_lsx(uint8_t *dst, ptrdiff_t dst_stride,          \
>> +                           const uint8_t *left,                         \
>> +                           const uint8_t *top)                          \
>> +{                                                                       \
>> +    __m128i tmp0, dst0;                                                 \
>> +                                                                        \
>> +    tmp0 = __lsx_vldrepl_d(dir, 0);                                     \
>> +    dst0 = __lsx_vhaddw_hu_bu(tmp0, tmp0);                              \
>> +    dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);                              \
>> +    dst0 = __lsx_vhaddw_du_wu(dst0, dst0);                              \
>> +    dst0 = __lsx_vsrari_w(dst0, 3);                                     \
>> +    dst0 = __lsx_vreplvei_b(dst0, 0);                                   \
>> +    __lsx_vstelm_d(dst0, dst, 0, 0);                                    \
>> +    dst += dst_stride;                                                  \
>> +    __lsx_vstelm_d(dst0, dst, 0, 0);                                    \
>> +    dst += dst_stride;                                                  \
>> +    __lsx_vstelm_d(dst0, dst, 0, 0);                                    \
>> +    dst += dst_stride;                                                  \
>> +    __lsx_vstelm_d(dst0, dst, 0, 0);                                    \
>> +    dst += dst_stride;                                                  \
>> +    __lsx_vstelm_d(dst0, dst, 0, 0);                                    \
>> +    dst += dst_stride;                                                  \
>> +    __lsx_vstelm_d(dst0, dst, 0, 0);                                    \
>> +    dst += dst_stride;                                                  \
>> +    __lsx_vstelm_d(dst0, dst, 0, 0);                                    \
>> +    dst += dst_stride;                                                  \
>> +    __lsx_vstelm_d(dst0, dst, 0, 0);                                    \
>> +}
>> +
>> +INTRA_DC_TL_8X8(top);
>> +INTRA_DC_TL_8X8(left);
>> +
>> +void ff_dc_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride,
>> + const uint8_t *src_left, const uint8_t *src_top)
>> +{
>> + __m128i tmp0, tmp1, dst0;
>> + ptrdiff_t stride2 = dst_stride << 1;
>> + ptrdiff_t stride3 = stride2 + dst_stride;
>> + ptrdiff_t stride4 = stride2 << 1;
>> +
>> + tmp0 = __lsx_vld(src_top, 0);
>> + tmp1 = __lsx_vld(src_left, 0);
>> + DUP2_ARG2(__lsx_vhaddw_hu_bu, tmp0, tmp0, tmp1, tmp1, tmp0, tmp1);
>> + dst0 = __lsx_vadd_h(tmp0, tmp1);
>> + dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);
>> + dst0 = __lsx_vhaddw_du_wu(dst0, dst0);
>> + dst0 = __lsx_vhaddw_qu_du(dst0, dst0);
>> + dst0 = __lsx_vsrari_w(dst0, 5);
>> + dst0 = __lsx_vreplvei_b(dst0, 0);
>> + LSX_ST_8(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst,
>> + dst_stride, stride2, stride3, stride4);
>> + dst += stride4;
>> + LSX_ST_8(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst,
>> + dst_stride, stride2, stride3, stride4);
>> +}
>> +
>> +#define INTRA_DC_TL_16X16(dir)                                          \
>> +void ff_dc_##dir##_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride,        \
>> +                             const uint8_t *left,                       \
>> +                             const uint8_t *top)                        \
>> +{                                                                       \
>> +    __m128i tmp0, dst0;                                                 \
>> +    ptrdiff_t stride2 = dst_stride << 1;                                \
>> +    ptrdiff_t stride3 = stride2 + dst_stride;                           \
>> +    ptrdiff_t stride4 = stride2 << 1;                                   \
>> +                                                                        \
>> +    tmp0 = __lsx_vld(dir, 0);                                           \
>> +    dst0 = __lsx_vhaddw_hu_bu(tmp0, tmp0);                              \
>> +    dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);                              \
>> +    dst0 = __lsx_vhaddw_du_wu(dst0, dst0);                              \
>> +    dst0 = __lsx_vhaddw_qu_du(dst0, dst0);                              \
>> +    dst0 = __lsx_vsrari_w(dst0, 4);                                     \
>> +    dst0 = __lsx_vreplvei_b(dst0, 0);                                   \
>> +    LSX_ST_8(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst,       \
>> +             dst_stride, stride2, stride3, stride4);                    \
>> +    dst += stride4;                                                     \
>> +    LSX_ST_8(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst,       \
>> +             dst_stride, stride2, stride3, stride4);                    \
>> +}
>> +
>> +INTRA_DC_TL_16X16(top);
>> +INTRA_DC_TL_16X16(left);
>> +
>> +void ff_dc_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride,
>> + const uint8_t *src_left, const uint8_t *src_top)
>> +{
>> + __m128i tmp0, tmp1, tmp2, tmp3, dst0;
>> +
>> + DUP2_ARG2(__lsx_vld, src_top, 0, src_top, 16, tmp0, tmp1);
>> + DUP2_ARG2(__lsx_vld, src_left, 0, src_left, 16, tmp2, tmp3);
>> + DUP4_ARG2(__lsx_vhaddw_hu_bu, tmp0, tmp0, tmp1, tmp1, tmp2, tmp2,
>> + tmp3, tmp3, tmp0, tmp1, tmp2, tmp3);
>> + DUP2_ARG2(__lsx_vadd_h, tmp0, tmp1, tmp2, tmp3, tmp0, tmp1);
>> + dst0 = __lsx_vadd_h(tmp0, tmp1);
>> + dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);
>> + dst0 = __lsx_vhaddw_du_wu(dst0, dst0);
>> + dst0 = __lsx_vhaddw_qu_du(dst0, dst0);
>> + dst0 = __lsx_vsrari_w(dst0, 6);
>> + dst0 = __lsx_vreplvei_b(dst0, 0);
>> + LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,
>> + dst, dst_stride);
>> + LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,
>> + dst, dst_stride);
>> + LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,
>> + dst, dst_stride);
>> + LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,
>> + dst, dst_stride);
>> +}
>> +
>> +#define INTRA_DC_TL_32X32(dir)                                          \
>> +void ff_dc_##dir##_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride,        \
>> +                             const uint8_t *left,                       \
>> +                             const uint8_t *top)                        \
>> +{                                                                       \
>> +    __m128i tmp0, tmp1, dst0;                                           \
>> +                                                                        \
>> +    DUP2_ARG2(__lsx_vld, dir, 0, dir, 16, tmp0, tmp1);                  \
>> +    DUP2_ARG2(__lsx_vhaddw_hu_bu, tmp0, tmp0, tmp1, tmp1, tmp0, tmp1);  \
>> +    dst0 = __lsx_vadd_h(tmp0, tmp1);                                    \
>> +    dst0 = __lsx_vhaddw_wu_hu(dst0, dst0);                              \
>> +    dst0 = __lsx_vhaddw_du_wu(dst0, dst0);                              \
>> +    dst0 = __lsx_vhaddw_qu_du(dst0, dst0);                              \
>> +    dst0 = __lsx_vsrari_w(dst0, 5);                                     \
>> +    dst0 = __lsx_vreplvei_b(dst0, 0);                                   \
>> +    LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,         \
>> +                dst, dst_stride);                                       \
>> +    LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,         \
>> +                dst, dst_stride);                                       \
>> +    LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,         \
>> +                dst, dst_stride);                                       \
>> +    LSX_ST_8X16(dst0, dst0, dst0, dst0, dst0, dst0, dst0, dst0,         \
>> +                dst, dst_stride);                                       \
>> +}
>> +
>> +INTRA_DC_TL_32X32(top);
>> +INTRA_DC_TL_32X32(left);
>> +
>> +#define INTRA_PREDICT_VALDC_16X16_LSX(val)                              \
>> +void ff_dc_##val##_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride,        \
>> +                             const uint8_t *left, const uint8_t *top)   \
>> +{                                                                       \
>> +    __m128i out = __lsx_vldi(val);                                      \
>> +    ptrdiff_t stride2 = dst_stride << 1;                                \
>> +    ptrdiff_t stride3 = stride2 + dst_stride;                           \
>> +    ptrdiff_t stride4 = stride2 << 1;                                   \
>> +                                                                        \
>> +    LSX_ST_8(out, out, out, out, out, out, out, out, dst,               \
>> +             dst_stride, stride2, stride3, stride4);                    \
>> +    dst += stride4;                                                     \
>> +    LSX_ST_8(out, out, out, out, out, out, out, out, dst,               \
>> +             dst_stride, stride2, stride3, stride4);                    \
>> +}
>> +
>> +INTRA_PREDICT_VALDC_16X16_LSX(127);
>> +INTRA_PREDICT_VALDC_16X16_LSX(128);
>> +INTRA_PREDICT_VALDC_16X16_LSX(129);
>> +
>> +#define INTRA_PREDICT_VALDC_32X32_LSX(val)                                 \
>> +void ff_dc_##val##_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride,           \
>> +                             const uint8_t *left, const uint8_t *top)      \
>> +{                                                                          \
>> +    __m128i out = __lsx_vldi(val);                                         \
>> +                                                                           \
>> +    LSX_ST_8X16(out, out, out, out, out, out, out, out, dst, dst_stride);  \
>> +    LSX_ST_8X16(out, out, out, out, out, out, out, out, dst, dst_stride);  \
>> +    LSX_ST_8X16(out, out, out, out, out, out, out, out, dst, dst_stride);  \
>> +    LSX_ST_8X16(out, out, out, out, out, out, out, out, dst, dst_stride);  \
>> +}
>> +
>> +INTRA_PREDICT_VALDC_32X32_LSX(127);
>> +INTRA_PREDICT_VALDC_32X32_LSX(128);
>> +INTRA_PREDICT_VALDC_32X32_LSX(129);
>> +
>> +void ff_tm_4x4_lsx(uint8_t *dst, ptrdiff_t dst_stride,
>> + const uint8_t *src_left, const uint8_t *src_top_ptr)
>> +{
>> + uint8_t top_left = src_top_ptr[-1];
>> + __m128i tmp0, tmp1, tmp2, tmp3, reg0, reg1;
>> + __m128i src0, src1, src2, src3;
>> + __m128i dst0, dst1, dst2, dst3;
>> +
>> + reg0 = __lsx_vreplgr2vr_h(top_left);
>> + reg1 = __lsx_vld(src_top_ptr, 0);
>> + DUP4_ARG2(__lsx_vldrepl_b, src_left, 0, src_left, 1, src_left, 2,
>> src_left,
>> + 3, tmp3, tmp2, tmp1, tmp0);
>> + DUP4_ARG2(__lsx_vilvl_b, tmp0, reg1, tmp1, reg1, tmp2, reg1, tmp3,
>> reg1,
>> + src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vhaddw_hu_bu, src0, src0, src1, src1, src2, src2,
>> src3,
>> + src3, dst0, dst1, dst2, dst3);
>> + DUP4_ARG2(__lsx_vssub_hu, dst0, reg0, dst1, reg0, dst2, reg0,
>> dst3, reg0,
>> + dst0, dst1, dst2, dst3);
>> + DUP4_ARG2(__lsx_vsat_hu, dst0, 7, dst1, 7, dst2, 7, dst3, 7,
>> + dst0, dst1, dst2, dst3);
>> + DUP2_ARG2(__lsx_vpickev_b, dst1, dst0, dst3, dst2, dst0, dst1);
>> + __lsx_vstelm_w(dst0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(dst0, dst, 0, 2);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(dst1, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(dst1, dst, 0, 2);
>> +}
>> +
>> +void ff_tm_8x8_lsx(uint8_t *dst, ptrdiff_t dst_stride,
>> + const uint8_t *src_left, const uint8_t *src_top_ptr)
>> +{
>> + uint8_t top_left = src_top_ptr[-1];
>> + __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
>> + __m128i src0, src1, src2, src3, src4, src5, src6, src7;
>> + __m128i reg0, reg1;
>> +
>> + reg0 = __lsx_vreplgr2vr_h(top_left);
>> + reg1 = __lsx_vld(src_top_ptr, 0);
>> + DUP4_ARG2(__lsx_vldrepl_b, src_left, 0, src_left, 1, src_left, 2,
>> src_left,
>> + 3, tmp7, tmp6, tmp5, tmp4);
>> + DUP4_ARG2(__lsx_vldrepl_b, src_left, 4, src_left, 5, src_left, 6,
>> src_left,
>> + 7, tmp3, tmp2, tmp1, tmp0);
>> + DUP4_ARG2(__lsx_vilvl_b, tmp0, reg1, tmp1, reg1, tmp2, reg1, tmp3,
>> reg1,
>> + src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vilvl_b, tmp4, reg1, tmp5, reg1, tmp6, reg1, tmp7,
>> reg1,
>> + src4, src5, src6, src7);
>> + DUP4_ARG2(__lsx_vhaddw_hu_bu, src0, src0, src1, src1, src2, src2,
>> src3,
>> + src3, src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vhaddw_hu_bu, src4, src4, src5, src5, src6, src6,
>> src7,
>> + src7, src4, src5, src6, src7);
>> + DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0,
>> src3, reg0,
>> + src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0,
>> src7, reg0,
>> + src4, src5, src6, src7);
>> + DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7,
>> + src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7,
>> + src4, src5, src6, src7);
>> + DUP4_ARG2(__lsx_vpickev_b, src1, src0, src3, src2, src5, src4,
>> src7, src6,
>> + src0, src1, src2, src3);
>> + __lsx_vstelm_d(src0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(src0, dst, 0, 1);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(src1, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(src1, dst, 0, 1);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(src2, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(src2, dst, 0, 1);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(src3, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(src3, dst, 0, 1);
>> +}
>> +
>> +void ff_tm_16x16_lsx(uint8_t *dst, ptrdiff_t dst_stride,
>> +                     const uint8_t *src_left, const uint8_t *src_top_ptr)
>> +{
>> + uint8_t top_left = src_top_ptr[-1];
>> + __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
>> + __m128i tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15;
>> + __m128i src0, src1, src2, src3, src4, src5, src6, src7;
>> + __m128i reg0, reg1;
>> + ptrdiff_t stride2 = dst_stride << 1;
>> + ptrdiff_t stride3 = stride2 + dst_stride;
>> + ptrdiff_t stride4 = stride2 << 1;
>> +
>> + reg0 = __lsx_vreplgr2vr_h(top_left);
>> + reg1 = __lsx_vld(src_top_ptr, 0);
>> + DUP4_ARG2(__lsx_vldrepl_b, src_left, 0, src_left, 1, src_left, 2,
>> src_left,
>> + 3, tmp15, tmp14, tmp13, tmp12);
>> + DUP4_ARG2(__lsx_vldrepl_b, src_left, 4, src_left, 5, src_left, 6,
>> src_left,
>> + 7, tmp11, tmp10, tmp9, tmp8);
>> + DUP4_ARG2(__lsx_vldrepl_b, src_left, 8, src_left, 9, src_left, 10,
>> + src_left, 11, tmp7, tmp6, tmp5, tmp4);
>> + DUP4_ARG2(__lsx_vldrepl_b, src_left, 12, src_left, 13, src_left,
>> 14,
>> + src_left, 15, tmp3, tmp2, tmp1, tmp0);
>> + DUP4_ARG2(__lsx_vaddwev_h_bu, tmp0, reg1, tmp1, reg1, tmp2, reg1,
>> tmp3,
>> + reg1, src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vaddwod_h_bu, tmp0, reg1, tmp1, reg1, tmp2, reg1,
>> tmp3,
>> + reg1, src4, src5, src6, src7);
>> + DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0,
>> src3, reg0,
>> + src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0,
>> src7, reg0,
>> + src4, src5, src6, src7);
>> + DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7,
>> + src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7,
>> + src4, src5, src6, src7);
>> + DUP4_ARG2(__lsx_vpackev_b, src4, src0, src5, src1, src6, src2,
>> src7, src3,
>> + tmp0, tmp1, tmp2, tmp3);
>> + DUP4_ARG2(__lsx_vaddwev_h_bu, tmp4, reg1, tmp5, reg1, tmp6, reg1,
>> tmp7,
>> + reg1, src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vaddwod_h_bu, tmp4, reg1, tmp5, reg1, tmp6, reg1,
>> tmp7,
>> + reg1, src4, src5, src6, src7);
>> + DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0,
>> src3, reg0,
>> + src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0,
>> src7, reg0,
>> + src4, src5, src6, src7);
>> + DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7,
>> + src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7,
>> + src4, src5, src6, src7);
>> + DUP4_ARG2(__lsx_vpackev_b, src4, src0, src5, src1, src6, src2,
>> src7, src3,
>> + tmp4, tmp5, tmp6, tmp7);
>> + DUP4_ARG2(__lsx_vaddwev_h_bu, tmp8, reg1, tmp9, reg1, tmp10, reg1,
>> tmp11,
>> + reg1, src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vaddwod_h_bu, tmp8, reg1, tmp9, reg1, tmp10, reg1,
>> tmp11,
>> + reg1, src4, src5, src6, src7);
>> + DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0,
>> src3, reg0,
>> + src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0,
>> src7, reg0,
>> + src4, src5, src6, src7);
>> + DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7,
>> + src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7,
>> + src4, src5, src6, src7);
>> + DUP4_ARG2(__lsx_vpackev_b, src4, src0, src5, src1, src6, src2,
>> src7, src3,
>> + tmp8, tmp9, tmp10, tmp11);
>> + DUP4_ARG2(__lsx_vaddwev_h_bu, tmp12, reg1, tmp13, reg1, tmp14,
>> reg1,
>> + tmp15, reg1, src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vaddwod_h_bu, tmp12, reg1, tmp13, reg1, tmp14,
>> reg1,
>> + tmp15, reg1, src4, src5, src6, src7);
>> + DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0,
>> src3, reg0,
>> + src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0,
>> src7, reg0,
>> + src4, src5, src6, src7);
>> + DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7,
>> + src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7,
>> + src4, src5, src6, src7);
>> + DUP4_ARG2(__lsx_vpackev_b, src4, src0, src5, src1, src6, src2,
>> src7, src3,
>> + tmp12, tmp13, tmp14, tmp15);
>> + LSX_ST_8(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, dst,
>> + dst_stride, stride2, stride3, stride4);
>> + dst += stride4;
>> + LSX_ST_8(tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15, dst,
>> + dst_stride, stride2, stride3, stride4);
>> +}
>> +
>> +void ff_tm_32x32_lsx(uint8_t *dst, ptrdiff_t dst_stride,
>> +                     const uint8_t *src_left, const uint8_t *src_top_ptr)
>> +{
>> + uint8_t top_left = src_top_ptr[-1];
>> + uint32_t loop_cnt;
>> + __m128i tmp0, tmp1, tmp2, tmp3, reg0, reg1, reg2;
>> + __m128i src0, src1, src2, src3, src4, src5, src6, src7;
>> + __m128i dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
>> +
>> + reg0 = __lsx_vreplgr2vr_h(top_left);
>> + DUP2_ARG2(__lsx_vld, src_top_ptr, 0, src_top_ptr, 16, reg1, reg2);
>> +
>> + src_left += 28;
>> + for (loop_cnt = 8; loop_cnt--;) {
>> + DUP4_ARG2(__lsx_vldrepl_b, src_left, 0, src_left, 1, src_left,
>> 2,
>> + src_left, 3, tmp3, tmp2, tmp1, tmp0);
>> + src_left -= 4;
>> + DUP4_ARG2(__lsx_vaddwev_h_bu, tmp0, reg1, tmp1, reg1, tmp2,
>> reg1,
>> + tmp3, reg1, src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vaddwod_h_bu, tmp0, reg1, tmp1, reg1, tmp2,
>> reg1,
>> + tmp3, reg1, src4, src5, src6, src7);
>> + DUP4_ARG2(__lsx_vssub_hu, src0, reg0, src1, reg0, src2, reg0,
>> src3,
>> + reg0, src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vssub_hu, src4, reg0, src5, reg0, src6, reg0,
>> src7,
>> + reg0, src4, src5, src6, src7);
>> + DUP4_ARG2(__lsx_vaddwev_h_bu, tmp0, reg2, tmp1, reg2, tmp2,
>> reg2,
>> + tmp3, reg2, dst0, dst1, dst2, dst3);
>> + DUP4_ARG2(__lsx_vaddwod_h_bu, tmp0, reg2, tmp1, reg2, tmp2,
>> reg2,
>> + tmp3, reg2, dst4, dst5, dst6, dst7);
>> + DUP4_ARG2(__lsx_vssub_hu, dst0, reg0, dst1, reg0, dst2, reg0,
>> dst3,
>> + reg0, dst0, dst1, dst2, dst3);
>> + DUP4_ARG2(__lsx_vssub_hu, dst4, reg0, dst5, reg0, dst6, reg0,
>> dst7,
>> + reg0, dst4, dst5, dst6, dst7);
>> + DUP4_ARG2(__lsx_vsat_hu, src0, 7, src1, 7, src2, 7, src3, 7,
>> + src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vsat_hu, src4, 7, src5, 7, src6, 7, src7, 7,
>> + src4, src5, src6, src7);
>> + DUP4_ARG2(__lsx_vsat_hu, dst0, 7, dst1, 7, dst2, 7, dst3, 7,
>> + dst0, dst1, dst2, dst3);
>> + DUP4_ARG2(__lsx_vsat_hu, dst4, 7, dst5, 7, dst6, 7, dst7, 7,
>> + dst4, dst5, dst6, dst7);
>> + DUP4_ARG2(__lsx_vpackev_b, src4, src0, src5, src1, src6, src2,
>> src7,
>> + src3, src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vpackev_b, dst4, dst0, dst5, dst1, dst6, dst2,
>> dst7,
>> + dst3, dst0, dst1, dst2, dst3);
>> + __lsx_vst(src0, dst, 0);
>> + __lsx_vst(dst0, dst, 16);
>> + dst += dst_stride;
>> + __lsx_vst(src1, dst, 0);
>> + __lsx_vst(dst1, dst, 16);
>> + dst += dst_stride;
>> + __lsx_vst(src2, dst, 0);
>> + __lsx_vst(dst2, dst, 16);
>> + dst += dst_stride;
>> + __lsx_vst(src3, dst, 0);
>> + __lsx_vst(dst3, dst, 16);
>> + dst += dst_stride;
>> + }
>> +}
>> diff --git a/libavcodec/loongarch/vp9_mc_lsx.c b/libavcodec/loongarch/vp9_mc_lsx.c
>> new file mode 100644
>> index 0000000000..c6746fd87f
>> --- /dev/null
>> +++ b/libavcodec/loongarch/vp9_mc_lsx.c
>> @@ -0,0 +1,2480 @@
>> +/*
>> + * Copyright (c) 2021 Loongson Technology Corporation Limited
>> + * Contributed by Hao Chen <chenhao@loongson.cn>
>> + *
>> + * This file is part of FFmpeg.
>> + *
>> + * FFmpeg is free software; you can redistribute it and/or
>> + * modify it under the terms of the GNU Lesser General Public
>> + * License as published by the Free Software Foundation; either
>> + * version 2.1 of the License, or (at your option) any later version.
>> + *
>> + * FFmpeg is distributed in the hope that it will be useful,
>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
>> + * Lesser General Public License for more details.
>> + *
>> + * You should have received a copy of the GNU Lesser General Public
>> + * License along with FFmpeg; if not, write to the Free Software
>> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
>> + */
>> +
>> +#include "libavcodec/vp9dsp.h"
>> +#include "libavutil/loongarch/loongson_intrinsics.h"
>> +#include "vp9dsp_loongarch.h"
>> +
>> +static const uint8_t mc_filt_mask_arr[16 * 3] = {
>> + /* 8 width cases */
>> + 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8,
>> + /* 4 width cases */
>> + 0, 1, 1, 2, 2, 3, 3, 4, 16, 17, 17, 18, 18, 19, 19, 20,
>> + /* 4 width cases */
>> + 8, 9, 9, 10, 10, 11, 11, 12, 24, 25, 25, 26, 26, 27, 27, 28
>> +};
>> +
>> +
>> +#define HORIZ_8TAP_4WID_4VECS_FILT(_src0, _src1, _src2, _src3,              \
>> +                                   _mask0, _mask1, _mask2, _mask3,          \
>> +                                   _filter0, _filter1, _filter2, _filter3,  \
>> +                                   _out0, _out1)                            \
>> +{                                                                           \
>> +    __m128i _tmp0, _tmp1, _tmp2, _tmp3, _tmp4, _tmp5, _tmp6, _tmp7;         \
>> +    __m128i _reg0, _reg1, _reg2, _reg3;                                     \
>> +                                                                            \
>> +    DUP2_ARG3(__lsx_vshuf_b, _src1, _src0, _mask0, _src3, _src2, _mask0,    \
>> +              _tmp0, _tmp1);                                                \
>> +    DUP2_ARG2(__lsx_vdp2_h_b, _tmp0, _filter0, _tmp1, _filter0, _reg0, _reg1); \
>> +    DUP2_ARG3(__lsx_vshuf_b, _src1, _src0, _mask1, _src3, _src2, _mask1,    \
>> +              _tmp2, _tmp3);                                                \
>> +    DUP2_ARG3(__lsx_vdp2add_h_b, _reg0, _tmp2, _filter1, _reg1, _tmp3,      \
>> +              _filter1, _reg0, _reg1);                                      \
>> +    DUP2_ARG3(__lsx_vshuf_b, _src1, _src0, _mask2, _src3, _src2, _mask2,    \
>> +              _tmp4, _tmp5);                                                \
>> +    DUP2_ARG2(__lsx_vdp2_h_b, _tmp4, _filter2, _tmp5, _filter2, _reg2, _reg3); \
>> +    DUP2_ARG3(__lsx_vshuf_b, _src1, _src0, _mask3, _src3, _src2, _mask3,    \
>> +              _tmp6, _tmp7);                                                \
>> +    DUP2_ARG3(__lsx_vdp2add_h_b, _reg2, _tmp6, _filter3, _reg3, _tmp7,      \
>> +              _filter3, _reg2, _reg3);                                      \
>> +    DUP2_ARG2(__lsx_vsadd_h, _reg0, _reg2, _reg1, _reg3, _out0, _out1);     \
>> +}
>> +
>> +#define HORIZ_8TAP_8WID_4VECS_FILT(_src0, _src1, _src2, _src3,              \
>> +                                   _mask0, _mask1, _mask2, _mask3,          \
>> +                                   _filter0, _filter1, _filter2, _filter3,  \
>> +                                   _out0, _out1, _out2, _out3)              \
>> +{                                                                           \
>> +    __m128i _tmp0, _tmp1, _tmp2, _tmp3, _tmp4, _tmp5, _tmp6, _tmp7;         \
>> +    __m128i _reg0, _reg1, _reg2, _reg3, _reg4, _reg5, _reg6, _reg7;         \
>> +                                                                            \
>> +    DUP4_ARG3(__lsx_vshuf_b, _src0, _src0, _mask0, _src1, _src1, _mask0, _src2, \
>> +              _src2, _mask0, _src3, _src3, _mask0, _tmp0, _tmp1, _tmp2, _tmp3); \
>> +    DUP4_ARG2(__lsx_vdp2_h_b, _tmp0, _filter0, _tmp1, _filter0, _tmp2,      \
>> +              _filter0, _tmp3, _filter0, _reg0, _reg1, _reg2, _reg3);       \
>> +    DUP4_ARG3(__lsx_vshuf_b, _src0, _src0, _mask2, _src1, _src1, _mask2, _src2, \
>> +              _src2, _mask2, _src3, _src3, _mask2, _tmp0, _tmp1, _tmp2, _tmp3); \
>> +    DUP4_ARG2(__lsx_vdp2_h_b, _tmp0, _filter2, _tmp1, _filter2, _tmp2,      \
>> +              _filter2, _tmp3, _filter2, _reg4, _reg5, _reg6, _reg7);       \
>> +    DUP4_ARG3(__lsx_vshuf_b, _src0, _src0, _mask1, _src1, _src1, _mask1, _src2, \
>> +              _src2, _mask1, _src3, _src3, _mask1, _tmp4, _tmp5, _tmp6, _tmp7); \
>> +    DUP4_ARG3(__lsx_vdp2add_h_b, _reg0, _tmp4, _filter1, _reg1, _tmp5,      \
>> +              _filter1, _reg2, _tmp6, _filter1, _reg3, _tmp7, _filter1, _reg0, \
>> +              _reg1, _reg2, _reg3);                                         \
>> +    DUP4_ARG3(__lsx_vshuf_b, _src0, _src0, _mask3, _src1, _src1, _mask3, _src2, \
>> +              _src2, _mask3, _src3, _src3, _mask3, _tmp4, _tmp5, _tmp6, _tmp7); \
>> +    DUP4_ARG3(__lsx_vdp2add_h_b, _reg4, _tmp4, _filter3, _reg5, _tmp5,      \
>> +              _filter3, _reg6, _tmp6, _filter3, _reg7, _tmp7, _filter3, _reg4, \
>> +              _reg5, _reg6, _reg7);                                         \
>> +    DUP4_ARG2(__lsx_vsadd_h, _reg0, _reg4, _reg1, _reg5, _reg2, _reg6, _reg3, \
>> +              _reg7, _out0, _out1, _out2, _out3);                           \
>> +}
>> +
>> +#define FILT_8TAP_DPADD_S_H(_reg0, _reg1, _reg2, _reg3,
>> \
>> + _filter0, _filter1, _filter2, _filter3)
>> \
>> +( {
>> \
>> + __m128i _vec0, _vec1;
>> \
>> +
>> \
>> + _vec0 = __lsx_vdp2_h_b(_reg0, _filter0);
>> \
>> + _vec0 = __lsx_vdp2add_h_b(_vec0, _reg1, _filter1);
>> \
>> + _vec1 = __lsx_vdp2_h_b(_reg2, _filter2);
>> \
>> + _vec1 = __lsx_vdp2add_h_b(_vec1, _reg3, _filter3);
>> \
>> + _vec0 = __lsx_vsadd_h(_vec0, _vec1);
>> \
>> +
>> \
>> + _vec0;
>> \
>> +} )
>> +
>> +#define HORIZ_8TAP_FILT(_src0, _src1, _mask0, _mask1, _mask2, _mask3,
>> \
>> + _filt_h0, _filt_h1, _filt_h2, _filt_h3)
>> \
>> +( {
>> \
>> + __m128i _tmp0, _tmp1, _tmp2, _tmp3;
>> \
>> + __m128i _out;
>> \
>> +
>> \
>> + DUP4_ARG3(__lsx_vshuf_b, _src1, _src0, _mask0, _src1, _src0,
>> _mask1, _src1,\
>> + _src0, _mask2, _src1, _src0, _mask3, _tmp0, _tmp1,
>> _tmp2, _tmp3);\
>> + _out = FILT_8TAP_DPADD_S_H(_tmp0, _tmp1, _tmp2, _tmp3, _filt_h0,
>> _filt_h1, \
>> + _filt_h2, _filt_h3);
>> \
>> + _out = __lsx_vsrari_h(_out, 7);
>> \
>> + _out = __lsx_vsat_h(_out, 7);
>> \
>> +
>> \
>> + _out;
>> \
>> +} )
>> +
>> +#define LSX_LD_4(_src, _stride, _src0, _src1, _src2, _src3)
>> \
>> +{
>> \
>> + _src0 = __lsx_vld(_src, 0);
>> \
>> + _src += _stride;
>> \
>> + _src1 = __lsx_vld(_src, 0);
>> \
>> + _src += _stride;
>> \
>> + _src2 = __lsx_vld(_src, 0);
>> \
>> + _src += _stride;
>> \
>> + _src3 = __lsx_vld(_src, 0);
>> \
>> +}
>> +
>> +static void common_hz_8t_4x4_lsx(const uint8_t *src, int32_t
>> src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter)
>> +{
>> + __m128i src0, src1, src2, src3;
>> + __m128i filter0, filter1, filter2, filter3;
>> + __m128i mask0, mask1, mask2, mask3;
>> + __m128i out, out0, out1;
>> +
>> + mask0 = __lsx_vld(mc_filt_mask_arr, 16);
>> + src -= 3;
>> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
>> filter, 6,
>> + filter0, filter1, filter2, filter3);
>> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
>> + mask3 = __lsx_vaddi_bu(mask0, 6);
>> +
>> + LSX_LD_4(src, src_stride, src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3,
>> 128,
>> + src0, src1, src2, src3);
>> + HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1,
>> mask2,
>> + mask3, filter0, filter1, filter2, filter3, out0,
>> out1);
>> + out = __lsx_vssrarni_b_h(out1, out0, 7);
>> + out = __lsx_vxori_b(out, 128);
>> + __lsx_vstelm_w(out, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(out, dst, 0, 1);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(out, dst, 0, 2);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(out, dst, 0, 3);
>> +}
>> +
>> +static void common_hz_8t_4x8_lsx(const uint8_t *src, int32_t
>> src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter)
>> +{
>> + int32_t src_stride2 = src_stride << 1;
>> + int32_t src_stride3 = src_stride + src_stride2;
>> + int32_t src_stride4 = src_stride2 << 1;
>> + __m128i src0, src1, src2, src3;
>> + __m128i filter0, filter1, filter2, filter3;
>> + __m128i mask0, mask1, mask2, mask3;
>> + __m128i out0, out1, out2, out3;
>> + uint8_t *_src = (uint8_t*)src - 3;
>> +
>> + mask0 = __lsx_vld(mc_filt_mask_arr, 16);
>> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
>> + mask3 = __lsx_vaddi_bu(mask0, 6);
>> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
>> filter, 6,
>> + filter0, filter1, filter2, filter3);
>> +
>> + src0 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1,
>> src2);
>> + src3 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3,
>> 128,
>> + src0, src1, src2, src3);
>> + HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1,
>> mask2,
>> + mask3, filter0, filter1, filter2, filter3, out0,
>> out1);
>> + src0 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1,
>> src2);
>> + src3 = __lsx_vldx(_src, src_stride3);
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3,
>> 128,
>> + src0, src1, src2, src3);
>> + HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1,
>> mask2,
>> + mask3, filter0, filter1, filter2, filter3, out2,
>> out3);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7, out0,
>> out1);
>> + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
>> + __lsx_vstelm_w(out0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(out0, dst, 0, 1);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(out0, dst, 0, 2);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(out0, dst, 0, 3);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(out1, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(out1, dst, 0, 1);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(out1, dst, 0, 2);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(out1, dst, 0, 3);
>> +}
>> +
>> +static void common_hz_8t_4w_lsx(const uint8_t *src, int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter, int32_t height)
>> +{
>> + if (height == 4) {
>> + common_hz_8t_4x4_lsx(src, src_stride, dst, dst_stride, filter);
>> + } else if (height == 8) {
>> + common_hz_8t_4x8_lsx(src, src_stride, dst, dst_stride, filter);
>> + }
>> +}
>> +
>> +static void common_hz_8t_8x4_lsx(const uint8_t *src, int32_t
>> src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter)
>> +{
>> + __m128i src0, src1, src2, src3;
>> + __m128i filter0, filter1, filter2, filter3;
>> + __m128i mask0, mask1, mask2, mask3;
>> + __m128i out0, out1, out2, out3;
>> +
>> + mask0 = __lsx_vld(mc_filt_mask_arr, 0);
>> + src -= 3;
>> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
>> + mask3 = __lsx_vaddi_bu(mask0, 6);
>> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
>> filter, 6,
>> + filter0, filter1, filter2, filter3);
>> +
>> + LSX_LD_4(src, src_stride, src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3,
>> 128,
>> + src0, src1, src2, src3);
>> + HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1,
>> mask2,
>> + mask3, filter0, filter1, filter2, filter3, out0, out1, out2,
>> out3);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7, out0,
>> out1);
>> + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
>> + __lsx_vstelm_d(out0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(out0, dst, 0, 1);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(out1, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(out1, dst, 0, 1);
>> +}
>> +
>> +static void common_hz_8t_8x8mult_lsx(const uint8_t *src, int32_t
>> src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter, int32_t
>> height)
>> +{
>> + uint32_t loop_cnt = height >> 2;
>> + int32_t src_stride2 = src_stride << 1;
>> + int32_t src_stride3 = src_stride + src_stride2;
>> + int32_t src_stride4 = src_stride2 << 1;
>> + __m128i src0, src1, src2, src3;
>> + __m128i filter0, filter1, filter2, filter3;
>> + __m128i mask0, mask1, mask2, mask3;
>> + __m128i out0, out1, out2, out3;
>> + uint8_t* _src = (uint8_t*)src - 3;
>> +
>> + mask0 = __lsx_vld(mc_filt_mask_arr, 0);
>> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
>> + mask3 = __lsx_vaddi_bu(mask0, 6);
>> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
>> filter, 6,
>> + filter0, filter1, filter2, filter3);
>> +
>> + for (; loop_cnt--;) {
>> + src0 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2,
>> src1, src2);
>> + src3 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128,
>> src3, 128,
>> + src0, src1, src2, src3);
>> + HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0,
>> mask1, mask2,
>> + mask3, filter0, filter1, filter2, filter3, out0, out1,
>> out2, out3);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7,
>> out0, out1);
>> + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
>> + __lsx_vstelm_d(out0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(out0, dst, 0, 1);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(out1, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(out1, dst, 0, 1);
>> + dst += dst_stride;
>> + }
>> +}
>> +
>> +static void common_hz_8t_8w_lsx(const uint8_t *src, int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter, int32_t height)
>> +{
>> + if (height == 4) {
>> + common_hz_8t_8x4_lsx(src, src_stride, dst, dst_stride, filter);
>> + } else {
>> + common_hz_8t_8x8mult_lsx(src, src_stride, dst, dst_stride,
>> + filter, height);
>> + }
>> +}
>> +
>> +static void common_hz_8t_16w_lsx(const uint8_t *src, int32_t
>> src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter, int32_t height)
>> +{
>> + uint32_t loop_cnt = height >> 1;
>> + int32_t stride = src_stride << 1;
>> + __m128i src0, src1, src2, src3;
>> + __m128i filter0, filter1, filter2, filter3;
>> + __m128i mask0, mask1, mask2, mask3;
>> + __m128i out0, out1, out2, out3;
>> +
>> + mask0 = __lsx_vld(mc_filt_mask_arr, 0);
>> + src -= 3;
>> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
>> + mask3 = __lsx_vaddi_bu(mask0, 6);
>> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
>> filter, 6,
>> + filter0, filter1, filter2, filter3);
>> +
>> + for (; loop_cnt--;) {
>> + const uint8_t* _src = src + src_stride;
>> + DUP2_ARG2(__lsx_vld, src, 0, _src, 0, src0, src2);
>> + DUP2_ARG2(__lsx_vld, src, 8, _src, 8, src1, src3);
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128,
>> src3, 128,
>> + src0, src1, src2, src3);
>> + HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0,
>> mask1, mask2,
>> + mask3, filter0, filter1, filter2, filter3, out0, out1,
>> out2, out3);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7,
>> out0, out1);
>> + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
>> + __lsx_vst(out0, dst, 0);
>> + dst += dst_stride;
>> + __lsx_vst(out1, dst, 0);
>> + dst += dst_stride;
>> + src += stride;
>> + }
>> +}
>> +
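>> +/* Horizontal 8-tap filter for 32-pixel-wide blocks; each iteration filters
>> + * two rows, with two 16-byte stores per row. */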
>> +static void common_hz_8t_32w_lsx(const uint8_t *src, int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter, int32_t height)
>> +{
>> + uint32_t loop_cnt = height >> 1;
>> + __m128i src0, src1, src2, src3;
>> + __m128i filter0, filter1, filter2, filter3;
>> + __m128i mask0, mask1, mask2, mask3;
>> + __m128i out0, out1, out2, out3;
>> + __m128i shuff = {0x0F0E0D0C0B0A0908, 0x1716151413121110};
>> +
>> + mask0 = __lsx_vld(mc_filt_mask_arr, 0);
>> + src -= 3;
>> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
>> + mask3 = __lsx_vaddi_bu(mask0, 6);
>> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
>> filter, 6,
>> + filter0, filter1, filter2, filter3);
>> +
>> + for (; loop_cnt--;) {
>> + DUP2_ARG2(__lsx_vld, src, 0, src, 16, src0, src2);
>> + src3 = __lsx_vld(src, 24);
>> + src1 = __lsx_vshuf_b(src2, src0, shuff);
>> + src += src_stride;
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128,
>> src3, 128,
>> + src0, src1, src2, src3);
>> + HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0,
>> mask1, mask2,
>> + mask3, filter0, filter1, filter2, filter3, out0, out1,
>> out2, out3);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7,
>> out0, out1);
>> + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
>> + __lsx_vst(out0, dst, 0);
>> + __lsx_vst(out1, dst, 16);
>> +
>> + DUP2_ARG2(__lsx_vld, src, 0, src, 16, src0, src2);
>> + src3 = __lsx_vld(src, 24);
>> + src1 = __lsx_vshuf_b(src2, src0, shuff);
>> + src += src_stride;
>> +
>> + dst += dst_stride;
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128,
>> src3, 128,
>> + src0, src1, src2, src3);
>> + HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0,
>> mask1, mask2,
>> + mask3, filter0, filter1, filter2, filter3, out0, out1,
>> out2, out3);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7,
>> out0, out1);
>> + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
>> + __lsx_vst(out0, dst, 0);
>> + __lsx_vst(out1, dst, 16);
>> + dst += dst_stride;
>> + }
>> +}
>> +
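>> +/* Horizontal 8-tap filter for 64-pixel-wide blocks, one row (four 16-byte
>> + * stores) per iteration. */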
>> +static void common_hz_8t_64w_lsx(const uint8_t *src, int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter, int32_t height)
>> +{
>> + int32_t loop_cnt = height;
>> + __m128i src0, src1, src2, src3;
>> + __m128i filter0, filter1, filter2, filter3;
>> + __m128i mask0, mask1, mask2, mask3;
>> + __m128i out0, out1, out2, out3;
>> + __m128i shuff = {0x0F0E0D0C0B0A0908, 0x1716151413121110};
>> +
>> + mask0 = __lsx_vld(mc_filt_mask_arr, 0);
>> + src -= 3;
>> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
>> + mask3 = __lsx_vaddi_bu(mask0, 6);
>> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
>> filter, 6,
>> + filter0, filter1, filter2, filter3);
>> +
>> + for (; loop_cnt--;) {
>> + DUP2_ARG2(__lsx_vld, src, 0, src, 16, src0, src2);
>> + src3 = __lsx_vld(src, 24);
>> + src1 = __lsx_vshuf_b(src2, src0, shuff);
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128,
>> src3, 128,
>> + src0, src1, src2, src3);
>> + HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0,
>> mask1, mask2,
>> + mask3, filter0, filter1, filter2, filter3, out0, out1,
>> out2, out3);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7,
>> out0, out1);
>> + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
>> + __lsx_vst(out0, dst, 0);
>> + __lsx_vst(out1, dst, 16);
>> +
>> + DUP2_ARG2(__lsx_vld, src, 32, src, 48, src0, src2);
>> + src3 = __lsx_vld(src, 56);
>> + src1 = __lsx_vshuf_b(src2, src0, shuff);
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128,
>> src3, 128,
>> + src0, src1, src2, src3);
>> + HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0,
>> mask1, mask2,
>> + mask3, filter0, filter1, filter2, filter3, out0, out1,
>> out2, out3);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7,
>> out0, out1);
>> + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
>> + __lsx_vst(out0, dst, 32);
>> + __lsx_vst(out1, dst, 48);
>> + src += src_stride;
>> + dst += dst_stride;
>> + }
>> +}
>> +
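>> +/* Vertical 8-tap filter for 4-pixel-wide blocks: seven rows of context are
>> + * loaded before the loop, then four new rows are filtered per iteration. */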
>> +static void common_vt_8t_4w_lsx(const uint8_t *src, int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter, int32_t height)
>> +{
>> + uint32_t loop_cnt = height >> 2;
>> + int32_t src_stride2 = src_stride << 1;
>> + int32_t src_stride3 = src_stride + src_stride2;
>> + int32_t src_stride4 = src_stride2 << 1;
>> + __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8,
>> src9, src10;
>> + __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
>> + __m128i reg0, reg1, reg2, reg3, reg4;
>> + __m128i filter0, filter1, filter2, filter3;
>> + __m128i out0, out1;
>> + uint8_t* _src = (uint8_t*)src - src_stride3;
>> +
>> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
>> filter, 6,
>> + filter0, filter1, filter2, filter3);
>> + src0 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1,
>> src2);
>> + src3 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> + src4 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src5,
>> src6);
>> + _src += src_stride3;
>> + DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src5, src4, src2,
>> src1, tmp0,
>> + tmp1, tmp2, tmp3);
>> + DUP2_ARG2(__lsx_vilvl_b, src4, src3, src6, src5, tmp4, tmp5);
>> + DUP2_ARG2(__lsx_vilvl_d, tmp3, tmp0, tmp4, tmp1, reg0, reg1);
>> + reg2 = __lsx_vilvl_d(tmp5, tmp2);
>> + DUP2_ARG2(__lsx_vxori_b, reg0, 128, reg1, 128, reg0, reg1);
>> + reg2 = __lsx_vxori_b(reg2, 128);
>> +
>> + for (;loop_cnt--;) {
>> + src7 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2,
>> src8, src9);
>> + src10 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> + DUP4_ARG2(__lsx_vilvl_b, src7, src6, src8, src7, src9, src8,
>> src10,
>> + src9, tmp0, tmp1, tmp2, tmp3);
>> + DUP2_ARG2(__lsx_vilvl_d, tmp1, tmp0, tmp3, tmp2, reg3, reg4);
>> + DUP2_ARG2(__lsx_vxori_b, reg3, 128, reg4, 128, reg3, reg4);
>> + out0 = FILT_8TAP_DPADD_S_H(reg0, reg1, reg2, reg3, filter0,
>> filter1,
>> + filter2, filter3);
>> + out1 = FILT_8TAP_DPADD_S_H(reg1, reg2, reg3, reg4, filter0,
>> filter1,
>> + filter2, filter3);
>> + out0 = __lsx_vssrarni_b_h(out1, out0, 7);
>> + out0 = __lsx_vxori_b(out0, 128);
>> + __lsx_vstelm_w(out0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(out0, dst, 0, 1);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(out0, dst, 0, 2);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(out0, dst, 0, 3);
>> + dst += dst_stride;
>> +
>> + reg0 = reg2;
>> + reg1 = reg3;
>> + reg2 = reg4;
>> + src6 = src10;
>> + }
>> +}
>> +
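>> +/* Vertical 8-tap filter for 8-pixel-wide blocks, four rows per iteration. */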
>> +static void common_vt_8t_8w_lsx(const uint8_t *src, int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter, int32_t height)
>> +{
>> + uint32_t loop_cnt = height >> 2;
>> + __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8,
>> src9, src10;
>> + __m128i tmp0, tmp1, tmp2, tmp3;
>> + __m128i reg0, reg1, reg2, reg3, reg4, reg5;
>> + __m128i filter0, filter1, filter2, filter3;
>> + __m128i out0, out1, out2, out3;
>> + int32_t src_stride2 = src_stride << 1;
>> + int32_t src_stride3 = src_stride + src_stride2;
>> + int32_t src_stride4 = src_stride2 << 1;
>> + uint8_t* _src = (uint8_t*)src - src_stride3;
>> +
>> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
>> filter, 6,
>> + filter0, filter1, filter2, filter3);
>> +
>> + src0 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1,
>> src2);
>> + src3 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> + src4 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src5,
>> src6);
>> + _src += src_stride3;
>> +
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3,
>> 128,
>> + src0, src1, src2, src3);
>> + DUP2_ARG2(__lsx_vxori_b, src4, 128, src5, 128, src4, src5);
>> + src6 = __lsx_vxori_b(src6, 128);
>> + DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src5, src4, src2,
>> src1,
>> + reg0, reg1, reg2, reg3);
>> + DUP2_ARG2(__lsx_vilvl_b, src4, src3, src6, src5, reg4, reg5);
>> +
>> + for (;loop_cnt--;) {
>> + src7 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2,
>> src8, src9);
>> + src10 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> + DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128,
>> src10, 128,
>> + src7, src8, src9, src10);
>> + DUP4_ARG2(__lsx_vilvl_b, src7, src6, src8, src7, src9, src8,
>> src10,
>> + src9, tmp0, tmp1, tmp2, tmp3);
>> + out0 = FILT_8TAP_DPADD_S_H(reg0, reg1, reg2, tmp0, filter0,
>> filter1,
>> + filter2, filter3);
>> + out1 = FILT_8TAP_DPADD_S_H(reg3, reg4, reg5, tmp1, filter0,
>> filter1,
>> + filter2, filter3);
>> + out2 = FILT_8TAP_DPADD_S_H(reg1, reg2, tmp0, tmp2, filter0,
>> filter1,
>> + filter2, filter3);
>> + out3 = FILT_8TAP_DPADD_S_H(reg4, reg5, tmp1, tmp3, filter0,
>> filter1,
>> + filter2, filter3);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7,
>> out0, out1);
>> + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
>> + __lsx_vstelm_d(out0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(out0, dst, 0, 1);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(out1, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(out1, dst, 0, 1);
>> + dst += dst_stride;
>> +
>> + reg0 = reg2;
>> + reg1 = tmp0;
>> + reg2 = tmp2;
>> + reg3 = reg5;
>> + reg4 = tmp1;
>> + reg5 = tmp3;
>> + src6 = src10;
>> + }
>> +}
>> +
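>> +/* Vertical 8-tap filter for 16-pixel-wide blocks; the low and high byte
>> + * halves of each row are interleaved and filtered separately. */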
>> +static void common_vt_8t_16w_lsx(const uint8_t *src, int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter, int32_t height)
>> +{
>> + uint32_t loop_cnt = height >> 2;
>> + __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8,
>> src9, src10;
>> + __m128i filter0, filter1, filter2, filter3;
>> + __m128i reg0, reg1, reg2, reg3, reg4, reg5;
>> + __m128i reg6, reg7, reg8, reg9, reg10, reg11;
>> + __m128i tmp0, tmp1, tmp2, tmp3;
>> + int32_t src_stride2 = src_stride << 1;
>> + int32_t src_stride3 = src_stride + src_stride2;
>> + int32_t src_stride4 = src_stride2 << 1;
>> + uint8_t* _src = (uint8_t*)src - src_stride3;
>> +
>> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
>> filter, 6,
>> + filter0, filter1, filter2, filter3);
>> + src0 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1,
>> src2);
>> + src3 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> + src4 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src5,
>> src6);
>> + _src += src_stride3;
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3,
>> 128,
>> + src0, src1, src2, src3);
>> + DUP2_ARG2(__lsx_vxori_b, src4, 128, src5, 128, src4, src5);
>> + src6 = __lsx_vxori_b(src6, 128);
>> + DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src5, src4, src2,
>> src1,
>> + reg0, reg1, reg2, reg3);
>> + DUP2_ARG2(__lsx_vilvl_b, src4, src3, src6, src5, reg4, reg5);
>> + DUP4_ARG2(__lsx_vilvh_b, src1, src0, src3, src2, src5, src4, src2,
>> src1,
>> + reg6, reg7, reg8, reg9);
>> + DUP2_ARG2(__lsx_vilvh_b, src4, src3, src6, src5, reg10, reg11);
>> +
>> + for (;loop_cnt--;) {
>> + src7 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2,
>> src8, src9);
>> + src10 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> + DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128,
>> src10, 128,
>> + src7, src8, src9, src10);
>> + DUP4_ARG2(__lsx_vilvl_b, src7, src6, src8, src7, src9, src8,
>> src10, src9,
>> + src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vilvh_b, src7, src6, src8, src7, src9, src8,
>> src10, src9,
>> + src4, src5, src7, src8);
>> + tmp0 = FILT_8TAP_DPADD_S_H(reg0, reg1, reg2, src0, filter0,
>> filter1,
>> + filter2, filter3);
>> + tmp1 = FILT_8TAP_DPADD_S_H(reg3, reg4, reg5, src1, filter0,
>> filter1,
>> + filter2, filter3);
>> + tmp2 = FILT_8TAP_DPADD_S_H(reg6, reg7, reg8, src4, filter0,
>> filter1,
>> + filter2, filter3);
>> + tmp3 = FILT_8TAP_DPADD_S_H(reg9, reg10, reg11, src5, filter0,
>> filter1,
>> + filter2, filter3);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, tmp2, tmp0, 7, tmp3, tmp1, 7,
>> tmp0, tmp1);
>> + DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
>> + __lsx_vst(tmp0, dst, 0);
>> + dst += dst_stride;
>> + __lsx_vst(tmp1, dst, 0);
>> + dst += dst_stride;
>> + tmp0 = FILT_8TAP_DPADD_S_H(reg1, reg2, src0, src2, filter0,
>> filter1,
>> + filter2, filter3);
>> + tmp1 = FILT_8TAP_DPADD_S_H(reg4, reg5, src1, src3, filter0,
>> filter1,
>> + filter2, filter3);
>> + tmp2 = FILT_8TAP_DPADD_S_H(reg7, reg8, src4, src7, filter0,
>> filter1,
>> + filter2, filter3);
>> + tmp3 = FILT_8TAP_DPADD_S_H(reg10, reg11, src5, src8, filter0,
>> filter1,
>> + filter2, filter3);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, tmp2, tmp0, 7, tmp3, tmp1, 7,
>> tmp0, tmp1);
>> + DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
>> + __lsx_vst(tmp0, dst, 0);
>> + dst += dst_stride;
>> + __lsx_vst(tmp1, dst, 0);
>> + dst += dst_stride;
>> +
>> + reg0 = reg2;
>> + reg1 = src0;
>> + reg2 = src2;
>> + reg3 = reg5;
>> + reg4 = src1;
>> + reg5 = src3;
>> + reg6 = reg8;
>> + reg7 = src4;
>> + reg8 = src7;
>> + reg9 = reg11;
>> + reg10 = src5;
>> + reg11 = src8;
>> + src6 = src10;
>> + }
>> +}
>> +
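>> +/* Vertical 8-tap filter for widths that are multiples of 16: the 16-wide
>> + * loop is repeated for each 16-column strip (used by the 32- and 64-wide
>> + * wrappers below). */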
>> +static void common_vt_8t_16w_mult_lsx(const uint8_t *src, int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter, int32_t height,
>> + int32_t width)
>> +{
>> + uint8_t *src_tmp;
>> + uint8_t *dst_tmp;
>> + uint32_t cnt = width >> 4;
>> + __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8,
>> src9, src10;
>> + __m128i filter0, filter1, filter2, filter3;
>> + __m128i reg0, reg1, reg2, reg3, reg4, reg5;
>> + __m128i reg6, reg7, reg8, reg9, reg10, reg11;
>> + __m128i tmp0, tmp1, tmp2, tmp3;
>> + int32_t src_stride2 = src_stride << 1;
>> + int32_t src_stride3 = src_stride + src_stride2;
>> + int32_t src_stride4 = src_stride2 << 1;
>> + int32_t dst_stride2 = dst_stride << 1;
>> + int32_t dst_stride3 = dst_stride2 + dst_stride;
>> + int32_t dst_stride4 = dst_stride2 << 1;
>> + uint8_t* _src = (uint8_t*)src - src_stride3;
>> +
>> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
>> filter, 6,
>> + filter0, filter1, filter2, filter3);
>> + for (;cnt--;) {
>> + uint32_t loop_cnt = height >> 2;
>> +
>> + src_tmp = _src;
>> + dst_tmp = dst;
>> +
>> + src0 = __lsx_vld(src_tmp, 0);
>> + DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp,
>> src_stride2,
>> + src1, src2);
>> + src3 = __lsx_vldx(src_tmp, src_stride3);
>> + src_tmp += src_stride4;
>> + src4 = __lsx_vld(src_tmp, 0);
>> + DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp,
>> src_stride2,
>> + src5, src6);
>> + src_tmp += src_stride3;
>> +
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128,
>> src3, 128,
>> + src0, src1, src2, src3);
>> + DUP2_ARG2(__lsx_vxori_b, src4, 128, src5, 128, src4, src5);
>> + src6 = __lsx_vxori_b(src6, 128);
>> + DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src5, src4,
>> src2, src1,
>> + reg0, reg1, reg2, reg3);
>> + DUP2_ARG2(__lsx_vilvl_b, src4, src3, src6, src5, reg4, reg5);
>> + DUP4_ARG2(__lsx_vilvh_b, src1, src0, src3, src2, src5, src4,
>> src2, src1,
>> + reg6, reg7, reg8, reg9);
>> + DUP2_ARG2(__lsx_vilvh_b, src4, src3, src6, src5, reg10, reg11);
>> +
>> + for (;loop_cnt--;) {
>> + src7 = __lsx_vld(src_tmp, 0);
>> + DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp,
>> src_stride2,
>> + src8, src9);
>> + src10 = __lsx_vldx(src_tmp, src_stride3);
>> + src_tmp += src_stride4;
>> + DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128,
>> src10,
>> + 128, src7, src8, src9, src10);
>> + DUP4_ARG2(__lsx_vilvl_b, src7, src6, src8, src7, src9,
>> src8,
>> + src10, src9, src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vilvh_b, src7, src6, src8, src7, src9,
>> src8,
>> + src10, src9, src4, src5, src7, src8);
>> + tmp0 = FILT_8TAP_DPADD_S_H(reg0, reg1, reg2, src0, filter0,
>> + filter1, filter2, filter3);
>> + tmp1 = FILT_8TAP_DPADD_S_H(reg3, reg4, reg5, src1, filter0,
>> + filter1, filter2, filter3);
>> + tmp2 = FILT_8TAP_DPADD_S_H(reg6, reg7, reg8, src4, filter0,
>> + filter1, filter2, filter3);
>> + tmp3 = FILT_8TAP_DPADD_S_H(reg9, reg10, reg11, src5,
>> filter0,
>> + filter1, filter2, filter3);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, tmp2, tmp0, 7, tmp3, tmp1, 7,
>> + tmp0, tmp1);
>> + DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
>> + __lsx_vst(tmp0, dst_tmp, 0);
>> + __lsx_vstx(tmp1, dst_tmp, dst_stride);
>> + tmp0 = FILT_8TAP_DPADD_S_H(reg1, reg2, src0, src2, filter0,
>> + filter1, filter2, filter3);
>> + tmp1 = FILT_8TAP_DPADD_S_H(reg4, reg5, src1, src3, filter0,
>> + filter1, filter2, filter3);
>> + tmp2 = FILT_8TAP_DPADD_S_H(reg7, reg8, src4, src7, filter0,
>> + filter1, filter2, filter3);
>> + tmp3 = FILT_8TAP_DPADD_S_H(reg10, reg11, src5, src8,
>> filter0,
>> + filter1, filter2, filter3);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, tmp2, tmp0, 7, tmp3, tmp1, 7,
>> + tmp0, tmp1);
>> + DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
>> + __lsx_vstx(tmp0, dst_tmp, dst_stride2);
>> + __lsx_vstx(tmp1, dst_tmp, dst_stride3);
>> + dst_tmp += dst_stride4;
>> +
>> + reg0 = reg2;
>> + reg1 = src0;
>> + reg2 = src2;
>> + reg3 = reg5;
>> + reg4 = src1;
>> + reg5 = src3;
>> + reg6 = reg8;
>> + reg7 = src4;
>> + reg8 = src7;
>> + reg9 = reg11;
>> + reg10 = src5;
>> + reg11 = src8;
>> + src6 = src10;
>> + }
>> + _src += 16;
>> + dst += 16;
>> + }
>> +}
>> +
>> +static void common_vt_8t_32w_lsx(const uint8_t *src, int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter, int32_t height)
>> +{
>> + common_vt_8t_16w_mult_lsx(src, src_stride, dst, dst_stride,
>> + filter, height, 32);
>> +}
>> +
>> +static void common_vt_8t_64w_lsx(const uint8_t *src, int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter, int32_t height)
>> +{
>> + common_vt_8t_16w_mult_lsx(src, src_stride, dst, dst_stride,
>> + filter, height, 64);
>> +}
>> +
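>> +/* Combined 8-tap filter (horizontal pass followed by vertical pass) for
>> + * 4-pixel-wide blocks. */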
>> +static void common_hv_8ht_8vt_4w_lsx(const uint8_t *src, int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter_horiz,
>> + const int8_t *filter_vert,
>> + int32_t height)
>> +{
>> + uint32_t loop_cnt = height >> 2;
>> + __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8,
>> src9, src10;
>> + __m128i filt_hz0, filt_hz1, filt_hz2, filt_hz3;
>> + __m128i filt_vt0, filt_vt1, filt_vt2, filt_vt3;
>> + __m128i mask0, mask1, mask2, mask3;
>> + __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
>> + __m128i out0, out1;
>> + __m128i shuff = {0x0F0E0D0C0B0A0908, 0x1716151413121110};
>> + int32_t src_stride2 = src_stride << 1;
>> + int32_t src_stride3 = src_stride + src_stride2;
>> + int32_t src_stride4 = src_stride2 << 1;
>> + uint8_t* _src = (uint8_t*)src - src_stride3 - 3;
>> +
>> + mask0 = __lsx_vld(mc_filt_mask_arr, 16);
>> + DUP4_ARG2(__lsx_vldrepl_h, filter_horiz, 0, filter_horiz, 2,
>> filter_horiz, 4,
>> + filter_horiz, 6, filt_hz0, filt_hz1, filt_hz2, filt_hz3);
>> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
>> + mask3 = __lsx_vaddi_bu(mask0, 6);
>> +
>> + src0 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1,
>> src2);
>> + src3 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> + src4 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src5,
>> src6);
>> + _src += src_stride3;
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3,
>> 128,
>> + src0, src1, src2, src3);
>> + DUP2_ARG2(__lsx_vxori_b, src4, 128, src5, 128, src4, src5);
>> + src6 = __lsx_vxori_b(src6, 128);
>> +
>> + tmp0 = HORIZ_8TAP_FILT(src0, src1, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + tmp2 = HORIZ_8TAP_FILT(src2, src3, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + tmp4 = HORIZ_8TAP_FILT(src4, src5, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + tmp5 = HORIZ_8TAP_FILT(src5, src6, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + DUP2_ARG3(__lsx_vshuf_b, tmp2, tmp0, shuff, tmp4, tmp2, shuff,
>> tmp1, tmp3);
>> + DUP4_ARG2(__lsx_vldrepl_h, filter_vert, 0, filter_vert, 2,
>> filter_vert, 4,
>> + filter_vert, 6, filt_vt0, filt_vt1, filt_vt2, filt_vt3);
>> + DUP2_ARG2(__lsx_vpackev_b, tmp1, tmp0, tmp3, tmp2, tmp0, tmp1);
>> + tmp2 = __lsx_vpackev_b(tmp5, tmp4);
>> +
>> + for (;loop_cnt--;) {
>> + src7 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2,
>> src8, src9);
>> + src10 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> + DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128,
>> src10, 128,
>> + src7, src8, src9, src10);
>> + tmp3 = HORIZ_8TAP_FILT(src7, src8, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + tmp4 = __lsx_vshuf_b(tmp3, tmp5, shuff);
>> + tmp4 = __lsx_vpackev_b(tmp3, tmp4);
>> + out0 = FILT_8TAP_DPADD_S_H(tmp0, tmp1, tmp2, tmp4, filt_vt0,
>> filt_vt1,
>> + filt_vt2, filt_vt3);
>> + src1 = HORIZ_8TAP_FILT(src9, src10, mask0, mask1, mask2, mask3,
>> + filt_hz0, filt_hz1, filt_hz2, filt_hz3);
>> + src0 = __lsx_vshuf_b(src1, tmp3, shuff);
>> + src0 = __lsx_vpackev_b(src1, src0);
>> + out1 = FILT_8TAP_DPADD_S_H(tmp1, tmp2, tmp4, src0, filt_vt0,
>> filt_vt1,
>> + filt_vt2, filt_vt3);
>> + out0 = __lsx_vssrarni_b_h(out1, out0, 7);
>> + out0 = __lsx_vxori_b(out0, 128);
>> + __lsx_vstelm_w(out0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(out0, dst, 0, 1);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(out0, dst, 0, 2);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(out0, dst, 0, 3);
>> + dst += dst_stride;
>> +
>> + tmp5 = src1;
>> + tmp0 = tmp2;
>> + tmp1 = tmp4;
>> + tmp2 = src0;
>> + }
>> +}
>> +
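>> +/* Combined horizontal + vertical 8-tap filter for 8-pixel-wide blocks; the
>> + * 16/32/64-wide variants below call it once per 8-column strip. */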
>> +static void common_hv_8ht_8vt_8w_lsx(const uint8_t *src, int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter_horiz,
>> + const int8_t *filter_vert,
>> + int32_t height)
>> +{
>> + uint32_t loop_cnt = height >> 2;
>> + __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8,
>> src9, src10;
>> + __m128i filt_hz0, filt_hz1, filt_hz2, filt_hz3;
>> + __m128i filt_vt0, filt_vt1, filt_vt2, filt_vt3;
>> + __m128i mask0, mask1, mask2, mask3;
>> + __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
>> + __m128i out0, out1;
>> + int32_t src_stride2 = src_stride << 1;
>> + int32_t src_stride3 = src_stride + src_stride2;
>> + int32_t src_stride4 = src_stride2 << 1;
>> + uint8_t* _src = (uint8_t*)src - src_stride3 - 3;
>> +
>> + mask0 = __lsx_vld(mc_filt_mask_arr, 0);
>> + DUP4_ARG2(__lsx_vldrepl_h, filter_horiz, 0, filter_horiz, 2,
>> filter_horiz,
>> + 4, filter_horiz, 6, filt_hz0, filt_hz1, filt_hz2,
>> filt_hz3);
>> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
>> + mask3 = __lsx_vaddi_bu(mask0, 6);
>> +
>> + src0 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1,
>> src2);
>> + src3 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> + src4 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src5,
>> src6);
>> + _src += src_stride3;
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3,
>> 128,
>> + src0, src1, src2, src3);
>> + DUP2_ARG2(__lsx_vxori_b, src4, 128, src5, 128, src4, src5);
>> + src6 = __lsx_vxori_b(src6, 128);
>> +
>> + src0 = HORIZ_8TAP_FILT(src0, src0, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + src1 = HORIZ_8TAP_FILT(src1, src1, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + src2 = HORIZ_8TAP_FILT(src2, src2, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + src3 = HORIZ_8TAP_FILT(src3, src3, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + src4 = HORIZ_8TAP_FILT(src4, src4, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + src5 = HORIZ_8TAP_FILT(src5, src5, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + src6 = HORIZ_8TAP_FILT(src6, src6, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> +
>> + DUP4_ARG2(__lsx_vldrepl_h, filter_vert, 0, filter_vert, 2,
>> filter_vert, 4,
>> + filter_vert, 6, filt_vt0, filt_vt1, filt_vt2, filt_vt3);
>> + DUP4_ARG2(__lsx_vpackev_b, src1, src0, src3, src2, src5, src4,
>> + src2, src1, tmp0, tmp1, tmp2, tmp4);
>> + DUP2_ARG2(__lsx_vpackev_b, src4, src3, src6, src5, tmp5, tmp6);
>> +
>> + for (;loop_cnt--;) {
>> + src7 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2,
>> src8, src9);
>> + src10 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> +
>> + DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128,
>> src10, 128,
>> + src7, src8, src9, src10);
>> + src7 = HORIZ_8TAP_FILT(src7, src7, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + tmp3 = __lsx_vpackev_b(src7, src6);
>> + out0 = FILT_8TAP_DPADD_S_H(tmp0, tmp1, tmp2, tmp3, filt_vt0,
>> filt_vt1,
>> + filt_vt2, filt_vt3);
>> + src8 = HORIZ_8TAP_FILT(src8, src8, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + src0 = __lsx_vpackev_b(src8, src7);
>> + out1 = FILT_8TAP_DPADD_S_H(tmp4, tmp5, tmp6, src0, filt_vt0,
>> filt_vt1,
>> + filt_vt2, filt_vt3);
>> + src9 = HORIZ_8TAP_FILT(src9, src9, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + src1 = __lsx_vpackev_b(src9, src8);
>> + src3 = FILT_8TAP_DPADD_S_H(tmp1, tmp2, tmp3, src1, filt_vt0,
>> filt_vt1,
>> + filt_vt2, filt_vt3);
>> + src10 = HORIZ_8TAP_FILT(src10, src10, mask0, mask1, mask2,
>> mask3,
>> + filt_hz0, filt_hz1, filt_hz2,
>> filt_hz3);
>> + src2 = __lsx_vpackev_b(src10, src9);
>> + src4 = FILT_8TAP_DPADD_S_H(tmp5, tmp6, src0, src2, filt_vt0,
>> filt_vt1,
>> + filt_vt2, filt_vt3);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, src4, src3, 7,
>> out0, out1);
>> + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
>> + __lsx_vstelm_d(out0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(out0, dst, 0, 1);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(out1, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(out1, dst, 0, 1);
>> + dst += dst_stride;
>> +
>> + src6 = src10;
>> + tmp0 = tmp2;
>> + tmp1 = tmp3;
>> + tmp2 = src1;
>> + tmp4 = tmp6;
>> + tmp5 = src0;
>> + tmp6 = src2;
>> + }
>> +}
>> +
>> +static void common_hv_8ht_8vt_16w_lsx(const uint8_t *src, int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter_horiz,
>> + const int8_t *filter_vert,
>> + int32_t height)
>> +{
>> + int32_t multiple8_cnt;
>> +
>> + for (multiple8_cnt = 2; multiple8_cnt--;) {
>> + common_hv_8ht_8vt_8w_lsx(src, src_stride, dst, dst_stride, filter_horiz,
>> + filter_vert, height);
>> + src += 8;
>> + dst += 8;
>> + }
>> +}
>> +
>> +static void common_hv_8ht_8vt_32w_lsx(const uint8_t *src, int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter_horiz,
>> + const int8_t *filter_vert,
>> + int32_t height)
>> +{
>> + int32_t multiple8_cnt;
>> +
>> + for (multiple8_cnt = 4; multiple8_cnt--;) {
>> + common_hv_8ht_8vt_8w_lsx(src, src_stride, dst, dst_stride, filter_horiz,
>> + filter_vert, height);
>> + src += 8;
>> + dst += 8;
>> + }
>> +}
>> +
>> +static void common_hv_8ht_8vt_64w_lsx(const uint8_t *src, int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter_horiz,
>> + const int8_t *filter_vert,
>> + int32_t height)
>> +{
>> + int32_t multiple8_cnt;
>> +
>> + for (multiple8_cnt = 8; multiple8_cnt--;) {
>> + common_hv_8ht_8vt_8w_lsx(src, src_stride, dst, dst_stride, filter_horiz,
>> + filter_vert, height);
>> + src += 8;
>> + dst += 8;
>> + }
>> +}
>> +
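>> +/* Plain block-copy helpers (no filtering) for widths 8/16/32/64, four rows
>> + * per iteration. */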
>> +static void copy_width8_lsx(const uint8_t *src, int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + int32_t height)
>> +{
>> + int32_t cnt = height >> 2;
>> + __m128i src0, src1, src2, src3;
>> +
>> + for (;cnt--;) {
>> + src0 = __lsx_vldrepl_d(src, 0);
>> + src += src_stride;
>> + src1 = __lsx_vldrepl_d(src, 0);
>> + src += src_stride;
>> + src2 = __lsx_vldrepl_d(src, 0);
>> + src += src_stride;
>> + src3 = __lsx_vldrepl_d(src, 0);
>> + src += src_stride;
>> + __lsx_vstelm_d(src0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(src1, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(src2, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(src3, dst, 0, 0);
>> + dst += dst_stride;
>> + }
>> +}
>> +
>> +static void copy_width16_lsx(const uint8_t *src, int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + int32_t height)
>> +{
>> + int32_t cnt = height >> 2;
>> + __m128i src0, src1, src2, src3;
>> + int32_t src_stride2 = src_stride << 1;
>> + int32_t src_stride3 = src_stride + src_stride2;
>> + int32_t src_stride4 = src_stride2 << 1;
>> + int32_t dst_stride2 = dst_stride << 1;
>> + int32_t dst_stride3 = dst_stride2 + dst_stride;
>> + int32_t dst_stride4 = dst_stride2 << 1;
>> + uint8_t *_src = (uint8_t*)src;
>> +
>> + for (;cnt--;) {
>> + src0 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2,
>> src1, src2);
>> + src3 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> + __lsx_vst(src0, dst, 0);
>> + __lsx_vstx(src1, dst, dst_stride);
>> + __lsx_vstx(src2, dst, dst_stride2);
>> + __lsx_vstx(src3, dst, dst_stride3);
>> + dst += dst_stride4;
>> + }
>> +}
>> +
>> +static void copy_width32_lsx(const uint8_t *src, int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + int32_t height)
>> +{
>> + int32_t cnt = height >> 2;
>> + uint8_t *src_tmp1 = (uint8_t*)src;
>> + uint8_t *dst_tmp1 = dst;
>> + uint8_t *src_tmp2 = src_tmp1 + 16;
>> + uint8_t *dst_tmp2 = dst_tmp1 + 16;
>> + int32_t src_stride2 = src_stride << 1;
>> + int32_t src_stride3 = src_stride + src_stride2;
>> + int32_t src_stride4 = src_stride2 << 1;
>> + int32_t dst_stride2 = dst_stride << 1;
>> + int32_t dst_stride3 = dst_stride2 + dst_stride;
>> + int32_t dst_stride4 = dst_stride2 << 1;
>> + __m128i src0, src1, src2, src3, src4, src5, src6, src7;
>> +
>> + for (;cnt--;) {
>> + src0 = __lsx_vld(src_tmp1, 0);
>> + DUP2_ARG2(__lsx_vldx, src_tmp1, src_stride, src_tmp1,
>> src_stride2,
>> + src1, src2);
>> + src3 = __lsx_vldx(src_tmp1, src_stride3);
>> + src_tmp1 += src_stride4;
>> +
>> + src4 = __lsx_vld(src_tmp2, 0);
>> + DUP2_ARG2(__lsx_vldx, src_tmp2, src_stride, src_tmp2,
>> src_stride2,
>> + src5, src6);
>> + src7 = __lsx_vldx(src_tmp2, src_stride3);
>> + src_tmp2 += src_stride4;
>> +
>> + __lsx_vst(src0, dst_tmp1, 0);
>> + __lsx_vstx(src1, dst_tmp1, dst_stride);
>> + __lsx_vstx(src2, dst_tmp1, dst_stride2);
>> + __lsx_vstx(src3, dst_tmp1, dst_stride3);
>> + dst_tmp1 += dst_stride4;
>> + __lsx_vst(src4, dst_tmp2, 0);
>> + __lsx_vstx(src5, dst_tmp2, dst_stride);
>> + __lsx_vstx(src6, dst_tmp2, dst_stride2);
>> + __lsx_vstx(src7, dst_tmp2, dst_stride3);
>> + dst_tmp2 += dst_stride4;
>> + }
>> +}
>> +
>> +static void copy_width64_lsx(const uint8_t *src, int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + int32_t height)
>> +{
>> + int32_t cnt = height >> 2;
>> + __m128i src0, src1, src2, src3, src4, src5, src6, src7;
>> + __m128i src8, src9, src10, src11, src12, src13, src14, src15;
>> +
>> + for (;cnt--;) {
>> + DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48,
>> + src0, src1, src2, src3);
>> + src += src_stride;
>> + DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48,
>> + src4, src5, src6, src7);
>> + src += src_stride;
>> + DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48,
>> + src8, src9, src10, src11);
>> + src += src_stride;
>> + DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48,
>> + src12, src13, src14, src15);
>> + src += src_stride;
>> + __lsx_vst(src0, dst, 0);
>> + __lsx_vst(src1, dst, 16);
>> + __lsx_vst(src2, dst, 32);
>> + __lsx_vst(src3, dst, 48);
>> + dst += dst_stride;
>> + __lsx_vst(src4, dst, 0);
>> + __lsx_vst(src5, dst, 16);
>> + __lsx_vst(src6, dst, 32);
>> + __lsx_vst(src7, dst, 48);
>> + dst += dst_stride;
>> + __lsx_vst(src8, dst, 0);
>> + __lsx_vst(src9, dst, 16);
>> + __lsx_vst(src10, dst, 32);
>> + __lsx_vst(src11, dst, 48);
>> + dst += dst_stride;
>> + __lsx_vst(src12, dst, 0);
>> + __lsx_vst(src13, dst, 16);
>> + __lsx_vst(src14, dst, 32);
>> + __lsx_vst(src15, dst, 48);
>> + dst += dst_stride;
>> + }
>> +}
>> +
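>> +/* Same horizontal filters as above, but the result is averaged with the
>> + * existing destination pixels (__lsx_vavgr_bu) before being stored. */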
>> +static void common_hz_8t_and_aver_dst_4x4_lsx(const uint8_t *src,
>> + int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter)
>> +{
>> + uint8_t *dst_tmp = dst;
>> + __m128i src0, src1, src2, src3;
>> + __m128i filter0, filter1, filter2, filter3;
>> + __m128i mask0, mask1, mask2, mask3;
>> + __m128i tmp0, tmp1;
>> + __m128i dst0, dst1, dst2, dst3;
>> +
>> + mask0 = __lsx_vld(mc_filt_mask_arr, 16);
>> + src -= 3;
>> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
>> + mask3 = __lsx_vaddi_bu(mask0, 6);
>> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
>> filter, 6,
>> + filter0, filter1, filter2, filter3);
>> + LSX_LD_4(src, src_stride, src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3,
>> 128,
>> + src0, src1, src2, src3);
>> + HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1,
>> mask2, mask3,
>> + filter0, filter1, filter2, filter3,
>> tmp0, tmp1);
>> + dst0 = __lsx_vldrepl_w(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + dst1 = __lsx_vldrepl_w(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + dst2 = __lsx_vldrepl_w(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + dst3 = __lsx_vldrepl_w(dst_tmp, 0);
>> + dst0 = __lsx_vilvl_w(dst1, dst0);
>> + dst1 = __lsx_vilvl_w(dst3, dst2);
>> + dst0 = __lsx_vilvl_d(dst1, dst0);
>> + tmp0 = __lsx_vssrarni_b_h(tmp1, tmp0, 7);
>> + tmp0 = __lsx_vxori_b(tmp0, 128);
>> + dst0 = __lsx_vavgr_bu(tmp0, dst0);
>> + __lsx_vstelm_w(dst0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(dst0, dst, 0, 1);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(dst0, dst, 0, 2);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(dst0, dst, 0, 3);
>> +}
>> +
>> +static void common_hz_8t_and_aver_dst_4x8_lsx(const uint8_t *src,
>> + int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter)
>> +{
>> + uint8_t *dst_tmp = dst;
>> + __m128i src0, src1, src2, src3, filter0, filter1, filter2, filter3;
>> + __m128i mask0, mask1, mask2, mask3, tmp0, tmp1, tmp2, tmp3;
>> + __m128i dst0, dst1;
>> +
>> + mask0 = __lsx_vld(mc_filt_mask_arr, 16);
>> + src -= 3;
>> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
>> + mask3 = __lsx_vaddi_bu(mask0, 6);
>> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
>> filter, 6,
>> + filter0, filter1, filter2, filter3);
>> +
>> + LSX_LD_4(src, src_stride, src0, src1, src2, src3);
>> + src += src_stride;
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3,
>> 128,
>> + src0, src1, src2, src3);
>> + tmp0 = __lsx_vldrepl_w(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + tmp1 = __lsx_vldrepl_w(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + tmp2 = __lsx_vldrepl_w(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + tmp3 = __lsx_vldrepl_w(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + tmp0 = __lsx_vilvl_w(tmp1, tmp0);
>> + tmp1 = __lsx_vilvl_w(tmp3, tmp2);
>> + dst0 = __lsx_vilvl_d(tmp1, tmp0);
>> +
>> + tmp0 = __lsx_vldrepl_w(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + tmp1 = __lsx_vldrepl_w(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + tmp2 = __lsx_vldrepl_w(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + tmp3 = __lsx_vldrepl_w(dst_tmp, 0);
>> + tmp0 = __lsx_vilvl_w(tmp1, tmp0);
>> + tmp1 = __lsx_vilvl_w(tmp3, tmp2);
>> + dst1 = __lsx_vilvl_d(tmp1, tmp0);
>> + HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1,
>> mask2, mask3,
>> + filter0, filter1, filter2, filter3,
>> tmp0, tmp1);
>> + LSX_LD_4(src, src_stride, src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3,
>> 128,
>> + src0, src1, src2, src3);
>> + HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1,
>> mask2, mask3,
>> + filter0, filter1, filter2, filter3,
>> tmp2, tmp3);
>> + DUP4_ARG3(__lsx_vssrarni_b_h, tmp0, tmp0, 7, tmp1, tmp1, 7, tmp2,
>> tmp2, 7,
>> + tmp3, tmp3, 7, tmp0, tmp1, tmp2, tmp3);
>> + DUP2_ARG2(__lsx_vilvl_d, tmp1, tmp0, tmp3, tmp2, tmp0, tmp1);
>> + DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
>> + DUP2_ARG2(__lsx_vavgr_bu, tmp0, dst0, tmp1, dst1, dst0, dst1);
>> + __lsx_vstelm_w(dst0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(dst0, dst, 0, 1);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(dst0, dst, 0, 2);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(dst0, dst, 0, 3);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(dst1, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(dst1, dst, 0, 1);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(dst1, dst, 0, 2);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(dst1, dst, 0, 3);
>> +}
>> +
>> +static void common_hz_8t_and_aver_dst_4w_lsx(const uint8_t *src,
>> + int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter,
>> + int32_t height)
>> +{
>> + if (height == 4) {
>> + common_hz_8t_and_aver_dst_4x4_lsx(src, src_stride, dst,
>> + dst_stride, filter);
>> + } else if (height == 8) {
>> + common_hz_8t_and_aver_dst_4x8_lsx(src, src_stride, dst,
>> + dst_stride, filter);
>> + }
>> +}
>> +
>> +static void common_hz_8t_and_aver_dst_8w_lsx(const uint8_t *src,
>> + int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter,
>> + int32_t height)
>> +{
>> + int32_t loop_cnt = height >> 2;
>> + uint8_t *dst_tmp = dst;
>> + __m128i src0, src1, src2, src3, filter0, filter1, filter2, filter3;
>> + __m128i mask0, mask1, mask2, mask3;
>> + __m128i tmp0, tmp1, tmp2, tmp3;
>> + __m128i dst0, dst1, dst2, dst3;
>> + int32_t src_stride2 = src_stride << 1;
>> + int32_t src_stride3 = src_stride2 + src_stride;
>> + int32_t src_stride4 = src_stride2 << 1;
>> + uint8_t *_src = (uint8_t*)src - 3;
>> +
>> + mask0 = __lsx_vld(mc_filt_mask_arr, 0);
>> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
>> + mask3 = __lsx_vaddi_bu(mask0, 6);
>> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
>> filter, 6,
>> + filter0, filter1, filter2, filter3);
>> +
>> + for (;loop_cnt--;) {
>> + src0 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2,
>> src1, src2);
>> + src3 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128,
>> src3, 128,
>> + src0, src1, src2, src3);
>> + HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0,
>> mask1, mask2,
>> + mask3,filter0, filter1, filter2, filter3, tmp0, tmp1,
>> tmp2, tmp3);
>> + dst0 = __lsx_vldrepl_d(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + dst1 = __lsx_vldrepl_d(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + dst2 = __lsx_vldrepl_d(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + dst3 = __lsx_vldrepl_d(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + DUP2_ARG2(__lsx_vilvl_d, dst1, dst0, dst3, dst2, dst0, dst1);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, tmp1, tmp0, 7, tmp3, tmp2, 7,
>> tmp0, tmp1);
>> + DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
>> + DUP2_ARG2(__lsx_vavgr_bu, tmp0, dst0, tmp1, dst1, dst0, dst1);
>> + __lsx_vstelm_d(dst0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(dst0, dst, 0, 1);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(dst1, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(dst1, dst, 0, 1);
>> + dst += dst_stride;
>> + }
>> +}
>> +
>> +static void common_hz_8t_and_aver_dst_16w_lsx(const uint8_t *src,
>> + int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter,
>> + int32_t height)
>> +{
>> + int32_t loop_cnt = height >> 1;
>> + int32_t dst_stride2 = dst_stride << 1;
>> + uint8_t *dst_tmp = dst;
>> + __m128i src0, src1, src2, src3, filter0, filter1, filter2, filter3;
>> + __m128i mask0, mask1, mask2, mask3, dst0, dst1, dst2, dst3;
>> + __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
>> + __m128i tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15;
>> +
>> + mask0 = __lsx_vld(mc_filt_mask_arr, 0);
>> + src -= 3;
>> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
>> + mask3 = __lsx_vaddi_bu(mask0, 6);
>> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
>> filter, 6,
>> + filter0, filter1, filter2, filter3);
>> +
>> + for (;loop_cnt--;) {
>> + DUP2_ARG2(__lsx_vld, src, 0, src, 8, src0, src1);
>> + src += src_stride;
>> + DUP2_ARG2(__lsx_vld, src, 0, src, 8, src2, src3);
>> + src += src_stride;
>> + dst0 = __lsx_vld(dst_tmp, 0);
>> + dst1 = __lsx_vldx(dst_tmp, dst_stride);
>> + dst_tmp += dst_stride2;
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128,
>> src3, 128,
>> + src0, src1, src2, src3);
>> + DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask0, src1, src1, mask0,
>> src2, src2,
>> + mask0, src3, src3, mask0, tmp0, tmp1, tmp2, tmp3);
>> + DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask1, src1, src1, mask1,
>> src2, src2,
>> + mask1, src3, src3, mask1, tmp4, tmp5, tmp6, tmp7);
>> + DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask2, src1, src1, mask2,
>> src2, src2,
>> + mask2, src3, src3, mask2, tmp8, tmp9, tmp10, tmp11);
>> + DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask3, src1, src1, mask3,
>> src2, src2,
>> + mask3, src3, src3, mask3, tmp12, tmp13, tmp14,
>> tmp15);
>> + DUP4_ARG2(__lsx_vdp2_h_b, tmp0, filter0, tmp1, filter0, tmp2,
>> filter0, tmp3,
>> + filter0, tmp0, tmp1, tmp2, tmp3);
>> + DUP4_ARG2(__lsx_vdp2_h_b, tmp8, filter2, tmp9, filter2, tmp10,
>> filter2, tmp11,
>> + filter2, tmp8, tmp9, tmp10, tmp11);
>> + DUP4_ARG3(__lsx_vdp2add_h_b, tmp0, tmp4, filter1, tmp1, tmp5,
>> filter1, tmp2,
>> + tmp6, filter1, tmp3, tmp7, filter1, tmp0, tmp1,
>> tmp2, tmp3);
>> + DUP4_ARG3(__lsx_vdp2add_h_b, tmp8, tmp12, filter3, tmp9,
>> tmp13, filter3, tmp10,
>> + tmp14, filter3, tmp11, tmp15, filter3, tmp4, tmp5,
>> tmp6, tmp7);
>> + DUP4_ARG2(__lsx_vsadd_h, tmp0, tmp4, tmp1, tmp5, tmp2, tmp6,
>> tmp3, tmp7,
>> + tmp0, tmp1, tmp2, tmp3);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, tmp1, tmp0, 7, tmp3, tmp2, 7,
>> dst2, dst3);
>> + DUP2_ARG2(__lsx_vxori_b, dst2, 128, dst3, 128, dst2, dst3);
>> + DUP2_ARG2(__lsx_vavgr_bu, dst0, dst2, dst1, dst3, dst0, dst1);
>> + __lsx_vst(dst0, dst, 0);
>> + __lsx_vstx(dst1, dst, dst_stride);
>> + dst += dst_stride2;
>> + }
>> +}
>> +
>> +static void common_hz_8t_and_aver_dst_32w_lsx(const uint8_t *src,
>> + int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter,
>> + int32_t height)
>> +{
>> + uint32_t loop_cnt = height;
>> + uint8_t *dst_tmp = dst;
>> + __m128i src0, src1, src2, src3, filter0, filter1, filter2, filter3;
>> + __m128i mask0, mask1, mask2, mask3, dst0, dst1;
>> + __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
>> + __m128i tmp8, tmp9, tmp10, tmp11, tmp12, tmp13, tmp14, tmp15;
>> + __m128i shuff = {0x0F0E0D0C0B0A0908, 0x1716151413121110};
>> +
>> + mask0 = __lsx_vld(mc_filt_mask_arr, 0);
>> + src -= 3;
>> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
>> + mask3 = __lsx_vaddi_bu(mask0, 6);
>> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
>> filter, 6,
>> + filter0, filter1, filter2, filter3);
>> +
>> + for (;loop_cnt--;) {
>> + DUP2_ARG2(__lsx_vld, src, 0, src, 16, src0, src2);
>> + src3 = __lsx_vld(src, 24);
>> + src1 = __lsx_vshuf_b(src2, src0, shuff);
>> + src += src_stride;
>> + DUP2_ARG2(__lsx_vld, dst_tmp, 0, dst, 16, dst0, dst1);
>> + dst_tmp += dst_stride;
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128,
>> src3, 128,
>> + src0, src1, src2, src3);
>> + DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask0, src1, src1, mask0,
>> src2,
>> + src2, mask0, src3, src3, mask0, tmp0, tmp1, tmp2,
>> tmp3);
>> + DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask1, src1, src1, mask1,
>> src2,
>> + src2, mask1, src3, src3, mask1, tmp4, tmp5, tmp6,
>> tmp7);
>> + DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask2, src1, src1, mask2,
>> src2,
>> + src2, mask2, src3, src3, mask2, tmp8, tmp9, tmp10,
>> tmp11);
>> + DUP4_ARG3(__lsx_vshuf_b, src0, src0, mask3, src1, src1, mask3,
>> src2,
>> + src2, mask3, src3, src3, mask3, tmp12, tmp13, tmp14,
>> tmp15);
>> + DUP4_ARG2(__lsx_vdp2_h_b, tmp0, filter0, tmp1, filter0, tmp2,
>> filter0,
>> + tmp3, filter0, tmp0, tmp1, tmp2, tmp3);
>> + DUP4_ARG2(__lsx_vdp2_h_b, tmp8, filter2, tmp9, filter2, tmp10,
>> filter2,
>> + tmp11, filter2, tmp8, tmp9, tmp10, tmp11);
>> + DUP4_ARG3(__lsx_vdp2add_h_b, tmp0, tmp4, filter1, tmp1, tmp5,
>> filter1,
>> + tmp2, tmp6, filter1, tmp3, tmp7, filter1, tmp0, tmp1,
>> tmp2, tmp3);
>> + DUP4_ARG3(__lsx_vdp2add_h_b, tmp8, tmp12, filter3, tmp9,
>> tmp13, filter3,
>> + tmp10, tmp14, filter3, tmp11, tmp15, filter3, tmp4, tmp5,
>> tmp6, tmp7);
>> + DUP4_ARG2(__lsx_vsadd_h, tmp0, tmp4, tmp1, tmp5, tmp2, tmp6,
>> tmp3, tmp7,
>> + tmp0, tmp1, tmp2, tmp3);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, tmp1, tmp0, 7, tmp3, tmp2, 7,
>> tmp0, tmp1);
>> + DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
>> + DUP2_ARG2(__lsx_vavgr_bu, dst0, tmp0, dst1, tmp1, dst0, dst1);
>> + __lsx_vst(dst0, dst, 0);
>> + __lsx_vst(dst1, dst, 16);
>> + dst += dst_stride;
>> + }
>> +}
>> +
>> +static void common_hz_8t_and_aver_dst_64w_lsx(const uint8_t *src,
>> + int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter,
>> + int32_t height)
>> +{
>> + int32_t loop_cnt = height;
>> + __m128i src0, src1, src2, src3;
>> + __m128i filter0, filter1, filter2, filter3;
>> + __m128i mask0, mask1, mask2, mask3;
>> + __m128i out0, out1, out2, out3, dst0, dst1;
>> + __m128i shuff = {0x0F0E0D0C0B0A0908, 0x1716151413121110};
>> +
>> + mask0 = __lsx_vld(mc_filt_mask_arr, 0);
>> + src -= 3;
>> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
>> + mask3 = __lsx_vaddi_bu(mask0, 6);
>> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
>> filter, 6,
>> + filter0, filter1, filter2, filter3);
>> +
>> + for (;loop_cnt--;) {
>> + DUP2_ARG2(__lsx_vld, src, 0, src, 16, src0, src2);
>> + src3 = __lsx_vld(src, 24);
>> + src1 = __lsx_vshuf_b(src2, src0, shuff);
>> + DUP2_ARG2(__lsx_vld, dst, 0, dst, 16, dst0, dst1);
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128,
>> src3, 128,
>> + src0, src1, src2, src3);
>> + HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0,
>> mask1, mask2,
>> + mask3, filter0, filter1, filter2, filter3, out0, out1,
>> out2, out3);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7,
>> out0, out1);
>> + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
>> + DUP2_ARG2(__lsx_vavgr_bu, out0, dst0, out1, dst1, out0, out1);
>> + __lsx_vst(out0, dst, 0);
>> + __lsx_vst(out1, dst, 16);
>> +
>> + DUP2_ARG2(__lsx_vld, src, 32, src, 48, src0, src2);
>> + src3 = __lsx_vld(src, 56);
>> + src1 = __lsx_vshuf_b(src2, src0, shuff);
>> + DUP2_ARG2(__lsx_vld, dst, 32, dst, 48, dst0, dst1);
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128,
>> src3, 128,
>> + src0, src1, src2, src3);
>> + HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, mask0,
>> mask1, mask2,
>> + mask3, filter0, filter1, filter2, filter3, out0, out1,
>> out2, out3);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7,
>> out0, out1);
>> + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
>> + DUP2_ARG2(__lsx_vavgr_bu, out0, dst0, out1, dst1, out0, out1);
>> + __lsx_vst(out0, dst, 32);
>> + __lsx_vst(out1, dst, 48);
>> + src += src_stride;
>> + dst += dst_stride;
>> + }
>> +}
>> +
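>> +/* Vertical 8-tap filters with destination averaging, starting with the
>> + * 4-pixel-wide case. */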
>> +static void common_vt_8t_and_aver_dst_4w_lsx(const uint8_t *src,
>> + int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter,
>> + int32_t height)
>> +{
>> + uint32_t loop_cnt = height >> 2;
>> + uint8_t *dst_tmp = dst;
>> + __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8,
>> src9, src10;
>> + __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
>> + __m128i reg0, reg1, reg2, reg3, reg4;
>> + __m128i filter0, filter1, filter2, filter3;
>> + __m128i out0, out1;
>> + int32_t src_stride2 = src_stride << 1;
>> + int32_t src_stride3 = src_stride + src_stride2;
>> + int32_t src_stride4 = src_stride2 << 1;
>> + uint8_t* _src = (uint8_t*)src - src_stride3;
>> +
>> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
>> filter, 6,
>> + filter0, filter1, filter2, filter3);
>> + src0 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1,
>> src2);
>> + src3 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> + src4 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src5,
>> src6);
>> + _src += src_stride3;
>> + DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src5, src4, src2,
>> src1,
>> + tmp0, tmp1, tmp2, tmp3);
>> + DUP2_ARG2(__lsx_vilvl_b, src4, src3, src6, src5, tmp4, tmp5);
>> + DUP2_ARG2(__lsx_vilvl_d, tmp3, tmp0, tmp4, tmp1, reg0, reg1);
>> + reg2 = __lsx_vilvl_d(tmp5, tmp2);
>> + DUP2_ARG2(__lsx_vxori_b, reg0, 128, reg1, 128, reg0, reg1);
>> + reg2 = __lsx_vxori_b(reg2, 128);
>> +
>> + for (;loop_cnt--;) {
>> + src7 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2,
>> src8, src9);
>> + src10 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> + src0 = __lsx_vldrepl_w(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + src1 = __lsx_vldrepl_w(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + src2 = __lsx_vldrepl_w(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + src3 = __lsx_vldrepl_w(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + DUP2_ARG2(__lsx_vilvl_w, src1, src0, src3, src2, src0, src1);
>> + src0 = __lsx_vilvl_d(src1, src0);
>> + DUP4_ARG2(__lsx_vilvl_b, src7, src6, src8, src7, src9, src8,
>> src10,
>> + src9, tmp0, tmp1, tmp2, tmp3);
>> + DUP2_ARG2(__lsx_vilvl_d, tmp1, tmp0, tmp3, tmp2, reg3, reg4);
>> + DUP2_ARG2(__lsx_vxori_b, reg3, 128, reg4, 128, reg3, reg4);
>> + out0 = FILT_8TAP_DPADD_S_H(reg0, reg1, reg2, reg3, filter0,
>> + filter1, filter2, filter3);
>> + out1 = FILT_8TAP_DPADD_S_H(reg1, reg2, reg3, reg4, filter0,
>> + filter1, filter2, filter3);
>> + out0 = __lsx_vssrarni_b_h(out1, out0, 7);
>> + out0 = __lsx_vxori_b(out0, 128);
>> + out0 = __lsx_vavgr_bu(out0, src0);
>> + __lsx_vstelm_w(out0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(out0, dst, 0, 1);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(out0, dst, 0, 2);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(out0, dst, 0, 3);
>> + dst += dst_stride;
>> + reg0 = reg2;
>> + reg1 = reg3;
>> + reg2 = reg4;
>> + src6 = src10;
>> + }
>> +}
>> +
>> +static void common_vt_8t_and_aver_dst_8w_lsx(const uint8_t *src,
>> + int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + const int8_t *filter,
>> + int32_t height)
>> +{
>> + uint32_t loop_cnt = height >> 2;
>> + uint8_t *dst_tmp = dst;
>> + __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8,
>> src9, src10;
>> + __m128i tmp0, tmp1, tmp2, tmp3;
>> + __m128i reg0, reg1, reg2, reg3, reg4, reg5;
>> + __m128i filter0, filter1, filter2, filter3;
>> + __m128i out0, out1, out2, out3;
>> + int32_t src_stride2 = src_stride << 1;
>> + int32_t src_stride3 = src_stride + src_stride2;
>> + int32_t src_stride4 = src_stride2 << 1;
>> + uint8_t* _src = (uint8_t*)src - src_stride3;
>> +
>> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
>> filter, 6,
>> + filter0, filter1, filter2, filter3);
>> +
>> + src0 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1,
>> src2);
>> + src3 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> + src4 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src5,
>> src6);
>> + _src += src_stride3;
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3,
>> 128,
>> + src0, src1, src2, src3);
>> + DUP2_ARG2(__lsx_vxori_b, src4, 128, src5, 128, src4, src5);
>> + src6 = __lsx_vxori_b(src6, 128);
>> + DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src5, src4, src2,
>> + src1, reg0, reg1, reg2, reg3);
>> + DUP2_ARG2(__lsx_vilvl_b, src4, src3, src6, src5, reg4, reg5);
>> +
>> + for (;loop_cnt--;) {
>> + src7 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2,
>> src8, src9);
>> + src10 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> + src0 = __lsx_vldrepl_d(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + src1 = __lsx_vldrepl_d(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + src2 = __lsx_vldrepl_d(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + src3 = __lsx_vldrepl_d(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + DUP2_ARG2(__lsx_vilvl_d, src1, src0, src3, src2, src0, src1);
>> + DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128,
>> src10, 128,
>> + src7, src8, src9, src10);
>> + DUP4_ARG2(__lsx_vilvl_b, src7, src6, src8, src7, src9, src8,
>> src10,
>> + src9, tmp0, tmp1, tmp2, tmp3);
>> + out0 = FILT_8TAP_DPADD_S_H(reg0, reg1, reg2, tmp0, filter0,
>> + filter1, filter2, filter3);
>> + out1 = FILT_8TAP_DPADD_S_H(reg3, reg4, reg5, tmp1, filter0,
>> + filter1, filter2, filter3);
>> + out2 = FILT_8TAP_DPADD_S_H(reg1, reg2, tmp0, tmp2, filter0,
>> + filter1, filter2, filter3);
>> + out3 = FILT_8TAP_DPADD_S_H(reg4, reg5, tmp1, tmp3, filter0,
>> + filter1, filter2, filter3);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, out3, out2, 7,
>> out0, out1);
>> + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
>> + DUP2_ARG2(__lsx_vavgr_bu, out0, src0, out1, src1, out0, out1);
>> + __lsx_vstelm_d(out0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(out0, dst, 0, 1);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(out1, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(out1, dst, 0, 1);
>> + dst += dst_stride;
>> +
>> + reg0 = reg2;
>> + reg1 = tmp0;
>> + reg2 = tmp2;
>> + reg3 = reg5;
>> + reg4 = tmp1;
>> + reg5 = tmp3;
>> + src6 = src10;
>> + }
>> +}
>> +
>> +static void common_vt_8t_and_aver_dst_16w_mult_lsx(const uint8_t *src,
>> + int32_t src_stride,
>> + uint8_t *dst,
>> + int32_t dst_stride,
>> + const int8_t *filter,
>> + int32_t height,
>> + int32_t width)
>> +{
>> + uint8_t *src_tmp;
>> + uint32_t cnt = width >> 4;
>> + __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8,
>> src9, src10;
>> + __m128i filter0, filter1, filter2, filter3;
>> + __m128i reg0, reg1, reg2, reg3, reg4, reg5;
>> + __m128i reg6, reg7, reg8, reg9, reg10, reg11;
>> + __m128i tmp0, tmp1, tmp2, tmp3;
>> + int32_t src_stride2 = src_stride << 1;
>> + int32_t src_stride3 = src_stride + src_stride2;
>> + int32_t src_stride4 = src_stride2 << 1;
>> + int32_t dst_stride2 = dst_stride << 1;
>> + int32_t dst_stride3 = dst_stride2 + dst_stride;
>> + int32_t dst_stride4 = dst_stride2 << 1;
>> + uint8_t *_src = (uint8_t*)src - src_stride3;
>> +
>> + DUP4_ARG2(__lsx_vldrepl_h, filter, 0, filter, 2, filter, 4,
>> filter, 6,
>> + filter0, filter1, filter2, filter3);
>> + for (;cnt--;) {
>> + uint32_t loop_cnt = height >> 2;
>> + uint8_t *dst_reg = dst;
>> +
>> + src_tmp = _src;
>> + src0 = __lsx_vld(src_tmp, 0);
>> + DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp,
>> src_stride2,
>> + src1, src2);
>> + src3 = __lsx_vldx(src_tmp, src_stride3);
>> + src_tmp += src_stride4;
>> + src4 = __lsx_vld(src_tmp, 0);
>> + DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp,
>> src_stride2,
>> + src5, src6);
>> + src_tmp += src_stride3;
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128,
>> src3, 128,
>> + src0, src1, src2, src3);
>> + DUP2_ARG2(__lsx_vxori_b, src4, 128, src5, 128, src4, src5);
>> + src6 = __lsx_vxori_b(src6, 128);
>> + DUP4_ARG2(__lsx_vilvl_b, src1, src0, src3, src2, src5, src4,
>> src2, src1,
>> + reg0, reg1, reg2, reg3);
>> + DUP2_ARG2(__lsx_vilvl_b, src4, src3, src6, src5, reg4, reg5);
>> + DUP4_ARG2(__lsx_vilvh_b, src1, src0, src3, src2, src5, src4,
>> src2, src1,
>> + reg6, reg7, reg8, reg9);
>> + DUP2_ARG2(__lsx_vilvh_b, src4, src3, src6, src5, reg10, reg11);
>> +
>> + for (;loop_cnt--;) {
>> + src7 = __lsx_vld(src_tmp, 0);
>> + DUP2_ARG2(__lsx_vldx, src_tmp, src_stride, src_tmp,
>> src_stride2,
>> + src8, src9);
>> + src10 = __lsx_vldx(src_tmp, src_stride3);
>> + src_tmp += src_stride4;
>> + DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128,
>> src10,
>> + 128, src7, src8, src9, src10);
>> + DUP4_ARG2(__lsx_vilvl_b, src7, src6, src8, src7, src9,
>> src8,
>> + src10, src9, src0, src1, src2, src3);
>> + DUP4_ARG2(__lsx_vilvh_b, src7, src6, src8, src7, src9,
>> src8,
>> + src10, src9, src4, src5, src7, src8);
>> + tmp0 = FILT_8TAP_DPADD_S_H(reg0, reg1, reg2, src0, filter0,
>> + filter1, filter2, filter3);
>> + tmp1 = FILT_8TAP_DPADD_S_H(reg3, reg4, reg5, src1, filter0,
>> + filter1, filter2, filter3);
>> + tmp2 = FILT_8TAP_DPADD_S_H(reg6, reg7, reg8, src4, filter0,
>> + filter1, filter2, filter3);
>> + tmp3 = FILT_8TAP_DPADD_S_H(reg9, reg10, reg11, src5,
>> filter0,
>> + filter1, filter2, filter3);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, tmp2, tmp0, 7, tmp3, tmp1, 7,
>> + tmp0, tmp1);
>> + DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
>> + tmp2 = __lsx_vld(dst_reg, 0);
>> + tmp3 = __lsx_vldx(dst_reg, dst_stride);
>> + DUP2_ARG2(__lsx_vavgr_bu, tmp0, tmp2, tmp1, tmp3, tmp0,
>> tmp1);
>> + __lsx_vst(tmp0, dst_reg, 0);
>> + __lsx_vstx(tmp1, dst_reg, dst_stride);
>> + tmp0 = FILT_8TAP_DPADD_S_H(reg1, reg2, src0, src2, filter0,
>> + filter1, filter2, filter3);
>> + tmp1 = FILT_8TAP_DPADD_S_H(reg4, reg5, src1, src3, filter0,
>> + filter1, filter2, filter3);
>> + tmp2 = FILT_8TAP_DPADD_S_H(reg7, reg8, src4, src7, filter0,
>> + filter1, filter2, filter3);
>> + tmp3 = FILT_8TAP_DPADD_S_H(reg10, reg11, src5, src8,
>> filter0,
>> + filter1, filter2, filter3);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, tmp2, tmp0, 7, tmp3, tmp1, 7,
>> + tmp0, tmp1);
>> + DUP2_ARG2(__lsx_vxori_b, tmp0, 128, tmp1, 128, tmp0, tmp1);
>> + tmp2 = __lsx_vldx(dst_reg, dst_stride2);
>> + tmp3 = __lsx_vldx(dst_reg, dst_stride3);
>> + DUP2_ARG2(__lsx_vavgr_bu, tmp0, tmp2, tmp1, tmp3, tmp0,
>> tmp1);
>> + __lsx_vstx(tmp0, dst_reg, dst_stride2);
>> + __lsx_vstx(tmp1, dst_reg, dst_stride3);
>> + dst_reg += dst_stride4;
>> +
>> + reg0 = reg2;
>> + reg1 = src0;
>> + reg2 = src2;
>> + reg3 = reg5;
>> + reg4 = src1;
>> + reg5 = src3;
>> + reg6 = reg8;
>> + reg7 = src4;
>> + reg8 = src7;
>> + reg9 = reg11;
>> + reg10 = src5;
>> + reg11 = src8;
>> + src6 = src10;
>> + }
>> + _src += 16;
>> + dst += 16;
>> + }
>> +}
>> +
>> +static void common_vt_8t_and_aver_dst_16w_lsx(const uint8_t *src,
>> + int32_t src_stride,
>> + uint8_t *dst, int32_t
>> dst_stride,
>> + const int8_t *filter,
>> + int32_t height)
>> +{
>> + common_vt_8t_and_aver_dst_16w_mult_lsx(src, src_stride, dst,
>> dst_stride,
>> + filter, height, 16);
>> +}
>> +
>> +static void common_vt_8t_and_aver_dst_32w_lsx(const uint8_t *src,
>> + int32_t src_stride,
>> + uint8_t *dst, int32_t
>> dst_stride,
>> + const int8_t *filter,
>> + int32_t height)
>> +{
>> + common_vt_8t_and_aver_dst_16w_mult_lsx(src, src_stride, dst,
>> dst_stride,
>> + filter, height, 32);
>> +}
>> +
>> +static void common_vt_8t_and_aver_dst_64w_lsx(const uint8_t *src,
>> + int32_t src_stride,
>> + uint8_t *dst, int32_t
>> dst_stride,
>> + const int8_t *filter,
>> + int32_t height)
>> +{
>> + common_vt_8t_and_aver_dst_16w_mult_lsx(src, src_stride, dst,
>> dst_stride,
>> + filter, height, 64);
>> +}
>> +
>> +static void common_hv_8ht_8vt_and_aver_dst_4w_lsx(const uint8_t *src,
>> + int32_t src_stride,
>> + uint8_t *dst,
>> + int32_t dst_stride,
>> + const int8_t
>> *filter_horiz,
>> + const int8_t
>> *filter_vert,
>> + int32_t height)
>> +{
>> + uint32_t loop_cnt = height >> 2;
>> + uint8_t *dst_tmp = dst;
>> + __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8,
>> src9, src10;
>> + __m128i filt_hz0, filt_hz1, filt_hz2, filt_hz3;
>> + __m128i filt_vt0, filt_vt1, filt_vt2, filt_vt3;
>> + __m128i mask0, mask1, mask2, mask3;
>> + __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
>> + __m128i out0, out1;
>> + __m128i shuff = {0x0F0E0D0C0B0A0908, 0x1716151413121110};
>> + int32_t src_stride2 = src_stride << 1;
>> + int32_t src_stride3 = src_stride + src_stride2;
>> + int32_t src_stride4 = src_stride2 << 1;
>> + uint8_t* _src = (uint8_t*)src - 3 - src_stride3;
>> +
>> + mask0 = __lsx_vld(mc_filt_mask_arr, 16);
>> + DUP4_ARG2(__lsx_vldrepl_h, filter_horiz, 0, filter_horiz, 2,
>> filter_horiz,
>> + 4, filter_horiz, 6, filt_hz0, filt_hz1, filt_hz2,
>> filt_hz3);
>> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
>> + mask3 = __lsx_vaddi_bu(mask0, 6);
>> +
>> + src0 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1,
>> src2);
>> + src3 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> + src4 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src5,
>> src6);
>> + _src += src_stride3;
>> +
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3,
>> 128,
>> + src0, src1, src2, src3);
>> + DUP2_ARG2(__lsx_vxori_b, src4, 128, src5, 128, src4, src5);
>> + src6 = __lsx_vxori_b(src6, 128);
>> +
>> + tmp0 = HORIZ_8TAP_FILT(src0, src1, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + tmp2 = HORIZ_8TAP_FILT(src2, src3, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + tmp4 = HORIZ_8TAP_FILT(src4, src5, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + tmp5 = HORIZ_8TAP_FILT(src5, src6, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + DUP2_ARG3(__lsx_vshuf_b, tmp2, tmp0, shuff, tmp4, tmp2, shuff,
>> tmp1, tmp3);
>> + DUP4_ARG2(__lsx_vldrepl_h, filter_vert, 0, filter_vert, 2,
>> filter_vert, 4,
>> + filter_vert, 6, filt_vt0, filt_vt1, filt_vt2, filt_vt3);
>> + DUP2_ARG2(__lsx_vpackev_b, tmp1, tmp0, tmp3, tmp2, tmp0, tmp1);
>> + tmp2 = __lsx_vpackev_b(tmp5, tmp4);
>> +
>> + for (;loop_cnt--;) {
>> + src7 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2,
>> src8, src9);
>> + src10 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> + src2 = __lsx_vldrepl_w(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + src3 = __lsx_vldrepl_w(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + src4 = __lsx_vldrepl_w(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + src5 = __lsx_vldrepl_w(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + DUP2_ARG2(__lsx_vilvl_w, src3, src2, src5, src4, src2, src3);
>> + src2 = __lsx_vilvl_d(src3, src2);
>> + DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128,
>> src10, 128,
>> + src7, src8, src9, src10);
>> + tmp3 = HORIZ_8TAP_FILT(src7, src8, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + tmp4 = __lsx_vshuf_b(tmp3, tmp5, shuff);
>> + tmp4 = __lsx_vpackev_b(tmp3, tmp4);
>> + out0 = FILT_8TAP_DPADD_S_H(tmp0, tmp1, tmp2, tmp4, filt_vt0,
>> filt_vt1,
>> + filt_vt2, filt_vt3);
>> + src1 = HORIZ_8TAP_FILT(src9, src10, mask0, mask1, mask2, mask3,
>> + filt_hz0, filt_hz1, filt_hz2, filt_hz3);
>> + src0 = __lsx_vshuf_b(src1, tmp3, shuff);
>> + src0 = __lsx_vpackev_b(src1, src0);
>> + out1 = FILT_8TAP_DPADD_S_H(tmp1, tmp2, tmp4, src0, filt_vt0,
>> filt_vt1,
>> + filt_vt2, filt_vt3);
>> + out0 = __lsx_vssrarni_b_h(out1, out0, 7);
>> + out0 = __lsx_vxori_b(out0, 128);
>> + out0 = __lsx_vavgr_bu(out0, src2);
>> + __lsx_vstelm_w(out0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(out0, dst, 0, 1);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(out0, dst, 0, 2);
>> + dst += dst_stride;
>> + __lsx_vstelm_w(out0, dst, 0, 3);
>> + dst += dst_stride;
>> +
>> + tmp5 = src1;
>> + tmp0 = tmp2;
>> + tmp1 = tmp4;
>> + tmp2 = src0;
>> + }
>> +}
>> +
>> +static void common_hv_8ht_8vt_and_aver_dst_8w_lsx(const uint8_t *src,
>> + int32_t src_stride,
>> + uint8_t *dst,
>> + int32_t dst_stride,
>> + const int8_t
>> *filter_horiz,
>> + const int8_t
>> *filter_vert,
>> + int32_t height)
>> +{
>> + uint32_t loop_cnt = height >> 2;
>> + uint8_t *dst_tmp = dst;
>> + __m128i src0, src1, src2, src3, src4, src5, src6, src7, src8,
>> src9, src10;
>> + __m128i filt_hz0, filt_hz1, filt_hz2, filt_hz3;
>> + __m128i filt_vt0, filt_vt1, filt_vt2, filt_vt3;
>> + __m128i mask0, mask1, mask2, mask3;
>> + __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
>> + __m128i out0, out1;
>> + int32_t src_stride2 = src_stride << 1;
>> + int32_t src_stride3 = src_stride + src_stride2;
>> + int32_t src_stride4 = src_stride2 << 1;
>> + uint8_t* _src = (uint8_t*)src - 3 - src_stride3;
>> +
>> + mask0 = __lsx_vld(mc_filt_mask_arr, 0);
>> + DUP4_ARG2(__lsx_vldrepl_h, filter_horiz, 0, filter_horiz, 2,
>> filter_horiz,
>> + 4, filter_horiz, 6, filt_hz0, filt_hz1, filt_hz2,
>> filt_hz3);
>> + DUP2_ARG2(__lsx_vaddi_bu, mask0, 2, mask0, 4, mask1, mask2);
>> + mask3 = __lsx_vaddi_bu(mask0, 6);
>> +
>> + src0 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src1,
>> src2);
>> + src3 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> + src4 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2, src5,
>> src6);
>> + _src += src_stride3;
>> + DUP4_ARG2(__lsx_vxori_b, src0, 128, src1, 128, src2, 128, src3,
>> 128,
>> + src0, src1, src2, src3);
>> + DUP2_ARG2(__lsx_vxori_b, src4, 128, src5, 128, src4, src5);
>> + src6 = __lsx_vxori_b(src6, 128);
>> +
>> + src0 = HORIZ_8TAP_FILT(src0, src0, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + src1 = HORIZ_8TAP_FILT(src1, src1, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + src2 = HORIZ_8TAP_FILT(src2, src2, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + src3 = HORIZ_8TAP_FILT(src3, src3, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + src4 = HORIZ_8TAP_FILT(src4, src4, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + src5 = HORIZ_8TAP_FILT(src5, src5, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + src6 = HORIZ_8TAP_FILT(src6, src6, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> +
>> + DUP4_ARG2(__lsx_vldrepl_h, filter_vert, 0, filter_vert, 2,
>> filter_vert, 4,
>> + filter_vert, 6, filt_vt0, filt_vt1, filt_vt2, filt_vt3);
>> + DUP4_ARG2(__lsx_vpackev_b, src1, src0, src3, src2, src5, src4,
>> + src2, src1, tmp0, tmp1, tmp2, tmp4);
>> + DUP2_ARG2(__lsx_vpackev_b, src4, src3, src6, src5, tmp5, tmp6);
>> +
>> + for (;loop_cnt--;) {
>> + src7 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2,
>> src8, src9);
>> + src10 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> +
>> + DUP4_ARG2(__lsx_vxori_b, src7, 128, src8, 128, src9, 128,
>> src10, 128,
>> + src7, src8, src9, src10);
>> + src7 = HORIZ_8TAP_FILT(src7, src7, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + tmp3 = __lsx_vpackev_b(src7, src6);
>> + out0 = FILT_8TAP_DPADD_S_H(tmp0, tmp1, tmp2, tmp3, filt_vt0,
>> filt_vt1,
>> + filt_vt2, filt_vt3);
>> + src8 = HORIZ_8TAP_FILT(src8, src8, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + src0 = __lsx_vpackev_b(src8, src7);
>> + out1 = FILT_8TAP_DPADD_S_H(tmp4, tmp5, tmp6, src0, filt_vt0,
>> filt_vt1,
>> + filt_vt2, filt_vt3);
>> + src9 = HORIZ_8TAP_FILT(src9, src9, mask0, mask1, mask2, mask3,
>> filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + src1 = __lsx_vpackev_b(src9, src8);
>> + src3 = FILT_8TAP_DPADD_S_H(tmp1, tmp2, tmp3, src1, filt_vt0,
>> filt_vt1,
>> + filt_vt2, filt_vt3);
>> + src10 = HORIZ_8TAP_FILT(src10, src10, mask0, mask1, mask2,
>> mask3, filt_hz0,
>> + filt_hz1, filt_hz2, filt_hz3);
>> + src2 = __lsx_vpackev_b(src10, src9);
>> + src4 = FILT_8TAP_DPADD_S_H(tmp5, tmp6, src0, src2, filt_vt0,
>> filt_vt1,
>> + filt_vt2, filt_vt3);
>> + DUP2_ARG3(__lsx_vssrarni_b_h, out1, out0, 7, src4, src3, 7,
>> out0, out1);
>> + DUP2_ARG2(__lsx_vxori_b, out0, 128, out1, 128, out0, out1);
>> + src5 = __lsx_vldrepl_d(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + src7 = __lsx_vldrepl_d(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + src8 = __lsx_vldrepl_d(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + src9 = __lsx_vldrepl_d(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + DUP2_ARG2(__lsx_vilvl_d, src7, src5, src9, src8, src5, src7);
>> + DUP2_ARG2(__lsx_vavgr_bu, out0, src5, out1, src7, out0, out1);
>> + __lsx_vstelm_d(out0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(out0, dst, 0, 1);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(out1, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(out1, dst, 0, 1);
>> + dst += dst_stride;
>> +
>> + src6 = src10;
>> + tmp0 = tmp2;
>> + tmp1 = tmp3;
>> + tmp2 = src1;
>> + tmp4 = tmp6;
>> + tmp5 = src0;
>> + tmp6 = src2;
>> + }
>> +}
>> +
>> +static void common_hv_8ht_8vt_and_aver_dst_16w_lsx(const uint8_t *src,
>> + int32_t src_stride,
>> + uint8_t *dst,
>> + int32_t dst_stride,
>> + const int8_t
>> *filter_horiz,
>> + const int8_t
>> *filter_vert,
>> + int32_t height)
>> +{
>> + int32_t multiple8_cnt;
>> +
>> + for (multiple8_cnt = 2; multiple8_cnt--;) {
>> + common_hv_8ht_8vt_and_aver_dst_8w_lsx(src, src_stride, dst,
>> dst_stride,
>> + filter_horiz,
>> filter_vert,
>> + height);
>> +
>> + src += 8;
>> + dst += 8;
>> + }
>> +}
>> +
>> +static void common_hv_8ht_8vt_and_aver_dst_32w_lsx(const uint8_t *src,
>> + int32_t src_stride,
>> + uint8_t *dst,
>> + int32_t dst_stride,
>> + const int8_t
>> *filter_horiz,
>> + const int8_t
>> *filter_vert,
>> + int32_t height)
>> +{
>> + int32_t multiple8_cnt;
>> +
>> + for (multiple8_cnt = 4; multiple8_cnt--;) {
>> + common_hv_8ht_8vt_and_aver_dst_8w_lsx(src, src_stride, dst,
>> dst_stride,
>> + filter_horiz,
>> filter_vert,
>> + height);
>> +
>> + src += 8;
>> + dst += 8;
>> + }
>> +}
>> +
>> +static void common_hv_8ht_8vt_and_aver_dst_64w_lsx(const uint8_t *src,
>> + int32_t src_stride,
>> + uint8_t *dst,
>> + int32_t dst_stride,
>> + const int8_t
>> *filter_horiz,
>> + const int8_t
>> *filter_vert,
>> + int32_t height)
>> +{
>> + int32_t multiple8_cnt;
>> +
>> + for (multiple8_cnt = 8; multiple8_cnt--;) {
>> + common_hv_8ht_8vt_and_aver_dst_8w_lsx(src, src_stride, dst,
>> dst_stride,
>> + filter_horiz,
>> filter_vert,
>> + height);
>> +
>> + src += 8;
>> + dst += 8;
>> + }
>> +}
>> +
>> +static void avg_width8_lsx(const uint8_t *src, int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + int32_t height)
>> +{
>> + int32_t cnt = height >> 2;
>> + uint8_t *dst_tmp = dst;
>> + __m128i src0, src1, dst0, dst1;
>> + __m128i tmp0, tmp1, tmp2, tmp3;
>> +
>> + for (;cnt--;) {
>> + tmp0 = __lsx_vldrepl_d(src, 0);
>> + src += src_stride;
>> + tmp1 = __lsx_vldrepl_d(src, 0);
>> + src += src_stride;
>> + tmp2 = __lsx_vldrepl_d(src, 0);
>> + src += src_stride;
>> + tmp3 = __lsx_vldrepl_d(src, 0);
>> + src += src_stride;
>> + DUP2_ARG2(__lsx_vilvl_d, tmp1, tmp0, tmp3, tmp2, src0, src1);
>> + tmp0 = __lsx_vldrepl_d(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + tmp1 = __lsx_vldrepl_d(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + tmp2 = __lsx_vldrepl_d(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + tmp3 = __lsx_vldrepl_d(dst_tmp, 0);
>> + dst_tmp += dst_stride;
>> + DUP2_ARG2(__lsx_vilvl_d, tmp1, tmp0, tmp3, tmp2, dst0, dst1);
>> + DUP2_ARG2(__lsx_vavgr_bu, src0, dst0, src1, dst1, dst0, dst1);
>> + __lsx_vstelm_d(dst0, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(dst0, dst, 0, 1);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(dst1, dst, 0, 0);
>> + dst += dst_stride;
>> + __lsx_vstelm_d(dst1, dst, 0, 1);
>> + dst += dst_stride;
>> + }
>> +}
>> +
>> +static void avg_width16_lsx(const uint8_t *src, int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + int32_t height)
>> +{
>> + int32_t cnt = height >> 2;
>> + __m128i src0, src1, src2, src3;
>> + __m128i dst0, dst1, dst2, dst3;
>> + int32_t src_stride2 = src_stride << 1;
>> + int32_t src_stride3 = src_stride + src_stride2;
>> + int32_t src_stride4 = src_stride2 << 1;
>> + int32_t dst_stride2 = dst_stride << 1;
>> + int32_t dst_stride3 = dst_stride2 + dst_stride;
>> + int32_t dst_stride4 = dst_stride2 << 1;
>> + uint8_t* _src = (uint8_t*)src;
>> +
>> + for (;cnt--;) {
>> + src0 = __lsx_vld(_src, 0);
>> + DUP2_ARG2(__lsx_vldx, _src, src_stride, _src, src_stride2,
>> src1, src2);
>> + src3 = __lsx_vldx(_src, src_stride3);
>> + _src += src_stride4;
>> +
>> + dst0 = __lsx_vld(dst, 0);
>> + DUP2_ARG2(__lsx_vldx, dst, dst_stride, dst, dst_stride2,
>> + dst1, dst2);
>> + dst3 = __lsx_vldx(dst, dst_stride3);
>> + DUP4_ARG2(__lsx_vavgr_bu, src0, dst0, src1, dst1,
>> + src2, dst2, src3, dst3, dst0, dst1, dst2, dst3);
>> + __lsx_vst(dst0, dst, 0);
>> + __lsx_vstx(dst1, dst, dst_stride);
>> + __lsx_vstx(dst2, dst, dst_stride2);
>> + __lsx_vstx(dst3, dst, dst_stride3);
>> + dst += dst_stride4;
>> + }
>> +}
>> +
>> +static void avg_width32_lsx(const uint8_t *src, int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + int32_t height)
>> +{
>> + int32_t cnt = height >> 2;
>> + uint8_t *src_tmp1 = (uint8_t*)src;
>> + uint8_t *src_tmp2 = src_tmp1 + 16;
>> + uint8_t *dst_tmp1, *dst_tmp2;
>> + __m128i src0, src1, src2, src3, src4, src5, src6, src7;
>> + __m128i dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
>> + int32_t src_stride2 = src_stride << 1;
>> + int32_t src_stride3 = src_stride + src_stride2;
>> + int32_t src_stride4 = src_stride2 << 1;
>> + int32_t dst_stride2 = dst_stride << 1;
>> + int32_t dst_stride3 = dst_stride2 + dst_stride;
>> + int32_t dst_stride4 = dst_stride2 << 1;
>> +
>> + dst_tmp1 = dst;
>> + dst_tmp2 = dst + 16;
>> + for (;cnt--;) {
>> + src0 = __lsx_vld(src_tmp1, 0);
>> + DUP2_ARG2(__lsx_vldx, src_tmp1, src_stride, src_tmp1,
>> src_stride2,
>> + src2, src4);
>> + src6 = __lsx_vldx(src_tmp1, src_stride3);
>> + src_tmp1 += src_stride4;
>> +
>> + src1 = __lsx_vld(src_tmp2, 0);
>> + DUP2_ARG2(__lsx_vldx, src_tmp2, src_stride, src_tmp2,
>> src_stride2,
>> + src3, src5);
>> + src7 = __lsx_vldx(src_tmp2, src_stride3);
>> + src_tmp2 += src_stride4;
>> +
>> + dst0 = __lsx_vld(dst_tmp1, 0);
>> + DUP2_ARG2(__lsx_vldx, dst_tmp1, dst_stride, dst_tmp1,
>> dst_stride2,
>> + dst2, dst4);
>> + dst6 = __lsx_vldx(dst_tmp1, dst_stride3);
>> + dst1 = __lsx_vld(dst_tmp2, 0);
>> + DUP2_ARG2(__lsx_vldx, dst_tmp2, dst_stride, dst_tmp2,
>> dst_stride2,
>> + dst3, dst5);
>> + dst7 = __lsx_vldx(dst_tmp2, dst_stride3);
>> +
>> + DUP4_ARG2(__lsx_vavgr_bu, src0, dst0, src1, dst1,
>> + src2, dst2, src3, dst3, dst0, dst1, dst2, dst3);
>> + DUP4_ARG2(__lsx_vavgr_bu, src4, dst4, src5, dst5,
>> + src6, dst6, src7, dst7, dst4, dst5, dst6, dst7);
>> + __lsx_vst(dst0, dst_tmp1, 0);
>> + __lsx_vstx(dst2, dst_tmp1, dst_stride);
>> + __lsx_vstx(dst4, dst_tmp1, dst_stride2);
>> + __lsx_vstx(dst6, dst_tmp1, dst_stride3);
>> + dst_tmp1 += dst_stride4;
>> + __lsx_vst(dst1, dst_tmp2, 0);
>> + __lsx_vstx(dst3, dst_tmp2, dst_stride);
>> + __lsx_vstx(dst5, dst_tmp2, dst_stride2);
>> + __lsx_vstx(dst7, dst_tmp2, dst_stride3);
>> + dst_tmp2 += dst_stride4;
>> + }
>> +}
>> +
>> +static void avg_width64_lsx(const uint8_t *src, int32_t src_stride,
>> + uint8_t *dst, int32_t dst_stride,
>> + int32_t height)
>> +{
>> + int32_t cnt = height >> 2;
>> + uint8_t *dst_tmp = dst;
>> + __m128i src0, src1, src2, src3, src4, src5, src6, src7;
>> + __m128i src8, src9, src10, src11, src12, src13, src14, src15;
>> + __m128i dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;
>> + __m128i dst8, dst9, dst10, dst11, dst12, dst13, dst14, dst15;
>> +
>> + for (;cnt--;) {
>> + DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48,
>> + src0, src1, src2, src3);
>> + src += src_stride;
>> + DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48,
>> + src4, src5, src6, src7);
>> + src += src_stride;
>> + DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48,
>> + src8, src9, src10, src11);
>> + src += src_stride;
>> + DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48,
>> + src12, src13, src14, src15);
>> + src += src_stride;
>> + DUP4_ARG2(__lsx_vld, dst_tmp, 0, dst_tmp, 16, dst_tmp, 32,
>> dst_tmp, 48,
>> + dst0, dst1, dst2, dst3);
>> + dst_tmp += dst_stride;
>> + DUP4_ARG2(__lsx_vld, dst_tmp, 0, dst_tmp, 16, dst_tmp, 32,
>> dst_tmp, 48,
>> + dst4, dst5, dst6, dst7);
>> + dst_tmp += dst_stride;
>> + DUP4_ARG2(__lsx_vld, dst_tmp, 0, dst_tmp, 16, dst_tmp, 32,
>> dst_tmp, 48,
>> + dst8, dst9, dst10, dst11);
>> + dst_tmp += dst_stride;
>> + DUP4_ARG2(__lsx_vld, dst_tmp, 0, dst_tmp, 16, dst_tmp, 32,
>> dst_tmp, 48,
>> + dst12, dst13, dst14, dst15);
>> + dst_tmp += dst_stride;
>> + DUP4_ARG2(__lsx_vavgr_bu, src0, dst0, src1, dst1,
>> + src2, dst2, src3, dst3, dst0, dst1, dst2, dst3);
>> + DUP4_ARG2(__lsx_vavgr_bu, src4, dst4, src5, dst5,
>> + src6, dst6, src7, dst7, dst4, dst5, dst6, dst7);
>> + DUP4_ARG2(__lsx_vavgr_bu, src8, dst8, src9, dst9, src10,
>> + dst10, src11, dst11, dst8, dst9, dst10, dst11);
>> + DUP4_ARG2(__lsx_vavgr_bu, src12, dst12, src13, dst13, src14,
>> + dst14, src15, dst15, dst12, dst13, dst14, dst15);
>> + __lsx_vst(dst0, dst, 0);
>> + __lsx_vst(dst1, dst, 16);
>> + __lsx_vst(dst2, dst, 32);
>> + __lsx_vst(dst3, dst, 48);
>> + dst += dst_stride;
>> + __lsx_vst(dst4, dst, 0);
>> + __lsx_vst(dst5, dst, 16);
>> + __lsx_vst(dst6, dst, 32);
>> + __lsx_vst(dst7, dst, 48);
>> + dst += dst_stride;
>> + __lsx_vst(dst8, dst, 0);
>> + __lsx_vst(dst9, dst, 16);
>> + __lsx_vst(dst10, dst, 32);
>> + __lsx_vst(dst11, dst, 48);
>> + dst += dst_stride;
>> + __lsx_vst(dst12, dst, 0);
>> + __lsx_vst(dst13, dst, 16);
>> + __lsx_vst(dst14, dst, 32);
>> + __lsx_vst(dst15, dst, 48);
>> + dst += dst_stride;
>> + }
>> +}
>> +
>> +static const int8_t vp9_subpel_filters_lsx[3][15][8] = {
>> + [FILTER_8TAP_REGULAR] = {
>> + {0, 1, -5, 126, 8, -3, 1, 0},
>> + {-1, 3, -10, 122, 18, -6, 2, 0},
>> + {-1, 4, -13, 118, 27, -9, 3, -1},
>> + {-1, 4, -16, 112, 37, -11, 4, -1},
>> + {-1, 5, -18, 105, 48, -14, 4, -1},
>> + {-1, 5, -19, 97, 58, -16, 5, -1},
>> + {-1, 6, -19, 88, 68, -18, 5, -1},
>> + {-1, 6, -19, 78, 78, -19, 6, -1},
>> + {-1, 5, -18, 68, 88, -19, 6, -1},
>> + {-1, 5, -16, 58, 97, -19, 5, -1},
>> + {-1, 4, -14, 48, 105, -18, 5, -1},
>> + {-1, 4, -11, 37, 112, -16, 4, -1},
>> + {-1, 3, -9, 27, 118, -13, 4, -1},
>> + {0, 2, -6, 18, 122, -10, 3, -1},
>> + {0, 1, -3, 8, 126, -5, 1, 0},
>> + }, [FILTER_8TAP_SHARP] = {
>> + {-1, 3, -7, 127, 8, -3, 1, 0},
>> + {-2, 5, -13, 125, 17, -6, 3, -1},
>> + {-3, 7, -17, 121, 27, -10, 5, -2},
>> + {-4, 9, -20, 115, 37, -13, 6, -2},
>> + {-4, 10, -23, 108, 48, -16, 8, -3},
>> + {-4, 10, -24, 100, 59, -19, 9, -3},
>> + {-4, 11, -24, 90, 70, -21, 10, -4},
>> + {-4, 11, -23, 80, 80, -23, 11, -4},
>> + {-4, 10, -21, 70, 90, -24, 11, -4},
>> + {-3, 9, -19, 59, 100, -24, 10, -4},
>> + {-3, 8, -16, 48, 108, -23, 10, -4},
>> + {-2, 6, -13, 37, 115, -20, 9, -4},
>> + {-2, 5, -10, 27, 121, -17, 7, -3},
>> + {-1, 3, -6, 17, 125, -13, 5, -2},
>> + {0, 1, -3, 8, 127, -7, 3, -1},
>> + }, [FILTER_8TAP_SMOOTH] = {
>> + {-3, -1, 32, 64, 38, 1, -3, 0},
>> + {-2, -2, 29, 63, 41, 2, -3, 0},
>> + {-2, -2, 26, 63, 43, 4, -4, 0},
>> + {-2, -3, 24, 62, 46, 5, -4, 0},
>> + {-2, -3, 21, 60, 49, 7, -4, 0},
>> + {-1, -4, 18, 59, 51, 9, -4, 0},
>> + {-1, -4, 16, 57, 53, 12, -4, -1},
>> + {-1, -4, 14, 55, 55, 14, -4, -1},
>> + {-1, -4, 12, 53, 57, 16, -4, -1},
>> + {0, -4, 9, 51, 59, 18, -4, -1},
>> + {0, -4, 7, 49, 60, 21, -3, -2},
>> + {0, -4, 5, 46, 62, 24, -3, -2},
>> + {0, -4, 4, 43, 63, 26, -2, -2},
>> + {0, -3, 2, 41, 63, 29, -2, -2},
>> + {0, -3, 1, 38, 64, 32, -1, -3},
>> + }
>> +};
>> +
>> +#define VP9_8TAP_LOONGARCH_LSX_FUNC(SIZE, type, type_idx)
>> \
>> +void ff_put_8tap_##type##_##SIZE##h_lsx(uint8_t *dst, ptrdiff_t
>> dststride, \
>> + const uint8_t *src,
>> \
>> + ptrdiff_t srcstride,
>> \
>> + int h, int mx, int my)
>> \
>> +{
>> \
>> + const int8_t *filter = vp9_subpel_filters_lsx[type_idx][mx-1];
>> \
>> +
>> \
>> + common_hz_8t_##SIZE##w_lsx(src, srcstride, dst, dststride, filter,
>> h); \
>> +}
>> \
>> +
>> \
>> +void ff_put_8tap_##type##_##SIZE##v_lsx(uint8_t *dst, ptrdiff_t
>> dststride, \
>> + const uint8_t *src,
>> \
>> + ptrdiff_t srcstride,
>> \
>> + int h, int mx, int my)
>> \
>> +{
>> \
>> + const int8_t *filter = vp9_subpel_filters_lsx[type_idx][my-1];
>> \
>> +
>> \
>> + common_vt_8t_##SIZE##w_lsx(src, srcstride, dst, dststride, filter,
>> h); \
>> +}
>> \
>> +
>> \
>> +void ff_put_8tap_##type##_##SIZE##hv_lsx(uint8_t *dst, ptrdiff_t
>> dststride, \
>> + const uint8_t *src,
>> \
>> + ptrdiff_t srcstride,
>> \
>> + int h, int mx, int my)
>> \
>> +{
>> \
>> + const int8_t *hfilter = vp9_subpel_filters_lsx[type_idx][mx-1];
>> \
>> + const int8_t *vfilter = vp9_subpel_filters_lsx[type_idx][my-1];
>> \
>> +
>> \
>> + common_hv_8ht_8vt_##SIZE##w_lsx(src, srcstride, dst, dststride,
>> hfilter, \
>> + vfilter, h);
>> \
>> +}
>> \
>> +
>> \
>> +void ff_avg_8tap_##type##_##SIZE##h_lsx(uint8_t *dst, ptrdiff_t
>> dststride, \
>> + const uint8_t *src,
>> \
>> + ptrdiff_t srcstride,
>> \
>> + int h, int mx, int my)
>> \
>> +{
>> \
>> + const int8_t *filter = vp9_subpel_filters_lsx[type_idx][mx-1];
>> \
>> +
>> \
>> + common_hz_8t_and_aver_dst_##SIZE##w_lsx(src, srcstride, dst,
>> \
>> + dststride, filter, h);
>> \
>> +}
>> \
>> +
>> \
>> +void ff_avg_8tap_##type##_##SIZE##v_lsx(uint8_t *dst, ptrdiff_t
>> dststride, \
>> + const uint8_t *src,
>> \
>> + ptrdiff_t srcstride,
>> \
>> + int h, int mx, int my)
>> \
>> +{
>> \
>> + const int8_t *filter = vp9_subpel_filters_lsx[type_idx][my-1];
>> \
>> +
>> \
>> + common_vt_8t_and_aver_dst_##SIZE##w_lsx(src, srcstride, dst,
>> dststride, \
>> + filter, h);
>> \
>> +}
>> \
>> +
>> \
>> +void ff_avg_8tap_##type##_##SIZE##hv_lsx(uint8_t *dst, ptrdiff_t
>> dststride, \
>> + const uint8_t *src,
>> \
>> + ptrdiff_t srcstride,
>> \
>> + int h, int mx, int my)
>> \
>> +{
>> \
>> + const int8_t *hfilter = vp9_subpel_filters_lsx[type_idx][mx-1];
>> \
>> + const int8_t *vfilter = vp9_subpel_filters_lsx[type_idx][my-1];
>> \
>> +
>> \
>> + common_hv_8ht_8vt_and_aver_dst_##SIZE##w_lsx(src, srcstride, dst,
>> \
>> + dststride, hfilter,
>> \
>> + vfilter, h);
>> \
>> +}
>> +
>> +#define VP9_COPY_LOONGARCH_LSX_FUNC(SIZE) \
>> +void ff_copy##SIZE##_lsx(uint8_t *dst, ptrdiff_t dststride, \
>> + const uint8_t *src, ptrdiff_t srcstride, \
>> + int h, int mx, int my) \
>> +{ \
>> + \
>> + copy_width##SIZE##_lsx(src, srcstride, dst, dststride, h); \
>> +} \
>> +void ff_avg##SIZE##_lsx(uint8_t *dst, ptrdiff_t dststride, \
>> + const uint8_t *src, ptrdiff_t srcstride, \
>> + int h, int mx, int my) \
>> +{ \
>> + \
>> + avg_width##SIZE##_lsx(src, srcstride, dst, dststride, h); \
>> +}
>> +
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(64, regular, FILTER_8TAP_REGULAR);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(32, regular, FILTER_8TAP_REGULAR);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(16, regular, FILTER_8TAP_REGULAR);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(8, regular, FILTER_8TAP_REGULAR);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(4, regular, FILTER_8TAP_REGULAR);
>> +
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(64, sharp, FILTER_8TAP_SHARP);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(32, sharp, FILTER_8TAP_SHARP);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(16, sharp, FILTER_8TAP_SHARP);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(8, sharp, FILTER_8TAP_SHARP);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(4, sharp, FILTER_8TAP_SHARP);
>> +
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(64, smooth, FILTER_8TAP_SMOOTH);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(32, smooth, FILTER_8TAP_SMOOTH);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(16, smooth, FILTER_8TAP_SMOOTH);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(8, smooth, FILTER_8TAP_SMOOTH);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(4, smooth, FILTER_8TAP_SMOOTH);
>> +
>> +VP9_COPY_LOONGARCH_LSX_FUNC(64);
>> +VP9_COPY_LOONGARCH_LSX_FUNC(32);
>> +VP9_COPY_LOONGARCH_LSX_FUNC(16);
>> +VP9_COPY_LOONGARCH_LSX_FUNC(8);
>> +
>> +#undef VP9_8TAP_LOONGARCH_LSX_FUNC
>> +#undef VP9_COPY_LOONGARCH_LSX_FUNC
>> diff --git a/libavcodec/loongarch/vp9dsp_init_loongarch.c
>> b/libavcodec/loongarch/vp9dsp_init_loongarch.c
>> new file mode 100644
>> index 0000000000..c1e01b4558
>> --- /dev/null
>> +++ b/libavcodec/loongarch/vp9dsp_init_loongarch.c
>> @@ -0,0 +1,97 @@
>> +/*
>> + * Copyright (c) 2021 Loongson Technology Corporation Limited
>> + * Contributed by Hao Chen <chenhao@loongson.cn>
>> + *
>> + * This file is part of FFmpeg.
>> + *
>> + * FFmpeg is free software; you can redistribute it and/or
>> + * modify it under the terms of the GNU Lesser General Public
>> + * License as published by the Free Software Foundation; either
>> + * version 2.1 of the License, or (at your option) any later version.
>> + *
>> + * FFmpeg is distributed in the hope that it will be useful,
>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
>> + * Lesser General Public License for more details.
>> + *
>> + * You should have received a copy of the GNU Lesser General Public
>> + * License along with FFmpeg; if not, write to the Free Software
>> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
>> 02110-1301 USA
>> + */
>> +
>> +#include "libavutil/loongarch/cpu.h"
>> +#include "libavutil/attributes.h"
>> +#include "libavcodec/vp9dsp.h"
>> +#include "vp9dsp_loongarch.h"
>> +
>> +#define init_subpel1(idx1, idx2, idxh, idxv, sz, dir, type) \
>> + dsp->mc[idx1][FILTER_8TAP_SMOOTH ][idx2][idxh][idxv] = \
>> + ff_##type##_8tap_smooth_##sz##dir##_lsx; \
>> + dsp->mc[idx1][FILTER_8TAP_REGULAR][idx2][idxh][idxv] = \
>> + ff_##type##_8tap_regular_##sz##dir##_lsx; \
>> + dsp->mc[idx1][FILTER_8TAP_SHARP ][idx2][idxh][idxv] = \
>> + ff_##type##_8tap_sharp_##sz##dir##_lsx;
>> +
>> +#define init_subpel2(idx, idxh, idxv, dir, type) \
>> + init_subpel1(0, idx, idxh, idxv, 64, dir, type); \
>> + init_subpel1(1, idx, idxh, idxv, 32, dir, type); \
>> + init_subpel1(2, idx, idxh, idxv, 16, dir, type); \
>> + init_subpel1(3, idx, idxh, idxv, 8, dir, type); \
>> + init_subpel1(4, idx, idxh, idxv, 4, dir, type);
>> +
>> +#define init_subpel3(idx, type) \
>> + init_subpel2(idx, 1, 0, h, type); \
>> + init_subpel2(idx, 0, 1, v, type); \
>> + init_subpel2(idx, 1, 1, hv, type);
>> +
>> +#define init_fpel(idx1, idx2, sz, type)
>> \
>> + dsp->mc[idx1][FILTER_8TAP_SMOOTH ][idx2][0][0] =
>> ff_##type##sz##_lsx; \
>> + dsp->mc[idx1][FILTER_8TAP_REGULAR][idx2][0][0] =
>> ff_##type##sz##_lsx; \
>> + dsp->mc[idx1][FILTER_8TAP_SHARP ][idx2][0][0] =
>> ff_##type##sz##_lsx; \
>> + dsp->mc[idx1][FILTER_BILINEAR ][idx2][0][0] =
>> ff_##type##sz##_lsx;
>> +
>> +#define init_copy(idx, sz) \
>> + init_fpel(idx, 0, sz, copy); \
>> + init_fpel(idx, 1, sz, avg);
>> +
>> +#define init_intra_pred1_lsx(tx, sz) \
>> + dsp->intra_pred[tx][VERT_PRED] = ff_vert_##sz##_lsx; \
>> + dsp->intra_pred[tx][HOR_PRED] = ff_hor_##sz##_lsx; \
>> + dsp->intra_pred[tx][DC_PRED] = ff_dc_##sz##_lsx; \
>> + dsp->intra_pred[tx][LEFT_DC_PRED] = ff_dc_left_##sz##_lsx; \
>> + dsp->intra_pred[tx][TOP_DC_PRED] = ff_dc_top_##sz##_lsx; \
>> + dsp->intra_pred[tx][DC_128_PRED] = ff_dc_128_##sz##_lsx; \
>> + dsp->intra_pred[tx][DC_127_PRED] = ff_dc_127_##sz##_lsx; \
>> + dsp->intra_pred[tx][DC_129_PRED] = ff_dc_129_##sz##_lsx; \
>> + dsp->intra_pred[tx][TM_VP8_PRED] = ff_tm_##sz##_lsx; \
>> +
>> +#define init_intra_pred2_lsx(tx, sz) \
>> + dsp->intra_pred[tx][DC_PRED] = ff_dc_##sz##_lsx; \
>> + dsp->intra_pred[tx][LEFT_DC_PRED] = ff_dc_left_##sz##_lsx; \
>> + dsp->intra_pred[tx][TOP_DC_PRED] = ff_dc_top_##sz##_lsx; \
>> + dsp->intra_pred[tx][TM_VP8_PRED] = ff_tm_##sz##_lsx; \
>> +
>> +av_cold void ff_vp9dsp_init_loongarch(VP9DSPContext *dsp, int bpp)
>> +{
>> + int cpu_flags = av_get_cpu_flags();
>> + if (have_lsx(cpu_flags))
>> + if (bpp == 8) {
>> + init_subpel3(0, put);
>> + init_subpel3(1, avg);
>> + init_copy(0, 64);
>> + init_copy(1, 32);
>> + init_copy(2, 16);
>> + init_copy(3, 8);
>> + init_intra_pred1_lsx(TX_16X16, 16x16);
>> + init_intra_pred1_lsx(TX_32X32, 32x32);
>> + init_intra_pred2_lsx(TX_4X4, 4x4);
>> + init_intra_pred2_lsx(TX_8X8, 8x8);
>> + }
>> +}
>> +#undef init_subpel1
>> +#undef init_subpel2
>> +#undef init_subpel3
>> +#undef init_copy
>> +#undef init_fpel
>> +#undef init_intra_pred1_lsx
>> +#undef init_intra_pred2_lsx
>> diff --git a/libavcodec/loongarch/vp9dsp_loongarch.h
>> b/libavcodec/loongarch/vp9dsp_loongarch.h
>> new file mode 100644
>> index 0000000000..b469326fdc
>> --- /dev/null
>> +++ b/libavcodec/loongarch/vp9dsp_loongarch.h
>> @@ -0,0 +1,144 @@
>> +/*
>> + * Copyright (c) 2021 Loongson Technology Corporation Limited
>> + * Contributed by Hao Chen <chenhao@loongson.cn>
>> + *
>> + * This file is part of FFmpeg.
>> + *
>> + * FFmpeg is free software; you can redistribute it and/or
>> + * modify it under the terms of the GNU Lesser General Public
>> + * License as published by the Free Software Foundation; either
>> + * version 2.1 of the License, or (at your option) any later version.
>> + *
>> + * FFmpeg is distributed in the hope that it will be useful,
>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
>> + * Lesser General Public License for more details.
>> + *
>> + * You should have received a copy of the GNU Lesser General Public
>> + * License along with FFmpeg; if not, write to the Free Software
>> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
>> 02110-1301 USA
>> + */
>> +
>> +#ifndef AVCODEC_LOONGARCH_VP9DSP_LOONGARCH_H
>> +#define AVCODEC_LOONGARCH_VP9DSP_LOONGARCH_H
>> +
>> +#define VP9_8TAP_LOONGARCH_LSX_FUNC(SIZE, type, type_idx)
>> \
>> +void ff_put_8tap_##type##_##SIZE##h_lsx(uint8_t *dst, ptrdiff_t
>> dststride, \
>> + const uint8_t *src,
>> \
>> + ptrdiff_t srcstride,
>> \
>> + int h, int mx, int my);
>> \
>> +
>> \
>> +void ff_put_8tap_##type##_##SIZE##v_lsx(uint8_t *dst, ptrdiff_t
>> dststride, \
>> + const uint8_t *src,
>> \
>> + ptrdiff_t srcstride,
>> \
>> + int h, int mx, int my);
>> \
>> +
>> \
>> +void ff_put_8tap_##type##_##SIZE##hv_lsx(uint8_t *dst, ptrdiff_t
>> dststride, \
>> + const uint8_t *src,
>> \
>> + ptrdiff_t srcstride,
>> \
>> + int h, int mx, int my);
>> \
>> +
>> \
>> +void ff_avg_8tap_##type##_##SIZE##h_lsx(uint8_t *dst, ptrdiff_t
>> dststride, \
>> + const uint8_t *src,
>> \
>> + ptrdiff_t srcstride,
>> \
>> + int h, int mx, int my);
>> \
>> +
>> \
>> +void ff_avg_8tap_##type##_##SIZE##v_lsx(uint8_t *dst, ptrdiff_t
>> dststride, \
>> + const uint8_t *src,
>> \
>> + ptrdiff_t srcstride,
>> \
>> + int h, int mx, int my);
>> \
>> +
>> \
>> +void ff_avg_8tap_##type##_##SIZE##hv_lsx(uint8_t *dst, ptrdiff_t
>> dststride, \
>> + const uint8_t *src,
>> \
>> + ptrdiff_t srcstride,
>> \
>> + int h, int mx, int my);
>> +
>> +#define VP9_COPY_LOONGARCH_LSX_FUNC(SIZE) \
>> +void ff_copy##SIZE##_lsx(uint8_t *dst, ptrdiff_t dststride, \
>> + const uint8_t *src, ptrdiff_t srcstride, \
>> + int h, int mx, int my); \
>> + \
>> +void ff_avg##SIZE##_lsx(uint8_t *dst, ptrdiff_t dststride, \
>> + const uint8_t *src, ptrdiff_t srcstride, \
>> + int h, int mx, int my);
>> +
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(64, regular, FILTER_8TAP_REGULAR);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(32, regular, FILTER_8TAP_REGULAR);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(16, regular, FILTER_8TAP_REGULAR);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(8, regular, FILTER_8TAP_REGULAR);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(4, regular, FILTER_8TAP_REGULAR);
>> +
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(64, sharp, FILTER_8TAP_SHARP);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(32, sharp, FILTER_8TAP_SHARP);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(16, sharp, FILTER_8TAP_SHARP);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(8, sharp, FILTER_8TAP_SHARP);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(4, sharp, FILTER_8TAP_SHARP);
>> +
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(64, smooth, FILTER_8TAP_SMOOTH);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(32, smooth, FILTER_8TAP_SMOOTH);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(16, smooth, FILTER_8TAP_SMOOTH);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(8, smooth, FILTER_8TAP_SMOOTH);
>> +VP9_8TAP_LOONGARCH_LSX_FUNC(4, smooth, FILTER_8TAP_SMOOTH);
>> +
>> +VP9_COPY_LOONGARCH_LSX_FUNC(64);
>> +VP9_COPY_LOONGARCH_LSX_FUNC(32);
>> +VP9_COPY_LOONGARCH_LSX_FUNC(16);
>> +VP9_COPY_LOONGARCH_LSX_FUNC(8);
>> +
>> +#undef VP9_8TAP_LOONGARCH_LSX_FUNC
>> +#undef VP9_COPY_LOONGARCH_LSX_FUNC
>> +
>> +void ff_vert_16x16_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t
>> *left,
>> + const uint8_t *top);
>> +void ff_vert_32x32_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t
>> *left,
>> + const uint8_t *top);
>> +void ff_hor_16x16_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t
>> *left,
>> + const uint8_t *top);
>> +void ff_hor_32x32_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t
>> *left,
>> + const uint8_t *top);
>> +void ff_dc_4x4_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
>> + const uint8_t *top);
>> +void ff_dc_8x8_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
>> + const uint8_t *top);
>> +void ff_dc_16x16_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t
>> *left,
>> + const uint8_t *top);
>> +void ff_dc_32x32_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t
>> *left,
>> + const uint8_t *top);
>> +void ff_dc_left_4x4_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t
>> *left,
>> + const uint8_t *top);
>> +void ff_dc_left_8x8_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t
>> *left,
>> + const uint8_t *top);
>> +void ff_dc_left_16x16_lsx(uint8_t *dst, ptrdiff_t stride,
>> + const uint8_t *left, const uint8_t *top);
>> +void ff_dc_left_32x32_lsx(uint8_t *dst, ptrdiff_t stride,
>> + const uint8_t *left, const uint8_t *top);
>> +void ff_dc_top_4x4_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t
>> *left,
>> + const uint8_t *top);
>> +void ff_dc_top_8x8_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t
>> *left,
>> + const uint8_t *top);
>> +void ff_dc_top_16x16_lsx(uint8_t *dst, ptrdiff_t stride,
>> + const uint8_t *left, const uint8_t *top);
>> +void ff_dc_top_32x32_lsx(uint8_t *dst, ptrdiff_t stride,
>> + const uint8_t *left, const uint8_t *top);
>> +void ff_dc_128_16x16_lsx(uint8_t *dst, ptrdiff_t stride,
>> + const uint8_t *left, const uint8_t *top);
>> +void ff_dc_128_32x32_lsx(uint8_t *dst, ptrdiff_t stride,
>> + const uint8_t *left, const uint8_t *top);
>> +void ff_dc_127_16x16_lsx(uint8_t *dst, ptrdiff_t stride,
>> + const uint8_t *left, const uint8_t *top);
>> +void ff_dc_127_32x32_lsx(uint8_t *dst, ptrdiff_t stride,
>> + const uint8_t *left, const uint8_t *top);
>> +void ff_dc_129_16x16_lsx(uint8_t *dst, ptrdiff_t stride,
>> + const uint8_t *left, const uint8_t *top);
>> +void ff_dc_129_32x32_lsx(uint8_t *dst, ptrdiff_t stride,
>> + const uint8_t *left, const uint8_t *top);
>> +void ff_tm_4x4_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
>> + const uint8_t *top);
>> +void ff_tm_8x8_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
>> + const uint8_t *top);
>> +void ff_tm_16x16_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t
>> *left,
>> + const uint8_t *top);
>> +void ff_tm_32x32_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t
>> *left,
>> + const uint8_t *top);
>> +
>> +#endif /* AVCODEC_LOONGARCH_VP9DSP_LOONGARCH_H */
>> diff --git a/libavcodec/vp9dsp.c b/libavcodec/vp9dsp.c
>> index 41b8ad1ad1..82bfe394d1 100644
>> --- a/libavcodec/vp9dsp.c
>> +++ b/libavcodec/vp9dsp.c
>> @@ -98,4 +98,5 @@ av_cold void ff_vp9dsp_init(VP9DSPContext *dsp, int
>> bpp, int bitexact)
>> if (ARCH_ARM) ff_vp9dsp_init_arm(dsp, bpp);
>> if (ARCH_X86) ff_vp9dsp_init_x86(dsp, bpp, bitexact);
>> if (ARCH_MIPS) ff_vp9dsp_init_mips(dsp, bpp);
>> + if (ARCH_LOONGARCH) ff_vp9dsp_init_loongarch(dsp, bpp);
>> }
>> diff --git a/libavcodec/vp9dsp.h b/libavcodec/vp9dsp.h
>> index e2256316a8..700dd72de8 100644
>> --- a/libavcodec/vp9dsp.h
>> +++ b/libavcodec/vp9dsp.h
>> @@ -132,5 +132,6 @@ void ff_vp9dsp_init_aarch64(VP9DSPContext *dsp, int
>> bpp);
>> void ff_vp9dsp_init_arm(VP9DSPContext *dsp, int bpp);
>> void ff_vp9dsp_init_x86(VP9DSPContext *dsp, int bpp, int bitexact);
>> void ff_vp9dsp_init_mips(VP9DSPContext *dsp, int bpp);
>> +void ff_vp9dsp_init_loongarch(VP9DSPContext *dsp, int bpp);
>>
>> #endif /* AVCODEC_VP9DSP_H */
>> --
>> 2.20.1
>>
>> _______________________________________________
>> ffmpeg-devel mailing list
>> ffmpeg-devel@ffmpeg.org
>> https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
>>
>> To unsubscribe, visit link above, or email
>> ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
>
> --
> Jean-Baptiste Kempf - President
> +33 672 704 734
_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
^ permalink raw reply [flat|nested] 10+ messages in thread
* [FFmpeg-devel] [PATCH 3/4] avcodec: [loongarch] Optimize vp9_lpf/idct with LSX.
2021-12-18 14:27 [FFmpeg-devel] Optimize VP8,VP9,WMV3 decoding for loongarch Hao Chen
2021-12-18 14:27 ` [FFmpeg-devel] [PATCH 1/4] avcodec: [loongarch] Optimize vp8_lpf/mc with LSX Hao Chen
2021-12-18 14:27 ` [FFmpeg-devel] [PATCH 2/4] avcodec: [loongarch] Optimize vp9_mc/intra " Hao Chen
@ 2021-12-18 14:27 ` Hao Chen
2021-12-18 14:27 ` [FFmpeg-devel] [PATCH 4/4] avcodec: [loongarch] Optimize vc1dsp with LASX Hao Chen
2021-12-20 8:37 ` [FFmpeg-devel] Optimize VP8,VP9,WMV3 decoding for loongarch Shiyou Yin
4 siblings, 0 replies; 10+ messages in thread
From: Hao Chen @ 2021-12-18 14:27 UTC (permalink / raw)
To: ffmpeg-devel; +Cc: Jin Bo
From: Jin Bo <jinbo@loongson.cn>
ffmpeg -i ../10_vp9_1080p_30fps_3Mbps.webm -f rawvideo -y /dev/null -an
before: 294fps
after : 567fps
---
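The cospi_*/sinpi_* tables added in vp9_idct_lsx.c below are the usual VP9
fixed-point DCT constants with 14 fractional bits (VP9_DCT_CONST_BITS).
As a rough stand-alone sketch, not part of the patch and assuming the
standard libvpx derivation, the values can be reproduced like this:

#include <math.h>
#include <stdio.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

int main(void)
{
    int k;

    /* cospi_k_64 = round(cos(k*pi/64) * 2^14) */
    for (k = 1; k < 32; k++)
        printf("cospi_%d_64 = %ld\n", k,
               lround(cos(k * M_PI / 64) * 16384.0));

    /* sinpi_k_9 = round(sqrt(2) * sin(k*pi/9) * 2/3 * 2^14) */
    for (k = 1; k < 5; k++)
        printf("sinpi_%d_9 = %ld\n", k,
               lround(sqrt(2.0) * sin(k * M_PI / 9) * 2.0 / 3.0 * 16384.0));
    return 0;
}

Compiled with -lm, its output should match the constant tables in the diff.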
libavcodec/loongarch/Makefile | 4 +-
libavcodec/loongarch/vp9_idct_lsx.c | 1411 ++++++++
libavcodec/loongarch/vp9_lpf_lsx.c | 3141 ++++++++++++++++++
libavcodec/loongarch/vp9dsp_init_loongarch.c | 33 +
libavcodec/loongarch/vp9dsp_loongarch.h | 38 +
5 files changed, 4626 insertions(+), 1 deletion(-)
create mode 100644 libavcodec/loongarch/vp9_idct_lsx.c
create mode 100644 libavcodec/loongarch/vp9_lpf_lsx.c
diff --git a/libavcodec/loongarch/Makefile b/libavcodec/loongarch/Makefile
index 6fcebe40a3..4b83f20e92 100644
--- a/libavcodec/loongarch/Makefile
+++ b/libavcodec/loongarch/Makefile
@@ -13,4 +13,6 @@ LASX-OBJS-$(CONFIG_H264PRED) += loongarch/h264_intrapred_lasx.o
LSX-OBJS-$(CONFIG_VP8_DECODER) += loongarch/vp8_mc_lsx.o \
loongarch/vp8_lpf_lsx.o
LSX-OBJS-$(CONFIG_VP9_DECODER) += loongarch/vp9_mc_lsx.o \
- loongarch/vp9_intra_lsx.o
+ loongarch/vp9_intra_lsx.o \
+ loongarch/vp9_lpf_lsx.o \
+ loongarch/vp9_idct_lsx.o
diff --git a/libavcodec/loongarch/vp9_idct_lsx.c b/libavcodec/loongarch/vp9_idct_lsx.c
new file mode 100644
index 0000000000..88805814c6
--- /dev/null
+++ b/libavcodec/loongarch/vp9_idct_lsx.c
@@ -0,0 +1,1411 @@
+/*
+ * Copyright (c) 2021 Loongson Technology Corporation Limited
+ * Contributed by Jin Bo <jinbo@loongson.cn>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavcodec/vp9dsp.h"
+#include "libavutil/loongarch/loongson_intrinsics.h"
+#include "vp9dsp_loongarch.h"
+#include "libavutil/attributes.h"
+
+#define VP9_DCT_CONST_BITS 14
+#define ALLOC_ALIGNED(align) __attribute__ ((aligned(align)))
+#define ROUND_POWER_OF_TWO(value, n) (((value) + (1 << ((n) - 1))) >> (n))
+
+const int32_t cospi_1_64 = 16364;
+const int32_t cospi_2_64 = 16305;
+const int32_t cospi_3_64 = 16207;
+const int32_t cospi_4_64 = 16069;
+const int32_t cospi_5_64 = 15893;
+const int32_t cospi_6_64 = 15679;
+const int32_t cospi_7_64 = 15426;
+const int32_t cospi_8_64 = 15137;
+const int32_t cospi_9_64 = 14811;
+const int32_t cospi_10_64 = 14449;
+const int32_t cospi_11_64 = 14053;
+const int32_t cospi_12_64 = 13623;
+const int32_t cospi_13_64 = 13160;
+const int32_t cospi_14_64 = 12665;
+const int32_t cospi_15_64 = 12140;
+const int32_t cospi_16_64 = 11585;
+const int32_t cospi_17_64 = 11003;
+const int32_t cospi_18_64 = 10394;
+const int32_t cospi_19_64 = 9760;
+const int32_t cospi_20_64 = 9102;
+const int32_t cospi_21_64 = 8423;
+const int32_t cospi_22_64 = 7723;
+const int32_t cospi_23_64 = 7005;
+const int32_t cospi_24_64 = 6270;
+const int32_t cospi_25_64 = 5520;
+const int32_t cospi_26_64 = 4756;
+const int32_t cospi_27_64 = 3981;
+const int32_t cospi_28_64 = 3196;
+const int32_t cospi_29_64 = 2404;
+const int32_t cospi_30_64 = 1606;
+const int32_t cospi_31_64 = 804;
+
+const int32_t sinpi_1_9 = 5283;
+const int32_t sinpi_2_9 = 9929;
+const int32_t sinpi_3_9 = 13377;
+const int32_t sinpi_4_9 = 15212;
+
+#define VP9_DOTP_CONST_PAIR(reg0, reg1, cnst0, cnst1, out0, out1) \
+{ \
+ __m128i k0_m = __lsx_vreplgr2vr_h(cnst0); \
+ __m128i s0_m, s1_m, s2_m, s3_m; \
+ \
+ s0_m = __lsx_vreplgr2vr_h(cnst1); \
+ k0_m = __lsx_vpackev_h(s0_m, k0_m); \
+ \
+ s1_m = __lsx_vilvl_h(__lsx_vneg_h(reg1), reg0); \
+ s0_m = __lsx_vilvh_h(__lsx_vneg_h(reg1), reg0); \
+ s3_m = __lsx_vilvl_h(reg0, reg1); \
+ s2_m = __lsx_vilvh_h(reg0, reg1); \
+ DUP2_ARG2(__lsx_vdp2_w_h, s1_m, k0_m, s0_m, k0_m, s1_m, s0_m); \
+ DUP2_ARG2(__lsx_vsrari_w, s1_m, VP9_DCT_CONST_BITS, \
+ s0_m, VP9_DCT_CONST_BITS, s1_m, s0_m); \
+ out0 = __lsx_vpickev_h(s0_m, s1_m); \
+ DUP2_ARG2(__lsx_vdp2_w_h, s3_m, k0_m, s2_m, k0_m, s1_m, s0_m); \
+ DUP2_ARG2(__lsx_vsrari_w, s1_m, VP9_DCT_CONST_BITS, \
+ s0_m, VP9_DCT_CONST_BITS, s1_m, s0_m); \
+ out1 = __lsx_vpickev_h(s0_m, s1_m); \
+}
+
+#define VP9_SET_COSPI_PAIR(c0_h, c1_h) \
+( { \
+ __m128i out0_m, r0_m, r1_m; \
+ \
+ r0_m = __lsx_vreplgr2vr_h(c0_h); \
+ r1_m = __lsx_vreplgr2vr_h(c1_h); \
+ out0_m = __lsx_vpackev_h(r1_m, r0_m); \
+ \
+ out0_m; \
+} )
+
+#define VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3) \
+{ \
+ uint8_t *dst_m = (uint8_t *) (dst); \
+ __m128i dst0_m, dst1_m, dst2_m, dst3_m; \
+ __m128i tmp0_m, tmp1_m; \
+ __m128i res0_m, res1_m, res2_m, res3_m; \
+ __m128i zero_m = __lsx_vldi(0); \
+ DUP4_ARG2(__lsx_vld, dst_m, 0, dst_m + dst_stride, 0, \
+ dst_m + 2 * dst_stride, 0, dst_m + 3 * dst_stride, 0, \
+ dst0_m, dst1_m, dst2_m, dst3_m); \
+ DUP4_ARG2(__lsx_vilvl_b, zero_m, dst0_m, zero_m, dst1_m, zero_m, \
+ dst2_m, zero_m, dst3_m, res0_m, res1_m, res2_m, res3_m);\
+ DUP4_ARG2(__lsx_vadd_h, res0_m, in0, res1_m, in1, res2_m, in2, \
+ res3_m, in3, res0_m, res1_m, res2_m, res3_m); \
+ DUP4_ARG1(__lsx_vclip255_h, res0_m, res1_m, res2_m, res3_m, \
+ res0_m, res1_m, res2_m, res3_m); \
+ DUP2_ARG2(__lsx_vpickev_b, res1_m, res0_m, res3_m, res2_m, \
+ tmp0_m, tmp1_m); \
+ __lsx_vstelm_d(tmp0_m, dst_m, 0, 0); \
+ __lsx_vstelm_d(tmp0_m, dst_m + dst_stride, 0, 1); \
+ __lsx_vstelm_d(tmp1_m, dst_m + 2 * dst_stride, 0, 0); \
+ __lsx_vstelm_d(tmp1_m, dst_m + 3 * dst_stride, 0, 1); \
+}
+
+#define VP9_UNPCK_UB_SH(in, out_h, out_l) \
+{ \
+ __m128i zero = __lsx_vldi(0); \
+ out_l = __lsx_vilvl_b(zero, in); \
+ out_h = __lsx_vilvh_b(zero, in); \
+}
+
+#define VP9_ILVLTRANS4x8_H(in0, in1, in2, in3, in4, in5, in6, in7, \
+ out0, out1, out2, out3, out4, out5, out6, out7) \
+{ \
+ __m128i tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
+ __m128i tmp0_n, tmp1_n, tmp2_n, tmp3_n; \
+ __m128i zero_m = __lsx_vldi(0); \
+ \
+ DUP4_ARG2(__lsx_vilvl_h, in1, in0, in3, in2, in5, in4, in7, in6, \
+ tmp0_n, tmp1_n, tmp2_n, tmp3_n); \
+ tmp0_m = __lsx_vilvl_w(tmp1_n, tmp0_n); \
+ tmp2_m = __lsx_vilvh_w(tmp1_n, tmp0_n); \
+ tmp1_m = __lsx_vilvl_w(tmp3_n, tmp2_n); \
+ tmp3_m = __lsx_vilvh_w(tmp3_n, tmp2_n); \
+ \
+ out0 = __lsx_vilvl_d(tmp1_m, tmp0_m); \
+ out1 = __lsx_vilvh_d(tmp1_m, tmp0_m); \
+ out2 = __lsx_vilvl_d(tmp3_m, tmp2_m); \
+ out3 = __lsx_vilvh_d(tmp3_m, tmp2_m); \
+ \
+ out4 = zero_m; \
+ out5 = zero_m; \
+ out6 = zero_m; \
+ out7 = zero_m; \
+}
+
+/* multiply and add macro */
+#define VP9_MADD(inp0, inp1, inp2, inp3, cst0, cst1, cst2, cst3, \
+ out0, out1, out2, out3) \
+{ \
+ __m128i madd_s0_m, madd_s1_m, madd_s2_m, madd_s3_m; \
+ __m128i tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
+ \
+ madd_s1_m = __lsx_vilvl_h(inp1, inp0); \
+ madd_s0_m = __lsx_vilvh_h(inp1, inp0); \
+ madd_s3_m = __lsx_vilvl_h(inp3, inp2); \
+ madd_s2_m = __lsx_vilvh_h(inp3, inp2); \
+ DUP4_ARG2(__lsx_vdp2_w_h, madd_s1_m, cst0, madd_s0_m, cst0, \
+ madd_s1_m, cst1, madd_s0_m, cst1, tmp0_m, tmp1_m, \
+ tmp2_m, tmp3_m); \
+ DUP4_ARG2(__lsx_vsrari_w, tmp0_m, VP9_DCT_CONST_BITS, tmp1_m, \
+ VP9_DCT_CONST_BITS, tmp2_m, VP9_DCT_CONST_BITS, tmp3_m, \
+ VP9_DCT_CONST_BITS, tmp0_m, tmp1_m, tmp2_m, tmp3_m); \
+ DUP2_ARG2(__lsx_vpickev_h, tmp1_m, tmp0_m, tmp3_m, tmp2_m, out0, out1); \
+ DUP4_ARG2(__lsx_vdp2_w_h, madd_s3_m, cst2, madd_s2_m, cst2, madd_s3_m, \
+ cst3, madd_s2_m, cst3, tmp0_m, tmp1_m, tmp2_m, tmp3_m); \
+ DUP4_ARG2(__lsx_vsrari_w, tmp0_m, VP9_DCT_CONST_BITS, \
+ tmp1_m, VP9_DCT_CONST_BITS, tmp2_m, VP9_DCT_CONST_BITS, \
+ tmp3_m, VP9_DCT_CONST_BITS, tmp0_m, tmp1_m, tmp2_m, tmp3_m); \
+ DUP2_ARG2(__lsx_vpickev_h, tmp1_m, tmp0_m, tmp3_m, tmp2_m, out2, out3); \
+}
+
+#define VP9_SET_CONST_PAIR(mask_h, idx1_h, idx2_h) \
+( { \
+ __m128i c0_m, c1_m; \
+ \
+ DUP2_ARG2(__lsx_vreplvei_h, mask_h, idx1_h, mask_h, idx2_h, c0_m, c1_m); \
+ c0_m = __lsx_vpackev_h(c1_m, c0_m); \
+ \
+ c0_m; \
+} )
+
+/* idct 8x8 macro */
+#define VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, \
+ out0, out1, out2, out3, out4, out5, out6, out7) \
+{ \
+ __m128i tp0_m, tp1_m, tp2_m, tp3_m, tp4_m, tp5_m, tp6_m, tp7_m; \
+ __m128i k0_m, k1_m, k2_m, k3_m, res0_m, res1_m, res2_m, res3_m; \
+ __m128i tmp0_m, tmp1_m, tmp2_m, tmp3_m; \
+ v8i16 mask_m = { cospi_28_64, cospi_4_64, cospi_20_64, cospi_12_64, \
+ cospi_16_64, -cospi_4_64, -cospi_20_64, -cospi_16_64 }; \
+ \
+ k0_m = VP9_SET_CONST_PAIR(mask_m, 0, 5); \
+ k1_m = VP9_SET_CONST_PAIR(mask_m, 1, 0); \
+ k2_m = VP9_SET_CONST_PAIR(mask_m, 6, 3); \
+ k3_m = VP9_SET_CONST_PAIR(mask_m, 3, 2); \
+ VP9_MADD(in1, in7, in3, in5, k0_m, k1_m, k2_m, k3_m, in1, in7, in3, in5); \
+ DUP2_ARG2(__lsx_vsub_h, in1, in3, in7, in5, res0_m, res1_m); \
+ k0_m = VP9_SET_CONST_PAIR(mask_m, 4, 7); \
+ k1_m = __lsx_vreplvei_h(mask_m, 4); \
+ \
+ res2_m = __lsx_vilvl_h(res0_m, res1_m); \
+ res3_m = __lsx_vilvh_h(res0_m, res1_m); \
+ DUP4_ARG2(__lsx_vdp2_w_h, res2_m, k0_m, res3_m, k0_m, res2_m, k1_m, \
+ res3_m, k1_m, tmp0_m, tmp1_m, tmp2_m, tmp3_m); \
+ DUP4_ARG2(__lsx_vsrari_w, tmp0_m, VP9_DCT_CONST_BITS, \
+ tmp1_m, VP9_DCT_CONST_BITS, tmp2_m, VP9_DCT_CONST_BITS, \
+ tmp3_m, VP9_DCT_CONST_BITS, tmp0_m, tmp1_m, tmp2_m, tmp3_m); \
+ tp4_m = __lsx_vadd_h(in1, in3); \
+ DUP2_ARG2(__lsx_vpickev_h, tmp1_m, tmp0_m, tmp3_m, tmp2_m, tp5_m, tp6_m); \
+ tp7_m = __lsx_vadd_h(in7, in5); \
+ k2_m = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64); \
+ k3_m = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64); \
+ VP9_MADD(in0, in4, in2, in6, k1_m, k0_m, k2_m, k3_m, \
+ in0, in4, in2, in6); \
+ LSX_BUTTERFLY_4_H(in0, in4, in2, in6, tp0_m, tp1_m, tp2_m, tp3_m); \
+ LSX_BUTTERFLY_8_H(tp0_m, tp1_m, tp2_m, tp3_m, tp4_m, tp5_m, tp6_m, tp7_m, \
+ out0, out1, out2, out3, out4, out5, out6, out7); \
+}
+
+static av_always_inline
+void vp9_idct8x8_1_add_lsx(int16_t *input, uint8_t *dst,
+ int32_t dst_stride)
+{
+ int16_t out;
+ int32_t val;
+ __m128i vec;
+
+ out = ROUND_POWER_OF_TWO((input[0] * cospi_16_64), VP9_DCT_CONST_BITS);
+ out = ROUND_POWER_OF_TWO((out * cospi_16_64), VP9_DCT_CONST_BITS);
+ val = ROUND_POWER_OF_TWO(out, 5);
+ vec = __lsx_vreplgr2vr_h(val);
+ input[0] = 0;
+
+ VP9_ADDBLK_ST8x4_UB(dst, dst_stride, vec, vec, vec, vec);
+ dst += (4 * dst_stride);
+ VP9_ADDBLK_ST8x4_UB(dst, dst_stride, vec, vec, vec, vec);
+}
+
+static void vp9_idct8x8_12_colcol_addblk_lsx(int16_t *input, uint8_t *dst,
+ int32_t dst_stride)
+{
+ __m128i in0, in1, in2, in3, in4, in5, in6, in7;
+ __m128i s0, s1, s2, s3, s4, s5, s6, s7, k0, k1, k2, k3, m0, m1, m2, m3;
+ __m128i tmp0, tmp1, tmp2, tmp3;
+ __m128i zero = __lsx_vldi(0);
+
+ /* load vector elements of 8x8 block */
+ DUP4_ARG2(__lsx_vld, input, 0, input, 16, input, 32, input, 48,
+ in0, in1, in2, in3);
+ DUP4_ARG2(__lsx_vld, input, 64, input, 80, input, 96, input, 112,
+ in4, in5, in6, in7);
+ __lsx_vst(zero, input, 0);
+ __lsx_vst(zero, input, 16);
+ __lsx_vst(zero, input, 32);
+ __lsx_vst(zero, input, 48);
+ __lsx_vst(zero, input, 64);
+ __lsx_vst(zero, input, 80);
+ __lsx_vst(zero, input, 96);
+ __lsx_vst(zero, input, 112);
+ DUP4_ARG2(__lsx_vilvl_d,in1, in0, in3, in2, in5, in4, in7,
+ in6, in0, in1, in2, in3);
+
+ /* stage1 */
+ DUP2_ARG2(__lsx_vilvh_h, in3, in0, in2, in1, s0, s1);
+ k0 = VP9_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64);
+ k1 = VP9_SET_COSPI_PAIR(cospi_4_64, cospi_28_64);
+ k2 = VP9_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64);
+ k3 = VP9_SET_COSPI_PAIR(cospi_12_64, cospi_20_64);
+ DUP4_ARG2(__lsx_vdp2_w_h, s0, k0, s0, k1, s1, k2, s1, k3,
+ tmp0, tmp1, tmp2, tmp3);
+ DUP4_ARG2(__lsx_vsrari_w, tmp0, VP9_DCT_CONST_BITS, tmp1,
+ VP9_DCT_CONST_BITS, tmp2, VP9_DCT_CONST_BITS, tmp3,
+ VP9_DCT_CONST_BITS, tmp0, tmp1, tmp2, tmp3);
+ DUP4_ARG2(__lsx_vpickev_h, zero, tmp0, zero, tmp1, zero, tmp2, zero, tmp3,
+ s0, s1, s2, s3);
+ LSX_BUTTERFLY_4_H(s0, s1, s3, s2, s4, s7, s6, s5);
+
+ /* stage2 */
+ DUP2_ARG2(__lsx_vilvl_h, in3, in1, in2, in0, s1, s0);
+ k0 = VP9_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);
+ k1 = VP9_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);
+ k2 = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);
+ k3 = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);
+ DUP4_ARG2(__lsx_vdp2_w_h, s0, k0, s0, k1, s1, k2, s1, k3,
+ tmp0, tmp1, tmp2, tmp3);
+ DUP4_ARG2(__lsx_vsrari_w, tmp0, VP9_DCT_CONST_BITS, tmp1,
+ VP9_DCT_CONST_BITS, tmp2, VP9_DCT_CONST_BITS, tmp3,
+ VP9_DCT_CONST_BITS, tmp0, tmp1, tmp2, tmp3);
+ DUP4_ARG2(__lsx_vpickev_h, zero, tmp0, zero, tmp1, zero, tmp2, zero, tmp3,
+ s0, s1, s2, s3);
+ LSX_BUTTERFLY_4_H(s0, s1, s2, s3, m0, m1, m2, m3);
+
+ /* stage3 */
+ s0 = __lsx_vilvl_h(s6, s5);
+
+ k1 = VP9_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64);
+ DUP2_ARG2(__lsx_vdp2_w_h, s0, k1, s0, k0, tmp0, tmp1);
+ DUP2_ARG2(__lsx_vsrari_w, tmp0, VP9_DCT_CONST_BITS, tmp1,
+ VP9_DCT_CONST_BITS, tmp0, tmp1);
+ DUP2_ARG2(__lsx_vpickev_h, zero, tmp0, zero, tmp1, s2, s3);
+
+ /* stage4 */
+ LSX_BUTTERFLY_8_H(m0, m1, m2, m3, s4, s2, s3, s7,
+ in0, in1, in2, in3, in4, in5, in6, in7);
+ VP9_ILVLTRANS4x8_H(in0, in1, in2, in3, in4, in5, in6, in7,
+ in0, in1, in2, in3, in4, in5, in6, in7);
+ VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
+ in0, in1, in2, in3, in4, in5, in6, in7);
+
+ /* final rounding (add 2^4, divide by 2^5) and shift */
+ DUP4_ARG2(__lsx_vsrari_h, in0 , 5, in1, 5, in2, 5, in3, 5,
+ in0, in1, in2, in3);
+ DUP4_ARG2(__lsx_vsrari_h, in4 , 5, in5, 5, in6, 5, in7, 5,
+ in4, in5, in6, in7);
+
+ /* add block and store 8x8 */
+ VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);
+ dst += (4 * dst_stride);
+ VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7);
+}
+
+static void vp9_idct8x8_colcol_addblk_lsx(int16_t *input, uint8_t *dst,
+ int32_t dst_stride)
+{
+ __m128i in0, in1, in2, in3, in4, in5, in6, in7;
+ __m128i zero = __lsx_vldi(0);
+
+ /* load vector elements of 8x8 block */
+ DUP4_ARG2(__lsx_vld, input, 0, input, 16, input, 32, input, 48,
+ in0, in1, in2, in3);
+ DUP4_ARG2(__lsx_vld, input, 64, input, 80, input, 96, input, 112,
+ in4, in5, in6, in7);
+ __lsx_vst(zero, input, 0);
+ __lsx_vst(zero, input, 16);
+ __lsx_vst(zero, input, 32);
+ __lsx_vst(zero, input, 48);
+ __lsx_vst(zero, input, 64);
+ __lsx_vst(zero, input, 80);
+ __lsx_vst(zero, input, 96);
+ __lsx_vst(zero, input, 112);
+ /* 1D idct8x8 */
+ VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
+ in0, in1, in2, in3, in4, in5, in6, in7);
+ /* columns transform */
+ LSX_TRANSPOSE8x8_H(in0, in1, in2, in3, in4, in5, in6, in7,
+ in0, in1, in2, in3, in4, in5, in6, in7);
+ /* 1D idct8x8 */
+ VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7,
+ in0, in1, in2, in3, in4, in5, in6, in7);
+ /* final rounding (add 2^4, divide by 2^5) and shift */
+ DUP4_ARG2(__lsx_vsrari_h, in0, 5, in1, 5, in2, 5, in3, 5,
+ in0, in1, in2, in3);
+ DUP4_ARG2(__lsx_vsrari_h, in4, 5, in5, 5, in6, 5, in7, 5,
+ in4, in5, in6, in7);
+ /* add block and store 8x8 */
+ VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);
+ dst += (4 * dst_stride);
+ VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7);
+}
+
+static void vp9_idct16_1d_columns_addblk_lsx(int16_t *input, uint8_t *dst,
+ int32_t dst_stride)
+{
+ __m128i loc0, loc1, loc2, loc3;
+ __m128i reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14;
+ __m128i reg1, reg3, reg5, reg7, reg9, reg11, reg13, reg15;
+ __m128i tmp5, tmp6, tmp7;
+ __m128i zero = __lsx_vldi(0);
+ int32_t offset = dst_stride << 2;
+
+ DUP4_ARG2(__lsx_vld, input, 32*0, input, 32*1, input, 32*2, input, 32*3,
+ reg0, reg1, reg2, reg3);
+ DUP4_ARG2(__lsx_vld, input, 32*4, input, 32*5, input, 32*6, input, 32*7,
+ reg4, reg5, reg6, reg7);
+ DUP4_ARG2(__lsx_vld, input, 32*8, input, 32*9, input, 32*10, input, 32*11,
+ reg8, reg9, reg10, reg11);
+ DUP4_ARG2(__lsx_vld, input, 32*12, input, 32*13, input, 32*14, input,
+ 32*15, reg12, reg13, reg14, reg15);
+
+ __lsx_vst(zero, input, 32*0);
+ __lsx_vst(zero, input, 32*1);
+ __lsx_vst(zero, input, 32*2);
+ __lsx_vst(zero, input, 32*3);
+ __lsx_vst(zero, input, 32*4);
+ __lsx_vst(zero, input, 32*5);
+ __lsx_vst(zero, input, 32*6);
+ __lsx_vst(zero, input, 32*7);
+ __lsx_vst(zero, input, 32*8);
+ __lsx_vst(zero, input, 32*9);
+ __lsx_vst(zero, input, 32*10);
+ __lsx_vst(zero, input, 32*11);
+ __lsx_vst(zero, input, 32*12);
+ __lsx_vst(zero, input, 32*13);
+ __lsx_vst(zero, input, 32*14);
+ __lsx_vst(zero, input, 32*15);
+
+ VP9_DOTP_CONST_PAIR(reg2, reg14, cospi_28_64, cospi_4_64, reg2, reg14);
+ VP9_DOTP_CONST_PAIR(reg10, reg6, cospi_12_64, cospi_20_64, reg10, reg6);
+ LSX_BUTTERFLY_4_H(reg2, reg14, reg6, reg10, loc0, loc1, reg14, reg2);
+ VP9_DOTP_CONST_PAIR(reg14, reg2, cospi_16_64, cospi_16_64, loc2, loc3);
+ VP9_DOTP_CONST_PAIR(reg0, reg8, cospi_16_64, cospi_16_64, reg0, reg8);
+ VP9_DOTP_CONST_PAIR(reg4, reg12, cospi_24_64, cospi_8_64, reg4, reg12);
+ LSX_BUTTERFLY_4_H(reg8, reg0, reg4, reg12, reg2, reg6, reg10, reg14);
+
+ reg0 = __lsx_vsub_h(reg2, loc1);
+ reg2 = __lsx_vadd_h(reg2, loc1);
+ reg12 = __lsx_vsub_h(reg14, loc0);
+ reg14 = __lsx_vadd_h(reg14, loc0);
+ reg4 = __lsx_vsub_h(reg6, loc3);
+ reg6 = __lsx_vadd_h(reg6, loc3);
+ reg8 = __lsx_vsub_h(reg10, loc2);
+ reg10 = __lsx_vadd_h(reg10, loc2);
+
+ /* stage2 */
+ VP9_DOTP_CONST_PAIR(reg1, reg15, cospi_30_64, cospi_2_64, reg1, reg15);
+ VP9_DOTP_CONST_PAIR(reg9, reg7, cospi_14_64, cospi_18_64, loc2, loc3);
+
+ reg9 = __lsx_vsub_h(reg1, loc2);
+ reg1 = __lsx_vadd_h(reg1, loc2);
+ reg7 = __lsx_vsub_h(reg15, loc3);
+ reg15 = __lsx_vadd_h(reg15, loc3);
+
+ VP9_DOTP_CONST_PAIR(reg5, reg11, cospi_22_64, cospi_10_64, reg5, reg11);
+ VP9_DOTP_CONST_PAIR(reg13, reg3, cospi_6_64, cospi_26_64, loc0, loc1);
+ LSX_BUTTERFLY_4_H(loc0, loc1, reg11, reg5, reg13, reg3, reg11, reg5);
+
+ loc1 = __lsx_vadd_h(reg15, reg3);
+ reg3 = __lsx_vsub_h(reg15, reg3);
+ loc2 = __lsx_vadd_h(reg2, loc1);
+ reg15 = __lsx_vsub_h(reg2, loc1);
+
+ loc1 = __lsx_vadd_h(reg1, reg13);
+ reg13 = __lsx_vsub_h(reg1, reg13);
+ loc0 = __lsx_vadd_h(reg0, loc1);
+ loc1 = __lsx_vsub_h(reg0, loc1);
+ tmp6 = loc0;
+ tmp7 = loc1;
+ reg0 = loc2;
+
+ VP9_DOTP_CONST_PAIR(reg7, reg9, cospi_24_64, cospi_8_64, reg7, reg9);
+ VP9_DOTP_CONST_PAIR(__lsx_vneg_h(reg5), __lsx_vneg_h(reg11), cospi_8_64,
+ cospi_24_64, reg5, reg11);
+
+ loc0 = __lsx_vadd_h(reg9, reg5);
+ reg5 = __lsx_vsub_h(reg9, reg5);
+ reg2 = __lsx_vadd_h(reg6, loc0);
+ reg1 = __lsx_vsub_h(reg6, loc0);
+
+ loc0 = __lsx_vadd_h(reg7, reg11);
+ reg11 = __lsx_vsub_h(reg7, reg11);
+ loc1 = __lsx_vadd_h(reg4, loc0);
+ loc2 = __lsx_vsub_h(reg4, loc0);
+ tmp5 = loc1;
+
+ VP9_DOTP_CONST_PAIR(reg5, reg11, cospi_16_64, cospi_16_64, reg5, reg11);
+ LSX_BUTTERFLY_4_H(reg8, reg10, reg11, reg5, loc0, reg4, reg9, loc1);
+
+ reg10 = loc0;
+ reg11 = loc1;
+
+ VP9_DOTP_CONST_PAIR(reg3, reg13, cospi_16_64, cospi_16_64, reg3, reg13);
+ LSX_BUTTERFLY_4_H(reg12, reg14, reg13, reg3, reg8, reg6, reg7, reg5);
+ reg13 = loc2;
+
+ /* round, add to dst and store the output */
+ reg12 = tmp5;
+ reg14 = tmp6;
+ reg3 = tmp7;
+
+ DUP4_ARG2(__lsx_vsrari_h, reg0, 6, reg2, 6, reg4, 6, reg6, 6,
+ reg0, reg2, reg4, reg6);
+ VP9_ADDBLK_ST8x4_UB(dst, dst_stride, reg0, reg2, reg4, reg6);
+ dst += offset;
+ DUP4_ARG2(__lsx_vsrari_h, reg8, 6, reg10, 6, reg12, 6, reg14, 6,
+ reg8, reg10, reg12, reg14);
+ VP9_ADDBLK_ST8x4_UB(dst, dst_stride, reg8, reg10, reg12, reg14);
+ dst += offset;
+ DUP4_ARG2(__lsx_vsrari_h, reg3, 6, reg5, 6, reg11, 6, reg13, 6,
+ reg3, reg5, reg11, reg13);
+ VP9_ADDBLK_ST8x4_UB(dst, dst_stride, reg3, reg13, reg11, reg5);
+ dst += offset;
+ DUP4_ARG2(__lsx_vsrari_h, reg1, 6, reg7, 6, reg9, 6, reg15, 6,
+ reg1, reg7, reg9, reg15);
+ VP9_ADDBLK_ST8x4_UB(dst, dst_stride, reg7, reg9, reg1, reg15);
+}
+
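+/* Row pass counterpart of the above: the same 1D 16-point transform, but
+ * the result is transposed and written to an intermediate buffer instead
+ * of being added to dst. */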
+static void vp9_idct16_1d_columns_lsx(int16_t *input, int16_t *output)
+{
+ __m128i loc0, loc1, loc2, loc3;
+ __m128i reg1, reg3, reg5, reg7, reg9, reg11, reg13, reg15;
+ __m128i reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14;
+ __m128i tmp5, tmp6, tmp7;
+ __m128i zero = __lsx_vldi(0);
+ int16_t *offset;
+
+ DUP4_ARG2(__lsx_vld, input, 32*0, input, 32*1, input, 32*2, input, 32*3,
+ reg0, reg1, reg2, reg3);
+ DUP4_ARG2(__lsx_vld, input, 32*4, input, 32*5, input, 32*6, input, 32*7,
+ reg4, reg5, reg6, reg7);
+ DUP4_ARG2(__lsx_vld, input, 32*8, input, 32*9, input, 32*10, input, 32*11,
+ reg8, reg9, reg10, reg11);
+ DUP4_ARG2(__lsx_vld, input, 32*12, input, 32*13, input, 32*14, input,
+ 32*15, reg12, reg13, reg14, reg15);
+
+ __lsx_vst(zero, input, 32*0);
+ __lsx_vst(zero, input, 32*1);
+ __lsx_vst(zero, input, 32*2);
+ __lsx_vst(zero, input, 32*3);
+ __lsx_vst(zero, input, 32*4);
+ __lsx_vst(zero, input, 32*5);
+ __lsx_vst(zero, input, 32*6);
+ __lsx_vst(zero, input, 32*7);
+ __lsx_vst(zero, input, 32*8);
+ __lsx_vst(zero, input, 32*9);
+ __lsx_vst(zero, input, 32*10);
+ __lsx_vst(zero, input, 32*11);
+ __lsx_vst(zero, input, 32*12);
+ __lsx_vst(zero, input, 32*13);
+ __lsx_vst(zero, input, 32*14);
+ __lsx_vst(zero, input, 32*15);
+
+ VP9_DOTP_CONST_PAIR(reg2, reg14, cospi_28_64, cospi_4_64, reg2, reg14);
+ VP9_DOTP_CONST_PAIR(reg10, reg6, cospi_12_64, cospi_20_64, reg10, reg6);
+ LSX_BUTTERFLY_4_H(reg2, reg14, reg6, reg10, loc0, loc1, reg14, reg2);
+ VP9_DOTP_CONST_PAIR(reg14, reg2, cospi_16_64, cospi_16_64, loc2, loc3);
+ VP9_DOTP_CONST_PAIR(reg0, reg8, cospi_16_64, cospi_16_64, reg0, reg8);
+ VP9_DOTP_CONST_PAIR(reg4, reg12, cospi_24_64, cospi_8_64, reg4, reg12);
+ LSX_BUTTERFLY_4_H(reg8, reg0, reg4, reg12, reg2, reg6, reg10, reg14);
+
+ reg0 = __lsx_vsub_h(reg2, loc1);
+ reg2 = __lsx_vadd_h(reg2, loc1);
+ reg12 = __lsx_vsub_h(reg14, loc0);
+ reg14 = __lsx_vadd_h(reg14, loc0);
+ reg4 = __lsx_vsub_h(reg6, loc3);
+ reg6 = __lsx_vadd_h(reg6, loc3);
+ reg8 = __lsx_vsub_h(reg10, loc2);
+ reg10 = __lsx_vadd_h(reg10, loc2);
+
+ /* stage2 */
+ VP9_DOTP_CONST_PAIR(reg1, reg15, cospi_30_64, cospi_2_64, reg1, reg15);
+ VP9_DOTP_CONST_PAIR(reg9, reg7, cospi_14_64, cospi_18_64, loc2, loc3);
+
+ reg9 = __lsx_vsub_h(reg1, loc2);
+ reg1 = __lsx_vadd_h(reg1, loc2);
+ reg7 = __lsx_vsub_h(reg15, loc3);
+ reg15 = __lsx_vadd_h(reg15, loc3);
+
+ VP9_DOTP_CONST_PAIR(reg5, reg11, cospi_22_64, cospi_10_64, reg5, reg11);
+ VP9_DOTP_CONST_PAIR(reg13, reg3, cospi_6_64, cospi_26_64, loc0, loc1);
+ LSX_BUTTERFLY_4_H(loc0, loc1, reg11, reg5, reg13, reg3, reg11, reg5);
+
+ loc1 = __lsx_vadd_h(reg15, reg3);
+ reg3 = __lsx_vsub_h(reg15, reg3);
+ loc2 = __lsx_vadd_h(reg2, loc1);
+ reg15 = __lsx_vsub_h(reg2, loc1);
+
+ loc1 = __lsx_vadd_h(reg1, reg13);
+ reg13 = __lsx_vsub_h(reg1, reg13);
+ loc0 = __lsx_vadd_h(reg0, loc1);
+ loc1 = __lsx_vsub_h(reg0, loc1);
+ tmp6 = loc0;
+ tmp7 = loc1;
+ reg0 = loc2;
+
+ VP9_DOTP_CONST_PAIR(reg7, reg9, cospi_24_64, cospi_8_64, reg7, reg9);
+ VP9_DOTP_CONST_PAIR(__lsx_vneg_h(reg5), __lsx_vneg_h(reg11), cospi_8_64,
+ cospi_24_64, reg5, reg11);
+
+ loc0 = __lsx_vadd_h(reg9, reg5);
+ reg5 = __lsx_vsub_h(reg9, reg5);
+ reg2 = __lsx_vadd_h(reg6, loc0);
+ reg1 = __lsx_vsub_h(reg6, loc0);
+
+ loc0 = __lsx_vadd_h(reg7, reg11);
+ reg11 = __lsx_vsub_h(reg7, reg11);
+ loc1 = __lsx_vadd_h(reg4, loc0);
+ loc2 = __lsx_vsub_h(reg4, loc0);
+
+ tmp5 = loc1;
+
+ VP9_DOTP_CONST_PAIR(reg5, reg11, cospi_16_64, cospi_16_64, reg5, reg11);
+ LSX_BUTTERFLY_4_H(reg8, reg10, reg11, reg5, loc0, reg4, reg9, loc1);
+
+ reg10 = loc0;
+ reg11 = loc1;
+
+ VP9_DOTP_CONST_PAIR(reg3, reg13, cospi_16_64, cospi_16_64, reg3, reg13);
+ LSX_BUTTERFLY_4_H(reg12, reg14, reg13, reg3, reg8, reg6, reg7, reg5);
+ reg13 = loc2;
+
+ /* Transpose and store the output */
+ reg12 = tmp5;
+ reg14 = tmp6;
+ reg3 = tmp7;
+
+ /* transpose block */
+ LSX_TRANSPOSE8x8_H(reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14,
+ reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14);
+
+ __lsx_vst(reg0, output, 32*0);
+ __lsx_vst(reg2, output, 32*1);
+ __lsx_vst(reg4, output, 32*2);
+ __lsx_vst(reg6, output, 32*3);
+ __lsx_vst(reg8, output, 32*4);
+ __lsx_vst(reg10, output, 32*5);
+ __lsx_vst(reg12, output, 32*6);
+ __lsx_vst(reg14, output, 32*7);
+
+ /* transpose block */
+ LSX_TRANSPOSE8x8_H(reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15,
+ reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15);
+
+ offset = output + 8;
+ __lsx_vst(reg3, offset, 32*0);
+ __lsx_vst(reg13, offset, 32*1);
+ __lsx_vst(reg11, offset, 32*2);
+ __lsx_vst(reg5, offset, 32*3);
+
+ offset = output + 8 + 4 * 16;
+ __lsx_vst(reg7, offset, 32*0);
+ __lsx_vst(reg9, offset, 32*1);
+ __lsx_vst(reg1, offset, 32*2);
+ __lsx_vst(reg15, offset, 32*3);
+}
+
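+/* DC-only 16x16 path: both 1D passes reduce to a multiply by cospi_16_64
+ * with VP9_DCT_CONST_BITS rounding, followed by the final 6-bit rounding;
+ * the resulting constant is added to every pixel of the block. */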
+static void vp9_idct16x16_1_add_lsx(int16_t *input, uint8_t *dst,
+ int32_t dst_stride)
+{
+ uint8_t i;
+ int16_t out;
+ __m128i vec, res0, res1, res2, res3, res4, res5, res6, res7;
+ __m128i dst0, dst1, dst2, dst3, tmp0, tmp1, tmp2, tmp3;
+ int32_t stride2 = dst_stride << 1;
+ int32_t stride3 = stride2 + dst_stride;
+ int32_t stride4 = stride2 << 1;
+
+ out = ROUND_POWER_OF_TWO((input[0] * cospi_16_64), VP9_DCT_CONST_BITS);
+ out = ROUND_POWER_OF_TWO((out * cospi_16_64), VP9_DCT_CONST_BITS);
+ out = ROUND_POWER_OF_TWO(out, 6);
+ input[0] = 0;
+ vec = __lsx_vreplgr2vr_h(out);
+
+ for (i = 4; i--;) {
+ dst0 = __lsx_vld(dst, 0);
+ DUP2_ARG2(__lsx_vldx, dst, dst_stride, dst, stride2, dst1, dst2);
+ dst3 = __lsx_vldx(dst, stride3);
+ VP9_UNPCK_UB_SH(dst0, res4, res0);
+ VP9_UNPCK_UB_SH(dst1, res5, res1);
+ VP9_UNPCK_UB_SH(dst2, res6, res2);
+ VP9_UNPCK_UB_SH(dst3, res7, res3);
+ DUP4_ARG2(__lsx_vadd_h, res0, vec, res1, vec, res2, vec, res3, vec,
+ res0, res1, res2, res3);
+ DUP4_ARG2(__lsx_vadd_h, res4, vec, res5, vec, res6, vec, res7, vec,
+ res4, res5, res6, res7);
+ DUP4_ARG1(__lsx_vclip255_h, res0, res1, res2, res3,
+ res0, res1, res2, res3);
+ DUP4_ARG1(__lsx_vclip255_h, res4, res5, res6, res7,
+ res4, res5, res6, res7);
+ DUP4_ARG2(__lsx_vpickev_b, res4, res0, res5, res1, res6,
+ res2, res7, res3, tmp0, tmp1, tmp2, tmp3);
+ __lsx_vst(tmp0, dst, 0);
+ __lsx_vstx(tmp1, dst, dst_stride);
+ __lsx_vstx(tmp2, dst, stride2);
+ __lsx_vstx(tmp3, dst, stride3);
+ dst += stride4;
+ }
+}
+
+static void vp9_idct16x16_10_colcol_addblk_lsx(int16_t *input, uint8_t *dst,
+ int32_t dst_stride)
+{
+ int32_t i;
+ int16_t out_arr[16 * 16] ALLOC_ALIGNED(16);
+ int16_t *out = out_arr;
+ __m128i zero = __lsx_vldi(0);
+
+ /* transform rows */
+ vp9_idct16_1d_columns_lsx(input, out);
+
+ /* short case just considers top 4 rows as valid output */
+ out += 4 * 16;
+ for (i = 3; i--;) {
+ __lsx_vst(zero, out, 0);
+ __lsx_vst(zero, out, 16);
+ __lsx_vst(zero, out, 32);
+ __lsx_vst(zero, out, 48);
+ __lsx_vst(zero, out, 64);
+ __lsx_vst(zero, out, 80);
+ __lsx_vst(zero, out, 96);
+ __lsx_vst(zero, out, 112);
+ out += 64;
+ }
+
+ out = out_arr;
+
+ /* transform columns */
+ for (i = 0; i < 2; i++) {
+ /* process 8 * 16 block */
+ vp9_idct16_1d_columns_addblk_lsx((out + (i << 3)), (dst + (i << 3)),
+ dst_stride);
+ }
+}
+
+static void vp9_idct16x16_colcol_addblk_lsx(int16_t *input, uint8_t *dst,
+ int32_t dst_stride)
+{
+ int32_t i;
+ int16_t out_arr[16 * 16] ALLOC_ALIGNED(16);
+ int16_t *out = out_arr;
+
+ /* transform rows */
+ for (i = 0; i < 2; i++) {
+ /* process 8 * 16 block */
+ vp9_idct16_1d_columns_lsx((input + (i << 3)), (out + (i << 7)));
+ }
+
+ /* transform columns */
+ for (i = 0; i < 2; i++) {
+ /* process 8 * 16 block */
+ vp9_idct16_1d_columns_addblk_lsx((out + (i << 3)), (dst + (i << 3)),
+ dst_stride);
+ }
+}
+
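+/* Final butterfly of the 8x32 row pass: the even and odd halves are
+ * recombined, the sums (rows 0..15) are transposed directly while the
+ * differences (rows 16..31) are staged through tmp_buf, and the transposed
+ * 8x32 block is written to the output buffer for the column pass. */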
+static void vp9_idct_butterfly_transpose_store(int16_t *tmp_buf,
+ int16_t *tmp_eve_buf,
+ int16_t *tmp_odd_buf,
+ int16_t *dst)
+{
+ __m128i vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
+ __m128i m0, m1, m2, m3, m4, m5, m6, m7, n0, n1, n2, n3, n4, n5, n6, n7;
+
+ /* FINAL BUTTERFLY : Dependency on Even & Odd */
+ vec0 = __lsx_vld(tmp_odd_buf, 0);
+ vec1 = __lsx_vld(tmp_odd_buf, 9 * 16);
+ vec2 = __lsx_vld(tmp_odd_buf, 14 * 16);
+ vec3 = __lsx_vld(tmp_odd_buf, 6 * 16);
+ loc0 = __lsx_vld(tmp_eve_buf, 0);
+ loc1 = __lsx_vld(tmp_eve_buf, 8 * 16);
+ loc2 = __lsx_vld(tmp_eve_buf, 4 * 16);
+ loc3 = __lsx_vld(tmp_eve_buf, 12 * 16);
+
+ DUP4_ARG2(__lsx_vadd_h, loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0,
+ m0, m4, m2, m6);
+
+ #define SUB(a, b) __lsx_vsub_h(a, b)
+
+ __lsx_vst(SUB(loc0, vec3), tmp_buf, 31 * 16);
+ __lsx_vst(SUB(loc1, vec2), tmp_buf, 23 * 16);
+ __lsx_vst(SUB(loc2, vec1), tmp_buf, 27 * 16);
+ __lsx_vst(SUB(loc3, vec0), tmp_buf, 19 * 16);
+
+ /* Load 8 & Store 8 */
+ vec0 = __lsx_vld(tmp_odd_buf, 4 * 16);
+ vec1 = __lsx_vld(tmp_odd_buf, 13 * 16);
+ vec2 = __lsx_vld(tmp_odd_buf, 10 * 16);
+ vec3 = __lsx_vld(tmp_odd_buf, 3 * 16);
+ loc0 = __lsx_vld(tmp_eve_buf, 2 * 16);
+ loc1 = __lsx_vld(tmp_eve_buf, 10 * 16);
+ loc2 = __lsx_vld(tmp_eve_buf, 6 * 16);
+ loc3 = __lsx_vld(tmp_eve_buf, 14 * 16);
+
+ DUP4_ARG2(__lsx_vadd_h, loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0,
+ m1, m5, m3, m7);
+
+ __lsx_vst(SUB(loc0, vec3), tmp_buf, 29 * 16);
+ __lsx_vst(SUB(loc1, vec2), tmp_buf, 21 * 16);
+ __lsx_vst(SUB(loc2, vec1), tmp_buf, 25 * 16);
+ __lsx_vst(SUB(loc3, vec0), tmp_buf, 17 * 16);
+
+ /* Load 8 & Store 8 */
+ vec0 = __lsx_vld(tmp_odd_buf, 2 * 16);
+ vec1 = __lsx_vld(tmp_odd_buf, 11 * 16);
+ vec2 = __lsx_vld(tmp_odd_buf, 12 * 16);
+ vec3 = __lsx_vld(tmp_odd_buf, 7 * 16);
+ loc0 = __lsx_vld(tmp_eve_buf, 1 * 16);
+ loc1 = __lsx_vld(tmp_eve_buf, 9 * 16);
+ loc2 = __lsx_vld(tmp_eve_buf, 5 * 16);
+ loc3 = __lsx_vld(tmp_eve_buf, 13 * 16);
+
+ DUP4_ARG2(__lsx_vadd_h, loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0,
+ n0, n4, n2, n6);
+
+ __lsx_vst(SUB(loc0, vec3), tmp_buf, 30 * 16);
+ __lsx_vst(SUB(loc1, vec2), tmp_buf, 22 * 16);
+ __lsx_vst(SUB(loc2, vec1), tmp_buf, 26 * 16);
+ __lsx_vst(SUB(loc3, vec0), tmp_buf, 18 * 16);
+
+ /* Load 8 & Store 8 */
+ vec0 = __lsx_vld(tmp_odd_buf, 5 * 16);
+ vec1 = __lsx_vld(tmp_odd_buf, 15 * 16);
+ vec2 = __lsx_vld(tmp_odd_buf, 8 * 16);
+ vec3 = __lsx_vld(tmp_odd_buf, 1 * 16);
+ loc0 = __lsx_vld(tmp_eve_buf, 3 * 16);
+ loc1 = __lsx_vld(tmp_eve_buf, 11 * 16);
+ loc2 = __lsx_vld(tmp_eve_buf, 7 * 16);
+ loc3 = __lsx_vld(tmp_eve_buf, 15 * 16);
+
+ DUP4_ARG2(__lsx_vadd_h, loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0,
+ n1, n5, n3, n7);
+
+ __lsx_vst(SUB(loc0, vec3), tmp_buf, 28 * 16);
+ __lsx_vst(SUB(loc1, vec2), tmp_buf, 20 * 16);
+ __lsx_vst(SUB(loc2, vec1), tmp_buf, 24 * 16);
+ __lsx_vst(SUB(loc3, vec0), tmp_buf, 16 * 16);
+
+ /* Transpose : 16 vectors */
+ /* 1st & 2nd 8x8 */
+ LSX_TRANSPOSE8x8_H(m0, n0, m1, n1, m2, n2, m3, n3,
+ m0, n0, m1, n1, m2, n2, m3, n3);
+ __lsx_vst(m0, dst, 0);
+ __lsx_vst(n0, dst, 32 * 2);
+ __lsx_vst(m1, dst, 32 * 4);
+ __lsx_vst(n1, dst, 32 * 6);
+ __lsx_vst(m2, dst, 32 * 8);
+ __lsx_vst(n2, dst, 32 * 10);
+ __lsx_vst(m3, dst, 32 * 12);
+ __lsx_vst(n3, dst, 32 * 14);
+
+ LSX_TRANSPOSE8x8_H(m4, n4, m5, n5, m6, n6, m7, n7,
+ m4, n4, m5, n5, m6, n6, m7, n7);
+
+ __lsx_vst(m4, dst, 16);
+ __lsx_vst(n4, dst, 16 + 32 * 2);
+ __lsx_vst(m5, dst, 16 + 32 * 4);
+ __lsx_vst(n5, dst, 16 + 32 * 6);
+ __lsx_vst(m6, dst, 16 + 32 * 8);
+ __lsx_vst(n6, dst, 16 + 32 * 10);
+ __lsx_vst(m7, dst, 16 + 32 * 12);
+ __lsx_vst(n7, dst, 16 + 32 * 14);
+
+ /* 3rd & 4th 8x8 */
+ DUP4_ARG2(__lsx_vld, tmp_buf, 16 * 16, tmp_buf, 16 * 17,
+ tmp_buf, 16 * 18, tmp_buf, 16 * 19, m0, n0, m1, n1);
+ DUP4_ARG2(__lsx_vld, tmp_buf, 16 * 20, tmp_buf, 16 * 21,
+ tmp_buf, 16 * 22, tmp_buf, 16 * 23, m2, n2, m3, n3);
+
+ DUP4_ARG2(__lsx_vld, tmp_buf, 16 * 24, tmp_buf, 16 * 25,
+ tmp_buf, 16 * 26, tmp_buf, 16 * 27, m4, n4, m5, n5);
+ DUP4_ARG2(__lsx_vld, tmp_buf, 16 * 28, tmp_buf, 16 * 29,
+ tmp_buf, 16 * 30, tmp_buf, 16 * 31, m6, n6, m7, n7);
+
+ LSX_TRANSPOSE8x8_H(m0, n0, m1, n1, m2, n2, m3, n3,
+ m0, n0, m1, n1, m2, n2, m3, n3);
+
+ __lsx_vst(m0, dst, 32);
+ __lsx_vst(n0, dst, 32 + 32 * 2);
+ __lsx_vst(m1, dst, 32 + 32 * 4);
+ __lsx_vst(n1, dst, 32 + 32 * 6);
+ __lsx_vst(m2, dst, 32 + 32 * 8);
+ __lsx_vst(n2, dst, 32 + 32 * 10);
+ __lsx_vst(m3, dst, 32 + 32 * 12);
+ __lsx_vst(n3, dst, 32 + 32 * 14);
+
+ LSX_TRANSPOSE8x8_H(m4, n4, m5, n5, m6, n6, m7, n7,
+ m4, n4, m5, n5, m6, n6, m7, n7);
+
+ __lsx_vst(m4, dst, 48);
+ __lsx_vst(n4, dst, 48 + 32 * 2);
+ __lsx_vst(m5, dst, 48 + 32 * 4);
+ __lsx_vst(n5, dst, 48 + 32 * 6);
+ __lsx_vst(m6, dst, 48 + 32 * 8);
+ __lsx_vst(n6, dst, 48 + 32 * 10);
+ __lsx_vst(m7, dst, 48 + 32 * 12);
+ __lsx_vst(n7, dst, 48 + 32 * 14);
+}
+
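+/* Even half of the 8x32 1D IDCT: the even-indexed coefficient rows are
+ * transformed in three stages and the 16 intermediate results are stored
+ * to tmp_eve_buf; the consumed coefficients are cleared to zero. */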
+static void vp9_idct8x32_column_even_process_store(int16_t *tmp_buf,
+ int16_t *tmp_eve_buf)
+{
+ __m128i vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
+ __m128i reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
+ __m128i stp0, stp1, stp2, stp3, stp4, stp5, stp6, stp7;
+ __m128i zero = __lsx_vldi(0);
+
+ /* Even stage 1 */
+ DUP4_ARG2(__lsx_vld, tmp_buf, 0, tmp_buf, 32 * 8,
+ tmp_buf, 32 * 16, tmp_buf, 32 * 24, reg0, reg1, reg2, reg3);
+ DUP4_ARG2(__lsx_vld, tmp_buf, 32 * 32, tmp_buf, 32 * 40,
+ tmp_buf, 32 * 48, tmp_buf, 32 * 56, reg4, reg5, reg6, reg7);
+
+ __lsx_vst(zero, tmp_buf, 0);
+ __lsx_vst(zero, tmp_buf, 32 * 8);
+ __lsx_vst(zero, tmp_buf, 32 * 16);
+ __lsx_vst(zero, tmp_buf, 32 * 24);
+ __lsx_vst(zero, tmp_buf, 32 * 32);
+ __lsx_vst(zero, tmp_buf, 32 * 40);
+ __lsx_vst(zero, tmp_buf, 32 * 48);
+ __lsx_vst(zero, tmp_buf, 32 * 56);
+
+ tmp_buf += (2 * 32);
+
+ VP9_DOTP_CONST_PAIR(reg1, reg7, cospi_28_64, cospi_4_64, reg1, reg7);
+ VP9_DOTP_CONST_PAIR(reg5, reg3, cospi_12_64, cospi_20_64, reg5, reg3);
+ LSX_BUTTERFLY_4_H(reg1, reg7, reg3, reg5, vec1, vec3, vec2, vec0);
+ VP9_DOTP_CONST_PAIR(vec2, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
+
+ loc1 = vec3;
+ loc0 = vec1;
+
+ VP9_DOTP_CONST_PAIR(reg0, reg4, cospi_16_64, cospi_16_64, reg0, reg4);
+ VP9_DOTP_CONST_PAIR(reg2, reg6, cospi_24_64, cospi_8_64, reg2, reg6);
+ LSX_BUTTERFLY_4_H(reg4, reg0, reg2, reg6, vec1, vec3, vec2, vec0);
+ LSX_BUTTERFLY_4_H(vec0, vec1, loc1, loc0, stp3, stp0, stp7, stp4);
+ LSX_BUTTERFLY_4_H(vec2, vec3, loc3, loc2, stp2, stp1, stp6, stp5);
+
+ /* Even stage 2 */
+ /* Load 8 */
+ DUP4_ARG2(__lsx_vld, tmp_buf, 0, tmp_buf, 32 * 8,
+ tmp_buf, 32 * 16, tmp_buf, 32 * 24, reg0, reg1, reg2, reg3);
+ DUP4_ARG2(__lsx_vld, tmp_buf, 32 * 32, tmp_buf, 32 * 40,
+ tmp_buf, 32 * 48, tmp_buf, 32 * 56, reg4, reg5, reg6, reg7);
+
+ __lsx_vst(zero, tmp_buf, 0);
+ __lsx_vst(zero, tmp_buf, 32 * 8);
+ __lsx_vst(zero, tmp_buf, 32 * 16);
+ __lsx_vst(zero, tmp_buf, 32 * 24);
+ __lsx_vst(zero, tmp_buf, 32 * 32);
+ __lsx_vst(zero, tmp_buf, 32 * 40);
+ __lsx_vst(zero, tmp_buf, 32 * 48);
+ __lsx_vst(zero, tmp_buf, 32 * 56);
+
+ VP9_DOTP_CONST_PAIR(reg0, reg7, cospi_30_64, cospi_2_64, reg0, reg7);
+ VP9_DOTP_CONST_PAIR(reg4, reg3, cospi_14_64, cospi_18_64, reg4, reg3);
+ VP9_DOTP_CONST_PAIR(reg2, reg5, cospi_22_64, cospi_10_64, reg2, reg5);
+ VP9_DOTP_CONST_PAIR(reg6, reg1, cospi_6_64, cospi_26_64, reg6, reg1);
+
+ vec0 = __lsx_vadd_h(reg0, reg4);
+ reg0 = __lsx_vsub_h(reg0, reg4);
+ reg4 = __lsx_vadd_h(reg6, reg2);
+ reg6 = __lsx_vsub_h(reg6, reg2);
+ reg2 = __lsx_vadd_h(reg1, reg5);
+ reg1 = __lsx_vsub_h(reg1, reg5);
+ reg5 = __lsx_vadd_h(reg7, reg3);
+ reg7 = __lsx_vsub_h(reg7, reg3);
+ reg3 = vec0;
+
+ vec1 = reg2;
+ reg2 = __lsx_vadd_h(reg3, reg4);
+ reg3 = __lsx_vsub_h(reg3, reg4);
+ reg4 = __lsx_vsub_h(reg5, vec1);
+ reg5 = __lsx_vadd_h(reg5, vec1);
+
+ VP9_DOTP_CONST_PAIR(reg7, reg0, cospi_24_64, cospi_8_64, reg0, reg7);
+ VP9_DOTP_CONST_PAIR(__lsx_vneg_h(reg6), reg1, cospi_24_64, cospi_8_64,
+ reg6, reg1);
+
+ vec0 = __lsx_vsub_h(reg0, reg6);
+ reg0 = __lsx_vadd_h(reg0, reg6);
+ vec1 = __lsx_vsub_h(reg7, reg1);
+ reg7 = __lsx_vadd_h(reg7, reg1);
+
+ VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, reg6, reg1);
+ VP9_DOTP_CONST_PAIR(reg4, reg3, cospi_16_64, cospi_16_64, reg3, reg4);
+
+ /* Even stage 3 : Dependency on Even stage 1 & Even stage 2 */
+ /* Store 8 */
+ LSX_BUTTERFLY_4_H(stp0, stp1, reg7, reg5, loc1, loc3, loc2, loc0);
+ __lsx_vst(loc1, tmp_eve_buf, 0);
+ __lsx_vst(loc3, tmp_eve_buf, 16);
+ __lsx_vst(loc2, tmp_eve_buf, 14 * 16);
+ __lsx_vst(loc0, tmp_eve_buf, 14 * 16 + 16);
+ LSX_BUTTERFLY_4_H(stp2, stp3, reg4, reg1, loc1, loc3, loc2, loc0);
+ __lsx_vst(loc1, tmp_eve_buf, 2 * 16);
+ __lsx_vst(loc3, tmp_eve_buf, 2 * 16 + 16);
+ __lsx_vst(loc2, tmp_eve_buf, 12 * 16);
+ __lsx_vst(loc0, tmp_eve_buf, 12 * 16 + 16);
+
+ /* Store 8 */
+ LSX_BUTTERFLY_4_H(stp4, stp5, reg6, reg3, loc1, loc3, loc2, loc0);
+ __lsx_vst(loc1, tmp_eve_buf, 4 * 16);
+ __lsx_vst(loc3, tmp_eve_buf, 4 * 16 + 16);
+ __lsx_vst(loc2, tmp_eve_buf, 10 * 16);
+ __lsx_vst(loc0, tmp_eve_buf, 10 * 16 + 16);
+
+ LSX_BUTTERFLY_4_H(stp6, stp7, reg2, reg0, loc1, loc3, loc2, loc0);
+ __lsx_vst(loc1, tmp_eve_buf, 6 * 16);
+ __lsx_vst(loc3, tmp_eve_buf, 6 * 16 + 16);
+ __lsx_vst(loc2, tmp_eve_buf, 8 * 16);
+ __lsx_vst(loc0, tmp_eve_buf, 8 * 16 + 16);
+}
+
+static void vp9_idct8x32_column_odd_process_store(int16_t *tmp_buf,
+ int16_t *tmp_odd_buf)
+{
+ __m128i vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
+ __m128i reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7;
+ __m128i zero = __lsx_vldi(0);
+
+ /* Odd stage 1 */
+ reg0 = __lsx_vld(tmp_buf, 64);
+ reg1 = __lsx_vld(tmp_buf, 7 * 64);
+ reg2 = __lsx_vld(tmp_buf, 9 * 64);
+ reg3 = __lsx_vld(tmp_buf, 15 * 64);
+ reg4 = __lsx_vld(tmp_buf, 17 * 64);
+ reg5 = __lsx_vld(tmp_buf, 23 * 64);
+ reg6 = __lsx_vld(tmp_buf, 25 * 64);
+ reg7 = __lsx_vld(tmp_buf, 31 * 64);
+
+ __lsx_vst(zero, tmp_buf, 64);
+ __lsx_vst(zero, tmp_buf, 7 * 64);
+ __lsx_vst(zero, tmp_buf, 9 * 64);
+ __lsx_vst(zero, tmp_buf, 15 * 64);
+ __lsx_vst(zero, tmp_buf, 17 * 64);
+ __lsx_vst(zero, tmp_buf, 23 * 64);
+ __lsx_vst(zero, tmp_buf, 25 * 64);
+ __lsx_vst(zero, tmp_buf, 31 * 64);
+
+ VP9_DOTP_CONST_PAIR(reg0, reg7, cospi_31_64, cospi_1_64, reg0, reg7);
+ VP9_DOTP_CONST_PAIR(reg4, reg3, cospi_15_64, cospi_17_64, reg3, reg4);
+ VP9_DOTP_CONST_PAIR(reg2, reg5, cospi_23_64, cospi_9_64, reg2, reg5);
+ VP9_DOTP_CONST_PAIR(reg6, reg1, cospi_7_64, cospi_25_64, reg1, reg6);
+
+ vec0 = __lsx_vadd_h(reg0, reg3);
+ reg0 = __lsx_vsub_h(reg0, reg3);
+ reg3 = __lsx_vadd_h(reg7, reg4);
+ reg7 = __lsx_vsub_h(reg7, reg4);
+ reg4 = __lsx_vadd_h(reg1, reg2);
+ reg1 = __lsx_vsub_h(reg1, reg2);
+ reg2 = __lsx_vadd_h(reg6, reg5);
+ reg6 = __lsx_vsub_h(reg6, reg5);
+ reg5 = vec0;
+
+ /* 4 Stores */
+ DUP2_ARG2(__lsx_vadd_h, reg5, reg4, reg3, reg2, vec0, vec1);
+ __lsx_vst(vec0, tmp_odd_buf, 4 * 16);
+ __lsx_vst(vec1, tmp_odd_buf, 4 * 16 + 16);
+ DUP2_ARG2(__lsx_vsub_h, reg5, reg4, reg3, reg2, vec0, vec1);
+ VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_24_64, cospi_8_64, vec0, vec1);
+ __lsx_vst(vec0, tmp_odd_buf, 0);
+ __lsx_vst(vec1, tmp_odd_buf, 16);
+
+ /* 4 Stores */
+ VP9_DOTP_CONST_PAIR(reg7, reg0, cospi_28_64, cospi_4_64, reg0, reg7);
+ VP9_DOTP_CONST_PAIR(reg6, reg1, -cospi_4_64, cospi_28_64, reg1, reg6);
+ LSX_BUTTERFLY_4_H(reg0, reg7, reg6, reg1, vec0, vec1, vec2, vec3);
+ __lsx_vst(vec0, tmp_odd_buf, 6 * 16);
+ __lsx_vst(vec1, tmp_odd_buf, 6 * 16 + 16);
+ VP9_DOTP_CONST_PAIR(vec2, vec3, cospi_24_64, cospi_8_64, vec2, vec3);
+ __lsx_vst(vec2, tmp_odd_buf, 2 * 16);
+ __lsx_vst(vec3, tmp_odd_buf, 2 * 16 + 16);
+
+ /* Odd stage 2 */
+ /* 8 loads */
+ reg0 = __lsx_vld(tmp_buf, 3 * 64);
+ reg1 = __lsx_vld(tmp_buf, 5 * 64);
+ reg2 = __lsx_vld(tmp_buf, 11 * 64);
+ reg3 = __lsx_vld(tmp_buf, 13 * 64);
+ reg4 = __lsx_vld(tmp_buf, 19 * 64);
+ reg5 = __lsx_vld(tmp_buf, 21 * 64);
+ reg6 = __lsx_vld(tmp_buf, 27 * 64);
+ reg7 = __lsx_vld(tmp_buf, 29 * 64);
+
+ __lsx_vst(zero, tmp_buf, 3 * 64);
+ __lsx_vst(zero, tmp_buf, 5 * 64);
+ __lsx_vst(zero, tmp_buf, 11 * 64);
+ __lsx_vst(zero, tmp_buf, 13 * 64);
+ __lsx_vst(zero, tmp_buf, 19 * 64);
+ __lsx_vst(zero, tmp_buf, 21 * 64);
+ __lsx_vst(zero, tmp_buf, 27 * 64);
+ __lsx_vst(zero, tmp_buf, 29 * 64);
+
+ VP9_DOTP_CONST_PAIR(reg1, reg6, cospi_27_64, cospi_5_64, reg1, reg6);
+ VP9_DOTP_CONST_PAIR(reg5, reg2, cospi_11_64, cospi_21_64, reg2, reg5);
+ VP9_DOTP_CONST_PAIR(reg3, reg4, cospi_19_64, cospi_13_64, reg3, reg4);
+ VP9_DOTP_CONST_PAIR(reg7, reg0, cospi_3_64, cospi_29_64, reg0, reg7);
+
+ /* 4 Stores */
+ DUP4_ARG2(__lsx_vsub_h, reg1, reg2, reg6, reg5, reg0, reg3, reg7, reg4,
+ vec0, vec1, vec2, vec3);
+ VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_12_64, cospi_20_64, loc0, loc1);
+ VP9_DOTP_CONST_PAIR(vec3, vec2, -cospi_20_64, cospi_12_64, loc2, loc3);
+ LSX_BUTTERFLY_4_H(loc2, loc3, loc1, loc0, vec0, vec1, vec3, vec2);
+ __lsx_vst(vec0, tmp_odd_buf, 12 * 16);
+ __lsx_vst(vec1, tmp_odd_buf, 12 * 16 + 3 * 16);
+ VP9_DOTP_CONST_PAIR(vec3, vec2, -cospi_8_64, cospi_24_64, vec0, vec1);
+ __lsx_vst(vec0, tmp_odd_buf, 10 * 16);
+ __lsx_vst(vec1, tmp_odd_buf, 10 * 16 + 16);
+
+ /* 4 Stores */
+ DUP4_ARG2(__lsx_vadd_h, reg0, reg3, reg1, reg2, reg5, reg6, reg4, reg7,
+ vec0, vec1, vec2, vec3);
+ LSX_BUTTERFLY_4_H(vec0, vec3, vec2, vec1, reg0, reg1, reg3, reg2);
+ __lsx_vst(reg0, tmp_odd_buf, 13 * 16);
+ __lsx_vst(reg1, tmp_odd_buf, 13 * 16 + 16);
+ VP9_DOTP_CONST_PAIR(reg3, reg2, -cospi_8_64, cospi_24_64,
+ reg0, reg1);
+ __lsx_vst(reg0, tmp_odd_buf, 8 * 16);
+ __lsx_vst(reg1, tmp_odd_buf, 8 * 16 + 16);
+
+ /* Odd stage 3 : Dependency on Odd stage 1 & Odd stage 2 */
+ /* Load 8 & Store 8 */
+ DUP4_ARG2(__lsx_vld, tmp_odd_buf, 0, tmp_odd_buf, 16,
+ tmp_odd_buf, 32, tmp_odd_buf, 48, reg0, reg1, reg2, reg3);
+ DUP4_ARG2(__lsx_vld, tmp_odd_buf, 8 * 16, tmp_odd_buf, 8 * 16 + 16,
+ tmp_odd_buf, 8 * 16 + 32, tmp_odd_buf, 8 * 16 + 48,
+ reg4, reg5, reg6, reg7);
+
+ DUP4_ARG2(__lsx_vadd_h, reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7,
+ loc0, loc1, loc2, loc3);
+ __lsx_vst(loc0, tmp_odd_buf, 0);
+ __lsx_vst(loc1, tmp_odd_buf, 16);
+ __lsx_vst(loc2, tmp_odd_buf, 32);
+ __lsx_vst(loc3, tmp_odd_buf, 48);
+ DUP2_ARG2(__lsx_vsub_h, reg0, reg4, reg1, reg5, vec0, vec1);
+ VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);
+
+ DUP2_ARG2(__lsx_vsub_h, reg2, reg6, reg3, reg7, vec0, vec1);
+ VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
+ __lsx_vst(loc0, tmp_odd_buf, 8 * 16);
+ __lsx_vst(loc1, tmp_odd_buf, 8 * 16 + 16);
+ __lsx_vst(loc2, tmp_odd_buf, 8 * 16 + 32);
+ __lsx_vst(loc3, tmp_odd_buf, 8 * 16 + 48);
+
+ /* Load 8 & Store 8 */
+ DUP4_ARG2(__lsx_vld, tmp_odd_buf, 4 * 16, tmp_odd_buf, 4 * 16 + 16,
+ tmp_odd_buf, 4 * 16 + 32, tmp_odd_buf, 4 * 16 + 48,
+ reg1, reg2, reg0, reg3);
+ DUP4_ARG2(__lsx_vld, tmp_odd_buf, 12 * 16, tmp_odd_buf, 12 * 16 + 16,
+ tmp_odd_buf, 12 * 16 + 32, tmp_odd_buf, 12 * 16 + 48,
+ reg4, reg5, reg6, reg7);
+
+ DUP4_ARG2(__lsx_vadd_h, reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7,
+ loc0, loc1, loc2, loc3);
+ __lsx_vst(loc0, tmp_odd_buf, 4 * 16);
+ __lsx_vst(loc1, tmp_odd_buf, 4 * 16 + 16);
+ __lsx_vst(loc2, tmp_odd_buf, 4 * 16 + 32);
+ __lsx_vst(loc3, tmp_odd_buf, 4 * 16 + 48);
+
+ DUP2_ARG2(__lsx_vsub_h, reg0, reg4, reg3, reg7, vec0, vec1);
+ VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc0, loc1);
+
+ DUP2_ARG2(__lsx_vsub_h, reg1, reg5, reg2, reg6, vec0, vec1);
+ VP9_DOTP_CONST_PAIR(vec1, vec0, cospi_16_64, cospi_16_64, loc2, loc3);
+ __lsx_vst(loc0, tmp_odd_buf, 12 * 16);
+ __lsx_vst(loc1, tmp_odd_buf, 12 * 16 + 16);
+ __lsx_vst(loc2, tmp_odd_buf, 12 * 16 + 32);
+ __lsx_vst(loc3, tmp_odd_buf, 12 * 16 + 48);
+}
+
+static void vp9_idct8x32_column_butterfly_addblk(int16_t *tmp_eve_buf,
+ int16_t *tmp_odd_buf,
+ uint8_t *dst,
+ int32_t dst_stride)
+{
+ __m128i vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;
+ __m128i m0, m1, m2, m3, m4, m5, m6, m7, n0, n1, n2, n3, n4, n5, n6, n7;
+
+ /* FINAL BUTTERFLY : Dependency on Even & Odd */
+ vec0 = __lsx_vld(tmp_odd_buf, 0);
+ vec1 = __lsx_vld(tmp_odd_buf, 9 * 16);
+ vec2 = __lsx_vld(tmp_odd_buf, 14 * 16);
+ vec3 = __lsx_vld(tmp_odd_buf, 6 * 16);
+ loc0 = __lsx_vld(tmp_eve_buf, 0);
+ loc1 = __lsx_vld(tmp_eve_buf, 8 * 16);
+ loc2 = __lsx_vld(tmp_eve_buf, 4 * 16);
+ loc3 = __lsx_vld(tmp_eve_buf, 12 * 16);
+
+ DUP4_ARG2(__lsx_vadd_h, loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0,
+ m0, m4, m2, m6);
+ DUP4_ARG2(__lsx_vsrari_h, m0, 6, m2, 6, m4, 6, m6, 6, m0, m2, m4, m6);
+ VP9_ADDBLK_ST8x4_UB(dst, (4 * dst_stride), m0, m2, m4, m6);
+
+ DUP4_ARG2(__lsx_vsub_h, loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0,
+ m6, m2, m4, m0);
+ DUP4_ARG2(__lsx_vsrari_h, m0, 6, m2, 6, m4, 6, m6, 6, m0, m2, m4, m6);
+ VP9_ADDBLK_ST8x4_UB((dst + 19 * dst_stride), (4 * dst_stride),
+ m0, m2, m4, m6);
+
+ /* Load 8 & Store 8 */
+ vec0 = __lsx_vld(tmp_odd_buf, 4 * 16);
+ vec1 = __lsx_vld(tmp_odd_buf, 13 * 16);
+ vec2 = __lsx_vld(tmp_odd_buf, 10 * 16);
+ vec3 = __lsx_vld(tmp_odd_buf, 3 * 16);
+ loc0 = __lsx_vld(tmp_eve_buf, 2 * 16);
+ loc1 = __lsx_vld(tmp_eve_buf, 10 * 16);
+ loc2 = __lsx_vld(tmp_eve_buf, 6 * 16);
+ loc3 = __lsx_vld(tmp_eve_buf, 14 * 16);
+
+ DUP4_ARG2(__lsx_vadd_h, loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0,
+ m1, m5, m3, m7);
+ DUP4_ARG2(__lsx_vsrari_h, m1, 6, m3, 6, m5, 6, m7, 6, m1, m3, m5, m7);
+ VP9_ADDBLK_ST8x4_UB((dst + 2 * dst_stride), (4 * dst_stride),
+ m1, m3, m5, m7);
+
+ DUP4_ARG2(__lsx_vsub_h, loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0,
+ m7, m3, m5, m1);
+ DUP4_ARG2(__lsx_vsrari_h, m1, 6, m3, 6, m5, 6, m7, 6, m1, m3, m5, m7);
+ VP9_ADDBLK_ST8x4_UB((dst + 17 * dst_stride), (4 * dst_stride),
+ m1, m3, m5, m7);
+
+ /* Load 8 & Store 8 */
+ vec0 = __lsx_vld(tmp_odd_buf, 2 * 16);
+ vec1 = __lsx_vld(tmp_odd_buf, 11 * 16);
+ vec2 = __lsx_vld(tmp_odd_buf, 12 * 16);
+ vec3 = __lsx_vld(tmp_odd_buf, 7 * 16);
+ loc0 = __lsx_vld(tmp_eve_buf, 1 * 16);
+ loc1 = __lsx_vld(tmp_eve_buf, 9 * 16);
+ loc2 = __lsx_vld(tmp_eve_buf, 5 * 16);
+ loc3 = __lsx_vld(tmp_eve_buf, 13 * 16);
+
+ DUP4_ARG2(__lsx_vadd_h, loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0,
+ n0, n4, n2, n6);
+ DUP4_ARG2(__lsx_vsrari_h, n0, 6, n2, 6, n4, 6, n6, 6, n0, n2, n4, n6);
+ VP9_ADDBLK_ST8x4_UB((dst + 1 * dst_stride), (4 * dst_stride),
+ n0, n2, n4, n6);
+ DUP4_ARG2(__lsx_vsub_h, loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0,
+ n6, n2, n4, n0);
+ DUP4_ARG2(__lsx_vsrari_h, n0, 6, n2, 6, n4, 6, n6, 6, n0, n2, n4, n6);
+ VP9_ADDBLK_ST8x4_UB((dst + 18 * dst_stride), (4 * dst_stride),
+ n0, n2, n4, n6);
+
+ /* Load 8 & Store 8 */
+ vec0 = __lsx_vld(tmp_odd_buf, 5 * 16);
+ vec1 = __lsx_vld(tmp_odd_buf, 15 * 16);
+ vec2 = __lsx_vld(tmp_odd_buf, 8 * 16);
+ vec3 = __lsx_vld(tmp_odd_buf, 1 * 16);
+ loc0 = __lsx_vld(tmp_eve_buf, 3 * 16);
+ loc1 = __lsx_vld(tmp_eve_buf, 11 * 16);
+ loc2 = __lsx_vld(tmp_eve_buf, 7 * 16);
+ loc3 = __lsx_vld(tmp_eve_buf, 15 * 16);
+
+ DUP4_ARG2(__lsx_vadd_h, loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0,
+ n1, n5, n3, n7);
+ DUP4_ARG2(__lsx_vsrari_h, n1, 6, n3, 6, n5, 6, n7, 6, n1, n3, n5, n7);
+ VP9_ADDBLK_ST8x4_UB((dst + 3 * dst_stride), (4 * dst_stride),
+ n1, n3, n5, n7);
+ DUP4_ARG2(__lsx_vsub_h, loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0,
+ n7, n3, n5, n1);
+ DUP4_ARG2(__lsx_vsrari_h, n1, 6, n3, 6, n5, 6, n7, 6, n1, n3, n5, n7);
+ VP9_ADDBLK_ST8x4_UB((dst + 16 * dst_stride), (4 * dst_stride),
+ n1, n3, n5, n7);
+}
+
+static void vp9_idct8x32_1d_columns_addblk_lsx(int16_t *input, uint8_t *dst,
+ int32_t dst_stride)
+{
+ int16_t tmp_odd_buf[16 * 8] ALLOC_ALIGNED(16);
+ int16_t tmp_eve_buf[16 * 8] ALLOC_ALIGNED(16);
+
+ vp9_idct8x32_column_even_process_store(input, &tmp_eve_buf[0]);
+ vp9_idct8x32_column_odd_process_store(input, &tmp_odd_buf[0]);
+ vp9_idct8x32_column_butterfly_addblk(&tmp_eve_buf[0], &tmp_odd_buf[0],
+ dst, dst_stride);
+}
+
+static void vp9_idct8x32_1d_columns_lsx(int16_t *input, int16_t *output,
+ int16_t *tmp_buf)
+{
+ int16_t tmp_odd_buf[16 * 8] ALLOC_ALIGNED(16);
+ int16_t tmp_eve_buf[16 * 8] ALLOC_ALIGNED(16);
+
+ vp9_idct8x32_column_even_process_store(input, &tmp_eve_buf[0]);
+ vp9_idct8x32_column_odd_process_store(input, &tmp_odd_buf[0]);
+ vp9_idct_butterfly_transpose_store(tmp_buf, &tmp_eve_buf[0],
+ &tmp_odd_buf[0], output);
+}
+
+static void vp9_idct32x32_1_add_lsx(int16_t *input, uint8_t *dst,
+ int32_t dst_stride)
+{
+ int32_t i;
+ int16_t out;
+ uint8_t *dst_tmp = dst + dst_stride;
+ __m128i zero = __lsx_vldi(0);
+ __m128i dst0, dst1, dst2, dst3, tmp0, tmp1, tmp2, tmp3;
+ __m128i res0, res1, res2, res3, res4, res5, res6, res7, vec;
+
+ out = ROUND_POWER_OF_TWO((input[0] * cospi_16_64), VP9_DCT_CONST_BITS);
+ out = ROUND_POWER_OF_TWO((out * cospi_16_64), VP9_DCT_CONST_BITS);
+ out = ROUND_POWER_OF_TWO(out, 6);
+ input[0] = 0;
+
+ vec = __lsx_vreplgr2vr_h(out);
+
+ for (i = 16; i--;) {
+ DUP2_ARG2(__lsx_vld, dst, 0, dst, 16, dst0, dst1);
+ DUP2_ARG2(__lsx_vld, dst_tmp, 0, dst_tmp, 16, dst2, dst3);
+
+ DUP4_ARG2(__lsx_vilvl_b, zero, dst0, zero, dst1, zero, dst2, zero, dst3,
+ res0, res1, res2, res3);
+ DUP4_ARG2(__lsx_vilvh_b, zero, dst0, zero, dst1, zero, dst2, zero, dst3,
+ res4, res5, res6, res7);
+ DUP4_ARG2(__lsx_vadd_h, res0, vec, res1, vec, res2, vec, res3, vec,
+ res0, res1, res2, res3);
+ DUP4_ARG2(__lsx_vadd_h, res4, vec, res5, vec, res6, vec, res7, vec,
+ res4, res5, res6, res7);
+ DUP4_ARG1(__lsx_vclip255_h, res0, res1, res2, res3,
+ res0, res1, res2, res3);
+ DUP4_ARG1(__lsx_vclip255_h, res4, res5, res6, res7,
+ res4, res5, res6, res7);
+ DUP4_ARG2(__lsx_vpickev_b, res4, res0, res5, res1, res6, res2, res7, res3,
+ tmp0, tmp1, tmp2, tmp3);
+
+ __lsx_vst(tmp0, dst, 0);
+ __lsx_vst(tmp1, dst, 16);
+ __lsx_vst(tmp2, dst_tmp, 0);
+ __lsx_vst(tmp3, dst_tmp, 16);
+ dst = dst_tmp + dst_stride;
+ dst_tmp = dst + dst_stride;
+ }
+}
+
+static void vp9_idct32x32_34_colcol_addblk_lsx(int16_t *input, uint8_t *dst,
+ int32_t dst_stride)
+{
+ int32_t i;
+ int16_t out_arr[32 * 32] ALLOC_ALIGNED(16);
+ int16_t *out_ptr = out_arr;
+ int16_t tmp_buf[8 * 32] ALLOC_ALIGNED(16);
+ __m128i zero = __lsx_vldi(0);
+
+ for (i = 16; i--;) {
+ __lsx_vst(zero, out_ptr, 0);
+ __lsx_vst(zero, out_ptr, 16);
+ __lsx_vst(zero, out_ptr, 32);
+ __lsx_vst(zero, out_ptr, 48);
+ __lsx_vst(zero, out_ptr, 64);
+ __lsx_vst(zero, out_ptr, 80);
+ __lsx_vst(zero, out_ptr, 96);
+ __lsx_vst(zero, out_ptr, 112);
+ out_ptr += 64;
+ }
+
+ out_ptr = out_arr;
+
+ /* process 8*32 block */
+ vp9_idct8x32_1d_columns_lsx(input, out_ptr, &tmp_buf[0]);
+
+ /* transform columns */
+ for (i = 0; i < 4; i++) {
+ /* process 8*32 block */
+ vp9_idct8x32_1d_columns_addblk_lsx((out_ptr + (i << 3)),
+ (dst + (i << 3)), dst_stride);
+ }
+}
+
+static void vp9_idct32x32_colcol_addblk_lsx(int16_t *input, uint8_t *dst,
+ int32_t dst_stride)
+{
+ int32_t i;
+ int16_t out_arr[32 * 32] ALLOC_ALIGNED(16);
+ int16_t *out_ptr = out_arr;
+ int16_t tmp_buf[8 * 32] ALLOC_ALIGNED(16);
+
+ /* transform rows */
+ for (i = 0; i < 4; i++) {
+ /* process 8*32 block */
+ vp9_idct8x32_1d_columns_lsx((input + (i << 3)), (out_ptr + (i << 8)),
+ &tmp_buf[0]);
+ }
+
+ /* transform columns */
+ for (i = 0; i < 4; i++) {
+ /* process 8*32 block */
+ vp9_idct8x32_1d_columns_addblk_lsx((out_ptr + (i << 3)),
+ (dst + (i << 3)), dst_stride);
+ }
+}
+
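+/* eob == 1 means only the DC coefficient is coded; the small-eob variants
+ * only transform the leading coefficient rows that can be non-zero. */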
+void ff_idct_idct_8x8_add_lsx(uint8_t *dst, ptrdiff_t stride,
+ int16_t *block, int eob)
+{
+ if (eob == 1) {
+ vp9_idct8x8_1_add_lsx(block, dst, stride);
+ } else if (eob <= 12) {
+ vp9_idct8x8_12_colcol_addblk_lsx(block, dst, stride);
+ } else {
+ vp9_idct8x8_colcol_addblk_lsx(block, dst, stride);
+ }
+}
+
+void ff_idct_idct_16x16_add_lsx(uint8_t *dst, ptrdiff_t stride,
+ int16_t *block, int eob)
+{
+ if (eob == 1) {
+ /* DC only DCT coefficient. */
+ vp9_idct16x16_1_add_lsx(block, dst, stride);
+ } else if (eob <= 10) {
+ vp9_idct16x16_10_colcol_addblk_lsx(block, dst, stride);
+ } else {
+ vp9_idct16x16_colcol_addblk_lsx(block, dst, stride);
+ }
+}
+
+void ff_idct_idct_32x32_add_lsx(uint8_t *dst, ptrdiff_t stride,
+ int16_t *block, int eob)
+{
+ if (eob == 1) {
+ vp9_idct32x32_1_add_lsx(block, dst, stride);
+ } else if (eob <= 34) {
+ vp9_idct32x32_34_colcol_addblk_lsx(block, dst, stride);
+ } else {
+ vp9_idct32x32_colcol_addblk_lsx(block, dst, stride);
+ }
+}
diff --git a/libavcodec/loongarch/vp9_lpf_lsx.c b/libavcodec/loongarch/vp9_lpf_lsx.c
new file mode 100644
index 0000000000..8e1915b888
--- /dev/null
+++ b/libavcodec/loongarch/vp9_lpf_lsx.c
@@ -0,0 +1,3141 @@
+/*
+ * Copyright (c) 2021 Loongson Technology Corporation Limited
+ * Contributed by Jin Bo <jinbo@loongson.cn>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavcodec/vp9dsp.h"
+#include "libavutil/loongarch/loongson_intrinsics.h"
+#include "libavutil/common.h"
+#include "vp9dsp_loongarch.h"
+
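+/* Load/store eight rows of 16 pixels; the pointer is advanced by four rows
+ * halfway through so only offsets up to 3 * stride are needed. */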
+#define LSX_LD_8(_src, _stride, _stride2, _stride3, _stride4, _in0, _in1, _in2, \
+ _in3, _in4, _in5, _in6, _in7) \
+{ \
+ _in0 = __lsx_vld(_src, 0); \
+ _in1 = __lsx_vldx(_src, _stride); \
+ _in2 = __lsx_vldx(_src, _stride2); \
+ _in3 = __lsx_vldx(_src, _stride3); \
+ _src += _stride4; \
+ _in4 = __lsx_vld(_src, 0); \
+ _in5 = __lsx_vldx(_src, _stride); \
+ _in6 = __lsx_vldx(_src, _stride2); \
+ _in7 = __lsx_vldx(_src, _stride3); \
+}
+
+#define LSX_ST_8(_dst0, _dst1, _dst2, _dst3, _dst4, _dst5, _dst6, _dst7, \
+ _dst, _stride, _stride2, _stride3, _stride4) \
+{ \
+ __lsx_vst(_dst0, _dst, 0); \
+ __lsx_vstx(_dst1, _dst, _stride); \
+ __lsx_vstx(_dst2, _dst, _stride2); \
+ __lsx_vstx(_dst3, _dst, _stride3); \
+ _dst += _stride4; \
+ __lsx_vst(_dst4, _dst, 0); \
+ __lsx_vstx(_dst5, _dst, _stride); \
+ __lsx_vstx(_dst6, _dst, _stride2); \
+ __lsx_vstx(_dst7, _dst, _stride3); \
+}
+
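+/* 4-tap filter: pixels are biased into the signed range (xor 0x80), the
+ * filter value clamp(p1 - q1) & hev plus 3 * (q0 - p0) is saturated and
+ * masked, then applied as (filt + 4) >> 3 to q0 and (filt + 3) >> 3 to p0;
+ * p1/q1 get a rounded half of the adjustment where hev is not set. */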
+#define VP9_LPF_FILTER4_4W(p1_src, p0_src, q0_src, q1_src, mask_src, hev_src, \
+ p1_dst, p0_dst, q0_dst, q1_dst) \
+{ \
+ __m128i p1_tmp, p0_tmp, q0_tmp, q1_tmp, q0_sub_p0, filt, filt1, filt2; \
+ const __m128i cnst3b = __lsx_vldi(3); \
+ const __m128i cnst4b = __lsx_vldi(4); \
+ \
+ p1_tmp = __lsx_vxori_b(p1_src, 0x80); \
+ p0_tmp = __lsx_vxori_b(p0_src, 0x80); \
+ q0_tmp = __lsx_vxori_b(q0_src, 0x80); \
+ q1_tmp = __lsx_vxori_b(q1_src, 0x80); \
+ \
+ filt = __lsx_vssub_b(p1_tmp, q1_tmp); \
+ \
+ filt = filt & hev_src; \
+ \
+ q0_sub_p0 = __lsx_vssub_b(q0_tmp, p0_tmp); \
+ filt = __lsx_vsadd_b(filt, q0_sub_p0); \
+ filt = __lsx_vsadd_b(filt, q0_sub_p0); \
+ filt = __lsx_vsadd_b(filt, q0_sub_p0); \
+ filt = filt & mask_src; \
+ \
+ filt1 = __lsx_vsadd_b(filt, cnst4b); \
+ filt1 = __lsx_vsrai_b(filt1, 3); \
+ \
+ filt2 = __lsx_vsadd_b(filt, cnst3b); \
+ filt2 = __lsx_vsrai_b(filt2, 3); \
+ \
+ q0_tmp = __lsx_vssub_b(q0_tmp, filt1); \
+ q0_dst = __lsx_vxori_b(q0_tmp, 0x80); \
+ p0_tmp = __lsx_vsadd_b(p0_tmp, filt2); \
+ p0_dst = __lsx_vxori_b(p0_tmp, 0x80); \
+ \
+ filt = __lsx_vsrari_b(filt1, 1); \
+ hev_src = __lsx_vxori_b(hev_src, 0xff); \
+ filt = filt & hev_src; \
+ \
+ q1_tmp = __lsx_vssub_b(q1_tmp, filt); \
+ q1_dst = __lsx_vxori_b(q1_tmp, 0x80); \
+ p1_tmp = __lsx_vsadd_b(p1_tmp, filt); \
+ p1_dst = __lsx_vxori_b(p1_tmp, 0x80); \
+}
+
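+/* flat selects the 8-tap path: set where |p2 - p0|, |q2 - q0|, |p3 - p0|
+ * and |q3 - q0| are all <= 1. Note that 'mask' is picked up from the
+ * enclosing scope. */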
+#define VP9_FLAT4(p3_src, p2_src, p0_src, q0_src, q2_src, q3_src, flat_dst) \
+{ \
+ __m128i f_tmp = __lsx_vldi(1); \
+ __m128i p2_a_sub_p0, q2_a_sub_q0, p3_a_sub_p0, q3_a_sub_q0; \
+ \
+ p2_a_sub_p0 = __lsx_vabsd_bu(p2_src, p0_src); \
+ q2_a_sub_q0 = __lsx_vabsd_bu(q2_src, q0_src); \
+ p3_a_sub_p0 = __lsx_vabsd_bu(p3_src, p0_src); \
+ q3_a_sub_q0 = __lsx_vabsd_bu(q3_src, q0_src); \
+ \
+ p2_a_sub_p0 = __lsx_vmax_bu(p2_a_sub_p0, q2_a_sub_q0); \
+ flat_dst = __lsx_vmax_bu(p2_a_sub_p0, flat_dst); \
+ p3_a_sub_p0 = __lsx_vmax_bu(p3_a_sub_p0, q3_a_sub_q0); \
+ flat_dst = __lsx_vmax_bu(p3_a_sub_p0, flat_dst); \
+ \
+ flat_dst = __lsx_vslt_bu(f_tmp, flat_dst); \
+ flat_dst = __lsx_vxori_b(flat_dst, 0xff); \
+ flat_dst = flat_dst & mask; \
+}
+
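+/* flat2 extends the flatness test to p4..p7 and q4..q7 for the 16-wide
+ * filter, restricted to positions where flat is already set. */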
+#define VP9_FLAT5(p7_src, p6_src, p5_src, p4_src, p0_src, q0_src, q4_src, \
+ q5_src, q6_src, q7_src, flat_src, flat2_dst) \
+{ \
+ __m128i f_tmp = __lsx_vldi(1); \
+ __m128i p4_a_sub_p0, q4_a_sub_q0, p5_a_sub_p0, q5_a_sub_q0; \
+ __m128i p6_a_sub_p0, q6_a_sub_q0, p7_a_sub_p0, q7_a_sub_q0; \
+ \
+ p4_a_sub_p0 = __lsx_vabsd_bu(p4_src, p0_src); \
+ q4_a_sub_q0 = __lsx_vabsd_bu(q4_src, q0_src); \
+ p5_a_sub_p0 = __lsx_vabsd_bu(p5_src, p0_src); \
+ q5_a_sub_q0 = __lsx_vabsd_bu(q5_src, q0_src); \
+ p6_a_sub_p0 = __lsx_vabsd_bu(p6_src, p0_src); \
+ q6_a_sub_q0 = __lsx_vabsd_bu(q6_src, q0_src); \
+ p7_a_sub_p0 = __lsx_vabsd_bu(p7_src, p0_src); \
+ q7_a_sub_q0 = __lsx_vabsd_bu(q7_src, q0_src); \
+ \
+ p4_a_sub_p0 = __lsx_vmax_bu(p4_a_sub_p0, q4_a_sub_q0); \
+ flat2_dst = __lsx_vmax_bu(p5_a_sub_p0, q5_a_sub_q0); \
+ flat2_dst = __lsx_vmax_bu(p4_a_sub_p0, flat2_dst); \
+ p6_a_sub_p0 = __lsx_vmax_bu(p6_a_sub_p0, q6_a_sub_q0); \
+ flat2_dst = __lsx_vmax_bu(p6_a_sub_p0, flat2_dst); \
+ p7_a_sub_p0 = __lsx_vmax_bu(p7_a_sub_p0, q7_a_sub_q0); \
+ flat2_dst = __lsx_vmax_bu(p7_a_sub_p0, flat2_dst); \
+ \
+ flat2_dst = __lsx_vslt_bu(f_tmp, flat2_dst); \
+ flat2_dst = __lsx_vxori_b(flat2_dst, 0xff); \
+ flat2_dst = flat2_dst & flat_src; \
+}
+
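+/* 8-tap flat filter: each output is a rounded 3-bit shift of a weighted
+ * sum over the p3..q3 window, computed on 16-bit lanes to avoid overflow. */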
+#define VP9_FILTER8(p3_src, p2_src, p1_src, p0_src, \
+ q0_src, q1_src, q2_src, q3_src, \
+ p2_filt8_dst, p1_filt8_dst, p0_filt8_dst, \
+ q0_filt8_dst, q1_filt8_dst, q2_filt8_dst) \
+{ \
+ __m128i tmp0, tmp1, tmp2; \
+ \
+ tmp2 = __lsx_vadd_h(p2_src, p1_src); \
+ tmp2 = __lsx_vadd_h(tmp2, p0_src); \
+ tmp0 = __lsx_vslli_h(p3_src, 1); \
+ \
+ tmp0 = __lsx_vadd_h(tmp0, tmp2); \
+ tmp0 = __lsx_vadd_h(tmp0, q0_src); \
+ tmp1 = __lsx_vadd_h(tmp0, p3_src); \
+ tmp1 = __lsx_vadd_h(tmp1, p2_src); \
+ p2_filt8_dst = __lsx_vsrari_h(tmp1, 3); \
+ \
+ tmp1 = __lsx_vadd_h(tmp0, p1_src); \
+ tmp1 = __lsx_vadd_h(tmp1, q1_src); \
+ p1_filt8_dst = __lsx_vsrari_h(tmp1, 3); \
+ \
+ tmp1 = __lsx_vadd_h(q2_src, q1_src); \
+ tmp1 = __lsx_vadd_h(tmp1, q0_src); \
+ tmp2 = __lsx_vadd_h(tmp2, tmp1); \
+ tmp0 = __lsx_vadd_h(tmp2, p0_src); \
+ tmp0 = __lsx_vadd_h(tmp0, p3_src); \
+ p0_filt8_dst = __lsx_vsrari_h(tmp0, 3); \
+ \
+ tmp0 = __lsx_vadd_h(q2_src, q3_src); \
+ tmp0 = __lsx_vadd_h(tmp0, p0_src); \
+ tmp0 = __lsx_vadd_h(tmp0, tmp1); \
+ tmp1 = __lsx_vadd_h(q3_src, q3_src); \
+ tmp1 = __lsx_vadd_h(tmp1, tmp0); \
+ q2_filt8_dst = __lsx_vsrari_h(tmp1, 3); \
+ \
+ tmp0 = __lsx_vadd_h(tmp2, q3_src); \
+ tmp1 = __lsx_vadd_h(tmp0, q0_src); \
+ q0_filt8_dst = __lsx_vsrari_h(tmp1, 3); \
+ \
+ tmp1 = __lsx_vsub_h(tmp0, p2_src); \
+ tmp0 = __lsx_vadd_h(q1_src, q3_src); \
+ tmp1 = __lsx_vadd_h(tmp0, tmp1); \
+ q1_filt8_dst = __lsx_vsrari_h(tmp1, 3); \
+}
+
+#define LPF_MASK_HEV(p3_src, p2_src, p1_src, p0_src, q0_src, q1_src, \
+ q2_src, q3_src, limit_src, b_limit_src, thresh_src, \
+ hev_dst, mask_dst, flat_dst) \
+{ \
+ __m128i p3_asub_p2_tmp, p2_asub_p1_tmp, p1_asub_p0_tmp, q1_asub_q0_tmp; \
+ __m128i p1_asub_q1_tmp, p0_asub_q0_tmp, q3_asub_q2_tmp, q2_asub_q1_tmp; \
+ \
+ /* absolute subtraction of pixel values */ \
+ p3_asub_p2_tmp = __lsx_vabsd_bu(p3_src, p2_src); \
+ p2_asub_p1_tmp = __lsx_vabsd_bu(p2_src, p1_src); \
+ p1_asub_p0_tmp = __lsx_vabsd_bu(p1_src, p0_src); \
+ q1_asub_q0_tmp = __lsx_vabsd_bu(q1_src, q0_src); \
+ q2_asub_q1_tmp = __lsx_vabsd_bu(q2_src, q1_src); \
+ q3_asub_q2_tmp = __lsx_vabsd_bu(q3_src, q2_src); \
+ p0_asub_q0_tmp = __lsx_vabsd_bu(p0_src, q0_src); \
+ p1_asub_q1_tmp = __lsx_vabsd_bu(p1_src, q1_src); \
+ \
+ /* calculation of hev */ \
+ flat_dst = __lsx_vmax_bu(p1_asub_p0_tmp, q1_asub_q0_tmp); \
+ hev_dst = __lsx_vslt_bu(thresh_src, flat_dst); \
+ \
+ /* calculation of mask */ \
+ p0_asub_q0_tmp = __lsx_vsadd_bu(p0_asub_q0_tmp, p0_asub_q0_tmp); \
+ p1_asub_q1_tmp = __lsx_vsrli_b(p1_asub_q1_tmp, 1); \
+ p0_asub_q0_tmp = __lsx_vsadd_bu(p0_asub_q0_tmp, p1_asub_q1_tmp); \
+ \
+ mask_dst = __lsx_vslt_bu(b_limit_src, p0_asub_q0_tmp); \
+ mask_dst = __lsx_vmax_bu(flat_dst, mask_dst); \
+ p3_asub_p2_tmp = __lsx_vmax_bu(p3_asub_p2_tmp, p2_asub_p1_tmp); \
+ mask_dst = __lsx_vmax_bu(p3_asub_p2_tmp, mask_dst); \
+ q2_asub_q1_tmp = __lsx_vmax_bu(q2_asub_q1_tmp, q3_asub_q2_tmp); \
+ mask_dst = __lsx_vmax_bu(q2_asub_q1_tmp, mask_dst); \
+ \
+ mask_dst = __lsx_vslt_bu(limit_src, mask_dst); \
+ mask_dst = __lsx_vxori_b(mask_dst, 0xff); \
+}
+
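+/* 8-pixel edge, 4-tap filter only: p3..p0 are the four rows above dst and
+ * q0..q3 the rows from dst downwards. */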
+void ff_loop_filter_v_4_8_lsx(uint8_t *dst, ptrdiff_t stride,
+ int32_t b_limit_ptr,
+ int32_t limit_ptr,
+ int32_t thresh_ptr)
+{
+ ptrdiff_t stride2 = stride << 1;
+ ptrdiff_t stride3 = stride2 + stride;
+ ptrdiff_t stride4 = stride2 << 1;
+ __m128i mask, hev, flat, thresh, b_limit, limit;
+ __m128i p3, p2, p1, p0, q3, q2, q1, q0, p1_out, p0_out, q0_out, q1_out;
+
+ DUP4_ARG2(__lsx_vldx, dst, -stride4, dst, -stride3, dst, -stride2,
+ dst, -stride, p3, p2, p1, p0);
+ q0 = __lsx_vld(dst, 0);
+ DUP2_ARG2(__lsx_vldx, dst, stride, dst, stride2, q1, q2);
+ q3 = __lsx_vldx(dst, stride3);
+
+ thresh = __lsx_vreplgr2vr_b(thresh_ptr);
+ b_limit = __lsx_vreplgr2vr_b(b_limit_ptr);
+ limit = __lsx_vreplgr2vr_b(limit_ptr);
+
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
+ hev, mask, flat);
+
+ VP9_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out,
+ q1_out);
+
+ __lsx_vstelm_d(p1_out, dst - stride2, 0, 0);
+ __lsx_vstelm_d(p0_out, dst - stride, 0, 0);
+ __lsx_vstelm_d(q0_out, dst , 0, 0);
+ __lsx_vstelm_d(q1_out, dst + stride, 0, 0);
+}
+
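+/* Two adjacent 8-pixel edges filtered at once: thresh/b_limit/limit carry
+ * one byte per edge (low byte for the first edge, next byte for the
+ * second), replicated into the two 64-bit halves of each vector. */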
+void ff_loop_filter_v_44_16_lsx(uint8_t *dst, ptrdiff_t stride,
+ int32_t b_limit_ptr,
+ int32_t limit_ptr,
+ int32_t thresh_ptr)
+{
+ ptrdiff_t stride2 = stride << 1;
+ ptrdiff_t stride3 = stride2 + stride;
+ ptrdiff_t stride4 = stride2 << 1;
+ __m128i mask, hev, flat, thresh0, b_limit0;
+ __m128i limit0, thresh1, b_limit1, limit1;
+ __m128i p3, p2, p1, p0, q3, q2, q1, q0;
+
+ DUP4_ARG2(__lsx_vldx, dst, -stride4, dst, -stride3, dst, -stride2,
+ dst, -stride, p3, p2, p1, p0);
+ q0 = __lsx_vld(dst, 0);
+ DUP2_ARG2(__lsx_vldx, dst, stride, dst, stride2, q1, q2);
+ q3 = __lsx_vldx(dst, stride3);
+
+ thresh0 = __lsx_vreplgr2vr_b(thresh_ptr);
+ thresh1 = __lsx_vreplgr2vr_b(thresh_ptr >> 8);
+ thresh0 = __lsx_vilvl_d(thresh1, thresh0);
+
+ b_limit0 = __lsx_vreplgr2vr_b(b_limit_ptr);
+ b_limit1 = __lsx_vreplgr2vr_b(b_limit_ptr >> 8);
+ b_limit0 = __lsx_vilvl_d(b_limit1, b_limit0);
+
+ limit0 = __lsx_vreplgr2vr_b(limit_ptr);
+ limit1 = __lsx_vreplgr2vr_b(limit_ptr >> 8);
+ limit0 = __lsx_vilvl_d(limit1, limit0);
+
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit0, b_limit0, thresh0,
+ hev, mask, flat);
+ VP9_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1, p0, q0, q1);
+
+ __lsx_vst(p1, dst - stride2, 0);
+ __lsx_vst(p0, dst - stride, 0);
+ __lsx_vst(q0, dst , 0);
+ __lsx_vst(q1, dst + stride, 0);
+}
+
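+/* 8-pixel edge with the 8-tap path: when the flat mask is all zero only
+ * the 4-tap result is stored, otherwise the filter8 output is blended in
+ * per pixel with __lsx_vbitsel_v. */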
+void ff_loop_filter_v_8_8_lsx(uint8_t *dst, ptrdiff_t stride,
+ int32_t b_limit_ptr,
+ int32_t limit_ptr,
+ int32_t thresh_ptr)
+{
+ ptrdiff_t stride2 = stride << 1;
+ ptrdiff_t stride3 = stride2 + stride;
+ ptrdiff_t stride4 = stride2 << 1;
+ __m128i mask, hev, flat, thresh, b_limit, limit;
+ __m128i p3, p2, p1, p0, q3, q2, q1, q0;
+ __m128i p2_out, p1_out, p0_out, q0_out, q1_out, q2_out;
+ __m128i p2_filter8, p1_filter8, p0_filter8;
+ __m128i q0_filter8, q1_filter8, q2_filter8;
+ __m128i p3_l, p2_l, p1_l, p0_l, q3_l, q2_l, q1_l, q0_l;
+ __m128i zero = __lsx_vldi(0);
+
+ DUP4_ARG2(__lsx_vldx, dst, -stride4, dst, -stride3, dst, -stride2,
+ dst, -stride, p3, p2, p1, p0);
+ q0 = __lsx_vld(dst, 0);
+ DUP2_ARG2(__lsx_vldx, dst, stride, dst, stride2, q1, q2);
+ q3 = __lsx_vldx(dst, stride3);
+
+ thresh = __lsx_vreplgr2vr_b(thresh_ptr);
+ b_limit = __lsx_vreplgr2vr_b(b_limit_ptr);
+ limit = __lsx_vreplgr2vr_b(limit_ptr);
+
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
+ hev, mask, flat);
+ VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
+ VP9_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out,
+ q1_out);
+
+ flat = __lsx_vilvl_d(zero, flat);
+
+ /* if flat is zero for all pixels, then no need to calculate other filter */
+ if (__lsx_bz_v(flat)) {
+ __lsx_vstelm_d(p1_out, dst - stride2, 0, 0);
+ __lsx_vstelm_d(p0_out, dst - stride, 0, 0);
+ __lsx_vstelm_d(q0_out, dst , 0, 0);
+ __lsx_vstelm_d(q1_out, dst + stride, 0, 0);
+ } else {
+ DUP4_ARG2(__lsx_vilvl_b, zero, p3, zero, p2, zero, p1, zero, p0,
+ p3_l, p2_l, p1_l, p0_l);
+ DUP4_ARG2(__lsx_vilvl_b, zero, q0, zero, q1, zero, q2, zero, q3,
+ q0_l, q1_l, q2_l, q3_l);
+ VP9_FILTER8(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, p2_filter8,
+ p1_filter8, p0_filter8, q0_filter8, q1_filter8, q2_filter8);
+
+ /* convert 16 bit output data into 8 bit */
+ DUP4_ARG2(__lsx_vpickev_b, zero, p2_filter8, zero, p1_filter8,
+ zero, p0_filter8, zero, q0_filter8, p2_filter8,
+ p1_filter8, p0_filter8, q0_filter8);
+ DUP2_ARG2(__lsx_vpickev_b, zero, q1_filter8, zero, q2_filter8,
+ q1_filter8, q2_filter8);
+
+ /* store pixel values */
+ p2_out = __lsx_vbitsel_v(p2, p2_filter8, flat);
+ p1_out = __lsx_vbitsel_v(p1_out, p1_filter8, flat);
+ p0_out = __lsx_vbitsel_v(p0_out, p0_filter8, flat);
+ q0_out = __lsx_vbitsel_v(q0_out, q0_filter8, flat);
+ q1_out = __lsx_vbitsel_v(q1_out, q1_filter8, flat);
+ q2_out = __lsx_vbitsel_v(q2, q2_filter8, flat);
+
+ __lsx_vstelm_d(p2_out, dst - stride3, 0, 0);
+ __lsx_vstelm_d(p1_out, dst - stride2, 0, 0);
+ __lsx_vstelm_d(p0_out, dst - stride, 0, 0);
+ __lsx_vstelm_d(q0_out, dst, 0, 0);
+ __lsx_vstelm_d(q1_out, dst + stride, 0, 0);
+ __lsx_vstelm_d(q2_out, dst + stride2, 0, 0);
+ }
+}
+
+void ff_loop_filter_v_88_16_lsx(uint8_t *dst, ptrdiff_t stride,
+ int32_t b_limit_ptr,
+ int32_t limit_ptr,
+ int32_t thresh_ptr)
+{
+ ptrdiff_t stride2 = stride << 1;
+ ptrdiff_t stride3 = stride2 + stride;
+ ptrdiff_t stride4 = stride2 << 1;
+ __m128i p3, p2, p1, p0, q3, q2, q1, q0;
+ __m128i p2_out, p1_out, p0_out, q0_out, q1_out, q2_out;
+ __m128i flat, mask, hev, tmp, thresh, b_limit, limit;
+ __m128i p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l;
+ __m128i p3_h, p2_h, p1_h, p0_h, q0_h, q1_h, q2_h, q3_h;
+ __m128i p2_filt8_l, p1_filt8_l, p0_filt8_l;
+ __m128i q0_filt8_l, q1_filt8_l, q2_filt8_l;
+ __m128i p2_filt8_h, p1_filt8_h, p0_filt8_h;
+ __m128i q0_filt8_h, q1_filt8_h, q2_filt8_h;
+ __m128i zero = __lsx_vldi(0);
+
+ /* load vector elements */
+ DUP4_ARG2(__lsx_vldx, dst, -stride4, dst, -stride3, dst, -stride2,
+ dst, -stride, p3, p2, p1, p0);
+ q0 = __lsx_vld(dst, 0);
+ DUP2_ARG2(__lsx_vldx, dst, stride, dst, stride2, q1, q2);
+ q3 = __lsx_vldx(dst, stride3);
+
+ thresh = __lsx_vreplgr2vr_b(thresh_ptr);
+ tmp = __lsx_vreplgr2vr_b(thresh_ptr >> 8);
+ thresh = __lsx_vilvl_d(tmp, thresh);
+
+ b_limit = __lsx_vreplgr2vr_b(b_limit_ptr);
+ tmp = __lsx_vreplgr2vr_b(b_limit_ptr >> 8);
+ b_limit = __lsx_vilvl_d(tmp, b_limit);
+
+ limit = __lsx_vreplgr2vr_b(limit_ptr);
+ tmp = __lsx_vreplgr2vr_b(limit_ptr >> 8);
+ limit = __lsx_vilvl_d(tmp, limit);
+
+ /* mask and hev */
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
+ hev, mask, flat);
+ VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
+ VP9_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out,
+ q1_out);
+
+ /* if flat is zero for all pixels, then no need to calculate other filter */
+ if (__lsx_bz_v(flat)) {
+ __lsx_vst(p1_out, dst - stride2, 0);
+ __lsx_vst(p0_out, dst - stride, 0);
+ __lsx_vst(q0_out, dst, 0);
+ __lsx_vst(q1_out, dst + stride, 0);
+ } else {
+ DUP4_ARG2(__lsx_vilvl_b, zero, p3, zero, p2, zero, p1, zero, p0,
+ p3_l, p2_l, p1_l, p0_l);
+ DUP4_ARG2(__lsx_vilvl_b, zero, q0, zero, q1, zero, q2, zero, q3,
+ q0_l, q1_l, q2_l, q3_l);
+ VP9_FILTER8(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, p2_filt8_l,
+ p1_filt8_l, p0_filt8_l, q0_filt8_l, q1_filt8_l, q2_filt8_l);
+
+ DUP4_ARG2(__lsx_vilvh_b, zero, p3, zero, p2, zero, p1, zero, p0,
+ p3_h, p2_h, p1_h, p0_h);
+ DUP4_ARG2(__lsx_vilvh_b, zero, q0, zero, q1, zero, q2, zero, q3,
+ q0_h, q1_h, q2_h, q3_h);
+ VP9_FILTER8(p3_h, p2_h, p1_h, p0_h, q0_h, q1_h, q2_h, q3_h, p2_filt8_h,
+ p1_filt8_h, p0_filt8_h, q0_filt8_h, q1_filt8_h, q2_filt8_h);
+
+ /* convert 16 bit output data into 8 bit */
+ DUP4_ARG2(__lsx_vpickev_b, p2_filt8_h, p2_filt8_l, p1_filt8_h,
+ p1_filt8_l, p0_filt8_h, p0_filt8_l, q0_filt8_h, q0_filt8_l,
+ p2_filt8_l, p1_filt8_l, p0_filt8_l, q0_filt8_l);
+ DUP2_ARG2(__lsx_vpickev_b, q1_filt8_h, q1_filt8_l, q2_filt8_h,
+ q2_filt8_l, q1_filt8_l, q2_filt8_l);
+
+ /* store pixel values */
+ p2_out = __lsx_vbitsel_v(p2, p2_filt8_l, flat);
+ p1_out = __lsx_vbitsel_v(p1_out, p1_filt8_l, flat);
+ p0_out = __lsx_vbitsel_v(p0_out, p0_filt8_l, flat);
+ q0_out = __lsx_vbitsel_v(q0_out, q0_filt8_l, flat);
+ q1_out = __lsx_vbitsel_v(q1_out, q1_filt8_l, flat);
+ q2_out = __lsx_vbitsel_v(q2, q2_filt8_l, flat);
+
+ __lsx_vstx(p2_out, dst, -stride3);
+ __lsx_vstx(p1_out, dst, -stride2);
+ __lsx_vstx(p0_out, dst, -stride);
+ __lsx_vst(q0_out, dst, 0);
+ __lsx_vstx(q1_out, dst, stride);
+ __lsx_vstx(q2_out, dst, stride2);
+ }
+}
+
+void ff_loop_filter_v_84_16_lsx(uint8_t *dst, ptrdiff_t stride,
+ int32_t b_limit_ptr,
+ int32_t limit_ptr,
+ int32_t thresh_ptr)
+{
+ ptrdiff_t stride2 = stride << 1;
+ ptrdiff_t stride3 = stride2 + stride;
+ ptrdiff_t stride4 = stride2 << 1;
+ __m128i p3, p2, p1, p0, q3, q2, q1, q0;
+ __m128i p2_out, p1_out, p0_out, q0_out, q1_out, q2_out;
+ __m128i flat, mask, hev, tmp, thresh, b_limit, limit;
+ __m128i p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l;
+ __m128i p2_filt8_l, p1_filt8_l, p0_filt8_l;
+ __m128i q0_filt8_l, q1_filt8_l, q2_filt8_l;
+ __m128i zero = __lsx_vldi(0);
+
+ /* load vector elements */
+ DUP4_ARG2(__lsx_vldx, dst, -stride4, dst, -stride3, dst, -stride2,
+ dst, -stride, p3, p2, p1, p0);
+ q0 = __lsx_vld(dst, 0);
+ DUP2_ARG2(__lsx_vldx, dst, stride, dst, stride2, q1, q2);
+ q3 = __lsx_vldx(dst, stride3);
+
+ thresh = __lsx_vreplgr2vr_b(thresh_ptr);
+ tmp = __lsx_vreplgr2vr_b(thresh_ptr >> 8);
+ thresh = __lsx_vilvl_d(tmp, thresh);
+
+ b_limit = __lsx_vreplgr2vr_b(b_limit_ptr);
+ tmp = __lsx_vreplgr2vr_b(b_limit_ptr >> 8);
+ b_limit = __lsx_vilvl_d(tmp, b_limit);
+
+ limit = __lsx_vreplgr2vr_b(limit_ptr);
+ tmp = __lsx_vreplgr2vr_b(limit_ptr >> 8);
+ limit = __lsx_vilvl_d(tmp, limit);
+
+ /* mask and hev */
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
+ hev, mask, flat);
+ VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
+ VP9_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out,
+ q1_out);
+
+ flat = __lsx_vilvl_d(zero, flat);
+
+ /* if flat is zero for all pixels, then no need to calculate other filter */
+ if (__lsx_bz_v(flat)) {
+ __lsx_vstx(p1_out, dst, -stride2);
+ __lsx_vstx(p0_out, dst, -stride);
+ __lsx_vst(q0_out, dst, 0);
+ __lsx_vstx(q1_out, dst, stride);
+ } else {
+ DUP4_ARG2(__lsx_vilvl_b, zero, p3, zero, p2, zero, p1, zero, p0,
+ p3_l, p2_l, p1_l, p0_l);
+ DUP4_ARG2(__lsx_vilvl_b, zero, q0, zero, q1, zero, q2, zero, q3,
+ q0_l, q1_l, q2_l, q3_l);
+ VP9_FILTER8(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, p2_filt8_l,
+ p1_filt8_l, p0_filt8_l, q0_filt8_l, q1_filt8_l, q2_filt8_l);
+
+ /* convert 16 bit output data into 8 bit */
+ DUP4_ARG2(__lsx_vpickev_b, p2_filt8_l, p2_filt8_l, p1_filt8_l,
+ p1_filt8_l, p0_filt8_l, p0_filt8_l, q0_filt8_l, q0_filt8_l,
+ p2_filt8_l, p1_filt8_l, p0_filt8_l, q0_filt8_l);
+ DUP2_ARG2(__lsx_vpickev_b, q1_filt8_l, q1_filt8_l, q2_filt8_l,
+ q2_filt8_l, q1_filt8_l, q2_filt8_l);
+
+ /* store pixel values */
+ p2_out = __lsx_vbitsel_v(p2, p2_filt8_l, flat);
+ p1_out = __lsx_vbitsel_v(p1_out, p1_filt8_l, flat);
+ p0_out = __lsx_vbitsel_v(p0_out, p0_filt8_l, flat);
+ q0_out = __lsx_vbitsel_v(q0_out, q0_filt8_l, flat);
+ q1_out = __lsx_vbitsel_v(q1_out, q1_filt8_l, flat);
+ q2_out = __lsx_vbitsel_v(q2, q2_filt8_l, flat);
+
+ __lsx_vstx(p2_out, dst, -stride3);
+ __lsx_vstx(p1_out, dst, -stride2);
+ __lsx_vstx(p0_out, dst, -stride);
+ __lsx_vst(q0_out, dst, 0);
+ __lsx_vstx(q1_out, dst, stride);
+ __lsx_vstx(q2_out, dst, stride2);
+ }
+}
+
+void ff_loop_filter_v_48_16_lsx(uint8_t *dst, ptrdiff_t stride,
+ int32_t b_limit_ptr,
+ int32_t limit_ptr,
+ int32_t thresh_ptr)
+{
+ ptrdiff_t stride2 = stride << 1;
+ ptrdiff_t stride3 = stride2 + stride;
+ ptrdiff_t stride4 = stride2 << 1;
+ __m128i p3, p2, p1, p0, q3, q2, q1, q0;
+ __m128i p2_out, p1_out, p0_out, q0_out, q1_out, q2_out;
+ __m128i flat, mask, hev, tmp, thresh, b_limit, limit;
+ __m128i p3_h, p2_h, p1_h, p0_h, q0_h, q1_h, q2_h, q3_h;
+ __m128i p2_filt8_h, p1_filt8_h, p0_filt8_h;
+ __m128i q0_filt8_h, q1_filt8_h, q2_filt8_h;
+ __m128i zero = __lsx_vldi(0);
+
+ /* load vector elements */
+ DUP4_ARG2(__lsx_vldx, dst, -stride4, dst, -stride3, dst, -stride2,
+ dst, -stride, p3, p2, p1, p0);
+ q0 = __lsx_vld(dst, 0);
+ DUP2_ARG2(__lsx_vldx, dst, stride, dst, stride2, q1, q2);
+ q3 = __lsx_vldx(dst, stride3);
+
+ thresh = __lsx_vreplgr2vr_b(thresh_ptr);
+ tmp = __lsx_vreplgr2vr_b(thresh_ptr >> 8);
+ thresh = __lsx_vilvl_d(tmp, thresh);
+
+ b_limit = __lsx_vreplgr2vr_b(b_limit_ptr);
+ tmp = __lsx_vreplgr2vr_b(b_limit_ptr >> 8);
+ b_limit = __lsx_vilvl_d(tmp, b_limit);
+
+ limit = __lsx_vreplgr2vr_b(limit_ptr);
+ tmp = __lsx_vreplgr2vr_b(limit_ptr >> 8);
+ limit = __lsx_vilvl_d(tmp, limit);
+
+ /* mask and hev */
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
+ hev, mask, flat);
+ VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
+ VP9_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out,
+ q1_out);
+
+ flat = __lsx_vilvh_d(flat, zero);
+
+    /* if flat is zero for all pixels, the filter8 path can be skipped */
+ if (__lsx_bz_v(flat)) {
+ __lsx_vstx(p1_out, dst, -stride2);
+ __lsx_vstx(p0_out, dst, -stride);
+ __lsx_vst(q0_out, dst, 0);
+ __lsx_vstx(q1_out, dst, stride);
+ } else {
+ DUP4_ARG2(__lsx_vilvh_b, zero, p3, zero, p2, zero, p1, zero, p0,
+ p3_h, p2_h, p1_h, p0_h);
+ DUP4_ARG2(__lsx_vilvh_b, zero, q0, zero, q1, zero, q2, zero, q3,
+ q0_h, q1_h, q2_h, q3_h);
+ VP9_FILTER8(p3_h, p2_h, p1_h, p0_h, q0_h, q1_h, q2_h, q3_h, p2_filt8_h,
+ p1_filt8_h, p0_filt8_h, q0_filt8_h, q1_filt8_h, q2_filt8_h);
+
+ /* convert 16 bit output data into 8 bit */
+ DUP4_ARG2(__lsx_vpickev_b, p2_filt8_h, p2_filt8_h, p1_filt8_h,
+ p1_filt8_h, p0_filt8_h, p0_filt8_h, q0_filt8_h, q0_filt8_h,
+ p2_filt8_h, p1_filt8_h, p0_filt8_h, q0_filt8_h);
+ DUP2_ARG2(__lsx_vpickev_b, q1_filt8_h, q1_filt8_h, q2_filt8_h,
+ q2_filt8_h, q1_filt8_h, q2_filt8_h);
+
+ /* store pixel values */
+ p2_out = __lsx_vbitsel_v(p2, p2_filt8_h, flat);
+ p1_out = __lsx_vbitsel_v(p1_out, p1_filt8_h, flat);
+ p0_out = __lsx_vbitsel_v(p0_out, p0_filt8_h, flat);
+ q0_out = __lsx_vbitsel_v(q0_out, q0_filt8_h, flat);
+ q1_out = __lsx_vbitsel_v(q1_out, q1_filt8_h, flat);
+ q2_out = __lsx_vbitsel_v(q2, q2_filt8_h, flat);
+
+ __lsx_vstx(p2_out, dst, -stride3);
+ __lsx_vstx(p1_out, dst, -stride2);
+ __lsx_vstx(p0_out, dst, -stride);
+ __lsx_vst(q0_out, dst, 0);
+ __lsx_vstx(q1_out, dst, stride);
+ __lsx_vstx(q2_out, dst, stride2);
+ }
+}
+
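+/*
+ * First stage of the 16-wide vertical filter: mask/hev, filter4 and filter8
+ * are evaluated for all 16 columns.  If flat is zero everywhere the filter4
+ * results are stored and 1 is returned, so the caller can skip the second
+ * stage.  Otherwise the six filter8-blended rows plus the flat mask are kept
+ * in the filter48 scratch buffer (seven 16-byte vectors at offsets 0..96)
+ * and 0 is returned.
+ */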
+static int32_t vp9_hz_lpf_t4_and_t8_16w(uint8_t *dst, ptrdiff_t stride,
+ uint8_t *filter48,
+ int32_t b_limit_ptr,
+ int32_t limit_ptr,
+ int32_t thresh_ptr)
+{
+ ptrdiff_t stride2 = stride << 1;
+ ptrdiff_t stride3 = stride2 + stride;
+ ptrdiff_t stride4 = stride2 << 1;
+ __m128i p3, p2, p1, p0, q3, q2, q1, q0;
+ __m128i p2_out, p1_out, p0_out, q0_out, q1_out, q2_out;
+ __m128i flat, mask, hev, thresh, b_limit, limit;
+ __m128i p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l;
+ __m128i p3_h, p2_h, p1_h, p0_h, q0_h, q1_h, q2_h, q3_h;
+ __m128i p2_filt8_l, p1_filt8_l, p0_filt8_l;
+ __m128i q0_filt8_l, q1_filt8_l, q2_filt8_l;
+ __m128i p2_filt8_h, p1_filt8_h, p0_filt8_h;
+ __m128i q0_filt8_h, q1_filt8_h, q2_filt8_h;
+ __m128i zero = __lsx_vldi(0);
+
+ /* load vector elements */
+ DUP4_ARG2(__lsx_vldx, dst, -stride4, dst, -stride3, dst, -stride2,
+ dst, -stride, p3, p2, p1, p0);
+ q0 = __lsx_vld(dst, 0);
+ DUP2_ARG2(__lsx_vldx, dst, stride, dst, stride2, q1, q2);
+ q3 = __lsx_vldx(dst, stride3);
+
+ thresh = __lsx_vreplgr2vr_b(thresh_ptr);
+ b_limit = __lsx_vreplgr2vr_b(b_limit_ptr);
+ limit = __lsx_vreplgr2vr_b(limit_ptr);
+
+ /* mask and hev */
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
+ hev, mask, flat);
+ VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
+ VP9_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out,
+ q1_out);
+
+    /* if flat is zero for all pixels, the filter8 path can be skipped */
+ if (__lsx_bz_v(flat)) {
+ __lsx_vstx(p1_out, dst, -stride2);
+ __lsx_vstx(p0_out, dst, -stride);
+ __lsx_vst(q0_out, dst, 0);
+ __lsx_vstx(q1_out, dst, stride);
+ return 1;
+ } else {
+ DUP4_ARG2(__lsx_vilvl_b, zero, p3, zero, p2, zero, p1, zero, p0,
+ p3_l, p2_l, p1_l, p0_l);
+ DUP4_ARG2(__lsx_vilvl_b, zero, q0, zero, q1, zero, q2, zero, q3,
+ q0_l, q1_l, q2_l, q3_l);
+ VP9_FILTER8(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, p2_filt8_l,
+ p1_filt8_l, p0_filt8_l, q0_filt8_l, q1_filt8_l, q2_filt8_l);
+
+ DUP4_ARG2(__lsx_vilvh_b, zero, p3, zero, p2, zero, p1, zero, p0,
+ p3_h, p2_h, p1_h, p0_h);
+ DUP4_ARG2(__lsx_vilvh_b, zero, q0, zero, q1, zero, q2, zero, q3,
+ q0_h, q1_h, q2_h, q3_h);
+ VP9_FILTER8(p3_h, p2_h, p1_h, p0_h, q0_h, q1_h, q2_h, q3_h, p2_filt8_h,
+ p1_filt8_h, p0_filt8_h, q0_filt8_h, q1_filt8_h, q2_filt8_h);
+
+ /* convert 16 bit output data into 8 bit */
+ DUP4_ARG2(__lsx_vpickev_b, p2_filt8_h, p2_filt8_l, p1_filt8_h,
+ p1_filt8_l, p0_filt8_h, p0_filt8_l, q0_filt8_h, q0_filt8_l,
+ p2_filt8_l, p1_filt8_l, p0_filt8_l, q0_filt8_l);
+ DUP2_ARG2(__lsx_vpickev_b, q1_filt8_h, q1_filt8_l, q2_filt8_h,
+ q2_filt8_l, q1_filt8_l, q2_filt8_l);
+
+ /* store pixel values */
+ p2_out = __lsx_vbitsel_v(p2, p2_filt8_l, flat);
+ p1_out = __lsx_vbitsel_v(p1_out, p1_filt8_l, flat);
+ p0_out = __lsx_vbitsel_v(p0_out, p0_filt8_l, flat);
+ q0_out = __lsx_vbitsel_v(q0_out, q0_filt8_l, flat);
+ q1_out = __lsx_vbitsel_v(q1_out, q1_filt8_l, flat);
+ q2_out = __lsx_vbitsel_v(q2, q2_filt8_l, flat);
+
+ __lsx_vst(p2_out, filter48, 0);
+ __lsx_vst(p1_out, filter48, 16);
+ __lsx_vst(p0_out, filter48, 32);
+ __lsx_vst(q0_out, filter48, 48);
+ __lsx_vst(q1_out, filter48, 64);
+ __lsx_vst(q2_out, filter48, 80);
+ __lsx_vst(flat, filter48, 96);
+
+ return 0;
+ }
+}
+
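+/*
+ * Second stage of the 16-wide vertical filter: the full p7..q7 neighbourhood
+ * is loaded and flat2 is built with VP9_FLAT5.  Where flat2 is set, p6..q6
+ * are replaced by the wide-filter output, a rounded weighted sum over the
+ * window ((sum + 8) >> 4); the sum is kept as a running total in
+ * tmp1_l/tmp1_h and updated per output row by adding the incoming tap and
+ * subtracting the outgoing one.
+ */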
+static void vp9_hz_lpf_t16_16w(uint8_t *dst, ptrdiff_t stride,
+ uint8_t *filter48)
+{
+ ptrdiff_t stride2 = stride << 1;
+ ptrdiff_t stride3 = stride2 + stride;
+ ptrdiff_t stride4 = stride2 << 1;
+ uint8_t *dst_tmp = dst - stride4;
+ uint8_t *dst_tmp1 = dst + stride4;
+ __m128i p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
+ __m128i flat, flat2, filter8;
+ __m128i zero = __lsx_vldi(0);
+ __m128i out_h, out_l;
+ v8u16 p7_l_in, p6_l_in, p5_l_in, p4_l_in;
+ v8u16 p3_l_in, p2_l_in, p1_l_in, p0_l_in;
+ v8u16 q7_l_in, q6_l_in, q5_l_in, q4_l_in;
+ v8u16 q3_l_in, q2_l_in, q1_l_in, q0_l_in;
+ v8u16 p7_h_in, p6_h_in, p5_h_in, p4_h_in;
+ v8u16 p3_h_in, p2_h_in, p1_h_in, p0_h_in;
+ v8u16 q7_h_in, q6_h_in, q5_h_in, q4_h_in;
+ v8u16 q3_h_in, q2_h_in, q1_h_in, q0_h_in;
+ v8u16 tmp0_l, tmp1_l, tmp0_h, tmp1_h;
+
+ flat = __lsx_vld(filter48, 96);
+
+ DUP4_ARG2(__lsx_vldx, dst_tmp, -stride4, dst_tmp, -stride3, dst_tmp,
+ -stride2, dst_tmp, -stride, p7, p6, p5, p4);
+ p3 = __lsx_vld(dst_tmp, 0);
+ DUP2_ARG2(__lsx_vldx, dst_tmp, stride, dst_tmp, stride2, p2, p1);
+ p0 = __lsx_vldx(dst_tmp, stride3);
+
+ q0 = __lsx_vld(dst, 0);
+ DUP2_ARG2(__lsx_vldx, dst, stride, dst, stride2, q1, q2);
+ q3 = __lsx_vldx(dst, stride3);
+
+ q4 = __lsx_vld(dst_tmp1, 0);
+ DUP2_ARG2(__lsx_vldx, dst_tmp1, stride, dst_tmp1, stride2, q5, q6);
+ q7 = __lsx_vldx(dst_tmp1, stride3);
+ VP9_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
+
+    /* if flat2 is zero for all pixels, the wide filter can be skipped */
+ if (__lsx_bz_v(flat2)) {
+ DUP4_ARG2(__lsx_vld, filter48, 0, filter48, 16, filter48, 32, filter48,
+ 48, p2, p1, p0, q0);
+ DUP2_ARG2(__lsx_vld, filter48, 64, filter48, 80, q1, q2);
+
+ __lsx_vstx(p2, dst, -stride3);
+ __lsx_vstx(p1, dst, -stride2);
+ __lsx_vstx(p0, dst, -stride);
+ __lsx_vst(q0, dst, 0);
+ __lsx_vstx(q1, dst, stride);
+ __lsx_vstx(q2, dst, stride2);
+ } else {
+ dst = dst_tmp - stride3;
+
+ p7_l_in = (v8u16)__lsx_vilvl_b(zero, p7);
+ p6_l_in = (v8u16)__lsx_vilvl_b(zero, p6);
+ p5_l_in = (v8u16)__lsx_vilvl_b(zero, p5);
+ p4_l_in = (v8u16)__lsx_vilvl_b(zero, p4);
+ p3_l_in = (v8u16)__lsx_vilvl_b(zero, p3);
+ p2_l_in = (v8u16)__lsx_vilvl_b(zero, p2);
+ p1_l_in = (v8u16)__lsx_vilvl_b(zero, p1);
+ p0_l_in = (v8u16)__lsx_vilvl_b(zero, p0);
+
+ q0_l_in = (v8u16)__lsx_vilvl_b(zero, q0);
+
+ tmp0_l = p7_l_in << 3;
+ tmp0_l -= p7_l_in;
+ tmp0_l += p6_l_in;
+ tmp0_l += q0_l_in;
+ tmp1_l = p6_l_in + p5_l_in;
+ tmp1_l += p4_l_in;
+ tmp1_l += p3_l_in;
+ tmp1_l += p2_l_in;
+ tmp1_l += p1_l_in;
+ tmp1_l += p0_l_in;
+ tmp1_l += tmp0_l;
+
+ out_l = __lsx_vsrari_h((__m128i)tmp1_l, 4);
+
+ p7_h_in = (v8u16)__lsx_vilvh_b(zero, p7);
+ p6_h_in = (v8u16)__lsx_vilvh_b(zero, p6);
+ p5_h_in = (v8u16)__lsx_vilvh_b(zero, p5);
+ p4_h_in = (v8u16)__lsx_vilvh_b(zero, p4);
+
+ p3_h_in = (v8u16)__lsx_vilvh_b(zero, p3);
+ p2_h_in = (v8u16)__lsx_vilvh_b(zero, p2);
+ p1_h_in = (v8u16)__lsx_vilvh_b(zero, p1);
+ p0_h_in = (v8u16)__lsx_vilvh_b(zero, p0);
+ q0_h_in = (v8u16)__lsx_vilvh_b(zero, q0);
+
+ tmp0_h = p7_h_in << 3;
+ tmp0_h -= p7_h_in;
+ tmp0_h += p6_h_in;
+ tmp0_h += q0_h_in;
+ tmp1_h = p6_h_in + p5_h_in;
+ tmp1_h += p4_h_in;
+ tmp1_h += p3_h_in;
+ tmp1_h += p2_h_in;
+ tmp1_h += p1_h_in;
+ tmp1_h += p0_h_in;
+ tmp1_h += tmp0_h;
+
+ out_h = __lsx_vsrari_h((__m128i)tmp1_h, 4);
+
+ out_l = __lsx_vpickev_b(out_h, out_l);
+ p6 = __lsx_vbitsel_v(p6, out_l, flat2);
+ __lsx_vst(p6, dst, 0);
+ dst += stride;
+
+ /* p5 */
+ q1_l_in = (v8u16)__lsx_vilvl_b(zero, q1);
+ tmp0_l = p5_l_in - p6_l_in;
+ tmp0_l += q1_l_in;
+ tmp0_l -= p7_l_in;
+ tmp1_l += tmp0_l;
+ out_l = __lsx_vsrari_h((__m128i)tmp1_l, 4);
+
+ q1_h_in = (v8u16)__lsx_vilvh_b(zero, q1);
+ tmp0_h = p5_h_in - p6_h_in;
+ tmp0_h += q1_h_in;
+ tmp0_h -= p7_h_in;
+ tmp1_h += tmp0_h;
+ out_h = __lsx_vsrari_h((__m128i)tmp1_h, 4);
+
+ out_l = __lsx_vpickev_b(out_h, out_l);
+ p5 = __lsx_vbitsel_v(p5, out_l, flat2);
+ __lsx_vst(p5, dst, 0);
+ dst += stride;
+
+ /* p4 */
+ q2_l_in = (v8u16)__lsx_vilvl_b(zero, q2);
+ tmp0_l = p4_l_in - p5_l_in;
+ tmp0_l += q2_l_in;
+ tmp0_l -= p7_l_in;
+ tmp1_l += tmp0_l;
+ out_l = __lsx_vsrari_h((__m128i)tmp1_l, 4);
+
+ q2_h_in = (v8u16)__lsx_vilvh_b(zero, q2);
+ tmp0_h = p4_h_in - p5_h_in;
+ tmp0_h += q2_h_in;
+ tmp0_h -= p7_h_in;
+ tmp1_h += tmp0_h;
+ out_h = __lsx_vsrari_h((__m128i)tmp1_h, 4);
+
+ out_l = __lsx_vpickev_b(out_h, out_l);
+ p4 = __lsx_vbitsel_v(p4, out_l, flat2);
+ __lsx_vst(p4, dst, 0);
+ dst += stride;
+
+ /* p3 */
+ q3_l_in = (v8u16)__lsx_vilvl_b(zero, q3);
+ tmp0_l = p3_l_in - p4_l_in;
+ tmp0_l += q3_l_in;
+ tmp0_l -= p7_l_in;
+ tmp1_l += tmp0_l;
+ out_l = __lsx_vsrari_h((__m128i)tmp1_l, 4);
+
+ q3_h_in = (v8u16)__lsx_vilvh_b(zero, q3);
+ tmp0_h = p3_h_in - p4_h_in;
+ tmp0_h += q3_h_in;
+ tmp0_h -= p7_h_in;
+ tmp1_h += tmp0_h;
+ out_h = __lsx_vsrari_h((__m128i)tmp1_h, 4);
+
+ out_l = __lsx_vpickev_b(out_h, out_l);
+ p3 = __lsx_vbitsel_v(p3, out_l, flat2);
+ __lsx_vst(p3, dst, 0);
+ dst += stride;
+
+ /* p2 */
+ q4_l_in = (v8u16)__lsx_vilvl_b(zero, q4);
+ filter8 = __lsx_vld(filter48, 0);
+ tmp0_l = p2_l_in - p3_l_in;
+ tmp0_l += q4_l_in;
+ tmp0_l -= p7_l_in;
+ tmp1_l += tmp0_l;
+ out_l = __lsx_vsrari_h((__m128i)tmp1_l, 4);
+
+ q4_h_in = (v8u16)__lsx_vilvh_b(zero, q4);
+ tmp0_h = p2_h_in - p3_h_in;
+ tmp0_h += q4_h_in;
+ tmp0_h -= p7_h_in;
+ tmp1_h += tmp0_h;
+ out_h = __lsx_vsrari_h((__m128i)tmp1_h, 4);
+
+ out_l = __lsx_vpickev_b(out_h, out_l);
+ filter8 = __lsx_vbitsel_v(filter8, out_l, flat2);
+ __lsx_vst(filter8, dst, 0);
+ dst += stride;
+
+ /* p1 */
+ q5_l_in = (v8u16)__lsx_vilvl_b(zero, q5);
+ filter8 = __lsx_vld(filter48, 16);
+ tmp0_l = p1_l_in - p2_l_in;
+ tmp0_l += q5_l_in;
+ tmp0_l -= p7_l_in;
+ tmp1_l += tmp0_l;
+ out_l = __lsx_vsrari_h((__m128i)tmp1_l, 4);
+
+ q5_h_in = (v8u16)__lsx_vilvh_b(zero, q5);
+ tmp0_h = p1_h_in - p2_h_in;
+ tmp0_h += q5_h_in;
+ tmp0_h -= p7_h_in;
+ tmp1_h += tmp0_h;
+ out_h = __lsx_vsrari_h((__m128i)tmp1_h, 4);
+
+ out_l = __lsx_vpickev_b(out_h, out_l);
+ filter8 = __lsx_vbitsel_v(filter8, out_l, flat2);
+ __lsx_vst(filter8, dst, 0);
+ dst += stride;
+
+ /* p0 */
+ q6_l_in = (v8u16)__lsx_vilvl_b(zero, q6);
+ filter8 = __lsx_vld(filter48, 32);
+ tmp0_l = p0_l_in - p1_l_in;
+ tmp0_l += q6_l_in;
+ tmp0_l -= p7_l_in;
+ tmp1_l += tmp0_l;
+ out_l = __lsx_vsrari_h((__m128i)tmp1_l, 4);
+
+ q6_h_in = (v8u16)__lsx_vilvh_b(zero, q6);
+ tmp0_h = p0_h_in - p1_h_in;
+ tmp0_h += q6_h_in;
+ tmp0_h -= p7_h_in;
+ tmp1_h += tmp0_h;
+ out_h = __lsx_vsrari_h((__m128i)tmp1_h, 4);
+
+ out_l = __lsx_vpickev_b(out_h, out_l);
+ filter8 = __lsx_vbitsel_v(filter8, out_l, flat2);
+ __lsx_vst(filter8, dst, 0);
+ dst += stride;
+
+ /* q0 */
+ q7_l_in = (v8u16)__lsx_vilvl_b(zero, q7);
+ filter8 = __lsx_vld(filter48, 48);
+ tmp0_l = q7_l_in - p0_l_in;
+ tmp0_l += q0_l_in;
+ tmp0_l -= p7_l_in;
+ tmp1_l += tmp0_l;
+ out_l = __lsx_vsrari_h((__m128i)tmp1_l, 4);
+
+ q7_h_in = (v8u16)__lsx_vilvh_b(zero, q7);
+ tmp0_h = q7_h_in - p0_h_in;
+ tmp0_h += q0_h_in;
+ tmp0_h -= p7_h_in;
+ tmp1_h += tmp0_h;
+ out_h = __lsx_vsrari_h((__m128i)tmp1_h, 4);
+
+ out_l = __lsx_vpickev_b(out_h, out_l);
+ filter8 = __lsx_vbitsel_v(filter8, out_l, flat2);
+ __lsx_vst(filter8, dst, 0);
+ dst += stride;
+
+ /* q1 */
+ filter8 = __lsx_vld(filter48, 64);
+ tmp0_l = q7_l_in - q0_l_in;
+ tmp0_l += q1_l_in;
+ tmp0_l -= p6_l_in;
+ tmp1_l += tmp0_l;
+ out_l = __lsx_vsrari_h((__m128i)tmp1_l, 4);
+
+ tmp0_h = q7_h_in - q0_h_in;
+ tmp0_h += q1_h_in;
+ tmp0_h -= p6_h_in;
+ tmp1_h += tmp0_h;
+ out_h = __lsx_vsrari_h((__m128i)tmp1_h, 4);
+
+ out_l = __lsx_vpickev_b(out_h, out_l);
+ filter8 = __lsx_vbitsel_v(filter8, out_l, flat2);
+ __lsx_vst(filter8, dst, 0);
+ dst += stride;
+
+ /* q2 */
+ filter8 = __lsx_vld(filter48, 80);
+ tmp0_l = q7_l_in - q1_l_in;
+ tmp0_l += q2_l_in;
+ tmp0_l -= p5_l_in;
+ tmp1_l += tmp0_l;
+ out_l = __lsx_vsrari_h((__m128i)tmp1_l, 4);
+
+ tmp0_h = q7_h_in - q1_h_in;
+ tmp0_h += q2_h_in;
+ tmp0_h -= p5_h_in;
+ tmp1_h += tmp0_h;
+ out_h = __lsx_vsrari_h((__m128i)tmp1_h, 4);
+
+ out_l = __lsx_vpickev_b(out_h, out_l);
+ filter8 = __lsx_vbitsel_v(filter8, out_l, flat2);
+ __lsx_vst(filter8, dst, 0);
+ dst += stride;
+
+ /* q3 */
+ tmp0_l = q7_l_in - q2_l_in;
+ tmp0_l += q3_l_in;
+ tmp0_l -= p4_l_in;
+ tmp1_l += tmp0_l;
+ out_l = __lsx_vsrari_h((__m128i)tmp1_l, 4);
+
+ tmp0_h = q7_h_in - q2_h_in;
+ tmp0_h += q3_h_in;
+ tmp0_h -= p4_h_in;
+ tmp1_h += tmp0_h;
+ out_h = __lsx_vsrari_h((__m128i)tmp1_h, 4);
+
+ out_l = __lsx_vpickev_b(out_h, out_l);
+ q3 = __lsx_vbitsel_v(q3, out_l, flat2);
+ __lsx_vst(q3, dst, 0);
+ dst += stride;
+
+ /* q4 */
+ tmp0_l = q7_l_in - q3_l_in;
+ tmp0_l += q4_l_in;
+ tmp0_l -= p3_l_in;
+ tmp1_l += tmp0_l;
+ out_l = __lsx_vsrari_h((__m128i)tmp1_l, 4);
+
+ tmp0_h = q7_h_in - q3_h_in;
+ tmp0_h += q4_h_in;
+ tmp0_h -= p3_h_in;
+ tmp1_h += tmp0_h;
+ out_h = __lsx_vsrari_h((__m128i)tmp1_h, 4);
+
+ out_l = __lsx_vpickev_b(out_h, out_l);
+ q4 = __lsx_vbitsel_v(q4, out_l, flat2);
+ __lsx_vst(q4, dst, 0);
+ dst += stride;
+
+ /* q5 */
+ tmp0_l = q7_l_in - q4_l_in;
+ tmp0_l += q5_l_in;
+ tmp0_l -= p2_l_in;
+ tmp1_l += tmp0_l;
+ out_l = __lsx_vsrari_h((__m128i)tmp1_l, 4);
+
+ tmp0_h = q7_h_in - q4_h_in;
+ tmp0_h += q5_h_in;
+ tmp0_h -= p2_h_in;
+ tmp1_h += tmp0_h;
+ out_h = __lsx_vsrari_h((__m128i)tmp1_h, 4);
+
+ out_l = __lsx_vpickev_b(out_h, out_l);
+ q5 = __lsx_vbitsel_v(q5, out_l, flat2);
+ __lsx_vst(q5, dst, 0);
+ dst += stride;
+
+ /* q6 */
+ tmp0_l = q7_l_in - q5_l_in;
+ tmp0_l += q6_l_in;
+ tmp0_l -= p1_l_in;
+ tmp1_l += tmp0_l;
+ out_l = __lsx_vsrari_h((__m128i)tmp1_l, 4);
+
+ tmp0_h = q7_h_in - q5_h_in;
+ tmp0_h += q6_h_in;
+ tmp0_h -= p1_h_in;
+ tmp1_h += tmp0_h;
+ out_h = __lsx_vsrari_h((__m128i)tmp1_h, 4);
+
+ out_l = __lsx_vpickev_b(out_h, out_l);
+ q6 = __lsx_vbitsel_v(q6, out_l, flat2);
+ __lsx_vst(q6, dst, 0);
+ }
+}
+
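+/*
+ * 16-wide vertical (horizontal edge) loop filter: stage one writes the
+ * filter4/filter8 output and may early-exit; stage two is run only when
+ * stage one found flat columns and reuses the rows cached in filter48.
+ */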
+void ff_loop_filter_v_16_16_lsx(uint8_t *dst, ptrdiff_t stride,
+ int32_t b_limit_ptr,
+ int32_t limit_ptr,
+ int32_t thresh_ptr)
+{
+ uint8_t filter48[16 * 8] __attribute__ ((aligned(16)));
+ uint8_t early_exit = 0;
+
+ early_exit = vp9_hz_lpf_t4_and_t8_16w(dst, stride, &filter48[0],
+ b_limit_ptr, limit_ptr, thresh_ptr);
+
+ if (0 == early_exit) {
+ vp9_hz_lpf_t16_16w(dst, stride, filter48);
+ }
+}
+
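+/*
+ * 8-column variant of the 16-wide vertical filter: only the low 64 bits of
+ * each vector carry pixels, so results are written back with 8-byte element
+ * stores, and the wide filter produces two output rows per step (tmp0/tmp2
+ * hold the two running-sum updates).
+ */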
+void ff_loop_filter_v_16_8_lsx(uint8_t *dst, ptrdiff_t stride,
+ int32_t b_limit_ptr,
+ int32_t limit_ptr,
+ int32_t thresh_ptr)
+{
+ ptrdiff_t stride2 = stride << 1;
+ ptrdiff_t stride3 = stride2 + stride;
+ ptrdiff_t stride4 = stride2 << 1;
+ uint8_t *dst_tmp = dst - stride4;
+ uint8_t *dst_tmp1 = dst + stride4;
+ __m128i zero = __lsx_vldi(0);
+ __m128i flat2, mask, hev, flat, thresh, b_limit, limit;
+ __m128i p3, p2, p1, p0, q3, q2, q1, q0, p7, p6, p5, p4, q4, q5, q6, q7;
+ __m128i p2_out, p1_out, p0_out, q0_out, q1_out, q2_out;
+ __m128i p0_filter16, p1_filter16;
+ __m128i p2_filter8, p1_filter8, p0_filter8;
+ __m128i q0_filter8, q1_filter8, q2_filter8;
+ __m128i p7_l, p6_l, p5_l, p4_l, q7_l, q6_l, q5_l, q4_l;
+ __m128i p3_l, p2_l, p1_l, p0_l, q3_l, q2_l, q1_l, q0_l;
+ __m128i tmp0, tmp1, tmp2;
+
+ /* load vector elements */
+ DUP4_ARG2(__lsx_vldx, dst, -stride4, dst, -stride3, dst, -stride2,
+ dst, -stride, p3, p2, p1, p0);
+ q0 = __lsx_vld(dst, 0);
+ DUP2_ARG2(__lsx_vldx, dst, stride, dst, stride2, q1, q2);
+ q3 = __lsx_vldx(dst, stride3);
+
+ thresh = __lsx_vreplgr2vr_b(thresh_ptr);
+ b_limit = __lsx_vreplgr2vr_b(b_limit_ptr);
+ limit = __lsx_vreplgr2vr_b(limit_ptr);
+
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
+ hev, mask, flat);
+ VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
+ VP9_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out,
+ q1_out);
+
+ flat = __lsx_vilvl_d(zero, flat);
+
+    /* if flat is zero for all pixels, the filter8 path can be skipped */
+ if (__lsx_bz_v(flat)) {
+ __lsx_vstelm_d(p1_out, dst - stride2, 0, 0);
+ __lsx_vstelm_d(p0_out, dst - stride, 0, 0);
+ __lsx_vstelm_d(q0_out, dst , 0, 0);
+ __lsx_vstelm_d(q1_out, dst + stride, 0, 0);
+ } else {
+ /* convert 8 bit input data into 16 bit */
+ DUP4_ARG2(__lsx_vilvl_b, zero, p3, zero, p2, zero, p1, zero, p0,
+ p3_l, p2_l, p1_l, p0_l);
+ DUP4_ARG2(__lsx_vilvl_b, zero, q0, zero, q1, zero, q2, zero, q3,
+ q0_l, q1_l, q2_l, q3_l);
+ VP9_FILTER8(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l,
+ p2_filter8, p1_filter8, p0_filter8, q0_filter8,
+ q1_filter8, q2_filter8);
+
+ /* convert 16 bit output data into 8 bit */
+ DUP4_ARG2(__lsx_vpickev_b, zero, p2_filter8, zero, p1_filter8,
+ zero, p0_filter8, zero, q0_filter8, p2_filter8,
+ p1_filter8, p0_filter8, q0_filter8);
+ DUP2_ARG2(__lsx_vpickev_b, zero, q1_filter8, zero, q2_filter8,
+ q1_filter8, q2_filter8);
+
+ /* store pixel values */
+ p2_out = __lsx_vbitsel_v(p2, p2_filter8, flat);
+ p1_out = __lsx_vbitsel_v(p1_out, p1_filter8, flat);
+ p0_out = __lsx_vbitsel_v(p0_out, p0_filter8, flat);
+ q0_out = __lsx_vbitsel_v(q0_out, q0_filter8, flat);
+ q1_out = __lsx_vbitsel_v(q1_out, q1_filter8, flat);
+ q2_out = __lsx_vbitsel_v(q2, q2_filter8, flat);
+
+ /* load 16 vector elements */
+ DUP4_ARG2(__lsx_vld, dst_tmp - stride4, 0, dst_tmp - stride3, 0,
+ dst_tmp - stride2, 0, dst_tmp - stride, 0, p7, p6, p5, p4);
+ DUP4_ARG2(__lsx_vld, dst_tmp1, 0, dst_tmp1 + stride, 0,
+ dst_tmp1 + stride2, 0, dst_tmp1 + stride3, 0, q4, q5, q6, q7);
+
+ VP9_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
+
+        /* if flat2 is zero for all pixels, the wide filter can be skipped */
+ if (__lsx_bz_v(flat2)) {
+ dst -= stride3;
+ __lsx_vstelm_d(p2_out, dst, 0, 0);
+ dst += stride;
+ __lsx_vstelm_d(p1_out, dst, 0, 0);
+ dst += stride;
+ __lsx_vstelm_d(p0_out, dst, 0, 0);
+ dst += stride;
+ __lsx_vstelm_d(q0_out, dst, 0, 0);
+ dst += stride;
+ __lsx_vstelm_d(q1_out, dst, 0, 0);
+ dst += stride;
+ __lsx_vstelm_d(q2_out, dst, 0, 0);
+ } else {
+            /* LSB (right) 8-pixel operation */
+ DUP4_ARG2(__lsx_vilvl_b, zero, p7, zero, p6, zero, p5, zero, p4,
+ p7_l, p6_l, p5_l, p4_l);
+ DUP4_ARG2(__lsx_vilvl_b, zero, q4, zero, q5, zero, q6, zero, q7,
+ q4_l, q5_l, q6_l, q7_l);
+
+ tmp0 = __lsx_vslli_h(p7_l, 3);
+ tmp0 = __lsx_vsub_h(tmp0, p7_l);
+ tmp0 = __lsx_vadd_h(tmp0, p6_l);
+ tmp0 = __lsx_vadd_h(tmp0, q0_l);
+
+ dst = dst_tmp - stride3;
+
+ /* calculation of p6 and p5 */
+ tmp1 = __lsx_vadd_h(p6_l, p5_l);
+ tmp1 = __lsx_vadd_h(tmp1, p4_l);
+ tmp1 = __lsx_vadd_h(tmp1, p3_l);
+ tmp1 = __lsx_vadd_h(tmp1, p2_l);
+ tmp1 = __lsx_vadd_h(tmp1, p1_l);
+ tmp1 = __lsx_vadd_h(tmp1, p0_l);
+ tmp1 = __lsx_vadd_h(tmp1, tmp0);
+
+ p0_filter16 = __lsx_vsrari_h(tmp1, 4);
+ tmp0 = __lsx_vsub_h(p5_l, p6_l);
+ tmp0 = __lsx_vadd_h(tmp0, q1_l);
+ tmp0 = __lsx_vsub_h(tmp0, p7_l);
+ tmp1 = __lsx_vadd_h(tmp1, tmp0);
+
+ p1_filter16 = __lsx_vsrari_h(tmp1, 4);
+ DUP2_ARG2(__lsx_vpickev_b, zero, p0_filter16, zero,
+ p1_filter16, p0_filter16, p1_filter16);
+ p0_filter16 = __lsx_vbitsel_v(p6, p0_filter16, flat2);
+ p1_filter16 = __lsx_vbitsel_v(p5, p1_filter16, flat2);
+ __lsx_vstelm_d(p0_filter16, dst, 0, 0);
+ dst += stride;
+ __lsx_vstelm_d(p1_filter16, dst, 0, 0);
+ dst += stride;
+
+ /* calculation of p4 and p3 */
+ tmp0 = __lsx_vsub_h(p4_l, p5_l);
+ tmp0 = __lsx_vadd_h(tmp0, q2_l);
+ tmp0 = __lsx_vsub_h(tmp0, p7_l);
+ tmp2 = __lsx_vsub_h(p3_l, p4_l);
+ tmp2 = __lsx_vadd_h(tmp2, q3_l);
+ tmp2 = __lsx_vsub_h(tmp2, p7_l);
+ tmp1 = __lsx_vadd_h(tmp1, tmp0);
+ p0_filter16 = __lsx_vsrari_h(tmp1, 4);
+ tmp1 = __lsx_vadd_h(tmp1, tmp2);
+ p1_filter16 = __lsx_vsrari_h(tmp1, 4);
+ DUP2_ARG2(__lsx_vpickev_b, zero, p0_filter16, zero,
+ p1_filter16, p0_filter16, p1_filter16);
+ p0_filter16 = __lsx_vbitsel_v(p4, p0_filter16, flat2);
+ p1_filter16 = __lsx_vbitsel_v(p3, p1_filter16, flat2);
+ __lsx_vstelm_d(p0_filter16, dst, 0, 0);
+ dst += stride;
+ __lsx_vstelm_d(p1_filter16, dst, 0, 0);
+ dst += stride;
+
+ /* calculation of p2 and p1 */
+ tmp0 = __lsx_vsub_h(p2_l, p3_l);
+ tmp0 = __lsx_vadd_h(tmp0, q4_l);
+ tmp0 = __lsx_vsub_h(tmp0, p7_l);
+ tmp2 = __lsx_vsub_h(p1_l, p2_l);
+ tmp2 = __lsx_vadd_h(tmp2, q5_l);
+ tmp2 = __lsx_vsub_h(tmp2, p7_l);
+ tmp1 = __lsx_vadd_h(tmp1, tmp0);
+ p0_filter16 = __lsx_vsrari_h(tmp1, 4);
+ tmp1 = __lsx_vadd_h(tmp1, tmp2);
+ p1_filter16 = __lsx_vsrari_h(tmp1, 4);
+ DUP2_ARG2(__lsx_vpickev_b, zero, p0_filter16, zero,
+ p1_filter16, p0_filter16, p1_filter16);
+ p0_filter16 = __lsx_vbitsel_v(p2_out, p0_filter16, flat2);
+ p1_filter16 = __lsx_vbitsel_v(p1_out, p1_filter16, flat2);
+ __lsx_vstelm_d(p0_filter16, dst, 0, 0);
+ dst += stride;
+ __lsx_vstelm_d(p1_filter16, dst, 0, 0);
+ dst += stride;
+
+ /* calculation of p0 and q0 */
+ tmp0 = __lsx_vsub_h(p0_l, p1_l);
+ tmp0 = __lsx_vadd_h(tmp0, q6_l);
+ tmp0 = __lsx_vsub_h(tmp0, p7_l);
+ tmp2 = __lsx_vsub_h(q7_l, p0_l);
+ tmp2 = __lsx_vadd_h(tmp2, q0_l);
+ tmp2 = __lsx_vsub_h(tmp2, p7_l);
+ tmp1 = __lsx_vadd_h(tmp1, tmp0);
+ p0_filter16 = __lsx_vsrari_h((__m128i)tmp1, 4);
+ tmp1 = __lsx_vadd_h(tmp1, tmp2);
+ p1_filter16 = __lsx_vsrari_h((__m128i)tmp1, 4);
+ DUP2_ARG2(__lsx_vpickev_b, zero, p0_filter16, zero,
+ p1_filter16, p0_filter16, p1_filter16);
+ p0_filter16 = __lsx_vbitsel_v(p0_out, p0_filter16, flat2);
+ p1_filter16 = __lsx_vbitsel_v(q0_out, p1_filter16, flat2);
+ __lsx_vstelm_d(p0_filter16, dst, 0, 0);
+ dst += stride;
+ __lsx_vstelm_d(p1_filter16, dst, 0, 0);
+ dst += stride;
+
+ /* calculation of q1 and q2 */
+ tmp0 = __lsx_vsub_h(q7_l, q0_l);
+ tmp0 = __lsx_vadd_h(tmp0, q1_l);
+ tmp0 = __lsx_vsub_h(tmp0, p6_l);
+ tmp2 = __lsx_vsub_h(q7_l, q1_l);
+ tmp2 = __lsx_vadd_h(tmp2, q2_l);
+ tmp2 = __lsx_vsub_h(tmp2, p5_l);
+ tmp1 = __lsx_vadd_h(tmp1, tmp0);
+ p0_filter16 = __lsx_vsrari_h(tmp1, 4);
+ tmp1 = __lsx_vadd_h(tmp1, tmp2);
+ p1_filter16 = __lsx_vsrari_h(tmp1, 4);
+ DUP2_ARG2(__lsx_vpickev_b, zero, p0_filter16, zero,
+ p1_filter16, p0_filter16, p1_filter16);
+ p0_filter16 = __lsx_vbitsel_v(q1_out, p0_filter16, flat2);
+ p1_filter16 = __lsx_vbitsel_v(q2_out, p1_filter16, flat2);
+ __lsx_vstelm_d(p0_filter16, dst, 0, 0);
+ dst += stride;
+ __lsx_vstelm_d(p1_filter16, dst, 0, 0);
+ dst += stride;
+
+ /* calculation of q3 and q4 */
+ tmp0 = __lsx_vsub_h(q7_l, q2_l);
+ tmp0 = __lsx_vadd_h(tmp0, q3_l);
+ tmp0 = __lsx_vsub_h(tmp0, p4_l);
+ tmp2 = __lsx_vsub_h(q7_l, q3_l);
+ tmp2 = __lsx_vadd_h(tmp2, q4_l);
+ tmp2 = __lsx_vsub_h(tmp2, p3_l);
+ tmp1 = __lsx_vadd_h(tmp1, tmp0);
+ p0_filter16 = __lsx_vsrari_h(tmp1, 4);
+ tmp1 = __lsx_vadd_h(tmp1, tmp2);
+ p1_filter16 = __lsx_vsrari_h(tmp1, 4);
+ DUP2_ARG2(__lsx_vpickev_b, zero, p0_filter16, zero,
+ p1_filter16, p0_filter16, p1_filter16);
+ p0_filter16 = __lsx_vbitsel_v(q3, p0_filter16, flat2);
+ p1_filter16 = __lsx_vbitsel_v(q4, p1_filter16, flat2);
+ __lsx_vstelm_d(p0_filter16, dst, 0, 0);
+ dst += stride;
+ __lsx_vstelm_d(p1_filter16, dst, 0, 0);
+ dst += stride;
+
+ /* calculation of q5 and q6 */
+ tmp0 = __lsx_vsub_h(q7_l, q4_l);
+ tmp0 = __lsx_vadd_h(tmp0, q5_l);
+ tmp0 = __lsx_vsub_h(tmp0, p2_l);
+ tmp2 = __lsx_vsub_h(q7_l, q5_l);
+ tmp2 = __lsx_vadd_h(tmp2, q6_l);
+ tmp2 = __lsx_vsub_h(tmp2, p1_l);
+ tmp1 = __lsx_vadd_h(tmp1, tmp0);
+ p0_filter16 = __lsx_vsrari_h(tmp1, 4);
+ tmp1 = __lsx_vadd_h(tmp1, tmp2);
+ p1_filter16 = __lsx_vsrari_h(tmp1, 4);
+ DUP2_ARG2(__lsx_vpickev_b, zero, p0_filter16, zero,
+ p1_filter16, p0_filter16, p1_filter16);
+ p0_filter16 = __lsx_vbitsel_v(q5, p0_filter16, flat2);
+ p1_filter16 = __lsx_vbitsel_v(q6, p1_filter16, flat2);
+ __lsx_vstelm_d(p0_filter16, dst, 0, 0);
+ dst += stride;
+ __lsx_vstelm_d(p1_filter16, dst, 0, 0);
+ }
+ }
+}
+
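+/*
+ * Horizontal (vertical edge) loop filters: 8 or 16 rows around the edge are
+ * loaded, transposed so that each pixel column becomes a vector, run through
+ * the same filter4/filter8 code as the vertical case and written back with
+ * per-row element stores.
+ */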
+void ff_loop_filter_h_4_8_lsx(uint8_t *dst, ptrdiff_t stride,
+ int32_t b_limit_ptr,
+ int32_t limit_ptr,
+ int32_t thresh_ptr)
+{
+ ptrdiff_t stride2 = stride << 1;
+ ptrdiff_t stride3 = stride2 + stride;
+ ptrdiff_t stride4 = stride2 << 1;
+ uint8_t *dst_tmp1 = dst - 4;
+ uint8_t *dst_tmp2 = dst_tmp1 + stride4;
+ __m128i mask, hev, flat, limit, thresh, b_limit;
+ __m128i p3, p2, p1, p0, q3, q2, q1, q0;
+ __m128i vec0, vec1, vec2, vec3;
+
+ p3 = __lsx_vld(dst_tmp1, 0);
+ DUP2_ARG2(__lsx_vldx, dst_tmp1, stride, dst_tmp1, stride2, p2, p1);
+ p0 = __lsx_vldx(dst_tmp1, stride3);
+ q0 = __lsx_vld(dst_tmp2, 0);
+ DUP2_ARG2(__lsx_vldx, dst_tmp2, stride, dst_tmp2, stride2, q1, q2);
+ q3 = __lsx_vldx(dst_tmp2, stride3);
+
+ thresh = __lsx_vreplgr2vr_b(thresh_ptr);
+ b_limit = __lsx_vreplgr2vr_b(b_limit_ptr);
+ limit = __lsx_vreplgr2vr_b(limit_ptr);
+
+ LSX_TRANSPOSE8x8_B(p3, p2, p1, p0, q0, q1, q2, q3,
+ p3, p2, p1, p0, q0, q1, q2, q3);
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
+ hev, mask, flat);
+ VP9_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1, p0, q0, q1);
+ DUP2_ARG2(__lsx_vilvl_b, p0, p1, q1, q0, vec0, vec1);
+ vec2 = __lsx_vilvl_h(vec1, vec0);
+ vec3 = __lsx_vilvh_h(vec1, vec0);
+
+ dst -= 2;
+ __lsx_vstelm_w(vec2, dst, 0, 0);
+ __lsx_vstelm_w(vec2, dst + stride, 0, 1);
+ __lsx_vstelm_w(vec2, dst + stride2, 0, 2);
+ __lsx_vstelm_w(vec2, dst + stride3, 0, 3);
+ dst += stride4;
+ __lsx_vstelm_w(vec3, dst, 0, 0);
+ __lsx_vstelm_w(vec3, dst + stride, 0, 1);
+ __lsx_vstelm_w(vec3, dst + stride2, 0, 2);
+ __lsx_vstelm_w(vec3, dst + stride3, 0, 3);
+}
+
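+/*
+ * Dual filter4 on a 16-row vertical edge: rows 0-7 use the parameters packed
+ * in the low byte of each int32 argument, rows 8-15 those in the high byte.
+ */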
+void ff_loop_filter_h_44_16_lsx(uint8_t *dst, ptrdiff_t stride,
+ int32_t b_limit_ptr,
+ int32_t limit_ptr,
+ int32_t thresh_ptr)
+{
+ ptrdiff_t stride2 = stride << 1;
+ ptrdiff_t stride3 = stride2 + stride;
+ ptrdiff_t stride4 = stride2 << 1;
+ uint8_t *dst_tmp = dst - 4;
+ __m128i mask, hev, flat;
+ __m128i thresh0, b_limit0, limit0, thresh1, b_limit1, limit1;
+ __m128i p3, p2, p1, p0, q3, q2, q1, q0;
+ __m128i row0, row1, row2, row3, row4, row5, row6, row7;
+ __m128i row8, row9, row10, row11, row12, row13, row14, row15;
+ __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
+
+ row0 = __lsx_vld(dst_tmp, 0);
+ DUP2_ARG2(__lsx_vldx, dst_tmp, stride, dst_tmp, stride2, row1, row2);
+ row3 = __lsx_vldx(dst_tmp, stride3);
+ dst_tmp += stride4;
+ row4 = __lsx_vld(dst_tmp, 0);
+ DUP2_ARG2(__lsx_vldx, dst_tmp, stride, dst_tmp, stride2, row5, row6);
+ row7 = __lsx_vldx(dst_tmp, stride3);
+ dst_tmp += stride4;
+ row8 = __lsx_vld(dst_tmp, 0);
+ DUP2_ARG2(__lsx_vldx, dst_tmp, stride, dst_tmp, stride2, row9, row10);
+ row11 = __lsx_vldx(dst_tmp, stride3);
+ dst_tmp += stride4;
+ row12 = __lsx_vld(dst_tmp, 0);
+ DUP2_ARG2(__lsx_vldx, dst_tmp, stride, dst_tmp, stride2, row13, row14);
+ row15 = __lsx_vldx(dst_tmp, stride3);
+
+ LSX_TRANSPOSE16x8_B(row0, row1, row2, row3, row4, row5, row6, row7,
+ row8, row9, row10, row11, row12, row13, row14, row15,
+ p3, p2, p1, p0, q0, q1, q2, q3);
+
+ thresh0 = __lsx_vreplgr2vr_b(thresh_ptr);
+ thresh1 = __lsx_vreplgr2vr_b(thresh_ptr >> 8);
+ thresh0 = __lsx_vilvl_d(thresh1, thresh0);
+
+ b_limit0 = __lsx_vreplgr2vr_b(b_limit_ptr);
+ b_limit1 = __lsx_vreplgr2vr_b(b_limit_ptr >> 8);
+ b_limit0 = __lsx_vilvl_d(b_limit1, b_limit0);
+
+ limit0 = __lsx_vreplgr2vr_b(limit_ptr);
+ limit1 = __lsx_vreplgr2vr_b(limit_ptr >> 8);
+ limit0 = __lsx_vilvl_d(limit1, limit0);
+
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit0, b_limit0, thresh0,
+ hev, mask, flat);
+ VP9_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1, p0, q0, q1);
+ DUP2_ARG2(__lsx_vilvl_b, p0, p1, q1, q0, tmp0, tmp1);
+ tmp2 = __lsx_vilvl_h(tmp1, tmp0);
+ tmp3 = __lsx_vilvh_h(tmp1, tmp0);
+ DUP2_ARG2(__lsx_vilvh_b, p0, p1, q1, q0, tmp0, tmp1);
+ tmp4 = __lsx_vilvl_h(tmp1, tmp0);
+ tmp5 = __lsx_vilvh_h(tmp1, tmp0);
+
+ dst -= 2;
+ __lsx_vstelm_w(tmp2, dst, 0, 0);
+ __lsx_vstelm_w(tmp2, dst + stride, 0, 1);
+ __lsx_vstelm_w(tmp2, dst + stride2, 0, 2);
+ __lsx_vstelm_w(tmp2, dst + stride3, 0, 3);
+ dst += stride4;
+ __lsx_vstelm_w(tmp3, dst, 0, 0);
+ __lsx_vstelm_w(tmp3, dst + stride, 0, 1);
+ __lsx_vstelm_w(tmp3, dst + stride2, 0, 2);
+ __lsx_vstelm_w(tmp3, dst + stride3, 0, 3);
+ dst += stride4;
+ __lsx_vstelm_w(tmp4, dst, 0, 0);
+ __lsx_vstelm_w(tmp4, dst + stride, 0, 1);
+ __lsx_vstelm_w(tmp4, dst + stride2, 0, 2);
+ __lsx_vstelm_w(tmp4, dst + stride3, 0, 3);
+ dst += stride4;
+ __lsx_vstelm_w(tmp5, dst, 0, 0);
+ __lsx_vstelm_w(tmp5, dst + stride, 0, 1);
+ __lsx_vstelm_w(tmp5, dst + stride2, 0, 2);
+ __lsx_vstelm_w(tmp5, dst + stride3, 0, 3);
+}
+
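+/*
+ * filter4/filter8 on an 8-row vertical edge: when any row takes the filter8
+ * path, six pixels (p2..q2) are written back per row with a 4-byte plus a
+ * 2-byte element store.
+ */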
+void ff_loop_filter_h_8_8_lsx(uint8_t *dst, ptrdiff_t stride,
+ int32_t b_limit_ptr,
+ int32_t limit_ptr,
+ int32_t thresh_ptr)
+{
+ ptrdiff_t stride2 = stride << 1;
+ ptrdiff_t stride3 = stride2 + stride;
+ ptrdiff_t stride4 = stride2 << 1;
+ uint8_t *dst_tmp = dst - 4;
+ __m128i p3, p2, p1, p0, q3, q2, q1, q0;
+ __m128i p1_out, p0_out, q0_out, q1_out;
+ __m128i flat, mask, hev, thresh, b_limit, limit;
+ __m128i p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l;
+ __m128i p2_filt8_l, p1_filt8_l, p0_filt8_l;
+ __m128i q0_filt8_l, q1_filt8_l, q2_filt8_l;
+ __m128i vec0, vec1, vec2, vec3, vec4;
+ __m128i zero = __lsx_vldi(0);
+
+ /* load vector elements */
+ p3 = __lsx_vld(dst_tmp, 0);
+ DUP2_ARG2(__lsx_vldx, dst_tmp, stride, dst_tmp, stride2, p2, p1);
+ p0 = __lsx_vldx(dst_tmp, stride3);
+ dst_tmp += stride4;
+ q0 = __lsx_vld(dst_tmp, 0);
+ DUP2_ARG2(__lsx_vldx, dst_tmp, stride, dst_tmp, stride2, q1, q2);
+ q3 = __lsx_vldx(dst_tmp, stride3);
+
+ LSX_TRANSPOSE8x8_B(p3, p2, p1, p0, q0, q1, q2, q3,
+ p3, p2, p1, p0, q0, q1, q2, q3);
+
+ thresh = __lsx_vreplgr2vr_b(thresh_ptr);
+ b_limit = __lsx_vreplgr2vr_b(b_limit_ptr);
+ limit = __lsx_vreplgr2vr_b(limit_ptr);
+
+ /* mask and hev */
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
+ hev, mask, flat);
+ /* flat4 */
+ VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
+ /* filter4 */
+ VP9_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out,
+ q1_out);
+
+ flat = __lsx_vilvl_d(zero, flat);
+
+    /* if flat is zero for all pixels, the filter8 path can be skipped */
+ if (__lsx_bz_v(flat)) {
+        /* Store 4 pixels p1..q1 */
+ DUP2_ARG2(__lsx_vilvl_b, p0_out, p1_out, q1_out, q0_out, vec0, vec1);
+ vec2 = __lsx_vilvl_h(vec1, vec0);
+ vec3 = __lsx_vilvh_h(vec1, vec0);
+
+ dst -= 2;
+ __lsx_vstelm_w(vec2, dst, 0, 0);
+ __lsx_vstelm_w(vec2, dst + stride, 0, 1);
+ __lsx_vstelm_w(vec2, dst + stride2, 0, 2);
+ __lsx_vstelm_w(vec2, dst + stride3, 0, 3);
+ dst += stride4;
+ __lsx_vstelm_w(vec3, dst, 0, 0);
+ __lsx_vstelm_w(vec3, dst + stride, 0, 1);
+ __lsx_vstelm_w(vec3, dst + stride2, 0, 2);
+ __lsx_vstelm_w(vec3, dst + stride3, 0, 3);
+ } else {
+ DUP4_ARG2(__lsx_vilvl_b, zero, p3, zero, p2, zero, p1, zero, p0,
+ p3_l, p2_l, p1_l, p0_l);
+ DUP4_ARG2(__lsx_vilvl_b, zero, q0, zero, q1, zero, q2, zero, q3,
+ q0_l, q1_l, q2_l, q3_l);
+ VP9_FILTER8(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, p2_filt8_l,
+ p1_filt8_l, p0_filt8_l, q0_filt8_l, q1_filt8_l, q2_filt8_l);
+ /* convert 16 bit output data into 8 bit */
+ DUP4_ARG2(__lsx_vpickev_b, p2_filt8_l, p2_filt8_l, p1_filt8_l,
+ p1_filt8_l, p0_filt8_l, p0_filt8_l, q0_filt8_l,
+ q0_filt8_l, p2_filt8_l, p1_filt8_l, p0_filt8_l,
+ q0_filt8_l);
+ DUP2_ARG2(__lsx_vpickev_b, q1_filt8_l, q1_filt8_l, q2_filt8_l,
+ q2_filt8_l, q1_filt8_l, q2_filt8_l);
+
+ /* store pixel values */
+ p2 = __lsx_vbitsel_v(p2, p2_filt8_l, flat);
+ p1 = __lsx_vbitsel_v(p1_out, p1_filt8_l, flat);
+ p0 = __lsx_vbitsel_v(p0_out, p0_filt8_l, flat);
+ q0 = __lsx_vbitsel_v(q0_out, q0_filt8_l, flat);
+ q1 = __lsx_vbitsel_v(q1_out, q1_filt8_l, flat);
+ q2 = __lsx_vbitsel_v(q2, q2_filt8_l, flat);
+
+        /* Store 6 pixels p2..q2 */
+ DUP2_ARG2(__lsx_vilvl_b, p1, p2, q0, p0, vec0, vec1);
+ vec2 = __lsx_vilvl_h(vec1, vec0);
+ vec3 = __lsx_vilvh_h(vec1, vec0);
+ vec4 = __lsx_vilvl_b(q2, q1);
+
+ dst -= 3;
+ __lsx_vstelm_w(vec2, dst, 0, 0);
+ __lsx_vstelm_h(vec4, dst, 4, 0);
+ dst += stride;
+ __lsx_vstelm_w(vec2, dst, 0, 1);
+ __lsx_vstelm_h(vec4, dst, 4, 1);
+ dst += stride;
+ __lsx_vstelm_w(vec2, dst, 0, 2);
+ __lsx_vstelm_h(vec4, dst, 4, 2);
+ dst += stride;
+ __lsx_vstelm_w(vec2, dst, 0, 3);
+ __lsx_vstelm_h(vec4, dst, 4, 3);
+ dst += stride;
+ __lsx_vstelm_w(vec3, dst, 0, 0);
+ __lsx_vstelm_h(vec4, dst, 4, 4);
+ dst += stride;
+ __lsx_vstelm_w(vec3, dst, 0, 1);
+ __lsx_vstelm_h(vec4, dst, 4, 5);
+ dst += stride;
+ __lsx_vstelm_w(vec3, dst, 0, 2);
+ __lsx_vstelm_h(vec4, dst, 4, 6);
+ dst += stride;
+ __lsx_vstelm_w(vec3, dst, 0, 3);
+ __lsx_vstelm_h(vec4, dst, 4, 7);
+ }
+}
+
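+/*
+ * Dual filter8 on a 16-row vertical edge: both 8-row halves get the full
+ * flat4/filter8 treatment, with per-half parameters packed as in the 44
+ * variant.
+ */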
+void ff_loop_filter_h_88_16_lsx(uint8_t *dst, ptrdiff_t stride,
+ int32_t b_limit_ptr,
+ int32_t limit_ptr,
+ int32_t thresh_ptr)
+{
+ ptrdiff_t stride2 = stride << 1;
+ ptrdiff_t stride3 = stride2 + stride;
+ ptrdiff_t stride4 = stride2 << 1;
+ uint8_t *dst_tmp = dst - 4;
+ __m128i p3, p2, p1, p0, q3, q2, q1, q0;
+ __m128i p1_out, p0_out, q0_out, q1_out;
+ __m128i flat, mask, hev, thresh, b_limit, limit;
+ __m128i row4, row5, row6, row7, row12, row13, row14, row15;
+ __m128i p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l;
+ __m128i p3_h, p2_h, p1_h, p0_h, q0_h, q1_h, q2_h, q3_h;
+ __m128i p2_filt8_l, p1_filt8_l, p0_filt8_l;
+ __m128i q0_filt8_l, q1_filt8_l, q2_filt8_l;
+ __m128i p2_filt8_h, p1_filt8_h, p0_filt8_h;
+ __m128i q0_filt8_h, q1_filt8_h, q2_filt8_h;
+ __m128i vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
+ __m128i zero = __lsx_vldi(0);
+
+ p0 = __lsx_vld(dst_tmp, 0);
+ DUP2_ARG2(__lsx_vldx, dst_tmp, stride, dst_tmp, stride2, p1, p2);
+ p3 = __lsx_vldx(dst_tmp, stride3);
+ dst_tmp += stride4;
+ row4 = __lsx_vld(dst_tmp, 0);
+ DUP2_ARG2(__lsx_vldx, dst_tmp, stride, dst_tmp, stride2, row5, row6);
+ row7 = __lsx_vldx(dst_tmp, stride3);
+ dst_tmp += stride4;
+ q3 = __lsx_vld(dst_tmp, 0);
+ DUP2_ARG2(__lsx_vldx, dst_tmp, stride, dst_tmp, stride2, q2, q1);
+ q0 = __lsx_vldx(dst_tmp, stride3);
+ dst_tmp += stride4;
+ row12 = __lsx_vld(dst_tmp, 0);
+ DUP2_ARG2(__lsx_vldx, dst_tmp, stride, dst_tmp, stride2, row13, row14);
+ row15 = __lsx_vldx(dst_tmp, stride3);
+
+ /* transpose 16x8 matrix into 8x16 */
+ LSX_TRANSPOSE16x8_B(p0, p1, p2, p3, row4, row5, row6, row7,
+ q3, q2, q1, q0, row12, row13, row14, row15,
+ p3, p2, p1, p0, q0, q1, q2, q3);
+
+ thresh = __lsx_vreplgr2vr_b(thresh_ptr);
+ vec0 = __lsx_vreplgr2vr_b(thresh_ptr >> 8);
+ thresh = __lsx_vilvl_d(vec0, thresh);
+
+ b_limit = __lsx_vreplgr2vr_b(b_limit_ptr);
+ vec0 = __lsx_vreplgr2vr_b(b_limit_ptr >> 8);
+ b_limit = __lsx_vilvl_d(vec0, b_limit);
+
+ limit = __lsx_vreplgr2vr_b(limit_ptr);
+ vec0 = __lsx_vreplgr2vr_b(limit_ptr >> 8);
+ limit = __lsx_vilvl_d(vec0, limit);
+
+ /* mask and hev */
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
+ hev, mask, flat);
+ /* flat4 */
+ VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
+ /* filter4 */
+ VP9_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out,
+ q1_out);
+
+    /* if flat is zero for all pixels, the filter8 path can be skipped */
+ if (__lsx_bz_v(flat)) {
+ DUP2_ARG2(__lsx_vilvl_b, p0_out, p1_out, q1_out, q0_out, vec0, vec1);
+ vec2 = __lsx_vilvl_h(vec1, vec0);
+ vec3 = __lsx_vilvh_h(vec1, vec0);
+ DUP2_ARG2(__lsx_vilvh_b, p0_out, p1_out, q1_out, q0_out, vec0, vec1);
+ vec4 = __lsx_vilvl_h(vec1, vec0);
+ vec5 = __lsx_vilvh_h(vec1, vec0);
+
+ dst -= 2;
+ __lsx_vstelm_w(vec2, dst, 0, 0);
+ __lsx_vstelm_w(vec2, dst + stride, 0, 1);
+ __lsx_vstelm_w(vec2, dst + stride2, 0, 2);
+ __lsx_vstelm_w(vec2, dst + stride3, 0, 3);
+ dst += stride4;
+ __lsx_vstelm_w(vec3, dst, 0, 0);
+ __lsx_vstelm_w(vec3, dst + stride, 0, 1);
+ __lsx_vstelm_w(vec3, dst + stride2, 0, 2);
+ __lsx_vstelm_w(vec3, dst + stride3, 0, 3);
+ dst += stride4;
+ __lsx_vstelm_w(vec4, dst, 0, 0);
+ __lsx_vstelm_w(vec4, dst + stride, 0, 1);
+ __lsx_vstelm_w(vec4, dst + stride2, 0, 2);
+ __lsx_vstelm_w(vec4, dst + stride3, 0, 3);
+ dst += stride4;
+ __lsx_vstelm_w(vec5, dst, 0, 0);
+ __lsx_vstelm_w(vec5, dst + stride, 0, 1);
+ __lsx_vstelm_w(vec5, dst + stride2, 0, 2);
+ __lsx_vstelm_w(vec5, dst + stride3, 0, 3);
+ } else {
+ DUP4_ARG2(__lsx_vilvl_b, zero, p3, zero, p2, zero, p1, zero, p0,
+ p3_l, p2_l, p1_l, p0_l);
+ DUP4_ARG2(__lsx_vilvl_b, zero, q0, zero, q1, zero, q2, zero, q3,
+ q0_l, q1_l, q2_l, q3_l);
+ VP9_FILTER8(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, p2_filt8_l,
+ p1_filt8_l, p0_filt8_l, q0_filt8_l, q1_filt8_l, q2_filt8_l);
+
+ DUP4_ARG2(__lsx_vilvh_b, zero, p3, zero, p2, zero, p1, zero, p0,
+ p3_h, p2_h, p1_h, p0_h);
+ DUP4_ARG2(__lsx_vilvh_b, zero, q0, zero, q1, zero, q2, zero, q3,
+ q0_h, q1_h, q2_h, q3_h);
+
+ /* filter8 */
+ VP9_FILTER8(p3_h, p2_h, p1_h, p0_h, q0_h, q1_h, q2_h, q3_h, p2_filt8_h,
+ p1_filt8_h, p0_filt8_h, q0_filt8_h, q1_filt8_h, q2_filt8_h);
+
+ /* convert 16 bit output data into 8 bit */
+ DUP4_ARG2(__lsx_vpickev_b, p2_filt8_h, p2_filt8_l, p1_filt8_h,
+ p1_filt8_l, p0_filt8_h, p0_filt8_l, q0_filt8_h, q0_filt8_l,
+ p2_filt8_l, p1_filt8_l, p0_filt8_l, q0_filt8_l);
+ DUP2_ARG2(__lsx_vpickev_b, q1_filt8_h, q1_filt8_l, q2_filt8_h,
+ q2_filt8_l, q1_filt8_l, q2_filt8_l);
+
+ /* store pixel values */
+ p2 = __lsx_vbitsel_v(p2, p2_filt8_l, flat);
+ p1 = __lsx_vbitsel_v(p1_out, p1_filt8_l, flat);
+ p0 = __lsx_vbitsel_v(p0_out, p0_filt8_l, flat);
+ q0 = __lsx_vbitsel_v(q0_out, q0_filt8_l, flat);
+ q1 = __lsx_vbitsel_v(q1_out, q1_filt8_l, flat);
+ q2 = __lsx_vbitsel_v(q2, q2_filt8_l, flat);
+
+ DUP2_ARG2(__lsx_vilvl_b, p1, p2, q0, p0, vec0, vec1);
+ vec3 = __lsx_vilvl_h(vec1, vec0);
+ vec4 = __lsx_vilvh_h(vec1, vec0);
+ DUP2_ARG2(__lsx_vilvh_b, p1, p2, q0, p0, vec0, vec1);
+ vec6 = __lsx_vilvl_h(vec1, vec0);
+ vec7 = __lsx_vilvh_h(vec1, vec0);
+ vec2 = __lsx_vilvl_b(q2, q1);
+ vec5 = __lsx_vilvh_b(q2, q1);
+
+ dst -= 3;
+ __lsx_vstelm_w(vec3, dst, 0, 0);
+ __lsx_vstelm_h(vec2, dst, 4, 0);
+ dst += stride;
+ __lsx_vstelm_w(vec3, dst, 0, 1);
+ __lsx_vstelm_h(vec2, dst, 4, 1);
+ dst += stride;
+ __lsx_vstelm_w(vec3, dst, 0, 2);
+ __lsx_vstelm_h(vec2, dst, 4, 2);
+ dst += stride;
+ __lsx_vstelm_w(vec3, dst, 0, 3);
+ __lsx_vstelm_h(vec2, dst, 4, 3);
+ dst += stride;
+ __lsx_vstelm_w(vec4, dst, 0, 0);
+ __lsx_vstelm_h(vec2, dst, 4, 4);
+ dst += stride;
+ __lsx_vstelm_w(vec4, dst, 0, 1);
+ __lsx_vstelm_h(vec2, dst, 4, 5);
+ dst += stride;
+ __lsx_vstelm_w(vec4, dst, 0, 2);
+ __lsx_vstelm_h(vec2, dst, 4, 6);
+ dst += stride;
+ __lsx_vstelm_w(vec4, dst, 0, 3);
+ __lsx_vstelm_h(vec2, dst, 4, 7);
+ dst += stride;
+ __lsx_vstelm_w(vec6, dst, 0, 0);
+ __lsx_vstelm_h(vec5, dst, 4, 0);
+ dst += stride;
+ __lsx_vstelm_w(vec6, dst, 0, 1);
+ __lsx_vstelm_h(vec5, dst, 4, 1);
+ dst += stride;
+ __lsx_vstelm_w(vec6, dst, 0, 2);
+ __lsx_vstelm_h(vec5, dst, 4, 2);
+ dst += stride;
+ __lsx_vstelm_w(vec6, dst, 0, 3);
+ __lsx_vstelm_h(vec5, dst, 4, 3);
+ dst += stride;
+ __lsx_vstelm_w(vec7, dst, 0, 0);
+ __lsx_vstelm_h(vec5, dst, 4, 4);
+ dst += stride;
+ __lsx_vstelm_w(vec7, dst, 0, 1);
+ __lsx_vstelm_h(vec5, dst, 4, 5);
+ dst += stride;
+ __lsx_vstelm_w(vec7, dst, 0, 2);
+ __lsx_vstelm_h(vec5, dst, 4, 6);
+ dst += stride;
+ __lsx_vstelm_w(vec7, dst, 0, 3);
+ __lsx_vstelm_h(vec5, dst, 4, 7);
+ }
+}
+
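+/*
+ * Mixed 8/4 filter on a 16-row vertical edge: rows 0-7 may take the filter8
+ * path (flat is masked to the low half), rows 8-15 always use filter4.
+ */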
+void ff_loop_filter_h_84_16_lsx(uint8_t *dst, ptrdiff_t stride,
+ int32_t b_limit_ptr,
+ int32_t limit_ptr,
+ int32_t thresh_ptr)
+{
+ ptrdiff_t stride2 = stride << 1;
+ ptrdiff_t stride3 = stride2 + stride;
+ ptrdiff_t stride4 = stride2 << 1;
+ uint8_t *dst_tmp = dst - 4;
+ __m128i p3, p2, p1, p0, q3, q2, q1, q0;
+ __m128i p1_out, p0_out, q0_out, q1_out;
+ __m128i flat, mask, hev, thresh, b_limit, limit;
+ __m128i row4, row5, row6, row7, row12, row13, row14, row15;
+ __m128i p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l;
+ __m128i p2_filt8_l, p1_filt8_l, p0_filt8_l;
+ __m128i q0_filt8_l, q1_filt8_l, q2_filt8_l;
+ __m128i vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
+ __m128i zero = __lsx_vldi(0);
+
+ p0 = __lsx_vld(dst_tmp, 0);
+ DUP2_ARG2(__lsx_vldx, dst_tmp, stride, dst_tmp, stride2, p1, p2);
+ p3 = __lsx_vldx(dst_tmp, stride3);
+ dst_tmp += stride4;
+ row4 = __lsx_vld(dst_tmp, 0);
+ DUP2_ARG2(__lsx_vldx, dst_tmp, stride, dst_tmp, stride2, row5, row6);
+ row7 = __lsx_vldx(dst_tmp, stride3);
+ dst_tmp += stride4;
+ q3 = __lsx_vld(dst_tmp, 0);
+ DUP2_ARG2(__lsx_vldx, dst_tmp, stride, dst_tmp, stride2, q2, q1);
+ q0 = __lsx_vldx(dst_tmp, stride3);
+ dst_tmp += stride4;
+ row12 = __lsx_vld(dst_tmp, 0);
+ DUP2_ARG2(__lsx_vldx, dst_tmp, stride, dst_tmp, stride2, row13, row14);
+ row15 = __lsx_vldx(dst_tmp, stride3);
+
+ /* transpose 16x8 matrix into 8x16 */
+ LSX_TRANSPOSE16x8_B(p0, p1, p2, p3, row4, row5, row6, row7,
+ q3, q2, q1, q0, row12, row13, row14, row15,
+ p3, p2, p1, p0, q0, q1, q2, q3);
+
+ thresh = __lsx_vreplgr2vr_b(thresh_ptr);
+ vec0 = __lsx_vreplgr2vr_b(thresh_ptr >> 8);
+ thresh = __lsx_vilvl_d(vec0, thresh);
+
+ b_limit = __lsx_vreplgr2vr_b(b_limit_ptr);
+ vec0 = __lsx_vreplgr2vr_b(b_limit_ptr >> 8);
+ b_limit = __lsx_vilvl_d(vec0, b_limit);
+
+ limit = __lsx_vreplgr2vr_b(limit_ptr);
+ vec0 = __lsx_vreplgr2vr_b(limit_ptr >> 8);
+ limit = __lsx_vilvl_d(vec0, limit);
+
+ /* mask and hev */
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
+ hev, mask, flat);
+ /* flat4 */
+ VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
+ /* filter4 */
+ VP9_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out,
+ q1_out);
+
+ flat = __lsx_vilvl_d(zero, flat);
+
+    /* if flat is zero for all pixels, the filter8 path can be skipped */
+ if (__lsx_bz_v(flat)) {
+ DUP2_ARG2(__lsx_vilvl_b, p0_out, p1_out, q1_out, q0_out, vec0, vec1);
+ vec2 = __lsx_vilvl_h(vec1, vec0);
+ vec3 = __lsx_vilvh_h(vec1, vec0);
+ DUP2_ARG2(__lsx_vilvh_b, p0_out, p1_out, q1_out, q0_out, vec0, vec1);
+ vec4 = __lsx_vilvl_h(vec1, vec0);
+ vec5 = __lsx_vilvh_h(vec1, vec0);
+
+ dst -= 2;
+ __lsx_vstelm_w(vec2, dst, 0, 0);
+ __lsx_vstelm_w(vec2, dst + stride, 0, 1);
+ __lsx_vstelm_w(vec2, dst + stride2, 0, 2);
+ __lsx_vstelm_w(vec2, dst + stride3, 0, 3);
+ dst += stride4;
+ __lsx_vstelm_w(vec3, dst, 0, 0);
+ __lsx_vstelm_w(vec3, dst + stride, 0, 1);
+ __lsx_vstelm_w(vec3, dst + stride2, 0, 2);
+ __lsx_vstelm_w(vec3, dst + stride3, 0, 3);
+ dst += stride4;
+ __lsx_vstelm_w(vec4, dst, 0, 0);
+ __lsx_vstelm_w(vec4, dst + stride, 0, 1);
+ __lsx_vstelm_w(vec4, dst + stride2, 0, 2);
+ __lsx_vstelm_w(vec4, dst + stride3, 0, 3);
+ dst += stride4;
+ __lsx_vstelm_w(vec5, dst, 0, 0);
+ __lsx_vstelm_w(vec5, dst + stride, 0, 1);
+ __lsx_vstelm_w(vec5, dst + stride2, 0, 2);
+ __lsx_vstelm_w(vec5, dst + stride3, 0, 3);
+ } else {
+ DUP4_ARG2(__lsx_vilvl_b, zero, p3, zero, p2, zero, p1, zero, p0,
+ p3_l, p2_l, p1_l, p0_l);
+ DUP4_ARG2(__lsx_vilvl_b, zero, q0, zero, q1, zero, q2, zero, q3,
+ q0_l, q1_l, q2_l, q3_l);
+ VP9_FILTER8(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, p2_filt8_l,
+ p1_filt8_l, p0_filt8_l, q0_filt8_l, q1_filt8_l, q2_filt8_l);
+
+ /* convert 16 bit output data into 8 bit */
+ DUP4_ARG2(__lsx_vpickev_b, p2_filt8_l, p2_filt8_l, p1_filt8_l, p1_filt8_l,
+ p0_filt8_l, p0_filt8_l, q0_filt8_l, q0_filt8_l, p2_filt8_l,
+ p1_filt8_l, p0_filt8_l, q0_filt8_l);
+ DUP2_ARG2(__lsx_vpickev_b, q1_filt8_l, q1_filt8_l, q2_filt8_l, q2_filt8_l,
+ q1_filt8_l, q2_filt8_l);
+
+ /* store pixel values */
+ p2 = __lsx_vbitsel_v(p2, p2_filt8_l, flat);
+ p1 = __lsx_vbitsel_v(p1_out, p1_filt8_l, flat);
+ p0 = __lsx_vbitsel_v(p0_out, p0_filt8_l, flat);
+ q0 = __lsx_vbitsel_v(q0_out, q0_filt8_l, flat);
+ q1 = __lsx_vbitsel_v(q1_out, q1_filt8_l, flat);
+ q2 = __lsx_vbitsel_v(q2, q2_filt8_l, flat);
+
+ DUP2_ARG2(__lsx_vilvl_b, p1, p2, q0, p0, vec0, vec1);
+ vec3 = __lsx_vilvl_h(vec1, vec0);
+ vec4 = __lsx_vilvh_h(vec1, vec0);
+ DUP2_ARG2(__lsx_vilvh_b, p1, p2, q0, p0, vec0, vec1);
+ vec6 = __lsx_vilvl_h(vec1, vec0);
+ vec7 = __lsx_vilvh_h(vec1, vec0);
+ vec2 = __lsx_vilvl_b(q2, q1);
+ vec5 = __lsx_vilvh_b(q2, q1);
+
+ dst -= 3;
+ __lsx_vstelm_w(vec3, dst, 0, 0);
+ __lsx_vstelm_h(vec2, dst, 4, 0);
+ dst += stride;
+ __lsx_vstelm_w(vec3, dst, 0, 1);
+ __lsx_vstelm_h(vec2, dst, 4, 1);
+ dst += stride;
+ __lsx_vstelm_w(vec3, dst, 0, 2);
+ __lsx_vstelm_h(vec2, dst, 4, 2);
+ dst += stride;
+ __lsx_vstelm_w(vec3, dst, 0, 3);
+ __lsx_vstelm_h(vec2, dst, 4, 3);
+ dst += stride;
+ __lsx_vstelm_w(vec4, dst, 0, 0);
+ __lsx_vstelm_h(vec2, dst, 4, 4);
+ dst += stride;
+ __lsx_vstelm_w(vec4, dst, 0, 1);
+ __lsx_vstelm_h(vec2, dst, 4, 5);
+ dst += stride;
+ __lsx_vstelm_w(vec4, dst, 0, 2);
+ __lsx_vstelm_h(vec2, dst, 4, 6);
+ dst += stride;
+ __lsx_vstelm_w(vec4, dst, 0, 3);
+ __lsx_vstelm_h(vec2, dst, 4, 7);
+ dst += stride;
+ __lsx_vstelm_w(vec6, dst, 0, 0);
+ __lsx_vstelm_h(vec5, dst, 4, 0);
+ dst += stride;
+ __lsx_vstelm_w(vec6, dst, 0, 1);
+ __lsx_vstelm_h(vec5, dst, 4, 1);
+ dst += stride;
+ __lsx_vstelm_w(vec6, dst, 0, 2);
+ __lsx_vstelm_h(vec5, dst, 4, 2);
+ dst += stride;
+ __lsx_vstelm_w(vec6, dst, 0, 3);
+ __lsx_vstelm_h(vec5, dst, 4, 3);
+ dst += stride;
+ __lsx_vstelm_w(vec7, dst, 0, 0);
+ __lsx_vstelm_h(vec5, dst, 4, 4);
+ dst += stride;
+ __lsx_vstelm_w(vec7, dst, 0, 1);
+ __lsx_vstelm_h(vec5, dst, 4, 5);
+ dst += stride;
+ __lsx_vstelm_w(vec7, dst, 0, 2);
+ __lsx_vstelm_h(vec5, dst, 4, 6);
+ dst += stride;
+ __lsx_vstelm_w(vec7, dst, 0, 3);
+ __lsx_vstelm_h(vec5, dst, 4, 7);
+ }
+}
+
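+/*
+ * Mirror of the 84 case: rows 0-7 use filter4 only, while rows 8-15 may take
+ * the filter8 path (flat is masked to the high half).
+ */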
+void ff_loop_filter_h_48_16_lsx(uint8_t *dst, ptrdiff_t stride,
+ int32_t b_limit_ptr,
+ int32_t limit_ptr,
+ int32_t thresh_ptr)
+{
+ ptrdiff_t stride2 = stride << 1;
+ ptrdiff_t stride3 = stride2 + stride;
+ ptrdiff_t stride4 = stride2 << 1;
+ uint8_t *dst_tmp = dst - 4;
+ __m128i p3, p2, p1, p0, q3, q2, q1, q0;
+ __m128i p1_out, p0_out, q0_out, q1_out;
+ __m128i flat, mask, hev, thresh, b_limit, limit;
+ __m128i row4, row5, row6, row7, row12, row13, row14, row15;
+ __m128i p3_h, p2_h, p1_h, p0_h, q0_h, q1_h, q2_h, q3_h;
+ __m128i p2_filt8_h, p1_filt8_h, p0_filt8_h;
+ __m128i q0_filt8_h, q1_filt8_h, q2_filt8_h;
+ __m128i vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
+ __m128i zero = __lsx_vldi(0);
+
+ p0 = __lsx_vld(dst_tmp, 0);
+ DUP2_ARG2(__lsx_vldx, dst_tmp, stride, dst_tmp, stride2, p1, p2);
+ p3 = __lsx_vldx(dst_tmp, stride3);
+ dst_tmp += stride4;
+ row4 = __lsx_vld(dst_tmp, 0);
+ DUP2_ARG2(__lsx_vldx, dst_tmp, stride, dst_tmp, stride2, row5, row6);
+ row7 = __lsx_vldx(dst_tmp, stride3);
+ dst_tmp += stride4;
+ q3 = __lsx_vld(dst_tmp, 0);
+ DUP2_ARG2(__lsx_vldx, dst_tmp, stride, dst_tmp, stride2, q2, q1);
+ q0 = __lsx_vldx(dst_tmp, stride3);
+ dst_tmp += stride4;
+ row12 = __lsx_vld(dst_tmp, 0);
+ DUP2_ARG2(__lsx_vldx, dst_tmp, stride, dst_tmp, stride2, row13, row14);
+ row15 = __lsx_vldx(dst_tmp, stride3);
+
+ /* transpose 16x8 matrix into 8x16 */
+ LSX_TRANSPOSE16x8_B(p0, p1, p2, p3, row4, row5, row6, row7,
+ q3, q2, q1, q0, row12, row13, row14, row15,
+ p3, p2, p1, p0, q0, q1, q2, q3);
+
+ thresh = __lsx_vreplgr2vr_b(thresh_ptr);
+ vec0 = __lsx_vreplgr2vr_b(thresh_ptr >> 8);
+ thresh = __lsx_vilvl_d(vec0, thresh);
+
+ b_limit = __lsx_vreplgr2vr_b(b_limit_ptr);
+ vec0 = __lsx_vreplgr2vr_b(b_limit_ptr >> 8);
+ b_limit = __lsx_vilvl_d(vec0, b_limit);
+
+ limit = __lsx_vreplgr2vr_b(limit_ptr);
+ vec0 = __lsx_vreplgr2vr_b(limit_ptr >> 8);
+ limit = __lsx_vilvl_d(vec0, limit);
+
+ /* mask and hev */
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
+ hev, mask, flat);
+ /* flat4 */
+ VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
+ /* filter4 */
+ VP9_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out,
+ q1_out);
+
+ flat = __lsx_vilvh_d(flat, zero);
+
+    /* if flat is zero for all pixels, the filter8 path can be skipped */
+ if (__lsx_bz_v(flat)) {
+ DUP2_ARG2(__lsx_vilvl_b, p0_out, p1_out, q1_out, q0_out, vec0, vec1);
+ vec2 = __lsx_vilvl_h(vec1, vec0);
+ vec3 = __lsx_vilvh_h(vec1, vec0);
+ DUP2_ARG2(__lsx_vilvh_b, p0_out, p1_out, q1_out, q0_out, vec0, vec1);
+ vec4 = __lsx_vilvl_h(vec1, vec0);
+ vec5 = __lsx_vilvh_h(vec1, vec0);
+
+ dst -= 2;
+ __lsx_vstelm_w(vec2, dst, 0, 0);
+ __lsx_vstelm_w(vec2, dst + stride, 0, 1);
+ __lsx_vstelm_w(vec2, dst + stride2, 0, 2);
+ __lsx_vstelm_w(vec2, dst + stride3, 0, 3);
+ dst += stride4;
+ __lsx_vstelm_w(vec3, dst, 0, 0);
+ __lsx_vstelm_w(vec3, dst + stride, 0, 1);
+ __lsx_vstelm_w(vec3, dst + stride2, 0, 2);
+ __lsx_vstelm_w(vec3, dst + stride3, 0, 3);
+ dst += stride4;
+ __lsx_vstelm_w(vec4, dst, 0, 0);
+ __lsx_vstelm_w(vec4, dst + stride, 0, 1);
+ __lsx_vstelm_w(vec4, dst + stride2, 0, 2);
+ __lsx_vstelm_w(vec4, dst + stride3, 0, 3);
+ dst += stride4;
+ __lsx_vstelm_w(vec5, dst, 0, 0);
+ __lsx_vstelm_w(vec5, dst + stride, 0, 1);
+ __lsx_vstelm_w(vec5, dst + stride2, 0, 2);
+ __lsx_vstelm_w(vec5, dst + stride3, 0, 3);
+ } else {
+ DUP4_ARG2(__lsx_vilvh_b, zero, p3, zero, p2, zero, p1, zero, p0,
+ p3_h, p2_h, p1_h, p0_h);
+ DUP4_ARG2(__lsx_vilvh_b, zero, q0, zero, q1, zero, q2, zero, q3,
+ q0_h, q1_h, q2_h, q3_h);
+
+ VP9_FILTER8(p3_h, p2_h, p1_h, p0_h, q0_h, q1_h, q2_h, q3_h, p2_filt8_h,
+ p1_filt8_h, p0_filt8_h, q0_filt8_h, q1_filt8_h, q2_filt8_h);
+
+ /* convert 16 bit output data into 8 bit */
+ DUP4_ARG2(__lsx_vpickev_b, p2_filt8_h, p2_filt8_h, p1_filt8_h,
+ p1_filt8_h, p0_filt8_h, p0_filt8_h, q0_filt8_h, q0_filt8_h,
+ p2_filt8_h, p1_filt8_h, p0_filt8_h, q0_filt8_h);
+ DUP2_ARG2(__lsx_vpickev_b, q1_filt8_h, q1_filt8_h, q2_filt8_h,
+ q2_filt8_h, q1_filt8_h, q2_filt8_h);
+
+ /* store pixel values */
+ p2 = __lsx_vbitsel_v(p2, p2_filt8_h, flat);
+ p1 = __lsx_vbitsel_v(p1_out, p1_filt8_h, flat);
+ p0 = __lsx_vbitsel_v(p0_out, p0_filt8_h, flat);
+ q0 = __lsx_vbitsel_v(q0_out, q0_filt8_h, flat);
+ q1 = __lsx_vbitsel_v(q1_out, q1_filt8_h, flat);
+ q2 = __lsx_vbitsel_v(q2, q2_filt8_h, flat);
+
+ DUP2_ARG2(__lsx_vilvl_b, p1, p2, q0, p0, vec0, vec1);
+ vec3 = __lsx_vilvl_h(vec1, vec0);
+ vec4 = __lsx_vilvh_h(vec1, vec0);
+ DUP2_ARG2(__lsx_vilvh_b, p1, p2, q0, p0, vec0, vec1);
+ vec6 = __lsx_vilvl_h(vec1, vec0);
+ vec7 = __lsx_vilvh_h(vec1, vec0);
+ vec2 = __lsx_vilvl_b(q2, q1);
+ vec5 = __lsx_vilvh_b(q2, q1);
+
+ dst -= 3;
+ __lsx_vstelm_w(vec3, dst, 0, 0);
+ __lsx_vstelm_h(vec2, dst, 4, 0);
+ dst += stride;
+ __lsx_vstelm_w(vec3, dst, 0, 1);
+ __lsx_vstelm_h(vec2, dst, 4, 1);
+ dst += stride;
+ __lsx_vstelm_w(vec3, dst, 0, 2);
+ __lsx_vstelm_h(vec2, dst, 4, 2);
+ dst += stride;
+ __lsx_vstelm_w(vec3, dst, 0, 3);
+ __lsx_vstelm_h(vec2, dst, 4, 3);
+ dst += stride;
+ __lsx_vstelm_w(vec4, dst, 0, 0);
+ __lsx_vstelm_h(vec2, dst, 4, 4);
+ dst += stride;
+ __lsx_vstelm_w(vec4, dst, 0, 1);
+ __lsx_vstelm_h(vec2, dst, 4, 5);
+ dst += stride;
+ __lsx_vstelm_w(vec4, dst, 0, 2);
+ __lsx_vstelm_h(vec2, dst, 4, 6);
+ dst += stride;
+ __lsx_vstelm_w(vec4, dst, 0, 3);
+ __lsx_vstelm_h(vec2, dst, 4, 7);
+ dst += stride;
+ __lsx_vstelm_w(vec6, dst, 0, 0);
+ __lsx_vstelm_h(vec5, dst, 4, 0);
+ dst += stride;
+ __lsx_vstelm_w(vec6, dst, 0, 1);
+ __lsx_vstelm_h(vec5, dst, 4, 1);
+ dst += stride;
+ __lsx_vstelm_w(vec6, dst, 0, 2);
+ __lsx_vstelm_h(vec5, dst, 4, 2);
+ dst += stride;
+ __lsx_vstelm_w(vec6, dst, 0, 3);
+ __lsx_vstelm_h(vec5, dst, 4, 3);
+ dst += stride;
+ __lsx_vstelm_w(vec7, dst, 0, 0);
+ __lsx_vstelm_h(vec5, dst, 4, 4);
+ dst += stride;
+ __lsx_vstelm_w(vec7, dst, 0, 1);
+ __lsx_vstelm_h(vec5, dst, 4, 5);
+ dst += stride;
+ __lsx_vstelm_w(vec7, dst, 0, 2);
+ __lsx_vstelm_h(vec5, dst, 4, 6);
+ dst += stride;
+ __lsx_vstelm_w(vec7, dst, 0, 3);
+ __lsx_vstelm_h(vec5, dst, 4, 7);
+ }
+}
+
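+/*
+ * Transpose helpers for the 16-wide horizontal filter: the block around the
+ * vertical edge is transposed into a scratch buffer holding one 16-byte
+ * vector per column (p7 at offset 0 ... q7 at offset 240), so the wide
+ * filter can address it with a fixed 16-byte pitch before transposing back.
+ */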
+static void vp9_transpose_16x8_to_8x16(uint8_t *input, ptrdiff_t in_pitch,
+ uint8_t *output)
+{
+ __m128i p7_org, p6_org, p5_org, p4_org, p3_org, p2_org, p1_org, p0_org;
+ __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ __m128i p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
+ ptrdiff_t in_pitch2 = in_pitch << 1;
+ ptrdiff_t in_pitch3 = in_pitch2 + in_pitch;
+ ptrdiff_t in_pitch4 = in_pitch2 << 1;
+
+ LSX_LD_8(input, in_pitch, in_pitch2, in_pitch3, in_pitch4,
+ p7_org, p6_org, p5_org, p4_org, p3_org, p2_org, p1_org, p0_org);
+ /* 8x8 transpose */
+ LSX_TRANSPOSE8x8_B(p7_org, p6_org, p5_org, p4_org, p3_org, p2_org, p1_org,
+ p0_org, p7, p6, p5, p4, p3, p2, p1, p0);
+    /* 8x8 transpose of the high (right) halves */
+ DUP4_ARG2(__lsx_vilvh_b, p5_org, p7_org, p4_org, p6_org, p1_org,
+ p3_org, p0_org, p2_org, tmp0, tmp1, tmp2, tmp3);
+ DUP2_ARG2(__lsx_vilvl_b, tmp1, tmp0, tmp3, tmp2, tmp4, tmp6);
+ DUP2_ARG2(__lsx_vilvh_b, tmp1, tmp0, tmp3, tmp2, tmp5, tmp7);
+ DUP2_ARG2(__lsx_vilvl_w, tmp6, tmp4, tmp7, tmp5, q0, q4);
+ DUP2_ARG2(__lsx_vilvh_w, tmp6, tmp4, tmp7, tmp5, q2, q6);
+ DUP4_ARG2(__lsx_vbsrl_v, q0, 8, q2, 8, q4, 8, q6, 8, q1, q3, q5, q7);
+
+ __lsx_vst(p7, output, 0);
+ __lsx_vst(p6, output, 16);
+ __lsx_vst(p5, output, 32);
+ __lsx_vst(p4, output, 48);
+ __lsx_vst(p3, output, 64);
+ __lsx_vst(p2, output, 80);
+ __lsx_vst(p1, output, 96);
+ __lsx_vst(p0, output, 112);
+ __lsx_vst(q0, output, 128);
+ __lsx_vst(q1, output, 144);
+ __lsx_vst(q2, output, 160);
+ __lsx_vst(q3, output, 176);
+ __lsx_vst(q4, output, 192);
+ __lsx_vst(q5, output, 208);
+ __lsx_vst(q6, output, 224);
+ __lsx_vst(q7, output, 240);
+}
+
+static void vp9_transpose_8x16_to_16x8(uint8_t *input, uint8_t *output,
+ ptrdiff_t out_pitch)
+{
+ __m128i p7_o, p6_o, p5_o, p4_o, p3_o, p2_o, p1_o, p0_o;
+ __m128i p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
+ ptrdiff_t out_pitch2 = out_pitch << 1;
+ ptrdiff_t out_pitch3 = out_pitch2 + out_pitch;
+ ptrdiff_t out_pitch4 = out_pitch2 << 1;
+
+ DUP4_ARG2(__lsx_vld, input, 0, input, 16, input, 32, input, 48,
+ p7, p6, p5, p4);
+ DUP4_ARG2(__lsx_vld, input, 64, input, 80, input, 96, input, 112,
+ p3, p2, p1, p0);
+ DUP4_ARG2(__lsx_vld, input, 128, input, 144, input, 160, input, 176,
+ q0, q1, q2, q3);
+ DUP4_ARG2(__lsx_vld, input, 192, input, 208, input, 224, input, 240,
+ q4, q5, q6, q7);
+ LSX_TRANSPOSE16x8_B(p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5,
+ q6, q7, p7_o, p6_o, p5_o, p4_o, p3_o, p2_o, p1_o, p0_o);
+ LSX_ST_8(p7_o, p6_o, p5_o, p4_o, p3_o, p2_o, p1_o, p0_o,
+ output, out_pitch, out_pitch2, out_pitch3, out_pitch4);
+}
+
+static void vp9_transpose_16x16(uint8_t *input, int32_t in_stride,
+ uint8_t *output, int32_t out_stride)
+{
+ __m128i row0, row1, row2, row3, row4, row5, row6, row7;
+ __m128i row8, row9, row10, row11, row12, row13, row14, row15;
+    __m128i tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ __m128i p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
+ int32_t in_stride2 = in_stride << 1;
+ int32_t in_stride3 = in_stride2 + in_stride;
+ int32_t in_stride4 = in_stride2 << 1;
+ int32_t out_stride2 = out_stride << 1;
+ int32_t out_stride3 = out_stride2 + out_stride;
+ int32_t out_stride4 = out_stride2 << 1;
+
+ LSX_LD_8(input, in_stride, in_stride2, in_stride3, in_stride4,
+ row0, row1, row2, row3, row4, row5, row6, row7);
+ input += in_stride4;
+ LSX_LD_8(input, in_stride, in_stride2, in_stride3, in_stride4,
+ row8, row9, row10, row11, row12, row13, row14, row15);
+
+ LSX_TRANSPOSE16x8_B(row0, row1, row2, row3, row4, row5, row6, row7,
+ row8, row9, row10, row11, row12, row13, row14, row15,
+ p7, p6, p5, p4, p3, p2, p1, p0);
+
+ /* transpose 16x8 matrix into 8x16 */
+    /* total 8 intermediate registers and 32 instructions */
+ q7 = __lsx_vpackod_d(row8, row0);
+ q6 = __lsx_vpackod_d(row9, row1);
+ q5 = __lsx_vpackod_d(row10, row2);
+ q4 = __lsx_vpackod_d(row11, row3);
+ q3 = __lsx_vpackod_d(row12, row4);
+ q2 = __lsx_vpackod_d(row13, row5);
+ q1 = __lsx_vpackod_d(row14, row6);
+ q0 = __lsx_vpackod_d(row15, row7);
+
+ DUP2_ARG2(__lsx_vpackev_b, q6, q7, q4, q5, tmp0, tmp1);
+ DUP2_ARG2(__lsx_vpackod_b, q6, q7, q4, q5, tmp4, tmp5);
+
+ DUP2_ARG2(__lsx_vpackev_b, q2, q3, q0, q1, q5, q7);
+ DUP2_ARG2(__lsx_vpackod_b, q2, q3, q0, q1, tmp6, tmp7);
+
+ DUP2_ARG2(__lsx_vpackev_h, tmp1, tmp0, q7, q5, tmp2, tmp3);
+ q0 = __lsx_vpackev_w(tmp3, tmp2);
+ q4 = __lsx_vpackod_w(tmp3, tmp2);
+
+ tmp2 = __lsx_vpackod_h(tmp1, tmp0);
+ tmp3 = __lsx_vpackod_h(q7, q5);
+ q2 = __lsx_vpackev_w(tmp3, tmp2);
+ q6 = __lsx_vpackod_w(tmp3, tmp2);
+
+ DUP2_ARG2(__lsx_vpackev_h, tmp5, tmp4, tmp7, tmp6, tmp2, tmp3);
+ q1 = __lsx_vpackev_w(tmp3, tmp2);
+ q5 = __lsx_vpackod_w(tmp3, tmp2);
+
+ tmp2 = __lsx_vpackod_h(tmp5, tmp4);
+ tmp3 = __lsx_vpackod_h(tmp7, tmp6);
+ q3 = __lsx_vpackev_w(tmp3, tmp2);
+ q7 = __lsx_vpackod_w(tmp3, tmp2);
+
+ LSX_ST_8(p7, p6, p5, p4, p3, p2, p1, p0, output, out_stride,
+ out_stride2, out_stride3, out_stride4);
+ output += out_stride4;
+ LSX_ST_8(q0, q1, q2, q3, q4, q5, q6, q7, output, out_stride,
+ out_stride2, out_stride3, out_stride4);
+}
+
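+/* filter4/filter8 stage of the 8-row vertical-edge filter, run on the
+ * transposed buffer. Returns 1 if the flat mask is all zero: the filter4
+ * result is then written straight back to the picture and the 16-tap stage
+ * is skipped. Otherwise the filter8 output and the flat mask are saved to
+ * filter48 for the next stage and 0 is returned. */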
+static int32_t vp9_vt_lpf_t4_and_t8_8w(uint8_t *src, uint8_t *filter48,
+ uint8_t *src_org, int32_t pitch_org,
+ int32_t b_limit_ptr,
+ int32_t limit_ptr,
+ int32_t thresh_ptr)
+{
+ __m128i p3, p2, p1, p0, q3, q2, q1, q0;
+ __m128i p2_out, p1_out, p0_out, q0_out, q1_out, q2_out;
+ __m128i flat, mask, hev, thresh, b_limit, limit;
+ __m128i p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l;
+ __m128i p2_filt8_l, p1_filt8_l, p0_filt8_l;
+ __m128i q0_filt8_l, q1_filt8_l, q2_filt8_l;
+ __m128i vec0, vec1, vec2, vec3;
+ __m128i zero = __lsx_vldi(0);
+
+ /* load vector elements */
+ DUP4_ARG2(__lsx_vld, src, -64, src, -48, src, -32, src, -16,
+ p3, p2, p1, p0);
+ DUP4_ARG2(__lsx_vld, src, 0, src, 16, src, 32, src, 48, q0, q1, q2, q3);
+
+ thresh = __lsx_vreplgr2vr_b(thresh_ptr);
+ b_limit = __lsx_vreplgr2vr_b(b_limit_ptr);
+ limit = __lsx_vreplgr2vr_b(limit_ptr);
+
+ /* mask and hev */
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
+ hev, mask, flat);
+ /* flat4 */
+ VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
+ /* filter4 */
+ VP9_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out,
+ q1_out);
+
+ flat = __lsx_vilvl_d(zero, flat);
+
+ /* if flat is zero for all pixels, then no need to calculate other filter */
+ if (__lsx_bz_v(flat)) {
+ DUP2_ARG2(__lsx_vilvl_b, p0_out, p1_out, q1_out, q0_out, vec0, vec1);
+ vec2 = __lsx_vilvl_h(vec1, vec0);
+ vec3 = __lsx_vilvh_h(vec1, vec0);
+
+ src_org -= 2;
+ __lsx_vstelm_w(vec2, src_org, 0, 0);
+ src_org += pitch_org;
+ __lsx_vstelm_w(vec2, src_org, 0, 1);
+ src_org += pitch_org;
+ __lsx_vstelm_w(vec2, src_org, 0, 2);
+ src_org += pitch_org;
+ __lsx_vstelm_w(vec2, src_org, 0, 3);
+ src_org += pitch_org;
+ __lsx_vstelm_w(vec3, src_org, 0, 0);
+ src_org += pitch_org;
+ __lsx_vstelm_w(vec3, src_org, 0, 1);
+ src_org += pitch_org;
+ __lsx_vstelm_w(vec3, src_org, 0, 2);
+ src_org += pitch_org;
+ __lsx_vstelm_w(vec3, src_org, 0, 3);
+ return 1;
+ } else {
+ DUP4_ARG2(__lsx_vilvl_b, zero, p3, zero, p2, zero, p1, zero, p0,
+ p3_l, p2_l, p1_l, p0_l);
+ DUP4_ARG2(__lsx_vilvl_b, zero, q0, zero, q1, zero, q2, zero, q3,
+ q0_l, q1_l, q2_l, q3_l);
+ VP9_FILTER8(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, p2_filt8_l,
+ p1_filt8_l, p0_filt8_l, q0_filt8_l, q1_filt8_l, q2_filt8_l);
+
+ /* convert 16 bit output data into 8 bit */
+ p2_l = __lsx_vpickev_b(p2_filt8_l, p2_filt8_l);
+ p1_l = __lsx_vpickev_b(p1_filt8_l, p1_filt8_l);
+ p0_l = __lsx_vpickev_b(p0_filt8_l, p0_filt8_l);
+ q0_l = __lsx_vpickev_b(q0_filt8_l, q0_filt8_l);
+ q1_l = __lsx_vpickev_b(q1_filt8_l, q1_filt8_l);
+ q2_l = __lsx_vpickev_b(q2_filt8_l, q2_filt8_l);
+
+ /* store pixel values */
+ p2_out = __lsx_vbitsel_v(p2, p2_l, flat);
+ p1_out = __lsx_vbitsel_v(p1_out, p1_l, flat);
+ p0_out = __lsx_vbitsel_v(p0_out, p0_l, flat);
+ q0_out = __lsx_vbitsel_v(q0_out, q0_l, flat);
+ q1_out = __lsx_vbitsel_v(q1_out, q1_l, flat);
+ q2_out = __lsx_vbitsel_v(q2, q2_l, flat);
+
+ __lsx_vst(p2_out, filter48, 0);
+ __lsx_vst(p1_out, filter48, 16);
+ __lsx_vst(p0_out, filter48, 32);
+ __lsx_vst(q0_out, filter48, 48);
+ __lsx_vst(q1_out, filter48, 64);
+ __lsx_vst(q2_out, filter48, 80);
+ __lsx_vst(flat, filter48, 96);
+
+ return 0;
+ }
+}
+
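+/* Wide (16-tap) filter stage for the 8-row case. Returns 1 if flat2 is all
+ * zero, in which case only the filter8 result kept in filter48 is written
+ * back to the picture. */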
+static int32_t vp9_vt_lpf_t16_8w(uint8_t *dst, uint8_t *dst_org,
+ ptrdiff_t stride,
+ uint8_t *filter48)
+{
+ __m128i zero = __lsx_vldi(0);
+ __m128i filter8, flat, flat2;
+ __m128i p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
+ v8u16 p7_l_in, p6_l_in, p5_l_in, p4_l_in;
+ v8u16 p3_l_in, p2_l_in, p1_l_in, p0_l_in;
+ v8u16 q7_l_in, q6_l_in, q5_l_in, q4_l_in;
+ v8u16 q3_l_in, q2_l_in, q1_l_in, q0_l_in;
+ v8u16 tmp0_l, tmp1_l;
+ __m128i out_l;
+ uint8_t *dst_tmp = dst - 128;
+
+ /* load vector elements */
+ DUP4_ARG2(__lsx_vld, dst_tmp, 0, dst_tmp, 16, dst_tmp, 32,
+ dst_tmp, 48, p7, p6, p5, p4);
+ DUP4_ARG2(__lsx_vld, dst_tmp, 64, dst_tmp, 80, dst_tmp, 96,
+ dst_tmp, 112, p3, p2, p1, p0);
+ DUP4_ARG2(__lsx_vld, dst, 0, dst, 16, dst, 32, dst, 48, q0, q1, q2, q3);
+ DUP4_ARG2(__lsx_vld, dst, 64, dst, 80, dst, 96, dst, 112, q4, q5, q6, q7);
+
+ flat = __lsx_vld(filter48, 96);
+
+ VP9_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
+
+ /* if flat2 is zero for all pixels, then no need to calculate other filter */
+ if (__lsx_bz_v(flat2)) {
+ __m128i vec0, vec1, vec2, vec3, vec4;
+
+ DUP4_ARG2(__lsx_vld, filter48, 0, filter48, 16, filter48, 32,
+ filter48, 48, p2, p1, p0, q0);
+ DUP2_ARG2(__lsx_vld, filter48, 64, filter48, 80, q1, q2);
+
+ DUP2_ARG2(__lsx_vilvl_b, p1, p2, q0, p0, vec0, vec1);
+ vec3 = __lsx_vilvl_h(vec1, vec0);
+ vec4 = __lsx_vilvh_h(vec1, vec0);
+ vec2 = __lsx_vilvl_b(q2, q1);
+
+ dst_org -= 3;
+ __lsx_vstelm_w(vec3, dst_org, 0, 0);
+ __lsx_vstelm_h(vec2, dst_org, 4, 0);
+ dst_org += stride;
+ __lsx_vstelm_w(vec3, dst_org, 0, 1);
+ __lsx_vstelm_h(vec2, dst_org, 4, 1);
+ dst_org += stride;
+ __lsx_vstelm_w(vec3, dst_org, 0, 2);
+ __lsx_vstelm_h(vec2, dst_org, 4, 2);
+ dst_org += stride;
+ __lsx_vstelm_w(vec3, dst_org, 0, 3);
+ __lsx_vstelm_h(vec2, dst_org, 4, 3);
+ dst_org += stride;
+ __lsx_vstelm_w(vec4, dst_org, 0, 0);
+ __lsx_vstelm_h(vec2, dst_org, 4, 4);
+ dst_org += stride;
+ __lsx_vstelm_w(vec4, dst_org, 0, 1);
+ __lsx_vstelm_h(vec2, dst_org, 4, 5);
+ dst_org += stride;
+ __lsx_vstelm_w(vec4, dst_org, 0, 2);
+ __lsx_vstelm_h(vec2, dst_org, 4, 6);
+ dst_org += stride;
+ __lsx_vstelm_w(vec4, dst_org, 0, 3);
+ __lsx_vstelm_h(vec2, dst_org, 4, 7);
+ return 1;
+ } else {
+ dst -= 7 * 16;
+
+ p7_l_in = (v8u16)__lsx_vilvl_b(zero, p7);
+ p6_l_in = (v8u16)__lsx_vilvl_b(zero, p6);
+ p5_l_in = (v8u16)__lsx_vilvl_b(zero, p5);
+ p4_l_in = (v8u16)__lsx_vilvl_b(zero, p4);
+ p3_l_in = (v8u16)__lsx_vilvl_b(zero, p3);
+ p2_l_in = (v8u16)__lsx_vilvl_b(zero, p2);
+ p1_l_in = (v8u16)__lsx_vilvl_b(zero, p1);
+ p0_l_in = (v8u16)__lsx_vilvl_b(zero, p0);
+ q0_l_in = (v8u16)__lsx_vilvl_b(zero, q0);
+
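+ /* p6 */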
+ tmp0_l = p7_l_in << 3;
+ tmp0_l -= p7_l_in;
+ tmp0_l += p6_l_in;
+ tmp0_l += q0_l_in;
+ tmp1_l = p6_l_in + p5_l_in;
+ tmp1_l += p4_l_in;
+ tmp1_l += p3_l_in;
+ tmp1_l += p2_l_in;
+ tmp1_l += p1_l_in;
+ tmp1_l += p0_l_in;
+ tmp1_l += tmp0_l;
+
+ out_l = __lsx_vsrari_h((__m128i)tmp1_l, 4);
+ out_l = __lsx_vpickev_b(out_l, out_l);
+ p6 = __lsx_vbitsel_v(p6, out_l, flat2);
+ __lsx_vstelm_d(p6, dst, 0, 0);
+ dst += 16;
+
+ /* p5 */
+ q1_l_in = (v8u16)__lsx_vilvl_b(zero, q1);
+ tmp0_l = p5_l_in - p6_l_in;
+ tmp0_l += q1_l_in;
+ tmp0_l -= p7_l_in;
+ tmp1_l += tmp0_l;
+ out_l = __lsx_vsrari_h((__m128i)tmp1_l, 4);
+ out_l = __lsx_vpickev_b(out_l, out_l);
+ p5 = __lsx_vbitsel_v(p5, out_l, flat2);
+ __lsx_vstelm_d(p5, dst, 0, 0);
+ dst += 16;
+
+ /* p4 */
+ q2_l_in = (v8u16)__lsx_vilvl_b(zero, q2);
+ tmp0_l = p4_l_in - p5_l_in;
+ tmp0_l += q2_l_in;
+ tmp0_l -= p7_l_in;
+ tmp1_l += tmp0_l;
+ out_l = __lsx_vsrari_h((__m128i)tmp1_l, 4);
+ out_l = __lsx_vpickev_b(out_l, out_l);
+ p4 = __lsx_vbitsel_v(p4, out_l, flat2);
+ __lsx_vstelm_d(p4, dst, 0, 0);
+ dst += 16;
+
+ /* p3 */
+ q3_l_in = (v8u16)__lsx_vilvl_b(zero, q3);
+ tmp0_l = p3_l_in - p4_l_in;
+ tmp0_l += q3_l_in;
+ tmp0_l -= p7_l_in;
+ tmp1_l += tmp0_l;
+ out_l = __lsx_vsrari_h((__m128i)tmp1_l, 4);
+ out_l = __lsx_vpickev_b(out_l, out_l);
+ p3 = __lsx_vbitsel_v(p3, out_l, flat2);
+ __lsx_vstelm_d(p3, dst, 0, 0);
+ dst += 16;
+
+ /* p2 */
+ q4_l_in = (v8u16)__lsx_vilvl_b(zero, q4);
+ filter8 = __lsx_vld(filter48, 0);
+ tmp0_l = p2_l_in - p3_l_in;
+ tmp0_l += q4_l_in;
+ tmp0_l -= p7_l_in;
+ tmp1_l += tmp0_l;
+ out_l = __lsx_vsrari_h((__m128i)tmp1_l, 4);
+ out_l = __lsx_vpickev_b(out_l, out_l);
+ filter8 = __lsx_vbitsel_v(filter8, out_l, flat2);
+ __lsx_vstelm_d(filter8, dst, 0, 0);
+ dst += 16;
+
+ /* p1 */
+ q5_l_in = (v8u16)__lsx_vilvl_b(zero, q5);
+ filter8 = __lsx_vld(filter48, 16);
+ tmp0_l = p1_l_in - p2_l_in;
+ tmp0_l += q5_l_in;
+ tmp0_l -= p7_l_in;
+ tmp1_l += tmp0_l;
+ out_l = __lsx_vsrari_h((__m128i)tmp1_l, 4);
+ out_l = __lsx_vpickev_b(out_l, out_l);
+ filter8 = __lsx_vbitsel_v(filter8, out_l, flat2);
+ __lsx_vstelm_d(filter8, dst, 0, 0);
+ dst += 16;
+
+ /* p0 */
+ q6_l_in = (v8u16)__lsx_vilvl_b(zero, q6);
+ filter8 = __lsx_vld(filter48, 32);
+ tmp0_l = p0_l_in - p1_l_in;
+ tmp0_l += q6_l_in;
+ tmp0_l -= p7_l_in;
+ tmp1_l += tmp0_l;
+ out_l = __lsx_vsrari_h((__m128i)tmp1_l, 4);
+ out_l = __lsx_vpickev_b(out_l, out_l);
+ filter8 = __lsx_vbitsel_v(filter8, out_l, flat2);
+ __lsx_vstelm_d(filter8, dst, 0, 0);
+ dst += 16;
+
+ /* q0 */
+ q7_l_in = (v8u16)__lsx_vilvl_b(zero, q7);
+ filter8 = __lsx_vld(filter48, 48);
+ tmp0_l = q7_l_in - p0_l_in;
+ tmp0_l += q0_l_in;
+ tmp0_l -= p7_l_in;
+ tmp1_l += tmp0_l;
+ out_l = __lsx_vsrari_h((__m128i)tmp1_l, 4);
+ out_l = __lsx_vpickev_b(out_l, out_l);
+ filter8 = __lsx_vbitsel_v(filter8, out_l, flat2);
+ __lsx_vstelm_d(filter8, dst, 0, 0);
+ dst += 16;
+
+ /* q1 */
+ filter8 = __lsx_vld(filter48, 64);
+ tmp0_l = q7_l_in - q0_l_in;
+ tmp0_l += q1_l_in;
+ tmp0_l -= p6_l_in;
+ tmp1_l += tmp0_l;
+ out_l = __lsx_vsrari_h((__m128i)tmp1_l, 4);
+ out_l = __lsx_vpickev_b(out_l, out_l);
+ filter8 = __lsx_vbitsel_v(filter8, out_l, flat2);
+ __lsx_vstelm_d(filter8, dst, 0, 0);
+ dst += 16;
+
+ /* q2 */
+ filter8 = __lsx_vld(filter48, 80);
+ tmp0_l = q7_l_in - q1_l_in;
+ tmp0_l += q2_l_in;
+ tmp0_l -= p5_l_in;
+ tmp1_l += tmp0_l;
+ out_l = __lsx_vsrari_h((__m128i)tmp1_l, 4);
+ out_l = __lsx_vpickev_b(out_l, out_l);
+ filter8 = __lsx_vbitsel_v(filter8, out_l, flat2);
+ __lsx_vstelm_d(filter8, dst, 0, 0);
+ dst += 16;
+
+ /* q3 */
+ tmp0_l = q7_l_in - q2_l_in;
+ tmp0_l += q3_l_in;
+ tmp0_l -= p4_l_in;
+ tmp1_l += tmp0_l;
+ out_l = __lsx_vsrari_h((__m128i)tmp1_l, 4);
+ out_l = __lsx_vpickev_b(out_l, out_l);
+ q3 = __lsx_vbitsel_v(q3, out_l, flat2);
+ __lsx_vstelm_d(q3, dst, 0, 0);
+ dst += 16;
+
+ /* q4 */
+ tmp0_l = q7_l_in - q3_l_in;
+ tmp0_l += q4_l_in;
+ tmp0_l -= p3_l_in;
+ tmp1_l += tmp0_l;
+ out_l = __lsx_vsrari_h((__m128i)tmp1_l, 4);
+ out_l = __lsx_vpickev_b(out_l, out_l);
+ q4 = __lsx_vbitsel_v(q4, out_l, flat2);
+ __lsx_vstelm_d(q4, dst, 0, 0);
+ dst += 16;
+
+ /* q5 */
+ tmp0_l = q7_l_in - q4_l_in;
+ tmp0_l += q5_l_in;
+ tmp0_l -= p2_l_in;
+ tmp1_l += tmp0_l;
+ out_l = __lsx_vsrari_h((__m128i)tmp1_l, 4);
+ out_l = __lsx_vpickev_b(out_l, out_l);
+ q5 = __lsx_vbitsel_v(q5, out_l, flat2);
+ __lsx_vstelm_d(q5, dst, 0, 0);
+ dst += 16;
+
+ /* q6 */
+ tmp0_l = q7_l_in - q5_l_in;
+ tmp0_l += q6_l_in;
+ tmp0_l -= p1_l_in;
+ tmp1_l += tmp0_l;
+ out_l = __lsx_vsrari_h((__m128i)tmp1_l, 4);
+ out_l = __lsx_vpickev_b(out_l, out_l);
+ q6 = __lsx_vbitsel_v(q6, out_l, flat2);
+ __lsx_vstelm_d(q6, dst, 0, 0);
+
+ return 0;
+ }
+}
+
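+/* Filter across a vertical edge, 16-pixel strength, 8 rows: the 16x8
+ * neighbourhood (8 pixels on each side of the edge) is transposed into a
+ * temporary buffer so the column-wise filter code above can be reused, then
+ * transposed back unless one of the early exits was taken. */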
+void ff_loop_filter_h_16_8_lsx(uint8_t *dst, ptrdiff_t stride,
+ int32_t b_limit_ptr,
+ int32_t limit_ptr,
+ int32_t thresh_ptr)
+{
+ uint8_t early_exit = 0;
+ uint8_t transposed_input[16 * 24] __attribute__ ((aligned(16)));
+ uint8_t *filter48 = &transposed_input[16 * 16];
+
+ vp9_transpose_16x8_to_8x16(dst - 8, stride, transposed_input);
+
+ early_exit = vp9_vt_lpf_t4_and_t8_8w((transposed_input + 16 * 8),
+ &filter48[0], dst, stride,
+ b_limit_ptr, limit_ptr, thresh_ptr);
+
+ if (0 == early_exit) {
+ early_exit = vp9_vt_lpf_t16_8w((transposed_input + 16 * 8), dst, stride,
+ &filter48[0]);
+
+ if (0 == early_exit) {
+ vp9_transpose_8x16_to_16x8(transposed_input, dst - 8, stride);
+ }
+ }
+}
+
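+/* Same as vp9_vt_lpf_t4_and_t8_8w, but covering 16 rows at once: both the
+ * low and high halves of each vector are run through filter8. */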
+static int32_t vp9_vt_lpf_t4_and_t8_16w(uint8_t *dst, uint8_t *filter48,
+ uint8_t *dst_org, ptrdiff_t stride,
+ int32_t b_limit_ptr,
+ int32_t limit_ptr,
+ int32_t thresh_ptr)
+{
+ ptrdiff_t stride2 = stride << 1;
+ ptrdiff_t stride3 = stride2 + stride;
+ ptrdiff_t stride4 = stride2 << 1;
+ __m128i p3, p2, p1, p0, q3, q2, q1, q0;
+ __m128i p2_out, p1_out, p0_out, q0_out, q1_out, q2_out;
+ __m128i flat, mask, hev, thresh, b_limit, limit;
+ __m128i p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l;
+ __m128i p3_h, p2_h, p1_h, p0_h, q0_h, q1_h, q2_h, q3_h;
+ __m128i p2_filt8_l, p1_filt8_l, p0_filt8_l;
+ __m128i q0_filt8_l, q1_filt8_l, q2_filt8_l;
+ __m128i p2_filt8_h, p1_filt8_h, p0_filt8_h;
+ __m128i q0_filt8_h, q1_filt8_h, q2_filt8_h;
+ __m128i vec0, vec1, vec2, vec3, vec4, vec5;
+ __m128i zero = __lsx_vldi(0);
+
+ /* load vector elements */
+ DUP4_ARG2(__lsx_vld, dst, -64, dst, -48, dst, -32, dst, -16,
+ p3, p2, p1, p0);
+ DUP4_ARG2(__lsx_vld, dst, 0, dst, 16, dst, 32, dst, 48, q0, q1, q2, q3);
+
+ thresh = __lsx_vreplgr2vr_b(thresh_ptr);
+ b_limit = __lsx_vreplgr2vr_b(b_limit_ptr);
+ limit = __lsx_vreplgr2vr_b(limit_ptr);
+
+ /* mask and hev */
+ LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
+ hev, mask, flat);
+ /* flat4 */
+ VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);
+ /* filter4 */
+ VP9_LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev, p1_out, p0_out, q0_out,
+ q1_out);
+
+ /* if flat is zero for all pixels, then no need to calculate other filter */
+ if (__lsx_bz_v(flat)) {
+ DUP2_ARG2(__lsx_vilvl_b, p0_out, p1_out, q1_out, q0_out, vec0, vec1);
+ vec2 = __lsx_vilvl_h(vec1, vec0);
+ vec3 = __lsx_vilvh_h(vec1, vec0);
+ DUP2_ARG2(__lsx_vilvh_b, p0_out, p1_out, q1_out, q0_out, vec0, vec1);
+ vec4 = __lsx_vilvl_h(vec1, vec0);
+ vec5 = __lsx_vilvh_h(vec1, vec0);
+
+ dst_org -= 2;
+ __lsx_vstelm_w(vec2, dst_org, 0, 0);
+ __lsx_vstelm_w(vec2, dst_org + stride, 0, 1);
+ __lsx_vstelm_w(vec2, dst_org + stride2, 0, 2);
+ __lsx_vstelm_w(vec2, dst_org + stride3, 0, 3);
+ dst_org += stride4;
+ __lsx_vstelm_w(vec3, dst_org, 0, 0);
+ __lsx_vstelm_w(vec3, dst_org + stride, 0, 1);
+ __lsx_vstelm_w(vec3, dst_org + stride2, 0, 2);
+ __lsx_vstelm_w(vec3, dst_org + stride3, 0, 3);
+ dst_org += stride4;
+ __lsx_vstelm_w(vec4, dst_org, 0, 0);
+ __lsx_vstelm_w(vec4, dst_org + stride, 0, 1);
+ __lsx_vstelm_w(vec4, dst_org + stride2, 0, 2);
+ __lsx_vstelm_w(vec4, dst_org + stride3, 0, 3);
+ dst_org += stride4;
+ __lsx_vstelm_w(vec5, dst_org, 0, 0);
+ __lsx_vstelm_w(vec5, dst_org + stride, 0, 1);
+ __lsx_vstelm_w(vec5, dst_org + stride2, 0, 2);
+ __lsx_vstelm_w(vec5, dst_org + stride3, 0, 3);
+
+ return 1;
+ } else {
+ DUP4_ARG2(__lsx_vilvl_b, zero, p3, zero, p2, zero, p1, zero, p0,
+ p3_l, p2_l, p1_l, p0_l);
+ DUP4_ARG2(__lsx_vilvl_b, zero, q0, zero, q1, zero, q2, zero, q3,
+ q0_l, q1_l, q2_l, q3_l);
+ VP9_FILTER8(p3_l, p2_l, p1_l, p0_l, q0_l, q1_l, q2_l, q3_l, p2_filt8_l,
+ p1_filt8_l, p0_filt8_l, q0_filt8_l, q1_filt8_l, q2_filt8_l);
+ DUP4_ARG2(__lsx_vilvh_b, zero, p3, zero, p2, zero, p1, zero, p0,
+ p3_h, p2_h, p1_h, p0_h);
+ DUP4_ARG2(__lsx_vilvh_b, zero, q0, zero, q1, zero, q2, zero, q3,
+ q0_h, q1_h, q2_h, q3_h);
+ VP9_FILTER8(p3_h, p2_h, p1_h, p0_h, q0_h, q1_h, q2_h, q3_h, p2_filt8_h,
+ p1_filt8_h, p0_filt8_h, q0_filt8_h, q1_filt8_h, q2_filt8_h);
+
+ /* convert 16 bit output data into 8 bit */
+ DUP4_ARG2(__lsx_vpickev_b, p2_filt8_h, p2_filt8_l, p1_filt8_h,
+ p1_filt8_l, p0_filt8_h, p0_filt8_l, q0_filt8_h,
+ q0_filt8_l, p2_filt8_l, p1_filt8_l, p0_filt8_l,
+ q0_filt8_l);
+ DUP2_ARG2(__lsx_vpickev_b, q1_filt8_h, q1_filt8_l, q2_filt8_h,
+ q2_filt8_l, q1_filt8_l, q2_filt8_l);
+
+ /* store pixel values */
+ p2_out = __lsx_vbitsel_v(p2, p2_filt8_l, flat);
+ p1_out = __lsx_vbitsel_v(p1_out, p1_filt8_l, flat);
+ p0_out = __lsx_vbitsel_v(p0_out, p0_filt8_l, flat);
+ q0_out = __lsx_vbitsel_v(q0_out, q0_filt8_l, flat);
+ q1_out = __lsx_vbitsel_v(q1_out, q1_filt8_l, flat);
+ q2_out = __lsx_vbitsel_v(q2, q2_filt8_l, flat);
+
+ __lsx_vst(p2_out, filter48, 0);
+ __lsx_vst(p1_out, filter48, 16);
+ __lsx_vst(p0_out, filter48, 32);
+ __lsx_vst(q0_out, filter48, 48);
+ __lsx_vst(q1_out, filter48, 64);
+ __lsx_vst(q2_out, filter48, 80);
+ __lsx_vst(flat, filter48, 96);
+
+ return 0;
+ }
+}
+
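+/* Wide (16-tap) filter stage for the 16-row case: the low and high halves
+ * are accumulated separately and packed together before blending with
+ * flat2. */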
+static int32_t vp9_vt_lpf_t16_16w(uint8_t *dst, uint8_t *dst_org,
+ ptrdiff_t stride,
+ uint8_t *filter48)
+{
+ __m128i zero = __lsx_vldi(0);
+ __m128i flat, flat2, filter8;
+ __m128i p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;
+ v8u16 p7_l_in, p6_l_in, p5_l_in, p4_l_in;
+ v8u16 p3_l_in, p2_l_in, p1_l_in, p0_l_in;
+ v8u16 q7_l_in, q6_l_in, q5_l_in, q4_l_in;
+ v8u16 q3_l_in, q2_l_in, q1_l_in, q0_l_in;
+ v8u16 p7_h_in, p6_h_in, p5_h_in, p4_h_in;
+ v8u16 p3_h_in, p2_h_in, p1_h_in, p0_h_in;
+ v8u16 q7_h_in, q6_h_in, q5_h_in, q4_h_in;
+ v8u16 q3_h_in, q2_h_in, q1_h_in, q0_h_in;
+ v8u16 tmp0_l, tmp1_l, tmp0_h, tmp1_h;
+ __m128i out_l, out_h;
+ uint8_t *dst_tmp = dst - 128;
+
+ flat = __lsx_vld(filter48, 96);
+
+ DUP4_ARG2(__lsx_vld, dst_tmp, 0, dst_tmp, 16, dst_tmp, 32,
+ dst_tmp, 48, p7, p6, p5, p4);
+ DUP4_ARG2(__lsx_vld, dst_tmp, 64, dst_tmp, 80, dst_tmp, 96,
+ dst_tmp, 112, p3, p2, p1, p0);
+ DUP4_ARG2(__lsx_vld, dst, 0, dst, 16, dst, 32, dst, 48, q0, q1, q2, q3);
+ DUP4_ARG2(__lsx_vld, dst, 64, dst, 80, dst, 96, dst, 112, q4, q5, q6, q7);
+
+ VP9_FLAT5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, flat, flat2);
+
+ /* if flat2 is zero for all pixels, then no need to calculate other filter */
+ if (__lsx_bz_v(flat2)) {
+ __m128i vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;
+
+ DUP4_ARG2(__lsx_vld, filter48, 0, filter48, 16, filter48, 32,
+ filter48, 48, p2, p1, p0, q0);
+ DUP2_ARG2(__lsx_vld, filter48, 64, filter48, 80, q1, q2);
+
+ DUP2_ARG2(__lsx_vilvl_b, p1, p2, q0, p0, vec0, vec1);
+ vec3 = __lsx_vilvl_h(vec1, vec0);
+ vec4 = __lsx_vilvh_h(vec1, vec0);
+ DUP2_ARG2(__lsx_vilvh_b, p1, p2, q0, p0, vec0, vec1);
+ vec6 = __lsx_vilvl_h(vec1, vec0);
+ vec7 = __lsx_vilvh_h(vec1, vec0);
+ vec2 = __lsx_vilvl_b(q2, q1);
+ vec5 = __lsx_vilvh_b(q2, q1);
+
+ dst_org -= 3;
+ __lsx_vstelm_w(vec3, dst_org, 0, 0);
+ __lsx_vstelm_h(vec2, dst_org, 4, 0);
+ dst_org += stride;
+ __lsx_vstelm_w(vec3, dst_org, 0, 1);
+ __lsx_vstelm_h(vec2, dst_org, 4, 1);
+ dst_org += stride;
+ __lsx_vstelm_w(vec3, dst_org, 0, 2);
+ __lsx_vstelm_h(vec2, dst_org, 4, 2);
+ dst_org += stride;
+ __lsx_vstelm_w(vec3, dst_org, 0, 3);
+ __lsx_vstelm_h(vec2, dst_org, 4, 3);
+ dst_org += stride;
+ __lsx_vstelm_w(vec4, dst_org, 0, 0);
+ __lsx_vstelm_h(vec2, dst_org, 4, 4);
+ dst_org += stride;
+ __lsx_vstelm_w(vec4, dst_org, 0, 1);
+ __lsx_vstelm_h(vec2, dst_org, 4, 5);
+ dst_org += stride;
+ __lsx_vstelm_w(vec4, dst_org, 0, 2);
+ __lsx_vstelm_h(vec2, dst_org, 4, 6);
+ dst_org += stride;
+ __lsx_vstelm_w(vec4, dst_org, 0, 3);
+ __lsx_vstelm_h(vec2, dst_org, 4, 7);
+ dst_org += stride;
+ __lsx_vstelm_w(vec6, dst_org, 0, 0);
+ __lsx_vstelm_h(vec5, dst_org, 4, 0);
+ dst_org += stride;
+ __lsx_vstelm_w(vec6, dst_org, 0, 1);
+ __lsx_vstelm_h(vec5, dst_org, 4, 1);
+ dst_org += stride;
+ __lsx_vstelm_w(vec6, dst_org, 0, 2);
+ __lsx_vstelm_h(vec5, dst_org, 4, 2);
+ dst_org += stride;
+ __lsx_vstelm_w(vec6, dst_org, 0, 3);
+ __lsx_vstelm_h(vec5, dst_org, 4, 3);
+ dst_org += stride;
+ __lsx_vstelm_w(vec7, dst_org, 0, 0);
+ __lsx_vstelm_h(vec5, dst_org, 4, 4);
+ dst_org += stride;
+ __lsx_vstelm_w(vec7, dst_org, 0, 1);
+ __lsx_vstelm_h(vec5, dst_org, 4, 5);
+ dst_org += stride;
+ __lsx_vstelm_w(vec7, dst_org, 0, 2);
+ __lsx_vstelm_h(vec5, dst_org, 4, 6);
+ dst_org += stride;
+ __lsx_vstelm_w(vec7, dst_org, 0, 3);
+ __lsx_vstelm_h(vec5, dst_org, 4, 7);
+
+ return 1;
+ } else {
+ dst -= 7 * 16;
+
+ p7_l_in = (v8u16)__lsx_vilvl_b(zero, p7);
+ p6_l_in = (v8u16)__lsx_vilvl_b(zero, p6);
+ p5_l_in = (v8u16)__lsx_vilvl_b(zero, p5);
+ p4_l_in = (v8u16)__lsx_vilvl_b(zero, p4);
+ p3_l_in = (v8u16)__lsx_vilvl_b(zero, p3);
+ p2_l_in = (v8u16)__lsx_vilvl_b(zero, p2);
+ p1_l_in = (v8u16)__lsx_vilvl_b(zero, p1);
+ p0_l_in = (v8u16)__lsx_vilvl_b(zero, p0);
+ q0_l_in = (v8u16)__lsx_vilvl_b(zero, q0);
+
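+ /* p6 */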
+ tmp0_l = p7_l_in << 3;
+ tmp0_l -= p7_l_in;
+ tmp0_l += p6_l_in;
+ tmp0_l += q0_l_in;
+ tmp1_l = p6_l_in + p5_l_in;
+ tmp1_l += p4_l_in;
+ tmp1_l += p3_l_in;
+ tmp1_l += p2_l_in;
+ tmp1_l += p1_l_in;
+ tmp1_l += p0_l_in;
+ tmp1_l += tmp0_l;
+ out_l = __lsx_vsrari_h((__m128i)tmp1_l, 4);
+
+ p7_h_in = (v8u16)__lsx_vilvh_b(zero, p7);
+ p6_h_in = (v8u16)__lsx_vilvh_b(zero, p6);
+ p5_h_in = (v8u16)__lsx_vilvh_b(zero, p5);
+ p4_h_in = (v8u16)__lsx_vilvh_b(zero, p4);
+ p3_h_in = (v8u16)__lsx_vilvh_b(zero, p3);
+ p2_h_in = (v8u16)__lsx_vilvh_b(zero, p2);
+ p1_h_in = (v8u16)__lsx_vilvh_b(zero, p1);
+ p0_h_in = (v8u16)__lsx_vilvh_b(zero, p0);
+ q0_h_in = (v8u16)__lsx_vilvh_b(zero, q0);
+
+ tmp0_h = p7_h_in << 3;
+ tmp0_h -= p7_h_in;
+ tmp0_h += p6_h_in;
+ tmp0_h += q0_h_in;
+ tmp1_h = p6_h_in + p5_h_in;
+ tmp1_h += p4_h_in;
+ tmp1_h += p3_h_in;
+ tmp1_h += p2_h_in;
+ tmp1_h += p1_h_in;
+ tmp1_h += p0_h_in;
+ tmp1_h += tmp0_h;
+ out_h = __lsx_vsrari_h((__m128i)tmp1_h, 4);
+
+ out_l = __lsx_vpickev_b(out_h, out_l);
+ p6 = __lsx_vbitsel_v(p6, out_l, flat2);
+ __lsx_vst(p6, dst, 0);
+
+ /* p5 */
+ q1_l_in = (v8u16)__lsx_vilvl_b(zero, q1);
+ tmp0_l = p5_l_in - p6_l_in;
+ tmp0_l += q1_l_in;
+ tmp0_l -= p7_l_in;
+ tmp1_l += tmp0_l;
+ out_l = __lsx_vsrari_h((__m128i)tmp1_l, 4);
+ q1_h_in = (v8u16)__lsx_vilvh_b(zero, q1);
+ tmp0_h = p5_h_in - p6_h_in;
+ tmp0_h += q1_h_in;
+ tmp0_h -= p7_h_in;
+ tmp1_h += tmp0_h;
+ out_h = __lsx_vsrari_h((__m128i)tmp1_h, 4);
+ out_l = __lsx_vpickev_b(out_h, out_l);
+ p5 = __lsx_vbitsel_v(p5, out_l, flat2);
+ __lsx_vst(p5, dst, 16);
+
+ /* p4 */
+ q2_l_in = (v8u16)__lsx_vilvl_b(zero, q2);
+ tmp0_l = p4_l_in - p5_l_in;
+ tmp0_l += q2_l_in;
+ tmp0_l -= p7_l_in;
+ tmp1_l += tmp0_l;
+ out_l = __lsx_vsrari_h((__m128i)tmp1_l, 4);
+ q2_h_in = (v8u16)__lsx_vilvh_b(zero, q2);
+ tmp0_h = p4_h_in - p5_h_in;
+ tmp0_h += q2_h_in;
+ tmp0_h -= p7_h_in;
+ tmp1_h += tmp0_h;
+ out_h = __lsx_vsrari_h((__m128i)tmp1_h, 4);
+ out_l = __lsx_vpickev_b(out_h, out_l);
+ p4 = __lsx_vbitsel_v(p4, out_l, flat2);
+ __lsx_vst(p4, dst, 16*2);
+
+ /* p3 */
+ q3_l_in = (v8u16)__lsx_vilvl_b(zero, q3);
+ tmp0_l = p3_l_in - p4_l_in;
+ tmp0_l += q3_l_in;
+ tmp0_l -= p7_l_in;
+ tmp1_l += tmp0_l;
+ out_l = __lsx_vsrari_h((__m128i)tmp1_l, 4);
+ q3_h_in = (v8u16)__lsx_vilvh_b(zero, q3);
+ tmp0_h = p3_h_in - p4_h_in;
+ tmp0_h += q3_h_in;
+ tmp0_h -= p7_h_in;
+ tmp1_h += tmp0_h;
+ out_h = __lsx_vsrari_h((__m128i)tmp1_h, 4);
+ out_l = __lsx_vpickev_b(out_h, out_l);
+ p3 = __lsx_vbitsel_v(p3, out_l, flat2);
+ __lsx_vst(p3, dst, 16*3);
+
+ /* p2 */
+ q4_l_in = (v8u16)__lsx_vilvl_b(zero, q4);
+ filter8 = __lsx_vld(filter48, 0);
+ tmp0_l = p2_l_in - p3_l_in;
+ tmp0_l += q4_l_in;
+ tmp0_l -= p7_l_in;
+ tmp1_l += tmp0_l;
+ out_l = __lsx_vsrari_h((__m128i)tmp1_l, 4);
+ q4_h_in = (v8u16)__lsx_vilvh_b(zero, q4);
+ tmp0_h = p2_h_in - p3_h_in;
+ tmp0_h += q4_h_in;
+ tmp0_h -= p7_h_in;
+ tmp1_h += tmp0_h;
+ out_h = __lsx_vsrari_h((__m128i)tmp1_h, 4);
+ out_l = __lsx_vpickev_b(out_h, out_l);
+ filter8 = __lsx_vbitsel_v(filter8, out_l, flat2);
+ __lsx_vst(filter8, dst, 16*4);
+
+ /* p1 */
+ q5_l_in = (v8u16)__lsx_vilvl_b(zero, q5);
+ filter8 = __lsx_vld(filter48, 16);
+ tmp0_l = p1_l_in - p2_l_in;
+ tmp0_l += q5_l_in;
+ tmp0_l -= p7_l_in;
+ tmp1_l += tmp0_l;
+ out_l = __lsx_vsrari_h((__m128i)tmp1_l, 4);
+ q5_h_in = (v8u16)__lsx_vilvh_b(zero, q5);
+ tmp0_h = p1_h_in - p2_h_in;
+ tmp0_h += q5_h_in;
+ tmp0_h -= p7_h_in;
+ tmp1_h += tmp0_h;
+ out_h = __lsx_vsrari_h((__m128i)tmp1_h, 4);
+ out_l = __lsx_vpickev_b(out_h, out_l);
+ filter8 = __lsx_vbitsel_v(filter8, out_l, flat2);
+ __lsx_vst(filter8, dst, 16*5);
+
+ /* p0 */
+ q6_l_in = (v8u16)__lsx_vilvl_b(zero, q6);
+ filter8 = __lsx_vld(filter48, 32);
+ tmp0_l = p0_l_in - p1_l_in;
+ tmp0_l += q6_l_in;
+ tmp0_l -= p7_l_in;
+ tmp1_l += tmp0_l;
+ out_l = __lsx_vsrari_h((__m128i)tmp1_l, 4);
+ q6_h_in = (v8u16)__lsx_vilvh_b(zero, q6);
+ tmp0_h = p0_h_in - p1_h_in;
+ tmp0_h += q6_h_in;
+ tmp0_h -= p7_h_in;
+ tmp1_h += tmp0_h;
+ out_h = __lsx_vsrari_h((__m128i)tmp1_h, 4);
+ out_l = __lsx_vpickev_b(out_h, out_l);
+ filter8 = __lsx_vbitsel_v(filter8, out_l, flat2);
+ __lsx_vst(filter8, dst, 16*6);
+
+ /* q0 */
+ q7_l_in = (v8u16)__lsx_vilvl_b(zero, q7);
+ filter8 = __lsx_vld(filter48, 48);
+ tmp0_l = q7_l_in - p0_l_in;
+ tmp0_l += q0_l_in;
+ tmp0_l -= p7_l_in;
+ tmp1_l += tmp0_l;
+ out_l = __lsx_vsrari_h((__m128i)tmp1_l, 4);
+ q7_h_in = (v8u16)__lsx_vilvh_b(zero, q7);
+ tmp0_h = q7_h_in - p0_h_in;
+ tmp0_h += q0_h_in;
+ tmp0_h -= p7_h_in;
+ tmp1_h += tmp0_h;
+ out_h = __lsx_vsrari_h((__m128i)tmp1_h, 4);
+ out_l = __lsx_vpickev_b(out_h, out_l);
+ filter8 = __lsx_vbitsel_v(filter8, out_l, flat2);
+ __lsx_vst(filter8, dst, 16*7);
+
+ /* q1 */
+ filter8 = __lsx_vld(filter48, 64);
+ tmp0_l = q7_l_in - q0_l_in;
+ tmp0_l += q1_l_in;
+ tmp0_l -= p6_l_in;
+ tmp1_l += tmp0_l;
+ out_l = __lsx_vsrari_h((__m128i)tmp1_l, 4);
+ tmp0_h = q7_h_in - q0_h_in;
+ tmp0_h += q1_h_in;
+ tmp0_h -= p6_h_in;
+ tmp1_h += tmp0_h;
+ out_h = __lsx_vsrari_h((__m128i)tmp1_h, 4);
+ out_l = __lsx_vpickev_b(out_h, out_l);
+ filter8 = __lsx_vbitsel_v(filter8, out_l, flat2);
+ __lsx_vst(filter8, dst, 16*8);
+
+ /* q2 */
+ filter8 = __lsx_vld(filter48, 80);
+ tmp0_l = q7_l_in - q1_l_in;
+ tmp0_l += q2_l_in;
+ tmp0_l -= p5_l_in;
+ tmp1_l += tmp0_l;
+ out_l = __lsx_vsrari_h((__m128i)tmp1_l, 4);
+ tmp0_h = q7_h_in - q1_h_in;
+ tmp0_h += q2_h_in;
+ tmp0_h -= p5_h_in;
+ tmp1_h += tmp0_h;
+ out_h = __lsx_vsrari_h((__m128i)tmp1_h, 4);
+ out_l = __lsx_vpickev_b(out_h, out_l);
+ filter8 = __lsx_vbitsel_v(filter8, out_l, flat2);
+ __lsx_vst(filter8, dst, 16*9);
+
+ /* q3 */
+ tmp0_l = q7_l_in - q2_l_in;
+ tmp0_l += q3_l_in;
+ tmp0_l -= p4_l_in;
+ tmp1_l += tmp0_l;
+ out_l = __lsx_vsrari_h((__m128i)tmp1_l, 4);
+ tmp0_h = q7_h_in - q2_h_in;
+ tmp0_h += q3_h_in;
+ tmp0_h -= p4_h_in;
+ tmp1_h += tmp0_h;
+ out_h = __lsx_vsrari_h((__m128i)tmp1_h, 4);
+ out_l = __lsx_vpickev_b(out_h, out_l);
+ q3 = __lsx_vbitsel_v(q3, out_l, flat2);
+ __lsx_vst(q3, dst, 16*10);
+
+ /* q4 */
+ tmp0_l = q7_l_in - q3_l_in;
+ tmp0_l += q4_l_in;
+ tmp0_l -= p3_l_in;
+ tmp1_l += tmp0_l;
+ out_l = __lsx_vsrari_h((__m128i)tmp1_l, 4);
+ tmp0_h = q7_h_in - q3_h_in;
+ tmp0_h += q4_h_in;
+ tmp0_h -= p3_h_in;
+ tmp1_h += tmp0_h;
+ out_h = __lsx_vsrari_h((__m128i)tmp1_h, 4);
+ out_l = __lsx_vpickev_b(out_h, out_l);
+ q4 = __lsx_vbitsel_v(q4, out_l, flat2);
+ __lsx_vst(q4, dst, 16*11);
+
+ /* q5 */
+ tmp0_l = q7_l_in - q4_l_in;
+ tmp0_l += q5_l_in;
+ tmp0_l -= p2_l_in;
+ tmp1_l += tmp0_l;
+ out_l = __lsx_vsrari_h((__m128i)tmp1_l, 4);
+ tmp0_h = q7_h_in - q4_h_in;
+ tmp0_h += q5_h_in;
+ tmp0_h -= p2_h_in;
+ tmp1_h += tmp0_h;
+ out_h = __lsx_vsrari_h((__m128i)tmp1_h, 4);
+ out_l = __lsx_vpickev_b(out_h, out_l);
+ q5 = __lsx_vbitsel_v(q5, out_l, flat2);
+ __lsx_vst(q5, dst, 16*12);
+
+ /* q6 */
+ tmp0_l = q7_l_in - q5_l_in;
+ tmp0_l += q6_l_in;
+ tmp0_l -= p1_l_in;
+ tmp1_l += tmp0_l;
+ out_l = __lsx_vsrari_h((__m128i)tmp1_l, 4);
+ tmp0_h = q7_h_in - q5_h_in;
+ tmp0_h += q6_h_in;
+ tmp0_h -= p1_h_in;
+ tmp1_h += tmp0_h;
+ out_h = __lsx_vsrari_h((__m128i)tmp1_h, 4);
+ out_l = __lsx_vpickev_b(out_h, out_l);
+ q6 = __lsx_vbitsel_v(q6, out_l, flat2);
+ __lsx_vst(q6, dst, 16*13);
+
+ return 0;
+ }
+}
+
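+/* Filter across a vertical edge, 16-pixel strength, 16 rows, using a full
+ * 16x16 transpose of the neighbourhood around the edge. */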
+void ff_loop_filter_h_16_16_lsx(uint8_t *dst, ptrdiff_t stride,
+ int32_t b_limit_ptr,
+ int32_t limit_ptr,
+ int32_t thresh_ptr)
+{
+ uint8_t early_exit = 0;
+ uint8_t transposed_input[16 * 24] __attribute__ ((aligned(16)));
+ uint8_t *filter48 = &transposed_input[16 * 16];
+
+ vp9_transpose_16x16((dst - 8), stride, &transposed_input[0], 16);
+
+ early_exit = vp9_vt_lpf_t4_and_t8_16w((transposed_input + 16 * 8),
+ &filter48[0], dst, stride,
+ b_limit_ptr, limit_ptr, thresh_ptr);
+
+ if (0 == early_exit) {
+ early_exit = vp9_vt_lpf_t16_16w((transposed_input + 16 * 8), dst,
+ stride, &filter48[0]);
+
+ if (0 == early_exit) {
+ vp9_transpose_16x16(transposed_input, 16, (dst - 8), stride);
+ }
+ }
+}
diff --git a/libavcodec/loongarch/vp9dsp_init_loongarch.c b/libavcodec/loongarch/vp9dsp_init_loongarch.c
index c1e01b4558..e49625ad5f 100644
--- a/libavcodec/loongarch/vp9dsp_init_loongarch.c
+++ b/libavcodec/loongarch/vp9dsp_init_loongarch.c
@@ -71,6 +71,15 @@
dsp->intra_pred[tx][TOP_DC_PRED] = ff_dc_top_##sz##_lsx; \
dsp->intra_pred[tx][TM_VP8_PRED] = ff_tm_##sz##_lsx; \
+#define init_idct(tx, nm) \
+ dsp->itxfm_add[tx][DCT_DCT] = \
+ dsp->itxfm_add[tx][ADST_DCT] = \
+ dsp->itxfm_add[tx][DCT_ADST] = \
+ dsp->itxfm_add[tx][ADST_ADST] = nm##_add_lsx;
+
+#define init_itxfm(tx, sz) \
+ dsp->itxfm_add[tx][DCT_DCT] = ff_idct_idct_##sz##_add_lsx;
+
av_cold void ff_vp9dsp_init_loongarch(VP9DSPContext *dsp, int bpp)
{
int cpu_flags = av_get_cpu_flags();
@@ -86,8 +95,30 @@ av_cold void ff_vp9dsp_init_loongarch(VP9DSPContext *dsp, int bpp)
init_intra_pred1_lsx(TX_32X32, 32x32);
init_intra_pred2_lsx(TX_4X4, 4x4);
init_intra_pred2_lsx(TX_8X8, 8x8);
+ init_itxfm(TX_8X8, 8x8);
+ init_itxfm(TX_16X16, 16x16);
+ init_idct(TX_32X32, ff_idct_idct_32x32);
+ dsp->loop_filter_8[0][0] = ff_loop_filter_h_4_8_lsx;
+ dsp->loop_filter_8[0][1] = ff_loop_filter_v_4_8_lsx;
+ dsp->loop_filter_8[1][0] = ff_loop_filter_h_8_8_lsx;
+ dsp->loop_filter_8[1][1] = ff_loop_filter_v_8_8_lsx;
+ dsp->loop_filter_8[2][0] = ff_loop_filter_h_16_8_lsx;
+ dsp->loop_filter_8[2][1] = ff_loop_filter_v_16_8_lsx;
+
+ dsp->loop_filter_16[0] = ff_loop_filter_h_16_16_lsx;
+ dsp->loop_filter_16[1] = ff_loop_filter_v_16_16_lsx;
+
+ dsp->loop_filter_mix2[0][0][0] = ff_loop_filter_h_44_16_lsx;
+ dsp->loop_filter_mix2[0][0][1] = ff_loop_filter_v_44_16_lsx;
+ dsp->loop_filter_mix2[0][1][0] = ff_loop_filter_h_48_16_lsx;
+ dsp->loop_filter_mix2[0][1][1] = ff_loop_filter_v_48_16_lsx;
+ dsp->loop_filter_mix2[1][0][0] = ff_loop_filter_h_84_16_lsx;
+ dsp->loop_filter_mix2[1][0][1] = ff_loop_filter_v_84_16_lsx;
+ dsp->loop_filter_mix2[1][1][0] = ff_loop_filter_h_88_16_lsx;
+ dsp->loop_filter_mix2[1][1][1] = ff_loop_filter_v_88_16_lsx;
}
}
+
#undef init_subpel1
#undef init_subpel2
#undef init_subpel3
@@ -95,3 +126,5 @@ av_cold void ff_vp9dsp_init_loongarch(VP9DSPContext *dsp, int bpp)
#undef init_fpel
#undef init_intra_pred1_lsx
#undef init_intra_pred2_lsx
+#undef init_idct
+#undef init_itxfm
diff --git a/libavcodec/loongarch/vp9dsp_loongarch.h b/libavcodec/loongarch/vp9dsp_loongarch.h
index b469326fdc..3cc918a18c 100644
--- a/libavcodec/loongarch/vp9dsp_loongarch.h
+++ b/libavcodec/loongarch/vp9dsp_loongarch.h
@@ -140,5 +140,43 @@ void ff_tm_16x16_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
const uint8_t *top);
void ff_tm_32x32_lsx(uint8_t *dst, ptrdiff_t stride, const uint8_t *left,
const uint8_t *top);
+void ff_loop_filter_h_16_8_lsx(uint8_t *dst, ptrdiff_t stride, int32_t e,
+ int32_t i, int32_t h);
+void ff_loop_filter_v_16_8_lsx(uint8_t *dst, ptrdiff_t stride, int32_t e,
+ int32_t i, int32_t h);
+void ff_loop_filter_h_4_8_lsx(uint8_t *dst, ptrdiff_t stride, int32_t e,
+ int32_t i, int32_t h);
+void ff_loop_filter_v_4_8_lsx(uint8_t *dst, ptrdiff_t stride, int32_t e,
+ int32_t i, int32_t h);
+void ff_loop_filter_h_44_16_lsx(uint8_t *dst, ptrdiff_t stride, int32_t e,
+ int32_t i, int32_t h);
+void ff_loop_filter_v_44_16_lsx(uint8_t *dst, ptrdiff_t stride, int32_t e,
+ int32_t i, int32_t h);
+void ff_loop_filter_h_8_8_lsx(uint8_t *dst, ptrdiff_t stride, int32_t e,
+ int32_t i, int32_t h);
+void ff_loop_filter_v_8_8_lsx(uint8_t *dst, ptrdiff_t stride, int32_t e,
+ int32_t i, int32_t h);
+void ff_loop_filter_h_88_16_lsx(uint8_t *dst, ptrdiff_t stride, int32_t e,
+ int32_t i, int32_t h);
+void ff_loop_filter_v_88_16_lsx(uint8_t *dst, ptrdiff_t stride, int32_t e,
+ int32_t i, int32_t h);
+void ff_loop_filter_h_84_16_lsx(uint8_t *dst, ptrdiff_t stride, int32_t e,
+ int32_t i, int32_t h);
+void ff_loop_filter_v_84_16_lsx(uint8_t *dst, ptrdiff_t stride, int32_t e,
+ int32_t i, int32_t h);
+void ff_loop_filter_h_48_16_lsx(uint8_t *dst, ptrdiff_t stride, int32_t e,
+ int32_t i, int32_t h);
+void ff_loop_filter_v_48_16_lsx(uint8_t *dst, ptrdiff_t stride, int32_t e,
+ int32_t i, int32_t h);
+void ff_loop_filter_h_16_16_lsx(uint8_t *dst, ptrdiff_t stride, int32_t e,
+ int32_t i, int32_t h);
+void ff_loop_filter_v_16_16_lsx(uint8_t *dst, ptrdiff_t stride, int32_t e,
+ int32_t i, int32_t h);
+void ff_idct_idct_8x8_add_lsx(uint8_t *dst, ptrdiff_t stride,
+ int16_t *block, int eob);
+void ff_idct_idct_16x16_add_lsx(uint8_t *dst, ptrdiff_t stride,
+ int16_t *block, int eob);
+void ff_idct_idct_32x32_add_lsx(uint8_t *dst, ptrdiff_t stride,
+ int16_t *block, int eob);
#endif /* AVCODEC_LOONGARCH_VP9DSP_LOONGARCH_H */
--
2.20.1
* [FFmpeg-devel] [PATCH 4/4] avcodec: [loongarch] Optimize vc1dsp with LASX.
2021-12-18 14:27 [FFmpeg-devel] Optimize VP8,VP9,WMV3 decoding for loongarch Hao Chen
` (2 preceding siblings ...)
2021-12-18 14:27 ` [FFmpeg-devel] [PATCH 3/4] avcodec: [loongarch] Optimize vp9_lpf/idct " Hao Chen
@ 2021-12-18 14:27 ` Hao Chen
2021-12-20 8:37 ` [FFmpeg-devel] Optimize VP8,VP9,WMV3 decoding for loongarch Shiyou Yin
4 siblings, 0 replies; 10+ messages in thread
From: Hao Chen @ 2021-12-18 14:27 UTC (permalink / raw)
To: ffmpeg-devel
./ffmpeg -i 11_wmv3_720p_24fps_7Mbps.wmv -f rawvideo -y /dev/null -an
before: 131fps
after : 229fps
---
libavcodec/loongarch/Makefile | 2 +
libavcodec/loongarch/vc1dsp_init_loongarch.c | 67 ++
libavcodec/loongarch/vc1dsp_lasx.c | 1005 ++++++++++++++++++
libavcodec/loongarch/vc1dsp_loongarch.h | 79 ++
libavcodec/vc1dsp.c | 2 +
libavcodec/vc1dsp.h | 1 +
6 files changed, 1156 insertions(+)
create mode 100644 libavcodec/loongarch/vc1dsp_init_loongarch.c
create mode 100644 libavcodec/loongarch/vc1dsp_lasx.c
create mode 100644 libavcodec/loongarch/vc1dsp_loongarch.h
diff --git a/libavcodec/loongarch/Makefile b/libavcodec/loongarch/Makefile
index 4b83f20e92..baf5f92e84 100644
--- a/libavcodec/loongarch/Makefile
+++ b/libavcodec/loongarch/Makefile
@@ -4,12 +4,14 @@ OBJS-$(CONFIG_H264DSP) += loongarch/h264dsp_init_loongarch.o
OBJS-$(CONFIG_H264PRED) += loongarch/h264_intrapred_init_loongarch.o
OBJS-$(CONFIG_VP8_DECODER) += loongarch/vp8dsp_init_loongarch.o
OBJS-$(CONFIG_VP9_DECODER) += loongarch/vp9dsp_init_loongarch.o
+OBJS-$(CONFIG_VC1DSP) += loongarch/vc1dsp_init_loongarch.o
LASX-OBJS-$(CONFIG_H264CHROMA) += loongarch/h264chroma_lasx.o
LASX-OBJS-$(CONFIG_H264QPEL) += loongarch/h264qpel_lasx.o
LASX-OBJS-$(CONFIG_H264DSP) += loongarch/h264dsp_lasx.o \
loongarch/h264idct_lasx.o \
loongarch/h264_deblock_lasx.o
LASX-OBJS-$(CONFIG_H264PRED) += loongarch/h264_intrapred_lasx.o
+LASX-OBJS-$(CONFIG_VC1_DECODER) += loongarch/vc1dsp_lasx.o
LSX-OBJS-$(CONFIG_VP8_DECODER) += loongarch/vp8_mc_lsx.o \
loongarch/vp8_lpf_lsx.o
LSX-OBJS-$(CONFIG_VP9_DECODER) += loongarch/vp9_mc_lsx.o \
diff --git a/libavcodec/loongarch/vc1dsp_init_loongarch.c b/libavcodec/loongarch/vc1dsp_init_loongarch.c
new file mode 100644
index 0000000000..e72a4a3203
--- /dev/null
+++ b/libavcodec/loongarch/vc1dsp_init_loongarch.c
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2021 Loongson Technology Corporation Limited
+ * Contributed by Hao Chen <chenhao@loongson.cn>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/loongarch/cpu.h"
+#include "libavutil/attributes.h"
+#include "libavcodec/vc1dsp.h"
+#include "vc1dsp_loongarch.h"
+
+#define FN_ASSIGN(OP, X, Y, INSN) \
+ dsp->OP##vc1_mspel_pixels_tab[1][X+4*Y] = ff_##OP##vc1_mspel_mc##X##Y##INSN; \
+ dsp->OP##vc1_mspel_pixels_tab[0][X+4*Y] = ff_##OP##vc1_mspel_mc##X##Y##_16##INSN
+
+#define FN_ASSIGN_V(OP, Y, INSN) \
+ dsp->OP##vc1_mspel_pixels_tab[0][4*Y] = ff_##OP##vc1_mspel_mc0##Y##_16##INSN
+
+#define FN_ASSIGN_H(OP, X, INSN) \
+ dsp->OP##vc1_mspel_pixels_tab[0][X] = ff_##OP##vc1_mspel_mc##X##0_16##INSN
+
+av_cold void ff_vc1dsp_init_loongarch(VC1DSPContext *dsp)
+{
+ int cpu_flags = av_get_cpu_flags();
+
+ if (have_lasx(cpu_flags)) {
+ dsp->vc1_inv_trans_8x8 = ff_vc1_inv_trans_8x8_lasx;
+ dsp->vc1_inv_trans_4x8 = ff_vc1_inv_trans_4x8_lasx;
+ dsp->vc1_inv_trans_8x4 = ff_vc1_inv_trans_8x4_lasx;
+ dsp->vc1_inv_trans_4x4 = ff_vc1_inv_trans_4x4_lasx;
+ dsp->vc1_inv_trans_8x8_dc = ff_vc1_inv_trans_8x8_dc_lasx;
+ dsp->vc1_inv_trans_4x8_dc = ff_vc1_inv_trans_4x8_dc_lasx;
+ dsp->vc1_inv_trans_8x4_dc = ff_vc1_inv_trans_8x4_dc_lasx;
+ dsp->vc1_inv_trans_4x4_dc = ff_vc1_inv_trans_4x4_dc_lasx;
+ FN_ASSIGN(put_, 1, 1, _lasx);
+ FN_ASSIGN(put_, 1, 2, _lasx);
+ FN_ASSIGN(put_, 1, 3, _lasx);
+ FN_ASSIGN(put_, 2, 1, _lasx);
+ FN_ASSIGN(put_, 2, 2, _lasx);
+ FN_ASSIGN(put_, 2, 3, _lasx);
+ FN_ASSIGN(put_, 3, 1, _lasx);
+ FN_ASSIGN(put_, 3, 2, _lasx);
+ FN_ASSIGN(put_, 3, 3, _lasx);
+ FN_ASSIGN_V(put_, 1, _lasx);
+ FN_ASSIGN_V(put_, 2, _lasx);
+ FN_ASSIGN_V(put_, 3, _lasx);
+ FN_ASSIGN_H(put_, 1, _lasx);
+ FN_ASSIGN_H(put_, 2, _lasx);
+ FN_ASSIGN_H(put_, 3, _lasx);
+ dsp->put_no_rnd_vc1_chroma_pixels_tab[0] = ff_put_no_rnd_vc1_chroma_mc8_lasx;
+ }
+}
diff --git a/libavcodec/loongarch/vc1dsp_lasx.c b/libavcodec/loongarch/vc1dsp_lasx.c
new file mode 100644
index 0000000000..40b8668f2b
--- /dev/null
+++ b/libavcodec/loongarch/vc1dsp_lasx.c
@@ -0,0 +1,1005 @@
+/*
+ * Copyright (c) 2021 Loongson Technology Corporation Limited
+ * Contributed by Hao Chen <chenhao@loongson.cn>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "vc1dsp_loongarch.h"
+#include "libavutil/loongarch/loongson_intrinsics.h"
+
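+/* 8x8 inverse transform: row and column passes both use packed 16-bit
+ * coefficient vectors with 32-bit dot-product accumulation, writing the
+ * transformed coefficients back to the block. */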
+void ff_vc1_inv_trans_8x8_lasx(int16_t block[64])
+{
+ int32_t con_4 = 4;
+ int32_t con_64 = 64;
+ __m256i in0, in1, in2, in3;
+ __m256i temp0, temp1, temp2, temp3, t1, t2, t3, t4, t5, t6, t7, t8;
+ __m256i const_1 = {0x000c000c000c000c, 0x000c000c000c000c,
+ 0x000c000c000c000c, 0x000c000c000c000c};
+ __m256i const_2 = {0xfff4000cfff4000c, 0xfff4000cfff4000c,
+ 0xfff4000cfff4000c, 0xfff4000cfff4000c};
+ __m256i const_3 = {0x0006001000060010, 0x0006001000060010,
+ 0x0006001000060010, 0x0006001000060010};
+ __m256i const_4 = {0xfff00006fff00006, 0xfff00006fff00006,
+ 0xfff00006fff00006, 0xfff00006fff00006};
+ __m256i const_5 = {0x000f0010000f0010, 0x000f0010000f0010,
+ 0x000f0010000f0010, 0x000f0010000f0010};
+ __m256i const_6 = {0x0004000900040009, 0x0004000900040009,
+ 0x0004000900040009, 0x0004000900040009};
+ __m256i const_7 = {0xfffc000ffffc000f, 0xfffc000ffffc000f,
+ 0xfffc000ffffc000f, 0xfffc000ffffc000f};
+ __m256i const_8 = {0xfff7fff0fff7fff0, 0xfff7fff0fff7fff0,
+ 0xfff7fff0fff7fff0, 0xfff7fff0fff7fff0};
+ __m256i const_9 = {0xfff00009fff00009, 0xfff00009fff00009,
+ 0xfff00009fff00009, 0xfff00009fff00009};
+ __m256i const_10 = {0x000f0004000f0004, 0x000f0004000f0004,
+ 0x000f0004000f0004, 0x000f0004000f0004};
+ __m256i const_11 = {0xfff70004fff70004, 0xfff70004fff70004,
+ 0xfff70004fff70004, 0xfff70004fff70004};
+ __m256i const_12 = {0xfff0000ffff0000f, 0xfff0000ffff0000f,
+ 0xfff0000ffff0000f, 0xfff0000ffff0000f};
+
+ DUP4_ARG2(__lasx_xvld, block, 0, block, 32, block, 64, block, 96,
+ in0, in1, in2, in3);
+ DUP4_ARG2(__lasx_xvpermi_d, in0, 0xD8, in1, 0xD8, in2, 0xD8, in3, 0xD8,
+ in0, in1, in2, in3);
+ /* first pass */
+ DUP2_ARG2(__lasx_xvilvl_h, in2, in0, in3, in1, temp0, temp1);
+ t2 = __lasx_xvreplgr2vr_w(con_4);
+ DUP2_ARG3(__lasx_xvdp2add_w_h, t2, temp0, const_1, t2, temp0,
+ const_2, t1, t2);
+ DUP2_ARG2(__lasx_xvdp2_w_h, temp1, const_3, temp1, const_4, t3, t4);
+
+ t5 = __lasx_xvadd_w(t1, t3);
+ t6 = __lasx_xvadd_w(t2, t4);
+ t7 = __lasx_xvsub_w(t2, t4);
+ t8 = __lasx_xvsub_w(t1, t3);
+
+ DUP2_ARG2(__lasx_xvilvh_h, in1, in0, in3, in2, temp0, temp1);
+ temp2 = __lasx_xvdp2_w_h(const_5, temp0);
+ t1 = __lasx_xvdp2add_w_h(temp2, temp1, const_6);
+ temp2 = __lasx_xvdp2_w_h(const_7, temp0);
+ t2 = __lasx_xvdp2add_w_h(temp2, temp1, const_8);
+ temp2 = __lasx_xvdp2_w_h(const_9, temp0);
+ t3 = __lasx_xvdp2add_w_h(temp2, temp1, const_10);
+ temp2 = __lasx_xvdp2_w_h(const_11, temp0);
+ t4 = __lasx_xvdp2add_w_h(temp2, temp1, const_12);
+
+ DUP4_ARG2(__lasx_xvadd_w, t1, t5, t6, t2, t7, t3, t8, t4,
+ temp0, temp1, temp2, temp3);
+ DUP4_ARG2(__lasx_xvsub_w, t8, t4, t7, t3, t6, t2, t5, t1,
+ in0, in1, in2, in3);
+ DUP4_ARG2(__lasx_xvsrai_w, temp0, 3, temp1, 3, temp2, 3, temp3, 3,
+ temp0, temp1, temp2, temp3);
+ DUP4_ARG2(__lasx_xvsrai_w, in0, 3, in1, 3, in2, 3, in3, 3,
+ in0, in1, in2, in3);
+
+ /* second pass */
+ DUP4_ARG2(__lasx_xvpackev_h, temp1, temp0, temp3, temp2, in1, in0,
+ in3, in2, temp0, temp1, temp2, temp3);
+ DUP2_ARG2(__lasx_xvilvl_w, temp1, temp0, temp3, temp2, t1, t3);
+ DUP2_ARG2(__lasx_xvilvh_w, temp1, temp0, temp3, temp2, t2, t4);
+ DUP4_ARG3(__lasx_xvpermi_q, t3, t1, 0x20, t3, t1, 0x31, t4, t2, 0x20,
+ t4, t2, 0x31, in0, in1, in2, in3);
+ DUP2_ARG2(__lasx_xvilvl_h, in1, in0, in3, in2, temp0, temp1);
+ t3 = __lasx_xvreplgr2vr_w(con_64);
+ DUP2_ARG3(__lasx_xvdp2add_w_h, t3, temp0, const_1, t3, temp0,
+ const_2, t1, t2);
+ DUP2_ARG2(__lasx_xvdp2_w_h, temp1, const_3, temp1, const_4, t3, t4);
+
+ t5 = __lasx_xvadd_w(t1, t3);
+ t6 = __lasx_xvadd_w(t2, t4);
+ t7 = __lasx_xvsub_w(t2, t4);
+ t8 = __lasx_xvsub_w(t1, t3);
+
+ DUP2_ARG2(__lasx_xvilvh_h, in2, in0, in3, in1, temp0, temp1);
+ temp2 = __lasx_xvdp2_w_h(const_5, temp0);
+ t1 = __lasx_xvdp2add_w_h(temp2, temp1, const_6);
+ temp2 = __lasx_xvdp2_w_h(const_7, temp0);
+ t2 = __lasx_xvdp2add_w_h(temp2, temp1, const_8);
+ temp2 = __lasx_xvdp2_w_h(const_9, temp0);
+ t3 = __lasx_xvdp2add_w_h(temp2, temp1, const_10);
+ temp2 = __lasx_xvdp2_w_h(const_11, temp0);
+ t4 = __lasx_xvdp2add_w_h(temp2, temp1, const_12);
+
+ DUP4_ARG2(__lasx_xvadd_w, t5, t1, t6, t2, t7, t3, t8, t4,
+ temp0, temp1, temp2, temp3);
+ DUP4_ARG2(__lasx_xvsub_w, t8, t4, t7, t3, t6, t2, t5, t1,
+ in0, in1, in2, in3);
+ DUP4_ARG2(__lasx_xvaddi_wu, in0, 1, in1, 1, in2, 1, in3, 1,
+ in0, in1, in2, in3);
+ DUP4_ARG3(__lasx_xvsrani_h_w, temp1, temp0, 7, temp3, temp2, 7,
+ in1, in0, 7, in3, in2, 7, t1, t2, t3, t4);
+ DUP4_ARG2(__lasx_xvpermi_d, t1, 0xD8, t2, 0xD8, t3, 0xD8, t4, 0xD8,
+ in0, in1, in2, in3);
+ __lasx_xvst(in0, block, 0);
+ __lasx_xvst(in1, block, 32);
+ __lasx_xvst(in2, block, 64);
+ __lasx_xvst(in3, block, 96);
+}
+
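+/* DC-only 8x8 case: the scaled DC value is replicated, added to all 64
+ * destination pixels and saturated back to 8 bits. */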
+void ff_vc1_inv_trans_8x8_dc_lasx(uint8_t *dest, ptrdiff_t stride,
+ int16_t *block)
+{
+ int dc = block[0];
+ ptrdiff_t stride2 = stride << 1;
+ ptrdiff_t stride3 = stride2 + stride;
+ uint8_t *dst = dest + (stride2 << 1);
+ __m256i in0, in1, in2, in3, in4, in5, in6, in7;
+ __m256i const_dc, temp0, temp1, temp2, temp3;
+ __m256i reg0, reg1, reg2, reg3;
+
+ dc = (3 * dc + 1) >> 1;
+ dc = (3 * dc + 16) >> 5;
+
+ const_dc = __lasx_xvreplgr2vr_h(dc);
+ DUP4_ARG2(__lasx_xvldrepl_d, dest, 0, dest + stride, 0, dest + stride2,
+ 0, dest + stride3, 0, in0, in1, in2, in3);
+ DUP4_ARG2(__lasx_xvldrepl_d, dst, 0, dst + stride, 0, dst + stride2,
+ 0, dst + stride3, 0, in4, in5, in6, in7);
+
+ DUP4_ARG2(__lasx_xvilvl_d, in1, in0, in3, in2, in5, in4, in7, in6,
+ temp0, temp1, temp2, temp3);
+ DUP4_ARG1(__lasx_vext2xv_hu_bu, temp0, temp1, temp2, temp3,
+ temp0, temp1, temp2, temp3);
+
+ DUP4_ARG2(__lasx_xvadd_h, temp0, const_dc, temp1, const_dc, temp2,
+ const_dc, temp3, const_dc, reg0, reg1, reg2, reg3);
+ DUP2_ARG3(__lasx_xvssrarni_bu_h, reg1, reg0, 0, reg3, reg2, 0,
+ temp0, temp1);
+ __lasx_xvstelm_d(temp0, dest, 0, 0);
+ __lasx_xvstelm_d(temp0, dest + stride, 0, 2);
+ __lasx_xvstelm_d(temp0, dest + stride2, 0, 1);
+ __lasx_xvstelm_d(temp0, dest + stride3, 0, 3);
+ __lasx_xvstelm_d(temp1, dst, 0, 0);
+ __lasx_xvstelm_d(temp1, dst + stride, 0, 2);
+ __lasx_xvstelm_d(temp1, dst + stride2, 0, 1);
+ __lasx_xvstelm_d(temp1, dst + stride3, 0, 3);
+}
+
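+/* 8x4 inverse transform: 8-point row transform, 4-point column transform,
+ * result added to the destination pixels and clipped to 8 bits. */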
+void ff_vc1_inv_trans_8x4_lasx(uint8_t *dest, ptrdiff_t stride, int16_t *block)
+{
+ ptrdiff_t stride2 = stride << 1;
+ ptrdiff_t stride3 = stride2 + stride;
+ __m256i shift = {0x0000000400000000, 0x0000000500000001,
+ 0x0000000600000002, 0x0000000700000003};
+ __m256i const_64 = {0x0000004000000040, 0x0000004000000040,
+ 0x0000004000000040, 0x0000004000000040};
+ __m256i const_1 = {0x00060010000C000C, 0x00060010000C000C,
+ 0x00060010000C000C, 0x00060010000C000C};
+ __m256i const_2 = {0xFFF00006FFF4000C, 0xFFF00006FFF4000C,
+ 0xFFF00006FFF4000C, 0xFFF00006FFF4000C};
+ __m256i const_3 = {0x0004000F00090010, 0x0004000F00090010,
+ 0x0004000F00090010, 0x0004000F00090010};
+ __m256i const_4 = {0xFFF7FFFCFFF0000F, 0xFFF7FFFCFFF0000F,
+ 0xFFF7FFFCFFF0000F, 0xFFF7FFFCFFF0000F};
+ __m256i const_5 = {0x000FFFF000040009, 0x000FFFF000040009,
+ 0x000FFFF000040009, 0x000FFFF000040009};
+ __m256i const_6 = {0xFFF0FFF7000F0004, 0xFFF0FFF7000F0004,
+ 0xFFF0FFF7000F0004, 0xFFF0FFF7000F0004};
+ __m256i const_7 = {0x0000000000000004, 0x0000000000000004,
+ 0x0000000000000004, 0x0000000000000004};
+ __m256i const_8 = {0x0011001100110011, 0x0011001100110011,
+ 0x0011001100110011, 0x0011001100110011};
+ __m256i const_9 = {0xFFEF0011FFEF0011, 0xFFEF0011FFEF0011,
+ 0xFFEF0011FFEF0011, 0xFFEF0011FFEF0011};
+ __m256i const_10 = {0x000A0016000A0016, 0x000A0016000A0016,
+ 0x000A0016000A0016, 0x000A0016000A0016};
+ __m256i const_11 = {0x0016FFF60016FFF6, 0x0016FFF60016FFF6,
+ 0x0016FFF60016FFF6, 0x0016FFF60016FFF6};
+ __m256i in0, in1;
+ __m256i temp0, temp1, temp2, temp3, t1, t2, t3, t4;
+
+ DUP2_ARG2(__lasx_xvld, block, 0, block, 32, in0, in1);
+ /* first pass */
+ temp0 = __lasx_xvpermi_d(in0, 0xB1);
+ temp1 = __lasx_xvpermi_d(in1, 0xB1);
+ DUP2_ARG2(__lasx_xvilvl_h, temp0, in0, temp1, in1, temp0, temp1);
+ temp2 = __lasx_xvpickev_w(temp1, temp0);
+ temp3 = __lasx_xvpickod_w(temp1, temp0);
+
+ DUP2_ARG2(__lasx_xvdp2_w_h, temp2, const_1, temp2, const_2, temp0, temp1);
+ t1 = __lasx_xvadd_w(temp0, const_7);
+ t2 = __lasx_xvadd_w(temp1, const_7);
+ temp0 = __lasx_xvpickev_w(t2, t1);
+ temp1 = __lasx_xvpickod_w(t2, t1);
+ t3 = __lasx_xvadd_w(temp0, temp1);
+ t4 = __lasx_xvsub_w(temp0, temp1);
+ t4 = __lasx_xvpermi_d(t4, 0xB1);
+
+ DUP4_ARG2(__lasx_xvdp4_d_h, temp3, const_3, temp3, const_4, temp3,
+ const_5, temp3, const_6, t1, t2, temp0, temp1);
+ temp2 = __lasx_xvpickev_w(t2, t1);
+ temp3 = __lasx_xvpickev_w(temp1, temp0);
+
+ t1 = __lasx_xvadd_w(temp2, t3);
+ t2 = __lasx_xvadd_w(temp3, t4);
+ temp0 = __lasx_xvsub_w(t4, temp3);
+ temp1 = __lasx_xvsub_w(t3, temp2);
+ /* second pass */
+ DUP2_ARG3(__lasx_xvsrani_h_w, t2, t1, 3, temp1, temp0, 3, temp2, temp3);
+ temp3 = __lasx_xvshuf4i_h(temp3, 0x4E);
+ temp0 = __lasx_xvpermi_q(temp3, temp2, 0x20);
+ temp1 = __lasx_xvpermi_q(temp3, temp2, 0x31);
+ DUP2_ARG3(__lasx_xvdp2add_w_h, const_64, temp0, const_8, const_64, temp0,
+ const_9, t1, t2);
+ DUP2_ARG2(__lasx_xvdp2_w_h, temp1, const_10, temp1, const_11, t3, t4);
+ temp0 = __lasx_xvadd_w(t1, t3);
+ temp1 = __lasx_xvsub_w(t2, t4);
+ temp2 = __lasx_xvadd_w(t2, t4);
+ temp3 = __lasx_xvsub_w(t1, t3);
+ DUP4_ARG2(__lasx_xvsrai_w, temp0, 7, temp1, 7, temp2, 7, temp3, 7,
+ t1, t2, t3, t4);
+
+ DUP4_ARG2(__lasx_xvldrepl_d, dest, 0, dest + stride, 0, dest + stride2, 0,
+ dest + stride3, 0, temp0, temp1, temp2, temp3);
+ DUP4_ARG1(__lasx_vext2xv_wu_bu, temp0, temp1, temp2, temp3,
+ temp0, temp1, temp2, temp3);
+ DUP4_ARG2(__lasx_xvadd_w, temp0, t1, temp1, t2, temp2, t3, temp3, t4,
+ t1, t2, t3, t4);
+ DUP4_ARG1(__lasx_xvclip255_w, t1, t2, t3, t4, t1, t2, t3, t4);
+ DUP2_ARG2(__lasx_xvpickev_h, t2, t1, t4, t3, temp0, temp1);
+ temp2 = __lasx_xvpickev_b(temp1, temp0);
+ temp0 = __lasx_xvperm_w(temp2, shift);
+ __lasx_xvstelm_d(temp0, dest, 0, 0);
+ __lasx_xvstelm_d(temp0, dest + stride, 0, 1);
+ __lasx_xvstelm_d(temp0, dest + stride2, 0, 2);
+ __lasx_xvstelm_d(temp0, dest + stride3, 0, 3);
+}
+
+void ff_vc1_inv_trans_8x4_dc_lasx(uint8_t *dest, ptrdiff_t stride,
+ int16_t *block)
+{
+ int dc = block[0];
+ ptrdiff_t stride2 = stride << 1;
+ ptrdiff_t stride3 = stride2 + stride;
+ __m256i in0, in1, in2, in3;
+ __m256i const_dc, temp0, temp1, reg0, reg1;
+
+ dc = (3 * dc + 1) >> 1;
+ dc = (17 * dc + 64) >> 7;
+ const_dc = __lasx_xvreplgr2vr_h(dc);
+
+ DUP4_ARG2(__lasx_xvldrepl_d, dest, 0, dest + stride, 0, dest + stride2,
+ 0, dest + stride3, 0, in0, in1, in2, in3);
+ DUP2_ARG2(__lasx_xvilvl_d, in1, in0, in3, in2, temp0, temp1);
+ DUP2_ARG1(__lasx_vext2xv_hu_bu, temp0, temp1, temp0, temp1);
+ DUP2_ARG2(__lasx_xvadd_h, temp0, const_dc, temp1, const_dc, reg0, reg1);
+ temp0 = __lasx_xvssrarni_bu_h(reg1, reg0, 0);
+ __lasx_xvstelm_d(temp0, dest, 0, 0);
+ __lasx_xvstelm_d(temp0, dest + stride, 0, 2);
+ __lasx_xvstelm_d(temp0, dest + stride2, 0, 1);
+ __lasx_xvstelm_d(temp0, dest + stride3, 0, 3);
+}
+
+void ff_vc1_inv_trans_4x8_dc_lasx(uint8_t *dest, ptrdiff_t stride,
+ int16_t *block)
+{
+ int dc = block[0];
+ ptrdiff_t stride2 = stride << 1;
+ ptrdiff_t stride3 = stride2 + stride;
+ uint8_t *dst = dest + (stride2 << 1);
+ __m256i in0, in1, in2, in3, in4, in5, in6, in7;
+ __m256i const_dc, temp0, temp1, temp2, temp3, reg0, reg1;
+
+ dc = (17 * dc + 4) >> 3;
+ dc = (12 * dc + 64) >> 7;
+ const_dc = __lasx_xvreplgr2vr_h(dc);
+
+ DUP4_ARG2(__lasx_xvldrepl_w, dest, 0, dest + stride, 0, dest + stride2,
+ 0, dest + stride3, 0, in0, in1, in2, in3);
+ DUP4_ARG2(__lasx_xvldrepl_w, dst, 0, dst + stride, 0, dst + stride2,
+ 0, dst + stride3, 0, in4, in5, in6, in7);
+
+ DUP4_ARG2(__lasx_xvilvl_w, in1, in0, in3, in2, in5, in4, in7, in6,
+ temp0, temp1, temp2, temp3);
+ DUP2_ARG2(__lasx_xvilvl_d, temp1, temp0, temp3, temp2, reg0, reg1);
+ DUP2_ARG1(__lasx_vext2xv_hu_bu, reg0, reg1, temp0, temp1);
+ DUP2_ARG2(__lasx_xvadd_h, temp0, const_dc, temp1, const_dc, reg0, reg1);
+ temp0 = __lasx_xvssrarni_bu_h(reg1, reg0, 0);
+ __lasx_xvstelm_w(temp0, dest, 0, 0);
+ __lasx_xvstelm_w(temp0, dest + stride, 0, 1);
+ __lasx_xvstelm_w(temp0, dest + stride2, 0, 4);
+ __lasx_xvstelm_w(temp0, dest + stride3, 0, 5);
+ __lasx_xvstelm_w(temp0, dst, 0, 2);
+ __lasx_xvstelm_w(temp0, dst + stride, 0, 3);
+ __lasx_xvstelm_w(temp0, dst + stride2, 0, 6);
+ __lasx_xvstelm_w(temp0, dst + stride3, 0, 7);
+}
+
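+/* 4x8 inverse transform: 4-point row transform, 8-point column transform,
+ * result added to the destination pixels and clipped to 8 bits. */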
+void ff_vc1_inv_trans_4x8_lasx(uint8_t *dest, ptrdiff_t stride, int16_t *block)
+{
+ ptrdiff_t stride2 = stride << 1;
+ ptrdiff_t stride3 = stride2 + stride;
+ uint8_t *dst = dest + (stride2 << 1);
+ __m256i in0, in1, in2, in3;
+ __m256i temp0, temp1, temp2, temp3, t1, t2, t3, t4;
+
+ __m256i const_1 = {0x0011001100110011, 0x0011001100110011,
+ 0x0011001100110011, 0x0011001100110011};
+ __m256i const_2 = {0xFFEF0011FFEF0011, 0xFFEF0011FFEF0011,
+ 0xFFEF0011FFEF0011, 0xFFEF0011FFEF0011};
+ __m256i const_3 = {0x000A0016000A0016, 0x000A0016000A0016,
+ 0x000A0016000A0016, 0x000A0016000A0016};
+ __m256i const_4 = {0x0016FFF60016FFF6, 0x0016FFF60016FFF6,
+ 0x0016FFF60016FFF6, 0x0016FFF60016FFF6};
+ __m256i const_5 = {0x0000000400000004, 0x0000000400000004,
+ 0x0000000400000004, 0x0000000400000004};
+ __m256i const_6 = {0x0000004000000040, 0x0000004000000040,
+ 0x0000004000000040, 0x0000004000000040};
+ __m256i const_7 = {0x000C000C000C000C, 0x000C000C000C000C,
+ 0xFFF4000CFFF4000C, 0xFFF4000CFFF4000C};
+ __m256i const_8 = {0x0006001000060010, 0x0006001000060010,
+ 0xFFF00006FFF00006, 0xFFF00006FFF00006};
+ __m256i const_9 = {0x0009001000090010, 0x0009001000090010,
+ 0x0004000F0004000F, 0x0004000F0004000F};
+ __m256i const_10 = {0xFFF0000FFFF0000F, 0xFFF0000FFFF0000F,
+ 0xFFF7FFFCFFF7FFFC, 0xFFF7FFFCFFF7FFFC};
+ __m256i const_11 = {0x0004000900040009, 0x0004000900040009,
+ 0x000FFFF0000FFFF0, 0x000FFFF0000FFFF0};
+ __m256i const_12 = {0x000F0004000F0004, 0x000F0004000F0004,
+ 0xFFF0FFF7FFF0FFF7, 0xFFF0FFF7FFF0FFF7};
+ __m256i shift = {0x0000000400000000, 0x0000000600000002,
+ 0x0000000500000001, 0x0000000700000003};
+
+ /* first pass */
+ DUP4_ARG2(__lasx_xvld, block, 0, block, 32, block, 64, block, 96,
+ in0, in1, in2, in3);
+ in0 = __lasx_xvilvl_d(in1, in0);
+ in1 = __lasx_xvilvl_d(in3, in2);
+ temp0 = __lasx_xvpickev_h(in1, in0);
+ temp1 = __lasx_xvpickod_h(in1, in0);
+ temp0 = __lasx_xvperm_w(temp0, shift);
+ temp1 = __lasx_xvperm_w(temp1, shift);
+
+ DUP2_ARG3(__lasx_xvdp2add_w_h, const_5, temp0, const_1, const_5, temp0,
+ const_2, t1, t2);
+ DUP2_ARG2(__lasx_xvdp2_w_h, temp1, const_3, temp1, const_4, t3, t4);
+
+ temp0 = __lasx_xvadd_w(t1, t3);
+ temp1 = __lasx_xvsub_w(t2, t4);
+ temp2 = __lasx_xvadd_w(t2, t4);
+ temp3 = __lasx_xvsub_w(t1, t3);
+ DUP4_ARG2(__lasx_xvsrai_w, temp0, 3, temp1, 3, temp2, 3, temp3, 3,
+ temp0, temp1, temp2, temp3);
+
+ /* second pass */
+ t1 = __lasx_xvpickev_w(temp1, temp0);
+ t2 = __lasx_xvpickev_w(temp3, temp2);
+ t1 = __lasx_xvpickev_h(t2, t1);
+ t3 = __lasx_xvpickod_w(temp1, temp0);
+ t4 = __lasx_xvpickod_w(temp3, temp2);
+ temp1 = __lasx_xvpickev_h(t4, t3);
+ temp2 = __lasx_xvpermi_q(t1, t1, 0x00);
+ temp3 = __lasx_xvpermi_q(t1, t1, 0x11);
+ t1 = __lasx_xvdp2add_w_h(const_6, temp2, const_7);
+ t2 = __lasx_xvdp2_w_h(temp3, const_8);
+ t3 = __lasx_xvadd_w(t1, t2);
+ t4 = __lasx_xvsub_w(t1, t2);
+ t4 = __lasx_xvpermi_d(t4, 0x4E);
+
+ DUP4_ARG2(__lasx_xvdp2_w_h, temp1, const_9, temp1, const_10, temp1,
+ const_11, temp1, const_12, t1, t2, temp2, temp3);
+
+ temp0 = __lasx_xvpermi_q(t2, t1, 0x20);
+ temp1 = __lasx_xvpermi_q(t2, t1, 0x31);
+ t1 = __lasx_xvadd_w(temp0, temp1);
+ temp0 = __lasx_xvpermi_q(temp3, temp2, 0x20);
+ temp1 = __lasx_xvpermi_q(temp3, temp2, 0x31);
+ t2 = __lasx_xvadd_w(temp1, temp0);
+ temp0 = __lasx_xvadd_w(t1, t3);
+ temp1 = __lasx_xvadd_w(t2, t4);
+ temp2 = __lasx_xvsub_w(t4, t2);
+ temp3 = __lasx_xvsub_w(t3, t1);
+ temp2 = __lasx_xvaddi_wu(temp2, 1);
+ temp3 = __lasx_xvaddi_wu(temp3, 1);
+ DUP4_ARG2(__lasx_xvsrai_w, temp0, 7, temp1, 7, temp2, 7, temp3, 7,
+ temp0, temp1, temp2, temp3);
+
+ DUP4_ARG2(__lasx_xvldrepl_w, dest, 0, dest + stride, 0, dest + stride2, 0,
+ dest + stride3, 0, const_1, const_2, const_3, const_4);
+ DUP4_ARG2(__lasx_xvldrepl_w, dst, 0, dst + stride, 0, dst + stride2, 0,
+ dst + stride3, 0, const_5, const_6, const_7, const_8);
+
+ DUP4_ARG2(__lasx_xvilvl_w, const_2, const_1, const_4, const_3, const_5,
+ const_6, const_7, const_8, const_1, const_2, const_3, const_4);
+ DUP4_ARG1(__lasx_vext2xv_wu_bu, const_1, const_2, const_3, const_4,
+ const_1, const_2, const_3, const_4);
+ DUP4_ARG2(__lasx_xvadd_w, temp0, const_1, temp1, const_2, temp2, const_3,
+ temp3, const_4, temp0, temp1, temp2, temp3);
+ DUP4_ARG1(__lasx_xvclip255_w, temp0, temp1, temp2, temp3,
+ temp0, temp1, temp2, temp3);
+ DUP2_ARG2(__lasx_xvpickev_h, temp1, temp0, temp3, temp2, temp0, temp1);
+ temp0 = __lasx_xvpickev_b(temp1, temp0);
+ __lasx_xvstelm_w(temp0, dest, 0, 0);
+ __lasx_xvstelm_w(temp0, dest + stride, 0, 4);
+ __lasx_xvstelm_w(temp0, dest + stride2, 0, 1);
+ __lasx_xvstelm_w(temp0, dest + stride3, 0, 5);
+ __lasx_xvstelm_w(temp0, dst, 0, 6);
+ __lasx_xvstelm_w(temp0, dst + stride, 0, 2);
+ __lasx_xvstelm_w(temp0, dst + stride2, 0, 7);
+ __lasx_xvstelm_w(temp0, dst + stride3, 0, 3);
+}
+
+void ff_vc1_inv_trans_4x4_dc_lasx(uint8_t *dest, ptrdiff_t stride,
+ int16_t *block)
+{
+ int dc = block[0];
+ uint8_t *dst1 = dest + stride;
+ uint8_t *dst2 = dst1 + stride;
+ uint8_t *dst3 = dst2 + stride;
+ __m256i in0, in1, in2, in3, temp0, temp1, const_dc;
+ __m256i zero = {0};
+
+ dc = (17 * dc + 4) >> 3;
+ dc = (17 * dc + 64) >> 7;
+ const_dc = __lasx_xvreplgr2vr_h(dc);
+
+ DUP4_ARG2(__lasx_xvldrepl_w, dest, 0, dst1, 0, dst2, 0, dst3, 0,
+ in0, in1, in2, in3);
+ DUP2_ARG2(__lasx_xvilvl_w, in1, in0, in3, in2, temp0, temp1);
+ in0 = __lasx_xvpermi_q(temp1, temp0, 0x20);
+ temp0 = __lasx_xvilvl_b(zero, in0);
+ in0 = __lasx_xvadd_h(temp0, const_dc);
+ temp0 = __lasx_xvssrarni_bu_h(in0, in0, 0);
+ __lasx_xvstelm_w(temp0, dest, 0, 0);
+ __lasx_xvstelm_w(temp0, dst1, 0, 1);
+ __lasx_xvstelm_w(temp0, dst2, 0, 4);
+ __lasx_xvstelm_w(temp0, dst3, 0, 5);
+}
+
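+/* 4x4 inverse transform: 4-point transform on both passes, result added to
+ * the destination pixels and clipped to 8 bits. */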
+void ff_vc1_inv_trans_4x4_lasx(uint8_t *dest, ptrdiff_t stride, int16_t *block)
+{
+ uint8_t *dst1 = dest + stride;
+ uint8_t *dst2 = dst1 + stride;
+ uint8_t *dst3 = dst2 + stride;
+ __m256i in0, in1, in2, in3;
+ __m256i temp0, temp1, temp2, temp3, t1, t2;
+
+ __m256i const_1 = {0x0011001100110011, 0xFFEF0011FFEF0011,
+ 0x0011001100110011, 0xFFEF0011FFEF0011};
+ __m256i const_2 = {0x000A0016000A0016, 0x0016FFF60016FFF6,
+ 0x000A0016000A0016, 0x0016FFF60016FFF6};
+ __m256i const_64 = {0x0000004000000040, 0x0000004000000040,
+ 0x0000004000000040, 0x0000004000000040};
+
+ DUP2_ARG2(__lasx_xvld, block, 0, block, 32, in0, in1);
+ /* first pass: rows */
+ temp0 = __lasx_xvilvl_d(in1, in0);
+ temp1 = __lasx_xvpickev_h(temp0, temp0);
+ temp2 = __lasx_xvpickod_h(temp0, temp0);
+ DUP2_ARG2(__lasx_xvdp2_w_h, temp1, const_1, temp2, const_2, t1, t2);
+ t1 = __lasx_xvaddi_wu(t1, 4);
+ in0 = __lasx_xvadd_w(t1, t2);
+ in1 = __lasx_xvsub_w(t1, t2);
+ DUP2_ARG2(__lasx_xvsrai_w, in0, 3, in1, 3, in0, in1);
+ /* second pass: columns */
+ temp0 = __lasx_xvpickev_h(in1, in0);
+ temp1 = __lasx_xvpermi_q(temp0, temp0, 0x00);
+ temp2 = __lasx_xvpermi_q(temp0, temp0, 0x11);
+ const_1 = __lasx_xvpermi_d(const_1, 0xD8);
+ const_2 = __lasx_xvpermi_d(const_2, 0xD8);
+ t1 = __lasx_xvdp2add_w_h(const_64, temp1, const_1);
+ t2 = __lasx_xvdp2_w_h(temp2, const_2);
+ in0 = __lasx_xvadd_w(t1, t2);
+ in1 = __lasx_xvsub_w(t1, t2);
+ DUP2_ARG2(__lasx_xvsrai_w, in0, 7, in1, 7, in0, in1);
+ temp0 = __lasx_xvshuf4i_w(in0, 0x9C);
+ temp1 = __lasx_xvshuf4i_w(in1, 0x9C);
+
+ DUP4_ARG2(__lasx_xvldrepl_w, dest, 0, dst1, 0, dst2, 0, dst3, 0,
+ in0, in1, in2, in3);
+ temp2 = __lasx_xvilvl_w(in2, in0);
+ temp2 = __lasx_vext2xv_wu_bu(temp2);
+ temp3 = __lasx_xvilvl_w(in1, in3);
+ temp3 = __lasx_vext2xv_wu_bu(temp3);
+ temp0 = __lasx_xvadd_w(temp0, temp2);
+ temp1 = __lasx_xvadd_w(temp1, temp3);
+ DUP2_ARG1(__lasx_xvclip255_w, temp0, temp1, temp0, temp1);
+ temp1 = __lasx_xvpickev_h(temp1, temp0);
+ temp0 = __lasx_xvpickev_b(temp1, temp1);
+ __lasx_xvstelm_w(temp0, dest, 0, 0);
+ __lasx_xvstelm_w(temp0, dst1, 0, 5);
+ __lasx_xvstelm_w(temp0, dst2, 0, 4);
+ __lasx_xvstelm_w(temp0, dst3, 0, 1);
+}
+
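+/* 8x8 quarter-pel MC with both horizontal and vertical sub-pel offsets:
+ * a vertical 4-tap pass into 16-bit intermediates, an 8x8 transpose, a
+ * horizontal 4-tap pass and a transpose back before clipping and storing.
+ * The tap pairs come from para_value[mode - 1], e.g. mode 1 selects the
+ * (-4, 53, 18, -3) filter.
+ */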
+static void put_vc1_mspel_mc_h_v_lasx(uint8_t *dst, const uint8_t *src,
+ ptrdiff_t stride, int hmode, int vmode,
+ int rnd)
+{
+ __m256i in0, in1, in2, in3;
+ __m256i t0, t1, t2, t3, t4, t5, t6, t7;
+ __m256i temp0, temp1, const_para1_2, const_para0_3;
+ __m256i const_r, const_sh;
+ __m256i sh = {0x0000000400000000, 0x0000000500000001,
+ 0x0000000600000002, 0x0000000700000003};
+ static const uint8_t para_value[][4] = {{4, 3, 53, 18},
+ {1, 1, 9, 9},
+ {3, 4, 18, 53}};
+ static const int shift_value[] = {0, 5, 1, 5};
+ int shift = (shift_value[hmode] + shift_value[vmode]) >> 1;
+ int r = (1 << (shift - 1)) + rnd - 1;
+ const uint8_t *para_v = para_value[vmode - 1];
+ ptrdiff_t stride2 = stride << 1;
+ ptrdiff_t stride4 = stride << 2;
+ ptrdiff_t stride3 = stride2 + stride;
+
+ const_r = __lasx_xvreplgr2vr_h(r);
+ const_sh = __lasx_xvreplgr2vr_h(shift);
+ src -= 1, src -= stride;
+ const_para0_3 = __lasx_xvldrepl_h(para_v, 0);
+ const_para1_2 = __lasx_xvldrepl_h(para_v, 2);
+ DUP4_ARG2(__lasx_xvld, src, 0, src + stride, 0, src + stride2, 0,
+ src + stride3, 0, in0, in1, in2, in3);
+ DUP4_ARG2(__lasx_xvpermi_d, in0, 0xD8, in1, 0xD8, in2, 0xD8, in3, 0xD8,
+ in0, in1, in2, in3);
+ DUP2_ARG2(__lasx_xvilvl_b, in2, in1, in3, in0, temp0, temp1);
+ t0 = __lasx_xvdp2_h_bu(temp0, const_para1_2);
+ t0 = __lasx_xvdp2sub_h_bu(t0, temp1, const_para0_3);
+ src += stride4;
+ in0 = __lasx_xvld(src, 0);
+ in0 = __lasx_xvpermi_d(in0, 0xD8);
+ DUP2_ARG2(__lasx_xvilvl_b, in3, in2, in0, in1, temp0, temp1);
+ t1 = __lasx_xvdp2_h_bu(temp0, const_para1_2);
+ t1 = __lasx_xvdp2sub_h_bu(t1, temp1, const_para0_3);
+ src += stride;
+ in1 = __lasx_xvld(src, 0);
+ in1 = __lasx_xvpermi_d(in1, 0xD8);
+ DUP2_ARG2(__lasx_xvilvl_b, in0, in3, in1, in2, temp0, temp1);
+ t2 = __lasx_xvdp2_h_bu(temp0, const_para1_2);
+ t2 = __lasx_xvdp2sub_h_bu(t2, temp1, const_para0_3);
+ src += stride;
+ in2 = __lasx_xvld(src, 0);
+ in2 = __lasx_xvpermi_d(in2, 0xD8);
+ DUP2_ARG2(__lasx_xvilvl_b, in1, in0, in2, in3, temp0, temp1);
+ t3 = __lasx_xvdp2_h_bu(temp0, const_para1_2);
+ t3 = __lasx_xvdp2sub_h_bu(t3, temp1, const_para0_3);
+ src += stride;
+ in3 = __lasx_xvld(src, 0);
+ in3 = __lasx_xvpermi_d(in3, 0xD8);
+ DUP2_ARG2(__lasx_xvilvl_b, in2, in1, in3, in0, temp0, temp1);
+ t4 = __lasx_xvdp2_h_bu(temp0, const_para1_2);
+ t4 = __lasx_xvdp2sub_h_bu(t4, temp1, const_para0_3);
+ src += stride;
+ in0 = __lasx_xvld(src, 0);
+ in0 = __lasx_xvpermi_d(in0, 0xD8);
+ DUP2_ARG2(__lasx_xvilvl_b, in3, in2, in0, in1, temp0, temp1);
+ t5 = __lasx_xvdp2_h_bu(temp0, const_para1_2);
+ t5 = __lasx_xvdp2sub_h_bu(t5, temp1, const_para0_3);
+ src += stride;
+ in1 = __lasx_xvld(src, 0);
+ in1 = __lasx_xvpermi_d(in1, 0xD8);
+ DUP2_ARG2(__lasx_xvilvl_b, in0, in3, in1, in2, temp0, temp1);
+ t6 = __lasx_xvdp2_h_bu(temp0, const_para1_2);
+ t6 = __lasx_xvdp2sub_h_bu(t6, temp1, const_para0_3);
+ src += stride;
+ in2 = __lasx_xvld(src, 0);
+ in2 = __lasx_xvpermi_d(in2, 0xD8);
+ DUP2_ARG2(__lasx_xvilvl_b, in1, in0, in2, in3, temp0, temp1);
+ t7 = __lasx_xvdp2_h_bu(temp0, const_para1_2);
+ t7 = __lasx_xvdp2sub_h_bu(t7, temp1, const_para0_3);
+ DUP4_ARG2(__lasx_xvadd_h, t0, const_r, t1, const_r, t2, const_r, t3,
+ const_r, t0, t1, t2, t3);
+ DUP4_ARG2(__lasx_xvadd_h, t4, const_r, t5, const_r, t6, const_r, t7,
+ const_r, t4, t5, t6, t7);
+ DUP4_ARG2(__lasx_xvsra_h, t0, const_sh, t1, const_sh, t2, const_sh,
+ t3, const_sh, t0, t1, t2, t3);
+ DUP4_ARG2(__lasx_xvsra_h, t4, const_sh, t5, const_sh, t6, const_sh,
+ t7, const_sh, t4, t5, t6, t7);
+ LASX_TRANSPOSE8x8_H(t0, t1, t2, t3, t4, t5, t6, t7, t0,
+ t1, t2, t3, t4, t5, t6, t7);
+ para_v = para_value[hmode - 1];
+ const_para0_3 = __lasx_xvldrepl_h(para_v, 0);
+ const_para1_2 = __lasx_xvldrepl_h(para_v, 2);
+ const_para0_3 = __lasx_vext2xv_h_b(const_para0_3);
+ const_para1_2 = __lasx_vext2xv_h_b(const_para1_2);
+ r = 64 - rnd;
+ const_r = __lasx_xvreplgr2vr_w(r);
+ DUP4_ARG2(__lasx_xvpermi_d, t0, 0x72, t1, 0x72, t2, 0x72, t0, 0xD8,
+ in0, in1, in2, t0);
+ DUP4_ARG2(__lasx_xvpermi_d, t1, 0xD8, t2, 0xD8, t3, 0xD8, t4, 0xD8,
+ t1, t2, t3, t4);
+ DUP2_ARG2(__lasx_xvpermi_d, t5, 0xD8, t6, 0xD8, t5, t6);
+ t7 = __lasx_xvpermi_d(t7, 0xD8);
+ DUP2_ARG2(__lasx_xvilvl_h, t2, t1, t3, t0, temp0, temp1);
+ t0 = __lasx_xvdp2_w_h(temp0, const_para1_2);
+ t0 = __lasx_xvdp2sub_w_h(t0, temp1, const_para0_3);
+ DUP2_ARG2(__lasx_xvilvl_h, t3, t2, t4, t1, temp0, temp1);
+ t1 = __lasx_xvdp2_w_h(temp0, const_para1_2);
+ t1 = __lasx_xvdp2sub_w_h(t1, temp1, const_para0_3);
+ DUP2_ARG2(__lasx_xvilvl_h, t4, t3, t5, t2, temp0, temp1);
+ t2 = __lasx_xvdp2_w_h(temp0, const_para1_2);
+ t2 = __lasx_xvdp2sub_w_h(t2, temp1, const_para0_3);
+ DUP2_ARG2(__lasx_xvilvl_h, t5, t4, t6, t3, temp0, temp1);
+ t3 = __lasx_xvdp2_w_h(temp0, const_para1_2);
+ t3 = __lasx_xvdp2sub_w_h(t3, temp1, const_para0_3);
+ DUP2_ARG2(__lasx_xvilvl_h, t6, t5, t7, t4, temp0, temp1);
+ t4 = __lasx_xvdp2_w_h(temp0, const_para1_2);
+ t4 = __lasx_xvdp2sub_w_h(t4, temp1, const_para0_3);
+ DUP2_ARG2(__lasx_xvilvl_h, t7, t6, in0, t5, temp0, temp1);
+ t5 = __lasx_xvdp2_w_h(temp0, const_para1_2);
+ t5 = __lasx_xvdp2sub_w_h(t5, temp1, const_para0_3);
+ DUP2_ARG2(__lasx_xvilvl_h, in0, t7, in1, t6, temp0, temp1);
+ t6 = __lasx_xvdp2_w_h(temp0, const_para1_2);
+ t6 = __lasx_xvdp2sub_w_h(t6, temp1, const_para0_3);
+ DUP2_ARG2(__lasx_xvilvl_h, in1, in0, in2, t7, temp0, temp1);
+ t7 = __lasx_xvdp2_w_h(temp0, const_para1_2);
+ t7 = __lasx_xvdp2sub_w_h(t7, temp1, const_para0_3);
+ DUP4_ARG2(__lasx_xvadd_w, t0, const_r, t1, const_r, t2, const_r,
+ t3, const_r, t0, t1, t2, t3);
+ DUP4_ARG2(__lasx_xvadd_w, t4, const_r, t5, const_r, t6, const_r,
+ t7, const_r, t4, t5, t6, t7);
+ DUP4_ARG2(__lasx_xvsrai_w, t0, 7, t1, 7, t2, 7, t3, 7, t0, t1, t2, t3);
+ DUP4_ARG2(__lasx_xvsrai_w, t4, 7, t5, 7, t6, 7, t7, 7, t4, t5, t6, t7);
+ LASX_TRANSPOSE8x8_W(t0, t1, t2, t3, t4, t5, t6, t7,
+ t0, t1, t2, t3, t4, t5, t6, t7);
+ DUP4_ARG1(__lasx_xvclip255_w, t0, t1, t2, t3, t0, t1, t2, t3);
+ DUP4_ARG1(__lasx_xvclip255_w, t4, t5, t6, t7, t4, t5, t6, t7);
+ DUP4_ARG2(__lasx_xvpickev_h, t1, t0, t3, t2, t5, t4, t7, t6,
+ t0, t1, t2, t3);
+ DUP2_ARG2(__lasx_xvpickev_b, t1, t0, t3, t2, t0, t1);
+ t0 = __lasx_xvperm_w(t0, sh);
+ t1 = __lasx_xvperm_w(t1, sh);
+ __lasx_xvstelm_d(t0, dst, 0, 0);
+ __lasx_xvstelm_d(t0, dst + stride, 0, 1);
+ __lasx_xvstelm_d(t0, dst + stride2, 0, 2);
+ __lasx_xvstelm_d(t0, dst + stride3, 0, 3);
+ dst += stride4;
+ __lasx_xvstelm_d(t1, dst, 0, 0);
+ __lasx_xvstelm_d(t1, dst + stride, 0, 1);
+ __lasx_xvstelm_d(t1, dst + stride2, 0, 2);
+ __lasx_xvstelm_d(t1, dst + stride3, 0, 3);
+}
+
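+/* Wrappers: the 8x8 variant runs the h+v kernel once, the 16x16 variant
+ * tiles it over four 8x8 blocks.
+ */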
+#define PUT_VC1_MSPEL_MC_LASX(hmode, vmode) \
+void ff_put_vc1_mspel_mc ## hmode ## vmode ## _lasx(uint8_t *dst, \
+ const uint8_t *src, \
+ ptrdiff_t stride, int rnd) \
+{ \
+ put_vc1_mspel_mc_h_v_lasx(dst, src, stride, hmode, vmode, rnd); \
+} \
+void ff_put_vc1_mspel_mc ## hmode ## vmode ## _16_lasx(uint8_t *dst, \
+ const uint8_t *src, \
+ ptrdiff_t stride, int rnd) \
+{ \
+ put_vc1_mspel_mc_h_v_lasx(dst, src, stride, hmode, vmode, rnd); \
+ put_vc1_mspel_mc_h_v_lasx(dst + 8, src + 8, stride, hmode, vmode, rnd); \
+ dst += 8 * stride, src += 8 * stride; \
+ put_vc1_mspel_mc_h_v_lasx(dst, src, stride, hmode, vmode, rnd); \
+ put_vc1_mspel_mc_h_v_lasx(dst + 8, src + 8, stride, hmode, vmode, rnd); \
+}
+
+PUT_VC1_MSPEL_MC_LASX(1, 1);
+PUT_VC1_MSPEL_MC_LASX(1, 2);
+PUT_VC1_MSPEL_MC_LASX(1, 3);
+
+PUT_VC1_MSPEL_MC_LASX(2, 1);
+PUT_VC1_MSPEL_MC_LASX(2, 2);
+PUT_VC1_MSPEL_MC_LASX(2, 3);
+
+PUT_VC1_MSPEL_MC_LASX(3, 1);
+PUT_VC1_MSPEL_MC_LASX(3, 2);
+PUT_VC1_MSPEL_MC_LASX(3, 3);
+
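+/* 8-wide bilinear chroma MC, no-rounding variant:
+ * dst[i] = (A*a + B*b + C*c + D*d + 28) >> 6, with the weights A..D built
+ * from the 1/8-pel offsets x and y.
+ */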
+void ff_put_no_rnd_vc1_chroma_mc8_lasx(uint8_t *dst /* align 8 */,
+ uint8_t *src /* align 1 */,
+ ptrdiff_t stride, int h, int x, int y)
+{
+ const int intA = (8 - x) * (8 - y);
+ const int intB = (x) * (8 - y);
+ const int intC = (8 - x) * (y);
+ const int intD = (x) * (y);
+ __m256i src00, src01, src10, src11;
+ __m256i A, B, C, D;
+ int i;
+
+ av_assert2(x < 8 && y < 8 && x >= 0 && y >= 0);
+
+ A = __lasx_xvreplgr2vr_h(intA);
+ B = __lasx_xvreplgr2vr_h(intB);
+ C = __lasx_xvreplgr2vr_h(intC);
+ D = __lasx_xvreplgr2vr_h(intD);
+ for (i = 0; i < h; i++) {
+ DUP2_ARG2(__lasx_xvld, src, 0, src, 1, src00, src01);
+ src += stride;
+ DUP2_ARG2(__lasx_xvld, src, 0, src, 1, src10, src11);
+
+ DUP4_ARG1(__lasx_vext2xv_hu_bu, src00, src01, src10, src11,
+ src00, src01, src10, src11);
+ DUP4_ARG2(__lasx_xvmul_h, src00, A, src01, B, src10, C, src11, D,
+ src00, src01, src10, src11);
+ src00 = __lasx_xvadd_h(src00, src01);
+ src10 = __lasx_xvadd_h(src10, src11);
+ src00 = __lasx_xvadd_h(src00, src10);
+ src00 = __lasx_xvaddi_hu(src00, 28);
+ src00 = __lasx_xvsrli_h(src00, 6);
+ src00 = __lasx_xvpickev_b(src00, src00);
+ __lasx_xvstelm_d(src00, dst, 0, 0);
+ dst += stride;
+ }
+}
+
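+/* 16x16 MC with a vertical sub-pel offset only: one 16-byte output row per
+ * iteration, filtered with the 4-tap kernel selected by vmode (the taps are
+ * packed as byte pairs in para_value).
+ */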
+static void put_vc1_mspel_mc_v_lasx(uint8_t *dst, const uint8_t *src,
+ ptrdiff_t stride, int vmode, int rnd)
+{
+ __m256i in0, in1, in2, in3, temp0, temp1, t0;
+ __m256i const_para0_3, const_para1_2, const_r, const_sh;
+ static const uint16_t para_value[][2] = {{0x0304, 0x1235},
+ {0x0101, 0x0909},
+ {0x0403, 0x3512}};
+ const uint16_t *para_v = para_value[vmode - 1];
+ static const int shift_value[] = {0, 6, 4, 6};
+ int add_value[3];
+ ptrdiff_t stride_2x = stride << 1;
+ int i = 0;
+ add_value[2] = add_value[0] = 31 + rnd, add_value[1] = 7 + rnd;
+
+ const_r = __lasx_xvreplgr2vr_h(add_value[vmode - 1]);
+ const_sh = __lasx_xvreplgr2vr_h(shift_value[vmode]);
+ const_para0_3 = __lasx_xvreplgr2vr_h(*para_v);
+ const_para1_2 = __lasx_xvreplgr2vr_h(*(para_v + 1));
+
+ DUP2_ARG2(__lasx_xvld, src - stride, 0, src, 0, in0, in1);
+ in2 = __lasx_xvld(src + stride, 0);
+ in0 = __lasx_xvpermi_d(in0, 0xD8);
+ in1 = __lasx_xvpermi_d(in1, 0xD8);
+ in2 = __lasx_xvpermi_d(in2, 0xD8);
+ for (; i < 16; i++) {
+ in3 = __lasx_xvld(src + stride_2x, 0);
+ in3 = __lasx_xvpermi_d(in3, 0xD8);
+ DUP2_ARG2(__lasx_xvilvl_b, in2, in1, in3, in0, temp0, temp1);
+ t0 = __lasx_xvdp2_h_bu(temp0, const_para1_2);
+ t0 = __lasx_xvdp2sub_h_bu(t0, temp1, const_para0_3);
+ t0 = __lasx_xvadd_h(t0, const_r);
+ t0 = __lasx_xvsra_h(t0, const_sh);
+ t0 = __lasx_xvclip255_h(t0);
+ t0 = __lasx_xvpickev_b(t0, t0);
+ __lasx_xvstelm_d(t0, dst, 0, 0);
+ __lasx_xvstelm_d(t0, dst, 8, 2);
+ dst += stride;
+ src += stride;
+ in0 = in1;
+ in1 = in2;
+ in2 = in3;
+ }
+}
+
+#define PUT_VC1_MSPEL_MC_V_LASX(vmode) \
+void ff_put_vc1_mspel_mc0 ## vmode ## _16_lasx(uint8_t *dst, \
+ const uint8_t *src, \
+ ptrdiff_t stride, int rnd) \
+{ \
+ put_vc1_mspel_mc_v_lasx(dst, src, stride, vmode, rnd); \
+}
+
+PUT_VC1_MSPEL_MC_V_LASX(1);
+PUT_VC1_MSPEL_MC_V_LASX(2);
+PUT_VC1_MSPEL_MC_V_LASX(3);
+
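+/* Apply the 4-tap filter, rounding, shift, clipping and byte packing to one
+ * row of the transposed 16x16 block.
+ */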
+#define ROW_LASX(in0, in1, in2, in3, out0) \
+ DUP2_ARG2(__lasx_xvilvl_b, in2, in1, in3, in0, tmp0_m, tmp1_m); \
+ out0 = __lasx_xvdp2_h_bu(tmp0_m, const_para1_2); \
+ out0 = __lasx_xvdp2sub_h_bu(out0, tmp1_m, const_para0_3); \
+ out0 = __lasx_xvadd_h(out0, const_r); \
+ out0 = __lasx_xvsra_h(out0, const_sh); \
+ out0 = __lasx_xvclip255_h(out0); \
+ out0 = __lasx_xvpickev_b(out0, out0); \
+ out0 = __lasx_xvpermi_d(out0, 0xD8);
+
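+/* 16x16 MC with a horizontal sub-pel offset only: the 16 input rows are
+ * transposed so the horizontal filter can be applied column-wise via
+ * ROW_LASX (out16..out18 provide the extra right-edge columns the 4-tap
+ * filter needs), then the result is transposed back and stored row by row.
+ */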
+static void put_vc1_mspel_mc_h_lasx(uint8_t *dst, const uint8_t *src,
+ ptrdiff_t stride, int hmode, int rnd)
+{
+ __m256i in0, in1, in2, in3, in4, in5, in6, in7,
+ in8, in9, in10, in11, in12, in13, in14, in15;
+ __m256i out0, out1, out2, out3, out4, out5, out6, out7, out8, out9,
+ out10, out11, out12, out13, out14, out15, out16, out17, out18;
+ __m256i const_para0_3, const_para1_2, const_r, const_sh;
+ __m256i tmp0_m, tmp1_m, tmp2_m, tmp3_m;
+ __m256i tmp4_m, tmp5_m, tmp6_m, tmp7_m;
+ __m256i t0, t1, t2, t3, t4, t5, t6, t7;
+ ptrdiff_t stride2 = stride << 1;
+ ptrdiff_t stride4 = stride << 2;
+ ptrdiff_t stride3 = stride2 + stride;
+ static const uint16_t para_value[][2] = {{0x0304, 0x1235},
+ {0x0101, 0x0909},
+ {0x0403, 0x3512}};
+ const uint16_t *para_v = para_value[hmode - 1];
+ static const int shift_value[] = {0, 6, 4, 6};
+ int add_value[3];
+ uint8_t *_src = (uint8_t*)src - 1;
+ add_value[2] = add_value[0] = 32 - rnd, add_value[1] = 8 - rnd;
+
+ const_r = __lasx_xvreplgr2vr_h(add_value[hmode - 1]);
+ const_sh = __lasx_xvreplgr2vr_h(shift_value[hmode]);
+ const_para0_3 = __lasx_xvreplgr2vr_h(*para_v);
+ const_para1_2 = __lasx_xvreplgr2vr_h(*(para_v + 1));
+
+ in0 = __lasx_xvld(_src, 0);
+ DUP2_ARG2(__lasx_xvldx, _src, stride, _src, stride2, in1, in2);
+ in3 = __lasx_xvldx(_src, stride3);
+ _src += stride4;
+ in4 = __lasx_xvld(_src, 0);
+ DUP2_ARG2(__lasx_xvldx, _src, stride, _src, stride2, in5, in6);
+ in7 = __lasx_xvldx(_src, stride3);
+ _src += stride4;
+ in8 = __lasx_xvld(_src, 0);
+ DUP2_ARG2(__lasx_xvldx, _src, stride, _src, stride2, in9, in10);
+ in11 = __lasx_xvldx(_src, stride3);
+ _src += stride4;
+ in12 = __lasx_xvld(_src, 0);
+ DUP2_ARG2(__lasx_xvldx, _src, stride, _src, stride2, in13, in14);
+ in15 = __lasx_xvldx(_src, stride3);
+ DUP4_ARG2(__lasx_xvilvl_b, in2, in0, in3, in1, in6, in4, in7, in5,
+ tmp0_m, tmp1_m, tmp2_m, tmp3_m);
+ DUP4_ARG2(__lasx_xvilvl_b, in10, in8, in11, in9, in14, in12, in15, in13,
+ tmp4_m, tmp5_m, tmp6_m, tmp7_m);
+ DUP4_ARG2(__lasx_xvilvl_b, tmp1_m, tmp0_m, tmp3_m, tmp2_m, tmp5_m, tmp4_m,
+ tmp7_m, tmp6_m, t0, t2, t4, t6);
+ DUP4_ARG2(__lasx_xvilvh_b, tmp1_m, tmp0_m, tmp3_m, tmp2_m, tmp5_m, tmp4_m,
+ tmp7_m, tmp6_m, t1, t3, t5, t7);
+ DUP4_ARG2(__lasx_xvilvl_w, t2, t0, t3, t1, t6, t4, t7, t5, tmp0_m, tmp4_m,
+ tmp1_m, tmp5_m);
+ DUP4_ARG2(__lasx_xvilvh_w, t2, t0, t3, t1, t6, t4, t7, t5, tmp2_m, tmp6_m,
+ tmp3_m, tmp7_m);
+ DUP4_ARG2(__lasx_xvilvl_d, tmp1_m, tmp0_m, tmp3_m, tmp2_m, tmp5_m, tmp4_m,
+ tmp7_m, tmp6_m, out0, out2, out4, out6);
+ DUP4_ARG2(__lasx_xvilvh_d, tmp1_m, tmp0_m, tmp3_m, tmp2_m, tmp5_m, tmp4_m,
+ tmp7_m, tmp6_m, out1, out3, out5, out7);
+
+ DUP4_ARG2(__lasx_xvilvh_b, in2, in0, in3, in1, in6, in4, in7, in5,
+ tmp0_m, tmp1_m, tmp2_m, tmp3_m);
+ DUP4_ARG2(__lasx_xvilvh_b, in10, in8, in11, in9, in14, in12, in15, in13,
+ tmp4_m, tmp5_m, tmp6_m, tmp7_m);
+ DUP4_ARG2(__lasx_xvilvl_b, tmp1_m, tmp0_m, tmp3_m, tmp2_m, tmp5_m, tmp4_m,
+ tmp7_m, tmp6_m, t0, t2, t4, t6);
+ DUP4_ARG2(__lasx_xvilvh_b, tmp1_m, tmp0_m, tmp3_m, tmp2_m, tmp5_m, tmp4_m,
+ tmp7_m, tmp6_m, t1, t3, t5, t7);
+ DUP4_ARG2(__lasx_xvilvl_w, t2, t0, t3, t1, t6, t4, t7, t5, tmp0_m, tmp4_m,
+ tmp1_m, tmp5_m);
+ DUP4_ARG2(__lasx_xvilvh_w, t2, t0, t3, t1, t6, t4, t7, t5, tmp2_m, tmp6_m,
+ tmp3_m, tmp7_m);
+ DUP4_ARG2(__lasx_xvilvl_d, tmp1_m, tmp0_m, tmp3_m, tmp2_m, tmp5_m, tmp4_m,
+ tmp7_m, tmp6_m, out8, out10, out12, out14);
+ DUP4_ARG2(__lasx_xvilvh_d, tmp1_m, tmp0_m, tmp3_m, tmp2_m, tmp5_m, tmp4_m,
+ tmp7_m, tmp6_m, out9, out11, out13, out15);
+ DUP2_ARG3(__lasx_xvpermi_q, out0, out0, 0x31, out1, out1, 0x31, out16, out17);
+ out18 = __lasx_xvpermi_q(out2, out2, 0x31);
+
+ DUP4_ARG2(__lasx_xvpermi_d, out0, 0xD8, out1, 0xD8, out2, 0xD8, out3, 0xD8,
+ out0, out1, out2, out3);
+ DUP4_ARG2(__lasx_xvpermi_d, out4, 0xD8, out5, 0xD8, out6, 0xD8, out7, 0xD8,
+ out4, out5, out6, out7);
+ DUP4_ARG2(__lasx_xvpermi_d, out8, 0xD8, out9, 0xD8, out10, 0xD8, out11,
+ 0xD8, out8, out9, out10, out11);
+ DUP4_ARG2(__lasx_xvpermi_d, out12, 0xD8, out13, 0xD8, out14, 0xD8, out15,
+ 0xD8, out12, out13, out14, out15);
+ out16 = __lasx_xvpermi_d(out16, 0xD8);
+ out17 = __lasx_xvpermi_d(out17, 0xD8);
+ out18 = __lasx_xvpermi_d(out18, 0xD8);
+
+ ROW_LASX(out0, out1, out2, out3, in0);
+ ROW_LASX(out1, out2, out3, out4, in1);
+ ROW_LASX(out2, out3, out4, out5, in2);
+ ROW_LASX(out3, out4, out5, out6, in3);
+ ROW_LASX(out4, out5, out6, out7, in4);
+ ROW_LASX(out5, out6, out7, out8, in5);
+ ROW_LASX(out6, out7, out8, out9, in6);
+ ROW_LASX(out7, out8, out9, out10, in7);
+ ROW_LASX(out8, out9, out10, out11, in8);
+ ROW_LASX(out9, out10, out11, out12, in9);
+ ROW_LASX(out10, out11, out12, out13, in10);
+ ROW_LASX(out11, out12, out13, out14, in11);
+ ROW_LASX(out12, out13, out14, out15, in12);
+ ROW_LASX(out13, out14, out15, out16, in13);
+ ROW_LASX(out14, out15, out16, out17, in14);
+ ROW_LASX(out15, out16, out17, out18, in15);
+
+ DUP4_ARG2(__lasx_xvilvl_b, in2, in0, in3, in1, in6, in4, in7, in5,
+ tmp0_m, tmp1_m, tmp2_m, tmp3_m);
+ DUP4_ARG2(__lasx_xvilvl_b, in10, in8, in11, in9, in14, in12, in15, in13,
+ tmp4_m, tmp5_m, tmp6_m, tmp7_m);
+ DUP4_ARG2(__lasx_xvilvl_b, tmp1_m, tmp0_m, tmp3_m, tmp2_m, tmp5_m, tmp4_m,
+ tmp7_m, tmp6_m, t0, t2, t4, t6);
+ DUP4_ARG2(__lasx_xvilvh_b, tmp1_m, tmp0_m, tmp3_m, tmp2_m, tmp5_m, tmp4_m,
+ tmp7_m, tmp6_m, t1, t3, t5, t7);
+ DUP4_ARG2(__lasx_xvilvl_w, t2, t0, t3, t1, t6, t4, t7, t5, tmp0_m, tmp4_m,
+ tmp1_m, tmp5_m);
+ DUP4_ARG2(__lasx_xvilvh_w, t2, t0, t3, t1, t6, t4, t7, t5, tmp2_m, tmp6_m,
+ tmp3_m, tmp7_m);
+ DUP4_ARG2(__lasx_xvilvl_d, tmp1_m, tmp0_m, tmp3_m, tmp2_m, tmp5_m, tmp4_m,
+ tmp7_m, tmp6_m, out0, out2, out4, out6);
+ DUP4_ARG2(__lasx_xvilvh_d, tmp1_m, tmp0_m, tmp3_m, tmp2_m, tmp5_m, tmp4_m,
+ tmp7_m, tmp6_m, out1, out3, out5, out7);
+
+ DUP4_ARG2(__lasx_xvilvh_b, in2, in0, in3, in1, in6, in4, in7, in5,
+ tmp0_m, tmp1_m, tmp2_m, tmp3_m);
+ DUP4_ARG2(__lasx_xvilvh_b, in10, in8, in11, in9, in14, in12, in15, in13,
+ tmp4_m, tmp5_m, tmp6_m, tmp7_m);
+ DUP4_ARG2(__lasx_xvilvl_b, tmp1_m, tmp0_m, tmp3_m, tmp2_m, tmp5_m, tmp4_m,
+ tmp7_m, tmp6_m, t0, t2, t4, t6);
+ DUP4_ARG2(__lasx_xvilvh_b, tmp1_m, tmp0_m, tmp3_m, tmp2_m, tmp5_m, tmp4_m,
+ tmp7_m, tmp6_m, t1, t3, t5, t7);
+ DUP4_ARG2(__lasx_xvilvl_w, t2, t0, t3, t1, t6, t4, t7, t5, tmp0_m, tmp4_m,
+ tmp1_m, tmp5_m);
+ DUP4_ARG2(__lasx_xvilvh_w, t2, t0, t3, t1, t6, t4, t7, t5, tmp2_m, tmp6_m,
+ tmp3_m, tmp7_m);
+ DUP4_ARG2(__lasx_xvilvl_d, tmp1_m, tmp0_m, tmp3_m, tmp2_m, tmp5_m, tmp4_m,
+ tmp7_m, tmp6_m, out8, out10, out12, out14);
+ DUP4_ARG2(__lasx_xvilvh_d, tmp1_m, tmp0_m, tmp3_m, tmp2_m, tmp5_m, tmp4_m,
+ tmp7_m, tmp6_m, out9, out11, out13, out15);
+ __lasx_xvstelm_d(out0, dst, 0, 0);
+ __lasx_xvstelm_d(out0, dst, 8, 1);
+ dst += stride;
+ __lasx_xvstelm_d(out1, dst, 0, 0);
+ __lasx_xvstelm_d(out1, dst, 8, 1);
+ dst += stride;
+ __lasx_xvstelm_d(out2, dst, 0, 0);
+ __lasx_xvstelm_d(out2, dst, 8, 1);
+ dst += stride;
+ __lasx_xvstelm_d(out3, dst, 0, 0);
+ __lasx_xvstelm_d(out3, dst, 8, 1);
+ dst += stride;
+ __lasx_xvstelm_d(out4, dst, 0, 0);
+ __lasx_xvstelm_d(out4, dst, 8, 1);
+ dst += stride;
+ __lasx_xvstelm_d(out5, dst, 0, 0);
+ __lasx_xvstelm_d(out5, dst, 8, 1);
+ dst += stride;
+ __lasx_xvstelm_d(out6, dst, 0, 0);
+ __lasx_xvstelm_d(out6, dst, 8, 1);
+ dst += stride;
+ __lasx_xvstelm_d(out7, dst, 0, 0);
+ __lasx_xvstelm_d(out7, dst, 8, 1);
+ dst += stride;
+ __lasx_xvstelm_d(out8, dst, 0, 0);
+ __lasx_xvstelm_d(out8, dst, 8, 1);
+ dst += stride;
+ __lasx_xvstelm_d(out9, dst, 0, 0);
+ __lasx_xvstelm_d(out9, dst, 8, 1);
+ dst += stride;
+ __lasx_xvstelm_d(out10, dst, 0, 0);
+ __lasx_xvstelm_d(out10, dst, 8, 1);
+ dst += stride;
+ __lasx_xvstelm_d(out11, dst, 0, 0);
+ __lasx_xvstelm_d(out11, dst, 8, 1);
+ dst += stride;
+ __lasx_xvstelm_d(out12, dst, 0, 0);
+ __lasx_xvstelm_d(out12, dst, 8, 1);
+ dst += stride;
+ __lasx_xvstelm_d(out13, dst, 0, 0);
+ __lasx_xvstelm_d(out13, dst, 8, 1);
+ dst += stride;
+ __lasx_xvstelm_d(out14, dst, 0, 0);
+ __lasx_xvstelm_d(out14, dst, 8, 1);
+ dst += stride;
+ __lasx_xvstelm_d(out15, dst, 0, 0);
+ __lasx_xvstelm_d(out15, dst, 8, 1);
+}
+
+#define PUT_VC1_MSPEL_MC_H_LASX(hmode) \
+void ff_put_vc1_mspel_mc ## hmode ## 0_16_lasx(uint8_t *dst, \
+ const uint8_t *src, \
+ ptrdiff_t stride, int rnd) \
+{ \
+ put_vc1_mspel_mc_h_lasx(dst, src, stride, hmode, rnd); \
+}
+
+PUT_VC1_MSPEL_MC_H_LASX(1);
+PUT_VC1_MSPEL_MC_H_LASX(2);
+PUT_VC1_MSPEL_MC_H_LASX(3);
diff --git a/libavcodec/loongarch/vc1dsp_loongarch.h b/libavcodec/loongarch/vc1dsp_loongarch.h
new file mode 100644
index 0000000000..398631aecc
--- /dev/null
+++ b/libavcodec/loongarch/vc1dsp_loongarch.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2021 Loongson Technology Corporation Limited
+ * Contributed by Hao Chen <chenhao@loongson.cn>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_LOONGARCH_VC1DSP_LOONGARCH_H
+#define AVCODEC_LOONGARCH_VC1DSP_LOONGARCH_H
+
+#include "libavcodec/vc1dsp.h"
+#include "libavutil/avassert.h"
+
+void ff_vc1_inv_trans_8x8_lasx(int16_t block[64]);
+void ff_vc1_inv_trans_8x8_dc_lasx(uint8_t *dest, ptrdiff_t stride, int16_t *block);
+void ff_vc1_inv_trans_8x4_lasx(uint8_t *dest, ptrdiff_t stride, int16_t *block);
+void ff_vc1_inv_trans_8x4_dc_lasx(uint8_t *dest, ptrdiff_t stride, int16_t *block);
+void ff_vc1_inv_trans_4x8_dc_lasx(uint8_t *dest, ptrdiff_t stride, int16_t *block);
+void ff_vc1_inv_trans_4x8_lasx(uint8_t *dest, ptrdiff_t stride, int16_t *block);
+void ff_vc1_inv_trans_4x4_dc_lasx(uint8_t *dest, ptrdiff_t stride, int16_t *block);
+void ff_vc1_inv_trans_4x4_lasx(uint8_t *dest, ptrdiff_t stride, int16_t *block);
+
+#define FF_PUT_VC1_MSPEL_MC_LASX(hmode, vmode) \
+void ff_put_vc1_mspel_mc ## hmode ## vmode ## _lasx(uint8_t *dst, \
+ const uint8_t *src, \
+ ptrdiff_t stride, int rnd); \
+void ff_put_vc1_mspel_mc ## hmode ## vmode ## _16_lasx(uint8_t *dst, \
+ const uint8_t *src, \
+ ptrdiff_t stride, int rnd);
+
+FF_PUT_VC1_MSPEL_MC_LASX(1, 1);
+FF_PUT_VC1_MSPEL_MC_LASX(1, 2);
+FF_PUT_VC1_MSPEL_MC_LASX(1, 3);
+
+FF_PUT_VC1_MSPEL_MC_LASX(2, 1);
+FF_PUT_VC1_MSPEL_MC_LASX(2, 2);
+FF_PUT_VC1_MSPEL_MC_LASX(2, 3);
+
+FF_PUT_VC1_MSPEL_MC_LASX(3, 1);
+FF_PUT_VC1_MSPEL_MC_LASX(3, 2);
+FF_PUT_VC1_MSPEL_MC_LASX(3, 3);
+
+#define FF_PUT_VC1_MSPEL_MC_V_LASX(vmode) \
+void ff_put_vc1_mspel_mc0 ## vmode ## _16_lasx(uint8_t *dst, \
+ const uint8_t *src, \
+ ptrdiff_t stride, int rnd);
+
+FF_PUT_VC1_MSPEL_MC_V_LASX(1);
+FF_PUT_VC1_MSPEL_MC_V_LASX(2);
+FF_PUT_VC1_MSPEL_MC_V_LASX(3);
+
+#define FF_PUT_VC1_MSPEL_MC_H_LASX(hmode) \
+void ff_put_vc1_mspel_mc ## hmode ## 0_16_lasx(uint8_t *dst, \
+ const uint8_t *src, \
+ ptrdiff_t stride, int rnd);
+
+FF_PUT_VC1_MSPEL_MC_H_LASX(1);
+FF_PUT_VC1_MSPEL_MC_H_LASX(2);
+FF_PUT_VC1_MSPEL_MC_H_LASX(3);
+
+void ff_put_no_rnd_vc1_chroma_mc8_lasx(uint8_t *dst /* align 8 */,
+ uint8_t *src /* align 1 */,
+ ptrdiff_t stride, int h, int x, int y);
+
+#endif /* AVCODEC_LOONGARCH_VC1DSP_LOONGARCH_H */
diff --git a/libavcodec/vc1dsp.c b/libavcodec/vc1dsp.c
index c25a6f3adf..04fc8b12f3 100644
--- a/libavcodec/vc1dsp.c
+++ b/libavcodec/vc1dsp.c
@@ -1039,4 +1039,6 @@ av_cold void ff_vc1dsp_init(VC1DSPContext *dsp)
ff_vc1dsp_init_x86(dsp);
if (ARCH_MIPS)
ff_vc1dsp_init_mips(dsp);
+ if (ARCH_LOONGARCH)
+ ff_vc1dsp_init_loongarch(dsp);
}
diff --git a/libavcodec/vc1dsp.h b/libavcodec/vc1dsp.h
index 75db62b1b4..c6443acb20 100644
--- a/libavcodec/vc1dsp.h
+++ b/libavcodec/vc1dsp.h
@@ -88,5 +88,6 @@ void ff_vc1dsp_init_arm(VC1DSPContext* dsp);
void ff_vc1dsp_init_ppc(VC1DSPContext *c);
void ff_vc1dsp_init_x86(VC1DSPContext* dsp);
void ff_vc1dsp_init_mips(VC1DSPContext* dsp);
+void ff_vc1dsp_init_loongarch(VC1DSPContext* dsp);
#endif /* AVCODEC_VC1DSP_H */
--
2.20.1
* Re: [FFmpeg-devel] Optimize VP8,VP9,WMV3 decoding for loongarch.
2021-12-18 14:27 [FFmpeg-devel] Optimize VP8,VP9,WMV3 decoding for loongarch Hao Chen
` (3 preceding siblings ...)
2021-12-18 14:27 ` [FFmpeg-devel] [PATCH 4/4] avcodec: [loongarch] Optimize vc1dsp with LASX Hao Chen
@ 2021-12-20 8:37 ` Shiyou Yin
2021-12-23 11:33 ` Michael Niedermayer
4 siblings, 1 reply; 10+ messages in thread
From: Shiyou Yin @ 2021-12-20 8:37 UTC (permalink / raw)
To: FFmpeg development discussions and patches
> On Dec 18, 2021, at 10:27 PM, Hao Chen <chenhao@loongson.cn> wrote:
>
> ./ffmpeg -i ../9_vp8_1080p_30fps_2Mbps.webm -f rawvideo -y /dev/null -an
> before: 210fps
> after : 585fps
> ffmpeg -i ../10_vp9_1080p_30fps_3Mbps.webm -f rawvideo -y /dev/null -an
> before:170fps
> after :294fps
> ./ffmpeg -i 11_wmv3_720p_24fps_7Mbps.wmv -f rawvideo -y /dev/null -an
> before:131fps
> after :229fps
>
> [PATCH 1/4] avcodec: [loongarch] Optimize vp8_lpf/mc with LSX.
> [PATCH 2/4] avcodec: [loongarch] Optimize vp9_mc/intra with LSX.
> [PATCH 3/4] avcodec: [loongarch] Optimize vp9_lpf/idct with LSX.
> [PATCH 4/4] avcodec: [loongarch] Optimize vc1dsp with LASX.
>
LGTM
* Re: [FFmpeg-devel] Optimize VP8,VP9,WMV3 decoding for loongarch.
2021-12-20 8:37 ` [FFmpeg-devel] Optimize VP8,VP9,WMV3 decoding for loongarch Shiyou Yin
@ 2021-12-23 11:33 ` Michael Niedermayer
0 siblings, 0 replies; 10+ messages in thread
From: Michael Niedermayer @ 2021-12-23 11:33 UTC (permalink / raw)
To: FFmpeg development discussions and patches
On Mon, Dec 20, 2021 at 04:37:35PM +0800, Shiyou Yin wrote:
>
>
> > On Dec 18, 2021, at 10:27 PM, Hao Chen <chenhao@loongson.cn> wrote:
> >
> > ./ffmpeg -i ../9_vp8_1080p_30fps_2Mbps.webm -f rawvideo -y /dev/null -an
> > before: 210fps
> > after : 585fps
> > ffmpeg -i ../10_vp9_1080p_30fps_3Mbps.webm -f rawvideo -y /dev/null -an
> > before:170fps
> > after :294fps
> > ./ffmpeg -i 11_wmv3_720p_24fps_7Mbps.wmv -f rawvideo -y /dev/null -an
> > before:131fps
> > after :229fps
> >
> > [PATCH 1/4] avcodec: [loongarch] Optimize vp8_lpf/mc with LSX.
> > [PATCH 2/4] avcodec: [loongarch] Optimize vp9_mc/intra with LSX.
> > [PATCH 3/4] avcodec: [loongarch] Optimize vp9_lpf/idct with LSX.
> > [PATCH 4/4] avcodec: [loongarch] Optimize vc1dsp with LASX.
> >
> LGTM
will apply patchset
thx
[...]
--
Michael GnuPG fingerprint: 9FF2128B147EF6730BADF133611EC787040B0FAB
The misfortune of the wise is better than the prosperity of the fool.
-- Epicurus