* [FFmpeg-devel] [PATCH] x86/tx_float: implement inverse MDCT AVX2 assembly
@ 2022-09-01 21:47 Lynne
[not found] ` <NAv0PJm--3-2@lynne.ee-NAv0T7a----2>
0 siblings, 1 reply; 4+ messages in thread
From: Lynne @ 2022-09-01 21:47 UTC (permalink / raw)
To: Ffmpeg Devel
[-- Attachment #1: Type: text/plain, Size: 1448 bytes --]
This commit implements an iMDCT in pure assembly.
This is capable of processing any mod-8 transforms, rather than just
power of two, but since power of two is all we have assembly for
currently, that's what's supported.
It would really benefit if we could somehow use the C code to decide
which function to jump into, but exposing function labels from assembly
into C is anything but easy.
The post-transform loop could probably be improved.
This was somewhat annoying to write, as we must support arbitrary
strides during runtime. There's a fast branch for stride == 4 bytes
and a slower one which uses vgatherdps.
Benchmarks for stride == 4 for old (av_imdct_half) vs new (av_tx):
128pt:
2791 decicycles in av_tx (imdct),16775675 runs, 1541 skips
3024 decicycles in av_imdct_half,16776779 runs, 437 skips
256pt:
5055 decicycles in av_tx (imdct), 2096602 runs, 550 skips
5324 decicycles in av_imdct_half, 2097046 runs, 106 skips
512pt:
9922 decicycles in av_tx (imdct), 2096983 runs, 169 skips
10390 decicycles in av_imdct_half, 2097002 runs, 150 skips
1024pt:
20482 decicycles in av_tx (imdct), 2097089 runs, 63 skips
20662 decicycles in av_imdct_half, 2097115 runs, 37 skips
Patch attached.
[-- Attachment #2: 0001-x86-tx_float-implement-inverse-MDCT-AVX2-assembly.patch --]
[-- Type: text/x-diff, Size: 11995 bytes --]
From 724dc202806a34995694f734ea20fc92c365ef6a Mon Sep 17 00:00:00 2001
From: Lynne <dev@lynne.ee>
Date: Thu, 1 Sep 2022 23:26:29 +0200
Subject: [PATCH] x86/tx_float: implement inverse MDCT AVX2 assembly
This commit implements an iMDCT in pure assembly.
This is capable of processing any mod-8 transforms, rather than just
power of two, but since power of two is all we have assembly for
currently, that's what's supported.
It would really benefit if we could somehow use the C code to decide
which function to jump into, but exposing function labels from assembly
into C is anything but easy.
The post-transform loop could probably be improved.
This was somewhat annoying to write, as we must support arbitrary
strides during runtime. There's a fast branch for stride == 4 bytes
and a slower one which uses vgatherdps.
Benchmarks for stride == 4 for old (av_imdct_half) vs new (av_tx):
128pt:
2791 decicycles in av_tx (imdct),16775675 runs, 1541 skips
3024 decicycles in av_imdct_half,16776779 runs, 437 skips
256pt:
5055 decicycles in av_tx (imdct), 2096602 runs, 550 skips
5324 decicycles in av_imdct_half, 2097046 runs, 106 skips
512pt:
9922 decicycles in av_tx (imdct), 2096983 runs, 169 skips
10390 decicycles in av_imdct_half, 2097002 runs, 150 skips
1024pt:
20482 decicycles in av_tx (imdct), 2097089 runs, 63 skips
20662 decicycles in av_imdct_half, 2097115 runs, 37 skips
---
libavutil/tx.c | 19 ++--
libavutil/tx_priv.h | 8 +-
libavutil/x86/tx_float.asm | 168 +++++++++++++++++++++++++++++++++-
libavutil/x86/tx_float_init.c | 29 +++++-
4 files changed, 209 insertions(+), 15 deletions(-)
diff --git a/libavutil/tx.c b/libavutil/tx.c
index 28e49a5d41..01f9cb7ea0 100644
--- a/libavutil/tx.c
+++ b/libavutil/tx.c
@@ -206,23 +206,24 @@ static void parity_revtab_generator(int *revtab, int n, int inv, int offset,
1, 1, len >> 1, basis, dual_stride, inv_lookup);
}
-int ff_tx_gen_split_radix_parity_revtab(AVTXContext *s, int invert_lookup,
- int basis, int dual_stride)
+int ff_tx_gen_split_radix_parity_revtab(AVTXContext *s, int len, int inv,
+ int inv_lookup, int basis, int dual_stride)
{
- int len = s->len;
- int inv = s->inv;
-
- if (!(s->map = av_mallocz(len*sizeof(*s->map))))
- return AVERROR(ENOMEM);
-
basis >>= 1;
if (len < basis)
return AVERROR(EINVAL);
+ if (!(s->map = av_mallocz((inv_lookup == -1 ? 2 : 1)*len*sizeof(*s->map))))
+ return AVERROR(ENOMEM);
+
av_assert0(!dual_stride || !(dual_stride & (dual_stride - 1)));
av_assert0(dual_stride <= basis);
+
parity_revtab_generator(s->map, len, inv, 0, 0, 0, len,
- basis, dual_stride, invert_lookup);
+ basis, dual_stride, inv_lookup != 0);
+ if (inv_lookup == -1)
+ parity_revtab_generator(s->map + len, len, inv, 0, 0, 0, len,
+ basis, dual_stride, 0);
return 0;
}
diff --git a/libavutil/tx_priv.h b/libavutil/tx_priv.h
index e38490bd56..1688b69509 100644
--- a/libavutil/tx_priv.h
+++ b/libavutil/tx_priv.h
@@ -287,9 +287,13 @@ int ff_tx_gen_ptwo_inplace_revtab_idx(AVTXContext *s);
* functions in AVX mode.
*
* If length is smaller than basis/2 this function will not do anything.
+ *
+ * If inv_lookup is set to 1, it will flip the lookup from out[map[i]] = src[i]
+ * to out[i] = src[map[i]]. If set to -1, will generate 2 maps, the first one
+ * flipped, the second one regular.
*/
-int ff_tx_gen_split_radix_parity_revtab(AVTXContext *s, int invert_lookup,
- int basis, int dual_stride);
+int ff_tx_gen_split_radix_parity_revtab(AVTXContext *s, int len, int inv,
+ int inv_lookup, int basis, int dual_stride);
/* Typed init function to initialize shared tables. Will initialize all tables
* for all factors of a length. */
diff --git a/libavutil/x86/tx_float.asm b/libavutil/x86/tx_float.asm
index 191af7d68f..567f0dc807 100644
--- a/libavutil/x86/tx_float.asm
+++ b/libavutil/x86/tx_float.asm
@@ -925,9 +925,9 @@ ALIGN 16
%macro FFT_SPLIT_RADIX_FN 3
INIT_YMM %1
-cglobal fft_sr_ %+ %2, 4, 8, 16, 272, lut, out, in, len, tmp, itab, rtab, tgt
- movsxd lenq, dword [lutq + AVTXContext.len]
- mov lutq, [lutq + AVTXContext.map]
+cglobal fft_sr_ %+ %2, 4, 9, 16, 272, ctx, out, in, tmp, len, lut, itab, rtab, tgt
+ movsxd lenq, dword [ctxq + AVTXContext.len]
+ mov lutq, [ctxq + AVTXContext.map]
mov tgtq, lenq
; Bottom-most/32-point transform ===============================================
@@ -1289,3 +1289,165 @@ FFT_SPLIT_RADIX_FN avx2, float, 0
FFT_SPLIT_RADIX_FN avx2, ns_float, 1
%endif
%endif
+
+%macro IMDCT_FN 1
+INIT_YMM %1
+cglobal mdct_sr_inv_float, 4, 9, 16, 272, ctx, out, in, stride, len, lut, exp, t1, t2
+ movsxd lenq, dword [ctxq + AVTXContext.len]
+ mov lutq, [ctxq + AVTXContext.map]
+ mov expq, [ctxq + AVTXContext.exp]
+
+ lea t1d, [lend*2 - 1]
+ imul t1d, strided
+ lea t1q, [inq + t1q]
+
+ cmp strideq, 4
+ je .stride4
+
+ mov t2d, strided
+ movd xmm4, strided
+ neg t2d
+ movd xmm5, t2d
+ SPLATD xmm4
+ SPLATD xmm5
+ vperm2f128 m4, m4, m4, 0x00 ; +stride splatted
+ vperm2f128 m5, m5, m5, 0x00 ; -stride splatted
+
+ mov t2q, outq
+
+.stridex_pre:
+ mova m2, [lutq] ; load LUT indices
+ pcmpeqd m0, m0 ; set all bits (all-ones gather mask)
+ pmulld m3, m2, m4 ; multiply by +stride
+ pmulld m2, m5 ; multiply by -stride
+ movaps m1, m0
+ vgatherdps m6, [inq + 2*m3], m0 ; im
+ vgatherdps m7, [t1q + 2*m2], m1 ; re
+
+ movaps m8, [expq + 0*mmsize] ; tab 1
+ movaps m9, [expq + 1*mmsize] ; tab 2
+
+ unpcklps m0, m7, m6 ; re, im, re, im
+ unpckhps m1, m7, m6 ; re, im, re, im
+
+ vperm2f128 m2, m1, m0, 0x02 ; output order
+ vperm2f128 m3, m1, m0, 0x13 ; output order
+
+ movshdup m10, m8 ; tab 1 imim
+ movshdup m11, m9 ; tab 2 imim
+ movsldup m12, m8 ; tab 1 rere
+ movsldup m13, m9 ; tab 2 rere
+
+ mulps m10, m2 ; 1 reim * imim
+ mulps m11, m3 ; 2 reim * imim
+
+ shufps m10, m10, q2301
+ shufps m11, m11, q2301
+
+ fmaddsubps m10, m12, m2, m10
+ fmaddsubps m11, m13, m3, m11
+
+ mova [outq + 0*mmsize], m10
+ mova [outq + 1*mmsize], m11
+
+ add expq, mmsize*2
+ add lutq, mmsize
+ add outq, mmsize*2
+ sub lenq, mmsize/4
+ jg .stridex_pre
+ mov outq, t2q ; restore output
+ jmp .transform
+
+.stride4:
+ lea expq, [expq + lenq*8]
+ lea lutq, [lutq + lenq*4]
+ lea t1q, [t1q + strideq - mmsize]
+
+.stride4_pre:
+ movsldup m1, [inq] ; im im, im im
+ movshdup m0, [t1q] ; re re, re re
+
+ vperm2f128 m0, m0, 0x01 ; flip
+ shufpd m0, m0, 101b
+
+ movaps m2, [expq] ; tab
+
+ mulps m0, m2 ; re re * tab.reim
+ mulps m1, m2 ; im im * tab.reim
+
+ shufps m1, m1, m1, q2301
+ addsubps m0, m1
+
+ vextractf128 xm3, m0, 1
+
+ ; scatter
+ movsxd strideq, dword [lutq + 0*4]
+ movlps [outq + strideq*8], xm0
+ movsxd strideq, dword [lutq + 1*4]
+ movhps [outq + strideq*8], xm0
+ movsxd strideq, dword [lutq + 2*4]
+ movlps [outq + strideq*8], xm3
+ movsxd strideq, dword [lutq + 3*4]
+ movhps [outq + strideq*8], xm3
+
+ add lutq, mmsize/2
+ add expq, mmsize
+ add inq, mmsize
+ sub t1q, mmsize
+ sub lenq, mmsize/8
+ jg .stride4_pre
+
+.transform:
+ mov inq, outq ; in-place transform
+ call ff_tx_fft_sr_ns_float_avx2 ; call the FFT
+
+ movsxd lenq, dword [ctxq + AVTXContext.len]
+ mov expq, [ctxq + AVTXContext.exp]
+ lea expq, [expq + lenq*8]
+
+ lea t1q, [lenq*4] ; high
+ lea t2q, [lenq*4 - mmsize] ; low
+
+ neg lenq
+ lea outq, [inq + lenq*8]
+
+.post:
+ movaps m2, [expq + t1q] ; tab h
+ movaps m3, [expq + t2q] ; tab l
+ movaps m0, [outq + t1q] ; in h
+ movaps m1, [outq + t2q] ; in l
+
+ movshdup m4, m2 ; tab h imim
+ movshdup m5, m3 ; tab l imim
+ movsldup m6, m2 ; tab h rere
+ movsldup m7, m3 ; tab l rere
+
+ shufps m2, m0, m0, q2301 ; in h imre
+ shufps m3, m1, m1, q2301 ; in l imre
+
+ mulps m6, m0
+ mulps m7, m1
+
+ fmaddsubps m4, m4, m2, m6
+ fmaddsubps m5, m5, m3, m7
+
+ vperm2f128 m0, m4, m4, 0x01 ; flip
+ vperm2f128 m1, m5, m5, 0x01 ; flip
+
+ shufps m2, m0, m0, q1230
+ shufps m3, m1, m1, q1230
+
+ blendps m0, m3, m4, 01010101b
+ blendps m1, m2, m5, 01010101b
+
+ movaps [outq + t1q], m0
+ movaps [outq + t2q], m1
+
+ add t1q, mmsize
+ sub t2q, mmsize
+ jge .post
+
+ RET
+%endmacro
+
+IMDCT_FN avx2
diff --git a/libavutil/x86/tx_float_init.c b/libavutil/x86/tx_float_init.c
index 5db0b57d13..785ddb2343 100644
--- a/libavutil/x86/tx_float_init.c
+++ b/libavutil/x86/tx_float_init.c
@@ -43,6 +43,8 @@ TX_DECL_FN(fft_sr_ns, fma3)
TX_DECL_FN(fft_sr, avx2)
TX_DECL_FN(fft_sr_ns, avx2)
+TX_DECL_FN(mdct_sr_inv, avx2)
+
#define DECL_INIT_FN(basis, interleave) \
static av_cold int b ##basis## _i ##interleave(AVTXContext *s, \
const FFTXCodelet *cd, \
@@ -56,13 +58,35 @@ static av_cold int b ##basis## _i ##interleave(AVTXContext *s, \
if (cd->max_len == 2) \
return ff_tx_gen_ptwo_revtab(s, inv_lookup); \
else \
- return ff_tx_gen_split_radix_parity_revtab(s, inv_lookup, \
+ return ff_tx_gen_split_radix_parity_revtab(s, len, inv, inv_lookup, \
basis, interleave); \
}
DECL_INIT_FN(8, 0)
DECL_INIT_FN(8, 2)
+static av_cold int m_inv_init(AVTXContext *s, const FFTXCodelet *cd,
+ uint64_t flags, FFTXCodeletOptions *opts,
+ int len, int inv, const void *scale)
+{
+ int ret;
+
+ s->scale_d = *((SCALE_TYPE *)scale);
+ s->scale_f = s->scale_d;
+
+ ff_tx_init_tabs_float(len >> 1);
+
+ if ((ret = ff_tx_gen_split_radix_parity_revtab(s, len >> 1, 1, -1, 8, 2)) < 0)
+ return ret;
+
+ if ((ret = ff_tx_mdct_gen_exp_float(s, s->map)))
+ return ret;
+
+ s->len >>= 1;
+
+ return 0;
+}
+
const FFTXCodelet * const ff_tx_codelet_list_float_x86[] = {
TX_DEF(fft2, FFT, 2, 2, 2, 0, 128, NULL, sse3, SSE3, AV_TX_INPLACE, 0),
TX_DEF(fft2, FFT, 2, 2, 2, 0, 192, b8_i0, sse3, SSE3, AV_TX_INPLACE | FF_TX_PRESHUFFLE, 0),
@@ -81,6 +105,9 @@ const FFTXCodelet * const ff_tx_codelet_list_float_x86[] = {
TX_DEF(fft16_ns, FFT, 16, 16, 2, 0, 352, b8_i2, fma3, FMA3, AV_TX_INPLACE | FF_TX_PRESHUFFLE,
AV_CPU_FLAG_AVXSLOW),
+ TX_DEF(mdct_sr_inv, MDCT, 128, TX_LEN_UNLIMITED, 2, TX_FACTOR_ANY, 384, m_inv_init, avx2, AVX2,
+ FF_TX_INVERSE_ONLY, AV_CPU_FLAG_AVXSLOW | AV_CPU_FLAG_SLOW_GATHER),
+
#if ARCH_X86_64
TX_DEF(fft32, FFT, 32, 32, 2, 0, 256, b8_i2, avx, AVX, AV_TX_INPLACE, AV_CPU_FLAG_AVXSLOW),
TX_DEF(fft32_ns, FFT, 32, 32, 2, 0, 320, b8_i2, avx, AVX, AV_TX_INPLACE | FF_TX_PRESHUFFLE,
--
2.37.2.609.g9ff673ca1a
[-- Attachment #3: Type: text/plain, Size: 251 bytes --]
_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
^ permalink raw reply [flat|nested] 4+ messages in thread
* [FFmpeg-devel] [PATCH v2] x86/tx_float: implement inverse MDCT AVX2 assembly
[not found] ` <NAv0PJm--3-2@lynne.ee-NAv0T7a----2>
@ 2022-09-02 5:49 ` Lynne
[not found] ` <NAwjob8--3-2@lynne.ee-NAwjsed----2>
1 sibling, 0 replies; 4+ messages in thread
From: Lynne @ 2022-09-02 5:49 UTC (permalink / raw)
To: FFmpeg development discussions and patches
[-- Attachment #1: Type: text/plain, Size: 1568 bytes --]
Version 2 notes: halved the amount of loads and loops for the
pre-transform loop by exploiting the symmetry.
This commit implements an iMDCT in pure assembly.
This is capable of processing any mod-8 transforms, rather than just
power of two, but since power of two is all we have assembly for
currently, that's what's supported.
It would really benefit if we could somehow use the C code to decide
which function to jump into, but exposing function labels from assembly
into C is anything but easy.
The post-transform loop could probably be improved.
This was somewhat annoying to write, as we must support arbitrary
strides during runtime. There's a fast branch for stride == 4 bytes
and a slower one which uses vgatherdps.
Zen 3 benchmarks for stride == 4 for old (av_imdct_half) vs new (av_tx):
128pt:
2815 decicycles in av_tx (imdct),16776766 runs, 450 skips
3097 decicycles in av_imdct_half,16776745 runs, 471 skips
256pt:
4931 decicycles in av_tx (imdct), 4193127 runs, 1177 skips
5401 decicycles in av_imdct_half, 2097058 runs, 94 skips
512pt:
9764 decicycles in av_tx (imdct), 4193929 runs, 375 skips
10690 decicycles in av_imdct_half, 2096948 runs, 204 skips
1024pt:
20113 decicycles in av_tx (imdct), 4194202 runs, 102 skips
21258 decicycles in av_imdct_half, 2097147 runs, 5 skips
Patch attached.
[-- Attachment #2: v2-0001-x86-tx_float-implement-inverse-MDCT-AVX2-assembly.patch --]
[-- Type: text/x-diff, Size: 12880 bytes --]
From f882b039bd8875a8d392ebfe320ac46f9b3d083f Mon Sep 17 00:00:00 2001
From: Lynne <dev@lynne.ee>
Date: Thu, 1 Sep 2022 23:26:29 +0200
Subject: [PATCH v2] x86/tx_float: implement inverse MDCT AVX2 assembly
This commit implements an iMDCT in pure assembly.
This is capable of processing any mod-8 transforms, rather than just
power of two, but since power of two is all we have assembly for
currently, that's what's supported.
It would really benefit if we could somehow use the C code to decide
which function to jump into, but exposing function labels from assembly
into C is anything but easy.
The post-transform loop could probably be improved.
This was somewhat annoying to write, as we must support arbitrary
strides during runtime. There's a fast branch for stride == 4 bytes
and a slower one which uses vgatherdps.
Zen 3 benchmarks for stride == 4 for old (av_imdct_half) vs new (av_tx):
128pt:
2815 decicycles in av_tx (imdct),16776766 runs, 450 skips
3097 decicycles in av_imdct_half,16776745 runs, 471 skips
256pt:
4931 decicycles in av_tx (imdct), 4193127 runs, 1177 skips
5401 decicycles in av_imdct_half, 2097058 runs, 94 skips
512pt:
9764 decicycles in av_tx (imdct), 4193929 runs, 375 skips
10690 decicycles in av_imdct_half, 2096948 runs, 204 skips
1024pt:
20113 decicycles in av_tx (imdct), 4194202 runs, 102 skips
21258 decicycles in av_imdct_half, 2097147 runs, 5 skips
---
libavutil/tx.c | 19 ++--
libavutil/tx_priv.h | 8 +-
libavutil/x86/tx_float.asm | 193 +++++++++++++++++++++++++++++++++-
libavutil/x86/tx_float_init.c | 29 ++++-
4 files changed, 234 insertions(+), 15 deletions(-)
diff --git a/libavutil/tx.c b/libavutil/tx.c
index 28e49a5d41..01f9cb7ea0 100644
--- a/libavutil/tx.c
+++ b/libavutil/tx.c
@@ -206,23 +206,24 @@ static void parity_revtab_generator(int *revtab, int n, int inv, int offset,
1, 1, len >> 1, basis, dual_stride, inv_lookup);
}
-int ff_tx_gen_split_radix_parity_revtab(AVTXContext *s, int invert_lookup,
- int basis, int dual_stride)
+int ff_tx_gen_split_radix_parity_revtab(AVTXContext *s, int len, int inv,
+ int inv_lookup, int basis, int dual_stride)
{
- int len = s->len;
- int inv = s->inv;
-
- if (!(s->map = av_mallocz(len*sizeof(*s->map))))
- return AVERROR(ENOMEM);
-
basis >>= 1;
if (len < basis)
return AVERROR(EINVAL);
+ if (!(s->map = av_mallocz((inv_lookup == -1 ? 2 : 1)*len*sizeof(*s->map))))
+ return AVERROR(ENOMEM);
+
av_assert0(!dual_stride || !(dual_stride & (dual_stride - 1)));
av_assert0(dual_stride <= basis);
+
parity_revtab_generator(s->map, len, inv, 0, 0, 0, len,
- basis, dual_stride, invert_lookup);
+ basis, dual_stride, inv_lookup != 0);
+ if (inv_lookup == -1)
+ parity_revtab_generator(s->map + len, len, inv, 0, 0, 0, len,
+ basis, dual_stride, 0);
return 0;
}
diff --git a/libavutil/tx_priv.h b/libavutil/tx_priv.h
index e38490bd56..1688b69509 100644
--- a/libavutil/tx_priv.h
+++ b/libavutil/tx_priv.h
@@ -287,9 +287,13 @@ int ff_tx_gen_ptwo_inplace_revtab_idx(AVTXContext *s);
* functions in AVX mode.
*
* If length is smaller than basis/2 this function will not do anything.
+ *
+ * If inv_lookup is set to 1, it will flip the lookup from out[map[i]] = src[i]
+ * to out[i] = src[map[i]]. If set to -1, will generate 2 maps, the first one
+ * flipped, the second one regular.
*/
-int ff_tx_gen_split_radix_parity_revtab(AVTXContext *s, int invert_lookup,
- int basis, int dual_stride);
+int ff_tx_gen_split_radix_parity_revtab(AVTXContext *s, int len, int inv,
+ int inv_lookup, int basis, int dual_stride);
/* Typed init function to initialize shared tables. Will initialize all tables
* for all factors of a length. */
diff --git a/libavutil/x86/tx_float.asm b/libavutil/x86/tx_float.asm
index 191af7d68f..ddc949ea3d 100644
--- a/libavutil/x86/tx_float.asm
+++ b/libavutil/x86/tx_float.asm
@@ -925,9 +925,9 @@ ALIGN 16
%macro FFT_SPLIT_RADIX_FN 3
INIT_YMM %1
-cglobal fft_sr_ %+ %2, 4, 8, 16, 272, lut, out, in, len, tmp, itab, rtab, tgt
- movsxd lenq, dword [lutq + AVTXContext.len]
- mov lutq, [lutq + AVTXContext.map]
+cglobal fft_sr_ %+ %2, 4, 9, 16, 272, ctx, out, in, tmp, len, lut, itab, rtab, tgt
+ movsxd lenq, dword [ctxq + AVTXContext.len]
+ mov lutq, [ctxq + AVTXContext.map]
mov tgtq, lenq
; Bottom-most/32-point transform ===============================================
@@ -1289,3 +1289,190 @@ FFT_SPLIT_RADIX_FN avx2, float, 0
FFT_SPLIT_RADIX_FN avx2, ns_float, 1
%endif
%endif
+
+%macro IMDCT_FN 1
+INIT_YMM %1
+cglobal mdct_sr_inv_float, 4, 9, 16, 272, ctx, out, in, stride, len, lut, exp, t1, t2
+ movsxd lenq, dword [ctxq + AVTXContext.len]
+ mov lutq, [ctxq + AVTXContext.map]
+ mov expq, [ctxq + AVTXContext.exp]
+
+ lea t1d, [lend*2 - 1]
+ imul t1d, strided
+ lea t1q, [inq + t1q]
+
+ cmp strideq, 4
+ je .stride4
+
+ mov t2d, strided
+ movd xmm4, strided
+ neg t2d
+ movd xmm5, t2d
+ SPLATD xmm4
+ SPLATD xmm5
+ vperm2f128 m4, m4, m4, 0x00 ; +stride splatted
+ vperm2f128 m5, m5, m5, 0x00 ; -stride splatted
+
+ mov t2q, outq
+
+.stridex_pre:
+ mova m2, [lutq] ; load LUT indices
+ pcmpeqd m0, m0 ; set all bits (all-ones gather mask)
+ pmulld m3, m2, m4 ; multiply by +stride
+ pmulld m2, m5 ; multiply by -stride
+ movaps m1, m0
+ vgatherdps m6, [inq + 2*m3], m0 ; im
+ vgatherdps m7, [t1q + 2*m2], m1 ; re
+
+ movaps m8, [expq + 0*mmsize] ; tab 1
+ movaps m9, [expq + 1*mmsize] ; tab 2
+
+ unpcklps m0, m7, m6 ; re, im, re, im
+ unpckhps m1, m7, m6 ; re, im, re, im
+
+ vperm2f128 m2, m1, m0, 0x02 ; output order
+ vperm2f128 m3, m1, m0, 0x13 ; output order
+
+ movshdup m10, m8 ; tab 1 imim
+ movshdup m11, m9 ; tab 2 imim
+ movsldup m12, m8 ; tab 1 rere
+ movsldup m13, m9 ; tab 2 rere
+
+ mulps m10, m2 ; 1 reim * imim
+ mulps m11, m3 ; 2 reim * imim
+
+ shufps m10, m10, q2301
+ shufps m11, m11, q2301
+
+ fmaddsubps m10, m12, m2, m10
+ fmaddsubps m11, m13, m3, m11
+
+ mova [outq + 0*mmsize], m10
+ mova [outq + 1*mmsize], m11
+
+ add expq, mmsize*2
+ add lutq, mmsize
+ add outq, mmsize*2
+ sub lenq, mmsize/4
+ jg .stridex_pre
+ mov outq, t2q ; restore output
+ jmp .transform
+
+.stride4:
+ lea expq, [expq + lenq*8]
+ lea lutq, [lutq + lenq*4]
+ lea t1q, [t1q + strideq - mmsize]
+ lea t2q, [lenq*4 - mmsize/2]
+
+.stride4_pre:
+ movaps m4, [inq]
+ movaps m3, [t1q]
+
+ movsldup m1, m4 ; im im, im im
+ movshdup m0, m3 ; re re, re re
+ movshdup m4, m4 ; re re, re re (2)
+ movsldup m3, m3 ; im im, im im (2)
+
+ vperm2f128 m0, m0, 0x01 ; flip
+ vperm2f128 m4, m4, 0x01 ; flip (2)
+ shufpd m0, m0, 101b
+ shufpd m4, m4, 101b
+
+ movaps m2, [expq] ; tab
+ movaps m5, [expq + 2*t2q] ; tab (2)
+
+ mulps m1, m2 ; im im * tab.reim
+ mulps m0, m2 ; re re * tab.reim
+ mulps m3, m5 ; im im * tab.reim (2)
+ mulps m4, m5 ; re re * tab.reim (2)
+
+ shufps m1, m1, m1, q2301
+ shufps m3, m3, m3, q2301
+ addsubps m0, m1
+ addsubps m4, m3
+
+ vextractf128 xm3, m0, 1
+ vextractf128 xm6, m4, 1
+
+ ; scatter
+ movsxd strideq, dword [lutq + 0*4]
+ movsxd lenq, dword [lutq + 1*4]
+ movlps [outq + strideq*8], xm0
+ movhps [outq + lenq*8], xm0
+
+ movsxd strideq, dword [lutq + 2*4]
+ movsxd lenq, dword [lutq + 3*4]
+ movlps [outq + strideq*8], xm3
+ movhps [outq + lenq*8], xm3
+
+ movsxd strideq, dword [lutq + 0*4 + t2q]
+ movsxd lenq, dword [lutq + 1*4 + t2q]
+ movlps [outq + strideq*8], xm4
+ movhps [outq + lenq*8], xm4
+
+ movsxd strideq, dword [lutq + 2*4 + t2q]
+ movsxd lenq, dword [lutq + 3*4 + t2q]
+ movlps [outq + strideq*8], xm6
+ movhps [outq + lenq*8], xm6
+
+ add lutq, mmsize/2
+ add expq, mmsize
+ add inq, mmsize
+ sub t1q, mmsize
+ sub t2q, mmsize
+ jg .stride4_pre
+
+.transform:
+ mov inq, outq ; in-place transform
+ call ff_tx_fft_sr_ns_float_avx2 ; call the FFT
+
+ movsxd lenq, dword [ctxq + AVTXContext.len]
+ mov expq, [ctxq + AVTXContext.exp]
+ lea expq, [expq + lenq*8]
+
+ lea t1q, [lenq*4] ; high
+ lea t2q, [lenq*4 - mmsize] ; low
+
+ neg lenq
+ lea outq, [inq + lenq*8]
+
+.post:
+ movaps m2, [expq + t1q] ; tab h
+ movaps m3, [expq + t2q] ; tab l
+ movaps m0, [outq + t1q] ; in h
+ movaps m1, [outq + t2q] ; in l
+
+ movshdup m4, m2 ; tab h imim
+ movshdup m5, m3 ; tab l imim
+ movsldup m6, m2 ; tab h rere
+ movsldup m7, m3 ; tab l rere
+
+ shufps m2, m0, m0, q2301 ; in h imre
+ shufps m3, m1, m1, q2301 ; in l imre
+
+ mulps m6, m0
+ mulps m7, m1
+
+ fmaddsubps m4, m4, m2, m6
+ fmaddsubps m5, m5, m3, m7
+
+ vperm2f128 m0, m4, m4, 0x01 ; flip
+ vperm2f128 m1, m5, m5, 0x01 ; flip
+
+ shufps m2, m0, m0, q1230
+ shufps m3, m1, m1, q1230
+
+ blendps m0, m3, m4, 01010101b
+ blendps m1, m2, m5, 01010101b
+
+ movaps [outq + t1q], m0
+ movaps [outq + t2q], m1
+
+ add t1q, mmsize
+ sub t2q, mmsize
+ jge .post
+
+ RET
+%endmacro
+
+IMDCT_FN avx2
diff --git a/libavutil/x86/tx_float_init.c b/libavutil/x86/tx_float_init.c
index 5db0b57d13..785ddb2343 100644
--- a/libavutil/x86/tx_float_init.c
+++ b/libavutil/x86/tx_float_init.c
@@ -43,6 +43,8 @@ TX_DECL_FN(fft_sr_ns, fma3)
TX_DECL_FN(fft_sr, avx2)
TX_DECL_FN(fft_sr_ns, avx2)
+TX_DECL_FN(mdct_sr_inv, avx2)
+
#define DECL_INIT_FN(basis, interleave) \
static av_cold int b ##basis## _i ##interleave(AVTXContext *s, \
const FFTXCodelet *cd, \
@@ -56,13 +58,35 @@ static av_cold int b ##basis## _i ##interleave(AVTXContext *s, \
if (cd->max_len == 2) \
return ff_tx_gen_ptwo_revtab(s, inv_lookup); \
else \
- return ff_tx_gen_split_radix_parity_revtab(s, inv_lookup, \
+ return ff_tx_gen_split_radix_parity_revtab(s, len, inv, inv_lookup, \
basis, interleave); \
}
DECL_INIT_FN(8, 0)
DECL_INIT_FN(8, 2)
+static av_cold int m_inv_init(AVTXContext *s, const FFTXCodelet *cd,
+ uint64_t flags, FFTXCodeletOptions *opts,
+ int len, int inv, const void *scale)
+{
+ int ret;
+
+ s->scale_d = *((SCALE_TYPE *)scale);
+ s->scale_f = s->scale_d;
+
+ ff_tx_init_tabs_float(len >> 1);
+
+ if ((ret = ff_tx_gen_split_radix_parity_revtab(s, len >> 1, 1, -1, 8, 2)) < 0)
+ return ret;
+
+ if ((ret = ff_tx_mdct_gen_exp_float(s, s->map)))
+ return ret;
+
+ s->len >>= 1;
+
+ return 0;
+}
+
const FFTXCodelet * const ff_tx_codelet_list_float_x86[] = {
TX_DEF(fft2, FFT, 2, 2, 2, 0, 128, NULL, sse3, SSE3, AV_TX_INPLACE, 0),
TX_DEF(fft2, FFT, 2, 2, 2, 0, 192, b8_i0, sse3, SSE3, AV_TX_INPLACE | FF_TX_PRESHUFFLE, 0),
@@ -81,6 +105,9 @@ const FFTXCodelet * const ff_tx_codelet_list_float_x86[] = {
TX_DEF(fft16_ns, FFT, 16, 16, 2, 0, 352, b8_i2, fma3, FMA3, AV_TX_INPLACE | FF_TX_PRESHUFFLE,
AV_CPU_FLAG_AVXSLOW),
+ TX_DEF(mdct_sr_inv, MDCT, 128, TX_LEN_UNLIMITED, 2, TX_FACTOR_ANY, 384, m_inv_init, avx2, AVX2,
+ FF_TX_INVERSE_ONLY, AV_CPU_FLAG_AVXSLOW | AV_CPU_FLAG_SLOW_GATHER),
+
#if ARCH_X86_64
TX_DEF(fft32, FFT, 32, 32, 2, 0, 256, b8_i2, avx, AVX, AV_TX_INPLACE, AV_CPU_FLAG_AVXSLOW),
TX_DEF(fft32_ns, FFT, 32, 32, 2, 0, 320, b8_i2, avx, AVX, AV_TX_INPLACE | FF_TX_PRESHUFFLE,
--
2.37.2.609.g9ff673ca1a
[-- Attachment #3: Type: text/plain, Size: 251 bytes --]
_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [FFmpeg-devel] [PATCH v2] x86/tx_float: implement inverse MDCT AVX2 assembly
[not found] ` <NAwjob8--3-2@lynne.ee-NAwjsed----2>
@ 2022-09-02 5:55 ` Lynne
2022-09-02 14:03 ` Henrik Gramner
0 siblings, 1 reply; 4+ messages in thread
From: Lynne @ 2022-09-02 5:55 UTC (permalink / raw)
To: FFmpeg development discussions and patches
[-- Attachment #1: Type: text/plain, Size: 1735 bytes --]
Sep 2, 2022, 07:49 by dev@lynne.ee:
> Version 2 notes: halved the amount of loads and loops for the
> pre-transform loop by exploiting the symmetry.
>
> This commit implements an iMDCT in pure assembly.
>
> This is capable of processing any mod-8 transforms, rather than just
> power of two, but since power of two is all we have assembly for
> currently, that's what's supported.
> It would really benefit if we could somehow use the C code to decide
> which function to jump into, but exposing function labels from assembly
> into C is anything but easy.
> The post-transform loop could probably be improved.
>
> This was somewhat annoying to write, as we must support arbitrary
> strides during runtime. There's a fast branch for stride == 4 bytes
> and a slower one which uses vgatherdps.
>
> Zen 3 benchmarks for stride == 4 for old (av_imdct_half) vs new (av_tx):
>
> 128pt:
> 2815 decicycles in av_tx (imdct),16776766 runs, 450 skips
> 3097 decicycles in av_imdct_half,16776745 runs, 471 skips
>
> 256pt:
> 4931 decicycles in av_tx (imdct), 4193127 runs, 1177 skips
> 5401 decicycles in av_imdct_half, 2097058 runs, 94 skips
>
> 512pt:
> 9764 decicycles in av_tx (imdct), 4193929 runs, 375 skips
> 10690 decicycles in av_imdct_half, 2096948 runs, 204 skips
>
> 1024pt:
> 20113 decicycles in av_tx (imdct), 4194202 runs, 102 skips
> 21258 decicycles in av_imdct_half, 2097147 runs, 5 skips
>
> Patch attached.
>
Forgot to git add some minor reordering/fma changes.
W/e.
[-- Attachment #2: v2-0001-x86-tx_float-implement-inverse-MDCT-AVX2-assembly.patch --]
[-- Type: text/x-diff, Size: 12780 bytes --]
From d29fe57522a7bc26452da7198afbda440e6aba1c Mon Sep 17 00:00:00 2001
From: Lynne <dev@lynne.ee>
Date: Thu, 1 Sep 2022 23:26:29 +0200
Subject: [PATCH v2] x86/tx_float: implement inverse MDCT AVX2 assembly
This commit implements an iMDCT in pure assembly.
This is capable of processing any mod-8 transforms, rather than just
power of two, but since power of two is all we have assembly for
currently, that's what's supported.
It would really benefit if we could somehow use the C code to decide
which function to jump into, but exposing function labels from assembly
into C is anything but easy.
The post-transform loop could probably be improved.
This was somewhat annoying to write, as we must support arbitrary
strides during runtime. There's a fast branch for stride == 4 bytes
and a slower one which uses vgatherdps.
Zen 3 benchmarks for stride == 4 for old (av_imdct_half) vs new (av_tx):
128pt:
2815 decicycles in av_tx (imdct),16776766 runs, 450 skips
3097 decicycles in av_imdct_half,16776745 runs, 471 skips
256pt:
4931 decicycles in av_tx (imdct), 4193127 runs, 1177 skips
5401 decicycles in av_imdct_half, 2097058 runs, 94 skips
512pt:
9764 decicycles in av_tx (imdct), 4193929 runs, 375 skips
10690 decicycles in av_imdct_half, 2096948 runs, 204 skips
1024pt:
20113 decicycles in av_tx (imdct), 4194202 runs, 102 skips
21258 decicycles in av_imdct_half, 2097147 runs, 5 skips
---
libavutil/tx.c | 19 ++--
libavutil/tx_priv.h | 8 +-
libavutil/x86/tx_float.asm | 192 +++++++++++++++++++++++++++++++++-
libavutil/x86/tx_float_init.c | 29 ++++-
4 files changed, 233 insertions(+), 15 deletions(-)
diff --git a/libavutil/tx.c b/libavutil/tx.c
index 28e49a5d41..01f9cb7ea0 100644
--- a/libavutil/tx.c
+++ b/libavutil/tx.c
@@ -206,23 +206,24 @@ static void parity_revtab_generator(int *revtab, int n, int inv, int offset,
1, 1, len >> 1, basis, dual_stride, inv_lookup);
}
-int ff_tx_gen_split_radix_parity_revtab(AVTXContext *s, int invert_lookup,
- int basis, int dual_stride)
+int ff_tx_gen_split_radix_parity_revtab(AVTXContext *s, int len, int inv,
+ int inv_lookup, int basis, int dual_stride)
{
- int len = s->len;
- int inv = s->inv;
-
- if (!(s->map = av_mallocz(len*sizeof(*s->map))))
- return AVERROR(ENOMEM);
-
basis >>= 1;
if (len < basis)
return AVERROR(EINVAL);
+ if (!(s->map = av_mallocz((inv_lookup == -1 ? 2 : 1)*len*sizeof(*s->map))))
+ return AVERROR(ENOMEM);
+
av_assert0(!dual_stride || !(dual_stride & (dual_stride - 1)));
av_assert0(dual_stride <= basis);
+
parity_revtab_generator(s->map, len, inv, 0, 0, 0, len,
- basis, dual_stride, invert_lookup);
+ basis, dual_stride, inv_lookup != 0);
+ if (inv_lookup == -1)
+ parity_revtab_generator(s->map + len, len, inv, 0, 0, 0, len,
+ basis, dual_stride, 0);
return 0;
}
diff --git a/libavutil/tx_priv.h b/libavutil/tx_priv.h
index e38490bd56..1688b69509 100644
--- a/libavutil/tx_priv.h
+++ b/libavutil/tx_priv.h
@@ -287,9 +287,13 @@ int ff_tx_gen_ptwo_inplace_revtab_idx(AVTXContext *s);
* functions in AVX mode.
*
* If length is smaller than basis/2 this function will not do anything.
+ *
+ * If inv_lookup is set to 1, it will flip the lookup from out[map[i]] = src[i]
+ * to out[i] = src[map[i]]. If set to -1, will generate 2 maps, the first one
+ * flipped, the second one regular.
*/
-int ff_tx_gen_split_radix_parity_revtab(AVTXContext *s, int invert_lookup,
- int basis, int dual_stride);
+int ff_tx_gen_split_radix_parity_revtab(AVTXContext *s, int len, int inv,
+ int inv_lookup, int basis, int dual_stride);
/* Typed init function to initialize shared tables. Will initialize all tables
* for all factors of a length. */
diff --git a/libavutil/x86/tx_float.asm b/libavutil/x86/tx_float.asm
index 191af7d68f..ca73bfd6e8 100644
--- a/libavutil/x86/tx_float.asm
+++ b/libavutil/x86/tx_float.asm
@@ -925,9 +925,9 @@ ALIGN 16
%macro FFT_SPLIT_RADIX_FN 3
INIT_YMM %1
-cglobal fft_sr_ %+ %2, 4, 8, 16, 272, lut, out, in, len, tmp, itab, rtab, tgt
- movsxd lenq, dword [lutq + AVTXContext.len]
- mov lutq, [lutq + AVTXContext.map]
+cglobal fft_sr_ %+ %2, 4, 9, 16, 272, ctx, out, in, tmp, len, lut, itab, rtab, tgt
+ movsxd lenq, dword [ctxq + AVTXContext.len]
+ mov lutq, [ctxq + AVTXContext.map]
mov tgtq, lenq
; Bottom-most/32-point transform ===============================================
@@ -1289,3 +1289,189 @@ FFT_SPLIT_RADIX_FN avx2, float, 0
FFT_SPLIT_RADIX_FN avx2, ns_float, 1
%endif
%endif
+
+%macro IMDCT_FN 1
+INIT_YMM %1
+cglobal mdct_sr_inv_float, 4, 9, 16, 272, ctx, out, in, stride, len, lut, exp, t1, t2
+ movsxd lenq, dword [ctxq + AVTXContext.len]
+ mov lutq, [ctxq + AVTXContext.map]
+ mov expq, [ctxq + AVTXContext.exp]
+
+ lea t1d, [lend*2 - 1]
+ imul t1d, strided
+ lea t1q, [inq + t1q]
+
+ cmp strideq, 4
+ je .stride4
+
+ mov t2d, strided
+ movd xmm4, strided
+ neg t2d
+ movd xmm5, t2d
+ SPLATD xmm4
+ SPLATD xmm5
+ vperm2f128 m4, m4, m4, 0x00 ; +stride splatted
+ vperm2f128 m5, m5, m5, 0x00 ; -stride splatted
+
+ mov t2q, outq
+
+.stridex_pre:
+ mova m2, [lutq] ; load LUT indices
+ pcmpeqd m0, m0 ; set all bits to 1 (all-ones gather mask)
+ pmulld m3, m2, m4 ; multiply by +stride
+ pmulld m2, m5 ; multiply by -stride
+ movaps m1, m0
+ vgatherdps m6, [inq + 2*m3], m0 ; im
+ vgatherdps m7, [t1q + 2*m2], m1 ; re
+
+ movaps m8, [expq + 0*mmsize] ; tab 1
+ movaps m9, [expq + 1*mmsize] ; tab 2
+
+ unpcklps m0, m7, m6 ; re, im, re, im
+ unpckhps m1, m7, m6 ; re, im, re, im
+
+ vperm2f128 m2, m1, m0, 0x02 ; output order
+ vperm2f128 m3, m1, m0, 0x13 ; output order
+
+ movshdup m10, m8 ; tab 1 imim
+ movshdup m11, m9 ; tab 2 imim
+ movsldup m12, m8 ; tab 1 rere
+ movsldup m13, m9 ; tab 2 rere
+
+ mulps m10, m2 ; 1 reim * imim
+ mulps m11, m3 ; 2 reim * imim
+
+ shufps m10, m10, q2301
+ shufps m11, m11, q2301
+
+ fmaddsubps m10, m12, m2, m10
+ fmaddsubps m11, m13, m3, m11
+
+ mova [outq + 0*mmsize], m10
+ mova [outq + 1*mmsize], m11
+
+ add expq, mmsize*2
+ add lutq, mmsize
+ add outq, mmsize*2
+ sub lenq, mmsize/4
+ jg .stridex_pre
+ mov outq, t2q ; restore output
+ jmp .transform
+
+.stride4:
+ lea expq, [expq + lenq*8]
+ lea lutq, [lutq + lenq*4]
+ lea t1q, [t1q + strideq - mmsize]
+ lea t2q, [lenq*4 - mmsize/2]
+
+.stride4_pre:
+ movaps m4, [inq]
+ movaps m3, [t1q]
+
+ movsldup m1, m4 ; im im, im im
+ movshdup m0, m3 ; re re, re re
+ movshdup m4, m4 ; re re, re re (2)
+ movsldup m3, m3 ; im im, im im (2)
+
+ movaps m2, [expq] ; tab
+ movaps m5, [expq + 2*t2q] ; tab (2)
+
+ vperm2f128 m0, m0, 0x01 ; flip
+ shufps m7, m2, m2, q2301
+ vperm2f128 m4, m4, 0x01 ; flip (2)
+ shufps m8, m5, m5, q2301
+
+ shufpd m0, m0, 101b
+ shufpd m4, m4, 101b
+
+ mulps m1, m7 ; im im * tab.reim
+ mulps m3, m8 ; im im * tab.reim (2)
+
+ fmaddsubps m0, m0, m2, m1
+ fmaddsubps m4, m4, m5, m3
+
+ vextractf128 xm3, m0, 1
+ vextractf128 xm6, m4, 1
+
+ ; scatter
+ movsxd strideq, dword [lutq + 0*4]
+ movsxd lenq, dword [lutq + 1*4]
+ movlps [outq + strideq*8], xm0
+ movhps [outq + lenq*8], xm0
+
+ movsxd strideq, dword [lutq + 2*4]
+ movsxd lenq, dword [lutq + 3*4]
+ movlps [outq + strideq*8], xm3
+ movhps [outq + lenq*8], xm3
+
+ movsxd strideq, dword [lutq + 0*4 + t2q]
+ movsxd lenq, dword [lutq + 1*4 + t2q]
+ movlps [outq + strideq*8], xm4
+ movhps [outq + lenq*8], xm4
+
+ movsxd strideq, dword [lutq + 2*4 + t2q]
+ movsxd lenq, dword [lutq + 3*4 + t2q]
+ movlps [outq + strideq*8], xm6
+ movhps [outq + lenq*8], xm6
+
+ add lutq, mmsize/2
+ add expq, mmsize
+ add inq, mmsize
+ sub t1q, mmsize
+ sub t2q, mmsize
+ jg .stride4_pre
+
+.transform:
+ mov inq, outq ; in-place transform
+ call ff_tx_fft_sr_ns_float_avx2 ; call the FFT
+
+ movsxd lenq, dword [ctxq + AVTXContext.len]
+ mov expq, [ctxq + AVTXContext.exp]
+ lea expq, [expq + lenq*8]
+
+ lea t1q, [lenq*4] ; high
+ lea t2q, [lenq*4 - mmsize] ; low
+
+ neg lenq
+ lea outq, [inq + lenq*8]
+
+.post:
+ movaps m2, [expq + t1q] ; tab h
+ movaps m3, [expq + t2q] ; tab l
+ movaps m0, [outq + t1q] ; in h
+ movaps m1, [outq + t2q] ; in l
+
+ movshdup m4, m2 ; tab h imim
+ movshdup m5, m3 ; tab l imim
+ movsldup m6, m2 ; tab h rere
+ movsldup m7, m3 ; tab l rere
+
+ shufps m2, m0, m0, q2301 ; in h imre
+ shufps m3, m1, m1, q2301 ; in l imre
+
+ mulps m6, m0
+ mulps m7, m1
+
+ fmaddsubps m4, m4, m2, m6
+ fmaddsubps m5, m5, m3, m7
+
+ vperm2f128 m0, m4, m4, 0x01 ; flip
+ vperm2f128 m1, m5, m5, 0x01 ; flip
+
+ shufps m2, m0, m0, q1230
+ shufps m3, m1, m1, q1230
+
+ blendps m0, m3, m4, 01010101b
+ blendps m1, m2, m5, 01010101b
+
+ movaps [outq + t1q], m0
+ movaps [outq + t2q], m1
+
+ add t1q, mmsize
+ sub t2q, mmsize
+ jge .post
+
+ RET
+%endmacro
+
+IMDCT_FN avx2
diff --git a/libavutil/x86/tx_float_init.c b/libavutil/x86/tx_float_init.c
index 5db0b57d13..785ddb2343 100644
--- a/libavutil/x86/tx_float_init.c
+++ b/libavutil/x86/tx_float_init.c
@@ -43,6 +43,8 @@ TX_DECL_FN(fft_sr_ns, fma3)
TX_DECL_FN(fft_sr, avx2)
TX_DECL_FN(fft_sr_ns, avx2)
+TX_DECL_FN(mdct_sr_inv, avx2)
+
#define DECL_INIT_FN(basis, interleave) \
static av_cold int b ##basis## _i ##interleave(AVTXContext *s, \
const FFTXCodelet *cd, \
@@ -56,13 +58,35 @@ static av_cold int b ##basis## _i ##interleave(AVTXContext *s, \
if (cd->max_len == 2) \
return ff_tx_gen_ptwo_revtab(s, inv_lookup); \
else \
- return ff_tx_gen_split_radix_parity_revtab(s, inv_lookup, \
+ return ff_tx_gen_split_radix_parity_revtab(s, len, inv, inv_lookup, \
basis, interleave); \
}
DECL_INIT_FN(8, 0)
DECL_INIT_FN(8, 2)
+static av_cold int m_inv_init(AVTXContext *s, const FFTXCodelet *cd,
+ uint64_t flags, FFTXCodeletOptions *opts,
+ int len, int inv, const void *scale)
+{
+ int ret;
+
+ s->scale_d = *((SCALE_TYPE *)scale);
+ s->scale_f = s->scale_d;
+
+ ff_tx_init_tabs_float(len >> 1);
+
+ if ((ret = ff_tx_gen_split_radix_parity_revtab(s, len >> 1, 1, -1, 8, 2)) < 0)
+ return ret;
+
+ if ((ret = ff_tx_mdct_gen_exp_float(s, s->map)))
+ return ret;
+
+ s->len >>= 1;
+
+ return 0;
+}
+
const FFTXCodelet * const ff_tx_codelet_list_float_x86[] = {
TX_DEF(fft2, FFT, 2, 2, 2, 0, 128, NULL, sse3, SSE3, AV_TX_INPLACE, 0),
TX_DEF(fft2, FFT, 2, 2, 2, 0, 192, b8_i0, sse3, SSE3, AV_TX_INPLACE | FF_TX_PRESHUFFLE, 0),
@@ -81,6 +105,9 @@ const FFTXCodelet * const ff_tx_codelet_list_float_x86[] = {
TX_DEF(fft16_ns, FFT, 16, 16, 2, 0, 352, b8_i2, fma3, FMA3, AV_TX_INPLACE | FF_TX_PRESHUFFLE,
AV_CPU_FLAG_AVXSLOW),
+ TX_DEF(mdct_sr_inv, MDCT, 128, TX_LEN_UNLIMITED, 2, TX_FACTOR_ANY, 384, m_inv_init, avx2, AVX2,
+ FF_TX_INVERSE_ONLY, AV_CPU_FLAG_AVXSLOW | AV_CPU_FLAG_SLOW_GATHER),
+
#if ARCH_X86_64
TX_DEF(fft32, FFT, 32, 32, 2, 0, 256, b8_i2, avx, AVX, AV_TX_INPLACE, AV_CPU_FLAG_AVXSLOW),
TX_DEF(fft32_ns, FFT, 32, 32, 2, 0, 320, b8_i2, avx, AVX, AV_TX_INPLACE | FF_TX_PRESHUFFLE,
--
2.37.2.609.g9ff673ca1a
[-- Attachment #3: Type: text/plain, Size: 251 bytes --]
_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [FFmpeg-devel] [PATCH v2] x86/tx_float: implement inverse MDCT AVX2 assembly
2022-09-02 5:55 ` Lynne
@ 2022-09-02 14:03 ` Henrik Gramner
0 siblings, 0 replies; 4+ messages in thread
From: Henrik Gramner @ 2022-09-02 14:03 UTC (permalink / raw)
To: FFmpeg development discussions and patches
On Fri, Sep 2, 2022 at 7:55 AM Lynne <dev@lynne.ee> wrote:
> + movd xmm4, strided
> + neg t2d
> + movd xmm5, t2d
> + SPLATD xmm4
> + SPLATD xmm5
> + vperm2f128 m4, m4, m4, 0x00 ; +stride splatted
> + vperm2f128 m5, m5, m5, 0x00 ; -stride splatted
movd xm4, strided
pxor m5, m5
vpbroadcastd m4, xm4
+ mova m2, [lutq] ; load LUT indices
+ pcmpeqd m0, m0 ; zero out a register
+ pmulld m3, m2, m4 ; multiply by +stride
+ pmulld m2, m5 ; multiply by -stride
+ movaps m1, m0
+ vgatherdps m6, [inq + 2*m3], m0 ; im
+ vgatherdps m7, [t1q + 2*m2], m1 ; re
pmulld m2, m4, [lutq]
pcmpeqd m0, m0
mova m1, m0
vgatherdps m6, [inq + 2*m2], m0
psubd m2, m5, m2
vgatherdps m7, [t1q + 2*m2], m1
The comment for pcmpeqd is also wrong as bits are set to 1, not 0.
That instruction could also be moved outside the loop and replaced
with a cheaper register-register move inside the loop.
> + vperm2f128 m0, m0, 0x01 ; flip
> + vperm2f128 m4, m4, 0x01 ; flip (2)
> + shufpd m0, m0, 101b
> + shufpd m4, m4, 101b
vpermpd m0, m0, q0123
vpermpd m4, m4, q0123
_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
^ permalink raw reply [flat|nested] 4+ messages in thread
end of thread, other threads:[~2022-09-02 14:03 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-09-01 21:47 [FFmpeg-devel] [PATCH] x86/tx_float: implement inverse MDCT AVX2 assembly Lynne
[not found] ` <NAv0PJm--3-2@lynne.ee-NAv0T7a----2>
2022-09-02 5:49 ` [FFmpeg-devel] [PATCH v2] " Lynne
[not found] ` <NAwjob8--3-2@lynne.ee-NAwjsed----2>
2022-09-02 5:55 ` Lynne
2022-09-02 14:03 ` Henrik Gramner
Git Inbox Mirror of the ffmpeg-devel mailing list - see https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
This inbox may be cloned and mirrored by anyone:
git clone --mirror https://master.gitmailbox.com/ffmpegdev/0 ffmpegdev/git/0.git
# If you have public-inbox 1.1+ installed, you may
# initialize and index your mirror using the following commands:
public-inbox-init -V2 ffmpegdev ffmpegdev/ https://master.gitmailbox.com/ffmpegdev \
ffmpegdev@gitmailbox.com
public-inbox-index ffmpegdev
Example config snippet for mirrors.
AGPL code for this site: git clone https://public-inbox.org/public-inbox.git