From mboxrd@z Thu Jan 1 00:00:00 1970
From: Martin Storsjö
To: ffmpeg-devel@ffmpeg.org
Date: Mon, 25 Mar 2024 17:02:39 +0200
Message-Id: <20240325150243.59058-18-martin@martin.st>
X-Mailer: git-send-email 2.39.3 (Apple Git-146)
In-Reply-To: <20240325150243.59058-1-martin@martin.st>
References: <20240325150243.59058-1-martin@martin.st>
Subject: [FFmpeg-devel] [PATCH 17/21] aarch64: hevc: Reorder qpel_hv functions to prepare for templating
Cc: Logan Lyu, J. Dekker
Dekker" Content-Type: text/plain; charset="us-ascii" Content-Transfer-Encoding: 7bit Errors-To: ffmpeg-devel-bounces@ffmpeg.org Sender: "ffmpeg-devel" Archived-At: List-Archive: List-Post: --- libavcodec/aarch64/hevcdsp_qpel_neon.S | 695 +++++++++++++------------ 1 file changed, 355 insertions(+), 340 deletions(-) diff --git a/libavcodec/aarch64/hevcdsp_qpel_neon.S b/libavcodec/aarch64/hevcdsp_qpel_neon.S index 06832603d9..ad568e415b 100644 --- a/libavcodec/aarch64/hevcdsp_qpel_neon.S +++ b/libavcodec/aarch64/hevcdsp_qpel_neon.S @@ -2146,29 +2146,6 @@ function ff_hevc_put_hevc_qpel_uni_w_v64_8_neon, export=1 ret endfunc -#if HAVE_I8MM -ENABLE_I8MM - -function ff_hevc_put_hevc_qpel_uni_hv4_8_neon_i8mm, export=1 - add w10, w4, #7 - lsl x10, x10, #7 - sub sp, sp, x10 // tmp_array - str x30, [sp, #-48]! - stp x4, x6, [sp, #16] - stp x0, x1, [sp, #32] - sub x1, x2, x3, lsl #1 - sub x1, x1, x3 - add x0, sp, #48 - mov x2, x3 - add x3, x4, #7 - mov x4, x5 - bl X(ff_hevc_put_hevc_qpel_h4_8_neon_i8mm) - ldp x4, x6, [sp, #16] - ldp x0, x1, [sp, #32] - ldr x30, [sp], #48 - b hevc_put_hevc_qpel_uni_hv4_8_end_neon -endfunc - function hevc_put_hevc_qpel_uni_hv4_8_end_neon mov x9, #(MAX_PB_SIZE * 2) load_qpel_filterh x6, x5 @@ -2195,26 +2172,6 @@ function hevc_put_hevc_qpel_uni_hv4_8_end_neon 2: ret endfunc -function ff_hevc_put_hevc_qpel_uni_hv6_8_neon_i8mm, export=1 - add w10, w4, #7 - lsl x10, x10, #7 - sub sp, sp, x10 // tmp_array - str x30, [sp, #-48]! - stp x4, x6, [sp, #16] - stp x0, x1, [sp, #32] - sub x1, x2, x3, lsl #1 - sub x1, x1, x3 - add x0, sp, #48 - mov x2, x3 - add w3, w4, #7 - mov x4, x5 - bl X(ff_hevc_put_hevc_qpel_h6_8_neon_i8mm) - ldp x4, x6, [sp, #16] - ldp x0, x1, [sp, #32] - ldr x30, [sp], #48 - b hevc_put_hevc_qpel_uni_hv6_8_end_neon -endfunc - function hevc_put_hevc_qpel_uni_hv6_8_end_neon mov x9, #(MAX_PB_SIZE * 2) load_qpel_filterh x6, x5 @@ -2244,26 +2201,6 @@ function hevc_put_hevc_qpel_uni_hv6_8_end_neon 2: ret endfunc -function ff_hevc_put_hevc_qpel_uni_hv8_8_neon_i8mm, export=1 - add w10, w4, #7 - lsl x10, x10, #7 - sub sp, sp, x10 // tmp_array - str x30, [sp, #-48]! - stp x4, x6, [sp, #16] - stp x0, x1, [sp, #32] - sub x1, x2, x3, lsl #1 - sub x1, x1, x3 - add x0, sp, #48 - mov x2, x3 - add w3, w4, #7 - mov x4, x5 - bl X(ff_hevc_put_hevc_qpel_h8_8_neon_i8mm) - ldp x4, x6, [sp, #16] - ldp x0, x1, [sp, #32] - ldr x30, [sp], #48 - b hevc_put_hevc_qpel_uni_hv8_8_end_neon -endfunc - function hevc_put_hevc_qpel_uni_hv8_8_end_neon mov x9, #(MAX_PB_SIZE * 2) load_qpel_filterh x6, x5 @@ -2291,26 +2228,6 @@ function hevc_put_hevc_qpel_uni_hv8_8_end_neon 2: ret endfunc -function ff_hevc_put_hevc_qpel_uni_hv12_8_neon_i8mm, export=1 - add w10, w4, #7 - lsl x10, x10, #7 - sub sp, sp, x10 // tmp_array - stp x7, x30, [sp, #-48]! - stp x4, x6, [sp, #16] - stp x0, x1, [sp, #32] - sub x1, x2, x3, lsl #1 - sub x1, x1, x3 - mov x2, x3 - add x0, sp, #48 - add w3, w4, #7 - mov x4, x5 - bl X(ff_hevc_put_hevc_qpel_h12_8_neon_i8mm) - ldp x4, x6, [sp, #16] - ldp x0, x1, [sp, #32] - ldp x7, x30, [sp], #48 - b hevc_put_hevc_qpel_uni_hv12_8_end_neon -endfunc - function hevc_put_hevc_qpel_uni_hv12_8_end_neon mov x9, #(MAX_PB_SIZE * 2) load_qpel_filterh x6, x5 @@ -2338,26 +2255,6 @@ function hevc_put_hevc_qpel_uni_hv12_8_end_neon 2: ret endfunc -function ff_hevc_put_hevc_qpel_uni_hv16_8_neon_i8mm, export=1 - add w10, w4, #7 - lsl x10, x10, #7 - sub sp, sp, x10 // tmp_array - stp x7, x30, [sp, #-48]! 
- stp x4, x6, [sp, #16] - stp x0, x1, [sp, #32] - add x0, sp, #48 - sub x1, x2, x3, lsl #1 - sub x1, x1, x3 - mov x2, x3 - add w3, w4, #7 - mov x4, x5 - bl X(ff_hevc_put_hevc_qpel_h16_8_neon_i8mm) - ldp x4, x6, [sp, #16] - ldp x0, x1, [sp, #32] - ldp x7, x30, [sp], #48 - b hevc_put_hevc_qpel_uni_hv16_8_end_neon -endfunc - function hevc_put_hevc_qpel_uni_hv16_8_end_neon mov x9, #(MAX_PB_SIZE * 2) load_qpel_filterh x6, x5 @@ -2396,6 +2293,109 @@ function hevc_put_hevc_qpel_uni_hv16_8_end_neon ret endfunc +#if HAVE_I8MM +ENABLE_I8MM + +function ff_hevc_put_hevc_qpel_uni_hv4_8_neon_i8mm, export=1 + add w10, w4, #7 + lsl x10, x10, #7 + sub sp, sp, x10 // tmp_array + str x30, [sp, #-48]! + stp x4, x6, [sp, #16] + stp x0, x1, [sp, #32] + sub x1, x2, x3, lsl #1 + sub x1, x1, x3 + add x0, sp, #48 + mov x2, x3 + add x3, x4, #7 + mov x4, x5 + bl X(ff_hevc_put_hevc_qpel_h4_8_neon_i8mm) + ldp x4, x6, [sp, #16] + ldp x0, x1, [sp, #32] + ldr x30, [sp], #48 + b hevc_put_hevc_qpel_uni_hv4_8_end_neon +endfunc + +function ff_hevc_put_hevc_qpel_uni_hv6_8_neon_i8mm, export=1 + add w10, w4, #7 + lsl x10, x10, #7 + sub sp, sp, x10 // tmp_array + str x30, [sp, #-48]! + stp x4, x6, [sp, #16] + stp x0, x1, [sp, #32] + sub x1, x2, x3, lsl #1 + sub x1, x1, x3 + add x0, sp, #48 + mov x2, x3 + add w3, w4, #7 + mov x4, x5 + bl X(ff_hevc_put_hevc_qpel_h6_8_neon_i8mm) + ldp x4, x6, [sp, #16] + ldp x0, x1, [sp, #32] + ldr x30, [sp], #48 + b hevc_put_hevc_qpel_uni_hv6_8_end_neon +endfunc + +function ff_hevc_put_hevc_qpel_uni_hv8_8_neon_i8mm, export=1 + add w10, w4, #7 + lsl x10, x10, #7 + sub sp, sp, x10 // tmp_array + str x30, [sp, #-48]! + stp x4, x6, [sp, #16] + stp x0, x1, [sp, #32] + sub x1, x2, x3, lsl #1 + sub x1, x1, x3 + add x0, sp, #48 + mov x2, x3 + add w3, w4, #7 + mov x4, x5 + bl X(ff_hevc_put_hevc_qpel_h8_8_neon_i8mm) + ldp x4, x6, [sp, #16] + ldp x0, x1, [sp, #32] + ldr x30, [sp], #48 + b hevc_put_hevc_qpel_uni_hv8_8_end_neon +endfunc + +function ff_hevc_put_hevc_qpel_uni_hv12_8_neon_i8mm, export=1 + add w10, w4, #7 + lsl x10, x10, #7 + sub sp, sp, x10 // tmp_array + stp x7, x30, [sp, #-48]! + stp x4, x6, [sp, #16] + stp x0, x1, [sp, #32] + sub x1, x2, x3, lsl #1 + sub x1, x1, x3 + mov x2, x3 + add x0, sp, #48 + add w3, w4, #7 + mov x4, x5 + bl X(ff_hevc_put_hevc_qpel_h12_8_neon_i8mm) + ldp x4, x6, [sp, #16] + ldp x0, x1, [sp, #32] + ldp x7, x30, [sp], #48 + b hevc_put_hevc_qpel_uni_hv12_8_end_neon +endfunc + +function ff_hevc_put_hevc_qpel_uni_hv16_8_neon_i8mm, export=1 + add w10, w4, #7 + lsl x10, x10, #7 + sub sp, sp, x10 // tmp_array + stp x7, x30, [sp, #-48]! + stp x4, x6, [sp, #16] + stp x0, x1, [sp, #32] + add x0, sp, #48 + sub x1, x2, x3, lsl #1 + sub x1, x1, x3 + mov x2, x3 + add w3, w4, #7 + mov x4, x5 + bl X(ff_hevc_put_hevc_qpel_h16_8_neon_i8mm) + ldp x4, x6, [sp, #16] + ldp x0, x1, [sp, #32] + ldp x7, x30, [sp], #48 + b hevc_put_hevc_qpel_uni_hv16_8_end_neon +endfunc + function ff_hevc_put_hevc_qpel_uni_hv24_8_neon_i8mm, export=1 stp x4, x5, [sp, #-64]! stp x2, x3, [sp, #16] @@ -3779,25 +3779,10 @@ function ff_hevc_put_hevc_qpel_h64_8_neon_i8mm, export=1 b.ne 1b ret endfunc +DISABLE_I8MM +#endif -function ff_hevc_put_hevc_qpel_hv4_8_neon_i8mm, export=1 - add w10, w3, #7 - mov x7, #128 - lsl x10, x10, #7 - sub sp, sp, x10 // tmp_array - stp x5, x30, [sp, #-32]! 
- stp x0, x3, [sp, #16] - add x0, sp, #32 - sub x1, x1, x2, lsl #1 - add x3, x3, #7 - sub x1, x1, x2 - bl X(ff_hevc_put_hevc_qpel_h4_8_neon_i8mm) - ldp x0, x3, [sp, #16] - ldp x5, x30, [sp], #32 - b hevc_put_hevc_qpel_hv4_8_end_neon -endfunc - function hevc_put_hevc_qpel_hv4_8_end_neon load_qpel_filterh x5, x4 ldr d16, [sp] @@ -3822,23 +3807,6 @@ function hevc_put_hevc_qpel_hv4_8_end_neon 2: ret endfunc -function ff_hevc_put_hevc_qpel_hv6_8_neon_i8mm, export=1 - add w10, w3, #7 - mov x7, #128 - lsl x10, x10, #7 - sub sp, sp, x10 // tmp_array - stp x5, x30, [sp, #-32]! - stp x0, x3, [sp, #16] - add x0, sp, #32 - sub x1, x1, x2, lsl #1 - add x3, x3, #7 - sub x1, x1, x2 - bl X(ff_hevc_put_hevc_qpel_h6_8_neon_i8mm) - ldp x0, x3, [sp, #16] - ldp x5, x30, [sp], #32 - b hevc_put_hevc_qpel_hv6_8_end_neon -endfunc - function hevc_put_hevc_qpel_hv6_8_end_neon mov x8, #120 load_qpel_filterh x5, x4 @@ -3866,22 +3834,6 @@ function hevc_put_hevc_qpel_hv6_8_end_neon 2: ret endfunc -function ff_hevc_put_hevc_qpel_hv8_8_neon_i8mm, export=1 - add w10, w3, #7 - lsl x10, x10, #7 - sub x1, x1, x2, lsl #1 - sub sp, sp, x10 // tmp_array - stp x5, x30, [sp, #-32]! - stp x0, x3, [sp, #16] - add x0, sp, #32 - add x3, x3, #7 - sub x1, x1, x2 - bl X(ff_hevc_put_hevc_qpel_h8_8_neon_i8mm) - ldp x0, x3, [sp, #16] - ldp x5, x30, [sp], #32 - b hevc_put_hevc_qpel_hv8_8_end_neon -endfunc - function hevc_put_hevc_qpel_hv8_8_end_neon mov x7, #128 load_qpel_filterh x5, x4 @@ -3908,22 +3860,6 @@ function hevc_put_hevc_qpel_hv8_8_end_neon 2: ret endfunc -function ff_hevc_put_hevc_qpel_hv12_8_neon_i8mm, export=1 - add w10, w3, #7 - lsl x10, x10, #7 - sub x1, x1, x2, lsl #1 - sub sp, sp, x10 // tmp_array - stp x5, x30, [sp, #-32]! - stp x0, x3, [sp, #16] - add x0, sp, #32 - add x3, x3, #7 - sub x1, x1, x2 - bl X(ff_hevc_put_hevc_qpel_h12_8_neon_i8mm) - ldp x0, x3, [sp, #16] - ldp x5, x30, [sp], #32 - b hevc_put_hevc_qpel_hv12_8_end_neon -endfunc - function hevc_put_hevc_qpel_hv12_8_end_neon mov x7, #128 load_qpel_filterh x5, x4 @@ -3949,22 +3885,6 @@ function hevc_put_hevc_qpel_hv12_8_end_neon 2: ret endfunc -function ff_hevc_put_hevc_qpel_hv16_8_neon_i8mm, export=1 - add w10, w3, #7 - lsl x10, x10, #7 - sub x1, x1, x2, lsl #1 - sub sp, sp, x10 // tmp_array - stp x5, x30, [sp, #-32]! - stp x0, x3, [sp, #16] - add x3, x3, #7 - add x0, sp, #32 - sub x1, x1, x2 - bl X(ff_hevc_put_hevc_qpel_h16_8_neon_i8mm) - ldp x0, x3, [sp, #16] - ldp x5, x30, [sp], #32 - b hevc_put_hevc_qpel_hv16_8_end_neon -endfunc - function hevc_put_hevc_qpel_hv16_8_end_neon mov x7, #128 load_qpel_filterh x5, x4 @@ -3989,38 +3909,6 @@ function hevc_put_hevc_qpel_hv16_8_end_neon 2: ret endfunc -function ff_hevc_put_hevc_qpel_hv24_8_neon_i8mm, export=1 - stp x4, x5, [sp, #-64]! - stp x2, x3, [sp, #16] - stp x0, x1, [sp, #32] - str x30, [sp, #48] - bl X(ff_hevc_put_hevc_qpel_hv12_8_neon_i8mm) - ldp x0, x1, [sp, #32] - ldp x2, x3, [sp, #16] - ldp x4, x5, [sp], #48 - add x1, x1, #12 - add x0, x0, #24 - bl X(ff_hevc_put_hevc_qpel_hv12_8_neon_i8mm) - ldr x30, [sp], #16 - ret -endfunc - -function ff_hevc_put_hevc_qpel_hv32_8_neon_i8mm, export=1 - add w10, w3, #7 - sub x1, x1, x2, lsl #1 - lsl x10, x10, #7 - sub x1, x1, x2 - sub sp, sp, x10 // tmp_array - stp x5, x30, [sp, #-32]! 
- stp x0, x3, [sp, #16] - add x3, x3, #7 - add x0, sp, #32 - bl X(ff_hevc_put_hevc_qpel_h32_8_neon_i8mm) - ldp x0, x3, [sp, #16] - ldp x5, x30, [sp], #32 - b hevc_put_hevc_qpel_hv32_8_end_neon -endfunc - function hevc_put_hevc_qpel_hv32_8_end_neon mov x7, #128 load_qpel_filterh x5, x4 @@ -4056,6 +3944,122 @@ function hevc_put_hevc_qpel_hv32_8_end_neon ret endfunc +#if HAVE_I8MM +ENABLE_I8MM +function ff_hevc_put_hevc_qpel_hv4_8_neon_i8mm, export=1 + add w10, w3, #7 + mov x7, #128 + lsl x10, x10, #7 + sub sp, sp, x10 // tmp_array + stp x5, x30, [sp, #-32]! + stp x0, x3, [sp, #16] + add x0, sp, #32 + sub x1, x1, x2, lsl #1 + add x3, x3, #7 + sub x1, x1, x2 + bl X(ff_hevc_put_hevc_qpel_h4_8_neon_i8mm) + ldp x0, x3, [sp, #16] + ldp x5, x30, [sp], #32 + b hevc_put_hevc_qpel_hv4_8_end_neon +endfunc + +function ff_hevc_put_hevc_qpel_hv6_8_neon_i8mm, export=1 + add w10, w3, #7 + mov x7, #128 + lsl x10, x10, #7 + sub sp, sp, x10 // tmp_array + stp x5, x30, [sp, #-32]! + stp x0, x3, [sp, #16] + add x0, sp, #32 + sub x1, x1, x2, lsl #1 + add x3, x3, #7 + sub x1, x1, x2 + bl X(ff_hevc_put_hevc_qpel_h6_8_neon_i8mm) + ldp x0, x3, [sp, #16] + ldp x5, x30, [sp], #32 + b hevc_put_hevc_qpel_hv6_8_end_neon +endfunc + +function ff_hevc_put_hevc_qpel_hv8_8_neon_i8mm, export=1 + add w10, w3, #7 + lsl x10, x10, #7 + sub x1, x1, x2, lsl #1 + sub sp, sp, x10 // tmp_array + stp x5, x30, [sp, #-32]! + stp x0, x3, [sp, #16] + add x0, sp, #32 + add x3, x3, #7 + sub x1, x1, x2 + bl X(ff_hevc_put_hevc_qpel_h8_8_neon_i8mm) + ldp x0, x3, [sp, #16] + ldp x5, x30, [sp], #32 + b hevc_put_hevc_qpel_hv8_8_end_neon +endfunc + +function ff_hevc_put_hevc_qpel_hv12_8_neon_i8mm, export=1 + add w10, w3, #7 + lsl x10, x10, #7 + sub x1, x1, x2, lsl #1 + sub sp, sp, x10 // tmp_array + stp x5, x30, [sp, #-32]! + stp x0, x3, [sp, #16] + add x0, sp, #32 + add x3, x3, #7 + sub x1, x1, x2 + bl X(ff_hevc_put_hevc_qpel_h12_8_neon_i8mm) + ldp x0, x3, [sp, #16] + ldp x5, x30, [sp], #32 + b hevc_put_hevc_qpel_hv12_8_end_neon +endfunc + +function ff_hevc_put_hevc_qpel_hv16_8_neon_i8mm, export=1 + add w10, w3, #7 + lsl x10, x10, #7 + sub x1, x1, x2, lsl #1 + sub sp, sp, x10 // tmp_array + stp x5, x30, [sp, #-32]! + stp x0, x3, [sp, #16] + add x3, x3, #7 + add x0, sp, #32 + sub x1, x1, x2 + bl X(ff_hevc_put_hevc_qpel_h16_8_neon_i8mm) + ldp x0, x3, [sp, #16] + ldp x5, x30, [sp], #32 + b hevc_put_hevc_qpel_hv16_8_end_neon +endfunc + +function ff_hevc_put_hevc_qpel_hv24_8_neon_i8mm, export=1 + stp x4, x5, [sp, #-64]! + stp x2, x3, [sp, #16] + stp x0, x1, [sp, #32] + str x30, [sp, #48] + bl X(ff_hevc_put_hevc_qpel_hv12_8_neon_i8mm) + ldp x0, x1, [sp, #32] + ldp x2, x3, [sp, #16] + ldp x4, x5, [sp], #48 + add x1, x1, #12 + add x0, x0, #24 + bl X(ff_hevc_put_hevc_qpel_hv12_8_neon_i8mm) + ldr x30, [sp], #16 + ret +endfunc + +function ff_hevc_put_hevc_qpel_hv32_8_neon_i8mm, export=1 + add w10, w3, #7 + sub x1, x1, x2, lsl #1 + lsl x10, x10, #7 + sub x1, x1, x2 + sub sp, sp, x10 // tmp_array + stp x5, x30, [sp, #-32]! + stp x0, x3, [sp, #16] + add x3, x3, #7 + add x0, sp, #32 + bl X(ff_hevc_put_hevc_qpel_h32_8_neon_i8mm) + ldp x0, x3, [sp, #16] + ldp x5, x30, [sp], #32 + b hevc_put_hevc_qpel_hv32_8_end_neon +endfunc + function ff_hevc_put_hevc_qpel_hv48_8_neon_i8mm, export=1 stp x4, x5, [sp, #-64]! 
stp x2, x3, [sp, #16] @@ -4089,6 +4093,8 @@ function ff_hevc_put_hevc_qpel_hv64_8_neon_i8mm, export=1 ldr x30, [sp], #16 ret endfunc +DISABLE_I8MM +#endif .macro QPEL_UNI_W_HV_HEADER width ldp x14, x15, [sp] // mx, my @@ -4168,11 +4174,6 @@ endfunc smlal2 \dst\().4s, \src7\().8h, v0.h[7] .endm -function ff_hevc_put_hevc_qpel_uni_w_hv4_8_neon_i8mm, export=1 - QPEL_UNI_W_HV_HEADER 4 - b hevc_put_hevc_qpel_uni_w_hv4_8_end_neon -endfunc - function hevc_put_hevc_qpel_uni_w_hv4_8_end_neon ldr d16, [sp] ldr d17, [sp, x10] @@ -4262,11 +4263,6 @@ endfunc st1 {v24.d}[0], [x20], x21 .endm -function ff_hevc_put_hevc_qpel_uni_w_hv8_8_neon_i8mm, export=1 - QPEL_UNI_W_HV_HEADER 8 - b hevc_put_hevc_qpel_uni_w_hv8_8_end_neon -endfunc - function hevc_put_hevc_qpel_uni_w_hv8_8_end_neon ldr q16, [sp] ldr q17, [sp, x10] @@ -4376,21 +4372,6 @@ endfunc st1 {v24.16b}, [x20], x21 .endm -function ff_hevc_put_hevc_qpel_uni_w_hv16_8_neon_i8mm, export=1 - QPEL_UNI_W_HV_HEADER 16 - b hevc_put_hevc_qpel_uni_w_hv16_8_end_neon -endfunc - -function ff_hevc_put_hevc_qpel_uni_w_hv32_8_neon_i8mm, export=1 - QPEL_UNI_W_HV_HEADER 32 - b hevc_put_hevc_qpel_uni_w_hv16_8_end_neon -endfunc - -function ff_hevc_put_hevc_qpel_uni_w_hv64_8_neon_i8mm, export=1 - QPEL_UNI_W_HV_HEADER 64 - b hevc_put_hevc_qpel_uni_w_hv16_8_end_neon -endfunc - function hevc_put_hevc_qpel_uni_w_hv16_8_end_neon mov x11, sp mov w12, w22 @@ -4503,26 +4484,37 @@ function hevc_put_hevc_qpel_uni_w_hv16_8_end_neon ret endfunc -function ff_hevc_put_hevc_qpel_bi_hv4_8_neon_i8mm, export=1 - add w10, w5, #7 - lsl x10, x10, #7 - sub sp, sp, x10 // tmp_array - stp x7, x30, [sp, #-48]! - stp x4, x5, [sp, #16] - stp x0, x1, [sp, #32] - sub x1, x2, x3, lsl #1 - sub x1, x1, x3 - add x0, sp, #48 - mov x2, x3 - add w3, w5, #7 - mov x4, x6 - bl X(ff_hevc_put_hevc_qpel_h4_8_neon_i8mm) - ldp x4, x5, [sp, #16] - ldp x0, x1, [sp, #32] - ldp x7, x30, [sp], #48 - b hevc_put_hevc_qpel_bi_hv4_8_end_neon +#if HAVE_I8MM +ENABLE_I8MM + +function ff_hevc_put_hevc_qpel_uni_w_hv4_8_neon_i8mm, export=1 + QPEL_UNI_W_HV_HEADER 4 + b hevc_put_hevc_qpel_uni_w_hv4_8_end_neon +endfunc + +function ff_hevc_put_hevc_qpel_uni_w_hv8_8_neon_i8mm, export=1 + QPEL_UNI_W_HV_HEADER 8 + b hevc_put_hevc_qpel_uni_w_hv8_8_end_neon +endfunc + +function ff_hevc_put_hevc_qpel_uni_w_hv16_8_neon_i8mm, export=1 + QPEL_UNI_W_HV_HEADER 16 + b hevc_put_hevc_qpel_uni_w_hv16_8_end_neon +endfunc + +function ff_hevc_put_hevc_qpel_uni_w_hv32_8_neon_i8mm, export=1 + QPEL_UNI_W_HV_HEADER 32 + b hevc_put_hevc_qpel_uni_w_hv16_8_end_neon endfunc +function ff_hevc_put_hevc_qpel_uni_w_hv64_8_neon_i8mm, export=1 + QPEL_UNI_W_HV_HEADER 64 + b hevc_put_hevc_qpel_uni_w_hv16_8_end_neon +endfunc + +DISABLE_I8MM +#endif + function hevc_put_hevc_qpel_bi_hv4_8_end_neon mov x9, #(MAX_PB_SIZE * 2) load_qpel_filterh x7, x6 @@ -4548,26 +4540,6 @@ function hevc_put_hevc_qpel_bi_hv4_8_end_neon 2: ret endfunc -function ff_hevc_put_hevc_qpel_bi_hv6_8_neon_i8mm, export=1 - add w10, w5, #7 - lsl x10, x10, #7 - sub sp, sp, x10 // tmp_array - stp x7, x30, [sp, #-48]! 
- stp x4, x5, [sp, #16] - stp x0, x1, [sp, #32] - sub x1, x2, x3, lsl #1 - sub x1, x1, x3 - add x0, sp, #48 - mov x2, x3 - add x3, x5, #7 - mov x4, x6 - bl X(ff_hevc_put_hevc_qpel_h6_8_neon_i8mm) - ldp x4, x5, [sp, #16] - ldp x0, x1, [sp, #32] - ldp x7, x30, [sp], #48 - b hevc_put_hevc_qpel_bi_hv6_8_end_neon -endfunc - function hevc_put_hevc_qpel_bi_hv6_8_end_neon mov x9, #(MAX_PB_SIZE * 2) load_qpel_filterh x7, x6 @@ -4598,26 +4570,6 @@ function hevc_put_hevc_qpel_bi_hv6_8_end_neon 2: ret endfunc -function ff_hevc_put_hevc_qpel_bi_hv8_8_neon_i8mm, export=1 - add w10, w5, #7 - lsl x10, x10, #7 - sub sp, sp, x10 // tmp_array - stp x7, x30, [sp, #-48]! - stp x4, x5, [sp, #16] - stp x0, x1, [sp, #32] - sub x1, x2, x3, lsl #1 - sub x1, x1, x3 - add x0, sp, #48 - mov x2, x3 - add x3, x5, #7 - mov x4, x6 - bl X(ff_hevc_put_hevc_qpel_h8_8_neon_i8mm) - ldp x4, x5, [sp, #16] - ldp x0, x1, [sp, #32] - ldp x7, x30, [sp], #48 - b hevc_put_hevc_qpel_bi_hv8_8_end_neon -endfunc - function hevc_put_hevc_qpel_bi_hv8_8_end_neon mov x9, #(MAX_PB_SIZE * 2) load_qpel_filterh x7, x6 @@ -4646,46 +4598,6 @@ function hevc_put_hevc_qpel_bi_hv8_8_end_neon 2: ret endfunc -function ff_hevc_put_hevc_qpel_bi_hv12_8_neon_i8mm, export=1 - stp x6, x7, [sp, #-80]! - stp x4, x5, [sp, #16] - stp x2, x3, [sp, #32] - stp x0, x1, [sp, #48] - str x30, [sp, #64] - bl X(ff_hevc_put_hevc_qpel_bi_hv8_8_neon_i8mm) - ldp x4, x5, [sp, #16] - ldp x2, x3, [sp, #32] - ldp x0, x1, [sp, #48] - ldp x6, x7, [sp], #64 - add x4, x4, #16 - add x2, x2, #8 - add x0, x0, #8 - bl X(ff_hevc_put_hevc_qpel_bi_hv4_8_neon_i8mm) - ldr x30, [sp], #16 - ret -endfunc - -function ff_hevc_put_hevc_qpel_bi_hv16_8_neon_i8mm, export=1 - add w10, w5, #7 - lsl x10, x10, #7 - sub sp, sp, x10 // tmp_array - stp x7, x30, [sp, #-48]! - stp x4, x5, [sp, #16] - stp x0, x1, [sp, #32] - add x0, sp, #48 - sub x1, x2, x3, lsl #1 - sub x1, x1, x3 - mov x2, x3 - add w3, w5, #7 - mov x4, x6 - bl X(ff_hevc_put_hevc_qpel_h16_8_neon_i8mm) - ldp x4, x5, [sp, #16] - ldp x0, x1, [sp, #32] - ldp x7, x30, [sp], #48 - mov x6, #16 // width - b hevc_put_hevc_qpel_bi_hv16_8_end_neon -endfunc - function hevc_put_hevc_qpel_bi_hv16_8_end_neon load_qpel_filterh x7, x8 mov x9, #(MAX_PB_SIZE * 2) @@ -4735,6 +4647,109 @@ function hevc_put_hevc_qpel_bi_hv16_8_end_neon ret endfunc +#if HAVE_I8MM +ENABLE_I8MM + +function ff_hevc_put_hevc_qpel_bi_hv4_8_neon_i8mm, export=1 + add w10, w5, #7 + lsl x10, x10, #7 + sub sp, sp, x10 // tmp_array + stp x7, x30, [sp, #-48]! + stp x4, x5, [sp, #16] + stp x0, x1, [sp, #32] + sub x1, x2, x3, lsl #1 + sub x1, x1, x3 + add x0, sp, #48 + mov x2, x3 + add w3, w5, #7 + mov x4, x6 + bl X(ff_hevc_put_hevc_qpel_h4_8_neon_i8mm) + ldp x4, x5, [sp, #16] + ldp x0, x1, [sp, #32] + ldp x7, x30, [sp], #48 + b hevc_put_hevc_qpel_bi_hv4_8_end_neon +endfunc + +function ff_hevc_put_hevc_qpel_bi_hv6_8_neon_i8mm, export=1 + add w10, w5, #7 + lsl x10, x10, #7 + sub sp, sp, x10 // tmp_array + stp x7, x30, [sp, #-48]! + stp x4, x5, [sp, #16] + stp x0, x1, [sp, #32] + sub x1, x2, x3, lsl #1 + sub x1, x1, x3 + add x0, sp, #48 + mov x2, x3 + add x3, x5, #7 + mov x4, x6 + bl X(ff_hevc_put_hevc_qpel_h6_8_neon_i8mm) + ldp x4, x5, [sp, #16] + ldp x0, x1, [sp, #32] + ldp x7, x30, [sp], #48 + b hevc_put_hevc_qpel_bi_hv6_8_end_neon +endfunc + +function ff_hevc_put_hevc_qpel_bi_hv8_8_neon_i8mm, export=1 + add w10, w5, #7 + lsl x10, x10, #7 + sub sp, sp, x10 // tmp_array + stp x7, x30, [sp, #-48]! 
+ stp x4, x5, [sp, #16] + stp x0, x1, [sp, #32] + sub x1, x2, x3, lsl #1 + sub x1, x1, x3 + add x0, sp, #48 + mov x2, x3 + add x3, x5, #7 + mov x4, x6 + bl X(ff_hevc_put_hevc_qpel_h8_8_neon_i8mm) + ldp x4, x5, [sp, #16] + ldp x0, x1, [sp, #32] + ldp x7, x30, [sp], #48 + b hevc_put_hevc_qpel_bi_hv8_8_end_neon +endfunc + +function ff_hevc_put_hevc_qpel_bi_hv12_8_neon_i8mm, export=1 + stp x6, x7, [sp, #-80]! + stp x4, x5, [sp, #16] + stp x2, x3, [sp, #32] + stp x0, x1, [sp, #48] + str x30, [sp, #64] + bl X(ff_hevc_put_hevc_qpel_bi_hv8_8_neon_i8mm) + ldp x4, x5, [sp, #16] + ldp x2, x3, [sp, #32] + ldp x0, x1, [sp, #48] + ldp x6, x7, [sp], #64 + add x4, x4, #16 + add x2, x2, #8 + add x0, x0, #8 + bl X(ff_hevc_put_hevc_qpel_bi_hv4_8_neon_i8mm) + ldr x30, [sp], #16 + ret +endfunc + +function ff_hevc_put_hevc_qpel_bi_hv16_8_neon_i8mm, export=1 + add w10, w5, #7 + lsl x10, x10, #7 + sub sp, sp, x10 // tmp_array + stp x7, x30, [sp, #-48]! + stp x4, x5, [sp, #16] + stp x0, x1, [sp, #32] + add x0, sp, #48 + sub x1, x2, x3, lsl #1 + sub x1, x1, x3 + mov x2, x3 + add w3, w5, #7 + mov x4, x6 + bl X(ff_hevc_put_hevc_qpel_h16_8_neon_i8mm) + ldp x4, x5, [sp, #16] + ldp x0, x1, [sp, #32] + ldp x7, x30, [sp], #48 + mov x6, #16 // width + b hevc_put_hevc_qpel_bi_hv16_8_end_neon +endfunc + function ff_hevc_put_hevc_qpel_bi_hv24_8_neon_i8mm, export=1 stp x6, x7, [sp, #-80]! stp x4, x5, [sp, #16] -- 2.39.3 (Apple Git-146) _______________________________________________ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
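
A note on what this reordering enables, purely as a hedged illustration (the macro below is not part of this patch or the FFmpeg tree; its name and parameters are invented): with the shared hevc_put_hevc_qpel_*_end_neon helpers now kept outside the #if HAVE_I8MM blocks, the exported wrappers that remain inside them differ only in the block size and in which horizontal filter they call, so a follow-up change could plausibly emit them from a single GNU as macro along these lines:

// Hypothetical sketch only, not from this patch.
// Emits one exported uni_hv wrapper for block size \size and function
// suffix \suffix, calling the matching horizontal filter and tail-calling
// the shared hevc_put_hevc_qpel_uni_hv<size>_8_end_neon helper that this
// patch leaves outside the #if HAVE_I8MM block.
.macro qpel_uni_hv_wrapper size, suffix
function ff_hevc_put_hevc_qpel_uni_hv\size\()_8_neon\suffix, export=1
        add             w10, w4, #7
        lsl             x10, x10, #7
        sub             sp, sp, x10        // tmp_array
        str             x30, [sp, #-48]!
        stp             x4, x6, [sp, #16]
        stp             x0, x1, [sp, #32]
        sub             x1, x2, x3, lsl #1
        sub             x1, x1, x3
        add             x0, sp, #48
        mov             x2, x3
        add             w3, w4, #7
        mov             x4, x5
        bl              X(ff_hevc_put_hevc_qpel_h\size\()_8_neon\suffix)
        ldp             x4, x6, [sp, #16]
        ldp             x0, x1, [sp, #32]
        ldr             x30, [sp], #48
        b               hevc_put_hevc_qpel_uni_hv\size\()_8_end_neon
endfunc
.endm

#if HAVE_I8MM
ENABLE_I8MM
qpel_uni_hv_wrapper 4, _i8mm
qpel_uni_hv_wrapper 6, _i8mm
qpel_uni_hv_wrapper 8, _i8mm
DISABLE_I8MM
#endif

Whether the follow-up patches in this series take exactly this shape is not shown here; the sketch only illustrates why grouping the wrappers separately from the shared helpers is useful for templating.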