Git Inbox Mirror of the ffmpeg-devel mailing list - see https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
From: tong1.wu-at-intel.com@ffmpeg.org
To: ffmpeg-devel@ffmpeg.org
Cc: Tong Wu <tong1.wu@intel.com>
Subject: [FFmpeg-devel] [PATCH v8 06/15] avcodec/vaapi_encode: extract the init function to base layer
Date: Thu, 18 Apr 2024 16:59:00 +0800
Message-ID: <20240418085910.547-6-tong1.wu@intel.com> (raw)
In-Reply-To: <20240418085910.547-1-tong1.wu@intel.com>

From: Tong Wu <tong1.wu@intel.com>

The init function is extracted to the base layer; the related device and
input frame-context parameters are moved to the base layer along with it.

Signed-off-by: Tong Wu <tong1.wu@intel.com>
---
 libavcodec/hw_base_encode.c     | 33 ++++++++++++++++
 libavcodec/hw_base_encode.h     | 11 ++++++
 libavcodec/vaapi_encode.c       | 68 ++++++++++-----------------------
 libavcodec/vaapi_encode.h       |  6 ---
 libavcodec/vaapi_encode_av1.c   |  2 +-
 libavcodec/vaapi_encode_h264.c  |  2 +-
 libavcodec/vaapi_encode_h265.c  |  2 +-
 libavcodec/vaapi_encode_mjpeg.c |  6 ++-
 8 files changed, 72 insertions(+), 58 deletions(-)
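
Not part of the patch, only a minimal sketch of the call pattern this
introduces for backends; example_hw_encode_init() is a hypothetical
backend init, the real caller in this patch is ff_vaapi_encode_init():

    #include "libavutil/attributes.h"
    #include "avcodec.h"
    #include "hw_base_encode.h"

    /* Hypothetical backend init: the common allocations and the
     * device/frames references are now taken by the base layer. */
    static av_cold int example_hw_encode_init(AVCodecContext *avctx)
    {
        HWBaseEncodeContext *base_ctx = avctx->priv_data;
        int err;

        /* Allocates base_ctx->frame and base_ctx->tail_pkt and takes
         * references on avctx->hw_frames_ctx and its device. */
        err = ff_hw_base_encode_init(avctx);
        if (err < 0)
            return err;

        /* Device and input-frame information is then reached through
         * the base context, e.g. base_ctx->device->hwctx or
         * base_ctx->input_frames->sw_format. */
        return 0;
    }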

diff --git a/libavcodec/hw_base_encode.c b/libavcodec/hw_base_encode.c
index 1d9a255f69..14f3ecfc94 100644
--- a/libavcodec/hw_base_encode.c
+++ b/libavcodec/hw_base_encode.c
@@ -598,3 +598,36 @@ end:
 
     return 0;
 }
+
+int ff_hw_base_encode_init(AVCodecContext *avctx)
+{
+    HWBaseEncodeContext *ctx = avctx->priv_data;
+
+    ctx->frame = av_frame_alloc();
+    if (!ctx->frame)
+        return AVERROR(ENOMEM);
+
+    if (!avctx->hw_frames_ctx) {
+        av_log(avctx, AV_LOG_ERROR, "A hardware frames reference is "
+               "required to associate the encoding device.\n");
+        return AVERROR(EINVAL);
+    }
+
+    ctx->input_frames_ref = av_buffer_ref(avctx->hw_frames_ctx);
+    if (!ctx->input_frames_ref)
+        return AVERROR(ENOMEM);
+
+    ctx->input_frames = (AVHWFramesContext *)ctx->input_frames_ref->data;
+
+    ctx->device_ref = av_buffer_ref(ctx->input_frames->device_ref);
+    if (!ctx->device_ref)
+        return AVERROR(ENOMEM);
+
+    ctx->device = (AVHWDeviceContext *)ctx->device_ref->data;
+
+    ctx->tail_pkt = av_packet_alloc();
+    if (!ctx->tail_pkt)
+        return AVERROR(ENOMEM);
+
+    return 0;
+}
diff --git a/libavcodec/hw_base_encode.h b/libavcodec/hw_base_encode.h
index b5b676b9a8..f7e385e840 100644
--- a/libavcodec/hw_base_encode.h
+++ b/libavcodec/hw_base_encode.h
@@ -19,6 +19,7 @@
 #ifndef AVCODEC_HW_BASE_ENCODE_H
 #define AVCODEC_HW_BASE_ENCODE_H
 
+#include "libavutil/hwcontext.h"
 #include "libavutil/fifo.h"
 
 #define MAX_DPB_SIZE 16
@@ -117,6 +118,14 @@ typedef struct HWBaseEncodeContext {
     // Hardware-specific hooks.
     const struct HWEncodePictureOperation *op;
 
+    // The hardware device context.
+    AVBufferRef    *device_ref;
+    AVHWDeviceContext *device;
+
+    // The hardware frame context containing the input frames.
+    AVBufferRef    *input_frames_ref;
+    AVHWFramesContext *input_frames;
+
     // Current encoding window, in display (input) order.
     HWBaseEncodePicture *pic_start, *pic_end;
     // The next picture to use as the previous reference picture in
@@ -183,6 +192,8 @@ typedef struct HWBaseEncodeContext {
 
 int ff_hw_base_encode_receive_packet(AVCodecContext *avctx, AVPacket *pkt);
 
+int ff_hw_base_encode_init(AVCodecContext *avctx);
+
 #define HW_BASE_ENCODE_COMMON_OPTIONS \
     { "async_depth", "Maximum processing parallelism. " \
       "Increase this to improve single channel performance.", \
diff --git a/libavcodec/vaapi_encode.c b/libavcodec/vaapi_encode.c
index 18966596e1..c7488ad150 100644
--- a/libavcodec/vaapi_encode.c
+++ b/libavcodec/vaapi_encode.c
@@ -996,9 +996,10 @@ static const VAEntrypoint vaapi_encode_entrypoints_low_power[] = {
 
 static av_cold int vaapi_encode_profile_entrypoint(AVCodecContext *avctx)
 {
-    VAAPIEncodeContext      *ctx = avctx->priv_data;
-    VAProfile    *va_profiles    = NULL;
-    VAEntrypoint *va_entrypoints = NULL;
+    HWBaseEncodeContext *base_ctx = avctx->priv_data;
+    VAAPIEncodeContext       *ctx = avctx->priv_data;
+    VAProfile     *va_profiles    = NULL;
+    VAEntrypoint  *va_entrypoints = NULL;
     VAStatus vas;
     const VAEntrypoint *usable_entrypoints;
     const VAAPIEncodeProfile *profile;
@@ -1021,10 +1022,10 @@ static av_cold int vaapi_encode_profile_entrypoint(AVCodecContext *avctx)
         usable_entrypoints = vaapi_encode_entrypoints_normal;
     }
 
-    desc = av_pix_fmt_desc_get(ctx->input_frames->sw_format);
+    desc = av_pix_fmt_desc_get(base_ctx->input_frames->sw_format);
     if (!desc) {
         av_log(avctx, AV_LOG_ERROR, "Invalid input pixfmt (%d).\n",
-               ctx->input_frames->sw_format);
+               base_ctx->input_frames->sw_format);
         return AVERROR(EINVAL);
     }
     depth = desc->comp[0].depth;
@@ -2131,20 +2132,21 @@ static int vaapi_encode_alloc_output_buffer(FFRefStructOpaque opaque, void *obj)
 
 static av_cold int vaapi_encode_create_recon_frames(AVCodecContext *avctx)
 {
-    VAAPIEncodeContext *ctx = avctx->priv_data;
+    HWBaseEncodeContext *base_ctx = avctx->priv_data;
+    VAAPIEncodeContext       *ctx = avctx->priv_data;
     AVVAAPIHWConfig *hwconfig = NULL;
     AVHWFramesConstraints *constraints = NULL;
     enum AVPixelFormat recon_format;
     int err, i;
 
-    hwconfig = av_hwdevice_hwconfig_alloc(ctx->device_ref);
+    hwconfig = av_hwdevice_hwconfig_alloc(base_ctx->device_ref);
     if (!hwconfig) {
         err = AVERROR(ENOMEM);
         goto fail;
     }
     hwconfig->config_id = ctx->va_config;
 
-    constraints = av_hwdevice_get_hwframe_constraints(ctx->device_ref,
+    constraints = av_hwdevice_get_hwframe_constraints(base_ctx->device_ref,
                                                       hwconfig);
     if (!constraints) {
         err = AVERROR(ENOMEM);
@@ -2157,9 +2159,9 @@ static av_cold int vaapi_encode_create_recon_frames(AVCodecContext *avctx)
     recon_format = AV_PIX_FMT_NONE;
     if (constraints->valid_sw_formats) {
         for (i = 0; constraints->valid_sw_formats[i] != AV_PIX_FMT_NONE; i++) {
-            if (ctx->input_frames->sw_format ==
+            if (base_ctx->input_frames->sw_format ==
                 constraints->valid_sw_formats[i]) {
-                recon_format = ctx->input_frames->sw_format;
+                recon_format = base_ctx->input_frames->sw_format;
                 break;
             }
         }
@@ -2170,7 +2172,7 @@ static av_cold int vaapi_encode_create_recon_frames(AVCodecContext *avctx)
         }
     } else {
         // No idea what to use; copy input format.
-        recon_format = ctx->input_frames->sw_format;
+        recon_format = base_ctx->input_frames->sw_format;
     }
     av_log(avctx, AV_LOG_DEBUG, "Using %s as format of "
            "reconstructed frames.\n", av_get_pix_fmt_name(recon_format));
@@ -2191,7 +2193,7 @@ static av_cold int vaapi_encode_create_recon_frames(AVCodecContext *avctx)
     av_freep(&hwconfig);
     av_hwframe_constraints_free(&constraints);
 
-    ctx->recon_frames_ref = av_hwframe_ctx_alloc(ctx->device_ref);
+    ctx->recon_frames_ref = av_hwframe_ctx_alloc(base_ctx->device_ref);
     if (!ctx->recon_frames_ref) {
         err = AVERROR(ENOMEM);
         goto fail;
@@ -2235,44 +2237,16 @@ av_cold int ff_vaapi_encode_init(AVCodecContext *avctx)
     VAStatus vas;
     int err;
 
+    err = ff_hw_base_encode_init(avctx);
+    if (err < 0)
+        goto fail;
+
     ctx->va_config  = VA_INVALID_ID;
     ctx->va_context = VA_INVALID_ID;
 
     base_ctx->op = &vaapi_op;
 
-    /* If you add something that can fail above this av_frame_alloc(),
-     * modify ff_vaapi_encode_close() accordingly. */
-    base_ctx->frame = av_frame_alloc();
-    if (!base_ctx->frame) {
-        return AVERROR(ENOMEM);
-    }
-
-    if (!avctx->hw_frames_ctx) {
-        av_log(avctx, AV_LOG_ERROR, "A hardware frames reference is "
-               "required to associate the encoding device.\n");
-        return AVERROR(EINVAL);
-    }
-
-    ctx->input_frames_ref = av_buffer_ref(avctx->hw_frames_ctx);
-    if (!ctx->input_frames_ref) {
-        err = AVERROR(ENOMEM);
-        goto fail;
-    }
-    ctx->input_frames = (AVHWFramesContext*)ctx->input_frames_ref->data;
-
-    ctx->device_ref = av_buffer_ref(ctx->input_frames->device_ref);
-    if (!ctx->device_ref) {
-        err = AVERROR(ENOMEM);
-        goto fail;
-    }
-    ctx->device = (AVHWDeviceContext*)ctx->device_ref->data;
-    ctx->hwctx = ctx->device->hwctx;
-
-    base_ctx->tail_pkt = av_packet_alloc();
-    if (!base_ctx->tail_pkt) {
-        err = AVERROR(ENOMEM);
-        goto fail;
-    }
+    ctx->hwctx = base_ctx->device->hwctx;
 
     err = vaapi_encode_profile_entrypoint(avctx);
     if (err < 0)
@@ -2475,8 +2449,8 @@ av_cold int ff_vaapi_encode_close(AVCodecContext *avctx)
     av_fifo_freep2(&base_ctx->encode_fifo);
 
     av_buffer_unref(&ctx->recon_frames_ref);
-    av_buffer_unref(&ctx->input_frames_ref);
-    av_buffer_unref(&ctx->device_ref);
+    av_buffer_unref(&base_ctx->input_frames_ref);
+    av_buffer_unref(&base_ctx->device_ref);
 
     return 0;
 }
diff --git a/libavcodec/vaapi_encode.h b/libavcodec/vaapi_encode.h
index 13ccad8e47..8e466e2074 100644
--- a/libavcodec/vaapi_encode.h
+++ b/libavcodec/vaapi_encode.h
@@ -219,14 +219,8 @@ typedef struct VAAPIEncodeContext {
     VAConfigID      va_config;
     VAContextID     va_context;
 
-    AVBufferRef    *device_ref;
-    AVHWDeviceContext *device;
     AVVAAPIDeviceContext *hwctx;
 
-    // The hardware frame context containing the input frames.
-    AVBufferRef    *input_frames_ref;
-    AVHWFramesContext *input_frames;
-
     // The hardware frame context containing the reconstructed frames.
     AVBufferRef    *recon_frames_ref;
     AVHWFramesContext *recon_frames;
diff --git a/libavcodec/vaapi_encode_av1.c b/libavcodec/vaapi_encode_av1.c
index 393b479f99..c37ff7591a 100644
--- a/libavcodec/vaapi_encode_av1.c
+++ b/libavcodec/vaapi_encode_av1.c
@@ -370,7 +370,7 @@ static int vaapi_encode_av1_init_sequence_params(AVCodecContext *avctx)
     memset(sh_obu, 0, sizeof(*sh_obu));
     sh_obu->header.obu_type = AV1_OBU_SEQUENCE_HEADER;
 
-    desc = av_pix_fmt_desc_get(priv->common.input_frames->sw_format);
+    desc = av_pix_fmt_desc_get(base_ctx->input_frames->sw_format);
     av_assert0(desc);
 
     sh->seq_profile  = avctx->profile;
diff --git a/libavcodec/vaapi_encode_h264.c b/libavcodec/vaapi_encode_h264.c
index 412e392ac4..106df30563 100644
--- a/libavcodec/vaapi_encode_h264.c
+++ b/libavcodec/vaapi_encode_h264.c
@@ -308,7 +308,7 @@ static int vaapi_encode_h264_init_sequence_params(AVCodecContext *avctx)
     memset(sps, 0, sizeof(*sps));
     memset(pps, 0, sizeof(*pps));
 
-    desc = av_pix_fmt_desc_get(priv->common.input_frames->sw_format);
+    desc = av_pix_fmt_desc_get(base_ctx->input_frames->sw_format);
     av_assert0(desc);
     if (desc->nb_components == 1 || desc->log2_chroma_w != 1 || desc->log2_chroma_h != 1) {
         av_log(avctx, AV_LOG_ERROR, "Chroma format of input pixel format "
diff --git a/libavcodec/vaapi_encode_h265.c b/libavcodec/vaapi_encode_h265.c
index bea35d7ca8..199d61c3a2 100644
--- a/libavcodec/vaapi_encode_h265.c
+++ b/libavcodec/vaapi_encode_h265.c
@@ -278,7 +278,7 @@ static int vaapi_encode_h265_init_sequence_params(AVCodecContext *avctx)
     memset(pps, 0, sizeof(*pps));
 
 
-    desc = av_pix_fmt_desc_get(priv->common.input_frames->sw_format);
+    desc = av_pix_fmt_desc_get(base_ctx->input_frames->sw_format);
     av_assert0(desc);
     if (desc->nb_components == 1) {
         chroma_format = 0;
diff --git a/libavcodec/vaapi_encode_mjpeg.c b/libavcodec/vaapi_encode_mjpeg.c
index 8ca1be192a..17fd8ba8e9 100644
--- a/libavcodec/vaapi_encode_mjpeg.c
+++ b/libavcodec/vaapi_encode_mjpeg.c
@@ -222,6 +222,7 @@ static int vaapi_encode_mjpeg_write_extra_buffer(AVCodecContext *avctx,
 static int vaapi_encode_mjpeg_init_picture_params(AVCodecContext *avctx,
                                                   VAAPIEncodePicture *pic)
 {
+    HWBaseEncodeContext         *base_ctx = avctx->priv_data;
     VAAPIEncodeMJPEGContext         *priv = avctx->priv_data;
     HWBaseEncodePicture         *base_pic = (HWBaseEncodePicture *)pic;
     JPEGRawFrameHeader                *fh = &priv->frame_header;
@@ -235,7 +236,7 @@ static int vaapi_encode_mjpeg_init_picture_params(AVCodecContext *avctx,
 
     av_assert0(base_pic->type == PICTURE_TYPE_IDR);
 
-    desc = av_pix_fmt_desc_get(priv->common.input_frames->sw_format);
+    desc = av_pix_fmt_desc_get(base_ctx->input_frames->sw_format);
     av_assert0(desc);
     if (desc->flags & AV_PIX_FMT_FLAG_RGB)
         components = components_rgb;
@@ -437,10 +438,11 @@ static int vaapi_encode_mjpeg_init_slice_params(AVCodecContext *avctx,
 
 static av_cold int vaapi_encode_mjpeg_get_encoder_caps(AVCodecContext *avctx)
 {
+    HWBaseEncodeContext *base_ctx = avctx->priv_data;
     VAAPIEncodeContext *ctx = avctx->priv_data;
     const AVPixFmtDescriptor *desc;
 
-    desc = av_pix_fmt_desc_get(ctx->input_frames->sw_format);
+    desc = av_pix_fmt_desc_get(base_ctx->input_frames->sw_format);
     av_assert0(desc);
 
     ctx->surface_width  = FFALIGN(avctx->width,  8 << desc->log2_chroma_w);
-- 
2.41.0.windows.1
