Git Inbox Mirror of the ffmpeg-devel mailing list - see https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
* [FFmpeg-devel] [PATCH v1 1/4] lavu: add sub frame side data
@ 2022-04-29  7:59 Fei Wang
  2022-04-29  7:59 ` [FFmpeg-devel] [PATCH v1 2/4] lavc: add sub frame options and flag Fei Wang
                   ` (2 more replies)
  0 siblings, 3 replies; 4+ messages in thread
From: Fei Wang @ 2022-04-29  7:59 UTC (permalink / raw)
  To: ffmpeg-devel; +Cc: Fei Wang

Sub frame side data allows attaching another AVFrame as side data to
the target AVFrame.
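
For illustration, a minimal producer/consumer sketch using only the
helper and side data type added below (a hedged sketch; error handling
and frame setup are omitted):

#include <libavutil/frame.h>
#include <libavutil/sub_frame_metadata.h>

/* Producer side: attach an empty sub frame to a target frame. The helper
 * allocates an AVFrame, wraps it in an AVBufferRef and adds it as
 * AV_FRAME_DATA_SUB_FRAME side data; the wrapped frame is freed when the
 * side data buffer is unreferenced. */
static AVFrame *attach_sub_frame(AVFrame *main_frame)
{
    return av_sub_frame_create_side_data(main_frame);
}

/* Consumer side: fetch the sub frame back from the side data entry. */
static AVFrame *get_sub_frame(const AVFrame *main_frame)
{
    AVFrameSideData *sd =
        av_frame_get_side_data(main_frame, AV_FRAME_DATA_SUB_FRAME);
    return sd ? (AVFrame *)sd->data : NULL;
}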

Signed-off-by: Fei Wang <fei.w.wang@intel.com>
---
 libavutil/Makefile             |  2 ++
 libavutil/frame.c              |  5 ++-
 libavutil/frame.h              |  5 +++
 libavutil/sub_frame_metadata.c | 61 ++++++++++++++++++++++++++++++++++
 libavutil/sub_frame_metadata.h | 35 +++++++++++++++++++
 libavutil/version.h            |  4 +--
 6 files changed, 109 insertions(+), 3 deletions(-)
 create mode 100644 libavutil/sub_frame_metadata.c
 create mode 100644 libavutil/sub_frame_metadata.h

diff --git a/libavutil/Makefile b/libavutil/Makefile
index 81df3b0640..09333a0b48 100644
--- a/libavutil/Makefile
+++ b/libavutil/Makefile
@@ -88,6 +88,7 @@ HEADERS = adler32.h                                                     \
           tea.h                                                         \
           tx.h                                                          \
           film_grain_params.h                                           \
+          sub_frame_metadata.h                                          \
 
 ARCH_HEADERS = bswap.h                                                  \
                intmath.h                                                \
@@ -176,6 +177,7 @@ OBJS = adler32.o                                                        \
        tx_int32.o                                                       \
        video_enc_params.o                                               \
        film_grain_params.o                                              \
+       sub_frame_metadata.o                                             \
 
 
 OBJS-$(CONFIG_CUDA)                     += hwcontext_cuda.o
diff --git a/libavutil/frame.c b/libavutil/frame.c
index fbb869fffa..b21a15e652 100644
--- a/libavutil/frame.c
+++ b/libavutil/frame.c
@@ -315,7 +315,9 @@ static int frame_copy_props(AVFrame *dst, const AVFrame *src, int force_copy)
         if (   sd_src->type == AV_FRAME_DATA_PANSCAN
             && (src->width != dst->width || src->height != dst->height))
             continue;
-        if (force_copy) {
+        /* Don't copy sub frame side data, otherwise the sub frame's pointers
+         * in dst may become invalid. */
+        if (force_copy && sd_src->type != AV_FRAME_DATA_SUB_FRAME) {
             sd_dst = av_frame_new_side_data(dst, sd_src->type,
                                             sd_src->size);
             if (!sd_dst) {
@@ -815,6 +817,7 @@ const char *av_frame_side_data_name(enum AVFrameSideDataType type)
     case AV_FRAME_DATA_DETECTION_BBOXES:            return "Bounding boxes for object detection and classification";
     case AV_FRAME_DATA_DOVI_RPU_BUFFER:             return "Dolby Vision RPU Data";
     case AV_FRAME_DATA_DOVI_METADATA:               return "Dolby Vision Metadata";
+    case AV_FRAME_DATA_SUB_FRAME:                   return "Sub frame Metadata";
     }
     return NULL;
 }
diff --git a/libavutil/frame.h b/libavutil/frame.h
index 33fac2054c..9ae1286100 100644
--- a/libavutil/frame.h
+++ b/libavutil/frame.h
@@ -209,6 +209,11 @@ enum AVFrameSideDataType {
      * volume transform - CUVA 005.1-2021.
      */
     AV_FRAME_DATA_DYNAMIC_HDR_VIVID,
+
+    /**
+     * Sub frame of a target frame. The data is an AVFrame.
+     */
+    AV_FRAME_DATA_SUB_FRAME,
 };
 
 enum AVActiveFormatDescription {
diff --git a/libavutil/sub_frame_metadata.c b/libavutil/sub_frame_metadata.c
new file mode 100644
index 0000000000..82ea32383f
--- /dev/null
+++ b/libavutil/sub_frame_metadata.c
@@ -0,0 +1,61 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "sub_frame_metadata.h"
+
+static void sub_frame_free(void *opaque, uint8_t *data)
+{
+    AVFrame *frame = (AVFrame*)data;
+
+    av_frame_free(&frame);
+}
+
+static AVFrame *sub_frame_alloc(size_t *out_size)
+{
+    AVFrame *sub_frame = av_frame_alloc();
+    if (!sub_frame)
+        return NULL;
+
+    *out_size = sizeof(*sub_frame);
+
+    return sub_frame;
+}
+
+AVFrame *av_sub_frame_create_side_data(AVFrame *frame)
+{
+    AVBufferRef *buf;
+    AVFrame *sub_frame;
+    size_t size;
+
+    sub_frame = sub_frame_alloc(&size);
+    if (!sub_frame)
+        return NULL;
+
+    buf = av_buffer_create((uint8_t *)sub_frame, size, &sub_frame_free, NULL, 0);
+    if (!buf) {
+        av_frame_free(&sub_frame);
+        return NULL;
+    }
+
+    if (!av_frame_new_side_data_from_buf(frame, AV_FRAME_DATA_SUB_FRAME, buf)) {
+        av_buffer_unref(&buf);
+        return NULL;
+    }
+
+    return sub_frame;
+}
diff --git a/libavutil/sub_frame_metadata.h b/libavutil/sub_frame_metadata.h
new file mode 100644
index 0000000000..621fb31e42
--- /dev/null
+++ b/libavutil/sub_frame_metadata.h
@@ -0,0 +1,35 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_SUB_FRAME_METADATA_H
+#define AVUTIL_SUB_FRAME_METADATA_H
+
+#include "frame.h"
+
+/**
+ * Allocate an AVFrame structure and add it to the input frame as
+ * side data. The allocated AVFrame will be freed automatically once
+ * the reference count of the created side data buffer drops to zero.
+ *
+ * @param frame The frame to which the side data is added.
+ *
+ * @return The AVFrame structure to be filled by the caller.
+ */
+AVFrame *av_sub_frame_create_side_data(AVFrame *frame);
+
+#endif /* AVUTIL_SUB_FRAME_METADATA_H */
diff --git a/libavutil/version.h b/libavutil/version.h
index 6735c20090..dd7d20a9fa 100644
--- a/libavutil/version.h
+++ b/libavutil/version.h
@@ -79,8 +79,8 @@
  */
 
 #define LIBAVUTIL_VERSION_MAJOR  57
-#define LIBAVUTIL_VERSION_MINOR  24
-#define LIBAVUTIL_VERSION_MICRO 101
+#define LIBAVUTIL_VERSION_MINOR  25
+#define LIBAVUTIL_VERSION_MICRO 100
 
 #define LIBAVUTIL_VERSION_INT   AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \
                                                LIBAVUTIL_VERSION_MINOR, \
-- 
2.25.1

_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".

* [FFmpeg-devel] [PATCH v1 2/4] lavc: add sub frame options and flag
  2022-04-29  7:59 [FFmpeg-devel] [PATCH v1 1/4] lavu: add sub frame side data Fei Wang
@ 2022-04-29  7:59 ` Fei Wang
  2022-04-29  7:59 ` [FFmpeg-devel] [PATCH v1 3/4] lavc/hevc_vaapi: enable sub frame support Fei Wang
  2022-04-29  7:59 ` [FFmpeg-devel] [PATCH v1 4/4] examples: separate vaapi_decode from hw_decode Fei Wang
  2 siblings, 0 replies; 4+ messages in thread
From: Fei Wang @ 2022-04-29  7:59 UTC (permalink / raw)
  To: ffmpeg-devel; +Cc: Fei Wang

Signed-off-by: Fei Wang <fei.w.wang@intel.com>
---
 doc/codecs.texi            |  9 +++++++++
 libavcodec/avcodec.h       | 15 +++++++++++++++
 libavcodec/options_table.h |  2 ++
 libavcodec/version.h       |  2 +-
 4 files changed, 27 insertions(+), 1 deletion(-)

diff --git a/doc/codecs.texi b/doc/codecs.texi
index 5e10020900..d74678a5eb 100644
--- a/doc/codecs.texi
+++ b/doc/codecs.texi
@@ -662,6 +662,9 @@ for codecs that support it. At present, those are H.264 and VP9.
 @item film_grain
 Export film grain parameters through frame side data (see @code{AV_FRAME_DATA_FILM_GRAIN_PARAMS}).
 Supported at present by AV1 decoders.
+@item sub_frame
+Export sub frame through frame side data (see @code{AV_FRAME_DATA_SUB_FRAME}).
+Supported at present by the HEVC VAAPI decoder.
 @end table
 
 @item threads @var{integer} (@emph{decoding/encoding,video})
@@ -1018,6 +1021,12 @@ Note: The required alignment depends on if @code{AV_CODEC_FLAG_UNALIGNED} is set
 CPU. @code{AV_CODEC_FLAG_UNALIGNED} cannot be changed from the command line. Also hardware
 decoders will not apply left/top Cropping.
 
+@item sub_frame_opts @var{dict} (@emph{decoding,video})
+Sub frame parameters, such as width, height and format.
+@example
+-sub_frame_opts "width=640:height=480:format=nv12"
+@end example
+
 
 @end table
 
diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h
index 4dae23d06e..3b1ab39f0c 100644
--- a/libavcodec/avcodec.h
+++ b/libavcodec/avcodec.h
@@ -361,6 +361,12 @@ typedef struct RcOverride{
  */
 #define AV_CODEC_EXPORT_DATA_FILM_GRAIN (1 << 3)
 
+/**
+ * Decoding only.
+ * Export the sub frame through frame side data.
+ */
+#define AV_CODEC_EXPORT_DATA_SUB_FRAME (1 << 4)
+
 /**
  * The decoder will keep a reference to the frame and may reuse it later.
  */
@@ -2055,6 +2061,15 @@ typedef struct AVCodecContext {
      *             The decoder can then override during decoding as needed.
      */
     AVChannelLayout ch_layout;
+
+
+    /**
+     * Set the sub frame's parameters, such as width, height and format.
+     *
+     * - decoding: set by user
+     * - encoding: unused
+     */
+    AVDictionary *sub_frame_opts;
 } AVCodecContext;
 
 /**
diff --git a/libavcodec/options_table.h b/libavcodec/options_table.h
index e72b4d12b6..b6bcbd251e 100644
--- a/libavcodec/options_table.h
+++ b/libavcodec/options_table.h
@@ -88,6 +88,7 @@ static const AVOption avcodec_options[] = {
 {"prft", "export Producer Reference Time through packet side data", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_EXPORT_DATA_PRFT}, INT_MIN, INT_MAX, A|V|S|E, "export_side_data"},
 {"venc_params", "export video encoding parameters through frame side data", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_EXPORT_DATA_VIDEO_ENC_PARAMS}, INT_MIN, INT_MAX, V|D, "export_side_data"},
 {"film_grain", "export film grain parameters through frame side data", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_EXPORT_DATA_FILM_GRAIN}, INT_MIN, INT_MAX, V|D, "export_side_data"},
+{"sub_frame", "export sub frame through frame side data", 0, AV_OPT_TYPE_CONST, {.i64 = AV_CODEC_EXPORT_DATA_SUB_FRAME}, INT_MIN, INT_MAX, V|D, "export_side_data"},
 {"time_base", NULL, OFFSET(time_base), AV_OPT_TYPE_RATIONAL, {.dbl = 0}, 0, INT_MAX},
 {"g", "set the group of picture (GOP) size", OFFSET(gop_size), AV_OPT_TYPE_INT, {.i64 = 12 }, INT_MIN, INT_MAX, V|E},
 {"ar", "set audio sampling rate (in Hz)", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64 = DEFAULT }, 0, INT_MAX, A|D|E},
@@ -399,6 +400,7 @@ static const AVOption avcodec_options[] = {
 {"allow_profile_mismatch", "attempt to decode anyway if HW accelerated decoder's supported profiles do not exactly match the stream", 0, AV_OPT_TYPE_CONST, {.i64 = AV_HWACCEL_FLAG_ALLOW_PROFILE_MISMATCH }, INT_MIN, INT_MAX, V | D, "hwaccel_flags"},
 {"extra_hw_frames", "Number of extra hardware frames to allocate for the user", OFFSET(extra_hw_frames), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, V|D },
 {"discard_damaged_percentage", "Percentage of damaged samples to discard a frame", OFFSET(discard_damaged_percentage), AV_OPT_TYPE_INT, {.i64 = 95 }, 0, 100, V|D },
+{"sub_frame_opts", "set sub frame opts", OFFSET(sub_frame_opts), AV_OPT_TYPE_DICT, {.str = NULL}, -1, INT_MAX, V|D},
 {NULL},
 };
 
diff --git a/libavcodec/version.h b/libavcodec/version.h
index 735c8b813c..87b7284a95 100644
--- a/libavcodec/version.h
+++ b/libavcodec/version.h
@@ -29,7 +29,7 @@
 
 #include "version_major.h"
 
-#define LIBAVCODEC_VERSION_MINOR  27
+#define LIBAVCODEC_VERSION_MINOR  28
 #define LIBAVCODEC_VERSION_MICRO 100
 
 #define LIBAVCODEC_VERSION_INT  AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
-- 
2.25.1

_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".

* [FFmpeg-devel] [PATCH v1 3/4] lavc/hevc_vaapi: enable sub frame support
  2022-04-29  7:59 [FFmpeg-devel] [PATCH v1 1/4] lavu: add sub frame side data Fei Wang
  2022-04-29  7:59 ` [FFmpeg-devel] [PATCH v1 2/4] lavc: add sub frame options and flag Fei Wang
@ 2022-04-29  7:59 ` Fei Wang
  2022-04-29  7:59 ` [FFmpeg-devel] [PATCH v1 4/4] examples: separate vaapi_decode from hw_decode Fei Wang
  2 siblings, 0 replies; 4+ messages in thread
From: Fei Wang @ 2022-04-29  7:59 UTC (permalink / raw)
  To: ffmpeg-devel; +Cc: Fei Wang

Intel hardware provides a feature that allows the decoder to output an
additional scaled frame beside the original frame. The scaled frame is
attached to the main frame as sub frame side data.

The use case is mainly video analysis. For example, the scaled-down
frame can be used for analysis, and the result can be applied back to
the main frame.

Normally, scale_vaapi is used in a VAAPI transcode pipeline when a
smaller resolution frame is needed. Now the sub frame can be used
instead. On some platforms, sub frame scaling is much faster than
scale_vaapi. For example, the decode + sub frame command below is ~50%
faster than decode + scaling on my DG2 i3-11100B@3.6GHz.

decode + sub frame cmd:
ffmpeg -hwaccel vaapi -hwaccel_device /dev/dri/renderD128            \
-hwaccel_output_format vaapi -export_side_data sub_frame             \
-sub_frame_opts "width=300:height=300:format=nv12"                   \
-i 1920x1080.h265 -f null - &

decode + scaling cmd:
ffmpeg -hwaccel vaapi -hwaccel_device /dev/dri/renderD128            \
-hwaccel_output_format vaapi -i 1920x1080.h265                       \
-vf 'scale_vaapi=w=300:h=300:format=nv12' -f null - &
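
For reference, an API-level sketch of the decode + sub frame setup (a
hedged sketch: the flag, the sub_frame_opts dictionary and the side
data type are the ones introduced in patches 1 and 2; decoder open and
the receive loop are omitted):

#include <libavcodec/avcodec.h>
#include <libavutil/dict.h>
#include <libavutil/frame.h>
#include <libavutil/log.h>

static int enable_sub_frame_export(AVCodecContext *avctx)
{
    int ret;

    /* Ask the decoder to emit the scaled sub frame as side data. */
    avctx->export_side_data |= AV_CODEC_EXPORT_DATA_SUB_FRAME;

    /* Same parameters as -sub_frame_opts in the command above. */
    if ((ret = av_dict_set(&avctx->sub_frame_opts, "width",  "300", 0)) < 0 ||
        (ret = av_dict_set(&avctx->sub_frame_opts, "height", "300", 0)) < 0 ||
        (ret = av_dict_set(&avctx->sub_frame_opts, "format", "nv12", 0)) < 0)
        return ret;
    return 0;
}

static void handle_sub_frame(const AVFrame *frame)
{
    /* After avcodec_receive_frame(), the scaled surface travels as
     * AV_FRAME_DATA_SUB_FRAME side data on the main frame. */
    AVFrameSideData *sd = av_frame_get_side_data(frame, AV_FRAME_DATA_SUB_FRAME);

    if (sd) {
        const AVFrame *sub = (const AVFrame *)sd->data;
        av_log(NULL, AV_LOG_INFO, "sub frame: %dx%d\n", sub->width, sub->height);
    }
}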

Signed-off-by: Fei Wang <fei.w.wang@intel.com>
---
 libavcodec/vaapi_decode.c   | 46 ++++++++++++++++++++++++++-
 libavcodec/vaapi_decode.h   |  4 +++
 libavcodec/vaapi_hevc.c     | 32 ++++++++++++++++++-
 libavutil/hwcontext_vaapi.c | 62 +++++++++++++++++++++++++++++++++++--
 libavutil/hwcontext_vaapi.h | 15 ++++++++-
 5 files changed, 153 insertions(+), 6 deletions(-)

diff --git a/libavcodec/vaapi_decode.c b/libavcodec/vaapi_decode.c
index a7abddb06b..920bab1ef4 100644
--- a/libavcodec/vaapi_decode.c
+++ b/libavcodec/vaapi_decode.c
@@ -160,6 +160,10 @@ int ff_vaapi_decode_issue(AVCodecContext *avctx,
     av_log(avctx, AV_LOG_DEBUG, "Decode to surface %#x.\n",
            pic->output_surface);
 
+    if (ctx->hwfc->enable_sub_frame)
+        av_log(avctx, AV_LOG_DEBUG, "Decode sub frame to surface %#x.\n",
+               pic->sub_frame_surface);
+
     vas = vaBeginPicture(ctx->hwctx->display, ctx->va_context,
                          pic->output_surface);
     if (vas != VA_STATUS_SUCCESS) {
@@ -440,6 +444,9 @@ static int vaapi_decode_make_config(AVCodecContext *avctx,
     AVHWDeviceContext    *device = (AVHWDeviceContext*)device_ref->data;
     AVVAAPIDeviceContext *hwctx = device->hwctx;
 
+    VAConfigAttrib attr;
+    int attr_num = 0, support_dec_processing = 0;
+
     codec_desc = avcodec_descriptor_get(avctx->codec_id);
     if (!codec_desc) {
         err = AVERROR(EINVAL);
@@ -518,8 +525,23 @@ static int vaapi_decode_make_config(AVCodecContext *avctx,
         }
     }
 
+    if (avctx->export_side_data & AV_CODEC_EXPORT_DATA_SUB_FRAME) {
+        attr.type = VAConfigAttribDecProcessing;
+        vas = vaGetConfigAttributes(hwctx->display, matched_va_profile,
+                                    VAEntrypointVLD, &attr, 1);
+        if (vas != VA_STATUS_SUCCESS) {
+            av_log(avctx, AV_LOG_ERROR, "Failed to query decode process "
+                   "attributes: %d (%s).\n", vas, vaErrorStr(vas));
+            return AVERROR_EXTERNAL;
+        } else if (attr.value & VA_DEC_PROCESSING) {
+            support_dec_processing = 1;
+            attr_num++;
+        } else
+            av_log(avctx, AV_LOG_WARNING, "Hardware doesn't support decode processing.\n");
+    }
+
     vas = vaCreateConfig(hwctx->display, matched_va_profile,
-                         VAEntrypointVLD, NULL, 0,
+                         VAEntrypointVLD, &attr, attr_num,
                          va_config);
     if (vas != VA_STATUS_SUCCESS) {
         av_log(avctx, AV_LOG_ERROR, "Failed to create decode "
@@ -564,10 +586,32 @@ static int vaapi_decode_make_config(AVCodecContext *avctx,
 
     if (frames_ref) {
         AVHWFramesContext *frames = (AVHWFramesContext *)frames_ref->data;
+        AVVAAPIFramesContext *avfc = frames->hwctx;
 
         frames->format = AV_PIX_FMT_VAAPI;
         frames->width = avctx->coded_width;
         frames->height = avctx->coded_height;
+        avfc->enable_sub_frame = support_dec_processing;
+
+        if (avfc->enable_sub_frame) {
+            avfc->sub_frame_width = avctx->coded_width;
+            avfc->sub_frame_height = avctx->coded_height;
+            avfc->sub_frame_sw_format = AV_PIX_FMT_NV12;
+            if (avctx->sub_frame_opts) {
+                AVDictionaryEntry *e = NULL;
+                while ((e = av_dict_get(avctx->sub_frame_opts, "", e, AV_DICT_IGNORE_SUFFIX))) {
+                    if (!strcmp(e->key, "width"))
+                        avfc->sub_frame_width = atoi(e->value);
+                    else if (!strcmp(e->key, "height"))
+                        avfc->sub_frame_height = atoi(e->value);
+                    else if (!strcmp(e->key, "format"))
+                        avfc->sub_frame_sw_format = av_get_pix_fmt(e->value);
+                }
+            }
+            av_log(avctx, AV_LOG_DEBUG, "Sub frame set with width:%d, height:%d, "
+                   "format:%s.\n", avfc->sub_frame_width, avfc->sub_frame_height,
+                   av_get_pix_fmt_name(avfc->sub_frame_sw_format));
+        }
 
         err = vaapi_decode_find_best_format(avctx, device,
                                             *va_config, frames);
diff --git a/libavcodec/vaapi_decode.h b/libavcodec/vaapi_decode.h
index 6beda14e52..fbac7e7a8e 100644
--- a/libavcodec/vaapi_decode.h
+++ b/libavcodec/vaapi_decode.h
@@ -45,6 +45,10 @@ typedef struct VAAPIDecodePicture {
     int                nb_slices;
     VABufferID           *slice_buffers;
     int                   slices_allocated;
+
+    VASurfaceID           sub_frame_surface;
+    VARectangle           sub_frame_src;
+    VARectangle           sub_frame_dst;
 } VAAPIDecodePicture;
 
 typedef struct VAAPIDecodeContext {
diff --git a/libavcodec/vaapi_hevc.c b/libavcodec/vaapi_hevc.c
index 9083331c45..209a302a2c 100644
--- a/libavcodec/vaapi_hevc.c
+++ b/libavcodec/vaapi_hevc.c
@@ -38,6 +38,7 @@ typedef struct VAAPIDecodePictureHEVC {
     VAPictureParameterBufferHEVC pic_param;
     VASliceParameterBufferHEVC last_slice_param;
 #endif
+    VAProcPipelineParameterBuffer proc_param;
     const uint8_t *last_buffer;
     size_t         last_size;
 
@@ -122,8 +123,8 @@ static int vaapi_hevc_start_frame(AVCodecContext          *avctx,
     VAAPIDecodePictureHEVC *pic = h->ref->hwaccel_picture_private;
     const HEVCSPS          *sps = h->ps.sps;
     const HEVCPPS          *pps = h->ps.pps;
-
     const ScalingList *scaling_list = NULL;
+    AVFrameSideData *sd;
     int pic_param_size, err, i;
 
     VAPictureParameterBufferHEVC *pic_param = (VAPictureParameterBufferHEVC *)&pic->pic_param;
@@ -285,6 +286,35 @@ static int vaapi_hevc_start_frame(AVCodecContext          *avctx,
             goto fail;
     }
 
+    sd = av_frame_get_side_data(h->ref->frame, AV_FRAME_DATA_SUB_FRAME);
+    if (sd) {
+        VAProcPipelineParameterBuffer *proc_param = &pic->proc_param;
+        AVFrame *sub_frame = (AVFrame *)sd->data;
+
+        memset(proc_param, 0, sizeof(VAProcPipelineParameterBuffer));
+
+        pic->pic.sub_frame_src.x = pic->pic.sub_frame_src.y = 0;
+        pic->pic.sub_frame_src.width = sps->width;
+        pic->pic.sub_frame_src.height = sps->height;
+
+        pic->pic.sub_frame_dst.x = pic->pic.sub_frame_dst.y = 0;
+        pic->pic.sub_frame_dst.width = sub_frame->width;
+        pic->pic.sub_frame_dst.height = sub_frame->height;
+
+        pic->pic.sub_frame_surface = ff_vaapi_get_surface_id(sub_frame);
+        proc_param->surface = pic->pic.output_surface;
+        proc_param->surface_region = &pic->pic.sub_frame_src;
+        proc_param->output_region = &pic->pic.sub_frame_dst;
+        proc_param->additional_outputs = &pic->pic.sub_frame_surface;
+        proc_param->num_additional_outputs = 1;
+
+        err = ff_vaapi_decode_make_param_buffer(avctx, &pic->pic,
+                                                VAProcPipelineParameterBufferType,
+                                                &pic->proc_param, sizeof(VAProcPipelineParameterBuffer));
+        if (err < 0)
+            goto fail;
+    }
+
     return 0;
 
 fail:
diff --git a/libavutil/hwcontext_vaapi.c b/libavutil/hwcontext_vaapi.c
index c3a98bc4b1..1b3b487738 100644
--- a/libavutil/hwcontext_vaapi.c
+++ b/libavutil/hwcontext_vaapi.c
@@ -49,8 +49,7 @@
 #include "hwcontext_vaapi.h"
 #include "mem.h"
 #include "pixdesc.h"
-#include "pixfmt.h"
-
+#include "sub_frame_metadata.h"
 
 typedef struct VAAPIDevicePriv {
 #if HAVE_VAAPI_X11
@@ -82,6 +81,8 @@ typedef struct VAAPIFramesContext {
     // Caches whether VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME_2 is unsupported for
     // surface imports.
     int prime_2_import_unsupported;
+
+    AVBufferRef *sub_frames_ref;
 } VAAPIFramesContext;
 
 typedef struct VAAPIMapping {
@@ -511,7 +512,7 @@ static AVBufferRef *vaapi_pool_alloc(void *opaque, size_t size)
     return ref;
 }
 
-static int vaapi_frames_init(AVHWFramesContext *hwfc)
+static int vaapi_hw_frames_init(AVHWFramesContext *hwfc)
 {
     AVVAAPIFramesContext  *avfc = hwfc->hwctx;
     VAAPIFramesContext     *ctx = hwfc->internal->priv;
@@ -663,17 +664,57 @@ fail:
     return err;
 }
 
+static int vaapi_frames_init(AVHWFramesContext *hwfc)
+{
+    VAAPIFramesContext *ctx = hwfc->internal->priv;
+    AVVAAPIFramesContext *avfc = hwfc->hwctx;
+    AVHWFramesContext *sub_frames_ctx;
+    int ret;
+
+    ret = vaapi_hw_frames_init(hwfc);
+    if (ret < 0)
+        return ret;
+
+    if (avfc->enable_sub_frame) {
+        ctx->sub_frames_ref = av_hwframe_ctx_alloc(hwfc->device_ref);
+        if (!ctx->sub_frames_ref) {
+            return AVERROR(ENOMEM);
+        }
+        sub_frames_ctx = (AVHWFramesContext*)ctx->sub_frames_ref->data;
+
+        sub_frames_ctx->width             = avfc->sub_frame_width;
+        sub_frames_ctx->height            = avfc->sub_frame_height;
+        sub_frames_ctx->format            = AV_PIX_FMT_VAAPI;
+        sub_frames_ctx->sw_format         = avfc->sub_frame_sw_format;
+
+        ret = av_hwframe_ctx_init(ctx->sub_frames_ref);
+        if (ret < 0) {
+            av_buffer_unref(&ctx->sub_frames_ref);
+            av_log(hwfc, AV_LOG_ERROR, "Error to init sub frame hw context.\n");
+            return ret;
+        }
+    }
+
+    return 0;
+}
+
 static void vaapi_frames_uninit(AVHWFramesContext *hwfc)
 {
     AVVAAPIFramesContext *avfc = hwfc->hwctx;
     VAAPIFramesContext    *ctx = hwfc->internal->priv;
 
+    av_buffer_unref(&ctx->sub_frames_ref);
     av_freep(&avfc->surface_ids);
     av_freep(&ctx->attributes);
 }
 
 static int vaapi_get_buffer(AVHWFramesContext *hwfc, AVFrame *frame)
 {
+    VAAPIFramesContext *ctx = hwfc->internal->priv;
+    AVVAAPIFramesContext *avfc = hwfc->hwctx;
+    AVFrame *sub_frame;
+    int ret;
+
     frame->buf[0] = av_buffer_pool_get(hwfc->pool);
     if (!frame->buf[0])
         return AVERROR(ENOMEM);
@@ -683,6 +724,21 @@ static int vaapi_get_buffer(AVHWFramesContext *hwfc, AVFrame *frame)
     frame->width   = hwfc->width;
     frame->height  = hwfc->height;
 
+    if (avfc->enable_sub_frame) {
+        if (!ctx->sub_frames_ref)
+            return AVERROR(ENOSYS);
+
+        sub_frame = av_sub_frame_create_side_data(frame);
+        if (!sub_frame)
+            return AVERROR(ENOMEM);
+
+        ret = av_hwframe_get_buffer(ctx->sub_frames_ref, sub_frame, 0);
+        if (ret < 0) {
+            av_log(hwfc, AV_LOG_ERROR, "Can't get sub frame.\n");
+            return ret;
+        }
+    }
+
     return 0;
 }
 
diff --git a/libavutil/hwcontext_vaapi.h b/libavutil/hwcontext_vaapi.h
index 0b2e071cb3..aea0ec9263 100644
--- a/libavutil/hwcontext_vaapi.h
+++ b/libavutil/hwcontext_vaapi.h
@@ -19,6 +19,7 @@
 #ifndef AVUTIL_HWCONTEXT_VAAPI_H
 #define AVUTIL_HWCONTEXT_VAAPI_H
 
+#include "pixfmt.h"
 #include <va/va.h>
 
 /**
@@ -81,7 +82,7 @@ typedef struct AVVAAPIDeviceContext {
 } AVVAAPIDeviceContext;
 
 /**
- * VAAPI-specific data associated with a frame pool.
+ * VAAPI-specific data associated with a frame pool and sub frame.
  *
  * Allocated as AVHWFramesContext.hwctx.
  */
@@ -100,6 +101,18 @@ typedef struct AVVAAPIFramesContext {
      */
     VASurfaceID     *surface_ids;
     int           nb_surfaces;
+
+    /**
+     * Set by the user to indicate whether sub frame support is needed.
+     */
+    int enable_sub_frame;
+
+    /**
+     * Sub frame width/height/format. Only available if enable_sub_frame
+     * is true.
+     */
+    int sub_frame_width, sub_frame_height;
+    enum AVPixelFormat sub_frame_sw_format;
 } AVVAAPIFramesContext;
 
 /**
-- 
2.25.1

_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".

* [FFmpeg-devel] [PATCH v1 4/4] examples: separate vaapi_decode from hw_decode
  2022-04-29  7:59 [FFmpeg-devel] [PATCH v1 1/4] lavu: add sub frame side data Fei Wang
  2022-04-29  7:59 ` [FFmpeg-devel] [PATCH v1 2/4] lavc: add sub frame options and flag Fei Wang
  2022-04-29  7:59 ` [FFmpeg-devel] [PATCH v1 3/4] lavc/hevc_vaapi: enable sub frame support Fei Wang
@ 2022-04-29  7:59 ` Fei Wang
  2 siblings, 0 replies; 4+ messages in thread
From: Fei Wang @ 2022-04-29  7:59 UTC (permalink / raw)
  To: ffmpeg-devel; +Cc: Fei Wang

Now vaapi_decode can be used to test VAAPI decoding, both with and
without a sub frame.

decode example:
$ vaapi_decode 1920x1080.h265 out.yuv

decode with sub frame example:
$ vaapi_decode 1920x1080.h265 out.yuv width=640:height=480:format=argb sub_out.argb
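
The dumped sub frame output is raw video; assuming the 640x480 argb
parameters from the command above, it can be inspected with e.g.:
$ ffplay -f rawvideo -pixel_format argb -video_size 640x480 sub_out.argb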

Signed-off-by: Fei Wang <fei.w.wang@intel.com>
---
 configure                   |   2 +
 doc/examples/Makefile       |   1 +
 doc/examples/vaapi_decode.c | 296 ++++++++++++++++++++++++++++++++++++
 3 files changed, 299 insertions(+)
 create mode 100644 doc/examples/vaapi_decode.c

diff --git a/configure b/configure
index 196873c4aa..73ed62eae7 100755
--- a/configure
+++ b/configure
@@ -1744,6 +1744,7 @@ EXAMPLE_LIST="
     scaling_video_example
     transcode_aac_example
     transcoding_example
+    vaapi_decode_example
     vaapi_encode_example
     vaapi_transcode_example
 "
@@ -3779,6 +3780,7 @@ resampling_audio_example_deps="avutil swresample"
 scaling_video_example_deps="avutil swscale"
 transcode_aac_example_deps="avcodec avformat swresample"
 transcoding_example_deps="avfilter avcodec avformat avutil"
+vaapi_decode_example_deps="avcodec avformat avutil"
 vaapi_encode_example_deps="avcodec avutil h264_vaapi_encoder"
 vaapi_transcode_example_deps="avcodec avformat avutil h264_vaapi_encoder"
 
diff --git a/doc/examples/Makefile b/doc/examples/Makefile
index 81bfd34d5d..f1b18028c5 100644
--- a/doc/examples/Makefile
+++ b/doc/examples/Makefile
@@ -19,6 +19,7 @@ EXAMPLES-$(CONFIG_RESAMPLING_AUDIO_EXAMPLE)  += resampling_audio
 EXAMPLES-$(CONFIG_SCALING_VIDEO_EXAMPLE)     += scaling_video
 EXAMPLES-$(CONFIG_TRANSCODE_AAC_EXAMPLE)     += transcode_aac
 EXAMPLES-$(CONFIG_TRANSCODING_EXAMPLE)       += transcoding
+EXAMPLES-$(CONFIG_VAAPI_DECODE_EXAMPLE)      += vaapi_decode
 EXAMPLES-$(CONFIG_VAAPI_ENCODE_EXAMPLE)      += vaapi_encode
 EXAMPLES-$(CONFIG_VAAPI_TRANSCODE_EXAMPLE)   += vaapi_transcode
 
diff --git a/doc/examples/vaapi_decode.c b/doc/examples/vaapi_decode.c
new file mode 100644
index 0000000000..a9c8d48240
--- /dev/null
+++ b/doc/examples/vaapi_decode.c
@@ -0,0 +1,296 @@
+/*
+ * Video Acceleration API (video decoding) decode sample
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/**
+ * @file
+ * VAAPI-accelerated decoding example.
+ *
+ * @example vaapi_decode.c
+ * This example shows how to do VAAPI-accelerated decoding with output
+ * frames from the HW video surfaces. It also supports decoding with a sub frame.
+ */
+
+#include <stdio.h>
+
+#include <libavcodec/avcodec.h>
+#include <libavformat/avformat.h>
+#include <libavutil/pixdesc.h>
+#include <libavutil/hwcontext.h>
+#include <libavutil/opt.h>
+#include <libavutil/avassert.h>
+#include <libavutil/imgutils.h>
+
+static AVBufferRef *hw_device_ctx = NULL;
+static enum AVPixelFormat hw_pix_fmt;
+static FILE *output_file = NULL;
+static FILE *sub_frame_output = NULL;
+
+static int hw_decoder_init(AVCodecContext *ctx, const enum AVHWDeviceType type)
+{
+    int err = 0;
+
+    if ((err = av_hwdevice_ctx_create(&hw_device_ctx, type,
+                                      NULL, NULL, 0)) < 0) {
+        fprintf(stderr, "Failed to create specified HW device.\n");
+        return err;
+    }
+    ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
+
+    return err;
+}
+
+static enum AVPixelFormat get_hw_format(AVCodecContext *ctx,
+                                        const enum AVPixelFormat *pix_fmts)
+{
+    const enum AVPixelFormat *p;
+
+    for (p = pix_fmts; *p != -1; p++) {
+        if (*p == hw_pix_fmt)
+            return *p;
+    }
+
+    fprintf(stderr, "Failed to get HW surface format.\n");
+    return AV_PIX_FMT_NONE;
+}
+
+static int retrieve_write(AVFrame *frame, AVFrame *sw_frame, FILE *file)
+{
+    AVFrame *tmp_frame = NULL;
+    uint8_t *buffer = NULL;
+    int size;
+    int ret = 0;
+
+    if (frame->format == hw_pix_fmt) {
+        /* retrieve data from GPU to CPU */
+        if ((ret = av_hwframe_transfer_data(sw_frame, frame, 0)) < 0) {
+            fprintf(stderr, "Error transferring the data to system memory\n");
+            goto fail;
+        }
+        tmp_frame = sw_frame;
+    } else
+        tmp_frame = frame;
+
+    size = av_image_get_buffer_size(tmp_frame->format, tmp_frame->width,
+                                    tmp_frame->height, 1);
+    buffer = av_malloc(size);
+    if (!buffer) {
+        fprintf(stderr, "Can not alloc buffer\n");
+        ret = AVERROR(ENOMEM);
+        goto fail;
+    }
+    ret = av_image_copy_to_buffer(buffer, size,
+                                  (const uint8_t * const *)tmp_frame->data,
+                                  (const int *)tmp_frame->linesize, tmp_frame->format,
+                                  tmp_frame->width, tmp_frame->height, 1);
+    if (ret < 0) {
+        fprintf(stderr, "Can not copy image to buffer\n");
+        goto fail;
+    }
+
+    if ((ret = fwrite(buffer, 1, size, file)) < 0) {
+        fprintf(stderr, "Failed to dump raw data.\n");
+        goto fail;
+    }
+
+fail:
+    av_freep(&buffer);
+
+    return ret;
+}
+
+static int decode_write(AVCodecContext *avctx, AVPacket *packet)
+{
+    AVFrame *frame = NULL, *sw_frame = NULL;
+    AVFrame *sub_frame = NULL, *sw_sub_frame = NULL;
+    AVFrameSideData *sd = NULL;
+    int ret = 0;
+
+    ret = avcodec_send_packet(avctx, packet);
+    if (ret < 0) {
+        fprintf(stderr, "Error during decoding\n");
+        return ret;
+    }
+
+    while (1) {
+        if (!(frame = av_frame_alloc()) || !(sw_frame = av_frame_alloc())) {
+            fprintf(stderr, "Can not alloc frame\n");
+            ret = AVERROR(ENOMEM);
+            goto fail;
+        }
+
+        ret = avcodec_receive_frame(avctx, frame);
+        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
+            av_frame_free(&frame);
+            av_frame_free(&sw_frame);
+            return 0;
+        } else if (ret < 0) {
+            fprintf(stderr, "Error while decoding\n");
+            goto fail;
+        }
+
+        ret = retrieve_write(frame, sw_frame, output_file);
+        if (ret < 0) {
+            fprintf(stderr, "Error while retrieve and write data\n");
+            goto fail;
+        }
+
+        sd = av_frame_get_side_data(frame, AV_FRAME_DATA_SUB_FRAME);
+        if (sd) {
+            if (!(sw_sub_frame = av_frame_alloc())) {
+                fprintf(stderr, "Can not alloc sub frame\n");
+                ret = AVERROR(ENOMEM);
+                goto fail;
+            }
+
+            sub_frame = (AVFrame *)sd->data;
+
+            ret = retrieve_write(sub_frame, sw_sub_frame, sub_frame_output);
+            if (ret < 0) {
+                fprintf(stderr, "Error while retrieve and write sub frame data\n");
+                goto fail;
+            }
+
+            av_frame_remove_side_data(frame, AV_FRAME_DATA_SUB_FRAME);
+            sd = NULL;
+        }
+
+    fail:
+        av_frame_free(&frame);
+        av_frame_free(&sw_frame);
+        av_frame_free(&sw_sub_frame);
+        if (ret < 0)
+            return ret;
+    }
+}
+
+int main(int argc, char *argv[])
+{
+    AVFormatContext *input_ctx = NULL;
+    AVStream *video = NULL;
+    AVCodecContext *decoder_ctx = NULL;
+    const AVCodec *decoder = NULL;
+    AVPacket *packet = NULL;
+    int video_stream, ret, i;
+
+    if (argc != 3 && argc != 5) {
+        fprintf(stderr, "Decode-only usage: %s <input file> <output file>\n", argv[0]);
+        fprintf(stderr, "Decode with sub frame usage: %s <input file> <output file> <width=W:height=H:format=F> <sub frame output file>\n", argv[0]);
+        return -1;
+    }
+
+    // av_log_set_level(AV_LOG_DEBUG);
+
+    packet = av_packet_alloc();
+    if (!packet) {
+        fprintf(stderr, "Failed to allocate AVPacket\n");
+        return -1;
+    }
+
+    /* open the input file */
+    if (avformat_open_input(&input_ctx, argv[1], NULL, NULL) != 0) {
+        fprintf(stderr, "Cannot open input file '%s'\n", argv[1]);
+        return -1;
+    }
+
+    if (avformat_find_stream_info(input_ctx, NULL) < 0) {
+        fprintf(stderr, "Cannot find input stream information.\n");
+        return -1;
+    }
+
+    /* find the video stream information */
+    ret = av_find_best_stream(input_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &decoder, 0);
+    if (ret < 0) {
+        fprintf(stderr, "Cannot find a video stream in the input file\n");
+        return -1;
+    }
+    video_stream = ret;
+
+    for (i = 0;; i++) {
+        const AVCodecHWConfig *config = avcodec_get_hw_config(decoder, i);
+        if (!config) {
+            fprintf(stderr, "Decoder %s does not support device type %s.\n",
+                    decoder->name, av_hwdevice_get_type_name(AV_HWDEVICE_TYPE_VAAPI));
+            return -1;
+        }
+        if (config->methods & AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX &&
+            config->device_type == AV_HWDEVICE_TYPE_VAAPI) {
+            hw_pix_fmt = config->pix_fmt;
+            break;
+        }
+    }
+
+    if (!(decoder_ctx = avcodec_alloc_context3(decoder)))
+        return AVERROR(ENOMEM);
+
+    video = input_ctx->streams[video_stream];
+    if (avcodec_parameters_to_context(decoder_ctx, video->codecpar) < 0)
+        return -1;
+
+    decoder_ctx->get_format  = get_hw_format;
+
+    if (argc == 5) {
+        sub_frame_output = fopen(argv[4], "w+b");
+        decoder_ctx->export_side_data |= AV_CODEC_EXPORT_DATA_SUB_FRAME;
+        if ((ret = av_dict_parse_string(&decoder_ctx->sub_frame_opts, argv[3], "=", ":", 0)) < 0) {
+            av_log(decoder_ctx, AV_LOG_ERROR, "Failed to parse option string '%s'.\n", argv[3]);
+            av_dict_free(&decoder_ctx->sub_frame_opts);
+            return -1;
+        }
+    }
+
+    if (hw_decoder_init(decoder_ctx, AV_HWDEVICE_TYPE_VAAPI) < 0)
+        return -1;
+
+    if ((ret = avcodec_open2(decoder_ctx, decoder, NULL)) < 0) {
+        fprintf(stderr, "Failed to open codec for stream #%u\n", video_stream);
+        return -1;
+    }
+
+    /* open the file to dump raw data */
+    output_file = fopen(argv[2], "w+b");
+
+    /* actual decoding and dump the raw data */
+    while (ret >= 0) {
+        if ((ret = av_read_frame(input_ctx, packet)) < 0)
+            break;
+
+        if (video_stream == packet->stream_index)
+            ret = decode_write(decoder_ctx, packet);
+
+        av_packet_unref(packet);
+    }
+
+    /* flush the decoder */
+    ret = decode_write(decoder_ctx, NULL);
+
+    if (output_file)
+        fclose(output_file);
+    if (sub_frame_output)
+        fclose(sub_frame_output);
+    av_packet_free(&packet);
+    av_dict_free(&decoder_ctx->sub_frame_opts);
+    avcodec_free_context(&decoder_ctx);
+    avformat_close_input(&input_ctx);
+    av_buffer_unref(&hw_device_ctx);
+
+    return 0;
+}
-- 
2.25.1

_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".

end of thread, other threads:[~2022-04-29  8:06 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
2022-04-29  7:59 [FFmpeg-devel] [PATCH v1 1/4] lavu: add sub frame side data Fei Wang
2022-04-29  7:59 ` [FFmpeg-devel] [PATCH v1 2/4] lavc: add sub frame options and flag Fei Wang
2022-04-29  7:59 ` [FFmpeg-devel] [PATCH v1 3/4] lavc/hevc_vaapi: enable sub frame support Fei Wang
2022-04-29  7:59 ` [FFmpeg-devel] [PATCH v1 4/4] examples: separate vaapi_decode from hw_decode Fei Wang
