* [FFmpeg-devel] [PATCH 2/9] libavcodec: add amfdec.
2024-02-14 1:55 [FFmpeg-devel] [PATCH 1/9] libavutil: add hwcontext_amf Dmitrii Ovchinnikov
@ 2024-02-14 1:55 ` Dmitrii Ovchinnikov
2024-02-14 23:41 ` Mark Thompson
2024-02-14 1:55 ` [FFmpeg-devel] [PATCH 3/9] avcodec/amfenc: Fixes the color information in the output Dmitrii Ovchinnikov
` (8 subsequent siblings)
9 siblings, 1 reply; 18+ messages in thread
From: Dmitrii Ovchinnikov @ 2024-02-14 1:55 UTC (permalink / raw)
To: ffmpeg-devel; +Cc: Evgeny Pavlov, Dmitrii Ovchinnikov
From: Evgeny Pavlov <lucenticus@gmail.com>
Added AMF based h264, hevc, av1 decoders.
Co-authored-by: Dmitrii Ovchinnikov <ovchinnikov.dmitrii@gmail.com>
---
libavcodec/Makefile | 4 +-
libavcodec/allcodecs.c | 3 +
libavcodec/amfdec.c | 667 ++++++++++++++++++++++++++++++++++++++++
libavcodec/amfdec.h | 75 +++++
libavcodec/h264_slice.c | 3 +
libavcodec/h264dec.c | 3 +
libavcodec/hwconfig.h | 2 +
7 files changed, 755 insertions(+), 2 deletions(-)
create mode 100644 libavcodec/amfdec.c
create mode 100644 libavcodec/amfdec.h
diff --git a/libavcodec/Makefile b/libavcodec/Makefile
index 470d7cb9b1..c2e4715f4b 100644
--- a/libavcodec/Makefile
+++ b/libavcodec/Makefile
@@ -69,7 +69,7 @@ include $(SRC_PATH)/libavcodec/x86/vvc/Makefile
OBJS-$(CONFIG_AANDCTTABLES) += aandcttab.o
OBJS-$(CONFIG_AC3DSP) += ac3dsp.o ac3.o ac3tab.o
OBJS-$(CONFIG_ADTS_HEADER) += adts_header.o mpeg4audio_sample_rates.o
-OBJS-$(CONFIG_AMF) += amfenc.o
+OBJS-$(CONFIG_AMF) += amfenc.o amfdec.o
OBJS-$(CONFIG_AUDIO_FRAME_QUEUE) += audio_frame_queue.o
OBJS-$(CONFIG_ATSC_A53) += atsc_a53.o
OBJS-$(CONFIG_AUDIODSP) += audiodsp.o
@@ -1265,7 +1265,7 @@ SKIPHEADERS += %_tablegen.h \
vulkan_video_codec_av1std.h \
$(ARCH)/vpx_arith.h \
-SKIPHEADERS-$(CONFIG_AMF) += amfenc.h
+SKIPHEADERS-$(CONFIG_AMF) += amfenc.h amfdec.h
SKIPHEADERS-$(CONFIG_D3D11VA) += d3d11va.h dxva2_internal.h
SKIPHEADERS-$(CONFIG_D3D12VA) += d3d12va_decode.h
SKIPHEADERS-$(CONFIG_DXVA2) += dxva2.h dxva2_internal.h
diff --git a/libavcodec/allcodecs.c b/libavcodec/allcodecs.c
index ef8c3a6d7d..c344c70e00 100644
--- a/libavcodec/allcodecs.c
+++ b/libavcodec/allcodecs.c
@@ -851,10 +851,12 @@ extern const FFCodec ff_av1_nvenc_encoder;
extern const FFCodec ff_av1_qsv_decoder;
extern const FFCodec ff_av1_qsv_encoder;
extern const FFCodec ff_av1_amf_encoder;
+extern const FFCodec ff_av1_amf_decoder;
extern const FFCodec ff_av1_vaapi_encoder;
extern const FFCodec ff_libopenh264_encoder;
extern const FFCodec ff_libopenh264_decoder;
extern const FFCodec ff_h264_amf_encoder;
+extern const FFCodec ff_h264_amf_decoder;
extern const FFCodec ff_h264_cuvid_decoder;
extern const FFCodec ff_h264_mf_encoder;
extern const FFCodec ff_h264_nvenc_encoder;
@@ -864,6 +866,7 @@ extern const FFCodec ff_h264_v4l2m2m_encoder;
extern const FFCodec ff_h264_vaapi_encoder;
extern const FFCodec ff_h264_videotoolbox_encoder;
extern const FFCodec ff_hevc_amf_encoder;
+extern const FFCodec ff_hevc_amf_decoder;
extern const FFCodec ff_hevc_cuvid_decoder;
extern const FFCodec ff_hevc_mediacodec_decoder;
extern const FFCodec ff_hevc_mediacodec_encoder;
diff --git a/libavcodec/amfdec.c b/libavcodec/amfdec.c
new file mode 100644
index 0000000000..9d618ff442
--- /dev/null
+++ b/libavcodec/amfdec.c
@@ -0,0 +1,667 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <AMF/core/Variant.h>
+#include <AMF/core/PropertyStorage.h>
+#include <AMF/components/FFMPEGFileDemuxer.h>
+#include "libavutil/hwcontext_amf.h"
+#include "amfdec.h"
+#include "codec_internal.h"
+#include "hwconfig.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/time.h"
+#include "decode.h"
+#include "libavutil/mastering_display_metadata.h"
+
+#if CONFIG_D3D11VA
+#include "libavutil/hwcontext_d3d11va.h"
+#endif
+#if CONFIG_DXVA2
+#define COBJMACROS
+#include "libavutil/hwcontext_dxva2.h"
+#endif
+
+#ifdef _WIN32
+#include "compat/w32dlfcn.h"
+#else
+#include <dlfcn.h>
+#endif
+
+#define propNotFound 0
+
+const enum AVPixelFormat amf_dec_pix_fmts[] = {
+ AV_PIX_FMT_YUV420P,
+ AV_PIX_FMT_NV12,
+ AV_PIX_FMT_BGRA,
+ AV_PIX_FMT_ARGB,
+ AV_PIX_FMT_RGBA,
+ AV_PIX_FMT_GRAY8,
+ AV_PIX_FMT_BGR0,
+ AV_PIX_FMT_YUYV422,
+ AV_PIX_FMT_P010,
+ AV_PIX_FMT_P012,
+ AV_PIX_FMT_YUV420P10,
+ AV_PIX_FMT_YUV420P12,
+ AV_PIX_FMT_YUV420P16,
+#if CONFIG_D3D11VA
+ AV_PIX_FMT_D3D11,
+#endif
+#if CONFIG_DXVA2
+ AV_PIX_FMT_DXVA2_VLD,
+#endif
+ AV_PIX_FMT_AMF,
+ AV_PIX_FMT_NONE
+};
+
+static const AVCodecHWConfigInternal *const amf_hw_configs[] = {
+ &(const AVCodecHWConfigInternal) {
+ .public = {
+ .pix_fmt = AV_PIX_FMT_AMF,
+ .methods = AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX |
+ AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX,
+ .device_type = AV_HWDEVICE_TYPE_AMF,
+ },
+ .hwaccel = NULL,
+ },
+ NULL
+};
+
+static void amf_free_amfsurface(void *opaque, uint8_t *data)
+{
+ AMFSurface *surface = (AMFSurface*)(data);
+ surface->pVtbl->Release(surface);
+}
+
+static int amf_init_decoder(AVCodecContext *avctx)
+{
+ enum AMF_SURFACE_FORMAT output_format = AMF_SURFACE_UNKNOWN;
+ AvAmfDecoderContext *ctx = avctx->priv_data;
+ AVAMFDeviceContextInternal * internal = (AVAMFDeviceContextInternal *)ctx->amf_device_ctx_internal->data;
+ const wchar_t *codec_id = NULL;
+ AMF_RESULT res;
+ AMFBuffer *buffer;
+ amf_int64 color_profile;
+ int pool_size = 35;
+
+ if (avctx->pix_fmt == AV_PIX_FMT_AMF){
+ if (avctx->hw_frames_ctx) {
+ AVHWFramesContext *hwframes_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
+ output_format = av_amf_av_to_amf_format(hwframes_ctx->sw_format);
+ } else
+ output_format = av_amf_av_to_amf_format(avctx->sw_pix_fmt);
+ } else
+ output_format = av_amf_av_to_amf_format(avctx->pix_fmt);
+
+ if (output_format == AMF_SURFACE_UNKNOWN)
+ output_format = AMF_SURFACE_NV12;
+
+ ctx->drained = 0;
+
+ switch (avctx->codec->id) {
+ case AV_CODEC_ID_H264:
+ codec_id = AMFVideoDecoderUVD_H264_AVC;
+ break;
+ case AV_CODEC_ID_HEVC: {
+ if (output_format == AMF_SURFACE_P010)
+ codec_id = AMFVideoDecoderHW_H265_MAIN10;
+ else
+ codec_id = AMFVideoDecoderHW_H265_HEVC;
+ } break;
+ case AV_CODEC_ID_AV1:
+ if (output_format == AMF_SURFACE_P012)
+ codec_id = AMFVideoDecoderHW_AV1_12BIT;
+ else
+ codec_id = AMFVideoDecoderHW_AV1;
+ break;
+ default:
+ break;
+ }
+ AMF_RETURN_IF_FALSE(ctx, codec_id != NULL, AVERROR(EINVAL), "Codec %d is not supported\n", avctx->codec->id);
+
+ res = internal->factory->pVtbl->CreateComponent(internal->factory, internal->context, codec_id, &ctx->decoder);
+ AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_DECODER_NOT_FOUND, "CreateComponent(%ls) failed with error %d\n", codec_id, res);
+
+ // Color Metadata
+ /// Color Range (Support for older Drivers)
+ if (avctx->color_range == AVCOL_RANGE_JPEG) {
+ AMF_ASSIGN_PROPERTY_BOOL(res, ctx->decoder, AMF_VIDEO_DECODER_FULL_RANGE_COLOR, 1);
+ } else if (avctx->color_range != AVCOL_RANGE_UNSPECIFIED) {
+ AMF_ASSIGN_PROPERTY_BOOL(res, ctx->decoder, AMF_VIDEO_DECODER_FULL_RANGE_COLOR, 0);
+ }
+ color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_UNKNOWN;
+ switch (avctx->colorspace) {
+ case AVCOL_SPC_SMPTE170M:
+ if (avctx->color_range == AVCOL_RANGE_JPEG) {
+ color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_FULL_601;
+ } else {
+ color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_601;
+ }
+ break;
+ case AVCOL_SPC_BT709:
+ if (avctx->color_range == AVCOL_RANGE_JPEG) {
+ color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_FULL_709;
+ } else {
+ color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_709;
+ }
+ break;
+ case AVCOL_SPC_BT2020_NCL:
+ case AVCOL_SPC_BT2020_CL:
+ if (avctx->color_range == AVCOL_RANGE_JPEG) {
+ color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_FULL_2020;
+ } else {
+ color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_2020;
+ }
+ break;
+ }
+ if (color_profile != AMF_VIDEO_CONVERTER_COLOR_PROFILE_UNKNOWN)
+ AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_VIDEO_DECODER_COLOR_PROFILE, color_profile);
+ if (avctx->color_trc != AVCOL_TRC_UNSPECIFIED)
+ AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_VIDEO_DECODER_COLOR_TRANSFER_CHARACTERISTIC, (amf_int64)avctx->color_trc);
+
+ if (avctx->color_primaries != AVCOL_PRI_UNSPECIFIED)
+ AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_VIDEO_DECODER_COLOR_PRIMARIES, (amf_int64)avctx->color_primaries);
+
+ if (ctx->timestamp_mode != -1)
+ AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_TIMESTAMP_MODE, ctx->timestamp_mode);
+ if (ctx->decoder_mode != -1)
+ AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_VIDEO_DECODER_REORDER_MODE, ctx->decoder_mode);
+ if (ctx->dpb_size != -1)
+ AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_VIDEO_DECODER_DPB_SIZE, ctx->dpb_size);
+ if (ctx->lowlatency != -1)
+ AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_VIDEO_DECODER_LOW_LATENCY, ctx->lowlatency);
+ if (ctx->smart_access_video != -1) {
+ AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_VIDEO_DECODER_ENABLE_SMART_ACCESS_VIDEO, ctx->smart_access_video != 0);
+ if (res != AMF_OK) {
+ av_log(avctx, AV_LOG_ERROR, "Smart Access Video is not supported by the AMF decoder.\n");
+ return AVERROR(EINVAL);
+ } else {
+ av_log(avctx, AV_LOG_INFO, "The Smart Access Video (%d) is set.\n", ctx->smart_access_video);
+ // Set low latency mode if Smart Access Video is enabled
+ if (ctx->smart_access_video != 0) {
+ AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_VIDEO_DECODER_LOW_LATENCY, true);
+ av_log(avctx, AV_LOG_INFO, "Smart Access Video enabled low-latency mode for the decoder.\n");
+ }
+ }
+ }
+ if (ctx->skip_transfer_sav != -1)
+ AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_VIDEO_DECODER_SKIP_TRANSFER_SMART_ACCESS_VIDEO, ctx->skip_transfer_sav);
+
+ if (avctx->extradata_size) {
+ res = internal->context->pVtbl->AllocBuffer(internal->context, AMF_MEMORY_HOST, avctx->extradata_size, &buffer);
+ if (res == AMF_OK) {
+ memcpy(buffer->pVtbl->GetNative(buffer), avctx->extradata, avctx->extradata_size);
+ AMF_ASSIGN_PROPERTY_INTERFACE(res,ctx->decoder, AMF_VIDEO_DECODER_EXTRADATA, buffer);
+ buffer->pVtbl->Release(buffer);
+ buffer = NULL;
+ }
+ }
+ if (ctx->surface_pool_size == -1) {
+ ctx->surface_pool_size = pool_size;
+ if (avctx->extra_hw_frames > 0)
+ ctx->surface_pool_size += avctx->extra_hw_frames;
+ if (avctx->active_thread_type & FF_THREAD_FRAME)
+ ctx->surface_pool_size += avctx->thread_count;
+ }
+
+ //at the moment, there is such a restriction in AMF.
+ //when it is possible, I will remove this code
+ if (ctx->surface_pool_size > 100)
+ ctx->surface_pool_size = 100;
+
+ AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_VIDEO_DECODER_SURFACE_POOL_SIZE, ctx->surface_pool_size);
+ res = ctx->decoder->pVtbl->Init(ctx->decoder, output_format, avctx->width, avctx->height);
+ return res == AMF_OK ? 0 : AVERROR(EINVAL);
+}
+
+static int amf_init_decoder_context(AVCodecContext *avctx)
+{
+ AvAmfDecoderContext *ctx = avctx->priv_data;
+ int ret;
+
+ if (avctx->hw_frames_ctx) {
+ AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
+ ret = av_amf_context_derive((AVAMFDeviceContextInternal *)ctx->amf_device_ctx_internal->data, frames_ctx->device_ctx, NULL, 0);
+ if (ret < 0)
+ return ret;
+ ctx->hw_frames_ctx = av_buffer_ref(avctx->hw_frames_ctx);
+ if (!ctx->hw_frames_ctx)
+ return AVERROR(ENOMEM);
+ }
+ else if (avctx->hw_device_ctx) {
+ AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)avctx->hw_device_ctx->data;
+ ret = av_amf_context_derive((AVAMFDeviceContextInternal *)ctx->amf_device_ctx_internal->data, device_ctx, NULL, 0);
+ if (ret < 0)
+ return ret;
+ ctx->hw_device_ctx = av_buffer_ref(avctx->hw_device_ctx);
+ if (!ctx->hw_device_ctx)
+ return AVERROR(ENOMEM);
+ } else {
+ ret = av_amf_context_init((AVAMFDeviceContextInternal *)ctx->amf_device_ctx_internal->data, avctx);
+ if (ret != 0) {
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int amf_decode_close(AVCodecContext *avctx)
+{
+ AvAmfDecoderContext *ctx = avctx->priv_data;
+
+ if (ctx->decoder) {
+ ctx->decoder->pVtbl->Terminate(ctx->decoder);
+ ctx->decoder->pVtbl->Release(ctx->decoder);
+ ctx->decoder = NULL;
+ }
+
+ av_buffer_unref(&ctx->amf_device_ctx_internal);
+ av_buffer_unref(&ctx->hw_device_ctx);
+ av_buffer_unref(&ctx->hw_frames_ctx);
+ av_buffer_unref(&ctx->amf_device_ctx);
+
+ return 0;
+
+}
+
+static int amf_decode_init(AVCodecContext *avctx)
+{
+ AvAmfDecoderContext *ctx = avctx->priv_data;
+ int ret;
+ enum AVPixelFormat pix_fmts[3] = {
+ AV_PIX_FMT_AMF,
+ avctx->pix_fmt,
+ AV_PIX_FMT_NONE };
+
+ ret = ff_get_format(avctx, pix_fmts);
+ if (ret < 0) {
+ avctx->pix_fmt = AV_PIX_FMT_NONE;
+ }
+
+ if (avctx->hw_frames_ctx){
+ AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
+ if (frames_ctx->device_ctx->type == AV_HWDEVICE_TYPE_AMF) {
+ AVAMFDeviceContext * amf_ctx = frames_ctx->device_ctx->hwctx;
+ ctx->amf_device_ctx_internal = av_buffer_ref(amf_ctx->internal);
+ }
+ }
+ else if (avctx->hw_device_ctx && !avctx->hw_frames_ctx && ret == AV_PIX_FMT_AMF) {
+ AVHWDeviceContext *hwdev_ctx;
+ AVHWFramesContext *hwframes_ctx;
+ hwdev_ctx = (AVHWDeviceContext*)avctx->hw_device_ctx->data;
+ if (hwdev_ctx->type == AV_HWDEVICE_TYPE_AMF)
+ {
+ AVAMFDeviceContext * amf_ctx = hwdev_ctx->hwctx;
+ ctx->amf_device_ctx_internal = av_buffer_ref(amf_ctx->internal);
+ }
+
+ avctx->hw_frames_ctx = av_hwframe_ctx_alloc(avctx->hw_device_ctx);
+
+ if (!avctx->hw_frames_ctx) {
+ av_log(avctx, AV_LOG_ERROR, "av_hwframe_ctx_alloc failed\n");
+ return AVERROR(ENOMEM);
+ }
+
+ hwframes_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
+ hwframes_ctx->width = FFALIGN(avctx->coded_width, 32);
+ hwframes_ctx->height = FFALIGN(avctx->coded_height, 32);
+ hwframes_ctx->format = AV_PIX_FMT_AMF;
+ hwframes_ctx->sw_format = avctx->sw_pix_fmt == AV_PIX_FMT_YUV420P10 ? AV_PIX_FMT_P010 : AV_PIX_FMT_NV12;
+ hwframes_ctx->initial_pool_size = ctx->surface_pool_size + 8;
+ avctx->pix_fmt = AV_PIX_FMT_AMF;
+
+ ret = av_hwframe_ctx_init(avctx->hw_frames_ctx);
+
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Error initializing an AMF frame pool\n");
+ av_buffer_unref(&avctx->hw_frames_ctx);
+ return ret;
+ }
+ } else {
+ AVAMFDeviceContextInternal *wrapped = av_mallocz(sizeof(*wrapped));
+ ctx->amf_device_ctx_internal = av_buffer_create((uint8_t *)wrapped, sizeof(*wrapped),
+ av_amf_context_internal_free, NULL, 0);
+ if ((ret = av_amf_context_internal_create((AVAMFDeviceContextInternal *)ctx->amf_device_ctx_internal->data, avctx, "", NULL, 0)) != 0) {
+ amf_decode_close(avctx);
+ return ret;
+ }
+ if ((ret = amf_init_decoder_context(avctx)) != 0) {
+ return ret;
+ }
+ }
+ if ((ret = amf_init_decoder(avctx)) == 0) {
+ return 0;
+ }
+ amf_decode_close(avctx);
+ return ret;
+}
+
+static AMF_RESULT amf_get_property_buffer(AMFData *object, const wchar_t *name, AMFBuffer **val)
+{
+ AMF_RESULT res;
+ AMFVariantStruct var;
+ res = AMFVariantInit(&var);
+ if (res == AMF_OK) {
+ res = object->pVtbl->GetProperty(object, name, &var);
+ if (res == AMF_OK) {
+ if (var.type == AMF_VARIANT_INTERFACE) {
+ AMFGuid guid_AMFBuffer = IID_AMFBuffer();
+ AMFInterface *amf_interface = AMFVariantInterface(&var);
+ res = amf_interface->pVtbl->QueryInterface(amf_interface, &guid_AMFBuffer, (void**)val);
+ } else {
+ res = AMF_INVALID_DATA_TYPE;
+ }
+ }
+ AMFVariantClear(&var);
+ }
+ return res;
+}
+
+static int amf_amfsurface_to_avframe(AVCodecContext *avctx, AMFSurface* surface, AVFrame *frame)
+{
+ AMFVariantStruct var = {0};
+ AMFPlane *plane;
+ int i;
+ int ret;
+
+ if (avctx->hw_frames_ctx) {
+ AVHWFramesContext *hwframes_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
+ if (hwframes_ctx->format == AV_PIX_FMT_AMF) {
+ ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_ERROR, "Get hw frame failed.\n");
+ return ret;
+ }
+ //we need to release surface with frame to return it to decoder
+ frame->buf[1] = av_buffer_create((uint8_t *)surface, sizeof(AMFSurface),
+ amf_free_amfsurface, (void*)avctx,
+ AV_BUFFER_FLAG_READONLY);
+ frame->data[3] = (uint8_t *)surface;
+ } else {
+ av_log(avctx, AV_LOG_ERROR, "Unknown format for hwframes_ctx\n");
+ return AVERROR(EINVAL);
+ }
+ } else {
+ ret = surface->pVtbl->Convert(surface, AMF_MEMORY_HOST);
+ AMF_RETURN_IF_FALSE(avctx, ret == AMF_OK, AVERROR_UNKNOWN, "Convert(amf::AMF_MEMORY_HOST) failed with error %d\n", ret);
+
+ for (i = 0; i < surface->pVtbl->GetPlanesCount(surface); i++) {
+ plane = surface->pVtbl->GetPlaneAt(surface, i);
+ frame->data[i] = plane->pVtbl->GetNative(plane);
+ frame->linesize[i] = plane->pVtbl->GetHPitch(plane);
+ }
+
+ frame->buf[0] = av_buffer_create((uint8_t *)surface, sizeof(AMFSurface),
+ amf_free_amfsurface, (void*)avctx,
+ AV_BUFFER_FLAG_READONLY);
+ frame->format = av_amf_to_av_format(surface->pVtbl->GetFormat(surface));
+ }
+
+ frame->width = avctx->width;
+ frame->height = avctx->height;
+
+ frame->pts = surface->pVtbl->GetPts(surface);
+
+ surface->pVtbl->GetProperty(surface, L"FFMPEG:dts", &var);
+ frame->pkt_dts = var.int64Value;
+
+ frame->duration = surface->pVtbl->GetDuration(surface);
+ if (frame->duration < 0)
+ frame->duration = 0;
+
+ frame->color_range = avctx->color_range;
+ frame->colorspace = avctx->colorspace;
+ frame->color_trc = avctx->color_trc;
+ frame->color_primaries = avctx->color_primaries;
+
+ if (frame->color_trc == AVCOL_TRC_SMPTE2084) {
+ AMFBuffer * hdrmeta_buffer = NULL;
+ ret = amf_get_property_buffer((AMFData *)surface, AMF_VIDEO_DECODER_HDR_METADATA, &hdrmeta_buffer);
+ if (hdrmeta_buffer != NULL) {
+ AMFHDRMetadata * hdrmeta = (AMFHDRMetadata*)hdrmeta_buffer->pVtbl->GetNative(hdrmeta_buffer);
+ if (ret != AMF_OK)
+ return ret;
+ if (hdrmeta != NULL) {
+ AVMasteringDisplayMetadata *mastering = av_mastering_display_metadata_create_side_data(frame);
+ const int chroma_den = 50000;
+ const int luma_den = 10000;
+
+ if (!mastering)
+ return AVERROR(ENOMEM);
+
+ mastering->display_primaries[0][0] = av_make_q(hdrmeta->redPrimary[0], chroma_den);
+ mastering->display_primaries[0][1] = av_make_q(hdrmeta->redPrimary[1], chroma_den);
+
+ mastering->display_primaries[1][0] = av_make_q(hdrmeta->greenPrimary[0], chroma_den);
+ mastering->display_primaries[1][1] = av_make_q(hdrmeta->greenPrimary[1], chroma_den);
+
+ mastering->display_primaries[2][0] = av_make_q(hdrmeta->bluePrimary[0], chroma_den);
+ mastering->display_primaries[2][1] = av_make_q(hdrmeta->bluePrimary[1], chroma_den);
+
+ mastering->white_point[0] = av_make_q(hdrmeta->whitePoint[0], chroma_den);
+ mastering->white_point[1] = av_make_q(hdrmeta->whitePoint[1], chroma_den);
+
+ mastering->max_luminance = av_make_q(hdrmeta->maxMasteringLuminance, luma_den);
+ mastering->min_luminance = av_make_q(hdrmeta->minMasteringLuminance, luma_den);
+
+ mastering->has_luminance = 1;
+ mastering->has_primaries = 1;
+ if (hdrmeta->maxContentLightLevel) {
+ AVContentLightMetadata *light = av_content_light_metadata_create_side_data(frame);
+
+ if (!light)
+ return AVERROR(ENOMEM);
+
+ light->MaxCLL = hdrmeta->maxContentLightLevel;
+ light->MaxFALL = hdrmeta->maxFrameAverageLightLevel;
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+static AMF_RESULT amf_receive_frame(AVCodecContext *avctx, AVFrame *frame)
+{
+ AvAmfDecoderContext *ctx = avctx->priv_data;
+ AMF_RESULT ret = AMF_OK;
+ AMFSurface *surface = NULL;
+ AMFData *data_out = NULL;
+
+ ret = ctx->decoder->pVtbl->QueryOutput(ctx->decoder, &data_out);
+ if (ret != AMF_OK && ret != AMF_REPEAT) {
+ return ret;
+ }
+ if (data_out == NULL) {
+ return AMF_FAIL;
+ }
+
+ if (data_out) {
+ AMFGuid guid = IID_AMFSurface();
+ data_out->pVtbl->QueryInterface(data_out, &guid, (void**)&surface); // query for surface interface
+ data_out->pVtbl->Release(data_out);
+ data_out = NULL;
+ }
+
+ ret = amf_amfsurface_to_avframe(avctx, surface, frame);
+ AMF_GOTO_FAIL_IF_FALSE(avctx, ret >= 0, AMF_FAIL, "Failed to convert AMFSurface to AVFrame = %d\n", ret);
+ return AMF_OK;
+fail:
+
+ if (surface) {
+ surface->pVtbl->Release(surface);
+ surface = NULL;
+ }
+ return ret;
+}
+
+static AMF_RESULT amf_update_buffer_properties(AVCodecContext *avctx, AMFBuffer* buffer, const AVPacket* pkt)
+{
+ AvAmfDecoderContext *ctx = avctx->priv_data;
+ AVAMFDeviceContextInternal * internal = (AVAMFDeviceContextInternal * )ctx->amf_device_ctx_internal->data;
+ AMFContext *ctxt = internal->context;
+
+ AMF_RESULT res;
+
+ AMF_RETURN_IF_FALSE(ctxt, buffer != NULL, AMF_INVALID_ARG, "update_buffer_properties() - buffer not passed in");
+ AMF_RETURN_IF_FALSE(ctxt, pkt != NULL, AMF_INVALID_ARG, "update_buffer_properties() - packet not passed in");
+ buffer->pVtbl->SetPts(buffer, pkt->pts);
+ buffer->pVtbl->SetDuration(buffer, pkt->duration);
+ AMF_ASSIGN_PROPERTY_INT64(res, buffer, L"FFMPEG:dts", pkt->dts);
+ if (res != AMF_OK)
+ av_log(avctx, AV_LOG_VERBOSE, "Failed to assign dts value.\n");
+ return AMF_OK;
+}
+
+static AMF_RESULT amf_buffer_from_packet(AVCodecContext *avctx, const AVPacket* pkt, AMFBuffer** buffer)
+{
+ AvAmfDecoderContext *ctx = avctx->priv_data;
+ AVAMFDeviceContextInternal * internal = (AVAMFDeviceContextInternal * )ctx->amf_device_ctx_internal->data;
+ AMFContext *ctxt = internal->context;
+ void *mem;
+ AMF_RESULT err;
+ AMFBuffer *buf = NULL;
+
+ AMF_RETURN_IF_FALSE(ctxt, pkt != NULL, AMF_INVALID_ARG, "amf_buffer_from_packet() - packet not passed in");
+ AMF_RETURN_IF_FALSE(ctxt, buffer != NULL, AMF_INVALID_ARG, "amf_buffer_from_packet() - buffer pointer not passed in");
+
+ err = ctxt->pVtbl->AllocBuffer(ctxt, AMF_MEMORY_HOST, pkt->size + AV_INPUT_BUFFER_PADDING_SIZE, buffer);
+ AMF_RETURN_IF_FALSE(ctxt, err == AMF_OK, err, "amf_buffer_from_packet() - failed");
+ buf = *buffer;
+ err = buf->pVtbl->SetSize(buf, pkt->size);
+ AMF_RETURN_IF_FALSE(ctxt, err == AMF_OK, err, "amf_buffer_from_packet() - SetSize failed");
+ // get the memory location and check the buffer was indeed allocated
+ mem = buf->pVtbl->GetNative(buf);
+ AMF_RETURN_IF_FALSE(ctxt, mem != NULL, AMF_INVALID_POINTER, "amf_buffer_from_packet() - GetNative failed");
+
+ // copy the packet memory and clear data padding
+ memcpy(mem, pkt->data, pkt->size);
+ memset((amf_int8*)(mem)+pkt->size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
+
+ return amf_update_buffer_properties(avctx, buf, pkt);
+}
+
+static int amf_decode_frame(AVCodecContext *avctx, AVFrame *data,
+ int *got_frame, AVPacket *avpkt)
+{
+ AvAmfDecoderContext *ctx = avctx->priv_data;
+ AVFrame *frame = data;
+ AMFBuffer *buf;
+ AMF_RESULT res;
+
+ if (!ctx->decoder)
+ return AVERROR(EINVAL);
+
+ if (!avpkt->size && ctx->drained == 0) {
+ ctx->decoder->pVtbl->Drain(ctx->decoder);
+ ctx->drained = 1;
+ }
+ if (avpkt->size > 0) {
+ res = amf_buffer_from_packet(avctx, avpkt, &buf);
+ AMF_RETURN_IF_FALSE(avctx, res == AMF_OK, 0, "Cannot convert AVPacket to AMFbuffer");
+ res = ctx->decoder->pVtbl->SubmitInput(ctx->decoder, (AMFData*) buf);
+ // FIXME: check other return values
+ if (res == AMF_OK || res == AMF_NEED_MORE_INPUT)
+ {
+ *got_frame = 0;
+ } else {
+ av_log(avctx, AV_LOG_VERBOSE, "SubmitInput() returned %d\n", res);
+ }
+
+ buf->pVtbl->Release(buf);
+ buf = NULL;
+ if (res == AMF_INPUT_FULL) { // handle full queue
+ *got_frame = 0;
+ }
+ }
+
+ res = amf_receive_frame(avctx, frame);
+ if (res == AMF_OK) {
+ AMF_RETURN_IF_FALSE(avctx, !*got_frame, avpkt->size, "frame already got");
+ *got_frame = 1;
+ } else if (res != AMF_EOF && res != AMF_FAIL) {
+ av_log(avctx, AV_LOG_ERROR, "Unknown result from QueryOutput %d\n", res);
+ }
+
+ return avpkt->size;
+}
+
+static void amf_decode_flush(AVCodecContext *avctx)
+{
+ AvAmfDecoderContext *ctx = avctx->priv_data;
+ ctx->decoder->pVtbl->Flush(ctx->decoder);
+}
+
+#define OFFSET(x) offsetof(AvAmfDecoderContext, x)
+#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
+
+static const AVOption options[] = {
+ // Decoder mode
+ { "decoder_mode", "Decoder mode", OFFSET(decoder_mode), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, AMF_VIDEO_DECODER_MODE_LOW_LATENCY, VD, "decoder_mode" },
+ { "regular", "DPB delay is based on number of reference frames + 1", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_DECODER_MODE_REGULAR }, 0, 0, VD, "decoder_mode" },
+ { "compliant", "DPB delay is based on profile - up to 16", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_DECODER_MODE_COMPLIANT }, 0, 0, VD, "decoder_mode" },
+ { "low_latency", "DPB delay is 0", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_DECODER_MODE_LOW_LATENCY }, 0, 0, VD, "decoder_mode" },
+
+ // Timestamp mode
+ { "timestamp_mode", "Timestamp mode", OFFSET(timestamp_mode), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, AMF_TS_DECODE, VD, "timestamp_mode" },
+ { "presentation", "Preserve timestamps from input to output", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_TS_PRESENTATION }, 0, 0, VD, "timestamp_mode" },
+ { "sort", "Resort PTS list", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_TS_SORT }, 0, 0, VD, "timestamp_mode" },
+ { "decode", "Decode order", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_TS_DECODE }, 0, 0, VD, "timestamp_mode" },
+
+ // Reference frame management
+ { "surface_pool_size", "Number of surfaces in the decode pool", OFFSET(surface_pool_size), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, INT_MAX, VD, NULL },
+ { "dpb_size", "Minimum number of surfaces for reordering", OFFSET(dpb_size), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 32, VD, NULL },
+
+ { "lowlatency", "Low latency", OFFSET(lowlatency), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, VD, NULL },
+ { "smart_access_video", "Smart Access Video", OFFSET(smart_access_video), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, VD, NULL },
+ { "skip_transfer_sav", "Skip transfer on another GPU when SAV enabled", OFFSET(skip_transfer_sav), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, VD, NULL },
+
+ { NULL }
+};
+
+static const AVClass amf_decode_class = {
+ .class_name = "amf",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
+
+#define DEFINE_AMF_DECODER(x, X, bsf_name) \
+const FFCodec ff_##x##_amf_decoder = { \
+ .p.name = #x "_amf", \
+ CODEC_LONG_NAME(#X " AMD AMF video decoder"), \
+ .priv_data_size = sizeof(AvAmfDecoderContext), \
+ .p.type = AVMEDIA_TYPE_VIDEO, \
+ .p.id = AV_CODEC_ID_##X, \
+ .init = amf_decode_init, \
+ FF_CODEC_DECODE_CB(amf_decode_frame), \
+ .flush = amf_decode_flush, \
+ .close = amf_decode_close, \
+ .bsfs = bsf_name, \
+ .p.capabilities = AV_CODEC_CAP_HARDWARE | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AVOID_PROBING, \
+ .p.priv_class = &amf_decode_class, \
+ .p.pix_fmts = amf_dec_pix_fmts, \
+ .hw_configs = amf_hw_configs, \
+ .p.wrapper_name = "amf", \
+ .caps_internal = FF_CODEC_CAP_NOT_INIT_THREADSAFE, \
+}; \
+
+DEFINE_AMF_DECODER(h264, H264, "h264_mp4toannexb")
+DEFINE_AMF_DECODER(hevc, HEVC, NULL)
+DEFINE_AMF_DECODER(av1, AV1, NULL)
diff --git a/libavcodec/amfdec.h b/libavcodec/amfdec.h
new file mode 100644
index 0000000000..4c45d2426b
--- /dev/null
+++ b/libavcodec/amfdec.h
@@ -0,0 +1,75 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_AMFDEC_H
+#define AVCODEC_AMFDEC_H
+
+#include <AMF/core/Buffer.h>
+#include <AMF/core/Factory.h>
+#include <AMF/core/Context.h>
+#include <AMF/core/Surface.h>
+#include <AMF/components/Component.h>
+#include <AMF/components/VideoDecoderUVD.h>
+
+#include "avcodec.h"
+#include "libavformat/avformat.h"
+#include "libavutil/fifo.h"
+#include "libavutil/frame.h"
+#include "libavutil/opt.h"
+
+/**
+* AMF decoder context
+*/
+
+typedef struct AvAmfDecoderContext {
+ AVClass *avclass;
+
+ AVBufferRef *amf_device_ctx_internal;
+ AVBufferRef *amf_device_ctx;
+
+ //decoder
+ AMFComponent *decoder; ///< AMF decoder object
+ AMF_SURFACE_FORMAT format; ///< AMF surface format
+
+ AVBufferRef *hw_device_ctx; ///< pointer to HW accelerator (decoder)
+ AVBufferRef *hw_frames_ctx; ///< pointer to HW accelerator (frame allocator)
+
+ AVBufferRef *hw_device_ref;
+ AVBufferRef *hw_frames_ref;
+
+ // shift dts back by max_b_frames in timing
+ AVFifoBuffer *timestamp_list;
+ int64_t dts_delay;
+
+ amf_uint64 version; ///< version of AMF runtime
+ // common encoder option options
+
+ int log_to_dbg;
+ // Static options, have to be set before Init() call
+ int decoder_mode;
+ int timestamp_mode;
+ int surface_pool_size;
+ int dpb_size;
+ int lowlatency;
+ int smart_access_video;
+ int skip_transfer_sav;
+ int drained;
+
+} AvAmfDecoderContext;
+
+#endif // AVCODEC_AMFDEC_H
\ No newline at end of file
diff --git a/libavcodec/h264_slice.c b/libavcodec/h264_slice.c
index 8464a0b34c..d11821194f 100644
--- a/libavcodec/h264_slice.c
+++ b/libavcodec/h264_slice.c
@@ -864,6 +864,9 @@ static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
#if CONFIG_H264_NVDEC_HWACCEL
*fmt++ = AV_PIX_FMT_CUDA;
#endif
+#if CONFIG_H264_AMFDEC_HWACCEL
+ *fmt++ = AV_PIX_FMT_AMF;
+#endif
#if CONFIG_H264_VIDEOTOOLBOX_HWACCEL
if (h->avctx->colorspace != AVCOL_SPC_RGB)
*fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
diff --git a/libavcodec/h264dec.c b/libavcodec/h264dec.c
index 9f5893c512..7a2c9eecef 100644
--- a/libavcodec/h264dec.c
+++ b/libavcodec/h264dec.c
@@ -1137,6 +1137,9 @@ const FFCodec ff_h264_decoder = {
#if CONFIG_H264_NVDEC_HWACCEL
HWACCEL_NVDEC(h264),
#endif
+#if CONFIG_H264_AMFDEC_HWACCEL
+ HWACCEL_AMFDEC(h264),
+#endif
#if CONFIG_H264_VAAPI_HWACCEL
HWACCEL_VAAPI(h264),
#endif
diff --git a/libavcodec/hwconfig.h b/libavcodec/hwconfig.h
index ee29ca631d..556f724895 100644
--- a/libavcodec/hwconfig.h
+++ b/libavcodec/hwconfig.h
@@ -67,6 +67,8 @@ void ff_hwaccel_uninit(AVCodecContext *avctx);
HW_CONFIG_HWACCEL(1, 1, 0, D3D11, D3D11VA, ff_ ## codec ## _d3d11va2_hwaccel)
#define HWACCEL_NVDEC(codec) \
HW_CONFIG_HWACCEL(1, 1, 0, CUDA, CUDA, ff_ ## codec ## _nvdec_hwaccel)
+#define HWACCEL_AMFDEC(codec) \
+ HW_CONFIG_HWACCEL(1, 1, 0, AMF, AMF, ff_ ## codec ## _amfdec_hwaccel)
#define HWACCEL_VAAPI(codec) \
HW_CONFIG_HWACCEL(1, 1, 1, VAAPI, VAAPI, ff_ ## codec ## _vaapi_hwaccel)
#define HWACCEL_VDPAU(codec) \
--
2.38.1.windows.1
_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [FFmpeg-devel] [PATCH 2/9] libavcodec: add amfdec.
2024-02-14 1:55 ` [FFmpeg-devel] [PATCH 2/9] libavcodec: add amfdec Dmitrii Ovchinnikov
@ 2024-02-14 23:41 ` Mark Thompson
0 siblings, 0 replies; 18+ messages in thread
From: Mark Thompson @ 2024-02-14 23:41 UTC (permalink / raw)
To: ffmpeg-devel
On 14/02/2024 01:55, Dmitrii Ovchinnikov wrote:
> From: Evgeny Pavlov <lucenticus@gmail.com>
>
> Added AMF based h264, hevc, av1 decoders.
> Co-authored-by: Dmitrii Ovchinnikov <ovchinnikov.dmitrii@gmail.com>
> ---
> libavcodec/Makefile | 4 +-
> libavcodec/allcodecs.c | 3 +
> libavcodec/amfdec.c | 667 ++++++++++++++++++++++++++++++++++++++++
> libavcodec/amfdec.h | 75 +++++
> libavcodec/h264_slice.c | 3 +
> libavcodec/h264dec.c | 3 +
> libavcodec/hwconfig.h | 2 +
> 7 files changed, 755 insertions(+), 2 deletions(-)
> create mode 100644 libavcodec/amfdec.c
> create mode 100644 libavcodec/amfdec.h
>
> ...
> +
> +static int amf_decode_init(AVCodecContext *avctx)
> +{
> + AvAmfDecoderContext *ctx = avctx->priv_data;
> + int ret;
> + enum AVPixelFormat pix_fmts[3] = {
> + AV_PIX_FMT_AMF,
> + avctx->pix_fmt,
> + AV_PIX_FMT_NONE };
> +
> + ret = ff_get_format(avctx, pix_fmts);
> + if (ret < 0) {
> + avctx->pix_fmt = AV_PIX_FMT_NONE;
> + }
I think you've misunderstood how decoder setup works. AVCodecContext.pix_fmt happens to be set to an initial value in some cases which use libavformat (including the ffmpeg utility), but there is no requirement on the user to do so (see the doxy). Also all of the format information can change at any moment mid-stream (consider adaptive streaming scenarios).
It is therefore necessary for the decoder to parse the input and determine the intended format before calling the get_format callback, and to do that again whenever the format changes. Calling it once at the beginning does not work at all.
> ...
> diff --git a/libavcodec/h264_slice.c b/libavcodec/h264_slice.c
> index 8464a0b34c..d11821194f 100644
> --- a/libavcodec/h264_slice.c
> +++ b/libavcodec/h264_slice.c
> @@ -864,6 +864,9 @@ static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
> #if CONFIG_H264_NVDEC_HWACCEL
> *fmt++ = AV_PIX_FMT_CUDA;
> #endif
> +#if CONFIG_H264_AMFDEC_HWACCEL
> + *fmt++ = AV_PIX_FMT_AMF;
> +#endif
> #if CONFIG_H264_VIDEOTOOLBOX_HWACCEL
> if (h->avctx->colorspace != AVCOL_SPC_RGB)
> *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
> diff --git a/libavcodec/h264dec.c b/libavcodec/h264dec.c
> index 9f5893c512..7a2c9eecef 100644
> --- a/libavcodec/h264dec.c
> +++ b/libavcodec/h264dec.c
> @@ -1137,6 +1137,9 @@ const FFCodec ff_h264_decoder = {
> #if CONFIG_H264_NVDEC_HWACCEL
> HWACCEL_NVDEC(h264),
> #endif
> +#if CONFIG_H264_AMFDEC_HWACCEL
> + HWACCEL_AMFDEC(h264),
> +#endif
> #if CONFIG_H264_VAAPI_HWACCEL
> HWACCEL_VAAPI(h264),
> #endif
I don't see any acceleration support here at all, this is entirely an offload decoder.
Thanks,
- Mark
_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
^ permalink raw reply [flat|nested] 18+ messages in thread
* [FFmpeg-devel] [PATCH 3/9] avcodec/amfenc: Fixes the color information in the output.
2024-02-14 1:55 [FFmpeg-devel] [PATCH 1/9] libavutil: add hwcontext_amf Dmitrii Ovchinnikov
2024-02-14 1:55 ` [FFmpeg-devel] [PATCH 2/9] libavcodec: add amfdec Dmitrii Ovchinnikov
@ 2024-02-14 1:55 ` Dmitrii Ovchinnikov
2024-02-14 1:55 ` [FFmpeg-devel] [PATCH 4/9] avcodec/amfenc: HDR metadata Dmitrii Ovchinnikov
` (7 subsequent siblings)
9 siblings, 0 replies; 18+ messages in thread
From: Dmitrii Ovchinnikov @ 2024-02-14 1:55 UTC (permalink / raw)
To: ffmpeg-devel; +Cc: Evgeny Pavlov, Michael Fabian 'Xaymar' Dirks
From: Michael Fabian 'Xaymar' Dirks <michael.dirks@xaymar.com>
Added 10-bit support for AMF HEVC.
before:
command - ffmpeg.exe -hide_banner -y -hwaccel d3d11va -hwaccel_output_format d3d11 -i test_10bit_file.mkv -an -c:v h264_amf res.dx11_hw_h264.mkv
output - Format of input frames context (p010le) is not supported by AMF.
command - ffmpeg.exe -hide_banner -y -hwaccel d3d11va -hwaccel_output_format d3d11 -i test_10bit_file -an -c:v hevc_amf res.dx11_hw_hevc.mkv
output - Format of input frames context (p010le) is not supported by AMF.
after:
command - ffmpeg.exe -hide_banner -y -hwaccel d3d11va -hwaccel_output_format d3d11 -i test_10bit_file -an -c:v h264_amf res.dx11_hw_h264.mkv
output - 10-bit input video is not supported by AMF H264 encoder
command - ffmpeg.exe -hide_banner -y -hwaccel d3d11va -hwaccel_output_format d3d11 -i test_10bit_file -an -c:v hevc_amf res.dx11_hw_hevc.mkv
output - 10bit file
v2 - lost line returned in ff_amf_pix_fmts
v3 - fixes after review
v4 - extract duplicated code, fix incorrect processing of 10-bit input for h264
v5 - non-functional changes after review
Co-authored-by: Evgeny Pavlov <lucenticus@gmail.com>
---
libavcodec/amfenc.c | 37 +++++++++++++++++++++++++++++++++++++
libavcodec/amfenc.h | 3 +++
libavcodec/amfenc_h264.c | 24 ++++++++++++++++++++----
libavcodec/amfenc_hevc.c | 26 +++++++++++++++++++++++++-
4 files changed, 85 insertions(+), 5 deletions(-)
diff --git a/libavcodec/amfenc.c b/libavcodec/amfenc.c
index 061859f85c..0bd15dd812 100644
--- a/libavcodec/amfenc.c
+++ b/libavcodec/amfenc.c
@@ -60,6 +60,7 @@ const enum AVPixelFormat ff_amf_pix_fmts[] = {
#if CONFIG_DXVA2
AV_PIX_FMT_DXVA2_VLD,
#endif
+ AV_PIX_FMT_P010,
AV_PIX_FMT_NONE
};
@@ -72,6 +73,7 @@ static const FormatMap format_map[] =
{
{ AV_PIX_FMT_NONE, AMF_SURFACE_UNKNOWN },
{ AV_PIX_FMT_NV12, AMF_SURFACE_NV12 },
+ { AV_PIX_FMT_P010, AMF_SURFACE_P010 },
{ AV_PIX_FMT_BGR0, AMF_SURFACE_BGRA },
{ AV_PIX_FMT_RGB0, AMF_SURFACE_RGBA },
{ AV_PIX_FMT_GRAY8, AMF_SURFACE_GRAY8 },
@@ -785,6 +787,41 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
return ret;
}
+int ff_amf_get_color_profile(AVCodecContext *avctx)
+{
+ amf_int64 color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_UNKNOWN;
+ if (avctx->color_range == AVCOL_RANGE_JPEG) {
+ /// Color Space for Full (JPEG) Range
+ switch (avctx->colorspace) {
+ case AVCOL_SPC_SMPTE170M:
+ color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_FULL_601;
+ break;
+ case AVCOL_SPC_BT709:
+ color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_FULL_709;
+ break;
+ case AVCOL_SPC_BT2020_NCL:
+ case AVCOL_SPC_BT2020_CL:
+ color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_FULL_2020;
+ break;
+ }
+ } else {
+ /// Color Space for Limited (MPEG) range
+ switch (avctx->colorspace) {
+ case AVCOL_SPC_SMPTE170M:
+ color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_601;
+ break;
+ case AVCOL_SPC_BT709:
+ color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_709;
+ break;
+ case AVCOL_SPC_BT2020_NCL:
+ case AVCOL_SPC_BT2020_CL:
+ color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_2020;
+ break;
+ }
+ }
+ return color_profile;
+}
+
const AVCodecHWConfigInternal *const ff_amfenc_hw_configs[] = {
#if CONFIG_D3D11VA
HW_CONFIG_ENCODER_FRAMES(D3D11, D3D11VA),
diff --git a/libavcodec/amfenc.h b/libavcodec/amfenc.h
index 2dbd378ef8..62736ef579 100644
--- a/libavcodec/amfenc.h
+++ b/libavcodec/amfenc.h
@@ -21,6 +21,7 @@
#include <AMF/core/Factory.h>
+#include <AMF/components/ColorSpace.h>
#include <AMF/components/VideoEncoderVCE.h>
#include <AMF/components/VideoEncoderHEVC.h>
#include <AMF/components/VideoEncoderAV1.h>
@@ -170,6 +171,8 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt);
*/
extern const enum AVPixelFormat ff_amf_pix_fmts[];
+int ff_amf_get_color_profile(AVCodecContext *avctx);
+
/**
* Error handling helper
*/
diff --git a/libavcodec/amfenc_h264.c b/libavcodec/amfenc_h264.c
index bd544d12df..f785e091c9 100644
--- a/libavcodec/amfenc_h264.c
+++ b/libavcodec/amfenc_h264.c
@@ -199,6 +199,8 @@ static av_cold int amf_encode_init_h264(AVCodecContext *avctx)
AMFRate framerate;
AMFSize framesize = AMFConstructSize(avctx->width, avctx->height);
int deblocking_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
+ amf_int64 color_profile;
+ enum AVPixelFormat pix_fmt;
if (avctx->framerate.num > 0 && avctx->framerate.den > 0) {
framerate = AMFConstructRate(avctx->framerate.num, avctx->framerate.den);
@@ -262,10 +264,24 @@ FF_ENABLE_DEPRECATION_WARNINGS
AMF_ASSIGN_PROPERTY_RATIO(res, ctx->encoder, AMF_VIDEO_ENCODER_ASPECT_RATIO, ratio);
}
- /// Color Range (Partial/TV/MPEG or Full/PC/JPEG)
- if (avctx->color_range == AVCOL_RANGE_JPEG) {
- AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_FULL_RANGE_COLOR, 1);
- }
+ color_profile = ff_amf_get_color_profile(avctx);
+ AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_OUTPUT_COLOR_PROFILE, color_profile);
+
+ /// Color Range (Support for older Drivers)
+ AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_FULL_RANGE_COLOR, !!(avctx->color_range == AVCOL_RANGE_JPEG));
+
+ /// Color Depth
+ pix_fmt = avctx->hw_frames_ctx ? ((AVHWFramesContext*)avctx->hw_frames_ctx->data)->sw_format
+ : avctx->pix_fmt;
+
+ // 10 bit input video is not supported by AMF H264 encoder
+ AMF_RETURN_IF_FALSE(ctx, pix_fmt != AV_PIX_FMT_P010, AVERROR_INVALIDDATA, "10-bit input video is not supported by AMF H264 encoder\n");
+
+ AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_COLOR_BIT_DEPTH, AMF_COLOR_BIT_DEPTH_8);
+ /// Color Transfer Characteristics (AMF matches ISO/IEC)
+ AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_OUTPUT_TRANSFER_CHARACTERISTIC, (amf_int64)avctx->color_trc);
+ /// Color Primaries (AMF matches ISO/IEC)
+ AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_OUTPUT_COLOR_PRIMARIES, (amf_int64)avctx->color_primaries);
// autodetect rate control method
if (ctx->rate_control_mode == AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_UNKNOWN) {
diff --git a/libavcodec/amfenc_hevc.c b/libavcodec/amfenc_hevc.c
index 352564a301..8c6401c646 100644
--- a/libavcodec/amfenc_hevc.c
+++ b/libavcodec/amfenc_hevc.c
@@ -34,8 +34,9 @@ static const AVOption options[] = {
{ "high_quality", "high quality trancoding", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_HEVC_USAGE_HIGH_QUALITY }, 0, 0, VE, "usage" },
{ "lowlatency_high_quality","low latency yet high quality trancoding", 0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_HEVC_USAGE_LOW_LATENCY_HIGH_QUALITY }, 0, 0, VE, "usage" },
- { "profile", "Set the profile (default main)", OFFSET(profile), AV_OPT_TYPE_INT,{ .i64 = AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN }, AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN, AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN, VE, "profile" },
+ { "profile", "Set the profile (default main)", OFFSET(profile), AV_OPT_TYPE_INT,{ .i64 = AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN }, AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN, AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN_10, VE, "profile" },
{ "main", "", 0, AV_OPT_TYPE_CONST,{ .i64 = AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN }, 0, 0, VE, "profile" },
+ { "main10", "", 0, AV_OPT_TYPE_CONST,{ .i64 = AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN_10 }, 0, 0, VE, "profile" },
{ "profile_tier", "Set the profile tier (default main)", OFFSET(tier), AV_OPT_TYPE_INT,{ .i64 = AMF_VIDEO_ENCODER_HEVC_TIER_MAIN }, AMF_VIDEO_ENCODER_HEVC_TIER_MAIN, AMF_VIDEO_ENCODER_HEVC_TIER_HIGH, VE, "tier" },
{ "main", "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_TIER_MAIN }, 0, 0, VE, "tier" },
@@ -160,6 +161,9 @@ static av_cold int amf_encode_init_hevc(AVCodecContext *avctx)
AMFRate framerate;
AMFSize framesize = AMFConstructSize(avctx->width, avctx->height);
int deblocking_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
+ amf_int64 color_depth;
+ amf_int64 color_profile;
+ enum AVPixelFormat pix_fmt;
if (avctx->framerate.num > 0 && avctx->framerate.den > 0) {
framerate = AMFConstructRate(avctx->framerate.num, avctx->framerate.den);
@@ -187,6 +191,9 @@ FF_ENABLE_DEPRECATION_WARNINGS
case AV_PROFILE_HEVC_MAIN:
profile = AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN;
break;
+ case AV_PROFILE_HEVC_MAIN_10:
+ profile = AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN_10;
+ break;
default:
break;
}
@@ -215,6 +222,23 @@ FF_ENABLE_DEPRECATION_WARNINGS
AMF_ASSIGN_PROPERTY_RATIO(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_ASPECT_RATIO, ratio);
}
+ color_profile = ff_amf_get_color_profile(avctx);
+ AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_OUTPUT_COLOR_PROFILE, color_profile);
+ /// Color Range (Support for older Drivers)
+ AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_NOMINAL_RANGE, !!(avctx->color_range == AVCOL_RANGE_JPEG));
+ /// Color Depth
+ color_depth = AMF_COLOR_BIT_DEPTH_8;
+ pix_fmt = avctx->hw_frames_ctx ? ((AVHWFramesContext*)avctx->hw_frames_ctx->data)->sw_format
+ : avctx->pix_fmt;
+ if (pix_fmt == AV_PIX_FMT_P010) {
+ color_depth = AMF_COLOR_BIT_DEPTH_10;
+ }
+ AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_COLOR_BIT_DEPTH, color_depth);
+ /// Color Transfer Characteristics (AMF matches ISO/IEC)
+ AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_OUTPUT_TRANSFER_CHARACTERISTIC, (amf_int64)avctx->color_trc);
+ /// Color Primaries (AMF matches ISO/IEC)
+ AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_OUTPUT_COLOR_PRIMARIES, (amf_int64)avctx->color_primaries);
+
// Picture control properties
AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_NUM_GOPS_PER_IDR, ctx->gops_per_idr);
AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_GOP_SIZE, avctx->gop_size);
--
2.38.1.windows.1
_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
^ permalink raw reply [flat|nested] 18+ messages in thread
* [FFmpeg-devel] [PATCH 4/9] avcodec/amfenc: HDR metadata.
2024-02-14 1:55 [FFmpeg-devel] [PATCH 1/9] libavutil: add hwcontext_amf Dmitrii Ovchinnikov
2024-02-14 1:55 ` [FFmpeg-devel] [PATCH 2/9] libavcodec: add amfdec Dmitrii Ovchinnikov
2024-02-14 1:55 ` [FFmpeg-devel] [PATCH 3/9] avcodec/amfenc: Fixes the color information in the output Dmitrii Ovchinnikov
@ 2024-02-14 1:55 ` Dmitrii Ovchinnikov
2024-02-14 1:55 ` [FFmpeg-devel] [PATCH 5/9] avcodec/amfenc: add 10 bit encoding in av1_amf Dmitrii Ovchinnikov
` (6 subsequent siblings)
9 siblings, 0 replies; 18+ messages in thread
From: Dmitrii Ovchinnikov @ 2024-02-14 1:55 UTC (permalink / raw)
To: ffmpeg-devel; +Cc: nyanmisaka
From: nyanmisaka <nst799610810@gmail.com>
v2: fixes for indentation
---
libavcodec/amfenc.c | 83 +++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 83 insertions(+)
diff --git a/libavcodec/amfenc.c b/libavcodec/amfenc.c
index 0bd15dd812..068bb53002 100644
--- a/libavcodec/amfenc.c
+++ b/libavcodec/amfenc.c
@@ -36,6 +36,57 @@
#include "amfenc.h"
#include "encode.h"
#include "internal.h"
+#include "libavutil/mastering_display_metadata.h"
+
+static int amf_save_hdr_metadata(AVCodecContext *avctx, const AVFrame *frame, AMFHDRMetadata *hdrmeta)
+{
+ AVFrameSideData *sd_display;
+ AVFrameSideData *sd_light;
+ AVMasteringDisplayMetadata *display_meta;
+ AVContentLightMetadata *light_meta;
+
+ sd_display = av_frame_get_side_data(frame, AV_FRAME_DATA_MASTERING_DISPLAY_METADATA);
+ if (sd_display) {
+ display_meta = (AVMasteringDisplayMetadata *)sd_display->data;
+ if (display_meta->has_luminance) {
+ const unsigned int luma_den = 10000;
+ hdrmeta->maxMasteringLuminance =
+ (amf_uint32)(luma_den * av_q2d(display_meta->max_luminance));
+ hdrmeta->minMasteringLuminance =
+ FFMIN((amf_uint32)(luma_den * av_q2d(display_meta->min_luminance)), hdrmeta->maxMasteringLuminance);
+ }
+ if (display_meta->has_primaries) {
+ const unsigned int chroma_den = 50000;
+ hdrmeta->redPrimary[0] =
+ FFMIN((amf_uint16)(chroma_den * av_q2d(display_meta->display_primaries[0][0])), chroma_den);
+ hdrmeta->redPrimary[1] =
+ FFMIN((amf_uint16)(chroma_den * av_q2d(display_meta->display_primaries[0][1])), chroma_den);
+ hdrmeta->greenPrimary[0] =
+ FFMIN((amf_uint16)(chroma_den * av_q2d(display_meta->display_primaries[1][0])), chroma_den);
+ hdrmeta->greenPrimary[1] =
+ FFMIN((amf_uint16)(chroma_den * av_q2d(display_meta->display_primaries[1][1])), chroma_den);
+ hdrmeta->bluePrimary[0] =
+ FFMIN((amf_uint16)(chroma_den * av_q2d(display_meta->display_primaries[2][0])), chroma_den);
+ hdrmeta->bluePrimary[1] =
+ FFMIN((amf_uint16)(chroma_den * av_q2d(display_meta->display_primaries[2][1])), chroma_den);
+ hdrmeta->whitePoint[0] =
+ FFMIN((amf_uint16)(chroma_den * av_q2d(display_meta->white_point[0])), chroma_den);
+ hdrmeta->whitePoint[1] =
+ FFMIN((amf_uint16)(chroma_den * av_q2d(display_meta->white_point[1])), chroma_den);
+ }
+
+ sd_light = av_frame_get_side_data(frame, AV_FRAME_DATA_CONTENT_LIGHT_LEVEL);
+ if (sd_light) {
+ light_meta = (AVContentLightMetadata *)sd_light->data;
+ if (light_meta) {
+ hdrmeta->maxContentLightLevel = (amf_uint16)light_meta->MaxCLL;
+ hdrmeta->maxFrameAverageLightLevel = (amf_uint16)light_meta->MaxFALL;
+ }
+ }
+ return 0;
+ }
+ return 1;
+}
#if CONFIG_D3D11VA
#include <d3d11.h>
@@ -683,6 +734,26 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
frame_ref_storage_buffer->pVtbl->Release(frame_ref_storage_buffer);
}
+ // HDR10 metadata
+ if (frame->color_trc == AVCOL_TRC_SMPTE2084) {
+ AMFBuffer * hdrmeta_buffer = NULL;
+ res = ctx->context->pVtbl->AllocBuffer(ctx->context, AMF_MEMORY_HOST, sizeof(AMFHDRMetadata), &hdrmeta_buffer);
+ if (res == AMF_OK) {
+ AMFHDRMetadata * hdrmeta = (AMFHDRMetadata*)hdrmeta_buffer->pVtbl->GetNative(hdrmeta_buffer);
+ if (amf_save_hdr_metadata(avctx, frame, hdrmeta) == 0) {
+ switch (avctx->codec->id) {
+ case AV_CODEC_ID_H264:
+ AMF_ASSIGN_PROPERTY_INTERFACE(res, ctx->encoder, AMF_VIDEO_ENCODER_INPUT_HDR_METADATA, hdrmeta_buffer); break;
+ case AV_CODEC_ID_HEVC:
+ AMF_ASSIGN_PROPERTY_INTERFACE(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_INPUT_HDR_METADATA, hdrmeta_buffer); break;
+ }
+ res = amf_set_property_buffer(surface, L"av_frame_hdrmeta", hdrmeta_buffer);
+ AMF_RETURN_IF_FALSE(avctx, res == AMF_OK, AVERROR_UNKNOWN, "SetProperty failed for \"av_frame_hdrmeta\" with error %d\n", res);
+ }
+ hdrmeta_buffer->pVtbl->Release(hdrmeta_buffer);
+ }
+ }
+
surface->pVtbl->SetPts(surface, frame->pts);
AMF_ASSIGN_PROPERTY_INT64(res, surface, PTS_PROP, frame->pts);
@@ -746,6 +817,18 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
}
res_resubmit = AMF_OK;
if (ctx->delayed_surface != NULL) { // try to resubmit frame
+ if (ctx->delayed_surface->pVtbl->HasProperty(ctx->delayed_surface, L"av_frame_hdrmeta")) {
+ AMFBuffer * hdrmeta_buffer = NULL;
+ res = amf_get_property_buffer((AMFData *)ctx->delayed_surface, L"av_frame_hdrmeta", &hdrmeta_buffer);
+ AMF_RETURN_IF_FALSE(avctx, res == AMF_OK, AVERROR_UNKNOWN, "GetProperty failed for \"av_frame_hdrmeta\" with error %d\n", res);
+ switch (avctx->codec->id) {
+ case AV_CODEC_ID_H264:
+ AMF_ASSIGN_PROPERTY_INTERFACE(res, ctx->encoder, AMF_VIDEO_ENCODER_INPUT_HDR_METADATA, hdrmeta_buffer); break;
+ case AV_CODEC_ID_HEVC:
+ AMF_ASSIGN_PROPERTY_INTERFACE(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_INPUT_HDR_METADATA, hdrmeta_buffer); break;
+ }
+ hdrmeta_buffer->pVtbl->Release(hdrmeta_buffer);
+ }
res_resubmit = ctx->encoder->pVtbl->SubmitInput(ctx->encoder, (AMFData*)ctx->delayed_surface);
if (res_resubmit != AMF_INPUT_FULL) {
int64_t pts = ctx->delayed_surface->pVtbl->GetPts(ctx->delayed_surface);
--
2.38.1.windows.1
_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
^ permalink raw reply [flat|nested] 18+ messages in thread
* [FFmpeg-devel] [PATCH 5/9] avcodec/amfenc: add 10 bit encoding in av1_amf
2024-02-14 1:55 [FFmpeg-devel] [PATCH 1/9] libavutil: add hwcontext_amf Dmitrii Ovchinnikov
` (2 preceding siblings ...)
2024-02-14 1:55 ` [FFmpeg-devel] [PATCH 4/9] avcodec/amfenc: HDR metadata Dmitrii Ovchinnikov
@ 2024-02-14 1:55 ` Dmitrii Ovchinnikov
2024-02-14 1:55 ` [FFmpeg-devel] [PATCH 6/9] avcodec/amfenc: add smart access video option Dmitrii Ovchinnikov
` (5 subsequent siblings)
9 siblings, 0 replies; 18+ messages in thread
From: Dmitrii Ovchinnikov @ 2024-02-14 1:55 UTC (permalink / raw)
To: ffmpeg-devel; +Cc: Evgeny Pavlov, Dmitrii Ovchinnikov
From: Evgeny Pavlov <lucenticus@gmail.com>
v2: refactored after review
Signed-off-by: Evgeny Pavlov <lucenticus@gmail.com>
Co-authored-by: Dmitrii Ovchinnikov <ovchinnikov.dmitrii@gmail.com>
---
libavcodec/amfenc.c | 2 ++
libavcodec/amfenc_av1.c | 22 ++++++++++++++++++++++
2 files changed, 24 insertions(+)
diff --git a/libavcodec/amfenc.c b/libavcodec/amfenc.c
index 068bb53002..f1b76bd6aa 100644
--- a/libavcodec/amfenc.c
+++ b/libavcodec/amfenc.c
@@ -826,6 +826,8 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
AMF_ASSIGN_PROPERTY_INTERFACE(res, ctx->encoder, AMF_VIDEO_ENCODER_INPUT_HDR_METADATA, hdrmeta_buffer); break;
case AV_CODEC_ID_HEVC:
AMF_ASSIGN_PROPERTY_INTERFACE(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_INPUT_HDR_METADATA, hdrmeta_buffer); break;
+ case AV_CODEC_ID_AV1:
+ AMF_ASSIGN_PROPERTY_INTERFACE(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_INPUT_HDR_METADATA, hdrmeta_buffer); break;
}
hdrmeta_buffer->pVtbl->Release(hdrmeta_buffer);
}
diff --git a/libavcodec/amfenc_av1.c b/libavcodec/amfenc_av1.c
index 3f164ccc59..9b32616aa8 100644
--- a/libavcodec/amfenc_av1.c
+++ b/libavcodec/amfenc_av1.c
@@ -165,6 +165,9 @@ static av_cold int amf_encode_init_av1(AVCodecContext* avctx)
AMFGuid guid;
AMFRate framerate;
AMFSize framesize = AMFConstructSize(avctx->width, avctx->height);
+ amf_int64 color_depth;
+ amf_int64 color_profile;
+ enum AVPixelFormat pix_fmt;
@@ -203,6 +206,25 @@ FF_ENABLE_DEPRECATION_WARNINGS
}
AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_PROFILE, profile);
+ /// Color profile
+ color_profile = ff_amf_get_color_profile(avctx);
+ AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_OUTPUT_COLOR_PROFILE, color_profile);
+
+ /// Color Depth
+ pix_fmt = avctx->hw_frames_ctx ? ((AVHWFramesContext*)avctx->hw_frames_ctx->data)->sw_format
+ : avctx->pix_fmt;
+ color_depth = AMF_COLOR_BIT_DEPTH_8;
+ if (pix_fmt == AV_PIX_FMT_P010) {
+ color_depth = AMF_COLOR_BIT_DEPTH_10;
+ }
+
+ AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_COLOR_BIT_DEPTH, color_depth);
+ AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_OUTPUT_COLOR_PROFILE, color_profile);
+ /// Color Transfer Characteristics (AMF matches ISO/IEC)
+ AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_OUTPUT_TRANSFER_CHARACTERISTIC, (amf_int64)avctx->color_trc);
+ /// Color Primaries (AMF matches ISO/IEC)
+ AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_OUTPUT_COLOR_PRIMARIES, (amf_int64)avctx->color_primaries);
+
profile_level = avctx->level;
if (profile_level == AV_LEVEL_UNKNOWN) {
profile_level = ctx->level;
--
2.38.1.windows.1
_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
^ permalink raw reply [flat|nested] 18+ messages in thread
* [FFmpeg-devel] [PATCH 6/9] avcodec/amfenc: add smart access video option
2024-02-14 1:55 [FFmpeg-devel] [PATCH 1/9] libavutil: add hwcontext_amf Dmitrii Ovchinnikov
` (3 preceding siblings ...)
2024-02-14 1:55 ` [FFmpeg-devel] [PATCH 5/9] avcodec/amfenc: add 10 bit encoding in av1_amf Dmitrii Ovchinnikov
@ 2024-02-14 1:55 ` Dmitrii Ovchinnikov
2024-02-14 1:55 ` [FFmpeg-devel] [PATCH 7/9] libavcodec/amfenc: redesign to use hwcontext_amf Dmitrii Ovchinnikov
` (4 subsequent siblings)
9 siblings, 0 replies; 18+ messages in thread
From: Dmitrii Ovchinnikov @ 2024-02-14 1:55 UTC (permalink / raw)
To: ffmpeg-devel; +Cc: Evgeny Pavlov
From: Evgeny Pavlov <lucenticus@gmail.com>
This commit adds an option for enabling SmartAccess Video (SAV)
in AMF encoders. SAV is an AMD hardware-specific feature which
enables the parallelization of encode and decode streams across
multiple Video Core Next (VCN) hardware instances.
Signed-off-by: Evgeny Pavlov <lucenticus@gmail.com>
---
libavcodec/amfenc.h | 1 +
libavcodec/amfenc_av1.c | 18 ++++++++++++++++++
libavcodec/amfenc_h264.c | 18 ++++++++++++++++++
libavcodec/amfenc_hevc.c | 18 ++++++++++++++++++
4 files changed, 55 insertions(+)
diff --git a/libavcodec/amfenc.h b/libavcodec/amfenc.h
index 62736ef579..1bda0136bd 100644
--- a/libavcodec/amfenc.h
+++ b/libavcodec/amfenc.h
@@ -90,6 +90,7 @@ typedef struct AmfContext {
int quality;
int b_frame_delta_qp;
int ref_b_frame_delta_qp;
+ int smart_access_video;
// Dynamic options, can be set after Init() call
diff --git a/libavcodec/amfenc_av1.c b/libavcodec/amfenc_av1.c
index 9b32616aa8..109533fb2d 100644
--- a/libavcodec/amfenc_av1.c
+++ b/libavcodec/amfenc_av1.c
@@ -104,6 +104,8 @@ static const AVOption options[] = {
{ "log_to_dbg", "Enable AMF logging to debug output", OFFSET(log_to_dbg), AV_OPT_TYPE_BOOL,{.i64 = 0 }, 0, 1, VE },
+ { "smart_access_video", "Enable Smart Access Video", OFFSET(smart_access_video), AV_OPT_TYPE_BOOL, {.i64 = -1 }, -1, 1, VE},
+
//Pre Analysis options
{ "preanalysis", "Enable preanalysis", OFFSET(preanalysis), AV_OPT_TYPE_BOOL, {.i64 = -1 }, -1, 1, VE },
@@ -265,6 +267,22 @@ FF_ENABLE_DEPRECATION_WARNINGS
}
}
+ if (ctx->smart_access_video != -1) {
+ AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_ENABLE_SMART_ACCESS_VIDEO, ctx->smart_access_video != 0);
+ if (res != AMF_OK) {
+ av_log(avctx, AV_LOG_ERROR, "The Smart Access Video is not supported by AMF.\n");
+ if (ctx->smart_access_video != 0)
+ return AVERROR(ENOSYS);
+ } else {
+ av_log(avctx, AV_LOG_INFO, "The Smart Access Video (%d) is set.\n", ctx->smart_access_video);
+ // Set low latency mode if Smart Access Video is enabled
+ if (ctx->smart_access_video != 0) {
+ AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_ENCODING_LATENCY_MODE, AMF_VIDEO_ENCODER_AV1_ENCODING_LATENCY_MODE_LOWEST_LATENCY);
+ av_log(avctx, AV_LOG_INFO, "The Smart Access Video set low latency mode.\n");
+ }
+ }
+ }
+
// Pre-Pass, Pre-Analysis, Two-Pass
if (ctx->rate_control_mode == AMF_VIDEO_ENCODER_AV1_RATE_CONTROL_METHOD_CONSTANT_QP) {
AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_RATE_CONTROL_PREENCODE, 0);
diff --git a/libavcodec/amfenc_h264.c b/libavcodec/amfenc_h264.c
index f785e091c9..f83a9fcaa7 100644
--- a/libavcodec/amfenc_h264.c
+++ b/libavcodec/amfenc_h264.c
@@ -136,6 +136,8 @@ static const AVOption options[] = {
{ "log_to_dbg", "Enable AMF logging to debug output", OFFSET(log_to_dbg) , AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
+ { "smart_access_video", "Enable Smart Access Video", OFFSET(smart_access_video), AV_OPT_TYPE_BOOL, {.i64 = -1 }, -1, 1, VE},
+
//Pre Analysis options
{ "preanalysis", "Enable preanalysis", OFFSET(preanalysis), AV_OPT_TYPE_BOOL, {.i64 = -1 }, -1, 1, VE },
@@ -369,6 +371,22 @@ FF_ENABLE_DEPRECATION_WARNINGS
av_log(ctx, AV_LOG_WARNING, "rate control mode is PEAK_CONSTRAINED_VBR but rc_max_rate is not set\n");
}
+ if (ctx->smart_access_video != -1) {
+ AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_ENABLE_SMART_ACCESS_VIDEO, ctx->smart_access_video != 0);
+ if (res != AMF_OK) {
+ av_log(avctx, AV_LOG_ERROR, "The Smart Access Video is not supported by AMF.\n");
+ if (ctx->smart_access_video != 0)
+ return AVERROR(ENOSYS);
+ } else {
+ av_log(avctx, AV_LOG_INFO, "The Smart Access Video (%d) is set.\n", ctx->smart_access_video);
+ // Set low latency mode if Smart Access Video is enabled
+ if (ctx->smart_access_video != 0) {
+ AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_LOWLATENCY_MODE, true);
+ av_log(avctx, AV_LOG_INFO, "The Smart Access Video set low latency mode.\n");
+ }
+ }
+ }
+
if (ctx->preanalysis != -1) {
AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_PRE_ANALYSIS_ENABLE, !!((ctx->preanalysis == 0) ? false : true));
}
diff --git a/libavcodec/amfenc_hevc.c b/libavcodec/amfenc_hevc.c
index 8c6401c646..f3caa7cb29 100644
--- a/libavcodec/amfenc_hevc.c
+++ b/libavcodec/amfenc_hevc.c
@@ -100,6 +100,8 @@ static const AVOption options[] = {
{ "log_to_dbg", "Enable AMF logging to debug output", OFFSET(log_to_dbg), AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE },
+ { "smart_access_video", "Enable Smart Access Video", OFFSET(smart_access_video), AV_OPT_TYPE_BOOL, {.i64 = -1 }, -1, 1, VE},
+
//Pre Analysis options
{ "preanalysis", "Enable preanalysis", OFFSET(preanalysis), AV_OPT_TYPE_BOOL, {.i64 = -1 }, -1, 1, VE },
@@ -265,6 +267,22 @@ FF_ENABLE_DEPRECATION_WARNINGS
}
}
+ if (ctx->smart_access_video != -1) {
+ AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_ENABLE_SMART_ACCESS_VIDEO, ctx->smart_access_video != 0);
+ if (res != AMF_OK) {
+ av_log(avctx, AV_LOG_ERROR, "The Smart Access Video is not supported by AMF.\n");
+ if (ctx->smart_access_video != 0)
+ return AVERROR(ENOSYS);
+ } else {
+ av_log(avctx, AV_LOG_INFO, "The Smart Access Video (%d) is set.\n", ctx->smart_access_video);
+ // Set low latency mode if Smart Access Video is enabled
+ if (ctx->smart_access_video != 0) {
+ AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_LOWLATENCY_MODE, true);
+ av_log(avctx, AV_LOG_INFO, "The Smart Access Video set low latency mode.\n");
+ }
+ }
+ }
+
// Pre-Pass, Pre-Analysis, Two-Pass
if (ctx->rate_control_mode == AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_CONSTANT_QP) {
AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_PREENCODE_ENABLE, 0);
--
2.38.1.windows.1
_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
^ permalink raw reply [flat|nested] 18+ messages in thread
* [FFmpeg-devel] [PATCH 7/9] libavcodec/amfenc: redesign to use hwcontext_amf.
2024-02-14 1:55 [FFmpeg-devel] [PATCH 1/9] libavutil: add hwcontext_amf Dmitrii Ovchinnikov
` (4 preceding siblings ...)
2024-02-14 1:55 ` [FFmpeg-devel] [PATCH 6/9] avcodec/amfenc: add smart access video option Dmitrii Ovchinnikov
@ 2024-02-14 1:55 ` Dmitrii Ovchinnikov
2024-02-14 1:55 ` [FFmpeg-devel] [PATCH 8/9] avfilter/scale_amf: Add AMF HW scaler & color converter Dmitrii Ovchinnikov
` (3 subsequent siblings)
9 siblings, 0 replies; 18+ messages in thread
From: Dmitrii Ovchinnikov @ 2024-02-14 1:55 UTC (permalink / raw)
To: ffmpeg-devel; +Cc: Evgeny Pavlov, Dmitrii Ovchinnikov
Co-authored-by: Evgeny Pavlov <lucenticus@gmail.com>
---
libavcodec/amfenc.c | 560 ++++++++++++--------------------------------
libavcodec/amfenc.h | 23 +-
2 files changed, 158 insertions(+), 425 deletions(-)
diff --git a/libavcodec/amfenc.c b/libavcodec/amfenc.c
index f1b76bd6aa..e6e5302cca 100644
--- a/libavcodec/amfenc.c
+++ b/libavcodec/amfenc.c
@@ -29,6 +29,7 @@
#define COBJMACROS
#include "libavutil/hwcontext_dxva2.h"
#endif
+#include "libavutil/hwcontext_amf.h"
#include "libavutil/mem.h"
#include "libavutil/pixdesc.h"
#include "libavutil/time.h"
@@ -38,6 +39,18 @@
#include "internal.h"
#include "libavutil/mastering_display_metadata.h"
+#if CONFIG_D3D11VA
+#include <d3d11.h>
+#endif
+
+#ifdef _WIN32
+#include "compat/w32dlfcn.h"
+#else
+#include <dlfcn.h>
+#endif
+
+#define PTS_PROP L"PtsProp"
+
static int amf_save_hdr_metadata(AVCodecContext *avctx, const AVFrame *frame, AMFHDRMetadata *hdrmeta)
{
AVFrameSideData *sd_display;
@@ -88,20 +101,6 @@ static int amf_save_hdr_metadata(AVCodecContext *avctx, const AVFrame *frame, AM
return 1;
}
-#if CONFIG_D3D11VA
-#include <d3d11.h>
-#endif
-
-#ifdef _WIN32
-#include "compat/w32dlfcn.h"
-#else
-#include <dlfcn.h>
-#endif
-
-#define FFMPEG_AMF_WRITER_ID L"ffmpeg_amf"
-
-#define PTS_PROP L"PtsProp"
-
const enum AVPixelFormat ff_amf_pix_fmts[] = {
AV_PIX_FMT_NV12,
AV_PIX_FMT_YUV420P,
@@ -111,289 +110,18 @@ const enum AVPixelFormat ff_amf_pix_fmts[] = {
#if CONFIG_DXVA2
AV_PIX_FMT_DXVA2_VLD,
#endif
+ AV_PIX_FMT_AMF,
AV_PIX_FMT_P010,
AV_PIX_FMT_NONE
};
-typedef struct FormatMap {
- enum AVPixelFormat av_format;
- enum AMF_SURFACE_FORMAT amf_format;
-} FormatMap;
-
-static const FormatMap format_map[] =
-{
- { AV_PIX_FMT_NONE, AMF_SURFACE_UNKNOWN },
- { AV_PIX_FMT_NV12, AMF_SURFACE_NV12 },
- { AV_PIX_FMT_P010, AMF_SURFACE_P010 },
- { AV_PIX_FMT_BGR0, AMF_SURFACE_BGRA },
- { AV_PIX_FMT_RGB0, AMF_SURFACE_RGBA },
- { AV_PIX_FMT_GRAY8, AMF_SURFACE_GRAY8 },
- { AV_PIX_FMT_YUV420P, AMF_SURFACE_YUV420P },
- { AV_PIX_FMT_YUYV422, AMF_SURFACE_YUY2 },
-};
-
-static enum AMF_SURFACE_FORMAT amf_av_to_amf_format(enum AVPixelFormat fmt)
-{
- int i;
- for (i = 0; i < amf_countof(format_map); i++) {
- if (format_map[i].av_format == fmt) {
- return format_map[i].amf_format;
- }
- }
- return AMF_SURFACE_UNKNOWN;
-}
-
-static void AMF_CDECL_CALL AMFTraceWriter_Write(AMFTraceWriter *pThis,
- const wchar_t *scope, const wchar_t *message)
-{
- AmfTraceWriter *tracer = (AmfTraceWriter*)pThis;
- av_log(tracer->avctx, AV_LOG_DEBUG, "%ls: %ls", scope, message); // \n is provided from AMF
-}
-
-static void AMF_CDECL_CALL AMFTraceWriter_Flush(AMFTraceWriter *pThis)
-{
-}
-
-static AMFTraceWriterVtbl tracer_vtbl =
-{
- .Write = AMFTraceWriter_Write,
- .Flush = AMFTraceWriter_Flush,
-};
-
-static int amf_load_library(AVCodecContext *avctx)
-{
- AmfContext *ctx = avctx->priv_data;
- AMFInit_Fn init_fun;
- AMFQueryVersion_Fn version_fun;
- AMF_RESULT res;
-
- ctx->delayed_frame = av_frame_alloc();
- if (!ctx->delayed_frame) {
- return AVERROR(ENOMEM);
- }
- // hardcoded to current HW queue size - will auto-realloc if too small
- ctx->timestamp_list = av_fifo_alloc2(avctx->max_b_frames + 16, sizeof(int64_t),
- AV_FIFO_FLAG_AUTO_GROW);
- if (!ctx->timestamp_list) {
- return AVERROR(ENOMEM);
- }
- ctx->dts_delay = 0;
-
-
- ctx->library = dlopen(AMF_DLL_NAMEA, RTLD_NOW | RTLD_LOCAL);
- AMF_RETURN_IF_FALSE(ctx, ctx->library != NULL,
- AVERROR_UNKNOWN, "DLL %s failed to open\n", AMF_DLL_NAMEA);
-
- init_fun = (AMFInit_Fn)dlsym(ctx->library, AMF_INIT_FUNCTION_NAME);
- AMF_RETURN_IF_FALSE(ctx, init_fun != NULL, AVERROR_UNKNOWN, "DLL %s failed to find function %s\n", AMF_DLL_NAMEA, AMF_INIT_FUNCTION_NAME);
-
- version_fun = (AMFQueryVersion_Fn)dlsym(ctx->library, AMF_QUERY_VERSION_FUNCTION_NAME);
- AMF_RETURN_IF_FALSE(ctx, version_fun != NULL, AVERROR_UNKNOWN, "DLL %s failed to find function %s\n", AMF_DLL_NAMEA, AMF_QUERY_VERSION_FUNCTION_NAME);
-
- res = version_fun(&ctx->version);
- AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "%s failed with error %d\n", AMF_QUERY_VERSION_FUNCTION_NAME, res);
- res = init_fun(AMF_FULL_VERSION, &ctx->factory);
- AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "%s failed with error %d\n", AMF_INIT_FUNCTION_NAME, res);
- res = ctx->factory->pVtbl->GetTrace(ctx->factory, &ctx->trace);
- AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "GetTrace() failed with error %d\n", res);
- res = ctx->factory->pVtbl->GetDebug(ctx->factory, &ctx->debug);
- AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "GetDebug() failed with error %d\n", res);
- return 0;
-}
-
-#if CONFIG_D3D11VA
-static int amf_init_from_d3d11_device(AVCodecContext *avctx, AVD3D11VADeviceContext *hwctx)
-{
- AmfContext *ctx = avctx->priv_data;
- AMF_RESULT res;
-
- res = ctx->context->pVtbl->InitDX11(ctx->context, hwctx->device, AMF_DX11_1);
- if (res != AMF_OK) {
- if (res == AMF_NOT_SUPPORTED)
- av_log(avctx, AV_LOG_ERROR, "AMF via D3D11 is not supported on the given device.\n");
- else
- av_log(avctx, AV_LOG_ERROR, "AMF failed to initialise on the given D3D11 device: %d.\n", res);
- return AVERROR(ENODEV);
- }
-
- return 0;
-}
-#endif
-
-#if CONFIG_DXVA2
-static int amf_init_from_dxva2_device(AVCodecContext *avctx, AVDXVA2DeviceContext *hwctx)
-{
- AmfContext *ctx = avctx->priv_data;
- HANDLE device_handle;
- IDirect3DDevice9 *device;
- HRESULT hr;
- AMF_RESULT res;
- int ret;
-
- hr = IDirect3DDeviceManager9_OpenDeviceHandle(hwctx->devmgr, &device_handle);
- if (FAILED(hr)) {
- av_log(avctx, AV_LOG_ERROR, "Failed to open device handle for Direct3D9 device: %lx.\n", (unsigned long)hr);
- return AVERROR_EXTERNAL;
- }
-
- hr = IDirect3DDeviceManager9_LockDevice(hwctx->devmgr, device_handle, &device, FALSE);
- if (SUCCEEDED(hr)) {
- IDirect3DDeviceManager9_UnlockDevice(hwctx->devmgr, device_handle, FALSE);
- ret = 0;
- } else {
- av_log(avctx, AV_LOG_ERROR, "Failed to lock device handle for Direct3D9 device: %lx.\n", (unsigned long)hr);
- ret = AVERROR_EXTERNAL;
- }
-
- IDirect3DDeviceManager9_CloseDeviceHandle(hwctx->devmgr, device_handle);
-
- if (ret < 0)
- return ret;
-
- res = ctx->context->pVtbl->InitDX9(ctx->context, device);
-
- IDirect3DDevice9_Release(device);
-
- if (res != AMF_OK) {
- if (res == AMF_NOT_SUPPORTED)
- av_log(avctx, AV_LOG_ERROR, "AMF via D3D9 is not supported on the given device.\n");
- else
- av_log(avctx, AV_LOG_ERROR, "AMF failed to initialise on given D3D9 device: %d.\n", res);
- return AVERROR(ENODEV);
- }
-
- return 0;
-}
-#endif
-
-static int amf_init_context(AVCodecContext *avctx)
-{
- AmfContext *ctx = avctx->priv_data;
- AMFContext1 *context1 = NULL;
- AMF_RESULT res;
- av_unused int ret;
-
- ctx->hwsurfaces_in_queue = 0;
- ctx->hwsurfaces_in_queue_max = 16;
-
- // configure AMF logger
- // the return of these functions indicates old state and do not affect behaviour
- ctx->trace->pVtbl->EnableWriter(ctx->trace, AMF_TRACE_WRITER_DEBUG_OUTPUT, ctx->log_to_dbg != 0 );
- if (ctx->log_to_dbg)
- ctx->trace->pVtbl->SetWriterLevel(ctx->trace, AMF_TRACE_WRITER_DEBUG_OUTPUT, AMF_TRACE_TRACE);
- ctx->trace->pVtbl->EnableWriter(ctx->trace, AMF_TRACE_WRITER_CONSOLE, 0);
- ctx->trace->pVtbl->SetGlobalLevel(ctx->trace, AMF_TRACE_TRACE);
-
- // connect AMF logger to av_log
- ctx->tracer.vtbl = &tracer_vtbl;
- ctx->tracer.avctx = avctx;
- ctx->trace->pVtbl->RegisterWriter(ctx->trace, FFMPEG_AMF_WRITER_ID,(AMFTraceWriter*)&ctx->tracer, 1);
- ctx->trace->pVtbl->SetWriterLevel(ctx->trace, FFMPEG_AMF_WRITER_ID, AMF_TRACE_TRACE);
-
- res = ctx->factory->pVtbl->CreateContext(ctx->factory, &ctx->context);
- AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "CreateContext() failed with error %d\n", res);
-
- // If a device was passed to the encoder, try to initialise from that.
- if (avctx->hw_frames_ctx) {
- AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
-
- if (amf_av_to_amf_format(frames_ctx->sw_format) == AMF_SURFACE_UNKNOWN) {
- av_log(avctx, AV_LOG_ERROR, "Format of input frames context (%s) is not supported by AMF.\n",
- av_get_pix_fmt_name(frames_ctx->sw_format));
- return AVERROR(EINVAL);
- }
-
- switch (frames_ctx->device_ctx->type) {
-#if CONFIG_D3D11VA
- case AV_HWDEVICE_TYPE_D3D11VA:
- ret = amf_init_from_d3d11_device(avctx, frames_ctx->device_ctx->hwctx);
- if (ret < 0)
- return ret;
- break;
-#endif
-#if CONFIG_DXVA2
- case AV_HWDEVICE_TYPE_DXVA2:
- ret = amf_init_from_dxva2_device(avctx, frames_ctx->device_ctx->hwctx);
- if (ret < 0)
- return ret;
- break;
-#endif
- default:
- av_log(avctx, AV_LOG_ERROR, "AMF initialisation from a %s frames context is not supported.\n",
- av_hwdevice_get_type_name(frames_ctx->device_ctx->type));
- return AVERROR(ENOSYS);
- }
-
- ctx->hw_frames_ctx = av_buffer_ref(avctx->hw_frames_ctx);
- if (!ctx->hw_frames_ctx)
- return AVERROR(ENOMEM);
-
- if (frames_ctx->initial_pool_size > 0)
- ctx->hwsurfaces_in_queue_max = frames_ctx->initial_pool_size - 1;
-
- } else if (avctx->hw_device_ctx) {
- AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)avctx->hw_device_ctx->data;
-
- switch (device_ctx->type) {
-#if CONFIG_D3D11VA
- case AV_HWDEVICE_TYPE_D3D11VA:
- ret = amf_init_from_d3d11_device(avctx, device_ctx->hwctx);
- if (ret < 0)
- return ret;
- break;
-#endif
-#if CONFIG_DXVA2
- case AV_HWDEVICE_TYPE_DXVA2:
- ret = amf_init_from_dxva2_device(avctx, device_ctx->hwctx);
- if (ret < 0)
- return ret;
- break;
-#endif
- default:
- av_log(avctx, AV_LOG_ERROR, "AMF initialisation from a %s device is not supported.\n",
- av_hwdevice_get_type_name(device_ctx->type));
- return AVERROR(ENOSYS);
- }
-
- ctx->hw_device_ctx = av_buffer_ref(avctx->hw_device_ctx);
- if (!ctx->hw_device_ctx)
- return AVERROR(ENOMEM);
-
- } else {
- res = ctx->context->pVtbl->InitDX11(ctx->context, NULL, AMF_DX11_1);
- if (res == AMF_OK) {
- av_log(avctx, AV_LOG_VERBOSE, "AMF initialisation succeeded via D3D11.\n");
- } else {
- res = ctx->context->pVtbl->InitDX9(ctx->context, NULL);
- if (res == AMF_OK) {
- av_log(avctx, AV_LOG_VERBOSE, "AMF initialisation succeeded via D3D9.\n");
- } else {
- AMFGuid guid = IID_AMFContext1();
- res = ctx->context->pVtbl->QueryInterface(ctx->context, &guid, (void**)&context1);
- AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "CreateContext1() failed with error %d\n", res);
-
- res = context1->pVtbl->InitVulkan(context1, NULL);
- context1->pVtbl->Release(context1);
- if (res != AMF_OK) {
- if (res == AMF_NOT_SUPPORTED)
- av_log(avctx, AV_LOG_ERROR, "AMF via Vulkan is not supported on the given device.\n");
- else
- av_log(avctx, AV_LOG_ERROR, "AMF failed to initialise on the given Vulkan device: %d.\n", res);
- return AVERROR(ENOSYS);
- }
- av_log(avctx, AV_LOG_VERBOSE, "AMF initialisation succeeded via Vulkan.\n");
- }
- }
- }
- return 0;
-}
-
static int amf_init_encoder(AVCodecContext *avctx)
{
AmfContext *ctx = avctx->priv_data;
const wchar_t *codec_id = NULL;
AMF_RESULT res;
enum AVPixelFormat pix_fmt;
+ AVAMFDeviceContextInternal* internal = (AVAMFDeviceContextInternal *)ctx->amf_device_ctx_internal->data;
switch (avctx->codec->id) {
case AV_CODEC_ID_H264:
@@ -415,13 +143,16 @@ static int amf_init_encoder(AVCodecContext *avctx)
else
pix_fmt = avctx->pix_fmt;
- ctx->format = amf_av_to_amf_format(pix_fmt);
+ if (avctx->pix_fmt != AV_PIX_FMT_AMF)
+ ctx->format = av_amf_av_to_amf_format(pix_fmt);
+ else
+ ctx->format = av_amf_av_to_amf_format(avctx->sw_pix_fmt);
+
AMF_RETURN_IF_FALSE(ctx, ctx->format != AMF_SURFACE_UNKNOWN, AVERROR(EINVAL),
- "Format %s is not supported\n", av_get_pix_fmt_name(pix_fmt));
+ "Format %s is not supported\n", av_get_pix_fmt_name(pix_fmt));
- res = ctx->factory->pVtbl->CreateComponent(ctx->factory, ctx->context, codec_id, &ctx->encoder);
+ res = internal->factory->pVtbl->CreateComponent(internal->factory, internal->context, codec_id, &ctx->encoder);
AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_ENCODER_NOT_FOUND, "CreateComponent(%ls) failed with error %d\n", codec_id, res);
-
return 0;
}
@@ -429,49 +160,61 @@ int av_cold ff_amf_encode_close(AVCodecContext *avctx)
{
AmfContext *ctx = avctx->priv_data;
- if (ctx->delayed_surface) {
- ctx->delayed_surface->pVtbl->Release(ctx->delayed_surface);
- ctx->delayed_surface = NULL;
- }
-
if (ctx->encoder) {
ctx->encoder->pVtbl->Terminate(ctx->encoder);
ctx->encoder->pVtbl->Release(ctx->encoder);
ctx->encoder = NULL;
}
- if (ctx->context) {
- ctx->context->pVtbl->Terminate(ctx->context);
- ctx->context->pVtbl->Release(ctx->context);
- ctx->context = NULL;
- }
av_buffer_unref(&ctx->hw_device_ctx);
av_buffer_unref(&ctx->hw_frames_ctx);
+ av_buffer_unref(&ctx->amf_device_ctx_internal);
+ av_fifo_freep2(&ctx->timestamp_list);
+
+ return 0;
+}
- if (ctx->trace) {
- ctx->trace->pVtbl->UnregisterWriter(ctx->trace, FFMPEG_AMF_WRITER_ID);
+static int amf_init_encoder_context(AVCodecContext *avctx)
+{
+ AmfContext *ctx = avctx->priv_data;
+ AMFContext1 *context1 = NULL;
+ int ret;
+
+ if (avctx->hw_frames_ctx) {
+ AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
+ ret = av_amf_context_derive((AVAMFDeviceContextInternal *)ctx->amf_device_ctx_internal->data, frames_ctx->device_ctx, NULL, 0);
+ if (ret < 0)
+ return ret;
+ ctx->hw_frames_ctx = av_buffer_ref(avctx->hw_frames_ctx);
+ if (!ctx->hw_frames_ctx)
+ return AVERROR(ENOMEM);
}
- if (ctx->library) {
- dlclose(ctx->library);
- ctx->library = NULL;
+ else if (avctx->hw_device_ctx) {
+ AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)avctx->hw_device_ctx->data;
+ ret = av_amf_context_derive((AVAMFDeviceContextInternal *)ctx->amf_device_ctx_internal->data, device_ctx, NULL, 0);
+ if (ret < 0)
+ return ret;
+ ctx->hw_device_ctx = av_buffer_ref(avctx->hw_device_ctx);
+ if (!ctx->hw_device_ctx)
+ return AVERROR(ENOMEM);
+
+ } else {
+ ret = av_amf_context_init((AVAMFDeviceContextInternal *)ctx->amf_device_ctx_internal->data, avctx);
+ if (ret != 0) {
+ return ret;
+ }
}
- ctx->trace = NULL;
- ctx->debug = NULL;
- ctx->factory = NULL;
- ctx->version = 0;
- ctx->delayed_drain = 0;
- av_frame_free(&ctx->delayed_frame);
- av_fifo_freep2(&ctx->timestamp_list);
- return 0;
+
+ return ret;
}
static int amf_copy_surface(AVCodecContext *avctx, const AVFrame *frame,
AMFSurface* surface)
{
AMFPlane *plane;
- uint8_t *dst_data[4];
- int dst_linesize[4];
+ uint8_t *dst_data[4] = {0};
+ int dst_linesize[4] = {0};
int planes;
int i;
@@ -555,13 +298,52 @@ int ff_amf_encode_init(AVCodecContext *avctx)
{
int ret;
- if ((ret = amf_load_library(avctx)) == 0) {
- if ((ret = amf_init_context(avctx)) == 0) {
- if ((ret = amf_init_encoder(avctx)) == 0) {
- return 0;
- }
+ AmfContext *ctx = avctx->priv_data;
+ AVHWDeviceContext *hwdev_ctx = NULL;
+ if (avctx->hw_device_ctx) {
+ hwdev_ctx = (AVHWDeviceContext*)avctx->hw_device_ctx->data;
+ } else if (avctx->hw_frames_ctx) {
+ AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
+ hwdev_ctx = (AVHWDeviceContext*)frames_ctx->device_ctx;
+ }
+ if (av_amf_trace_writer.avctx == NULL)
+ av_amf_trace_writer.avctx = avctx;
+ // hardcoded to current HW queue size - will auto-realloc if too small
+ ctx->timestamp_list = av_fifo_alloc2(avctx->max_b_frames + 16, sizeof(int64_t),
+ AV_FIFO_FLAG_AUTO_GROW);
+ if (!ctx->timestamp_list) {
+ return AVERROR(ENOMEM);
+ }
+ ctx->dts_delay = 0;
+
+ ctx->hwsurfaces_in_queue = 0;
+ ctx->hwsurfaces_in_queue_max = 16;
+
+ if (avctx->hw_frames_ctx && hwdev_ctx && hwdev_ctx->type == AV_HWDEVICE_TYPE_AMF) {
+ AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
+ AVAMFDeviceContext * amf_ctx = hwdev_ctx->hwctx;
+ ctx->amf_device_ctx_internal = av_buffer_ref(amf_ctx->internal);
+ }
+ else if (avctx->hw_device_ctx && hwdev_ctx && hwdev_ctx->type == AV_HWDEVICE_TYPE_AMF) {
+ AVAMFDeviceContext * amf_ctx = hwdev_ctx->hwctx;
+ ctx->amf_device_ctx_internal = av_buffer_ref(amf_ctx->internal);
+ } else {
+ AVAMFDeviceContextInternal *wrapped = av_mallocz(sizeof(*wrapped));
+ ctx->amf_device_ctx_internal = av_buffer_create((uint8_t *)wrapped, sizeof(*wrapped),
+ av_amf_context_internal_free, NULL, 0);
+ if ((ret = av_amf_context_internal_create((AVAMFDeviceContextInternal *)ctx->amf_device_ctx_internal->data, avctx, "", NULL, 0)) != 0) {
+ ff_amf_encode_close(avctx);
+ return ret;
}
+ if ((ret = amf_init_encoder_context(avctx)) != 0) {
+ ff_amf_encode_close(avctx);
+ return ret;
+ }
+ }
+ if ((ret = amf_init_encoder(avctx)) == 0) {
+ return 0;
}
+
ff_amf_encode_close(avctx);
return ret;
}
@@ -639,30 +421,28 @@ static void amf_release_buffer_with_frame_ref(AMFBuffer *frame_ref_storage_buffe
int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
{
AmfContext *ctx = avctx->priv_data;
+ AVAMFDeviceContextInternal * internal = (AVAMFDeviceContextInternal *)ctx->amf_device_ctx_internal->data;
AMFSurface *surface;
AMF_RESULT res;
int ret;
AMF_RESULT res_query;
AMFData *data = NULL;
- AVFrame *frame = ctx->delayed_frame;
+ AVFrame *frame = av_frame_alloc();
int block_and_wait;
int query_output_data_flag = 0;
AMF_RESULT res_resubmit;
+ int count = 0;
if (!ctx->encoder)
return AVERROR(EINVAL);
- if (!frame->buf[0]) {
- ret = ff_encode_get_frame(avctx, frame);
- if (ret < 0 && ret != AVERROR_EOF)
- return ret;
- }
+ ret = ff_encode_get_frame(avctx, frame);
+ if (ret < 0 && ret != AVERROR_EOF)
+ return ret;
if (!frame->buf[0]) { // submit drain
if (!ctx->eof) { // submit drain one time only
- if (ctx->delayed_surface != NULL) {
- ctx->delayed_drain = 1; // input queue is full: resubmit Drain() in ff_amf_receive_packet
- } else if(!ctx->delayed_drain) {
+ if(!ctx->delayed_drain) {
res = ctx->encoder->pVtbl->Drain(ctx->encoder);
if (res == AMF_INPUT_FULL) {
ctx->delayed_drain = 1; // input queue is full: resubmit Drain() in ff_amf_receive_packet
@@ -674,7 +454,7 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
}
}
}
- } else if (!ctx->delayed_surface) { // submit frame
+ } else { // submit frame
int hw_surface = 0;
// prepare surface from frame
@@ -691,7 +471,7 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
texture->lpVtbl->SetPrivateData(texture, &AMFTextureArrayIndexGUID, sizeof(index), &index);
- res = ctx->context->pVtbl->CreateSurfaceFromDX11Native(ctx->context, texture, &surface, NULL); // wrap to AMF surface
+ res = internal->context->pVtbl->CreateSurfaceFromDX11Native(internal->context, texture, &surface, NULL); // wrap to AMF surface
AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR(ENOMEM), "CreateSurfaceFromDX11Native() failed with error %d\n", res);
hw_surface = 1;
@@ -703,16 +483,23 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
{
IDirect3DSurface9 *texture = (IDirect3DSurface9 *)frame->data[3]; // actual texture
- res = ctx->context->pVtbl->CreateSurfaceFromDX9Native(ctx->context, texture, &surface, NULL); // wrap to AMF surface
+ res = internal->context->pVtbl->CreateSurfaceFromDX9Native(internal->context, texture, &surface, NULL); // wrap to AMF surface
AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR(ENOMEM), "CreateSurfaceFromDX9Native() failed with error %d\n", res);
hw_surface = 1;
}
break;
#endif
+ case AV_PIX_FMT_AMF:
+ {
+ surface = (AMFSurface*)frame->data[3];
+ surface->pVtbl->Acquire(surface);
+ hw_surface = 1;
+ }
+ break;
default:
{
- res = ctx->context->pVtbl->AllocSurface(ctx->context, AMF_MEMORY_HOST, ctx->format, avctx->width, avctx->height, &surface);
+ res = internal->context->pVtbl->AllocSurface(internal->context, AMF_MEMORY_HOST, ctx->format, avctx->width, avctx->height, &surface);
AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR(ENOMEM), "AllocSurface() failed with error %d\n", res);
amf_copy_surface(avctx, frame, surface);
}
@@ -725,7 +512,7 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
// input HW surfaces can be vertically aligned by 16; tell AMF the real size
surface->pVtbl->SetCrop(surface, 0, 0, frame->width, frame->height);
- frame_ref_storage_buffer = amf_create_buffer_with_frame_ref(frame, ctx->context);
+ frame_ref_storage_buffer = amf_create_buffer_with_frame_ref(frame, internal->context);
AMF_RETURN_IF_FALSE(ctx, frame_ref_storage_buffer != NULL, AVERROR(ENOMEM), "create_buffer_with_frame_ref() returned NULL\n");
res = amf_set_property_buffer(surface, L"av_frame_ref", frame_ref_storage_buffer);
@@ -737,7 +524,7 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
// HDR10 metadata
if (frame->color_trc == AVCOL_TRC_SMPTE2084) {
AMFBuffer * hdrmeta_buffer = NULL;
- res = ctx->context->pVtbl->AllocBuffer(ctx->context, AMF_MEMORY_HOST, sizeof(AMFHDRMetadata), &hdrmeta_buffer);
+ res = internal->context->pVtbl->AllocBuffer(internal->context, AMF_MEMORY_HOST, sizeof(AMFHDRMetadata), &hdrmeta_buffer);
if (res == AMF_OK) {
AMFHDRMetadata * hdrmeta = (AMFHDRMetadata*)hdrmeta_buffer->pVtbl->GetNative(hdrmeta_buffer);
if (amf_save_hdr_metadata(avctx, frame, hdrmeta) == 0) {
@@ -772,13 +559,11 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
// submit surface
res = ctx->encoder->pVtbl->SubmitInput(ctx->encoder, (AMFData*)surface);
if (res == AMF_INPUT_FULL) { // handle full queue
- //store surface for later submission
- ctx->delayed_surface = surface;
+ av_usleep(1000); // wait and poll again
} else {
int64_t pts = frame->pts;
surface->pVtbl->Release(surface);
AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "SubmitInput() failed with error %d\n", res);
-
av_frame_unref(frame);
ret = av_fifo_write(ctx->timestamp_list, &pts, 1);
if (ret < 0)
@@ -790,75 +575,40 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
do {
block_and_wait = 0;
// poll data
- if (!avpkt->data && !avpkt->buf) {
- res_query = ctx->encoder->pVtbl->QueryOutput(ctx->encoder, &data);
- if (data) {
- // copy data to packet
- AMFBuffer *buffer;
- AMFGuid guid = IID_AMFBuffer();
- query_output_data_flag = 1;
- data->pVtbl->QueryInterface(data, &guid, (void**)&buffer); // query for buffer interface
- ret = amf_copy_buffer(avctx, avpkt, buffer);
-
- buffer->pVtbl->Release(buffer);
-
- if (data->pVtbl->HasProperty(data, L"av_frame_ref")) {
- AMFBuffer* frame_ref_storage_buffer;
- res = amf_get_property_buffer(data, L"av_frame_ref", &frame_ref_storage_buffer);
- AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "GetProperty failed for \"av_frame_ref\" with error %d\n", res);
- amf_release_buffer_with_frame_ref(frame_ref_storage_buffer);
- ctx->hwsurfaces_in_queue--;
- }
+ res_query = ctx->encoder->pVtbl->QueryOutput(ctx->encoder, &data);
+ if (data) {
+ // copy data to packet
+ AMFBuffer* buffer;
+ AMFGuid guid = IID_AMFBuffer();
+ data->pVtbl->QueryInterface(data, &guid, (void**)&buffer); // query for buffer interface
+ ret = amf_copy_buffer(avctx, avpkt, buffer);
+
+ buffer->pVtbl->Release(buffer);
+
+ if (data->pVtbl->HasProperty(data, L"av_frame_ref")) {
+ AMFBuffer *frame_ref_storage_buffer;
+ res = amf_get_property_buffer(data, L"av_frame_ref", &frame_ref_storage_buffer);
+ AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "GetProperty failed for \"av_frame_ref\" with error %d\n", res);
+ amf_release_buffer_with_frame_ref(frame_ref_storage_buffer);
+ ctx->hwsurfaces_in_queue--;
+ }
+ data->pVtbl->Release(data);
- data->pVtbl->Release(data);
+ AMF_RETURN_IF_FALSE(ctx, ret >= 0, ret, "amf_copy_buffer() failed with error %d\n", ret);
- AMF_RETURN_IF_FALSE(ctx, ret >= 0, ret, "amf_copy_buffer() failed with error %d\n", ret);
- }
- }
- res_resubmit = AMF_OK;
- if (ctx->delayed_surface != NULL) { // try to resubmit frame
- if (ctx->delayed_surface->pVtbl->HasProperty(ctx->delayed_surface, L"av_frame_hdrmeta")) {
- AMFBuffer * hdrmeta_buffer = NULL;
- res = amf_get_property_buffer((AMFData *)ctx->delayed_surface, L"av_frame_hdrmeta", &hdrmeta_buffer);
- AMF_RETURN_IF_FALSE(avctx, res == AMF_OK, AVERROR_UNKNOWN, "GetProperty failed for \"av_frame_hdrmeta\" with error %d\n", res);
- switch (avctx->codec->id) {
- case AV_CODEC_ID_H264:
- AMF_ASSIGN_PROPERTY_INTERFACE(res, ctx->encoder, AMF_VIDEO_ENCODER_INPUT_HDR_METADATA, hdrmeta_buffer); break;
- case AV_CODEC_ID_HEVC:
- AMF_ASSIGN_PROPERTY_INTERFACE(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_INPUT_HDR_METADATA, hdrmeta_buffer); break;
- case AV_CODEC_ID_AV1:
- AMF_ASSIGN_PROPERTY_INTERFACE(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_INPUT_HDR_METADATA, hdrmeta_buffer); break;
+ if (ctx->delayed_drain) { // try to resubmit drain
+ res = ctx->encoder->pVtbl->Drain(ctx->encoder);
+ if (res != AMF_INPUT_FULL) {
+ ctx->delayed_drain = 0;
+ ctx->eof = 1; // drain started
+ AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "Repeated Drain() failed with error %d\n", res);
+ } else {
+ av_log(avctx, AV_LOG_WARNING, "Data acquired but delayed drain submission got AMF_INPUT_FULL- should not happen\n");
}
- hdrmeta_buffer->pVtbl->Release(hdrmeta_buffer);
- }
- res_resubmit = ctx->encoder->pVtbl->SubmitInput(ctx->encoder, (AMFData*)ctx->delayed_surface);
- if (res_resubmit != AMF_INPUT_FULL) {
- int64_t pts = ctx->delayed_surface->pVtbl->GetPts(ctx->delayed_surface);
- ctx->delayed_surface->pVtbl->Release(ctx->delayed_surface);
- ctx->delayed_surface = NULL;
- av_frame_unref(ctx->delayed_frame);
- AMF_RETURN_IF_FALSE(ctx, res_resubmit == AMF_OK, AVERROR_UNKNOWN, "Repeated SubmitInput() failed with error %d\n", res_resubmit);
-
- ret = av_fifo_write(ctx->timestamp_list, &pts, 1);
- if (ret < 0)
- return ret;
- }
- } else if (ctx->delayed_drain) { // try to resubmit drain
- res = ctx->encoder->pVtbl->Drain(ctx->encoder);
- if (res != AMF_INPUT_FULL) {
- ctx->delayed_drain = 0;
- ctx->eof = 1; // drain started
- AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "Repeated Drain() failed with error %d\n", res);
- } else {
- av_log(avctx, AV_LOG_WARNING, "Data acquired but delayed drain submission got AMF_INPUT_FULL- should not happen\n");
- }
- }
-
- if (query_output_data_flag == 0) {
- if (res_resubmit == AMF_INPUT_FULL || ctx->delayed_drain || (ctx->eof && res_query != AMF_EOF) || (ctx->hwsurfaces_in_queue >= ctx->hwsurfaces_in_queue_max)) {
- block_and_wait = 1;
- av_usleep(1000);
}
+ } else if (ctx->delayed_drain || (ctx->eof && res_query != AMF_EOF) || (ctx->hwsurfaces_in_queue >= ctx->hwsurfaces_in_queue_max)) {
+ block_and_wait = 1;
+ av_usleep(1000); // wait and poll again
}
} while (block_and_wait);
@@ -916,5 +666,7 @@ const AVCodecHWConfigInternal *const ff_amfenc_hw_configs[] = {
HW_CONFIG_ENCODER_FRAMES(DXVA2_VLD, DXVA2),
HW_CONFIG_ENCODER_DEVICE(NONE, DXVA2),
#endif
+ HW_CONFIG_ENCODER_FRAMES(AMF, AMF),
+ HW_CONFIG_ENCODER_DEVICE(NONE, AMF),
NULL,
};
diff --git a/libavcodec/amfenc.h b/libavcodec/amfenc.h
index 1bda0136bd..1f680beb83 100644
--- a/libavcodec/amfenc.h
+++ b/libavcodec/amfenc.h
@@ -25,7 +25,7 @@
#include <AMF/components/VideoEncoderVCE.h>
#include <AMF/components/VideoEncoderHEVC.h>
#include <AMF/components/VideoEncoderAV1.h>
-
+#include "libavutil/hwcontext_amf.h"
#include "libavutil/fifo.h"
#include "avcodec.h"
@@ -33,16 +33,6 @@
#define MAX_LOOKAHEAD_DEPTH 41
-/**
-* AMF trace writer callback class
-* Used to capture all AMF logging
-*/
-
-typedef struct AmfTraceWriter {
- AMFTraceWriterVtbl *vtbl;
- AVCodecContext *avctx;
-} AmfTraceWriter;
-
/**
* AMF encoder context
*/
@@ -50,14 +40,7 @@ typedef struct AmfTraceWriter {
typedef struct AmfContext {
AVClass *avclass;
// access to AMF runtime
- amf_handle library; ///< handle to DLL library
- AMFFactory *factory; ///< pointer to AMF factory
- AMFDebug *debug; ///< pointer to AMF debug interface
- AMFTrace *trace; ///< pointer to AMF trace interface
-
- amf_uint64 version; ///< version of AMF runtime
- AmfTraceWriter tracer; ///< AMF writer registered with AMF
- AMFContext *context; ///< AMF context
+ AVBufferRef *amf_device_ctx_internal;
//encoder
AMFComponent *encoder; ///< AMF encoder object
amf_bool eof; ///< flag indicating EOF happened
@@ -71,8 +54,6 @@ typedef struct AmfContext {
// helpers to handle async calls
int delayed_drain;
- AMFSurface *delayed_surface;
- AVFrame *delayed_frame;
// shift dts back by max_b_frames in timing
AVFifo *timestamp_list;
--
2.38.1.windows.1
_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
^ permalink raw reply [flat|nested] 18+ messages in thread
* [FFmpeg-devel] [PATCH 8/9] avfilter/scale_amf: Add AMF HW scaler & color converter
2024-02-14 1:55 [FFmpeg-devel] [PATCH 1/9] libavutil: add hwcontext_amf Dmitrii Ovchinnikov
` (5 preceding siblings ...)
2024-02-14 1:55 ` [FFmpeg-devel] [PATCH 7/9] libavcodec/amfenc: redesign to use hwcontext_amf Dmitrii Ovchinnikov
@ 2024-02-14 1:55 ` Dmitrii Ovchinnikov
2024-02-14 15:08 ` Timo Rothenpieler
2024-02-14 1:55 ` [FFmpeg-devel] [PATCH 9/9] doc/filters: Add documentation for AMF filters Dmitrii Ovchinnikov
` (2 subsequent siblings)
9 siblings, 1 reply; 18+ messages in thread
From: Dmitrii Ovchinnikov @ 2024-02-14 1:55 UTC (permalink / raw)
To: ffmpeg-devel; +Cc: Evgeny Pavlov
From: Evgeny Pavlov <lucenticus@gmail.com>
This commit adds two AMF filters: scale_amf & scale_amf_hq.
Both filters use AMF hardware acceleration.
scale_amf supports simple scaling algorithms & color conversion.
scale_amf_hq supports advanced scaling algorithms & can be used
for upscaling only.
Signed-off-by: Evgeny Pavlov <lucenticus@gmail.com>
---
configure | 1 +
libavfilter/Makefile | 2 +
libavfilter/allfilters.c | 2 +
libavfilter/vf_scale_amf.c | 266 +++++++++++++++
libavfilter/vf_scale_amf_common.c | 515 ++++++++++++++++++++++++++++++
libavfilter/vf_scale_amf_common.h | 71 ++++
libavfilter/vf_scale_amf_hq.c | 191 +++++++++++
7 files changed, 1048 insertions(+)
create mode 100644 libavfilter/vf_scale_amf.c
create mode 100644 libavfilter/vf_scale_amf_common.c
create mode 100644 libavfilter/vf_scale_amf_common.h
create mode 100644 libavfilter/vf_scale_amf_hq.c
diff --git a/configure b/configure
index f72533b7d2..3d1c44d7ae 100755
--- a/configure
+++ b/configure
@@ -3826,6 +3826,7 @@ rubberband_filter_deps="librubberband"
sab_filter_deps="gpl swscale"
scale2ref_filter_deps="swscale"
scale_filter_deps="swscale"
+scale_amf_filter_deps="amf"
scale_qsv_filter_deps="libmfx"
scale_qsv_filter_select="qsvvpp"
scdet_filter_select="scene_sad"
diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index f6c1d641d6..a87e519b85 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -456,6 +456,8 @@ OBJS-$(CONFIG_ROBERTS_OPENCL_FILTER) += vf_convolution_opencl.o opencl.o
OBJS-$(CONFIG_ROTATE_FILTER) += vf_rotate.o
OBJS-$(CONFIG_SAB_FILTER) += vf_sab.o
OBJS-$(CONFIG_SCALE_FILTER) += vf_scale.o scale_eval.o
+OBJS-$(CONFIG_SCALE_AMF_FILTER) += vf_scale_amf.o scale_eval.o vf_scale_amf_common.o
+OBJS-$(CONFIG_SCALE_AMF_FILTER) += vf_scale_amf_hq.o scale_eval.o vf_scale_amf_common.o
OBJS-$(CONFIG_SCALE_CUDA_FILTER) += vf_scale_cuda.o scale_eval.o \
vf_scale_cuda.ptx.o cuda/load_helper.o
OBJS-$(CONFIG_SCALE_NPP_FILTER) += vf_scale_npp.o scale_eval.o
diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c
index 149bf50997..299cfb148b 100644
--- a/libavfilter/allfilters.c
+++ b/libavfilter/allfilters.c
@@ -430,6 +430,8 @@ extern const AVFilter ff_vf_roberts_opencl;
extern const AVFilter ff_vf_rotate;
extern const AVFilter ff_vf_sab;
extern const AVFilter ff_vf_scale;
+extern const AVFilter ff_vf_scale_amf;
+extern const AVFilter ff_vf_scale_amf_hq;
extern const AVFilter ff_vf_scale_cuda;
extern const AVFilter ff_vf_scale_npp;
extern const AVFilter ff_vf_scale_qsv;
diff --git a/libavfilter/vf_scale_amf.c b/libavfilter/vf_scale_amf.c
new file mode 100644
index 0000000000..6d2c211d83
--- /dev/null
+++ b/libavfilter/vf_scale_amf.c
@@ -0,0 +1,266 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * scale video filter - AMF
+ */
+
+#include <stdio.h>
+#include <string.h>
+
+#include "libavutil/avassert.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/time.h"
+
+#include "libavutil/hwcontext.h"
+#include "libavutil/hwcontext_amf.h"
+
+#include "AMF/components/VideoConverter.h"
+#include "vf_scale_amf_common.h"
+
+#include "avfilter.h"
+#include "formats.h"
+#include "video.h"
+#include "scale_eval.h"
+#include "internal.h"
+
+#if CONFIG_DXVA2
+#include <d3d9.h>
+#endif
+
+#if CONFIG_D3D11VA
+#include <d3d11.h>
+#endif
+
+static int amf_scale_query_formats(AVFilterContext *avctx)
+{
+    const enum AVPixelFormat *output_pix_fmts;
+    /* Software formats accepted on the input pad; AV_PIX_FMT_AMF covers
+     * frames that already live in AMF hardware memory. */
+    static const enum AVPixelFormat input_pix_fmts[] = {
+        AV_PIX_FMT_NV12,
+        AV_PIX_FMT_P010,
+        AV_PIX_FMT_0RGB,
+        AV_PIX_FMT_BGR0,
+        AV_PIX_FMT_BGRA,
+        AV_PIX_FMT_RGB0,
+        AV_PIX_FMT_RGBA,
+        AV_PIX_FMT_GRAY8,
+        AV_PIX_FMT_YUV420P,
+        AV_PIX_FMT_YUV420P10,
+        AV_PIX_FMT_YUYV422,
+        AV_PIX_FMT_AMF,
+        AV_PIX_FMT_NONE,
+    };
+    /* Default output formats; amf_setup_input_output_formats() replaces this
+     * list with a D3D11- or DXVA2-only list when a matching hw_device_ctx
+     * is supplied. */
+    static const enum AVPixelFormat output_pix_fmts_default[] = {
+        AV_PIX_FMT_AMF,
+        AV_PIX_FMT_D3D11,
+        AV_PIX_FMT_DXVA2_VLD,
+        AV_PIX_FMT_NV12,
+        AV_PIX_FMT_BGRA,
+        AV_PIX_FMT_YUV420P,
+        AV_PIX_FMT_NONE,
+    };
+    output_pix_fmts = output_pix_fmts_default;
+
+    return amf_setup_input_output_formats(avctx, input_pix_fmts, output_pix_fmts);
+}
+
+/**
+ * Output-pad config callback: creates and initializes the AMFVideoConverter
+ * component with the negotiated size, format and color properties.
+ */
+static int amf_scale_config_output(AVFilterLink *outlink)
+{
+    AVFilterContext *avctx = outlink->src;
+    AVFilterLink *inlink = avctx->inputs[0];
+    AMFScaleContext *ctx = avctx->priv;
+    AVAMFDeviceContextInternal * internal = NULL;
+    AVHWFramesContext *hwframes_out = NULL;
+    AMFSize out_size;
+    int err;
+    AMF_RESULT res;
+    enum AMF_VIDEO_CONVERTER_COLOR_PROFILE_ENUM amf_color_profile;
+    enum AVPixelFormat in_format;
+
+    // Evaluates output dimensions, sets up the device/frames contexts and
+    // reports the effective (software) input pixel format.
+    err = amf_init_scale_config(outlink, &in_format);
+    if (err < 0)
+        return err;
+    // FIXME: add checks whether we have HW context
+    hwframes_out = (AVHWFramesContext*)ctx->hwframes_out_ref->data;
+
+    internal = (AVAMFDeviceContextInternal * )ctx->amf_device_ctx_internal->data;
+    res = internal->factory->pVtbl->CreateComponent(internal->factory, internal->context, AMFVideoConverter, &ctx->scaler);
+    // Log on the filter context (was 'ctx', the private struct), matching
+    // every other check in this function.
+    AMF_RETURN_IF_FALSE(avctx, res == AMF_OK, AVERROR_FILTER_NOT_FOUND, "CreateComponent(%ls) failed with error %d\n", AMFVideoConverter, res);
+    // FIXME: add checks whether we have HW context
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->scaler, AMF_VIDEO_CONVERTER_OUTPUT_FORMAT, (amf_int32)av_amf_av_to_amf_format(hwframes_out->sw_format));
+    AMF_RETURN_IF_FALSE(avctx, res == AMF_OK, AVERROR_UNKNOWN, "AMFConverter-SetProperty() failed with error %d\n", res);
+
+    out_size.width = outlink->w;
+    out_size.height = outlink->h;
+    AMF_ASSIGN_PROPERTY_SIZE(res, ctx->scaler, AMF_VIDEO_CONVERTER_OUTPUT_SIZE, out_size);
+    AMF_RETURN_IF_FALSE(avctx, res == AMF_OK, AVERROR_UNKNOWN, "AMFConverter-SetProperty() failed with error %d\n", res);
+
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->scaler, AMF_VIDEO_CONVERTER_SCALE, (amf_int32)ctx->scale_type);
+    AMF_RETURN_IF_FALSE(avctx, res == AMF_OK, AVERROR_UNKNOWN, "AMFConverter-SetProperty() failed with error %d\n", res);
+
+    // Map the user-selected profile + range to the AMF full/studio variants.
+    amf_color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_UNKNOWN;
+
+    switch(ctx->color_profile) {
+    case AMF_VIDEO_CONVERTER_COLOR_PROFILE_601:
+        if (ctx->color_range == AMF_COLOR_RANGE_FULL) {
+            amf_color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_FULL_601;
+        } else {
+            amf_color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_601;
+        }
+        break;
+    case AMF_VIDEO_CONVERTER_COLOR_PROFILE_709:
+        if (ctx->color_range == AMF_COLOR_RANGE_FULL) {
+            amf_color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_FULL_709;
+        } else {
+            amf_color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_709;
+        }
+        break;
+    case AMF_VIDEO_CONVERTER_COLOR_PROFILE_2020:
+        if (ctx->color_range == AMF_COLOR_RANGE_FULL) {
+            amf_color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_FULL_2020;
+        } else {
+            amf_color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_2020;
+        }
+        break;
+    default:
+        amf_color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_UNKNOWN;
+        break;
+    }
+
+    // Optional color properties: only forwarded when the user set them.
+    if (amf_color_profile != AMF_VIDEO_CONVERTER_COLOR_PROFILE_UNKNOWN) {
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->scaler, AMF_VIDEO_CONVERTER_COLOR_PROFILE, amf_color_profile);
+    }
+
+    if (ctx->color_range != AMF_COLOR_RANGE_UNDEFINED) {
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->scaler, AMF_VIDEO_CONVERTER_OUTPUT_COLOR_RANGE, ctx->color_range);
+    }
+
+    if (ctx->primaries != AMF_COLOR_PRIMARIES_UNDEFINED) {
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->scaler, AMF_VIDEO_CONVERTER_OUTPUT_COLOR_PRIMARIES, ctx->primaries);
+    }
+
+    if (ctx->trc != AMF_COLOR_TRANSFER_CHARACTERISTIC_UNDEFINED) {
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->scaler, AMF_VIDEO_CONVERTER_OUTPUT_TRANSFER_CHARACTERISTIC, ctx->trc);
+    }
+
+    res = ctx->scaler->pVtbl->Init(ctx->scaler, av_amf_av_to_amf_format(in_format), inlink->w, inlink->h);
+    AMF_RETURN_IF_FALSE(avctx, res == AMF_OK, AVERROR_UNKNOWN, "AMFConverter-Init() failed with error %d\n", res);
+
+    return 0;
+}
+
+#define OFFSET(x) offsetof(AMFScaleContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+/* AVOption table: size/format expressions plus the AMF converter's scale
+ * algorithm and output color properties (profile, range, primaries, trc). */
+static const AVOption scale_amf_options[] = {
+    { "w",              "Output video width",   OFFSET(w_expr),     AV_OPT_TYPE_STRING, { .str = "iw"   }, .flags = FLAGS },
+    { "h",              "Output video height",  OFFSET(h_expr),     AV_OPT_TYPE_STRING, { .str = "ih"   }, .flags = FLAGS },
+    { "format",         "Output pixel format",  OFFSET(format_str), AV_OPT_TYPE_STRING, { .str = "same" }, .flags = FLAGS },
+
+    { "scale_type",     "Scale type",           OFFSET(scale_type),     AV_OPT_TYPE_INT,   { .i64 = AMF_VIDEO_CONVERTER_SCALE_BILINEAR }, AMF_VIDEO_CONVERTER_SCALE_BILINEAR, AMF_VIDEO_CONVERTER_SCALE_BICUBIC, FLAGS, "scale_type" },
+    { "bilinear",       "Bilinear",             0,  AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_CONVERTER_SCALE_BILINEAR }, 0, 0, FLAGS, "scale_type" },
+    { "bicubic",        "Bicubic",              0,  AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_CONVERTER_SCALE_BICUBIC  }, 0, 0, FLAGS, "scale_type" },
+
+    { "color_profile",  "Color profile",        OFFSET(color_profile), AV_OPT_TYPE_INT,   { .i64 = AMF_VIDEO_CONVERTER_COLOR_PROFILE_UNKNOWN }, AMF_VIDEO_CONVERTER_COLOR_PROFILE_UNKNOWN, AMF_VIDEO_CONVERTER_COLOR_PROFILE_FULL_2020, FLAGS, "color_profile" },
+    { "bt601",          "BT.601",               0,  AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_CONVERTER_COLOR_PROFILE_601 }, 0, 0, FLAGS, "color_profile" },
+    { "bt709",          "BT.709",               0,  AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_CONVERTER_COLOR_PROFILE_709 }, 0, 0, FLAGS, "color_profile" },
+    { "bt2020",         "BT.2020",              0,  AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_CONVERTER_COLOR_PROFILE_2020 }, 0, 0, FLAGS, "color_profile" },
+
+    { "color_range",    "Color range",          OFFSET(color_range),   AV_OPT_TYPE_INT,   { .i64 = AMF_COLOR_RANGE_UNDEFINED }, AMF_COLOR_RANGE_UNDEFINED, AMF_COLOR_RANGE_FULL, FLAGS, "color_range" },
+    { "studio",         "Studio",               0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_RANGE_STUDIO }, 0, 0, FLAGS, "color_range" },
+    { "full",           "Full",                 0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_RANGE_FULL },   0, 0, FLAGS, "color_range" },
+
+    { "primaries",      "Output color primaries", OFFSET(primaries),   AV_OPT_TYPE_INT,   { .i64 = AMF_COLOR_PRIMARIES_UNDEFINED }, AMF_COLOR_PRIMARIES_UNDEFINED, AMF_COLOR_PRIMARIES_JEDEC_P22, FLAGS, "primaries" },
+    { "bt709",          "BT.709",               0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_PRIMARIES_BT709 },     0, 0, FLAGS, "primaries" },
+    { "bt470m",         "BT.470M",              0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_PRIMARIES_BT470M },    0, 0, FLAGS, "primaries" },
+    { "bt470bg",        "BT.470BG",             0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_PRIMARIES_BT470BG },   0, 0, FLAGS, "primaries" },
+    { "smpte170m",      "SMPTE170M",            0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_PRIMARIES_SMPTE170M }, 0, 0, FLAGS, "primaries" },
+    { "smpte240m",      "SMPTE240M",            0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_PRIMARIES_SMPTE240M }, 0, 0, FLAGS, "primaries" },
+    { "film",           "FILM",                 0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_PRIMARIES_FILM },      0, 0, FLAGS, "primaries" },
+    { "bt2020",         "BT2020",               0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_PRIMARIES_BT2020 },    0, 0, FLAGS, "primaries" },
+    { "smpte428",       "SMPTE428",             0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_PRIMARIES_SMPTE428 },  0, 0, FLAGS, "primaries" },
+    { "smpte431",       "SMPTE431",             0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_PRIMARIES_SMPTE431 },  0, 0, FLAGS, "primaries" },
+    { "smpte432",       "SMPTE432",             0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_PRIMARIES_SMPTE432 },  0, 0, FLAGS, "primaries" },
+    { "jedec-p22",      "JEDEC_P22",            0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_PRIMARIES_JEDEC_P22 }, 0, 0, FLAGS, "primaries" },
+
+    { "trc",            "Output transfer characteristics", OFFSET(trc), AV_OPT_TYPE_INT,  { .i64 = AMF_COLOR_TRANSFER_CHARACTERISTIC_UNDEFINED }, AMF_COLOR_TRANSFER_CHARACTERISTIC_UNDEFINED, AMF_COLOR_TRANSFER_CHARACTERISTIC_ARIB_STD_B67, FLAGS, "trc" },
+    { "bt709",          "BT.709",               0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_TRANSFER_CHARACTERISTIC_BT709 },        0, 0, FLAGS, "trc" },
+    { "gamma22",        "GAMMA22",              0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_TRANSFER_CHARACTERISTIC_GAMMA22 },      0, 0, FLAGS, "trc" },
+    { "gamma28",        "GAMMA28",              0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_TRANSFER_CHARACTERISTIC_GAMMA28 },      0, 0, FLAGS, "trc" },
+    { "smpte170m",      "SMPTE170M",            0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_TRANSFER_CHARACTERISTIC_SMPTE170M },    0, 0, FLAGS, "trc" },
+    { "smpte240m",      "SMPTE240M",            0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_TRANSFER_CHARACTERISTIC_SMPTE240M },    0, 0, FLAGS, "trc" },
+    { "linear",         "Linear",               0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_TRANSFER_CHARACTERISTIC_LINEAR },       0, 0, FLAGS, "trc" },
+    { "log",            "LOG",                  0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_TRANSFER_CHARACTERISTIC_LOG },          0, 0, FLAGS, "trc" },
+    { "log-sqrt",       "LOG_SQRT",             0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_TRANSFER_CHARACTERISTIC_LOG_SQRT },     0, 0, FLAGS, "trc" },
+    { "iec61966-2-4",   "IEC61966_2_4",         0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_TRANSFER_CHARACTERISTIC_IEC61966_2_4 }, 0, 0, FLAGS, "trc" },
+    { "bt1361-ecg",     "BT1361_ECG",           0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_TRANSFER_CHARACTERISTIC_BT1361_ECG },   0, 0, FLAGS, "trc" },
+    { "iec61966-2-1",   "IEC61966_2_1",         0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_TRANSFER_CHARACTERISTIC_IEC61966_2_1 }, 0, 0, FLAGS, "trc" },
+    { "bt2020-10",      "BT.2020_10",           0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_TRANSFER_CHARACTERISTIC_BT2020_10 },    0, 0, FLAGS, "trc" },
+    { "bt2020-12",      "BT.2020-12",           0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_TRANSFER_CHARACTERISTIC_BT2020_12 },    0, 0, FLAGS, "trc" },
+    { "smpte2084",      "SMPTE2084",            0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_TRANSFER_CHARACTERISTIC_SMPTE2084 },    0, 0, FLAGS, "trc" },
+    { "smpte428",       "SMPTE428",             0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_TRANSFER_CHARACTERISTIC_SMPTE428 },     0, 0, FLAGS, "trc" },
+    { "arib-std-b67",   "ARIB_STD_B67",         0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_TRANSFER_CHARACTERISTIC_ARIB_STD_B67 }, 0, 0, FLAGS, "trc" },
+
+    { "force_original_aspect_ratio", "decrease or increase w/h if necessary to keep the original AR", OFFSET(force_original_aspect_ratio), AV_OPT_TYPE_INT, { .i64 = 0}, 0, 2, FLAGS, "force_oar" },
+    { "disable",  NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 0 }, 0, 0, FLAGS, "force_oar" },
+    { "decrease", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 1 }, 0, 0, FLAGS, "force_oar" },
+    { "increase", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 2 }, 0, 0, FLAGS, "force_oar" },
+    { "force_divisible_by", "enforce that the output resolution is divisible by a defined integer when force_original_aspect_ratio is used", OFFSET(force_divisible_by), AV_OPT_TYPE_INT, { .i64 = 1}, 1, 256, FLAGS },
+
+    { NULL },
+};
+
+
+AVFILTER_DEFINE_CLASS(scale_amf);
+
+/* Single video input; frames are processed immediately in filter_frame. */
+static const AVFilterPad amf_scale_inputs[] = {
+    {
+        .name         = "default",
+        .type         = AVMEDIA_TYPE_VIDEO,
+        .filter_frame = amf_scale_filter_frame,
+    }
+};
+
+/* Single video output; converter is (re)configured in config_props. */
+static const AVFilterPad amf_scale_outputs[] = {
+    {
+        .name          = "default",
+        .type          = AVMEDIA_TYPE_VIDEO,
+        .config_props  = amf_scale_config_output,
+    }
+};
+
+// Must be const to match the 'extern const AVFilter' declaration in
+// allfilters.c; a non-const definition fails to compile.
+const AVFilter ff_vf_scale_amf = {
+    .name            = "scale_amf",
+    .description     = NULL_IF_CONFIG_SMALL("AMF video scaling and format conversion"),
+
+    .init            = amf_scale_init,
+    .uninit          = amf_scale_uninit,
+    // FILTER_QUERY_FUNC and FILTER_SINGLE_PIXFMT initialize the same
+    // format-negotiation fields; having both is a duplicate-initializer
+    // conflict that silently discarded the query callback. Keep only the
+    // query function, which advertises the full set of supported formats.
+    FILTER_QUERY_FUNC(amf_scale_query_formats),
+
+    .priv_size       = sizeof(AMFScaleContext),
+    .priv_class      = &scale_amf_class,
+
+    FILTER_INPUTS(amf_scale_inputs),
+    FILTER_OUTPUTS(amf_scale_outputs),
+
+    .flags_internal  = FF_FILTER_FLAG_HWFRAME_AWARE,
+    .flags           = AVFILTER_FLAG_HWDEVICE,
+};
diff --git a/libavfilter/vf_scale_amf_common.c b/libavfilter/vf_scale_amf_common.c
new file mode 100644
index 0000000000..5c26ebd2c9
--- /dev/null
+++ b/libavfilter/vf_scale_amf_common.c
@@ -0,0 +1,515 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "vf_scale_amf_common.h"
+
+#include "libavutil/avassert.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "formats.h"
+#include "libavutil/imgutils.h"
+
+#include "libavutil/hwcontext_amf.h"
+#include "AMF/components/ColorSpace.h"
+#include "scale_eval.h"
+
+#if CONFIG_DXVA2
+#include <d3d9.h>
+#endif
+
+#if CONFIG_D3D11VA
+#include <d3d11.h>
+#endif
+
+/* Filter init: resolve the user-supplied "format" option string.
+ * "same" means keep the input pixel format (AV_PIX_FMT_NONE sentinel). */
+int amf_scale_init(AVFilterContext *avctx)
+{
+    AMFScaleContext *ctx = avctx->priv;
+
+    if (strcmp(ctx->format_str, "same") == 0) {
+        ctx->format = AV_PIX_FMT_NONE;
+        return 0;
+    }
+
+    ctx->format = av_get_pix_fmt(ctx->format_str);
+    if (ctx->format == AV_PIX_FMT_NONE) {
+        av_log(avctx, AV_LOG_ERROR, "Unrecognized pixel format: %s\n", ctx->format_str);
+        return AVERROR(EINVAL);
+    }
+
+    return 0;
+}
+
+/* Filter uninit: release the AMF converter first (it may reference the
+ * device), then drop every buffer reference held by the context. */
+void amf_scale_uninit(AVFilterContext *avctx)
+{
+    AMFScaleContext *ctx = avctx->priv;
+
+    if (ctx->scaler) {
+        ctx->scaler->pVtbl->Terminate(ctx->scaler);
+        ctx->scaler->pVtbl->Release(ctx->scaler);
+        ctx->scaler = NULL;
+    }
+
+    // amf_device_ctx_internal is referenced in amf_init_scale_config();
+    // it was previously never unreferenced here and leaked.
+    av_buffer_unref(&ctx->amf_device_ctx_internal);
+    av_buffer_unref(&ctx->amf_device_ref);
+    av_buffer_unref(&ctx->hwdevice_ref);
+    av_buffer_unref(&ctx->hwframes_in_ref);
+    av_buffer_unref(&ctx->hwframes_out_ref);
+}
+
+/* Per-frame processing: wrap the input AVFrame as an AMF surface, run it
+ * through the converter, and wrap the output surface as an AVFrame. */
+int amf_scale_filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+    AVFilterContext *avctx = inlink->dst;
+    AMFScaleContext *ctx = avctx->priv;
+    AVFilterLink *outlink = avctx->outputs[0];
+    AMF_RESULT res;
+    AMFSurface *surface_in;
+    AMFSurface *surface_out = NULL;
+    AMFData *data_out = NULL;
+    enum AVColorSpace out_colorspace;
+    enum AVColorRange out_color_range;
+
+    AVFrame *out = NULL;
+    int ret = 0;
+
+    if (!ctx->scaler)
+        return AVERROR(EINVAL);
+
+    ret = amf_avframe_to_amfsurface(avctx, in, &surface_in);
+    if (ret < 0)
+        goto fail;
+    // NOTE(review): surface_in is not released after submission — confirm
+    // whether SubmitInput() takes ownership of the reference.
+
+    res = ctx->scaler->pVtbl->SubmitInput(ctx->scaler, (AMFData*)surface_in);
+    AMF_GOTO_FAIL_IF_FALSE(avctx, res == AMF_OK, AVERROR_UNKNOWN, "SubmitInput() failed with error %d\n", res);
+    res = ctx->scaler->pVtbl->QueryOutput(ctx->scaler, &data_out);
+    AMF_GOTO_FAIL_IF_FALSE(avctx, res == AMF_OK, AVERROR_UNKNOWN, "QueryOutput() failed with error %d\n", res);
+    // Without this check a missing output left surface_out uninitialized.
+    AMF_GOTO_FAIL_IF_FALSE(avctx, !!data_out, AVERROR_UNKNOWN, "QueryOutput() returned no data\n");
+
+    {
+        AMFGuid guid = IID_AMFSurface();
+        data_out->pVtbl->QueryInterface(data_out, &guid, (void**)&surface_out); // query for surface interface
+        data_out->pVtbl->Release(data_out);
+    }
+
+    out = amf_amfsurface_to_avframe(avctx, surface_out);
+    if (!out) { // was dereferenced unchecked below
+        ret = AVERROR(ENOMEM);
+        goto fail;
+    }
+
+    ret = av_frame_copy_props(out, in);
+    if (ret < 0) // was only checked after the color overrides below
+        goto fail;
+
+    // Override color properties the converter was configured to change.
+    out_colorspace = AVCOL_SPC_UNSPECIFIED;
+
+    if (ctx->color_profile != AMF_VIDEO_CONVERTER_COLOR_PROFILE_UNKNOWN) {
+        switch(ctx->color_profile) {
+        case AMF_VIDEO_CONVERTER_COLOR_PROFILE_601:
+            out_colorspace = AVCOL_SPC_SMPTE170M;
+            break;
+        case AMF_VIDEO_CONVERTER_COLOR_PROFILE_709:
+            out_colorspace = AVCOL_SPC_BT709;
+            break;
+        case AMF_VIDEO_CONVERTER_COLOR_PROFILE_2020:
+            out_colorspace = AVCOL_SPC_BT2020_NCL;
+            break;
+        case AMF_VIDEO_CONVERTER_COLOR_PROFILE_JPEG:
+            out_colorspace = AVCOL_SPC_RGB;
+            break;
+        default:
+            out_colorspace = AVCOL_SPC_UNSPECIFIED;
+            break;
+        }
+        out->colorspace = out_colorspace;
+    }
+
+    out_color_range = AVCOL_RANGE_UNSPECIFIED;
+    if (ctx->color_range == AMF_COLOR_RANGE_FULL)
+        out_color_range = AVCOL_RANGE_JPEG;
+    else if (ctx->color_range == AMF_COLOR_RANGE_STUDIO)
+        out_color_range = AVCOL_RANGE_MPEG;
+
+    if (ctx->color_range != AMF_COLOR_RANGE_UNDEFINED)
+        out->color_range = out_color_range;
+
+    if (ctx->primaries != AMF_COLOR_PRIMARIES_UNDEFINED)
+        out->color_primaries = ctx->primaries;
+
+    if (ctx->trc != AMF_COLOR_TRANSFER_CHARACTERISTIC_UNDEFINED)
+        out->color_trc = ctx->trc;
+
+    out->format = outlink->format;
+    out->width  = outlink->w;
+    out->height = outlink->h;
+
+    out->hw_frames_ctx = av_buffer_ref(ctx->hwframes_out_ref);
+    if (!out->hw_frames_ctx) {
+        ret = AVERROR(ENOMEM);
+        goto fail;
+    }
+
+    // Rescale the sample aspect ratio to preserve the display aspect ratio.
+    if (inlink->sample_aspect_ratio.num) {
+        outlink->sample_aspect_ratio = av_mul_q((AVRational){outlink->h * inlink->w, outlink->w * inlink->h}, inlink->sample_aspect_ratio);
+    } else
+        outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
+
+    av_frame_free(&in);
+    return ff_filter_frame(outlink, out);
+fail:
+    av_frame_free(&in);
+    av_frame_free(&out);
+    return ret;
+}
+
+
+
+/* Register the supported input/output pixel-format lists on the filter's
+ * pads. The input list is the output list plus the software formats. */
+int amf_setup_input_output_formats(AVFilterContext *avctx,
+                                   const enum AVPixelFormat *input_pix_fmts,
+                                   const enum AVPixelFormat *output_pix_fmts)
+{
+    int err;
+    int i;
+    AVFilterFormats *input_formats;
+
+    // If hw_device_ctx is set to D3D11VA or DXVA2, replace the output list
+    // so the matching hardware format is chosen by default.
+    // The order is ignored if hw_frames_ctx is not NULL at the
+    // config_output stage.
+    if (avctx->hw_device_ctx) {
+        AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)avctx->hw_device_ctx->data;
+
+        switch (device_ctx->type) {
+    #if CONFIG_D3D11VA
+        case AV_HWDEVICE_TYPE_D3D11VA:
+            {
+                static const enum AVPixelFormat output_pix_fmts_d3d11[] = {
+                    AV_PIX_FMT_D3D11,
+                    AV_PIX_FMT_NONE,
+                };
+                output_pix_fmts = output_pix_fmts_d3d11;
+            }
+            break;
+    #endif
+    #if CONFIG_DXVA2
+        case AV_HWDEVICE_TYPE_DXVA2:
+            {
+                static const enum AVPixelFormat output_pix_fmts_dxva2[] = {
+                    AV_PIX_FMT_DXVA2_VLD,
+                    AV_PIX_FMT_NONE,
+                };
+                output_pix_fmts = output_pix_fmts_dxva2;
+            }
+            break;
+    #endif
+        default:
+            {
+                av_log(avctx, AV_LOG_ERROR, "Unsupported device : %s\n", av_hwdevice_get_type_name(device_ctx->type));
+                return AVERROR(EINVAL);
+            }
+            break;
+        }
+    }
+
+    // Input side accepts every output format plus the software input formats.
+    input_formats = ff_make_format_list(output_pix_fmts);
+    if (!input_formats) {
+        return AVERROR(ENOMEM);
+    }
+
+    for (i = 0; input_pix_fmts[i] != AV_PIX_FMT_NONE; i++) {
+        err = ff_add_format(&input_formats, input_pix_fmts[i]);
+        if (err < 0)
+            // NOTE(review): input_formats may be leaked on this path —
+            // confirm ff_add_format()'s cleanup behavior on failure.
+            return err;
+    }
+
+    if ((err = ff_formats_ref(input_formats, &avctx->inputs[0]->outcfg.formats)) < 0 ||
+        (err = ff_formats_ref(ff_make_format_list(output_pix_fmts),
+                              &avctx->outputs[0]->incfg.formats)) < 0)
+        return err;
+    return 0;
+}
+
+/* Copy the planes of a software AVFrame into a host-memory AMF surface.
+ * The surface must already be allocated with a compatible format/size. */
+int amf_copy_surface(AVFilterContext *avctx, const AVFrame *frame,
+                     AMFSurface* surface)
+{
+    AMFPlane *plane;
+    uint8_t  *dst_data[4];
+    int       dst_linesize[4];
+    int       planes;
+    int       i;
+
+    planes = surface->pVtbl->GetPlanesCount(surface);
+    av_assert0(planes < FF_ARRAY_ELEMS(dst_data));
+
+    for (i = 0; i < planes; i++) {
+        plane = surface->pVtbl->GetPlaneAt(surface, i);
+        dst_data[i] = plane->pVtbl->GetNative(plane);
+        // GetHPitch() is used as the per-plane linesize; assumed to be the
+        // horizontal pitch in bytes — TODO confirm against the AMF SDK docs.
+        dst_linesize[i] = plane->pVtbl->GetHPitch(plane);
+    }
+    av_image_copy(dst_data, dst_linesize,
+        (const uint8_t**)frame->data, frame->linesize, frame->format,
+        frame->width, frame->height);
+
+    return 0;
+}
+
+/* Resolve output dimensions and (re)create the device and frames contexts.
+ * On success *in_format holds the effective software input pixel format. */
+int amf_init_scale_config(AVFilterLink *outlink, enum AVPixelFormat *in_format)
+{
+    AVFilterContext *avctx = outlink->src;
+    AVFilterLink *inlink = avctx->inputs[0];
+    AMFScaleContext *ctx = avctx->priv;
+    AVHWFramesContext *hwframes_out;
+    int err;
+
+    if ((err = ff_scale_eval_dimensions(avctx,
+                                        ctx->w_expr, ctx->h_expr,
+                                        inlink, outlink,
+                                        &ctx->width, &ctx->height)) < 0)
+        return err;
+
+    ff_scale_adjust_dimensions(inlink, &ctx->width, &ctx->height,
+                               ctx->force_original_aspect_ratio, ctx->force_divisible_by);
+
+    av_buffer_unref(&ctx->amf_device_ref);
+    av_buffer_unref(&ctx->hwframes_in_ref);
+    av_buffer_unref(&ctx->hwframes_out_ref);
+
+    if (inlink->hw_frames_ctx) {
+        // Case 1: the input already carries a hardware frames context.
+        AVHWFramesContext *frames_ctx = (AVHWFramesContext*)inlink->hw_frames_ctx->data;
+        if (frames_ctx->device_ctx->type == AV_HWDEVICE_TYPE_AMF) {
+            AVAMFDeviceContext * amf_ctx =  frames_ctx->device_ctx->hwctx;
+            ctx->amf_device_ctx_internal = av_buffer_ref(amf_ctx->internal);
+            if (!ctx->amf_device_ctx_internal)
+                return AVERROR(ENOMEM);
+        }
+        if (av_amf_av_to_amf_format(frames_ctx->sw_format) == AMF_SURFACE_UNKNOWN) {
+            av_log(avctx, AV_LOG_ERROR, "Format of input frames context (%s) is not supported by AMF.\n",
+                   av_get_pix_fmt_name(frames_ctx->sw_format));
+            return AVERROR(EINVAL);
+        }
+
+        err = av_hwdevice_ctx_create_derived(&ctx->amf_device_ref, AV_HWDEVICE_TYPE_AMF, frames_ctx->device_ref, 0);
+        if (err < 0)
+            return err;
+
+        ctx->hwframes_in_ref = av_buffer_ref(inlink->hw_frames_ctx);
+        if (!ctx->hwframes_in_ref)
+            return AVERROR(ENOMEM);
+
+        ctx->hwframes_out_ref = av_hwframe_ctx_alloc(frames_ctx->device_ref);
+        if (!ctx->hwframes_out_ref)
+            return AVERROR(ENOMEM);
+
+        hwframes_out = (AVHWFramesContext*)ctx->hwframes_out_ref->data;
+        hwframes_out->format    = outlink->format;
+        hwframes_out->sw_format = frames_ctx->sw_format;
+    } else if (avctx->hw_device_ctx) {
+        // Case 2: a hardware device context was supplied; derive an AMF
+        // device from it.
+        AVHWDeviceContext *hwdev_ctx;
+        err = av_hwdevice_ctx_create_derived(&ctx->amf_device_ref, AV_HWDEVICE_TYPE_AMF, avctx->hw_device_ctx, 0);
+        if (err < 0)
+            return err;
+        hwdev_ctx = (AVHWDeviceContext*)avctx->hw_device_ctx->data;
+        if (hwdev_ctx->type == AV_HWDEVICE_TYPE_AMF)
+        {
+            AVAMFDeviceContext * amf_ctx =  hwdev_ctx->hwctx;
+            ctx->amf_device_ctx_internal = av_buffer_ref(amf_ctx->internal);
+            if (!ctx->amf_device_ctx_internal)
+                return AVERROR(ENOMEM);
+        }
+        ctx->hwdevice_ref = av_buffer_ref(avctx->hw_device_ctx);
+        if (!ctx->hwdevice_ref)
+            return AVERROR(ENOMEM);
+
+        ctx->hwframes_out_ref = av_hwframe_ctx_alloc(ctx->hwdevice_ref);
+        if (!ctx->hwframes_out_ref)
+            return AVERROR(ENOMEM);
+
+        hwframes_out = (AVHWFramesContext*)ctx->hwframes_out_ref->data;
+        hwframes_out->format    = AV_PIX_FMT_AMF;
+        hwframes_out->sw_format = outlink->format;
+    } else {
+        // Case 3: pure software path; create a standalone AMF context.
+        AVAMFDeviceContextInternal *wrapped = av_mallocz(sizeof(*wrapped));
+        if (!wrapped)
+            return AVERROR(ENOMEM);
+        ctx->amf_device_ctx_internal = av_buffer_create((uint8_t *)wrapped, sizeof(*wrapped),
+                                                        av_amf_context_internal_free, NULL, 0);
+        if (!ctx->amf_device_ctx_internal) {
+            av_free(wrapped);
+            return AVERROR(ENOMEM);
+        }
+        // Was "res ==" (comparing an uninitialized variable) instead of an
+        // assignment, so initialization failures were silently ignored.
+        err = av_amf_context_internal_create((AVAMFDeviceContextInternal *)ctx->amf_device_ctx_internal->data, avctx, "", NULL, 0);
+        if (err < 0)
+            return err;
+        // NOTE(review): ctx->amf_device_ref is still NULL on this path, so
+        // av_hwframe_ctx_alloc() cannot succeed here — confirm whether an
+        // AMF hwdevice ref should be created from the internal context first.
+        ctx->hwframes_out_ref = av_hwframe_ctx_alloc(ctx->amf_device_ref);
+        if (!ctx->hwframes_out_ref)
+            return AVERROR(ENOMEM);
+
+        hwframes_out = (AVHWFramesContext*)ctx->hwframes_out_ref->data;
+        hwframes_out->format    = outlink->format;
+        hwframes_out->sw_format = inlink->format;
+    }
+
+    if (ctx->format != AV_PIX_FMT_NONE) {
+        hwframes_out->sw_format = ctx->format;
+    }
+
+    if (inlink->format == AV_PIX_FMT_AMF) {
+        if (!inlink->hw_frames_ctx || !inlink->hw_frames_ctx->data)
+            return AVERROR(EINVAL);
+        else
+            *in_format = ((AVHWFramesContext*)inlink->hw_frames_ctx->data)->sw_format;
+    } else
+        *in_format = inlink->format;
+
+    outlink->w = ctx->width;
+    outlink->h = ctx->height;
+
+    hwframes_out->width = outlink->w;
+    hwframes_out->height = outlink->h;
+
+    err = av_hwframe_ctx_init(ctx->hwframes_out_ref);
+    if (err < 0)
+        return err;
+
+    outlink->hw_frames_ctx = av_buffer_ref(ctx->hwframes_out_ref);
+    if (!outlink->hw_frames_ctx) {
+        return AVERROR(ENOMEM);
+    }
+    return 0;
+}
+
+/* AVBufferRef free callback. 'data' must be the AMFSurface itself (the
+ * opaque argument is unused), so callers of av_buffer_create() have to pass
+ * the surface as the buffer data, not as the opaque pointer. */
+void amf_free_amfsurface(void *opaque, uint8_t *data)
+{
+    AMFSurface *surface = (AMFSurface*)data;
+    surface->pVtbl->Release(surface);
+}
+
+/* Wrap an AMF output surface in a new AVFrame. Ownership of one surface
+ * reference is transferred to the returned frame's buffer (released via
+ * amf_free_amfsurface). Returns NULL on error. */
+AVFrame *amf_amfsurface_to_avframe(AVFilterContext *avctx, AMFSurface* pSurface)
+{
+    AVFrame *frame = av_frame_alloc();
+    AMFScaleContext *ctx = avctx->priv;
+
+    if (!frame)
+        return NULL;
+
+    if (ctx->hwframes_out_ref) {
+        AVHWFramesContext *hwframes_out = (AVHWFramesContext *)ctx->hwframes_out_ref->data;
+        if (hwframes_out->format == AV_PIX_FMT_AMF) {
+            int ret = av_hwframe_get_buffer(ctx->hwframes_out_ref, frame, 0);
+            if (ret < 0) {
+                av_log(avctx, AV_LOG_ERROR, "Get hw frame failed.\n");
+                av_frame_free(&frame);
+                return NULL;
+            }
+            frame->data[3] = (uint8_t *)pSurface;
+            // amf_free_amfsurface() releases its 'data' argument, so the
+            // surface must be the buffer data; the opaque is unused.
+            frame->buf[1] = av_buffer_create((uint8_t *)pSurface, sizeof(AMFSurface),
+                                             amf_free_amfsurface,
+                                             NULL,
+                                             AV_BUFFER_FLAG_READONLY);
+            if (!frame->buf[1]) {
+                av_frame_free(&frame);
+                return NULL;
+            }
+        } else { // FIXME: add processing of other hw formats
+            av_log(avctx, AV_LOG_ERROR, "Unknown pixel format\n");
+            av_frame_free(&frame); // was leaked on this path
+            return NULL;
+        }
+    } else {
+
+        switch (pSurface->pVtbl->GetMemoryType(pSurface))
+        {
+    #if CONFIG_D3D11VA
+        case AMF_MEMORY_DX11:
+            {
+                AMFPlane *plane0 = pSurface->pVtbl->GetPlaneAt(pSurface, 0);
+                frame->data[0] = plane0->pVtbl->GetNative(plane0);
+                frame->data[1] = (uint8_t*)(intptr_t)0;
+
+                // Previously data was NULL and the surface was passed as the
+                // opaque, so the free callback released NULL and leaked the
+                // surface.
+                frame->buf[0] = av_buffer_create((uint8_t *)pSurface,
+                                                 sizeof(AMFSurface),
+                                                 amf_free_amfsurface,
+                                                 NULL,
+                                                 AV_BUFFER_FLAG_READONLY);
+            }
+            break;
+    #endif
+    #if CONFIG_DXVA2
+        case AMF_MEMORY_DX9:
+            {
+                AMFPlane *plane0 = pSurface->pVtbl->GetPlaneAt(pSurface, 0);
+                frame->data[3] = plane0->pVtbl->GetNative(plane0);
+
+                frame->buf[0] = av_buffer_create((uint8_t *)pSurface,
+                                                 sizeof(AMFSurface),
+                                                 amf_free_amfsurface,
+                                                 NULL,
+                                                 AV_BUFFER_FLAG_READONLY);
+            }
+            break;
+    #endif
+        default:
+            {
+                av_log(avctx, AV_LOG_ERROR, "Unsupported memory type : %d\n", pSurface->pVtbl->GetMemoryType(pSurface));
+                av_frame_free(&frame); // was leaked on this path
+                return NULL;
+            }
+        }
+        if (!frame->buf[0]) {
+            av_frame_free(&frame);
+            return NULL;
+        }
+    }
+
+    return frame;
+}
+
+/* Wrap (hardware) or copy (software) an AVFrame into an AMF surface.
+ * On success *ppSurface is set; returns 0 or a negative AVERROR code.
+ * NOTE(review): for the wrapping paths the frame's underlying texture/
+ * surface must stay alive while AMF uses it — confirm lifetime handling. */
+int amf_avframe_to_amfsurface(AVFilterContext *avctx, const AVFrame *frame, AMFSurface** ppSurface)
+{
+    AMFScaleContext *ctx = avctx->priv;
+    AVAMFDeviceContextInternal* internal = (AVAMFDeviceContextInternal *)ctx->amf_device_ctx_internal->data;
+    AMFSurface *surface;
+    AMF_RESULT  res;
+    int hw_surface = 0;
+
+    switch (frame->format) {
+#if CONFIG_D3D11VA
+    case AV_PIX_FMT_D3D11:
+        {
+            static const GUID AMFTextureArrayIndexGUID = { 0x28115527, 0xe7c3, 0x4b66, { 0x99, 0xd3, 0x4f, 0x2a, 0xe6, 0xb4, 0x7f, 0xaf } };
+            ID3D11Texture2D *texture = (ID3D11Texture2D*)frame->data[0]; // actual texture
+            int index = (intptr_t)frame->data[1]; // index is a slice in texture array is - set to tell AMF which slice to use
+            texture->lpVtbl->SetPrivateData(texture, &AMFTextureArrayIndexGUID, sizeof(index), &index);
+
+            res = internal->context->pVtbl->CreateSurfaceFromDX11Native(internal->context, texture, &surface, NULL); // wrap to AMF surface
+            AMF_RETURN_IF_FALSE(avctx, res == AMF_OK, AVERROR(ENOMEM), "CreateSurfaceFromDX11Native() failed  with error %d\n", res);
+            hw_surface = 1;
+        }
+        break;
+#endif
+    case AV_PIX_FMT_AMF:
+        {
+            // The frame already carries an AMF surface; it is reused directly
+            // without taking an additional reference — TODO confirm the
+            // surface outlives the converter's use of it.
+            surface = (AMFSurface*)frame->data[3]; // actual surface
+            hw_surface = 1;
+        }
+        break;
+
+#if CONFIG_DXVA2
+    case AV_PIX_FMT_DXVA2_VLD:
+        {
+            IDirect3DSurface9 *texture = (IDirect3DSurface9 *)frame->data[3]; // actual texture
+
+            res = internal->context->pVtbl->CreateSurfaceFromDX9Native(internal->context, texture, &surface, NULL); // wrap to AMF surface
+            AMF_RETURN_IF_FALSE(avctx, res == AMF_OK, AVERROR(ENOMEM), "CreateSurfaceFromDX9Native() failed  with error %d\n", res);
+            hw_surface = 1;
+        }
+        break;
+#endif
+    default:
+        {
+            // Software frame: allocate a host-memory surface and copy planes.
+            AMF_SURFACE_FORMAT amf_fmt = av_amf_av_to_amf_format(frame->format);
+            res = internal->context->pVtbl->AllocSurface(internal->context, AMF_MEMORY_HOST, amf_fmt, frame->width, frame->height, &surface);
+            AMF_RETURN_IF_FALSE(avctx, res == AMF_OK, AVERROR(ENOMEM), "AllocSurface() failed  with error %d\n", res);
+            amf_copy_surface(avctx, frame, surface);
+        }
+        break;
+    }
+
+    if (hw_surface) {
+        // input HW surfaces can be vertically aligned by 16; tell AMF the real size
+        surface->pVtbl->SetCrop(surface, 0, 0, frame->width, frame->height);
+    }
+
+    surface->pVtbl->SetPts(surface, frame->pts);
+    *ppSurface = surface;
+    return 0;
+}
diff --git a/libavfilter/vf_scale_amf_common.h b/libavfilter/vf_scale_amf_common.h
new file mode 100644
index 0000000000..a43c7602cb
--- /dev/null
+++ b/libavfilter/vf_scale_amf_common.h
@@ -0,0 +1,71 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_AMF_COMMON_H
+#define AVFILTER_AMF_COMMON_H
+
+#include "avfilter.h"
+
+#include "AMF/core/Surface.h"
+#include "AMF/components/Component.h"
+
+/* Shared private context for the scale_amf and scale_amf_hq filters. */
+typedef struct AMFScaleContext {
+    const AVClass *class;
+
+    int width, height;                 // evaluated output dimensions
+    enum AVPixelFormat format;         // requested output sw format (NONE = same)
+    int scale_type;                    // AMF_VIDEO_CONVERTER_SCALE_*
+    int color_profile;                 // AMF_VIDEO_CONVERTER_COLOR_PROFILE_*
+    int color_range;                   // AMF_COLOR_RANGE_*
+    int primaries;                     // AMF_COLOR_PRIMARIES_*
+    int trc;                           // AMF_COLOR_TRANSFER_CHARACTERISTIC_*
+    int fill;
+    int fill_color;
+    int keep_ratio;
+
+    // HQScaler properties
+    int algorithm;
+    float sharpness;
+
+    char *w_expr;                      // width expression ("iw" default)
+    char *h_expr;                      // height expression ("ih" default)
+    char *format_str;                  // "format" option as given by the user
+    int force_original_aspect_ratio;
+    int force_divisible_by;
+
+    AMFComponent *scaler;              // AMFVideoConverter / HQScaler instance
+    AVBufferRef *amf_device_ref;       // AV_HWDEVICE_TYPE_AMF device
+
+    AVBufferRef *hwframes_in_ref;
+    AVBufferRef *hwframes_out_ref;
+    AVBufferRef *hwdevice_ref;
+
+    AVBufferRef *amf_device_ctx_internal;  // AVAMFDeviceContextInternal wrapper
+} AMFScaleContext;
+
+int amf_scale_init(AVFilterContext *avctx);
+void amf_scale_uninit(AVFilterContext *avctx);
+int amf_init_scale_config(AVFilterLink *outlink, enum AVPixelFormat *in_format);
+int amf_copy_surface(AVFilterContext *avctx, const AVFrame *frame, AMFSurface* surface);
+void amf_free_amfsurface(void *opaque, uint8_t *data);
+AVFrame *amf_amfsurface_to_avframe(AVFilterContext *avctx, AMFSurface* pSurface);
+int amf_avframe_to_amfsurface(AVFilterContext *avctx, const AVFrame *frame, AMFSurface** ppSurface);
+int amf_setup_input_output_formats(AVFilterContext *avctx, const enum AVPixelFormat *input_pix_fmts, const enum AVPixelFormat *output_pix_fmts);
+int amf_scale_filter_frame(AVFilterLink *inlink, AVFrame *in);
+
+#endif /* AVFILTER_AMF_COMMON_H */
diff --git a/libavfilter/vf_scale_amf_hq.c b/libavfilter/vf_scale_amf_hq.c
new file mode 100644
index 0000000000..63c9bd8301
--- /dev/null
+++ b/libavfilter/vf_scale_amf_hq.c
@@ -0,0 +1,191 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * scale video filter - AMF
+ */
+
+#include <stdio.h>
+#include <string.h>
+
+#include "libavutil/avassert.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/time.h"
+
+#include "libavutil/hwcontext.h"
+#include "libavutil/hwcontext_amf.h"
+
+#include "AMF/components/HQScaler.h"
+#include "AMF/components/ColorSpace.h"
+#include "vf_scale_amf_common.h"
+
+#include "avfilter.h"
+#include "internal.h"
+#include "formats.h"
+#include "video.h"
+
+#if CONFIG_DXVA2
+#include <d3d9.h>
+#endif
+
+#if CONFIG_D3D11VA
+#include <d3d11.h>
+#endif
+
+
+/* Advertise the pixel formats accepted on input and produced on output.
+ * NOTE(review): the output list unconditionally contains AV_PIX_FMT_D3D11
+ * and AV_PIX_FMT_DXVA2_VLD even though the corresponding d3d headers above
+ * are only included under CONFIG_D3D11VA/CONFIG_DXVA2 — confirm whether
+ * these entries should be guarded the same way. */
+static int amf_scale_query_formats(AVFilterContext *avctx)
+{
+ const enum AVPixelFormat *output_pix_fmts;
+ static const enum AVPixelFormat input_pix_fmts[] = {
+ AV_PIX_FMT_NV12,
+ AV_PIX_FMT_P010,
+ AV_PIX_FMT_BGRA,
+ AV_PIX_FMT_RGBA,
+ AV_PIX_FMT_AMF,
+ AV_PIX_FMT_RGBAF16,
+ AV_PIX_FMT_NONE,
+ };
+ static const enum AVPixelFormat output_pix_fmts_default[] = {
+ AV_PIX_FMT_NV12,
+ AV_PIX_FMT_P010,
+ AV_PIX_FMT_BGRA,
+ AV_PIX_FMT_RGBA,
+ AV_PIX_FMT_AMF,
+ AV_PIX_FMT_D3D11,
+ AV_PIX_FMT_DXVA2_VLD,
+ AV_PIX_FMT_RGBAF16,
+ AV_PIX_FMT_NONE,
+ };
+ output_pix_fmts = output_pix_fmts_default;
+
+ return amf_setup_input_output_formats(avctx, input_pix_fmts, output_pix_fmts);
+}
+
+/**
+ * Output-link configuration: validates that the requested operation is an
+ * upscale, creates the AMF HQScaler component and pushes the user options
+ * to it before initializing the component with the input dimensions.
+ * Returns 0 on success or a negative AVERROR code.
+ */
+static int amf_scale_config_output(AVFilterLink *outlink)
+{
+ AVFilterContext *avctx = outlink->src;
+ AVFilterLink *inlink = avctx->inputs[0];
+ AMFScaleContext *ctx = avctx->priv;
+ AVAMFDeviceContextInternal * internal = NULL;
+ AMFSize out_size;
+ int err;
+ AMF_RESULT res;
+ enum AVPixelFormat in_format;
+
+ err = amf_init_scale_config(outlink, &in_format);
+ if (err < 0)
+ return err;
+
+ // HQ scaler should be used for upscaling only
+ if (inlink->w > outlink->w || inlink->h > outlink->h) {
+ av_log(avctx, AV_LOG_ERROR, "AMF HQ scaler should be used for upscaling only.\n");
+ return AVERROR_UNKNOWN;
+ }
+ // FIXME: add checks whether we have HW context
+
+ internal = (AVAMFDeviceContextInternal * )ctx->amf_device_ctx_internal->data;
+ res = internal->factory->pVtbl->CreateComponent(internal->factory, internal->context, AMFHQScaler, &ctx->scaler);
+ // Log against the filter context, consistent with the checks below
+ // (the original passed the priv struct here).
+ AMF_RETURN_IF_FALSE(avctx, res == AMF_OK, AVERROR_FILTER_NOT_FOUND, "CreateComponent(%ls) failed with error %d\n", AMFHQScaler, res);
+
+ out_size.width = outlink->w;
+ out_size.height = outlink->h;
+ AMF_ASSIGN_PROPERTY_SIZE(res, ctx->scaler, AMF_HQ_SCALER_OUTPUT_SIZE, out_size);
+ AMF_RETURN_IF_FALSE(avctx, res == AMF_OK, AVERROR_UNKNOWN, "AMFHQScaler-SetProperty() failed with error %d\n", res);
+
+ // -1 means "not set": leave the AMF component defaults in place.
+ if (ctx->algorithm != -1) {
+ AMF_ASSIGN_PROPERTY_INT64(res, ctx->scaler, AMF_HQ_SCALER_ALGORITHM, ctx->algorithm);
+ }
+ // Exact float compare is safe here: -1 is the stored option default.
+ if (ctx->sharpness != -1) {
+ AMF_ASSIGN_PROPERTY_DOUBLE(res, ctx->scaler, AMF_HQ_SCALER_SHARPNESS, ctx->sharpness);
+ }
+ AMF_ASSIGN_PROPERTY_BOOL(res, ctx->scaler, AMF_HQ_SCALER_FILL, ctx->fill);
+ AMF_ASSIGN_PROPERTY_BOOL(res, ctx->scaler, AMF_HQ_SCALER_KEEP_ASPECT_RATIO, ctx->keep_ratio);
+ // Setup default options to skip color conversion
+ ctx->color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_UNKNOWN;
+ ctx->color_range = AMF_COLOR_RANGE_UNDEFINED;
+ ctx->primaries = AMF_COLOR_PRIMARIES_UNDEFINED;
+ ctx->trc = AMF_COLOR_TRANSFER_CHARACTERISTIC_UNDEFINED;
+
+ res = ctx->scaler->pVtbl->Init(ctx->scaler, av_amf_av_to_amf_format(in_format), inlink->w, inlink->h);
+ AMF_RETURN_IF_FALSE(avctx, res == AMF_OK, AVERROR_UNKNOWN, "AMFHQScaler-Init() failed with error %d\n", res);
+
+ return 0;
+}
+
+#define OFFSET(x) offsetof(AMFScaleContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+static const AVOption scale_amf_hq_options[] = {
+ { "w", "Output video width", OFFSET(w_expr), AV_OPT_TYPE_STRING, { .str = "iw" }, .flags = FLAGS },
+ { "h", "Output video height", OFFSET(h_expr), AV_OPT_TYPE_STRING, { .str = "ih" }, .flags = FLAGS },
+
+ { "format", "Output pixel format", OFFSET(format_str), AV_OPT_TYPE_STRING, { .str = "same" }, .flags = FLAGS },
+ // -1 means "not set": the AMF component default is left untouched.
+ // The stray (and misspelled, "keep_ration") unit strings on the three
+ // options below were dropped: a unit only groups named constants, and
+ // none of these options have any.
+ { "sharpness", "Sharpness", OFFSET(sharpness), AV_OPT_TYPE_FLOAT, { .dbl = -1 }, -1, 2., FLAGS },
+ { "keep-ratio", "Keep aspect ratio", OFFSET(keep_ratio), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
+ { "fill", "Fill", OFFSET(fill), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
+
+ { "algorithm", "Scaling algorithm", OFFSET(algorithm), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, AMF_HQ_SCALER_ALGORITHM_VIDEOSR1_1, FLAGS, "algorithm" },
+ { "bilinear", "Bilinear", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_HQ_SCALER_ALGORITHM_BILINEAR }, 0, 0, FLAGS, "algorithm" },
+ { "bicubic", "Bicubic", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_HQ_SCALER_ALGORITHM_BICUBIC }, 0, 0, FLAGS, "algorithm" },
+ { "sr1-0", "Video SR1.0", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_HQ_SCALER_ALGORITHM_VIDEOSR1_0 }, 0, 0, FLAGS, "algorithm" },
+ { "point", "Point", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_HQ_SCALER_ALGORITHM_POINT }, 0, 0, FLAGS, "algorithm" },
+ { "sr1-1", "Video SR1.1", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_HQ_SCALER_ALGORITHM_VIDEOSR1_1 }, 0, 0, FLAGS, "algorithm" },
+
+ { NULL },
+};
+
+
+AVFILTER_DEFINE_CLASS(scale_amf_hq);
+
+/* Single video input; frames are handled by the shared
+ * amf_scale_filter_frame() callback declared in vf_scale_amf_common.h. */
+static const AVFilterPad amf_scale_inputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .filter_frame = amf_scale_filter_frame,
+ }
+};
+
+/* Single video output; amf_scale_config_output() runs once the link
+ * dimensions are negotiated and sets up the HQScaler component. */
+static const AVFilterPad amf_scale_outputs[] = {
+ {
+ .name = "default",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .config_props = amf_scale_config_output,
+ }
+};
+
+AVFilter ff_vf_scale_amf_hq = {
+ .name = "scale_amf_hq",
+ .description = NULL_IF_CONFIG_SMALL("AMF HQ video upscaling"),
+
+ .init = amf_scale_init,
+ .uninit = amf_scale_uninit,
+ FILTER_QUERY_FUNC(&amf_scale_query_formats),
+
+ .priv_size = sizeof(AMFScaleContext),
+ .priv_class = &scale_amf_hq_class,
+
+ FILTER_INPUTS(amf_scale_inputs),
+ FILTER_OUTPUTS(amf_scale_outputs),
+
+ // FILTER_SINGLE_PIXFMT removed: it initializes the same designated
+ // members of the formats union as FILTER_QUERY_FUNC above, and as the
+ // later initializer it silently overrode the query callback, making
+ // amf_scale_query_formats() dead code.
+
+ .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
+ .flags = AVFILTER_FLAG_HWDEVICE,
+};
\ No newline at end of file
--
2.38.1.windows.1
_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [FFmpeg-devel] [PATCH 8/9] avfilter/scale_amf: Add AMF HW scaler & color converter
2024-02-14 1:55 ` [FFmpeg-devel] [PATCH 8/9] avfilter/scale_amf: Add AMF HW scaler & color converter Dmitrii Ovchinnikov
@ 2024-02-14 15:08 ` Timo Rothenpieler
2024-02-14 15:27 ` Evgeny Pavlov
0 siblings, 1 reply; 18+ messages in thread
From: Timo Rothenpieler @ 2024-02-14 15:08 UTC (permalink / raw)
To: ffmpeg-devel
On 14/02/2024 02:55, Dmitrii Ovchinnikov wrote:
> From: Evgeny Pavlov <lucenticus@gmail.com>
>
> This commit adds two AMF filters: scale_amf & scale_amf_hq.
> Both filters are using AMF hardware acceleration.
> scale_amf supports simple scaling algorithms & color conversion.
> scale_amf_hq supports advanced scaling algorithms & might be used
> for upscaling only.
Haven't looked at the patch yet, but can't this be one filter, and it
picks the best possible method depending on options/inputs/whatever?
_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [FFmpeg-devel] [PATCH 8/9] avfilter/scale_amf: Add AMF HW scaler & color converter
2024-02-14 15:08 ` Timo Rothenpieler
@ 2024-02-14 15:27 ` Evgeny Pavlov
2024-02-14 16:26 ` Dennis Mungai
0 siblings, 1 reply; 18+ messages in thread
From: Evgeny Pavlov @ 2024-02-14 15:27 UTC (permalink / raw)
To: FFmpeg development discussions and patches
On Wed, Feb 14, 2024 at 4:08 PM Timo Rothenpieler <timo@rothenpieler.org>
wrote:
> On 14/02/2024 02:55, Dmitrii Ovchinnikov wrote:
> > From: Evgeny Pavlov <lucenticus@gmail.com>
> >
> > This commit adds two AMF filters: scale_amf & scale_amf_hq.
> > Both filters are using AMF hardware acceleration.
> > scale_amf supports simple scaling algorithms & color conversion.
> > scale_amf_hq supports advanced scaling algorithms & might be used
> > for upscaling only.
>
> Haven't looked at the patch yet, but can't this be one filter, and it
> picks the best possible method depending on options/inputs/whatever?
> _______________________________________________
> ffmpeg-devel mailing list
> ffmpeg-devel@ffmpeg.org
> https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
>
> To unsubscribe, visit link above, or email
> ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
>
AMF has 2 separate components for color conversion + simple scaling
(VideoConverter) and for advanced scaling (HQScaler).
We've got a recommendation from the AMD AMF team to implement these
components as separate ffmpeg filters.
_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [FFmpeg-devel] [PATCH 8/9] avfilter/scale_amf: Add AMF HW scaler & color converter
2024-02-14 15:27 ` Evgeny Pavlov
@ 2024-02-14 16:26 ` Dennis Mungai
2024-02-19 11:18 ` Evgeny Pavlov
0 siblings, 1 reply; 18+ messages in thread
From: Dennis Mungai @ 2024-02-14 16:26 UTC (permalink / raw)
To: FFmpeg development discussions and patches
On Wed, 14 Feb 2024, 18:28 Evgeny Pavlov, <lucenticus@gmail.com> wrote:
> On Wed, Feb 14, 2024 at 4:08 PM Timo Rothenpieler <timo@rothenpieler.org>
> wrote:
>
> > On 14/02/2024 02:55, Dmitrii Ovchinnikov wrote:
> > > From: Evgeny Pavlov <lucenticus@gmail.com>
> > >
> > > This commit adds two AMF filters: scale_amf & scale_amf_hq.
> > > Both filters are using AMF hardware acceleration.
> > > scale_amf supports simple scaling algorithms & color conversion.
> > > scale_amf_hq supports advanced scaling algorithms & might be used
> > > for upscaling only.
> >
> > Haven't looked at the patch yet, but can't this be one filter, and it
> > picks the best possible method depending on options/inputs/whatever?
> > _______________________________________________
> > ffmpeg-devel mailing list
> > ffmpeg-devel@ffmpeg.org
> > https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
> >
> > To unsubscribe, visit link above, or email
> > ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
> >
>
> AMF has 2 separate components for color conversion + simple scaling
> (VideoConverter) and for advanced scaling (HQScaler).
> We've got a recommendation from the AMD AMF team to implement these
> components as separate ffmpeg filters.
>
Still, this should be a single Video Post-Processing (VPP) -style filter,
exposing these scaling and video post-processing options as tunables
therein.
A perfect example of such an implementation that excels in such an
abstraction is intel's vpp_qsv filter, from which multiple compute, color
space conversion methods, tonemapping, etc are made available through
tunables.
Another benefit of such an abstraction would be that re-using this filter
on other GPU derivatives of the discrete silicon, eg in smaller IGPs
sharing these offload blocks would be auto-detecting logic to ensure that
even with defaults, the filter chains run.
Taking another example from Intel, they have a full H/W path for low power
encode and Post-Processing that can be automatically toggled on by specific
filter options without user intervention, guaranteeing runtime safety for
the same command(s) even on newer GPUs.
Food for thought.
>
_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [FFmpeg-devel] [PATCH 8/9] avfilter/scale_amf: Add AMF HW scaler & color converter
2024-02-14 16:26 ` Dennis Mungai
@ 2024-02-19 11:18 ` Evgeny Pavlov
2024-02-19 14:43 ` Dennis Mungai
0 siblings, 1 reply; 18+ messages in thread
From: Evgeny Pavlov @ 2024-02-19 11:18 UTC (permalink / raw)
To: FFmpeg development discussions and patches
On Wed, Feb 14, 2024 at 5:26 PM Dennis Mungai <dmngaie@gmail.com> wrote:
> On Wed, 14 Feb 2024, 18:28 Evgeny Pavlov, <lucenticus@gmail.com> wrote:
>
> > On Wed, Feb 14, 2024 at 4:08 PM Timo Rothenpieler <timo@rothenpieler.org
> >
> > wrote:
> >
> > > On 14/02/2024 02:55, Dmitrii Ovchinnikov wrote:
> > > > From: Evgeny Pavlov <lucenticus@gmail.com>
> > > >
> > > > This commit adds two AMF filters: scale_amf & scale_amf_hq.
> > > > Both filters are using AMF hardware acceleration.
> > > > scale_amf supports simple scaling algorithms & color conversion.
> > > > scale_amf_hq supports advanced scaling algorithms & might be used
> > > > for upscaling only.
> > >
> > > Haven't looked at the patch yet, but can't this be one filter, and it
> > > picks the best possible method depending on options/inputs/whatever?
> > > _______________________________________________
> > > ffmpeg-devel mailing list
> > > ffmpeg-devel@ffmpeg.org
> > > https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
> > >
> > > To unsubscribe, visit link above, or email
> > > ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
> > >
> >
> > AMF has 2 separate components for color conversion + simple scaling
> > (VideoConverter) and for advanced scaling (HQScaler).
> > We've got a recommendation from the AMD AMF team to implement these
> > components as separate ffmpeg filters.
> >
>
>
> Still, this should be a single VIdeo Post-Processing (VPP) -style filter,
> exposing these scaling and video post processing options as tunables
> therein.
>
> A perfect example of such an implementation that excels in such an
> abstraction is intel's vpp_qsv filter, from which multiple compute, color
> space conversion methods, tonemapping, etc are made available through
> tunables.
>
> Another benefit of such an abstraction would be that re-using this filter
> on other GPU derivatives of the discrete silicon, eg in smaller IGPs
> sharing these offload blocks would be auto-detecting logic to ensure that
> even with defaults, the filter chains run.
>
> Taking another example from Intel, they have a full H/W path for low power
> encode and Post-Processing that can be automatically toggled on by specific
> filter options without user intervention, guaranteeing runtime safety for
> the same command(s) even on newer GPUs.
>
> Food for thought.
>
> >
> _______________________________________________
> ffmpeg-devel mailing list
> ffmpeg-devel@ffmpeg.org
> https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
>
> To unsubscribe, visit link above, or email
> ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
>
AMF Video Converter (which is used in scale_amf) is the full analog of
Intel's VPP. And in the future, it may have an option for dedicated HW
block in GPU. It has scaling, color conversion, tonemapping, HDR <> SDR
conversion features.
But HQ Scaler brings advanced scaling algorithms that are similar to AMD
FSR (https://www.amd.com/en/technologies/fidelityfx-super-resolution ) and
don’t have color conversion and cannot be mapped to HW blocks.
Do you think that it would be better to rename scale_amf to vpp_amf or some
other name? Maybe we should rename scale_amf_hq as well for better
usability (e.g. sr_amf)?
_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [FFmpeg-devel] [PATCH 8/9] avfilter/scale_amf: Add AMF HW scaler & color converter
2024-02-19 11:18 ` Evgeny Pavlov
@ 2024-02-19 14:43 ` Dennis Mungai
0 siblings, 0 replies; 18+ messages in thread
From: Dennis Mungai @ 2024-02-19 14:43 UTC (permalink / raw)
To: FFmpeg development discussions and patches
On Mon, 19 Feb 2024, 14:18 Evgeny Pavlov, <lucenticus@gmail.com> wrote:
> On Wed, Feb 14, 2024 at 5:26 PM Dennis Mungai <dmngaie@gmail.com> wrote:
>
> > On Wed, 14 Feb 2024, 18:28 Evgeny Pavlov, <lucenticus@gmail.com> wrote:
> >
> > > On Wed, Feb 14, 2024 at 4:08 PM Timo Rothenpieler <
> timo@rothenpieler.org
> > >
> > > wrote:
> > >
> > > > On 14/02/2024 02:55, Dmitrii Ovchinnikov wrote:
> > > > > From: Evgeny Pavlov <lucenticus@gmail.com>
> > > > >
> > > > > This commit adds two AMF filters: scale_amf & scale_amf_hq.
> > > > > Both filters are using AMF hardware acceleration.
> > > > > scale_amf supports simple scaling algorithms & color conversion.
> > > > > scale_amf_hq supports advanced scaling algorithms & might be used
> > > > > for upscaling only.
> > > >
> > > > Haven't looked at the patch yet, but can't this be one filter, and it
> > > > picks the best possible method depending on options/inputs/whatever?
> > > > _______________________________________________
> > > > ffmpeg-devel mailing list
> > > > ffmpeg-devel@ffmpeg.org
> > > > https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
> > > >
> > > > To unsubscribe, visit link above, or email
> > > > ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
> > > >
> > >
> > > AMF has 2 separate components for color conversion + simple scaling
> > > (VideoConverter) and for advanced scaling (HQScaler).
> > > We've got a recommendation from the AMD AMF team to implement these
> > > components as separate ffmpeg filters.
> > >
> >
> >
> > Still, this should be a single VIdeo Post-Processing (VPP) -style filter,
> > exposing these scaling and video post processing options as tunables
> > therein.
> >
> > A perfect example of such an implementation that excels in such an
> > abstraction is intel's vpp_qsv filter, from which multiple compute, color
> > space conversion methods, tonemapping, etc are made available through
> > tunables.
> >
> > Another benefit of such an abstraction would be that re-using this filter
> > on other GPU derivatives of the discrete silicon, eg in smaller IGPs
> > sharing these offload blocks would be auto-detecting logic to ensure that
> > even with defaults, the filter chains run.
> >
> > Taking another example from Intel, they have a full H/W path for low
> power
> > encode and Post-Processing that can be automatically toggled on by
> specific
> > filter options without user intervention, guaranteeing runtime safety for
> > the same command(s) even on newer GPUs.
> >
> > Food for thought.
> >
> > >
> > _______________________________________________
> > ffmpeg-devel mailing list
> > ffmpeg-devel@ffmpeg.org
> > https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
> >
> > To unsubscribe, visit link above, or email
> > ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
> >
>
> AMF Video Converter (which is used in scale_amf) is the full analog of
> Intel's VPP. And in the future, it may have an option for dedicated HW
> block in GPU. It has scaling, color conversion, tonemapping, HDR <> SDR
> conversion features.
> But HQ Scaler brings advanced scaling algorithms that are similar to AMD
> FSR (https://www.amd.com/en/technologies/fidelityfx-super-resolution ) and
> don’t have color conversion and cannot be mapped to HW blocks.
> Do you think that it would be better to rename scale_amf to vpp_amf or some
> other name? Maybe we should rename scale_amf_hq as well for better
> usability (e.g. sr_amf)?
>
Evgeny,
That's an excellent proposal.
With the renaming of scale_amf to vpp_amf and the extra filter abstracting
FSR style upscaling stuff to sr_amf, it becomes absolutely clear what each
filter is meant to accomplish.
>
_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
^ permalink raw reply [flat|nested] 18+ messages in thread
* [FFmpeg-devel] [PATCH 9/9] doc/filters: Add documentation for AMF filters
2024-02-14 1:55 [FFmpeg-devel] [PATCH 1/9] libavutil: add hwcontext_amf Dmitrii Ovchinnikov
` (6 preceding siblings ...)
2024-02-14 1:55 ` [FFmpeg-devel] [PATCH 8/9] avfilter/scale_amf: Add AMF HW scaler & color converter Dmitrii Ovchinnikov
@ 2024-02-14 1:55 ` Dmitrii Ovchinnikov
2024-02-14 2:56 ` [FFmpeg-devel] [PATCH 1/9] libavutil: add hwcontext_amf James Almer
2024-02-14 20:56 ` Mark Thompson
9 siblings, 0 replies; 18+ messages in thread
From: Dmitrii Ovchinnikov @ 2024-02-14 1:55 UTC (permalink / raw)
To: ffmpeg-devel; +Cc: Evgeny Pavlov
From: Evgeny Pavlov <lucenticus@gmail.com>
Signed-off-by: Evgeny Pavlov <lucenticus@gmail.com>
---
doc/filters.texi | 238 +++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 238 insertions(+)
diff --git a/doc/filters.texi b/doc/filters.texi
index e0436a5755..4c5b9c1f63 100644
--- a/doc/filters.texi
+++ b/doc/filters.texi
@@ -21330,6 +21330,244 @@ If the specified expression is not valid, it is kept at its current
value.
@end table
+@anchor{scale_amf}
+@section scale_amf
+
+Scale (resize) and convert colorspace, transfer characteristics or color primaries for the input video, using AMD Advanced Media Framework library for hardware acceleration.
+Setting the output width and height works in the same way as for the @ref{scale} filter.
+
+The filter accepts the following options:
+@table @option
+@item w
+@item h
+Set the output video dimension expression. Default value is the input dimension.
+
+Allows for the same expressions as the @ref{scale} filter.
+
+@item scale_type
+Sets the algorithm used for scaling:
+
+@table @var
+@item bilinear
+Bilinear
+
+This is the default.
+
+@item bicubic
+Bicubic
+
+@end table
+
+@item format
+Controls the output pixel format. By default, or if none is specified, the input
+pixel format is used.
+
+
+@item force_original_aspect_ratio
+@item force_divisible_by
+Work the same as the identical @ref{scale} filter options.
+
+@anchor{color_profile}
+@item color_profile
+Specify all color properties at once.
+
+The accepted values are:
+@table @samp
+@item bt601
+BT.601
+
+@item bt709
+BT.709
+
+@item bt2020
+BT.2020
+
+@end table
+
+@anchor{trc}
+@item trc
+Specify output transfer characteristics.
+
+The accepted values are:
+@table @samp
+@item bt709
+BT.709
+
+@item gamma22
+Constant gamma of 2.2
+
+@item gamma28
+Constant gamma of 2.8
+
+@item smpte170m
+SMPTE-170M
+
+@item smpte240m
+SMPTE-240M
+
+@item linear
+Linear
+
+@item log
+LOG
+
+@item log-sqrt
+LOG_SQRT
+
+@item iec61966-2-4
+iec61966-2-4
+
+@item bt1361-ecg
+BT1361_ECG
+
+@item iec61966-2-1
+iec61966-2-1
+
+@item bt2020-10
+BT.2020 for 10-bits content
+
+@item bt2020-12
+BT.2020 for 12-bits content
+
+@item smpte2084
+SMPTE2084
+
+@item smpte428
+SMPTE428
+
+@item arib-std-b67
+ARIB_STD_B67
+
+@end table
+
+@anchor{primaries}
+@item primaries
+Specify output color primaries.
+
+The accepted values are:
+@table @samp
+@item bt709
+BT.709
+
+@item bt470m
+BT.470M
+
+@item bt470bg
+BT.470BG or BT.601-6 625
+
+@item smpte170m
+SMPTE-170M or BT.601-6 525
+
+@item smpte240m
+SMPTE-240M
+
+@item film
+film
+
+@item bt2020
+BT.2020
+
+@item smpte428
+SMPTE-428
+
+@item smpte431
+SMPTE-431
+
+@item smpte432
+SMPTE-432
+
+@item jedec-p22
+JEDEC P22 phosphors
+
+@end table
+@end table
+
+@subsection Examples
+
+@itemize
+@item
+Scale input to 720p, keeping aspect ratio and ensuring the output is yuv420p.
+@example
+scale_amf=-2:720:format=yuv420p
+@end example
+
+@item
+Upscale to 4K and change color profile to bt2020.
+@example
+scale_amf=4096:2160:color_profile=bt2020
+@end example
+@end itemize
+
+@anchor{scale_amf_hq}
+@section scale_amf_hq
+
+Upscale (increase the size of) the input video using the AMD Advanced Media Framework library for hardware acceleration.
+Use advanced algorithms for upscaling with higher output quality.
+Setting the output width and height works in the same way as for the @ref{scale} filter.
+
+The filter accepts the following options:
+@table @option
+@item w
+@item h
+Set the output video dimension expression. Default value is the input dimension.
+
+Allows for the same expressions as the @ref{scale} filter.
+
+@item algorithm
+Sets the algorithm used for scaling:
+
+@table @var
+@item bilinear
+Bilinear
+
+@item bicubic
+Bicubic
+
+@item sr1-0
+Video SR1.0
+This is the default value.
+
+@item point
+Point
+
+@item sr1-1
+Video SR1.1
+
+@end table
+
+@item sharpness
+Control HQ scaler sharpening. The value is a float in the range of [0.0, 2.0].
+If unset, the AMF component's own default is used.
+
+@item format
+Controls the output pixel format. By default, or if none is specified, the input
+pixel format is used.
+
+@item keep-ratio
+Force the scaler to keep the aspect ratio of the input image when the output size has a different aspect ratio.
+Default value is false.
+
+@item fill
+Specifies whether the output image outside the region of interest,
+which does not fill the entire output surface should be filled with a solid color.
+
+@end table
+
+@subsection Examples
+
+@itemize
+@item
+Scale input to 720p, keeping aspect ratio and ensuring the output is yuv420p.
+@example
+scale_amf_hq=-2:720:format=yuv420p
+@end example
+
+@item
+Upscale to 4K with algorithm video SR1.1.
+@example
+scale_amf_hq=4096:2160:algorithm=sr1-1
+@end example
+@end itemize
+
@anchor{scale_cuda}
@section scale_cuda
--
2.38.1.windows.1
_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [FFmpeg-devel] [PATCH 1/9] libavutil: add hwcontext_amf.
2024-02-14 1:55 [FFmpeg-devel] [PATCH 1/9] libavutil: add hwcontext_amf Dmitrii Ovchinnikov
` (7 preceding siblings ...)
2024-02-14 1:55 ` [FFmpeg-devel] [PATCH 9/9] doc/filters: Add documentation for AMF filters Dmitrii Ovchinnikov
@ 2024-02-14 2:56 ` James Almer
2024-02-14 16:48 ` Dmitrii Ovchinnikov
2024-02-14 20:56 ` Mark Thompson
9 siblings, 1 reply; 18+ messages in thread
From: James Almer @ 2024-02-14 2:56 UTC (permalink / raw)
To: ffmpeg-devel
On 2/13/2024 10:55 PM, Dmitrii Ovchinnikov wrote:
> +const FormatMap format_map[] =
> +{
> + { AV_PIX_FMT_NONE, AMF_SURFACE_UNKNOWN },
> + { AV_PIX_FMT_NV12, AMF_SURFACE_NV12 },
> + { AV_PIX_FMT_BGR0, AMF_SURFACE_BGRA },
> + { AV_PIX_FMT_RGB0, AMF_SURFACE_RGBA },
> + { AV_PIX_FMT_BGRA, AMF_SURFACE_BGRA },
> + { AV_PIX_FMT_ARGB, AMF_SURFACE_ARGB },
> + { AV_PIX_FMT_RGBA, AMF_SURFACE_RGBA },
> + { AV_PIX_FMT_GRAY8, AMF_SURFACE_GRAY8 },
> + { AV_PIX_FMT_YUV420P, AMF_SURFACE_YUV420P },
> + { AV_PIX_FMT_YUYV422, AMF_SURFACE_YUY2 },
> + { AV_PIX_FMT_P010, AMF_SURFACE_P010 },
> + { AV_PIX_FMT_YUV420P10, AMF_SURFACE_P010 },
yuv420p10 is not equal to p010.
> + { AV_PIX_FMT_YUV420P12, AMF_SURFACE_P012 },
> + { AV_PIX_FMT_YUV420P12, AMF_SURFACE_P012 },
Why the duplication? And there's AV_PIX_FMT_P012.
> + { AV_PIX_FMT_YUV420P16, AMF_SURFACE_P016 },
AV_PIX_FMT_P016?
> + { AV_PIX_FMT_YUV422P10LE, AMF_SURFACE_Y210 },
AV_PIX_FMT_Y210?
> + { AV_PIX_FMT_YUV444P10LE, AMF_SURFACE_Y416 },
> +};
[...]
> diff --git a/libavutil/hwcontext_amf.h b/libavutil/hwcontext_amf.h
> new file mode 100644
> index 0000000000..0161b9a29c
> --- /dev/null
> +++ b/libavutil/hwcontext_amf.h
> @@ -0,0 +1,105 @@
> +/*
> + * This file is part of FFmpeg.
> + *
> + * FFmpeg is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU Lesser General Public
> + * License as published by the Free Software Foundation; either
> + * version 2.1 of the License, or (at your option) any later version.
> + *
> + * FFmpeg is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
> + * Lesser General Public License for more details.
> + *
> + * You should have received a copy of the GNU Lesser General Public
> + * License along with FFmpeg; if not, write to the Free Software
> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
> + */
> +
> +
> +#ifndef AVUTIL_HWCONTEXT_AMF_H
> +#define AVUTIL_HWCONTEXT_AMF_H
> +#include <AMF/core/Factory.h>
> +#include <AMF/core/Context.h>
> +#include <AMF/core/Trace.h>
> +#include "pixfmt.h"
> +
> +#include "libavformat/avformat.h"
lavu can't depend on lavf.
> +#include "libavutil/hwcontext.h"
> +
> +#define FFMPEG_AMF_WRITER_ID L"ffmpeg_amf"
Bad namespace. And this doesn't need to be public.
> +
> +typedef struct AmfTraceWriter {
Ditto namespace. Does this need to be public?
> + AMFTraceWriterVtbl *vtbl;
> + void *avctx;
> + void *avcl;
> +} AmfTraceWriter;
> +
> +typedef struct AVAMFDeviceContextInternal {
If this is internal, then define it in an internal header and put only
the typedef here.
> + amf_handle library; ///< handle to DLL library
> + AMFFactory *factory; ///< pointer to AMF factory
> + AMFDebug *debug; ///< pointer to AMF debug interface
> + AMFTrace *trace; ///< pointer to AMF trace interface
> +
> + amf_uint64 version; ///< version of AMF runtime
> + AMFContext *context; ///< AMF context
> + AMF_MEMORY_TYPE mem_type;
> +} AVAMFDeviceContextInternal;
> +
> +/**
> + * This struct is allocated as AVHWDeviceContext.hwctx
> + */
> +
> +typedef struct AVAMFDeviceContext {
> + AVBufferRef *internal;
AVAMFDeviceContextInternal *internal. Can't say if this is needed here
at all or not.
> +} AVAMFDeviceContext;
> +
> +typedef struct AMFFramesContext {
Again namespace.
> + AMFSurface * surfaces;
> + int nb_surfaces;
> +} AMFFramesContext;
> +
> +/**
> +* Error handling helper
> +*/
> +#define AMF_RETURN_IF_FALSE(avctx, exp, ret_value, /*message,*/ ...) \
> + if (!(exp)) { \
> + av_log(avctx, AV_LOG_ERROR, __VA_ARGS__); \
> + return ret_value; \
> + }
> +
> +#define AMF_GOTO_FAIL_IF_FALSE(avctx, exp, ret_value, /*message,*/ ...) \
> + if (!(exp)) { \
> + av_log(avctx, AV_LOG_ERROR, __VA_ARGS__); \
> + ret = ret_value; \
> + goto fail; \
> + }
> +
> +#define AMF_TIME_BASE_Q (AVRational){1, AMF_SECOND}
These should probably be internal.
> +
> +typedef struct FormatMap {
> + enum AVPixelFormat av_format;
> + enum AMF_SURFACE_FORMAT amf_format;
> +} FormatMap;
> +
> +extern const FormatMap format_map[];
This doesn't need to be in a public header.
> +enum AMF_SURFACE_FORMAT av_amf_av_to_amf_format(enum AVPixelFormat fmt);
Namespace.
> +enum AVPixelFormat av_amf_to_av_format(enum AMF_SURFACE_FORMAT fmt);
> +extern AmfTraceWriter av_amf_trace_writer;
Same as AmfTraceWriter, this probably doesn't need to be public.
> +
> +int av_amf_context_init(AVAMFDeviceContextInternal* internal, void* avcl);
> +int av_amf_load_library(AVAMFDeviceContextInternal* internal, void* avcl);
> +int av_amf_create_context( AVAMFDeviceContextInternal * internal,
> + void* avcl,
> + const char *device,
> + AVDictionary *opts, int flags);
> +int av_amf_context_internal_create(AVAMFDeviceContextInternal * internal,
> + void* avcl,
> + const char *device,
> + AVDictionary *opts, int flags);
> +void av_amf_context_internal_free(void *opaque, uint8_t *data);
> +int av_amf_context_derive(AVAMFDeviceContextInternal * internal,
> + AVHWDeviceContext *child_device_ctx, AVDictionary *opts,
> + int flags);
These should probably take a AVAMFDeviceContext pointer.
> +
> +#endif /* AVUTIL_HWCONTEXT_AMF_H */
> diff --git a/libavutil/hwcontext_internal.h b/libavutil/hwcontext_internal.h
> index 4df516ee6a..48d2dc012c 100644
> --- a/libavutil/hwcontext_internal.h
> +++ b/libavutil/hwcontext_internal.h
> @@ -175,5 +175,6 @@ extern const HWContextType ff_hwcontext_type_vdpau;
> extern const HWContextType ff_hwcontext_type_videotoolbox;
> extern const HWContextType ff_hwcontext_type_mediacodec;
> extern const HWContextType ff_hwcontext_type_vulkan;
> +extern const HWContextType ff_hwcontext_type_amf;
>
> #endif /* AVUTIL_HWCONTEXT_INTERNAL_H */
> diff --git a/libavutil/pixdesc.c b/libavutil/pixdesc.c
> index f6d4d01460..ebc79b0c74 100644
> --- a/libavutil/pixdesc.c
> +++ b/libavutil/pixdesc.c
> @@ -2125,6 +2125,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
> .name = "cuda",
> .flags = AV_PIX_FMT_FLAG_HWACCEL,
> },
> + [AV_PIX_FMT_AMF] = {
> + .name = "amf",
> + .flags = AV_PIX_FMT_FLAG_HWACCEL,
> + },
> [AV_PIX_FMT_AYUV64LE] = {
> .name = "ayuv64le",
> .nb_components = 4,
> diff --git a/libavutil/pixfmt.h b/libavutil/pixfmt.h
> index 9c87571f49..06459be62c 100644
> --- a/libavutil/pixfmt.h
> +++ b/libavutil/pixfmt.h
> @@ -251,6 +251,10 @@ enum AVPixelFormat {
> * exactly as for system memory frames.
> */
> AV_PIX_FMT_CUDA,
> + /**
> + * HW acceleration through AMF. data[3] contain AMFSurface pointer
> + */
> + AV_PIX_FMT_AMF,
This is an ABI break, it needs to be the last entry before AV_PIX_FMT_NB.
>
> AV_PIX_FMT_0RGB, ///< packed RGB 8:8:8, 32bpp, XRGBXRGB... X=unused/undefined
> AV_PIX_FMT_RGB0, ///< packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined
_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [FFmpeg-devel] [PATCH 1/9] libavutil: add hwcontext_amf.
2024-02-14 2:56 ` [FFmpeg-devel] [PATCH 1/9] libavutil: add hwcontext_amf James Almer
@ 2024-02-14 16:48 ` Dmitrii Ovchinnikov
0 siblings, 0 replies; 18+ messages in thread
From: Dmitrii Ovchinnikov @ 2024-02-14 16:48 UTC (permalink / raw)
To: FFmpeg development discussions and patches
>>AVAMFDeviceContextInternal *internal. Can't say if this is needed here
>>at all or not.
I have separated AVAMFDeviceContextInternal into its own structure,
since it is also used by the decoder and the encoder, even when
the amf hwaccel is not used.
>>These should probably take a AVAMFDeviceContext pointer.
They are also used by the decoder and the encoder for initialization,
even when the amf hwaccel is not used.
I did this to avoid massive code duplication.
If you have any thoughts on how to improve this, it would be great.
For the rest of the comments, everything is clear;
I will correct them and send an updated version.
--
Sincerely, Ovchinnikov D.A.
_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
^ permalink raw reply [flat|nested] 18+ messages in thread
* Re: [FFmpeg-devel] [PATCH 1/9] libavutil: add hwcontext_amf.
2024-02-14 1:55 [FFmpeg-devel] [PATCH 1/9] libavutil: add hwcontext_amf Dmitrii Ovchinnikov
` (8 preceding siblings ...)
2024-02-14 2:56 ` [FFmpeg-devel] [PATCH 1/9] libavutil: add hwcontext_amf James Almer
@ 2024-02-14 20:56 ` Mark Thompson
9 siblings, 0 replies; 18+ messages in thread
From: Mark Thompson @ 2024-02-14 20:56 UTC (permalink / raw)
To: ffmpeg-devel
On 14/02/2024 01:55, Dmitrii Ovchinnikov wrote:
> Adds hwcontext_amf, which allows the use of a shared AMF
> context for the encoder, decoder and AMF-based filters,
> without copying to host memory.
> It will also allow you to use some optimizations in
> the interaction of components (for example, SAV) and make a more
> manageable and optimal setup for using GPU devices with AMF
> in the case of a fully AMF pipeline.
> It will provide a significant performance uplift when a full AMF pipeline
> with filters is used.
>
> We also plan to add a Compression artefact removal filter in the near future.
> ---
> libavutil/Makefile | 3 +
> libavutil/hwcontext.c | 4 +
> libavutil/hwcontext.h | 1 +
> libavutil/hwcontext_amf.c | 580 +++++++++++++++++++++++++++++++++
> libavutil/hwcontext_amf.h | 105 ++++++
> libavutil/hwcontext_internal.h | 1 +
> libavutil/pixdesc.c | 4 +
> libavutil/pixfmt.h | 4 +
> 8 files changed, 702 insertions(+)
> create mode 100644 libavutil/hwcontext_amf.c
> create mode 100644 libavutil/hwcontext_amf.h
>
> ...
> diff --git a/libavutil/hwcontext_amf.h b/libavutil/hwcontext_amf.h
> new file mode 100644
> index 0000000000..0161b9a29c
> --- /dev/null
> +++ b/libavutil/hwcontext_amf.h
> @@ -0,0 +1,105 @@
> +/*
> + * This file is part of FFmpeg.
> + *
> + * FFmpeg is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU Lesser General Public
> + * License as published by the Free Software Foundation; either
> + * version 2.1 of the License, or (at your option) any later version.
> + *
> + * FFmpeg is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
> + * Lesser General Public License for more details.
> + *
> + * You should have received a copy of the GNU Lesser General Public
> + * License along with FFmpeg; if not, write to the Free Software
> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
> + */
> +
> +
> +#ifndef AVUTIL_HWCONTEXT_AMF_H
> +#define AVUTIL_HWCONTEXT_AMF_H
> +#include <AMF/core/Factory.h>
> +#include <AMF/core/Context.h>
> +#include <AMF/core/Trace.h>
> +#include "pixfmt.h"
> +
> +#include "libavformat/avformat.h"
> +#include "libavutil/hwcontext.h"
> +
> +#define FFMPEG_AMF_WRITER_ID L"ffmpeg_amf"
> +
> +typedef struct AmfTraceWriter {
> + AMFTraceWriterVtbl *vtbl;
> + void *avctx;
> + void *avcl;
> +} AmfTraceWriter;
> +
> +typedef struct AVAMFDeviceContextInternal {
> + amf_handle library; ///< handle to DLL library
> + AMFFactory *factory; ///< pointer to AMF factory
> + AMFDebug *debug; ///< pointer to AMF debug interface
> + AMFTrace *trace; ///< pointer to AMF trace interface
> +
> + amf_uint64 version; ///< version of AMF runtime
> + AMFContext *context; ///< AMF context
> + AMF_MEMORY_TYPE mem_type;
> +} AVAMFDeviceContextInternal;
> +
> +/**
> + * This struct is allocated as AVHWDeviceContext.hwctx
> + */
> +
> +typedef struct AVAMFDeviceContext {
> + AVBufferRef *internal;
> +} AVAMFDeviceContext;
> +
> +typedef struct AMFFramesContext {
> + AMFSurface * surfaces;
> + int nb_surfaces;
> +} AMFFramesContext;
> +
> +/**
> +* Error handling helper
> +*/
> +#define AMF_RETURN_IF_FALSE(avctx, exp, ret_value, /*message,*/ ...) \
> + if (!(exp)) { \
> + av_log(avctx, AV_LOG_ERROR, __VA_ARGS__); \
> + return ret_value; \
> + }
> +
> +#define AMF_GOTO_FAIL_IF_FALSE(avctx, exp, ret_value, /*message,*/ ...) \
> + if (!(exp)) { \
> + av_log(avctx, AV_LOG_ERROR, __VA_ARGS__); \
> + ret = ret_value; \
> + goto fail; \
> + }
> +
> +#define AMF_TIME_BASE_Q (AVRational){1, AMF_SECOND}
> +
> +typedef struct FormatMap {
> + enum AVPixelFormat av_format;
> + enum AMF_SURFACE_FORMAT amf_format;
> +} FormatMap;
> +
> +extern const FormatMap format_map[];
> +enum AMF_SURFACE_FORMAT av_amf_av_to_amf_format(enum AVPixelFormat fmt);
> +enum AVPixelFormat av_amf_to_av_format(enum AMF_SURFACE_FORMAT fmt);
> +extern AmfTraceWriter av_amf_trace_writer;
> +
> +int av_amf_context_init(AVAMFDeviceContextInternal* internal, void* avcl);
> +int av_amf_load_library(AVAMFDeviceContextInternal* internal, void* avcl);
> +int av_amf_create_context( AVAMFDeviceContextInternal * internal,
> + void* avcl,
> + const char *device,
> + AVDictionary *opts, int flags);
> +int av_amf_context_internal_create(AVAMFDeviceContextInternal * internal,
> + void* avcl,
> + const char *device,
> + AVDictionary *opts, int flags);
> +void av_amf_context_internal_free(void *opaque, uint8_t *data);
> +int av_amf_context_derive(AVAMFDeviceContextInternal * internal,
> + AVHWDeviceContext *child_device_ctx, AVDictionary *opts,
> + int flags);
> +
> +#endif /* AVUTIL_HWCONTEXT_AMF_H */
We need to sort out the content of the public header file before doing anything else. It needs to contain exactly what a user has to see to set up an AMF hwcontext instance:
* A device hwctx structure, with notes on how to set the fields to put an existing device into the device context before calling av_hwdevice_ctx_init().
* Notes on what the buffer pool in a frames context contains, which might involve another structure to act as the buffer entry.
* Optionally a frame hwctx structure, with notes on how to set the fields when putting a pool of existing frames into the frame context before calling av_hwframe_ctx_init() (omit if it would contain nothing).
* Optionally a hwconfig structure, with notes on how to set the fields when requesting the capabilities of some particular operation (omit if all operations have identical capabilities).
* If the pixfmt needs to define additional structures, they go here as well.
It shouldn't contain anything else unless there are hard ABI requirements (e.g. to allocate some sort of non-fixed structure).
> diff --git a/libavutil/hwcontext_internal.h b/libavutil/hwcontext_internal.h
> index 4df516ee6a..48d2dc012c 100644
> --- a/libavutil/hwcontext_internal.h
> +++ b/libavutil/hwcontext_internal.h
> @@ -175,5 +175,6 @@ extern const HWContextType ff_hwcontext_type_vdpau;
> extern const HWContextType ff_hwcontext_type_videotoolbox;
> extern const HWContextType ff_hwcontext_type_mediacodec;
> extern const HWContextType ff_hwcontext_type_vulkan;
> +extern const HWContextType ff_hwcontext_type_amf;
>
> #endif /* AVUTIL_HWCONTEXT_INTERNAL_H */
> diff --git a/libavutil/pixdesc.c b/libavutil/pixdesc.c
> index f6d4d01460..ebc79b0c74 100644
> --- a/libavutil/pixdesc.c
> +++ b/libavutil/pixdesc.c
> @@ -2125,6 +2125,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
> .name = "cuda",
> .flags = AV_PIX_FMT_FLAG_HWACCEL,
> },
> + [AV_PIX_FMT_AMF] = {
> + .name = "amf",
> + .flags = AV_PIX_FMT_FLAG_HWACCEL,
> + },
> [AV_PIX_FMT_AYUV64LE] = {
> .name = "ayuv64le",
> .nb_components = 4,
> diff --git a/libavutil/pixfmt.h b/libavutil/pixfmt.h
> index 9c87571f49..06459be62c 100644
> --- a/libavutil/pixfmt.h
> +++ b/libavutil/pixfmt.h
> @@ -251,6 +251,10 @@ enum AVPixelFormat {
> * exactly as for system memory frames.
> */
> AV_PIX_FMT_CUDA,
> + /**
> + * HW acceleration through AMF. data[3] contain AMFSurface pointer
> + */
> + AV_PIX_FMT_AMF,
IMO naming pixel formats to be identical to the API hosting them was a mistake. It's an AMFSurface, call the format AV_PIX_FMT_AMF_SURFACE.
Also, there is no reason to copy old formats by putting your pointer in data[3]; put it in data[0].
Thanks,
- Mark
_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
^ permalink raw reply [flat|nested] 18+ messages in thread