Git Inbox Mirror of the ffmpeg-devel mailing list - see https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
 help / color / mirror / Atom feed
* [FFmpeg-devel] [PATCH 01/10, v3] avutil: add hwcontext_amf.
@ 2024-05-30 13:08 Dmitrii Ovchinnikov
  2024-05-30 13:08 ` [FFmpeg-devel] [PATCH 02/10, v3] avcodec: add amfdec Dmitrii Ovchinnikov
                   ` (11 more replies)
  0 siblings, 12 replies; 17+ messages in thread
From: Dmitrii Ovchinnikov @ 2024-05-30 13:08 UTC (permalink / raw)
  To: ffmpeg-devel; +Cc: Dmitrii Ovchinnikov

Adds hwcontext_amf, which allows the use of a shared AMF
context for the encoder, decoder and AMF-based filters,
without copying to host memory.
It will also allow you to use some optimisations in
the interaction of components (for example, SAV) and make a more
manageable and optimal setup for using GPU devices with AMF
in the case of a fully AMF pipeline.
It will be a significant performance uplift when full AMF pipeline
with filters is used.

We also plan to add a Compression artefact removal filter in the near future.
v2: cleanup header files
v3: an unnecessary class has been removed.
---
 libavutil/Makefile                 |   4 +
 libavutil/hwcontext.c              |   4 +
 libavutil/hwcontext.h              |   1 +
 libavutil/hwcontext_amf.c          | 585 +++++++++++++++++++++++++++++
 libavutil/hwcontext_amf.h          |  64 ++++
 libavutil/hwcontext_amf_internal.h |  44 +++
 libavutil/hwcontext_internal.h     |   1 +
 libavutil/pixdesc.c                |   4 +
 libavutil/pixfmt.h                 |   5 +
 9 files changed, 712 insertions(+)
 create mode 100644 libavutil/hwcontext_amf.c
 create mode 100644 libavutil/hwcontext_amf.h
 create mode 100644 libavutil/hwcontext_amf_internal.h

diff --git a/libavutil/Makefile b/libavutil/Makefile
index 6e6fa8d800..13c318560d 100644
--- a/libavutil/Makefile
+++ b/libavutil/Makefile
@@ -45,6 +45,7 @@ HEADERS = adler32.h                                                     \
           hwcontext_d3d12va.h                                           \
           hwcontext_drm.h                                               \
           hwcontext_dxva2.h                                             \
+          hwcontext_amf.h                                               \
           hwcontext_qsv.h                                               \
           hwcontext_mediacodec.h                                        \
           hwcontext_opencl.h                                            \
@@ -196,6 +197,7 @@ OBJS-$(CONFIG_CUDA)                     += hwcontext_cuda.o
 OBJS-$(CONFIG_D3D11VA)                  += hwcontext_d3d11va.o
 OBJS-$(CONFIG_D3D12VA)                  += hwcontext_d3d12va.o
 OBJS-$(CONFIG_DXVA2)                    += hwcontext_dxva2.o
+OBJS-$(CONFIG_AMF)                      += hwcontext_amf.o
 OBJS-$(CONFIG_LIBDRM)                   += hwcontext_drm.o
 OBJS-$(CONFIG_MACOS_KPERF)              += macos_kperf.o
 OBJS-$(CONFIG_MEDIACODEC)               += hwcontext_mediacodec.o
@@ -220,6 +222,8 @@ SKIPHEADERS-$(CONFIG_CUDA)             += hwcontext_cuda_internal.h     \
 SKIPHEADERS-$(CONFIG_D3D11VA)          += hwcontext_d3d11va.h
 SKIPHEADERS-$(CONFIG_D3D12VA)          += hwcontext_d3d12va.h
 SKIPHEADERS-$(CONFIG_DXVA2)            += hwcontext_dxva2.h
+SKIPHEADERS-$(CONFIG_AMF)              += hwcontext_amf.h               \
+                                          hwcontext_amf_internal.h
 SKIPHEADERS-$(CONFIG_QSV)              += hwcontext_qsv.h
 SKIPHEADERS-$(CONFIG_OPENCL)           += hwcontext_opencl.h
 SKIPHEADERS-$(CONFIG_VAAPI)            += hwcontext_vaapi.h
diff --git a/libavutil/hwcontext.c b/libavutil/hwcontext.c
index fa99a0d8a4..f06d49c45c 100644
--- a/libavutil/hwcontext.c
+++ b/libavutil/hwcontext.c
@@ -65,6 +65,9 @@ static const HWContextType * const hw_table[] = {
 #endif
 #if CONFIG_VULKAN
     &ff_hwcontext_type_vulkan,
+#endif
+#if CONFIG_AMF
+    &ff_hwcontext_type_amf,
 #endif
     NULL,
 };
@@ -82,6 +85,7 @@ static const char *const hw_type_names[] = {
     [AV_HWDEVICE_TYPE_VIDEOTOOLBOX] = "videotoolbox",
     [AV_HWDEVICE_TYPE_MEDIACODEC] = "mediacodec",
     [AV_HWDEVICE_TYPE_VULKAN] = "vulkan",
+    [AV_HWDEVICE_TYPE_AMF] = "amf",
 };
 
 typedef struct FFHWDeviceContext {
diff --git a/libavutil/hwcontext.h b/libavutil/hwcontext.h
index bac30debae..96042ba197 100644
--- a/libavutil/hwcontext.h
+++ b/libavutil/hwcontext.h
@@ -38,6 +38,7 @@ enum AVHWDeviceType {
     AV_HWDEVICE_TYPE_MEDIACODEC,
     AV_HWDEVICE_TYPE_VULKAN,
     AV_HWDEVICE_TYPE_D3D12VA,
+    AV_HWDEVICE_TYPE_AMF,
 };
 
 /**
diff --git a/libavutil/hwcontext_amf.c b/libavutil/hwcontext_amf.c
new file mode 100644
index 0000000000..1c589669e1
--- /dev/null
+++ b/libavutil/hwcontext_amf.c
@@ -0,0 +1,585 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "buffer.h"
+#include "common.h"
+#include "hwcontext.h"
+#include "hwcontext_amf.h"
+#include "hwcontext_internal.h"
+#include "hwcontext_amf_internal.h"
+#if CONFIG_VULKAN
+#include "hwcontext_vulkan.h"
+#endif
+#if CONFIG_D3D11VA
+#include "libavutil/hwcontext_d3d11va.h"
+#endif
+#if CONFIG_DXVA2
+#define COBJMACROS
+#include "libavutil/hwcontext_dxva2.h"
+#endif
+#include "mem.h"
+#include "pixdesc.h"
+#include "pixfmt.h"
+#include "imgutils.h"
+#include "libavutil/avassert.h"
+#include <AMF/core/Surface.h>
+#include <AMF/core/Trace.h>
+#ifdef _WIN32
+#include "compat/w32dlfcn.h"
+#else
+#include <dlfcn.h>
+#endif
+#define FFMPEG_AMF_WRITER_ID L"ffmpeg_amf"
+int av_amf_load_library(AVAMFDeviceContext* amf_ctx,  void* avcl);
+
+// Per-frames-context state (AVHWFramesContext.hwctx).
+// NOTE(review): surfaces/nb_surfaces appear unused in this file — presumably
+// filled by the decoder/encoder; confirm against the later patches in the set.
+typedef struct AMFFramesContext {
+    AMFSurface * surfaces;
+    int            nb_surfaces;
+} AMFFramesContext;
+
+// Bridge object connecting the AMF trace interface to av_log().
+typedef struct AmfTraceWriter {
+    AMFTraceWriterVtbl  *vtbl;   // heap-allocated vtable pointing at the callbacks below
+    void                *avctx;  // logging context passed to av_log()
+} AmfTraceWriter;
+
+// AMFTraceWriter callback: forward one trace line to av_log() at DEBUG level.
+static void AMF_CDECL_CALL AMFTraceWriter_Write(AMFTraceWriter *pThis,
+    const wchar_t *scope, const wchar_t *message)
+{
+    AmfTraceWriter *tracer = (AmfTraceWriter*)pThis;
+    av_log(tracer->avctx, AV_LOG_DEBUG, "%ls: %ls", scope, message); // \n is provided from AMF
+}
+
+// AMFTraceWriter callback: nothing to flush, av_log() output is unbuffered here.
+static void AMF_CDECL_CALL AMFTraceWriter_Flush(AMFTraceWriter *pThis)
+{
+}
+
+/**
+ * Allocate an AmfTraceWriter that forwards AMF trace output to av_log().
+ *
+ * @param avctx logging context stored in the writer
+ * @return newly allocated writer, or NULL on allocation failure.
+ *         Free with amf_writer_free().
+ */
+AmfTraceWriter * amf_writer_alloc(void  *avctx)
+{
+    AmfTraceWriter * writer = av_mallocz(sizeof(AmfTraceWriter));
+    if (!writer)
+        return NULL;
+    // sizeof(*writer->vtbl): allocate the vtable, not another AmfTraceWriter
+    writer->vtbl = av_mallocz(sizeof(*writer->vtbl));
+    if (!writer->vtbl) {
+        // was "if (writer->vtbl)": the inverted test freed the writer on
+        // successful allocation and then dereferenced the NULL vtbl below
+        av_freep(&writer);
+        return NULL;
+    }
+    writer->vtbl->Write = AMFTraceWriter_Write;
+    writer->vtbl->Flush = AMFTraceWriter_Flush;
+    writer->avctx = avctx;
+    return writer;
+}
+
+/**
+ * Free an AmfTraceWriter allocated by amf_writer_alloc() (vtable then struct).
+ */
+void amf_writer_free(void  *opaque)
+{
+    AmfTraceWriter *writer = (AmfTraceWriter *)opaque;
+    av_freep(&writer->vtbl);
+    av_freep(&writer);
+}
+
+
+// One AVPixelFormat <-> AMF_SURFACE_FORMAT correspondence.
+typedef struct AVAMFFormatMap {
+    enum AVPixelFormat       av_format;
+    enum AMF_SURFACE_FORMAT  amf_format;
+} FormatMap;
+
+// Lookup table used by the two conversion helpers below.
+// static: internal to this file — without it the symbol leaks into the
+// global namespace of libavutil.
+static const FormatMap format_map[] =
+{
+    { AV_PIX_FMT_NONE,          AMF_SURFACE_UNKNOWN },
+    { AV_PIX_FMT_NV12,          AMF_SURFACE_NV12 },
+    { AV_PIX_FMT_BGR0,          AMF_SURFACE_BGRA },
+    { AV_PIX_FMT_RGB0,          AMF_SURFACE_RGBA },
+    { AV_PIX_FMT_BGRA,          AMF_SURFACE_BGRA },
+    { AV_PIX_FMT_ARGB,          AMF_SURFACE_ARGB },
+    { AV_PIX_FMT_RGBA,          AMF_SURFACE_RGBA },
+    { AV_PIX_FMT_GRAY8,         AMF_SURFACE_GRAY8 },
+    { AV_PIX_FMT_YUV420P,       AMF_SURFACE_YUV420P },
+    { AV_PIX_FMT_YUYV422,       AMF_SURFACE_YUY2 },
+    { AV_PIX_FMT_P010,          AMF_SURFACE_P010 },
+};
+
+/**
+ * Map an FFmpeg pixel format to the corresponding AMF surface format.
+ * Returns AMF_SURFACE_UNKNOWN when no mapping exists.
+ */
+enum AMF_SURFACE_FORMAT av_amf_av_to_amf_format(enum AVPixelFormat fmt)
+{
+    const FormatMap *entry = format_map;
+    const FormatMap *end   = format_map + amf_countof(format_map);
+
+    for (; entry < end; entry++)
+        if (entry->av_format == fmt)
+            return entry->amf_format;
+
+    return AMF_SURFACE_UNKNOWN;
+}
+
+/**
+ * Map an AMF surface format back to the corresponding FFmpeg pixel format.
+ * Returns AV_PIX_FMT_NONE when no mapping exists.
+ */
+enum AVPixelFormat av_amf_to_av_format(enum AMF_SURFACE_FORMAT fmt)
+{
+    int i;
+    for (i = 0; i < amf_countof(format_map); i++) {
+        if (format_map[i].amf_format == fmt) {
+            return format_map[i].av_format;
+        }
+    }
+    // was "return AMF_SURFACE_UNKNOWN": wrong enum for this return type —
+    // AMF_SURFACE_UNKNOWN is an AMF_SURFACE_FORMAT value, not an AVPixelFormat
+    return AV_PIX_FMT_NONE;
+}
+
+static const enum AVPixelFormat supported_formats[] = {
+    AV_PIX_FMT_NV12,
+    AV_PIX_FMT_YUV420P,
+    AV_PIX_FMT_BGRA,
+    AV_PIX_FMT_P010,
+    AV_PIX_FMT_YUV420P10,
+#if CONFIG_D3D11VA
+    AV_PIX_FMT_D3D11,
+#endif
+#if CONFIG_DXVA2
+    AV_PIX_FMT_DXVA2_VLD,
+#endif
+    AV_PIX_FMT_AMF_SURFACE
+};
+
+// HWContextType.frames_get_constraints: report the supported sw formats and
+// the single hw format (AV_PIX_FMT_AMF_SURFACE).
+// NOTE(review): if the second allocation fails, valid_sw_formats is left
+// allocated — presumably freed by the caller via av_hwframe_constraints_free();
+// confirm that contract holds on the error path.
+static int amf_frames_get_constraints(AVHWDeviceContext *ctx,
+                                       const void *hwconfig,
+                                       AVHWFramesConstraints *constraints)
+{
+    int i;
+
+    constraints->valid_sw_formats = av_malloc_array(FF_ARRAY_ELEMS(supported_formats) + 1,
+                                                    sizeof(*constraints->valid_sw_formats));
+    if (!constraints->valid_sw_formats)
+        return AVERROR(ENOMEM);
+
+    for (i = 0; i < FF_ARRAY_ELEMS(supported_formats); i++)
+        constraints->valid_sw_formats[i] = supported_formats[i];
+    // AV_PIX_FMT_NONE terminates the list
+    constraints->valid_sw_formats[FF_ARRAY_ELEMS(supported_formats)] = AV_PIX_FMT_NONE;
+
+    constraints->valid_hw_formats = av_malloc_array(2, sizeof(*constraints->valid_hw_formats));
+    if (!constraints->valid_hw_formats)
+        return AVERROR(ENOMEM);
+
+    constraints->valid_hw_formats[0] = AV_PIX_FMT_AMF_SURFACE;
+    constraints->valid_hw_formats[1] = AV_PIX_FMT_NONE;
+
+    return 0;
+}
+
+// No-op free callback: the AVBufferRef created below owns no real data;
+// the AMFSurface lifetime is managed elsewhere.
+static void amf_dummy_free(void *opaque, uint8_t *data)
+{
+
+}
+
+/**
+ * Pool allocator callback: create a zero-sized, read-only placeholder buffer.
+ * The actual AMFSurface pointer is stored into frame->data[0] by the
+ * component that fills the frame.
+ */
+static AVBufferRef *amf_pool_alloc(void *opaque, size_t size)
+{
+    AVHWFramesContext *hwfc = (AVHWFramesContext *)opaque;
+    AVBufferRef *buf;
+
+    // was av_buffer_create(NULL, NULL, ...): the second parameter is a
+    // size_t size, so pass 0, not a null pointer constant
+    buf = av_buffer_create(NULL, 0, amf_dummy_free, hwfc, AV_BUFFER_FLAG_READONLY);
+    if (!buf) {
+        av_log(hwfc, AV_LOG_ERROR, "Failed to create buffer for AMF context.\n");
+        return NULL;
+    }
+    return buf;
+}
+
+/**
+ * HWContextType.frames_init: validate the chosen sw format and set up the
+ * internal dummy-buffer pool.
+ *
+ * @return 0 on success, AVERROR(ENOSYS) for unsupported formats,
+ *         AVERROR(ENOMEM) if the pool cannot be created.
+ */
+static int amf_frames_init(AVHWFramesContext *ctx)
+{
+    int i;
+
+    for (i = 0; i < FF_ARRAY_ELEMS(supported_formats); i++) {
+        if (ctx->sw_format == supported_formats[i])
+            break;
+    }
+    if (i == FF_ARRAY_ELEMS(supported_formats)) {
+        av_log(ctx, AV_LOG_ERROR, "Pixel format '%s' is not supported\n",
+               av_get_pix_fmt_name(ctx->sw_format));
+        return AVERROR(ENOSYS);
+    }
+
+    ffhwframesctx(ctx)->pool_internal =
+            av_buffer_pool_init2(sizeof(AMFSurface), ctx,
+                                 &amf_pool_alloc, NULL);
+    // av_buffer_pool_init2() returns NULL on allocation failure — previously
+    // unchecked, which would crash later in av_buffer_pool_get()
+    if (!ffhwframesctx(ctx)->pool_internal)
+        return AVERROR(ENOMEM);
+
+    return 0;
+}
+
+/**
+ * Create the AMFContext and wire the AMF trace output into av_log().
+ * The factory/trace pointers must already be set by av_amf_load_library().
+ *
+ * @return 0 on success, AVERROR_UNKNOWN if CreateContext() fails.
+ */
+int amf_context_create(  AVAMFDeviceContext * amf_ctx,
+                                void* avcl,
+                                const char *device,
+                                AVDictionary *opts, int flags)
+{
+    AMF_RESULT         res;
+
+    amf_ctx->trace->pVtbl->EnableWriter(amf_ctx->trace, AMF_TRACE_WRITER_CONSOLE, 0);
+    amf_ctx->trace->pVtbl->SetGlobalLevel(amf_ctx->trace, AMF_TRACE_TRACE);
+
+     // connect AMF logger to av_log; best-effort — skip the bridge if the
+     // writer cannot be allocated (previously the NULL result was passed
+     // straight to RegisterWriter())
+    amf_ctx->trace_writer = amf_writer_alloc(avcl);
+    if (amf_ctx->trace_writer) {
+        amf_ctx->trace->pVtbl->RegisterWriter(amf_ctx->trace, FFMPEG_AMF_WRITER_ID, (AMFTraceWriter*)amf_ctx->trace_writer, 1);
+        amf_ctx->trace->pVtbl->SetWriterLevel(amf_ctx->trace, FFMPEG_AMF_WRITER_ID, AMF_TRACE_TRACE);
+    }
+
+    res = amf_ctx->factory->pVtbl->CreateContext(amf_ctx->factory, &amf_ctx->context);
+    AMF_RETURN_IF_FALSE(avcl, res == AMF_OK, AVERROR_UNKNOWN, "CreateContext() failed with error %d\n", res);
+
+    return 0;
+}
+
+// HWContextType.frames_get_buffer: hand out a placeholder buffer from the
+// pool and tag the frame as an AMF surface frame. data[0] points at the
+// (empty) buffer payload; the real AMFSurface is attached by the producer.
+static int amf_get_buffer(AVHWFramesContext *ctx, AVFrame *frame)
+{
+    frame->buf[0] = av_buffer_pool_get(ctx->pool);
+    if (!frame->buf[0])
+        return AVERROR(ENOMEM);
+
+    frame->data[0] = frame->buf[0]->data;
+    frame->format  = AV_PIX_FMT_AMF_SURFACE;
+    frame->width   = ctx->width;
+    frame->height  = ctx->height;
+    return 0;
+}
+
+// HWContextType.transfer_get_formats: transfers in either direction are only
+// offered in the frames context's own sw_format.
+static int amf_transfer_get_formats(AVHWFramesContext *ctx,
+                                     enum AVHWFrameTransferDirection dir,
+                                     enum AVPixelFormat **formats)
+{
+    enum AVPixelFormat *fmts;
+
+    // one entry plus AV_PIX_FMT_NONE terminator; caller frees the array
+    fmts = av_malloc_array(2, sizeof(*fmts));
+    if (!fmts)
+        return AVERROR(ENOMEM);
+
+    fmts[0] = ctx->sw_format;
+    fmts[1] = AV_PIX_FMT_NONE;
+
+    *formats = fmts;
+
+    return 0;
+}
+
+/**
+ * Upload a system-memory frame (src) into the AMFSurface carried in
+ * dst->data[0], plane by plane.
+ *
+ * NOTE(review): unlike av_amf_transfer_data_from() there is no
+ * Convert(AMF_MEMORY_HOST) call here — this assumes the destination surface's
+ * planes are already CPU-writable; confirm against the AMF surface types used.
+ */
+int av_amf_transfer_data_to(AVHWFramesContext *ctx, AVFrame *dst,
+                                 const AVFrame *src)
+{
+    AMFSurface* surface = (AMFSurface*)dst->data[0];
+    AMFPlane *plane;
+    uint8_t  *dst_data[4];
+    int       dst_linesize[4];
+    int       planes;
+    int       i;
+    int w = FFMIN(dst->width,  src->width);
+    int h = FFMIN(dst->height, src->height);
+
+    planes = (int)surface->pVtbl->GetPlanesCount(surface);
+    av_assert0(planes < FF_ARRAY_ELEMS(dst_data));
+
+    for (i = 0; i < planes; i++) {
+        plane = surface->pVtbl->GetPlaneAt(surface, i);
+        dst_data[i] = plane->pVtbl->GetNative(plane);
+        // GetHPitch(): horizontal pitch of the plane, used as the linesize
+        dst_linesize[i] = plane->pVtbl->GetHPitch(plane);
+    }
+    av_image_copy(dst_data, dst_linesize,
+        (const uint8_t**)src->data, src->linesize, src->format,
+        w, h);
+
+    return 0;
+}
+/**
+ * Download the AMFSurface carried in src->data[0] into a system-memory
+ * frame (dst): convert the surface to host memory, then copy plane by plane.
+ *
+ * @return 0 on success, AVERROR_UNKNOWN if the AMF Convert() call fails.
+ */
+int av_amf_transfer_data_from(AVHWFramesContext *ctx, AVFrame *dst,
+                                    const AVFrame *src)
+{
+    AMFSurface* surface = (AMFSurface*)src->data[0];
+    AMFPlane *plane;
+    uint8_t  *src_data[4];
+    int       src_linesize[4];
+    int       planes;
+    int       i;
+    int w = FFMIN(dst->width,  src->width);
+    int h = FFMIN(dst->height, src->height);
+    int ret;
+
+    ret = surface->pVtbl->Convert(surface, AMF_MEMORY_HOST);
+    // log the actual AMF result — previously this printed the constant
+    // AVERROR_UNKNOWN instead of ret
+    AMF_RETURN_IF_FALSE(ctx, ret == AMF_OK, AVERROR_UNKNOWN, "Convert(amf::AMF_MEMORY_HOST) failed with error %d\n", ret);
+
+    planes = (int)surface->pVtbl->GetPlanesCount(surface);
+    av_assert0(planes < FF_ARRAY_ELEMS(src_data));
+
+    for (i = 0; i < planes; i++) {
+        plane = surface->pVtbl->GetPlaneAt(surface, i);
+        src_data[i] = plane->pVtbl->GetNative(plane);
+        src_linesize[i] = plane->pVtbl->GetHPitch(plane);
+    }
+    av_image_copy(dst->data, dst->linesize,
+                  (const uint8_t **)src_data, src_linesize, dst->format,
+                  w, h);
+    // NOTE(review): this releases a surface still referenced by src's buffer —
+    // verify ownership; a later unref of src could double-release the surface
+    surface->pVtbl->Release(surface);
+    return 0;
+}
+
+
+// HWContextType.device_uninit: tear down the AMF context/trace/library.
+// av_amf_context_free() has an AVBufferRef-free signature, hence the casts.
+static void amf_device_uninit(AVHWDeviceContext *device_ctx)
+{
+    AVAMFDeviceContext      *amf_ctx = device_ctx->hwctx;
+    av_amf_context_free(0, (uint8_t *)amf_ctx);
+}
+
+// HWContextType.device_init: pick a backing API (D3D11 -> D3D9 -> Vulkan).
+static int amf_device_init(AVHWDeviceContext *ctx)
+{
+    AVAMFDeviceContext *amf_ctx = ctx->hwctx;
+    return av_amf_context_init(amf_ctx, ctx);
+}
+
+// HWContextType.device_create: load the AMF runtime and create the AMF
+// context; on any failure the partially-initialised state is torn down here.
+static int amf_device_create(AVHWDeviceContext *device_ctx,
+                              const char *device,
+                              AVDictionary *opts, int flags)
+{
+    AVAMFDeviceContext        *ctx = device_ctx->hwctx;
+    int ret;
+    if ((ret = av_amf_load_library(ctx, device_ctx)) == 0) {
+        if ((ret = amf_context_create(ctx, device_ctx, "", opts, flags)) == 0){
+            return 0;
+        }
+    }
+    amf_device_uninit(device_ctx);
+    return ret;
+}
+
+// HWContextType.device_derive: create a fresh AMF device, then initialise it
+// from the child device (DXVA2 or D3D11VA).
+// NOTE(review): if av_amf_context_derive() fails, no explicit uninit happens
+// here — presumably the core frees the device ctx (triggering device_uninit);
+// confirm the error path does not leak the loaded library.
+static int amf_device_derive(AVHWDeviceContext *device_ctx,
+                              AVHWDeviceContext *child_device_ctx, AVDictionary *opts,
+                              int flags)
+{
+    AVAMFDeviceContext        *ctx = device_ctx->hwctx;
+    int ret;
+
+    ret = amf_device_create(device_ctx, "", opts, flags);
+    if(ret < 0)
+        return ret;
+
+    return av_amf_context_derive(ctx, child_device_ctx, opts, flags);
+}
+
+#if CONFIG_DXVA2
+/**
+ * Initialise the AMF context from an existing DXVA2 (Direct3D9) device.
+ * The device is obtained from the D3D9 device manager: open a handle,
+ * briefly lock it to get the IDirect3DDevice9, then hand it to AMF.
+ */
+static int amf_init_from_dxva2_device(AVAMFDeviceContext * amf_ctx, AVDXVA2DeviceContext *hwctx)
+{
+    IDirect3DDevice9    *device;
+    HANDLE              device_handle;
+    HRESULT             hr;
+    AMF_RESULT          res;
+    int ret;
+
+    hr = IDirect3DDeviceManager9_OpenDeviceHandle(hwctx->devmgr, &device_handle);
+    if (FAILED(hr)) {
+        av_log(hwctx, AV_LOG_ERROR, "Failed to open device handle for Direct3D9 device: %lx.\n", (unsigned long)hr);
+        return AVERROR_EXTERNAL;
+    }
+
+    // lock only to obtain the device pointer; unlock immediately
+    hr = IDirect3DDeviceManager9_LockDevice(hwctx->devmgr, device_handle, &device, FALSE);
+    if (SUCCEEDED(hr)) {
+        IDirect3DDeviceManager9_UnlockDevice(hwctx->devmgr, device_handle, FALSE);
+        ret = 0;
+    } else {
+        av_log(hwctx, AV_LOG_ERROR, "Failed to lock device handle for Direct3D9 device: %lx.\n", (unsigned long)hr);
+        ret = AVERROR_EXTERNAL;
+    }
+
+
+    IDirect3DDeviceManager9_CloseDeviceHandle(hwctx->devmgr, device_handle);
+
+    if (ret < 0)
+        return ret;
+
+    res = amf_ctx->context->pVtbl->InitDX9(amf_ctx->context, device);
+
+    // AMF holds its own reference after InitDX9; drop ours
+    IDirect3DDevice9_Release(device);
+
+    if (res != AMF_OK) {
+        if (res == AMF_NOT_SUPPORTED)
+            av_log(hwctx, AV_LOG_ERROR, "AMF via D3D9 is not supported on the given device.\n");
+        else
+            av_log(hwctx, AV_LOG_ERROR, "AMF failed to initialise on given D3D9 device: %d.\n", res);
+        return AVERROR(ENODEV);
+    }
+    amf_ctx->mem_type = AMF_MEMORY_DX9;
+    return 0;
+}
+#endif
+
+#if CONFIG_D3D11VA
+/**
+ * Initialise the AMF context from an existing D3D11 device.
+ */
+static int amf_init_from_d3d11_device(AVAMFDeviceContext* amf_ctx, AVD3D11VADeviceContext *hwctx)
+{
+    AMF_RESULT res;
+    res = amf_ctx->context->pVtbl->InitDX11(amf_ctx->context, hwctx->device, AMF_DX11_1);
+    if (res != AMF_OK) {
+        if (res == AMF_NOT_SUPPORTED)
+            av_log(hwctx, AV_LOG_ERROR, "AMF via D3D11 is not supported on the given device.\n");
+        else
+            av_log(hwctx, AV_LOG_ERROR, "AMF failed to initialise on the given D3D11 device: %d.\n", res);
+        return AVERROR(ENODEV);
+    }
+    amf_ctx->mem_type = AMF_MEMORY_DX11;
+    return 0;
+}
+#endif
+
+/**
+ * Initialise the AMF context with an internally-created device, trying
+ * backends in order: D3D11 -> D3D9 -> Vulkan (via AMFContext1).
+ * Sets amf_ctx->mem_type to the backend that succeeded.
+ *
+ * @return 0 on success, AVERROR_UNKNOWN if AMFContext1 cannot be queried,
+ *         AVERROR(ENOSYS) if Vulkan init also fails.
+ */
+int av_amf_context_init(AVAMFDeviceContext* amf_ctx, void* avcl)
+{
+     AMFContext1 *context1 = NULL;
+     AMF_RESULT res;
+
+    res = amf_ctx->context->pVtbl->InitDX11(amf_ctx->context, NULL, AMF_DX11_1);
+    if (res == AMF_OK) {
+        amf_ctx->mem_type = AMF_MEMORY_DX11;
+        av_log(avcl, AV_LOG_VERBOSE, "AMF initialisation succeeded via D3D11.\n");
+    } else {
+        res = amf_ctx->context->pVtbl->InitDX9(amf_ctx->context, NULL);
+        if (res == AMF_OK) {
+            amf_ctx->mem_type = AMF_MEMORY_DX9;
+            av_log(avcl, AV_LOG_VERBOSE, "AMF initialisation succeeded via D3D9.\n");
+        } else {
+            // Vulkan init lives on the AMFContext1 extension interface
+            AMFGuid guid = IID_AMFContext1();
+            res = amf_ctx->context->pVtbl->QueryInterface(amf_ctx->context, &guid, (void**)&context1);
+            AMF_RETURN_IF_FALSE(avcl, res == AMF_OK, AVERROR_UNKNOWN, "CreateContext1() failed with error %d\n", res);
+
+            res = context1->pVtbl->InitVulkan(context1, NULL);
+            context1->pVtbl->Release(context1);
+            if (res != AMF_OK) {
+                if (res == AMF_NOT_SUPPORTED)
+                    av_log(avcl, AV_LOG_ERROR, "AMF via Vulkan is not supported on the given device.\n");
+                 else
+                    av_log(avcl, AV_LOG_ERROR, "AMF failed to initialise on the given Vulkan device: %d.\n", res);
+                 return AVERROR(ENOSYS);
+            }
+            amf_ctx->mem_type = AMF_MEMORY_VULKAN;
+            av_log(avcl, AV_LOG_VERBOSE, "AMF initialisation succeeded via Vulkan.\n");
+         }
+     }
+     return 0;
+}
+/**
+ * Load the AMF runtime DLL/shared object and resolve the factory, version,
+ * trace and debug interfaces into amf_ctx.
+ *
+ * @return 0 on success, AVERROR_UNKNOWN on any load/resolve failure
+ *         (the library handle may remain open; av_amf_context_free() closes it).
+ */
+int av_amf_load_library(AVAMFDeviceContext* amf_ctx,  void* avcl)
+{
+    AMFInit_Fn         init_fun;
+    AMFQueryVersion_Fn version_fun;
+    AMF_RESULT         res;
+
+    // w32dlfcn.h maps dlopen to LoadLibrary on Windows
+    amf_ctx->library = dlopen(AMF_DLL_NAMEA, RTLD_NOW | RTLD_LOCAL);
+    AMF_RETURN_IF_FALSE(avcl, amf_ctx->library != NULL,
+        AVERROR_UNKNOWN, "DLL %s failed to open\n", AMF_DLL_NAMEA);
+
+    init_fun = (AMFInit_Fn)dlsym(amf_ctx->library, AMF_INIT_FUNCTION_NAME);
+    AMF_RETURN_IF_FALSE(avcl, init_fun != NULL, AVERROR_UNKNOWN, "DLL %s failed to find function %s\n", AMF_DLL_NAMEA, AMF_INIT_FUNCTION_NAME);
+
+    version_fun = (AMFQueryVersion_Fn)dlsym(amf_ctx->library, AMF_QUERY_VERSION_FUNCTION_NAME);
+    AMF_RETURN_IF_FALSE(avcl, version_fun != NULL, AVERROR_UNKNOWN, "DLL %s failed to find function %s\n", AMF_DLL_NAMEA, AMF_QUERY_VERSION_FUNCTION_NAME);
+
+    res = version_fun(&amf_ctx->version);
+    AMF_RETURN_IF_FALSE(avcl, res == AMF_OK, AVERROR_UNKNOWN, "%s failed with error %d\n", AMF_QUERY_VERSION_FUNCTION_NAME, res);
+    res = init_fun(AMF_FULL_VERSION, &amf_ctx->factory);
+    AMF_RETURN_IF_FALSE(avcl, res == AMF_OK, AVERROR_UNKNOWN, "%s failed with error %d\n", AMF_INIT_FUNCTION_NAME, res);
+    res = amf_ctx->factory->pVtbl->GetTrace(amf_ctx->factory, &amf_ctx->trace);
+    AMF_RETURN_IF_FALSE(avcl, res == AMF_OK, AVERROR_UNKNOWN, "GetTrace() failed with error %d\n", res);
+    res = amf_ctx->factory->pVtbl->GetDebug(amf_ctx->factory, &amf_ctx->debug);
+    AMF_RETURN_IF_FALSE(avcl, res == AMF_OK, AVERROR_UNKNOWN, "GetDebug() failed with error %d\n", res);
+    return 0;
+}
+
+/**
+ * Initialise the AMF context from an existing child hw device context.
+ * Only DXVA2 and D3D11VA children are supported (when compiled in).
+ *
+ * @return 0 on success, AVERROR(ENOSYS) for unsupported child types,
+ *         or the error from the backend-specific init helper.
+ */
+int av_amf_context_derive(AVAMFDeviceContext * amf_ctx,
+                               AVHWDeviceContext *child_device_ctx, AVDictionary *opts,
+                               int flags)
+{
+
+    switch (child_device_ctx->type) {
+
+#if CONFIG_DXVA2
+    case AV_HWDEVICE_TYPE_DXVA2:
+        {
+            AVDXVA2DeviceContext *child_device_hwctx = child_device_ctx->hwctx;
+            return amf_init_from_dxva2_device(amf_ctx, child_device_hwctx);
+        }
+        break;
+#endif
+
+#if CONFIG_D3D11VA
+    case AV_HWDEVICE_TYPE_D3D11VA:
+        {
+            AVD3D11VADeviceContext *child_device_hwctx = child_device_ctx->hwctx;
+            return amf_init_from_d3d11_device(amf_ctx, child_device_hwctx);
+        }
+        break;
+#endif
+    default:
+        {
+            av_log(child_device_ctx, AV_LOG_ERROR, "AMF initialisation from a %s device is not supported.\n",
+                av_hwdevice_get_type_name(child_device_ctx->type));
+            return AVERROR(ENOSYS);
+        }
+    }
+    return 0;
+}
+
+/**
+ * Public helper: load the AMF runtime and create the AMF context in one step,
+ * cleaning up on failure.
+ * NOTE(review): duplicates amf_device_create() except for the cleanup call —
+ * consider sharing the implementation.
+ */
+int av_amf_context_create(AVAMFDeviceContext * context,
+                            void* avcl,
+                            const char *device,
+                            AVDictionary *opts, int flags)
+{
+    int ret;
+    if ((ret = av_amf_load_library(context, avcl)) == 0) {
+        if ((ret = amf_context_create(context, avcl, "", opts, flags)) == 0){
+            return 0;
+        }
+    }
+    av_amf_context_free(0, (uint8_t *)context);
+    return ret;
+}
+
+/**
+ * Release everything held by an AVAMFDeviceContext: the AMF context, the
+ * trace-writer registration, the runtime library handle and the writer.
+ * Signature matches an AVBufferRef free callback (opaque, data).
+ * Safe on partially-initialised contexts: every member is checked first.
+ */
+void av_amf_context_free(void *opaque, uint8_t *data)
+{
+    AVAMFDeviceContext *amf_ctx = (AVAMFDeviceContext *)data;
+    if (amf_ctx->context) {
+        amf_ctx->context->pVtbl->Terminate(amf_ctx->context);
+        amf_ctx->context->pVtbl->Release(amf_ctx->context);
+        amf_ctx->context = NULL;
+    }
+
+    // unregister before the library is unloaded below
+    if (amf_ctx->trace) {
+        amf_ctx->trace->pVtbl->UnregisterWriter(amf_ctx->trace, FFMPEG_AMF_WRITER_ID);
+    }
+
+    if(amf_ctx->library) {
+        dlclose(amf_ctx->library);
+        amf_ctx->library = NULL;
+    }
+    // plain heap memory — safe to free after dlclose()
+    if (amf_ctx->trace_writer) {
+        amf_writer_free(amf_ctx->trace_writer);
+    }
+
+    // NOTE(review): factory/trace/trace_writer pointers are not NULLed here,
+    // unlike context/library — consider clearing them too for double-free safety
+    amf_ctx->debug = NULL;
+    amf_ctx->version = 0;
+}
+
+
+// hwcontext backend descriptor for AMF, registered in hwcontext.c's hw_table.
+const HWContextType ff_hwcontext_type_amf = {
+    .type                 = AV_HWDEVICE_TYPE_AMF,
+    .name                 = "AMF",
+
+    .device_hwctx_size    = sizeof(AVAMFDeviceContext),
+    .frames_hwctx_size    = sizeof(AMFFramesContext),
+
+    .device_create        = amf_device_create,
+    .device_derive        = amf_device_derive,
+    .device_init          = amf_device_init,
+    .device_uninit        = amf_device_uninit,
+    .frames_get_constraints = amf_frames_get_constraints,
+    .frames_init          = amf_frames_init,
+    .frames_get_buffer    = amf_get_buffer,
+    .transfer_get_formats = amf_transfer_get_formats,
+    .transfer_data_to     = av_amf_transfer_data_to,
+    .transfer_data_from   = av_amf_transfer_data_from,
+
+    .pix_fmts             = (const enum AVPixelFormat[]){ AV_PIX_FMT_AMF_SURFACE, AV_PIX_FMT_NONE },
+};
diff --git a/libavutil/hwcontext_amf.h b/libavutil/hwcontext_amf.h
new file mode 100644
index 0000000000..ef2118dd4e
--- /dev/null
+++ b/libavutil/hwcontext_amf.h
@@ -0,0 +1,64 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+#ifndef AVUTIL_HWCONTEXT_AMF_H
+#define AVUTIL_HWCONTEXT_AMF_H
+
+#include "pixfmt.h"
+#include "hwcontext.h"
+#include <AMF/core/Factory.h>
+#include <AMF/core/Context.h>
+#include <AMF/core/Trace.h>
+#include <AMF/core/Debug.h>
+
+/**
+ * This struct is allocated as AVHWDeviceContext.hwctx
+ */
+typedef struct AVAMFDeviceContext {
+    // NOTE(review): HMODULE is a Windows type — confirm the AMF headers
+    // provide a portable definition on non-Windows builds (dlopen is used there)
+    HMODULE            library;   ///< handle of the loaded AMF runtime
+    AMFFactory         *factory;  ///< entry point obtained from AMFInit
+    AMFDebug           *debug;
+    AMFTrace           *trace;
+    void               *trace_writer; ///< AmfTraceWriter bridging AMF trace to av_log
+
+    int64_t            version; ///< version of AMF runtime
+    AMFContext         *context;
+    int                mem_type; ///< AMF_MEMORY_TYPE chosen at init (DX11/DX9/Vulkan)
+} AVAMFDeviceContext;
+
+enum AMF_SURFACE_FORMAT av_amf_av_to_amf_format(enum AVPixelFormat fmt);
+enum AVPixelFormat av_amf_to_av_format(enum AMF_SURFACE_FORMAT fmt);
+
+int av_amf_context_create(AVAMFDeviceContext * context,
+                          void* avcl,
+                          const char *device,
+                          AVDictionary *opts, int flags);
+int av_amf_context_init(AVAMFDeviceContext* internal, void* avcl);
+void av_amf_context_free(void *opaque, uint8_t *data);
+int av_amf_context_derive(AVAMFDeviceContext * internal,
+                          AVHWDeviceContext *child_device_ctx, AVDictionary *opts,
+                          int flags);
+
+int av_amf_transfer_data_from(AVHWFramesContext *ctx, AVFrame *dst,
+                                    const AVFrame *src);
+
+int av_amf_transfer_data_to(AVHWFramesContext *ctx, AVFrame *dst,
+                                 const AVFrame *src);
+
+#endif /* AVUTIL_HWCONTEXT_AMF_H */
diff --git a/libavutil/hwcontext_amf_internal.h b/libavutil/hwcontext_amf_internal.h
new file mode 100644
index 0000000000..b991f357a6
--- /dev/null
+++ b/libavutil/hwcontext_amf_internal.h
@@ -0,0 +1,44 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+#ifndef AVUTIL_HWCONTEXT_AMF_INTERNAL_H
+#define AVUTIL_HWCONTEXT_AMF_INTERNAL_H
+#include <AMF/core/Factory.h>
+#include <AMF/core/Context.h>
+
+/**
+* Error handling helper
+*/
+#define AMF_RETURN_IF_FALSE(avctx, exp, ret_value, /*message,*/ ...) \
+    if (!(exp)) { \
+        av_log(avctx, AV_LOG_ERROR, __VA_ARGS__); \
+        return ret_value; \
+    }
+
+#define AMF_GOTO_FAIL_IF_FALSE(avctx, exp, ret_value, /*message,*/ ...) \
+    if (!(exp)) { \
+        av_log(avctx, AV_LOG_ERROR, __VA_ARGS__); \
+        ret = ret_value; \
+        goto fail; \
+    }
+
+#define AMF_TIME_BASE_Q          (AVRational){1, AMF_SECOND}
+
+
+#endif /* AVUTIL_HWCONTEXT_AMF_INTERNAL_H */
\ No newline at end of file
diff --git a/libavutil/hwcontext_internal.h b/libavutil/hwcontext_internal.h
index e32b786238..db23579c9e 100644
--- a/libavutil/hwcontext_internal.h
+++ b/libavutil/hwcontext_internal.h
@@ -163,5 +163,6 @@ extern const HWContextType ff_hwcontext_type_vdpau;
 extern const HWContextType ff_hwcontext_type_videotoolbox;
 extern const HWContextType ff_hwcontext_type_mediacodec;
 extern const HWContextType ff_hwcontext_type_vulkan;
+extern const HWContextType ff_hwcontext_type_amf;
 
 #endif /* AVUTIL_HWCONTEXT_INTERNAL_H */
diff --git a/libavutil/pixdesc.c b/libavutil/pixdesc.c
index 1c0bcf2232..5438c97a8f 100644
--- a/libavutil/pixdesc.c
+++ b/libavutil/pixdesc.c
@@ -2119,6 +2119,10 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
         .name = "cuda",
         .flags = AV_PIX_FMT_FLAG_HWACCEL,
     },
+    [AV_PIX_FMT_AMF_SURFACE] = {
+        .name = "amf",
+        .flags = AV_PIX_FMT_FLAG_HWACCEL,
+    },
     [AV_PIX_FMT_AYUV64LE] = {
         .name = "ayuv64le",
         .nb_components = 4,
diff --git a/libavutil/pixfmt.h b/libavutil/pixfmt.h
index a7f50e1690..29f67e707f 100644
--- a/libavutil/pixfmt.h
+++ b/libavutil/pixfmt.h
@@ -439,6 +439,11 @@ enum AVPixelFormat {
      */
     AV_PIX_FMT_D3D12,
 
+    /**
+     * HW acceleration through AMF. data[0] contain AMFSurface pointer
+     */
+    AV_PIX_FMT_AMF_SURFACE,
+
     AV_PIX_FMT_NB         ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions
 };
 
-- 
2.39.3 (Apple Git-146)

_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".

^ permalink raw reply	[flat|nested] 17+ messages in thread

* [FFmpeg-devel] [PATCH 02/10, v3] avcodec: add amfdec.
  2024-05-30 13:08 [FFmpeg-devel] [PATCH 01/10, v3] avutil: add hwcontext_amf Dmitrii Ovchinnikov
@ 2024-05-30 13:08 ` Dmitrii Ovchinnikov
  2024-06-04 19:25   ` Mark Thompson
  2024-05-30 13:08 ` [FFmpeg-devel] [PATCH 03/10, v3] avcodec/amfenc: Fixes the color information in the output Dmitrii Ovchinnikov
                   ` (10 subsequent siblings)
  11 siblings, 1 reply; 17+ messages in thread
From: Dmitrii Ovchinnikov @ 2024-05-30 13:08 UTC (permalink / raw)
  To: ffmpeg-devel; +Cc: Evgeny Pavlov, Dmitrii Ovchinnikov

From: Evgeny Pavlov <lucenticus@gmail.com>

Added AMF based h264, hevc, av1 decoders.
Co-authored-by: Dmitrii Ovchinnikov <ovchinnikov.dmitrii@gmail.com>
v2: added encoder reinitialisation
v3: use AMF_SURFACE_UNKNOWN to init decoder (ctx->output_format before)
---
 libavcodec/Makefile    |   7 +-
 libavcodec/allcodecs.c |   3 +
 libavcodec/amfdec.c    | 696 +++++++++++++++++++++++++++++++++++++++++
 libavcodec/amfdec.h    |  63 ++++
 4 files changed, 767 insertions(+), 2 deletions(-)
 create mode 100644 libavcodec/amfdec.c
 create mode 100644 libavcodec/amfdec.h

diff --git a/libavcodec/Makefile b/libavcodec/Makefile
index 2443d2c6fd..69918903ff 100644
--- a/libavcodec/Makefile
+++ b/libavcodec/Makefile
@@ -70,7 +70,7 @@ include $(SRC_PATH)/libavcodec/vvc/Makefile
 OBJS-$(CONFIG_AANDCTTABLES)            += aandcttab.o
 OBJS-$(CONFIG_AC3DSP)                  += ac3dsp.o ac3.o ac3tab.o
 OBJS-$(CONFIG_ADTS_HEADER)             += adts_header.o mpeg4audio_sample_rates.o
-OBJS-$(CONFIG_AMF)                     += amfenc.o
+OBJS-$(CONFIG_AMF)                     += amfenc.o amfdec.o
 OBJS-$(CONFIG_AUDIO_FRAME_QUEUE)       += audio_frame_queue.o
 OBJS-$(CONFIG_ATSC_A53)                += atsc_a53.o
 OBJS-$(CONFIG_AUDIODSP)                += audiodsp.o
@@ -167,6 +167,7 @@ OBJS-$(CONFIG_TEXTUREDSPENC)           += texturedspenc.o
 OBJS-$(CONFIG_TPELDSP)                 += tpeldsp.o
 OBJS-$(CONFIG_VAAPI_ENCODE)            += vaapi_encode.o
 OBJS-$(CONFIG_AV1_AMF_ENCODER)         += amfenc_av1.o
+OBJS-$(CONFIG_AV1_AMF_DECODER)         += amfdec.o
 OBJS-$(CONFIG_VC1DSP)                  += vc1dsp.o
 OBJS-$(CONFIG_VIDEODSP)                += videodsp.o
 OBJS-$(CONFIG_VP3DSP)                  += vp3dsp.o
@@ -409,6 +410,7 @@ OBJS-$(CONFIG_H264_DECODER)            += h264dec.o h264_cabac.o h264_cavlc.o \
                                           h264_refs.o \
                                           h264_slice.o h264data.o h274.o
 OBJS-$(CONFIG_H264_AMF_ENCODER)        += amfenc_h264.o
+OBJS-$(CONFIG_H264_AMF_DECODER)        += amfdec.o
 OBJS-$(CONFIG_H264_CUVID_DECODER)      += cuviddec.o
 OBJS-$(CONFIG_H264_MEDIACODEC_DECODER) += mediacodecdec.o
 OBJS-$(CONFIG_H264_MEDIACODEC_ENCODER) += mediacodecenc.o
@@ -435,6 +437,7 @@ OBJS-$(CONFIG_HEVC_DECODER)            += hevcdec.o hevc_mvs.o \
                                           hevcdsp.o hevc_filter.o hevc_data.o \
                                           h274.o aom_film_grain.o
 OBJS-$(CONFIG_HEVC_AMF_ENCODER)        += amfenc_hevc.o
+OBJS-$(CONFIG_HEVC_AMF_DECODER)        += amfdec.o
 OBJS-$(CONFIG_HEVC_CUVID_DECODER)      += cuviddec.o
 OBJS-$(CONFIG_HEVC_MEDIACODEC_DECODER) += mediacodecdec.o
 OBJS-$(CONFIG_HEVC_MEDIACODEC_ENCODER) += mediacodecenc.o
@@ -1263,7 +1266,7 @@ SKIPHEADERS                            += %_tablegen.h                  \
                                           bitstream_template.h          \
                                           $(ARCH)/vpx_arith.h           \
 
-SKIPHEADERS-$(CONFIG_AMF)              += amfenc.h
+SKIPHEADERS-$(CONFIG_AMF)              += amfenc.h amfdec.h
 SKIPHEADERS-$(CONFIG_D3D11VA)          += d3d11va.h dxva2_internal.h
 SKIPHEADERS-$(CONFIG_D3D12VA)          += d3d12va_decode.h
 SKIPHEADERS-$(CONFIG_DXVA2)            += dxva2.h dxva2_internal.h
diff --git a/libavcodec/allcodecs.c b/libavcodec/allcodecs.c
index b102a8069e..d215c9f0d4 100644
--- a/libavcodec/allcodecs.c
+++ b/libavcodec/allcodecs.c
@@ -834,10 +834,12 @@ extern const FFCodec ff_av1_nvenc_encoder;
 extern const FFCodec ff_av1_qsv_decoder;
 extern const FFCodec ff_av1_qsv_encoder;
 extern const FFCodec ff_av1_amf_encoder;
+extern const FFCodec ff_av1_amf_decoder;
 extern const FFCodec ff_av1_vaapi_encoder;
 extern const FFCodec ff_libopenh264_encoder;
 extern const FFCodec ff_libopenh264_decoder;
 extern const FFCodec ff_h264_amf_encoder;
+extern const FFCodec ff_h264_amf_decoder;
 extern const FFCodec ff_h264_cuvid_decoder;
 extern const FFCodec ff_h264_mf_encoder;
 extern const FFCodec ff_h264_nvenc_encoder;
@@ -847,6 +849,7 @@ extern const FFCodec ff_h264_v4l2m2m_encoder;
 extern const FFCodec ff_h264_vaapi_encoder;
 extern const FFCodec ff_h264_videotoolbox_encoder;
 extern const FFCodec ff_hevc_amf_encoder;
+extern const FFCodec ff_hevc_amf_decoder;
 extern const FFCodec ff_hevc_cuvid_decoder;
 extern const FFCodec ff_hevc_mediacodec_decoder;
 extern const FFCodec ff_hevc_mediacodec_encoder;
diff --git a/libavcodec/amfdec.c b/libavcodec/amfdec.c
new file mode 100644
index 0000000000..f365d3084c
--- /dev/null
+++ b/libavcodec/amfdec.c
@@ -0,0 +1,696 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/hwcontext_amf.h"
+#include "libavutil/hwcontext_amf_internal.h"
+#include "amfdec.h"
+#include "codec_internal.h"
+#include "hwconfig.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/time.h"
+#include "decode.h"
+#include "libavutil/mastering_display_metadata.h"
+
+#if CONFIG_D3D11VA
+#include "libavutil/hwcontext_d3d11va.h"
+#endif
+#if CONFIG_DXVA2
+#define COBJMACROS
+#include "libavutil/hwcontext_dxva2.h"
+#endif
+
+#ifdef _WIN32
+#include "compat/w32dlfcn.h"
+#else
+#include <dlfcn.h>
+#endif
+//will be in public headers soon
+#define AMF_VIDEO_DECODER_OUTPUT_FORMAT                L"OutputDecodeFormat"
+
+// Pixel formats the AMF decoders can output, in order of preference.
+// The hardware formats (D3D11/DXVA2) are only advertised when the matching
+// hwcontext support is compiled in; AV_PIX_FMT_AMF_SURFACE is the native
+// zero-copy format.
+const enum AVPixelFormat amf_dec_pix_fmts[] = {
+    AV_PIX_FMT_YUV420P,
+    AV_PIX_FMT_NV12,
+    AV_PIX_FMT_BGRA,
+    AV_PIX_FMT_ARGB,
+    AV_PIX_FMT_RGBA,
+    AV_PIX_FMT_GRAY8,
+    AV_PIX_FMT_BGR0,
+    AV_PIX_FMT_YUYV422,
+    AV_PIX_FMT_P010,
+    AV_PIX_FMT_P012,
+    AV_PIX_FMT_YUV420P10,
+    AV_PIX_FMT_YUV420P12,
+    AV_PIX_FMT_YUV420P16,
+#if CONFIG_D3D11VA
+    AV_PIX_FMT_D3D11,
+#endif
+#if CONFIG_DXVA2
+    AV_PIX_FMT_DXVA2_VLD,
+#endif
+    AV_PIX_FMT_AMF_SURFACE,
+    AV_PIX_FMT_NONE
+};
+
+// Hardware configuration advertised to ff_get_format(): AMF surfaces may be
+// supplied either via a hw_frames_ctx or a hw_device_ctx of type AMF.
+static const AVCodecHWConfigInternal *const amf_hw_configs[] = {
+    &(const AVCodecHWConfigInternal) {
+        .public = {
+            .pix_fmt     = AV_PIX_FMT_AMF_SURFACE,
+            .methods     = AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX |
+                           AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX,
+            .device_type = AV_HWDEVICE_TYPE_AMF,
+        },
+        .hwaccel = NULL,
+    },
+    NULL
+};
+
+// AVBufferRef free callback: drops our reference on the wrapped AMFSurface so
+// the surface can return to the decoder's pool. 'data' is the AMFSurface*
+// that was passed to av_buffer_create(); 'opaque' (the AVCodecContext) is
+// unused here.
+static void amf_free_amfsurface(void *opaque, uint8_t *data)
+{
+    AMFSurface *surface = (AMFSurface*)(data);
+    surface->pVtbl->Release(surface);
+}
+
+/**
+ * Create and configure the AMF decoder component for the current codec.
+ *
+ * Maps the codec id/profile to an AMF decoder component, forwards color
+ * metadata and user options, hands over extradata, sizes the surface pool
+ * and finally calls Init() on the component.
+ *
+ * @return 0 on success, a negative AVERROR code on failure.
+ */
+static int amf_init_decoder(AVCodecContext *avctx)
+{
+    AMFDecoderContext     *ctx = avctx->priv_data;
+    AVAMFDeviceContext * internal = ctx->amf_device_ctx;
+    const wchar_t           *codec_id = NULL;
+    AMF_RESULT              res;
+    AMFBuffer               *buffer;
+    amf_int64               color_profile;
+    int                     pool_size = 36;
+
+    ctx->drain = 0;
+    ctx->resolution_changed = 0;
+
+    switch (avctx->codec->id) {
+        case AV_CODEC_ID_H264:
+            codec_id = AMFVideoDecoderUVD_H264_AVC;
+            break;
+        case AV_CODEC_ID_HEVC: {
+            if (avctx->profile == AV_PROFILE_HEVC_MAIN_10)
+                codec_id = AMFVideoDecoderHW_H265_MAIN10;
+            else
+                codec_id = AMFVideoDecoderHW_H265_HEVC;
+        } break;
+        case AV_CODEC_ID_AV1:
+            if (avctx->profile == AV_PROFILE_AV1_PROFESSIONAL)
+                codec_id = AMFVideoDecoderHW_AV1_12BIT;
+            else
+                codec_id = AMFVideoDecoderHW_AV1;
+            break;
+        default:
+            break;
+    }
+    AMF_RETURN_IF_FALSE(ctx, codec_id != NULL, AVERROR(EINVAL), "Codec %d is not supported\n", avctx->codec->id);
+
+    res = internal->factory->pVtbl->CreateComponent(internal->factory, internal->context, codec_id, &ctx->decoder);
+    // this is a decoder, so report AVERROR_DECODER_NOT_FOUND (not _ENCODER_)
+    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_DECODER_NOT_FOUND, "CreateComponent(%ls) failed with error %d\n", codec_id, res);
+
+    // Color Metadata
+    /// Color Range (Support for older Drivers)
+    if (avctx->color_range == AVCOL_RANGE_JPEG) {
+        AMF_ASSIGN_PROPERTY_BOOL(res, ctx->decoder, AMF_VIDEO_DECODER_FULL_RANGE_COLOR, 1);
+    } else if (avctx->color_range != AVCOL_RANGE_UNSPECIFIED) {
+        AMF_ASSIGN_PROPERTY_BOOL(res, ctx->decoder, AMF_VIDEO_DECODER_FULL_RANGE_COLOR, 0);
+    }
+    color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_UNKNOWN;
+    switch (avctx->colorspace) {
+    case AVCOL_SPC_SMPTE170M:
+        if (avctx->color_range == AVCOL_RANGE_JPEG) {
+            color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_FULL_601;
+        } else {
+            color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_601;
+        }
+        break;
+    case AVCOL_SPC_BT709:
+        if (avctx->color_range == AVCOL_RANGE_JPEG) {
+            color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_FULL_709;
+        } else {
+            color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_709;
+        }
+        break;
+    case AVCOL_SPC_BT2020_NCL:
+    case AVCOL_SPC_BT2020_CL:
+        if (avctx->color_range == AVCOL_RANGE_JPEG) {
+            color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_FULL_2020;
+        } else {
+            color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_2020;
+        }
+        break;
+    }
+    if (color_profile != AMF_VIDEO_CONVERTER_COLOR_PROFILE_UNKNOWN)
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_VIDEO_DECODER_COLOR_PROFILE, color_profile);
+    if (avctx->color_trc != AVCOL_TRC_UNSPECIFIED)
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_VIDEO_DECODER_COLOR_TRANSFER_CHARACTERISTIC, (amf_int64)avctx->color_trc);
+
+    if (avctx->color_primaries != AVCOL_PRI_UNSPECIFIED)
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_VIDEO_DECODER_COLOR_PRIMARIES, (amf_int64)avctx->color_primaries);
+
+    // user options: -1 means "leave the AMF default"
+    if (ctx->timestamp_mode != -1)
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_TIMESTAMP_MODE, ctx->timestamp_mode);
+    if (ctx->decoder_mode != -1)
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_VIDEO_DECODER_REORDER_MODE, ctx->decoder_mode);
+    if (ctx->dpb_size != -1)
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_VIDEO_DECODER_DPB_SIZE, ctx->dpb_size);
+    if (ctx->lowlatency != -1)
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_VIDEO_DECODER_LOW_LATENCY, ctx->lowlatency);
+    if (ctx->smart_access_video != -1) {
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_VIDEO_DECODER_ENABLE_SMART_ACCESS_VIDEO, ctx->smart_access_video != 0);
+        if (res != AMF_OK) {
+            av_log(avctx, AV_LOG_ERROR, "The Smart Access Video is not supported by AMF decoder.\n");
+            return AVERROR(EINVAL);
+        } else {
+            av_log(avctx, AV_LOG_INFO, "The Smart Access Video (%d) is set.\n", ctx->smart_access_video);
+            // Set low latency mode if Smart Access Video is enabled
+            if (ctx->smart_access_video != 0) {
+                AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_VIDEO_DECODER_LOW_LATENCY, true);
+                av_log(avctx, AV_LOG_INFO, "The Smart Access Video set low latency mode for decoder.\n");
+            }
+        }
+    }
+    if (ctx->skip_transfer_sav != -1)
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_VIDEO_DECODER_SKIP_TRANSFER_SMART_ACCESS_VIDEO, ctx->skip_transfer_sav);
+
+    // hand the codec extradata (SPS/PPS etc.) to the decoder
+    if (avctx->extradata_size) {
+        res = internal->context->pVtbl->AllocBuffer(internal->context, AMF_MEMORY_HOST, avctx->extradata_size, &buffer);
+        if (res == AMF_OK) {
+            memcpy(buffer->pVtbl->GetNative(buffer), avctx->extradata, avctx->extradata_size);
+            AMF_ASSIGN_PROPERTY_INTERFACE(res,ctx->decoder, AMF_VIDEO_DECODER_EXTRADATA, buffer);
+            buffer->pVtbl->Release(buffer);
+            buffer = NULL;
+        }
+    }
+    if (ctx->surface_pool_size == -1) {
+        ctx->surface_pool_size = pool_size;
+        if (avctx->extra_hw_frames > 0)
+            ctx->surface_pool_size += avctx->extra_hw_frames;
+        if (avctx->active_thread_type & FF_THREAD_FRAME)
+            ctx->surface_pool_size += avctx->thread_count;
+    }
+
+    //at the moment, there is such a restriction in AMF.
+    //when it is possible, I will remove this code
+    if (ctx->surface_pool_size > 100)
+        ctx->surface_pool_size = 100;
+
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->decoder, AMF_VIDEO_DECODER_SURFACE_POOL_SIZE, ctx->surface_pool_size);
+    res = ctx->decoder->pVtbl->Init(ctx->decoder, AMF_SURFACE_UNKNOWN, avctx->width, avctx->height);
+    // the original code ignored the Init() result and returned success
+    // unconditionally; surface an error to the caller instead
+    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR(EINVAL), "Decoder Init() failed with error %d\n", res);
+
+    return 0;
+}
+
+/**
+ * Bind the AMF device context to whatever hardware context the caller
+ * supplied: derive from a frames context if present, else from a device
+ * context, else initialize a standalone AMF context.
+ *
+ * @return 0 on success, a negative AVERROR code on failure.
+ */
+static int amf_init_decoder_context(AVCodecContext *avctx)
+{
+    AMFDecoderContext *ctx = avctx->priv_data;
+    int ret;
+
+    if (avctx->hw_frames_ctx) {
+        AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
+        ret = av_amf_context_derive(ctx->amf_device_ctx, frames_ctx->device_ctx, NULL, 0);
+        if (ret < 0)
+            return ret;
+    }
+    else if (avctx->hw_device_ctx) {
+        AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)avctx->hw_device_ctx->data;
+        ret = av_amf_context_derive(ctx->amf_device_ctx, device_ctx, NULL, 0);
+        if (ret < 0)
+            return ret;
+    } else {
+        // no hardware context from the caller: create our own AMF context
+        ret = av_amf_context_init(ctx->amf_device_ctx, avctx);
+        if (ret != 0) {
+            return ret;
+        }
+    }
+
+    return ret;
+}
+
+/**
+ * Close callback: tear down the AMF decoder component, release the device
+ * context when we own it (local_context), and free the cached packet.
+ *
+ * @return 0 always.
+ */
+static int amf_decode_close(AVCodecContext *avctx)
+{
+    AMFDecoderContext *ctx = avctx->priv_data;
+
+    if (ctx->decoder) {
+        ctx->decoder->pVtbl->Terminate(ctx->decoder);
+        ctx->decoder->pVtbl->Release(ctx->decoder);
+        ctx->decoder = NULL;
+    }
+
+    // only free the device context when it was allocated here and not
+    // borrowed from a caller-supplied hwcontext
+    if (ctx->amf_device_ctx && ctx->local_context) {
+        av_amf_context_free(0, (uint8_t *)ctx->amf_device_ctx);
+        av_freep(&ctx->amf_device_ctx);
+    }
+    // in_pkt was allocated with av_packet_alloc() in init; av_packet_unref()
+    // alone would leak the AVPacket structure itself
+    av_packet_free(&ctx->in_pkt);
+
+    return 0;
+}
+
+/**
+ * Init callback: pick up (or create) an AMF device context, set up a
+ * hardware frames context when only a device was supplied, create the AMF
+ * decoder component and negotiate the output pixel format.
+ *
+ * @return 0 on success, a negative AVERROR code on failure.
+ */
+static int amf_decode_init(AVCodecContext *avctx)
+{
+    AMFDecoderContext *ctx = avctx->priv_data;
+    int ret;
+    ctx->local_context = 0;
+    ctx->in_pkt = av_packet_alloc();
+    if (!ctx->in_pkt)
+        return AVERROR(ENOMEM);
+
+    if (avctx->hw_frames_ctx){
+        // caller supplied a frames context: borrow its device context
+        AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
+        if (frames_ctx->device_ctx->type == AV_HWDEVICE_TYPE_AMF) {
+            ctx->amf_device_ctx =  frames_ctx->device_ctx->hwctx;
+        }
+    }
+    else if  (avctx->hw_device_ctx && !avctx->hw_frames_ctx) {
+        // caller supplied only a device context: build a frames context on it
+        AVHWDeviceContext   *hwdev_ctx;
+        AVHWFramesContext *hwframes_ctx;
+        hwdev_ctx = (AVHWDeviceContext*)avctx->hw_device_ctx->data;
+        if (hwdev_ctx->type == AV_HWDEVICE_TYPE_AMF)
+        {
+            ctx->amf_device_ctx =  hwdev_ctx->hwctx;
+        }
+
+        avctx->hw_frames_ctx = av_hwframe_ctx_alloc(avctx->hw_device_ctx);
+
+        if (!avctx->hw_frames_ctx) {
+            av_log(avctx, AV_LOG_ERROR, "av_hwframe_ctx_alloc failed\n");
+            return AVERROR(ENOMEM);
+        }
+
+        hwframes_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
+        hwframes_ctx->width             = FFALIGN(avctx->coded_width,  32);
+        hwframes_ctx->height            = FFALIGN(avctx->coded_height, 32);
+        hwframes_ctx->format            = AV_PIX_FMT_AMF_SURFACE;
+        hwframes_ctx->sw_format         = avctx->sw_pix_fmt == AV_PIX_FMT_YUV420P10 ? AV_PIX_FMT_P010 : AV_PIX_FMT_NV12;
+        hwframes_ctx->initial_pool_size = ctx->surface_pool_size + 8;
+        avctx->pix_fmt = AV_PIX_FMT_AMF_SURFACE;
+
+        ret = av_hwframe_ctx_init(avctx->hw_frames_ctx);
+
+        if (ret < 0) {
+            av_log(avctx, AV_LOG_ERROR, "Error initializing a AMF frame pool\n");
+            av_buffer_unref(&avctx->hw_frames_ctx);
+            return ret;
+        }
+    }  else {
+        // no hardware context at all: create a standalone AMF device context
+        ctx->amf_device_ctx = av_mallocz(sizeof(AVAMFDeviceContext));
+        if (!ctx->amf_device_ctx) {
+            amf_decode_close(avctx);
+            return AVERROR(ENOMEM);
+        }
+        ctx->local_context = 1;
+        if ((ret = av_amf_context_create(ctx->amf_device_ctx, avctx, "", NULL, 0)) != 0) {
+            amf_decode_close(avctx);
+            return ret;
+        }
+        if ((ret = amf_init_decoder_context(avctx)) != 0) {
+            amf_decode_close(avctx);
+            return ret;
+        }
+    }
+    if ((ret = amf_init_decoder(avctx)) == 0) {
+        AMFVariantStruct    format_var = {0};
+        ret = ctx->decoder->pVtbl->GetProperty(ctx->decoder, AMF_VIDEO_DECODER_OUTPUT_FORMAT, &format_var);
+        if (ret != AMF_OK) {
+            // clean up the component created above instead of leaking it
+            amf_decode_close(avctx);
+            return AVERROR(EINVAL);
+        }
+        enum AVPixelFormat format = av_amf_to_av_format((AMF_SURFACE_FORMAT)format_var.int64Value);
+        enum AVPixelFormat pix_fmts[3] = {
+        AV_PIX_FMT_AMF_SURFACE,
+        format,
+        AV_PIX_FMT_NONE };
+
+
+        ret = ff_get_format(avctx, pix_fmts);
+        if (ret < 0) {
+            // NOTE(review): ff_get_format failure leaves pix_fmt at NONE but
+            // still reports success - confirm this is intended
+            avctx->pix_fmt = AV_PIX_FMT_NONE;
+        }
+
+        return 0;
+    }
+    amf_decode_close(avctx);
+    return ret;
+}
+
+/**
+ * Read an interface-typed property from an AMF object and query it for the
+ * AMFBuffer interface.
+ *
+ * On AMF_OK, *val holds a new reference that the caller must Release().
+ *
+ * @return AMF_OK on success, AMF_INVALID_DATA_TYPE if the property is not an
+ *         interface, or the underlying AMF error.
+ */
+static AMF_RESULT amf_get_property_buffer(AMFData *object, const wchar_t *name, AMFBuffer **val)
+{
+    AMF_RESULT res;
+    AMFVariantStruct var;
+    res = AMFVariantInit(&var);
+    if (res == AMF_OK) {
+        res = object->pVtbl->GetProperty(object, name, &var);
+        if (res == AMF_OK) {
+            if (var.type == AMF_VARIANT_INTERFACE) {
+                AMFGuid guid_AMFBuffer = IID_AMFBuffer();
+                AMFInterface *amf_interface = AMFVariantInterface(&var);
+                // QueryInterface adds a reference that is returned to the caller
+                res = amf_interface->pVtbl->QueryInterface(amf_interface, &guid_AMFBuffer, (void**)val);
+            } else {
+                res = AMF_INVALID_DATA_TYPE;
+            }
+        }
+        // releases the variant's own reference; the caller keeps the QI one
+        AMFVariantClear(&var);
+    }
+    return res;
+}
+
+/**
+ * Wrap a decoded AMFSurface into an AVFrame.
+ *
+ * With an AMF hw_frames_ctx the surface pointer is passed through zero-copy
+ * (frame->data[0]); otherwise the surface is converted to host memory and
+ * its planes are mapped. On success the AVFrame's buffer owns a reference to
+ * the surface; on failure the caller keeps ownership and must Release() it.
+ *
+ * @return 0 on success, a negative AVERROR code on failure.
+ */
+static int amf_amfsurface_to_avframe(AVCodecContext *avctx, AMFSurface* surface, AVFrame *frame)
+{
+    AMFVariantStruct    var = {0};
+    AMFPlane            *plane;
+    int                 i;
+    int ret;
+
+    if (avctx->hw_frames_ctx) {
+        AVHWFramesContext *hwframes_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
+        if (hwframes_ctx->format == AV_PIX_FMT_AMF_SURFACE) {
+            ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
+            if (ret < 0) {
+                av_log(avctx, AV_LOG_ERROR, "Get hw frame failed.\n");
+                return ret;
+            }
+            //we need to release surface with frame to return it to decoder
+            frame->buf[1] = av_buffer_create((uint8_t *)surface, sizeof(AMFSurface),
+                                     amf_free_amfsurface, (void*)avctx,
+                                     AV_BUFFER_FLAG_READONLY);
+            if (!frame->buf[1])
+                return AVERROR(ENOMEM);
+            frame->data[0] = (uint8_t *)surface;
+        } else {
+            av_log(avctx, AV_LOG_ERROR, "Unknown format for hwframes_ctx\n");
+            // invalid configuration, not an allocation failure
+            return AVERROR(EINVAL);
+        }
+    } else {
+        ret = surface->pVtbl->Convert(surface, AMF_MEMORY_HOST);
+        AMF_RETURN_IF_FALSE(avctx, ret == AMF_OK, AVERROR_UNKNOWN, "Convert(amf::AMF_MEMORY_HOST) failed with error %d\n", ret);
+
+        for (i = 0; i < surface->pVtbl->GetPlanesCount(surface); i++) {
+            plane = surface->pVtbl->GetPlaneAt(surface, i);
+            frame->data[i] = plane->pVtbl->GetNative(plane);
+            frame->linesize[i] = plane->pVtbl->GetHPitch(plane);
+        }
+
+        frame->buf[0] = av_buffer_create((uint8_t *)surface, sizeof(AMFSurface),
+                                     amf_free_amfsurface, (void*)avctx,
+                                     AV_BUFFER_FLAG_READONLY);
+        if (!frame->buf[0])
+            return AVERROR(ENOMEM);
+        frame->format = av_amf_to_av_format(surface->pVtbl->GetFormat(surface));
+    }
+
+    frame->width  = avctx->width;
+    frame->height = avctx->height;
+
+    frame->pts = surface->pVtbl->GetPts(surface);
+
+    // dts was stashed on the buffer in amf_update_buffer_properties()
+    surface->pVtbl->GetProperty(surface, L"FFMPEG:dts", &var);
+    frame->pkt_dts = var.int64Value;
+
+    frame->duration = surface->pVtbl->GetDuration(surface);
+    if (frame->duration < 0)
+        frame->duration = 0;
+
+    frame->color_range = avctx->color_range;
+    frame->colorspace = avctx->colorspace;
+    frame->color_trc = avctx->color_trc;
+    frame->color_primaries = avctx->color_primaries;
+
+    // for PQ content, forward the HDR mastering/light-level metadata
+    if (frame->color_trc == AVCOL_TRC_SMPTE2084) {
+        AMFBuffer * hdrmeta_buffer = NULL;
+        ret = amf_get_property_buffer((AMFData *)surface, AMF_VIDEO_DECODER_HDR_METADATA, &hdrmeta_buffer);
+        if (ret == AMF_OK && hdrmeta_buffer != NULL) {
+            AMFHDRMetadata * hdrmeta = (AMFHDRMetadata*)hdrmeta_buffer->pVtbl->GetNative(hdrmeta_buffer);
+            if (hdrmeta != NULL) {
+                AVMasteringDisplayMetadata *mastering = av_mastering_display_metadata_create_side_data(frame);
+                const int chroma_den = 50000;
+                const int luma_den = 10000;
+
+                if (!mastering) {
+                    hdrmeta_buffer->pVtbl->Release(hdrmeta_buffer);
+                    return AVERROR(ENOMEM);
+                }
+
+                mastering->display_primaries[0][0] = av_make_q(hdrmeta->redPrimary[0], chroma_den);
+                mastering->display_primaries[0][1] = av_make_q(hdrmeta->redPrimary[1], chroma_den);
+
+                mastering->display_primaries[1][0] = av_make_q(hdrmeta->greenPrimary[0], chroma_den);
+                mastering->display_primaries[1][1] = av_make_q(hdrmeta->greenPrimary[1], chroma_den);
+
+                mastering->display_primaries[2][0] = av_make_q(hdrmeta->bluePrimary[0], chroma_den);
+                mastering->display_primaries[2][1] = av_make_q(hdrmeta->bluePrimary[1], chroma_den);
+
+                mastering->white_point[0] = av_make_q(hdrmeta->whitePoint[0], chroma_den);
+                mastering->white_point[1] = av_make_q(hdrmeta->whitePoint[1], chroma_den);
+
+                mastering->max_luminance = av_make_q(hdrmeta->maxMasteringLuminance, luma_den);
+                // was copy-pasted from max_luminance: use the *min* field here
+                mastering->min_luminance = av_make_q(hdrmeta->minMasteringLuminance, luma_den);
+
+                mastering->has_luminance = 1;
+                mastering->has_primaries = 1;
+                if (hdrmeta->maxContentLightLevel) {
+                    AVContentLightMetadata *light = av_content_light_metadata_create_side_data(frame);
+
+                    if (!light) {
+                        hdrmeta_buffer->pVtbl->Release(hdrmeta_buffer);
+                        return AVERROR(ENOMEM);
+                    }
+
+                    light->MaxCLL  = hdrmeta->maxContentLightLevel;
+                    light->MaxFALL = hdrmeta->maxFrameAverageLightLevel;
+                }
+            }
+            // amf_get_property_buffer returned a reference we must drop
+            hdrmeta_buffer->pVtbl->Release(hdrmeta_buffer);
+        }
+    }
+    return 0;
+}
+
+/**
+ * Query the decoder for one output surface and wrap it into an AVFrame.
+ *
+ * @return AMF_OK when a frame was produced, AMF_REPEAT when no output is
+ *         available yet, AMF_EOF at end of stream, other AMF errors on
+ *         failure.
+ */
+static AMF_RESULT amf_receive_frame(AVCodecContext *avctx, AVFrame *frame)
+{
+    AMFDecoderContext *ctx = avctx->priv_data;
+    AMF_RESULT          ret = AMF_OK;
+    AMFSurface          *surface = NULL;
+    AMFData             *data_out = NULL;
+    AMFGuid             guid = IID_AMFSurface();
+
+    ret = ctx->decoder->pVtbl->QueryOutput(ctx->decoder, &data_out);
+    if (ret != AMF_OK && ret != AMF_REPEAT) {
+        return ret;
+    }
+    if (data_out == NULL) {
+        return AMF_REPEAT;
+    }
+
+    // move the reference from the generic AMFData to the surface interface;
+    // the original code ignored the QueryInterface result, risking a NULL
+    // surface dereference below
+    ret = data_out->pVtbl->QueryInterface(data_out, &guid, (void**)&surface);
+    data_out->pVtbl->Release(data_out);
+    data_out = NULL;
+    AMF_GOTO_FAIL_IF_FALSE(avctx, ret == AMF_OK && surface != NULL, AMF_FAIL, "QueryInterface(IID_AMFSurface) failed = %d\n", ret);
+
+    ret = amf_amfsurface_to_avframe(avctx, surface, frame);
+    AMF_GOTO_FAIL_IF_FALSE(avctx, ret >= 0, AMF_FAIL, "Failed to convert AMFSurface to AVFrame = %d\n", ret);
+    return AMF_OK;
+fail:
+
+    if (surface) {
+        surface->pVtbl->Release(surface);
+        surface = NULL;
+    }
+    return ret;
+}
+
+/**
+ * Copy packet timing onto an AMFBuffer: pts, duration, and dts (stored as a
+ * custom "FFMPEG:dts" property so it can be recovered on the output side).
+ *
+ * @return AMF_OK on success, AMF_INVALID_ARG on NULL arguments.
+ */
+static AMF_RESULT amf_update_buffer_properties(AVCodecContext *avctx, AMFBuffer* buffer, const AVPacket* pkt)
+{
+    AMF_RESULT          res;
+
+    AMF_RETURN_IF_FALSE(avctx, buffer != NULL, AMF_INVALID_ARG, "update_buffer_properties() - buffer not passed in");
+    AMF_RETURN_IF_FALSE(avctx, pkt != NULL, AMF_INVALID_ARG, "update_buffer_properties() - packet not passed in");
+    buffer->pVtbl->SetPts(buffer, pkt->pts);
+    buffer->pVtbl->SetDuration(buffer, pkt->duration);
+    AMF_ASSIGN_PROPERTY_INT64(res, buffer, L"FFMPEG:dts", pkt->dts);
+    if (res != AMF_OK)
+        av_log(avctx, AV_LOG_VERBOSE, "Failed to assign dts value.\n");
+    return AMF_OK;
+}
+
+/**
+ * Copy an AVPacket's payload into a newly allocated host AMFBuffer
+ * (including zeroed AV_INPUT_BUFFER_PADDING_SIZE padding) and stamp the
+ * packet timing onto it. On success *buffer holds a reference the caller
+ * must Release(); on failure *buffer is NULL.
+ *
+ * @return AMF_OK on success, an AMF error code on failure.
+ */
+static AMF_RESULT amf_buffer_from_packet(AVCodecContext *avctx, const AVPacket* pkt, AMFBuffer** buffer)
+{
+    AMFDecoderContext *ctx = avctx->priv_data;
+    AMFContext          *ctxt = ctx->amf_device_ctx->context;
+    void                *mem;
+    AMF_RESULT          err;
+    AMFBuffer           *buf = NULL;
+
+    AMF_RETURN_IF_FALSE(ctxt, pkt != NULL, AMF_INVALID_ARG, "amf_buffer_from_packet() - packet not passed in");
+    AMF_RETURN_IF_FALSE(ctxt, buffer != NULL, AMF_INVALID_ARG, "amf_buffer_from_packet() - buffer pointer not passed in");
+
+    err = ctxt->pVtbl->AllocBuffer(ctxt, AMF_MEMORY_HOST, pkt->size + AV_INPUT_BUFFER_PADDING_SIZE, buffer);
+    AMF_RETURN_IF_FALSE(ctxt, err == AMF_OK, err, "amf_buffer_from_packet() - AllocBuffer failed");
+    buf = *buffer;
+    err = buf->pVtbl->SetSize(buf, pkt->size);
+    if (err != AMF_OK) {
+        // release the freshly allocated buffer instead of leaking it
+        av_log(avctx, AV_LOG_ERROR, "amf_buffer_from_packet() - SetSize failed\n");
+        buf->pVtbl->Release(buf);
+        *buffer = NULL;
+        return err;
+    }
+    // get the memory location and check the buffer was indeed allocated
+    mem = buf->pVtbl->GetNative(buf);
+    if (mem == NULL) {
+        av_log(avctx, AV_LOG_ERROR, "amf_buffer_from_packet() - GetNative failed\n");
+        buf->pVtbl->Release(buf);
+        *buffer = NULL;
+        return AMF_INVALID_POINTER;
+    }
+
+    // copy the packet memory and clear data padding
+    memcpy(mem, pkt->data, pkt->size);
+    memset((amf_int8*)(mem)+pkt->size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
+
+    return amf_update_buffer_properties(avctx, buf, pkt);
+}
+
+/**
+ * receive_frame callback: pull a packet from the decode queue, submit it to
+ * the AMF decoder, then query for a decoded frame. Handles external drain
+ * (EOF) and internal drain on resolution change.
+ *
+ * @return 0 when a frame is returned, AVERROR(EAGAIN) when more input is
+ *         needed, AVERROR_EOF at end of stream, other negative AVERROR on
+ *         failure.
+ */
+static int amf_decode_frame(AVCodecContext *avctx, struct AVFrame *frame)
+{
+    AMFDecoderContext *ctx = avctx->priv_data;
+    AMFBuffer           *buf;
+    AMF_RESULT          res;
+    int                 got_frame = 0;
+    AVPacket            *avpkt = ctx->in_pkt;
+
+    if (!ctx->decoder)
+        return AVERROR(EINVAL);
+    // get packet if needed
+    if(!ctx->drain){
+        if(ctx->resolution_changed)
+            // keep the pending packet: it was not consumed before the drain
+            ctx->resolution_changed = 0;
+        else{
+            av_packet_unref(avpkt);
+            int ret;
+            ret = ff_decode_get_packet(avctx, avpkt);
+            if (ret < 0 && ret != AVERROR_EOF)
+                return ret;
+            if (ret == AVERROR_EOF) {
+                //nothing to consume, start external drain
+                ctx->decoder->pVtbl->Drain(ctx->decoder);
+                ctx->drain = 1;
+            }
+        }
+    }
+
+    if (!ctx->drain) {
+        // submit frame
+        res = amf_buffer_from_packet(avctx, avpkt, &buf);
+        AMF_RETURN_IF_FALSE(avctx, res == AMF_OK, 0, "Cannot convert AVPacket to AMFbuffer");
+        res = ctx->decoder->pVtbl->SubmitInput(ctx->decoder, (AMFData*) buf);
+        buf->pVtbl->Release(buf);
+
+        if(res == AMF_DECODER_NO_FREE_SURFACES) {
+            // input is not consumed, need to QueryOutput and submit again
+            // NOTE(review): the packet is unreferenced on the next call, so
+            // the unconsumed input looks like it gets dropped - confirm
+            av_log(avctx, AV_LOG_VERBOSE, "SubmitInput() returned NO_FREE_SURFACES\n");
+        } else if (res == AMF_RESOLUTION_CHANGED) {
+            //input is not consumed, start internal drain
+            ctx->decoder->pVtbl->Drain(ctx->decoder);
+            ctx->drain = 1;
+            // process resolution_changed when internal drain is complete
+            ctx->resolution_changed = 1;
+            res = AMF_OK;
+        } else if (res != AMF_OK && res != AMF_NEED_MORE_INPUT && res != AMF_REPEAT) {
+            av_log(avctx, AV_LOG_ERROR, "SubmitInput() returned error %d\n", res);
+            return AVERROR(EINVAL);
+        }
+
+    }
+
+    res = amf_receive_frame(avctx, frame);
+    if (res == AMF_OK)
+        got_frame = 1;
+    else if (res == AMF_REPEAT)
+        // decoder has no output yet
+        res = AMF_OK;
+    else if (res == AMF_EOF) {
+        // drain is complete
+        ctx->drain = 0;
+        if(ctx->resolution_changed){
+            // re-initialize the decoder with the new stream dimensions
+            AMFVariantStruct    size_var = {0};
+            res = ctx->decoder->pVtbl->GetProperty(ctx->decoder, AMF_VIDEO_DECODER_CURRENT_SIZE, &size_var);
+            if (res != AMF_OK) {
+                return AVERROR(EINVAL);
+            }
+
+            avctx->width = size_var.sizeValue.width;
+            avctx->height = size_var.sizeValue.height;
+            avctx->coded_width  = size_var.sizeValue.width;
+            avctx->coded_height = size_var.sizeValue.height;
+            res = ctx->decoder->pVtbl->ReInit(ctx->decoder, avctx->width, avctx->height);
+            if (res != AMF_OK) {
+                av_log(avctx, AV_LOG_ERROR, "ReInit() returned %d\n", res);
+                return AVERROR(EINVAL);
+            }
+
+        }else
+            return AVERROR_EOF;
+    } else {
+        av_log(avctx, AV_LOG_ERROR, "Unknown result from QueryOutput %d\n", res);
+    }
+    return got_frame ? 0 : AVERROR(EAGAIN);
+}
+
+/**
+ * Flush callback: discard all pending decoder input/output.
+ */
+static void amf_decode_flush(AVCodecContext *avctx)
+{
+    AMFDecoderContext *ctx = avctx->priv_data;
+    // init can fail before the component is created; avoid a NULL deref
+    if (ctx->decoder)
+        ctx->decoder->pVtbl->Flush(ctx->decoder);
+}
+
+#define OFFSET(x) offsetof(AMFDecoderContext, x)
+#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
+
+// Decoder options shared by all AMF decoders; the default -1 means
+// "leave the AMF runtime default / property unset".
+static const AVOption options[] = {
+    // Decoder mode
+    { "decoder_mode",       "Decoder mode",                                                 OFFSET(decoder_mode),       AV_OPT_TYPE_INT,   { .i64 = -1  }, -1, AMF_VIDEO_DECODER_MODE_LOW_LATENCY, VD, "decoder_mode" },
+    { "regular",            "DPB delay is based on number of reference frames + 1",         0,                          AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_DECODER_MODE_REGULAR      }, 0, 0, VD, "decoder_mode" },
+    { "compliant",          "DPB delay is based on profile - up to 16",                     0,                          AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_DECODER_MODE_COMPLIANT    }, 0, 0, VD, "decoder_mode" },
+    { "low_latency",        "DPB delay is 0",                                               0,                          AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_DECODER_MODE_LOW_LATENCY  }, 0, 0, VD, "decoder_mode" },
+
+    // Timestamp mode
+    { "timestamp_mode",     "Timestamp mode",                                               OFFSET(timestamp_mode),     AV_OPT_TYPE_INT,   { .i64 = -1 }, -1, AMF_TS_DECODE, VD, "timestamp_mode" },
+    { "presentation",       "Preserve timestamps from input to output",                     0,                          AV_OPT_TYPE_CONST, { .i64 = AMF_TS_PRESENTATION }, 0, 0, VD, "timestamp_mode" },
+    { "sort",               "Resort PTS list",                                              0,                          AV_OPT_TYPE_CONST, { .i64 = AMF_TS_SORT         }, 0, 0, VD, "timestamp_mode" },
+    { "decode",             "Decode order",                                                 0,                          AV_OPT_TYPE_CONST, { .i64 = AMF_TS_DECODE       }, 0, 0, VD, "timestamp_mode" },
+
+    // Reference frame management
+    { "surface_pool_size",  "Number of surfaces in the decode pool",                        OFFSET(surface_pool_size),  AV_OPT_TYPE_INT,  { .i64 = -1 }, -1, INT_MAX, VD, NULL },
+    { "dpb_size",           "Minimum number of surfaces for reordering",                    OFFSET(dpb_size),           AV_OPT_TYPE_INT,  { .i64 = -1 }, -1, 32, VD, NULL },
+
+    { "lowlatency",         "Low latency",                                                  OFFSET(lowlatency),         AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, VD, NULL },
+    { "smart_access_video", "Smart Access Video",                                           OFFSET(smart_access_video), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, VD, NULL },
+    { "skip_transfer_sav",  "Skip transfer on another GPU when SAV enabled",                OFFSET(skip_transfer_sav),  AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, VD, NULL },
+
+    { NULL }
+};
+
+// AVClass shared by all AMF decoder instances (logging + option handling).
+static const AVClass amf_decode_class = {
+    .class_name = "amf",
+    .item_name  = av_default_item_name,
+    .option     = options,
+    .version    = LIBAVUTIL_VERSION_INT,
+};
+
+//    FF_CODEC_DECODE_CB(amf_decode_frame),
+
+// Stamp out one FFCodec per codec: x = short name ("h264"), X = codec id
+// suffix (H264), bsf_name = optional bitstream filter chain applied to the
+// input (e.g. annex-b conversion for h264). Comments cannot go inside the
+// macro body because of the line continuations.
+#define DEFINE_AMF_DECODER(x, X, bsf_name) \
+const FFCodec ff_##x##_amf_decoder = { \
+    .p.name         = #x "_amf", \
+    CODEC_LONG_NAME(#X " AMD AMF video decoder"), \
+    .priv_data_size = sizeof(AMFDecoderContext), \
+    .p.type         = AVMEDIA_TYPE_VIDEO, \
+    .p.id           = AV_CODEC_ID_##X, \
+    .init           = amf_decode_init, \
+    FF_CODEC_RECEIVE_FRAME_CB(amf_decode_frame), \
+    .flush          = amf_decode_flush, \
+    .close          = amf_decode_close, \
+    .bsfs           = bsf_name, \
+    .p.capabilities = AV_CODEC_CAP_HARDWARE | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AVOID_PROBING, \
+    .p.priv_class   = &amf_decode_class, \
+    .p.pix_fmts     = amf_dec_pix_fmts, \
+    .hw_configs     = amf_hw_configs, \
+    .p.wrapper_name = "amf", \
+    .caps_internal  = FF_CODEC_CAP_NOT_INIT_THREADSAFE, \
+}; \
+
+DEFINE_AMF_DECODER(h264, H264, "h264_mp4toannexb")
+DEFINE_AMF_DECODER(hevc, HEVC, NULL)
+DEFINE_AMF_DECODER(av1, AV1, NULL)
diff --git a/libavcodec/amfdec.h b/libavcodec/amfdec.h
new file mode 100644
index 0000000000..a1870271d8
--- /dev/null
+++ b/libavcodec/amfdec.h
@@ -0,0 +1,63 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_AMFDEC_H
+#define AVCODEC_AMFDEC_H
+
+#include <AMF/core/Buffer.h>
+#include <AMF/core/Factory.h>
+#include <AMF/core/Context.h>
+#include <AMF/core/Surface.h>
+#include <AMF/components/Component.h>
+#include <AMF/components/VideoDecoderUVD.h>
+
+#include "avcodec.h"
+#include "libavformat/avformat.h"
+#include "libavutil/fifo.h"
+#include "libavutil/frame.h"
+#include "libavutil/opt.h"
+#include "libavutil/hwcontext_amf.h"
+/**
+* AMF decoder context
+*/
+typedef struct AMFDecoderContext {
+    AVClass            *avclass;
+
+    AVAMFDeviceContext *amf_device_ctx;
+    int                 local_context;
+
+    //decoder
+    AMFComponent       *decoder; ///< AMF decoder object
+    AMF_SURFACE_FORMAT  format;  ///< AMF surface format
+
+    // common decoder options
+    int                 decoder_mode;
+    int                 timestamp_mode;
+    int                 surface_pool_size;
+    int                 dpb_size;
+    int                 lowlatency;
+    int                 smart_access_video;
+    int                 skip_transfer_sav;
+    int                 drain;
+    int                 resolution_changed;
+    AVPacket*           in_pkt;
+    enum AMF_SURFACE_FORMAT output_format;
+
+} AMFDecoderContext;
+
+#endif // AVCODEC_AMFDEC_H
\ No newline at end of file
-- 
2.39.3 (Apple Git-146)

_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".

^ permalink raw reply	[flat|nested] 17+ messages in thread

* [FFmpeg-devel] [PATCH 03/10, v3] avcodec/amfenc: Fixes the color information in the output.
  2024-05-30 13:08 [FFmpeg-devel] [PATCH 01/10, v3] avutil: add hwcontext_amf Dmitrii Ovchinnikov
  2024-05-30 13:08 ` [FFmpeg-devel] [PATCH 02/10, v3] avcodec: add amfdec Dmitrii Ovchinnikov
@ 2024-05-30 13:08 ` Dmitrii Ovchinnikov
  2024-05-30 13:08 ` [FFmpeg-devel] [PATCH 04/10, v3] avcodec/amfenc: HDR metadata Dmitrii Ovchinnikov
                   ` (9 subsequent siblings)
  11 siblings, 0 replies; 17+ messages in thread
From: Dmitrii Ovchinnikov @ 2024-05-30 13:08 UTC (permalink / raw)
  To: ffmpeg-devel
  Cc: Evgeny Pavlov, Michael Fabian 'Xaymar' Dirks, Araz Iusubov

From: Michael Fabian 'Xaymar' Dirks <michael.dirks@xaymar.com>

Added 10-bit support for the AMF HEVC encoder.

before:

command - ffmpeg.exe -hide_banner -y -hwaccel d3d11va -hwaccel_output_format d3d11 -i test_10bit_file.mkv -an -c:v h264_amf res.dx11_hw_h264.mkv
output -  Format of input frames context (p010le) is not supported by AMF.
command - ffmpeg.exe -hide_banner -y -hwaccel d3d11va -hwaccel_output_format d3d11 -i test_10bit_file -an -c:v hevc_amf res.dx11_hw_hevc.mkv
output -  Format of input frames context (p010le) is not supported by AMF.

after:

command - ffmpeg.exe -hide_banner -y -hwaccel d3d11va -hwaccel_output_format d3d11 -i test_10bit_file -an -c:v h264_amf res.dx11_hw_h264.mkv
output -  10-bit input video is not supported by AMF H264 encoder
command - ffmpeg.exe -hide_banner -y -hwaccel d3d11va -hwaccel_output_format d3d11 -i test_10bit_file -an -c:v hevc_amf res.dx11_hw_hevc.mkv
output -  10bit file

v2 - restored a line that was accidentally dropped from ff_amf_pix_fmts
v3 - fixes after review
v4 - extract duplicated code, fix incorrect processing of 10-bit input for h264
v5 - non-functional changes after review

Co-authored-by: Evgeny Pavlov <lucenticus@gmail.com>
Co-authored-by: Araz Iusubov <Primeadvice@gmail.com>
---
 libavcodec/amfenc.c      | 37 +++++++++++++++++++++++++++++++++++++
 libavcodec/amfenc.h      |  3 +++
 libavcodec/amfenc_h264.c | 24 ++++++++++++++++++++----
 libavcodec/amfenc_hevc.c | 26 +++++++++++++++++++++++++-
 4 files changed, 85 insertions(+), 5 deletions(-)

diff --git a/libavcodec/amfenc.c b/libavcodec/amfenc.c
index 061859f85c..0bd15dd812 100644
--- a/libavcodec/amfenc.c
+++ b/libavcodec/amfenc.c
@@ -60,6 +60,7 @@ const enum AVPixelFormat ff_amf_pix_fmts[] = {
 #if CONFIG_DXVA2
     AV_PIX_FMT_DXVA2_VLD,
 #endif
+    AV_PIX_FMT_P010,
     AV_PIX_FMT_NONE
 };
 
@@ -72,6 +73,7 @@ static const FormatMap format_map[] =
 {
     { AV_PIX_FMT_NONE,       AMF_SURFACE_UNKNOWN },
     { AV_PIX_FMT_NV12,       AMF_SURFACE_NV12 },
+    { AV_PIX_FMT_P010,       AMF_SURFACE_P010 },
     { AV_PIX_FMT_BGR0,       AMF_SURFACE_BGRA },
     { AV_PIX_FMT_RGB0,       AMF_SURFACE_RGBA },
     { AV_PIX_FMT_GRAY8,      AMF_SURFACE_GRAY8 },
@@ -785,6 +787,41 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
     return ret;
 }
 
+int ff_amf_get_color_profile(AVCodecContext *avctx)
+{
+    amf_int64 color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_UNKNOWN;
+    if (avctx->color_range == AVCOL_RANGE_JPEG) {
+        /// Color Space for Full (JPEG) Range
+        switch (avctx->colorspace) {
+        case AVCOL_SPC_SMPTE170M:
+            color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_FULL_601;
+            break;
+        case AVCOL_SPC_BT709:
+            color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_FULL_709;
+            break;
+        case AVCOL_SPC_BT2020_NCL:
+        case AVCOL_SPC_BT2020_CL:
+            color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_FULL_2020;
+            break;
+        }
+    } else {
+        /// Color Space for Limited (MPEG) range
+        switch (avctx->colorspace) {
+        case AVCOL_SPC_SMPTE170M:
+            color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_601;
+            break;
+        case AVCOL_SPC_BT709:
+            color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_709;
+            break;
+        case AVCOL_SPC_BT2020_NCL:
+        case AVCOL_SPC_BT2020_CL:
+            color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_2020;
+            break;
+        }
+    }
+    return color_profile;
+}
+
 const AVCodecHWConfigInternal *const ff_amfenc_hw_configs[] = {
 #if CONFIG_D3D11VA
     HW_CONFIG_ENCODER_FRAMES(D3D11, D3D11VA),
diff --git a/libavcodec/amfenc.h b/libavcodec/amfenc.h
index 2dbd378ef8..62736ef579 100644
--- a/libavcodec/amfenc.h
+++ b/libavcodec/amfenc.h
@@ -21,6 +21,7 @@
 
 #include <AMF/core/Factory.h>
 
+#include <AMF/components/ColorSpace.h>
 #include <AMF/components/VideoEncoderVCE.h>
 #include <AMF/components/VideoEncoderHEVC.h>
 #include <AMF/components/VideoEncoderAV1.h>
@@ -170,6 +171,8 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt);
 */
 extern const enum AVPixelFormat ff_amf_pix_fmts[];
 
+int ff_amf_get_color_profile(AVCodecContext *avctx);
+
 /**
 * Error handling helper
 */
diff --git a/libavcodec/amfenc_h264.c b/libavcodec/amfenc_h264.c
index abfac2a90f..ad5fcc9ecb 100644
--- a/libavcodec/amfenc_h264.c
+++ b/libavcodec/amfenc_h264.c
@@ -199,6 +199,8 @@ static av_cold int amf_encode_init_h264(AVCodecContext *avctx)
     AMFRate                          framerate;
     AMFSize                          framesize = AMFConstructSize(avctx->width, avctx->height);
     int                              deblocking_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
+    amf_int64                        color_profile;
+    enum                             AVPixelFormat pix_fmt;
 
     if (avctx->framerate.num > 0 && avctx->framerate.den > 0) {
         framerate = AMFConstructRate(avctx->framerate.num, avctx->framerate.den);
@@ -262,10 +264,24 @@ FF_ENABLE_DEPRECATION_WARNINGS
         AMF_ASSIGN_PROPERTY_RATIO(res, ctx->encoder, AMF_VIDEO_ENCODER_ASPECT_RATIO, ratio);
     }
 
-    /// Color Range (Partial/TV/MPEG or Full/PC/JPEG)
-    if (avctx->color_range == AVCOL_RANGE_JPEG) {
-        AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_FULL_RANGE_COLOR, 1);
-    }
+    color_profile = ff_amf_get_color_profile(avctx);
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_OUTPUT_COLOR_PROFILE, color_profile);
+
+    /// Color Range (Support for older Drivers)
+    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_FULL_RANGE_COLOR, !!(avctx->color_range == AVCOL_RANGE_JPEG));
+
+    /// Color Depth
+    pix_fmt = avctx->hw_frames_ctx ? ((AVHWFramesContext*)avctx->hw_frames_ctx->data)->sw_format
+                                : avctx->pix_fmt;
+
+    // 10 bit input video is not supported by AMF H264 encoder
+    AMF_RETURN_IF_FALSE(ctx, pix_fmt != AV_PIX_FMT_P010, AVERROR_INVALIDDATA, "10-bit input video is not supported by AMF H264 encoder\n");
+
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_COLOR_BIT_DEPTH, AMF_COLOR_BIT_DEPTH_8);
+    /// Color Transfer Characteristics (AMF matches ISO/IEC)
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_OUTPUT_TRANSFER_CHARACTERISTIC, (amf_int64)avctx->color_trc);
+    /// Color Primaries (AMF matches ISO/IEC)
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_OUTPUT_COLOR_PRIMARIES, (amf_int64)avctx->color_primaries);
 
     // autodetect rate control method
     if (ctx->rate_control_mode == AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_UNKNOWN) {
diff --git a/libavcodec/amfenc_hevc.c b/libavcodec/amfenc_hevc.c
index 0a74ccd6c4..a89a3cf20c 100644
--- a/libavcodec/amfenc_hevc.c
+++ b/libavcodec/amfenc_hevc.c
@@ -34,8 +34,9 @@ static const AVOption options[] = {
     { "high_quality",           "high quality trancoding",                  0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_HEVC_USAGE_HIGH_QUALITY              }, 0, 0, VE, .unit = "usage" },
     { "lowlatency_high_quality","low latency yet high quality trancoding",  0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_HEVC_USAGE_LOW_LATENCY_HIGH_QUALITY  }, 0, 0, VE, .unit = "usage" },
 
-    { "profile",        "Set the profile (default main)",           OFFSET(profile),   AV_OPT_TYPE_INT,{ .i64 = AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN }, AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN, AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN, VE, .unit = "profile" },
+    { "profile",        "Set the profile (default main)",           OFFSET(profile),   AV_OPT_TYPE_INT,{ .i64 = AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN }, AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN, AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN_10, VE, .unit = "profile" },
     { "main",           "", 0,                      AV_OPT_TYPE_CONST,{ .i64 = AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN }, 0, 0, VE, .unit = "profile" },
+    { "main10",         "", 0,                      AV_OPT_TYPE_CONST,{ .i64 = AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN_10 }, 0, 0, VE, .unit = "profile" },
 
     { "profile_tier",   "Set the profile tier (default main)",      OFFSET(tier), AV_OPT_TYPE_INT,{ .i64 = AMF_VIDEO_ENCODER_HEVC_TIER_MAIN }, AMF_VIDEO_ENCODER_HEVC_TIER_MAIN, AMF_VIDEO_ENCODER_HEVC_TIER_HIGH, VE, .unit = "tier" },
     { "main",           "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_TIER_MAIN }, 0, 0, VE, .unit = "tier" },
@@ -160,6 +161,9 @@ static av_cold int amf_encode_init_hevc(AVCodecContext *avctx)
     AMFRate             framerate;
     AMFSize             framesize = AMFConstructSize(avctx->width, avctx->height);
     int                 deblocking_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
+    amf_int64           color_depth;
+    amf_int64           color_profile;
+    enum                AVPixelFormat pix_fmt;
 
     if (avctx->framerate.num > 0 && avctx->framerate.den > 0) {
         framerate = AMFConstructRate(avctx->framerate.num, avctx->framerate.den);
@@ -187,6 +191,9 @@ FF_ENABLE_DEPRECATION_WARNINGS
     case AV_PROFILE_HEVC_MAIN:
         profile = AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN;
         break;
+    case AV_PROFILE_HEVC_MAIN_10:
+        profile = AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN_10;
+        break;
     default:
         break;
     }
@@ -215,6 +222,23 @@ FF_ENABLE_DEPRECATION_WARNINGS
         AMF_ASSIGN_PROPERTY_RATIO(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_ASPECT_RATIO, ratio);
     }
 
+    color_profile = ff_amf_get_color_profile(avctx);
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_OUTPUT_COLOR_PROFILE, color_profile);
+    /// Color Range (Support for older Drivers)
+    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_NOMINAL_RANGE, !!(avctx->color_range == AVCOL_RANGE_JPEG));
+    /// Color Depth
+    color_depth = AMF_COLOR_BIT_DEPTH_8;
+    pix_fmt = avctx->hw_frames_ctx ? ((AVHWFramesContext*)avctx->hw_frames_ctx->data)->sw_format
+                                    : avctx->pix_fmt;
+    if (pix_fmt == AV_PIX_FMT_P010) {
+        color_depth = AMF_COLOR_BIT_DEPTH_10;
+    }
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_COLOR_BIT_DEPTH, color_depth);
+    /// Color Transfer Characteristics (AMF matches ISO/IEC)
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_OUTPUT_TRANSFER_CHARACTERISTIC, (amf_int64)avctx->color_trc);
+    /// Color Primaries (AMF matches ISO/IEC)
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_OUTPUT_COLOR_PRIMARIES, (amf_int64)avctx->color_primaries);
+
     // Picture control properties
     AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_NUM_GOPS_PER_IDR, ctx->gops_per_idr);
     AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_GOP_SIZE, avctx->gop_size);
-- 
2.39.3 (Apple Git-146)

_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".

^ permalink raw reply	[flat|nested] 17+ messages in thread

* [FFmpeg-devel] [PATCH 04/10, v3] avcodec/amfenc: HDR metadata.
  2024-05-30 13:08 [FFmpeg-devel] [PATCH 01/10, v3] avutil: add hwcontext_amf Dmitrii Ovchinnikov
  2024-05-30 13:08 ` [FFmpeg-devel] [PATCH 02/10, v3] avcodec: add amfdec Dmitrii Ovchinnikov
  2024-05-30 13:08 ` [FFmpeg-devel] [PATCH 03/10, v3] avcodec/amfenc: Fixes the color information in the output Dmitrii Ovchinnikov
@ 2024-05-30 13:08 ` Dmitrii Ovchinnikov
  2024-05-30 13:08 ` [FFmpeg-devel] [PATCH 05/10, v3] avcodec/amfenc: add 10 bit encoding in av1_amf Dmitrii Ovchinnikov
                   ` (8 subsequent siblings)
  11 siblings, 0 replies; 17+ messages in thread
From: Dmitrii Ovchinnikov @ 2024-05-30 13:08 UTC (permalink / raw)
  To: ffmpeg-devel; +Cc: nyanmisaka

From: nyanmisaka <nst799610810@gmail.com>

v2: fixes for indentation
---
 libavcodec/amfenc.c | 83 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 83 insertions(+)

diff --git a/libavcodec/amfenc.c b/libavcodec/amfenc.c
index 0bd15dd812..068bb53002 100644
--- a/libavcodec/amfenc.c
+++ b/libavcodec/amfenc.c
@@ -36,6 +36,57 @@
 #include "amfenc.h"
 #include "encode.h"
 #include "internal.h"
+#include "libavutil/mastering_display_metadata.h"
+
+static int amf_save_hdr_metadata(AVCodecContext *avctx, const AVFrame *frame, AMFHDRMetadata *hdrmeta)
+{
+    AVFrameSideData            *sd_display;
+    AVFrameSideData            *sd_light;
+    AVMasteringDisplayMetadata *display_meta;
+    AVContentLightMetadata     *light_meta;
+
+    sd_display = av_frame_get_side_data(frame, AV_FRAME_DATA_MASTERING_DISPLAY_METADATA);
+    if (sd_display) {
+        display_meta = (AVMasteringDisplayMetadata *)sd_display->data;
+        if (display_meta->has_luminance) {
+            const unsigned int luma_den = 10000;
+            hdrmeta->maxMasteringLuminance =
+                (amf_uint32)(luma_den * av_q2d(display_meta->max_luminance));
+            hdrmeta->minMasteringLuminance =
+                FFMIN((amf_uint32)(luma_den * av_q2d(display_meta->min_luminance)), hdrmeta->maxMasteringLuminance);
+        }
+        if (display_meta->has_primaries) {
+            const unsigned int chroma_den = 50000;
+            hdrmeta->redPrimary[0] =
+                FFMIN((amf_uint16)(chroma_den * av_q2d(display_meta->display_primaries[0][0])), chroma_den);
+            hdrmeta->redPrimary[1] =
+                FFMIN((amf_uint16)(chroma_den * av_q2d(display_meta->display_primaries[0][1])), chroma_den);
+            hdrmeta->greenPrimary[0] =
+                FFMIN((amf_uint16)(chroma_den * av_q2d(display_meta->display_primaries[1][0])), chroma_den);
+            hdrmeta->greenPrimary[1] =
+                FFMIN((amf_uint16)(chroma_den * av_q2d(display_meta->display_primaries[1][1])), chroma_den);
+            hdrmeta->bluePrimary[0] =
+                FFMIN((amf_uint16)(chroma_den * av_q2d(display_meta->display_primaries[2][0])), chroma_den);
+            hdrmeta->bluePrimary[1] =
+                FFMIN((amf_uint16)(chroma_den * av_q2d(display_meta->display_primaries[2][1])), chroma_den);
+            hdrmeta->whitePoint[0] =
+                FFMIN((amf_uint16)(chroma_den * av_q2d(display_meta->white_point[0])), chroma_den);
+            hdrmeta->whitePoint[1] =
+                FFMIN((amf_uint16)(chroma_den * av_q2d(display_meta->white_point[1])), chroma_den);
+        }
+
+        sd_light = av_frame_get_side_data(frame, AV_FRAME_DATA_CONTENT_LIGHT_LEVEL);
+        if (sd_light) {
+            light_meta = (AVContentLightMetadata *)sd_light->data;
+            if (light_meta) {
+                hdrmeta->maxContentLightLevel = (amf_uint16)light_meta->MaxCLL;
+                hdrmeta->maxFrameAverageLightLevel = (amf_uint16)light_meta->MaxFALL;
+            }
+        }
+        return 0;
+    }
+    return 1;
+}
 
 #if CONFIG_D3D11VA
 #include <d3d11.h>
@@ -683,6 +734,26 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
             frame_ref_storage_buffer->pVtbl->Release(frame_ref_storage_buffer);
         }
 
+        // HDR10 metadata
+        if (frame->color_trc == AVCOL_TRC_SMPTE2084) {
+            AMFBuffer * hdrmeta_buffer = NULL;
+            res = ctx->context->pVtbl->AllocBuffer(ctx->context, AMF_MEMORY_HOST, sizeof(AMFHDRMetadata), &hdrmeta_buffer);
+            if (res == AMF_OK) {
+                AMFHDRMetadata * hdrmeta = (AMFHDRMetadata*)hdrmeta_buffer->pVtbl->GetNative(hdrmeta_buffer);
+                if (amf_save_hdr_metadata(avctx, frame, hdrmeta) == 0) {
+                    switch (avctx->codec->id) {
+                    case AV_CODEC_ID_H264:
+                        AMF_ASSIGN_PROPERTY_INTERFACE(res, ctx->encoder, AMF_VIDEO_ENCODER_INPUT_HDR_METADATA, hdrmeta_buffer); break;
+                    case AV_CODEC_ID_HEVC:
+                        AMF_ASSIGN_PROPERTY_INTERFACE(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_INPUT_HDR_METADATA, hdrmeta_buffer); break;
+                    }
+                    res = amf_set_property_buffer(surface, L"av_frame_hdrmeta", hdrmeta_buffer);
+                    AMF_RETURN_IF_FALSE(avctx, res == AMF_OK, AVERROR_UNKNOWN, "SetProperty failed for \"av_frame_hdrmeta\" with error %d\n", res);
+                }
+                hdrmeta_buffer->pVtbl->Release(hdrmeta_buffer);
+            }
+        }
+
         surface->pVtbl->SetPts(surface, frame->pts);
         AMF_ASSIGN_PROPERTY_INT64(res, surface, PTS_PROP, frame->pts);
 
@@ -746,6 +817,18 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
         }
         res_resubmit = AMF_OK;
         if (ctx->delayed_surface != NULL) { // try to resubmit frame
+            if (ctx->delayed_surface->pVtbl->HasProperty(ctx->delayed_surface, L"av_frame_hdrmeta")) {
+                AMFBuffer * hdrmeta_buffer = NULL;
+                res = amf_get_property_buffer((AMFData *)ctx->delayed_surface, L"av_frame_hdrmeta", &hdrmeta_buffer);
+                AMF_RETURN_IF_FALSE(avctx, res == AMF_OK, AVERROR_UNKNOWN, "GetProperty failed for \"av_frame_hdrmeta\" with error %d\n", res);
+                switch (avctx->codec->id) {
+                case AV_CODEC_ID_H264:
+                    AMF_ASSIGN_PROPERTY_INTERFACE(res, ctx->encoder, AMF_VIDEO_ENCODER_INPUT_HDR_METADATA, hdrmeta_buffer); break;
+                case AV_CODEC_ID_HEVC:
+                    AMF_ASSIGN_PROPERTY_INTERFACE(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_INPUT_HDR_METADATA, hdrmeta_buffer); break;
+                }
+                hdrmeta_buffer->pVtbl->Release(hdrmeta_buffer);
+            }
             res_resubmit = ctx->encoder->pVtbl->SubmitInput(ctx->encoder, (AMFData*)ctx->delayed_surface);
             if (res_resubmit != AMF_INPUT_FULL) {
                 int64_t pts = ctx->delayed_surface->pVtbl->GetPts(ctx->delayed_surface);
-- 
2.39.3 (Apple Git-146)

_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".

^ permalink raw reply	[flat|nested] 17+ messages in thread

* [FFmpeg-devel] [PATCH 05/10, v3] avcodec/amfenc: add 10 bit encoding in av1_amf
  2024-05-30 13:08 [FFmpeg-devel] [PATCH 01/10, v3] avutil: add hwcontext_amf Dmitrii Ovchinnikov
                   ` (2 preceding siblings ...)
  2024-05-30 13:08 ` [FFmpeg-devel] [PATCH 04/10, v3] avcodec/amfenc: HDR metadata Dmitrii Ovchinnikov
@ 2024-05-30 13:08 ` Dmitrii Ovchinnikov
  2024-05-30 13:08 ` [FFmpeg-devel] [PATCH 06/10, v3] avcodec/amfenc: GPU driver version check Dmitrii Ovchinnikov
                   ` (7 subsequent siblings)
  11 siblings, 0 replies; 17+ messages in thread
From: Dmitrii Ovchinnikov @ 2024-05-30 13:08 UTC (permalink / raw)
  To: ffmpeg-devel; +Cc: Evgeny Pavlov, Araz Iusubov

From: Evgeny Pavlov <lucenticus@gmail.com>

v2: refactored after review

Signed-off-by: Evgeny Pavlov <lucenticus@gmail.com>
Co-authored-by: Araz Iusubov <Primeadvice@gmail.com>
---
 libavcodec/amfenc.c     |  2 ++
 libavcodec/amfenc_av1.c | 22 ++++++++++++++++++++++
 2 files changed, 24 insertions(+)

diff --git a/libavcodec/amfenc.c b/libavcodec/amfenc.c
index 068bb53002..49dd91c4e0 100644
--- a/libavcodec/amfenc.c
+++ b/libavcodec/amfenc.c
@@ -746,6 +746,8 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
                         AMF_ASSIGN_PROPERTY_INTERFACE(res, ctx->encoder, AMF_VIDEO_ENCODER_INPUT_HDR_METADATA, hdrmeta_buffer); break;
                     case AV_CODEC_ID_HEVC:
                         AMF_ASSIGN_PROPERTY_INTERFACE(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_INPUT_HDR_METADATA, hdrmeta_buffer); break;
+                    case AV_CODEC_ID_AV1:
+                        AMF_ASSIGN_PROPERTY_INTERFACE(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_INPUT_HDR_METADATA, hdrmeta_buffer); break;
                     }
                     res = amf_set_property_buffer(surface, L"av_frame_hdrmeta", hdrmeta_buffer);
                     AMF_RETURN_IF_FALSE(avctx, res == AMF_OK, AVERROR_UNKNOWN, "SetProperty failed for \"av_frame_hdrmeta\" with error %d\n", res);
diff --git a/libavcodec/amfenc_av1.c b/libavcodec/amfenc_av1.c
index 9f18aac648..cc48e93fcb 100644
--- a/libavcodec/amfenc_av1.c
+++ b/libavcodec/amfenc_av1.c
@@ -165,6 +165,9 @@ static av_cold int amf_encode_init_av1(AVCodecContext* avctx)
     AMFGuid             guid;
     AMFRate             framerate;
     AMFSize             framesize = AMFConstructSize(avctx->width, avctx->height);
+    amf_int64           color_depth;
+    amf_int64           color_profile;
+    enum                AVPixelFormat pix_fmt;
 
 
 
@@ -203,6 +206,25 @@ FF_ENABLE_DEPRECATION_WARNINGS
     }
     AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_PROFILE, profile);
 
+    /// Color profile
+    color_profile = ff_amf_get_color_profile(avctx);
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_OUTPUT_COLOR_PROFILE, color_profile);
+
+    /// Color Depth
+    pix_fmt = avctx->hw_frames_ctx ? ((AVHWFramesContext*)avctx->hw_frames_ctx->data)->sw_format
+                                : avctx->pix_fmt;
+    color_depth = AMF_COLOR_BIT_DEPTH_8;
+    if (pix_fmt == AV_PIX_FMT_P010) {
+        color_depth = AMF_COLOR_BIT_DEPTH_10;
+    }
+
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_COLOR_BIT_DEPTH, color_depth);
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_OUTPUT_COLOR_PROFILE, color_profile);
+    /// Color Transfer Characteristics (AMF matches ISO/IEC)
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_OUTPUT_TRANSFER_CHARACTERISTIC, (amf_int64)avctx->color_trc);
+    /// Color Primaries (AMF matches ISO/IEC)
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_OUTPUT_COLOR_PRIMARIES, (amf_int64)avctx->color_primaries);
+
     profile_level = avctx->level;
     if (profile_level == AV_LEVEL_UNKNOWN) {
         profile_level = ctx->level;
-- 
2.39.3 (Apple Git-146)

_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".

^ permalink raw reply	[flat|nested] 17+ messages in thread

* [FFmpeg-devel] [PATCH 06/10, v3] avcodec/amfenc: GPU driver version check
  2024-05-30 13:08 [FFmpeg-devel] [PATCH 01/10, v3] avutil: add hwcontext_amf Dmitrii Ovchinnikov
                   ` (3 preceding siblings ...)
  2024-05-30 13:08 ` [FFmpeg-devel] [PATCH 05/10, v3] avcodec/amfenc: add 10 bit encoding in av1_amf Dmitrii Ovchinnikov
@ 2024-05-30 13:08 ` Dmitrii Ovchinnikov
  2024-05-30 13:08 ` [FFmpeg-devel] [PATCH 07/10, v3] avcodec/amfenc: add smart access video option Dmitrii Ovchinnikov
                   ` (6 subsequent siblings)
  11 siblings, 0 replies; 17+ messages in thread
From: Dmitrii Ovchinnikov @ 2024-05-30 13:08 UTC (permalink / raw)
  To: ffmpeg-devel; +Cc: Araz Iusubov

From: Araz Iusubov <Primeadvice@gmail.com>

Implemented a GPU driver version check.
The 10-bit patch works incorrectly on driver versions lower than 23.30.

Signed-off-by: Araz Iusubov <Primeadvice@gmail.com>
---
 libavcodec/amfenc.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/libavcodec/amfenc.c b/libavcodec/amfenc.c
index 49dd91c4e0..510050e282 100644
--- a/libavcodec/amfenc.c
+++ b/libavcodec/amfenc.c
@@ -558,6 +558,10 @@ int ff_amf_encode_init(AVCodecContext *avctx)
     if ((ret = amf_load_library(avctx)) == 0) {
         if ((ret = amf_init_context(avctx)) == 0) {
             if ((ret = amf_init_encoder(avctx)) == 0) {
+                    if (avctx->pix_fmt == AV_PIX_FMT_P010) {
+                        AmfContext *ctx = avctx->priv_data;
+                        AMF_RETURN_IF_FALSE(ctx, ctx->version >= AMF_MAKE_FULL_VERSION(1, 4, 32, 0), AVERROR_UNKNOWN, "10-bit encoder is not supported by AMD GPU drivers versions lower than 23.30.\n");
+                    }
                 return 0;
             }
         }
-- 
2.39.3 (Apple Git-146)

_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".

^ permalink raw reply	[flat|nested] 17+ messages in thread

* [FFmpeg-devel] [PATCH 07/10, v3] avcodec/amfenc: add smart access video option
  2024-05-30 13:08 [FFmpeg-devel] [PATCH 01/10, v3] avutil: add hwcontext_amf Dmitrii Ovchinnikov
                   ` (4 preceding siblings ...)
  2024-05-30 13:08 ` [FFmpeg-devel] [PATCH 06/10, v3] avcodec/amfenc: GPU driver version check Dmitrii Ovchinnikov
@ 2024-05-30 13:08 ` Dmitrii Ovchinnikov
  2024-05-30 13:08 ` [FFmpeg-devel] [PATCH 08/10, v3] avcodec/amfenc: redesign to use hwcontext_amf Dmitrii Ovchinnikov
                   ` (5 subsequent siblings)
  11 siblings, 0 replies; 17+ messages in thread
From: Dmitrii Ovchinnikov @ 2024-05-30 13:08 UTC (permalink / raw)
  To: ffmpeg-devel; +Cc: Evgeny Pavlov

From: Evgeny Pavlov <lucenticus@gmail.com>

This commit adds option for enabling SmartAccess Video (SAV)
in AMF encoders. SAV is an AMD hardware-specific feature which
enables the parallelization of encode and decode streams across
multiple Video Codec Engine (VCN) hardware instances.

Signed-off-by: Evgeny Pavlov <lucenticus@gmail.com>
---
 libavcodec/amfenc.h      |  1 +
 libavcodec/amfenc_av1.c  | 18 ++++++++++++++++++
 libavcodec/amfenc_h264.c | 18 ++++++++++++++++++
 libavcodec/amfenc_hevc.c | 18 ++++++++++++++++++
 4 files changed, 55 insertions(+)

diff --git a/libavcodec/amfenc.h b/libavcodec/amfenc.h
index 62736ef579..1bda0136bd 100644
--- a/libavcodec/amfenc.h
+++ b/libavcodec/amfenc.h
@@ -90,6 +90,7 @@ typedef struct AmfContext {
     int                 quality;
     int                 b_frame_delta_qp;
     int                 ref_b_frame_delta_qp;
+    int                 smart_access_video;
 
     // Dynamic options, can be set after Init() call
 
diff --git a/libavcodec/amfenc_av1.c b/libavcodec/amfenc_av1.c
index cc48e93fcb..7d37a242fc 100644
--- a/libavcodec/amfenc_av1.c
+++ b/libavcodec/amfenc_av1.c
@@ -104,6 +104,8 @@ static const AVOption options[] = {
 
     { "log_to_dbg",     "Enable AMF logging to debug output",   OFFSET(log_to_dbg), AV_OPT_TYPE_BOOL,{.i64 = 0 }, 0, 1, VE },
 
+    { "smart_access_video",     "Enable Smart Access Video",                OFFSET(smart_access_video),             AV_OPT_TYPE_BOOL, {.i64 = -1  }, -1, 1, VE},
+
     //Pre Analysis options
     { "preanalysis",                            "Enable preanalysis",                                           OFFSET(preanalysis),                            AV_OPT_TYPE_BOOL,   {.i64 = -1 }, -1, 1, VE },
 
@@ -265,6 +267,22 @@ FF_ENABLE_DEPRECATION_WARNINGS
         }
     }
 
+    if (ctx->smart_access_video != -1) {
+        AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_ENABLE_SMART_ACCESS_VIDEO, ctx->smart_access_video != 0);
+        if (res != AMF_OK) {
+            av_log(avctx, AV_LOG_ERROR, "The Smart Access Video is not supported by AMF.\n");
+            if (ctx->smart_access_video != 0)
+                return AVERROR(ENOSYS);
+        } else {
+            av_log(avctx, AV_LOG_INFO, "The Smart Access Video (%d) is set.\n", ctx->smart_access_video);
+            // Set low latency mode if Smart Access Video is enabled
+            if (ctx->smart_access_video != 0) {
+                AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_ENCODING_LATENCY_MODE, AMF_VIDEO_ENCODER_AV1_ENCODING_LATENCY_MODE_LOWEST_LATENCY);
+                av_log(avctx, AV_LOG_INFO, "The Smart Access Video set low latency mode.\n");
+            }
+        }
+    }
+
     // Pre-Pass, Pre-Analysis, Two-Pass
     if (ctx->rate_control_mode == AMF_VIDEO_ENCODER_AV1_RATE_CONTROL_METHOD_CONSTANT_QP) {
         AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_RATE_CONTROL_PREENCODE, 0);
diff --git a/libavcodec/amfenc_h264.c b/libavcodec/amfenc_h264.c
index ad5fcc9ecb..a26a6dbef8 100644
--- a/libavcodec/amfenc_h264.c
+++ b/libavcodec/amfenc_h264.c
@@ -136,6 +136,8 @@ static const AVOption options[] = {
 
     { "log_to_dbg",     "Enable AMF logging to debug output",   OFFSET(log_to_dbg)    , AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
 
+    { "smart_access_video",     "Enable Smart Access Video",    OFFSET(smart_access_video), AV_OPT_TYPE_BOOL, {.i64 = -1  }, -1, 1, VE},
+
     //Pre Analysis options
     { "preanalysis",                            "Enable preanalysis",                                           OFFSET(preanalysis),                            AV_OPT_TYPE_BOOL,   {.i64 = -1 }, -1, 1, VE },
 
@@ -369,6 +371,22 @@ FF_ENABLE_DEPRECATION_WARNINGS
         av_log(ctx, AV_LOG_WARNING, "rate control mode is PEAK_CONSTRAINED_VBR but rc_max_rate is not set\n");
     }
 
+    if (ctx->smart_access_video != -1) {
+        AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_ENABLE_SMART_ACCESS_VIDEO, ctx->smart_access_video != 0);
+        if (res != AMF_OK) {
+            av_log(avctx, AV_LOG_ERROR, "The Smart Access Video is not supported by AMF.\n");
+            if (ctx->smart_access_video != 0)
+                return AVERROR(ENOSYS);
+        } else {
+            av_log(avctx, AV_LOG_INFO, "The Smart Access Video (%d) is set.\n", ctx->smart_access_video);
+            // Set low latency mode if Smart Access Video is enabled
+            if (ctx->smart_access_video != 0) {
+                AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_LOWLATENCY_MODE, true);
+                av_log(avctx, AV_LOG_INFO, "The Smart Access Video set low latency mode.\n");
+            }
+        }
+    }
+
     if (ctx->preanalysis != -1) {
         AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_PRE_ANALYSIS_ENABLE, !!((ctx->preanalysis == 0) ? false : true));
     }
diff --git a/libavcodec/amfenc_hevc.c b/libavcodec/amfenc_hevc.c
index a89a3cf20c..8c26956513 100644
--- a/libavcodec/amfenc_hevc.c
+++ b/libavcodec/amfenc_hevc.c
@@ -100,6 +100,8 @@ static const AVOption options[] = {
 
     { "log_to_dbg",     "Enable AMF logging to debug output",   OFFSET(log_to_dbg), AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE },
 
+    { "smart_access_video",     "Enable Smart Access Video",        OFFSET(smart_access_video), AV_OPT_TYPE_BOOL, {.i64 = -1  }, -1, 1, VE},
+
     //Pre Analysis options
     { "preanalysis",                            "Enable preanalysis",                                           OFFSET(preanalysis),                            AV_OPT_TYPE_BOOL,   {.i64 = -1 }, -1, 1, VE },
 
@@ -265,6 +267,22 @@ FF_ENABLE_DEPRECATION_WARNINGS
         }
     }
 
+    if (ctx->smart_access_video != -1) {
+        AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_ENABLE_SMART_ACCESS_VIDEO, ctx->smart_access_video != 0);
+        if (res != AMF_OK) {
+            av_log(avctx, AV_LOG_ERROR, "The Smart Access Video is not supported by AMF.\n");
+            if (ctx->smart_access_video != 0)
+                return AVERROR(ENOSYS);
+        } else {
+            av_log(avctx, AV_LOG_INFO, "The Smart Access Video (%d) is set.\n", ctx->smart_access_video);
+            // Set low latency mode if Smart Access Video is enabled
+            if (ctx->smart_access_video != 0) {
+                AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_LOWLATENCY_MODE, true);
+                av_log(avctx, AV_LOG_INFO, "The Smart Access Video set low latency mode.\n");
+            }
+        }
+    }
+
     // Pre-Pass, Pre-Analysis, Two-Pass
     if (ctx->rate_control_mode == AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_CONSTANT_QP) {
         AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_PREENCODE_ENABLE, 0);
-- 
2.39.3 (Apple Git-146)

_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".

^ permalink raw reply	[flat|nested] 17+ messages in thread

* [FFmpeg-devel] [PATCH 08/10, v3] avcodec/amfenc: redesign to use hwcontext_amf.
  2024-05-30 13:08 [FFmpeg-devel] [PATCH 01/10, v3] avutil: add hwcontext_amf Dmitrii Ovchinnikov
                   ` (5 preceding siblings ...)
  2024-05-30 13:08 ` [FFmpeg-devel] [PATCH 07/10, v3] avcodec/amfenc: add smart access video option Dmitrii Ovchinnikov
@ 2024-05-30 13:08 ` Dmitrii Ovchinnikov
  2024-05-30 13:08 ` [FFmpeg-devel] [PATCH 09/10, v3] avfilter/scale_amf: Add AMF VPP & super resolution filters Dmitrii Ovchinnikov
                   ` (4 subsequent siblings)
  11 siblings, 0 replies; 17+ messages in thread
From: Dmitrii Ovchinnikov @ 2024-05-30 13:08 UTC (permalink / raw)
  To: ffmpeg-devel; +Cc: Evgeny Pavlov, Dmitrii Ovchinnikov

Co-authored-by: Evgeny Pavlov <lucenticus@gmail.com>
v3: cleanup code
---
 libavcodec/amfenc.c      | 573 +++++++++++----------------------------
 libavcodec/amfenc.h      |  32 +--
 libavcodec/amfenc_av1.c  |   8 +-
 libavcodec/amfenc_h264.c |   8 +-
 libavcodec/amfenc_hevc.c |  14 +-
 5 files changed, 176 insertions(+), 459 deletions(-)

diff --git a/libavcodec/amfenc.c b/libavcodec/amfenc.c
index 510050e282..c57fa1b980 100644
--- a/libavcodec/amfenc.c
+++ b/libavcodec/amfenc.c
@@ -29,6 +29,8 @@
 #define COBJMACROS
 #include "libavutil/hwcontext_dxva2.h"
 #endif
+#include "libavutil/hwcontext_amf.h"
+#include "libavutil/hwcontext_amf_internal.h"
 #include "libavutil/mem.h"
 #include "libavutil/pixdesc.h"
 #include "libavutil/time.h"
@@ -38,6 +40,18 @@
 #include "internal.h"
 #include "libavutil/mastering_display_metadata.h"
 
+#if CONFIG_D3D11VA
+#include <d3d11.h>
+#endif
+
+#ifdef _WIN32
+#include "compat/w32dlfcn.h"
+#else
+#include <dlfcn.h>
+#endif
+
+#define PTS_PROP L"PtsProp"
+
 static int amf_save_hdr_metadata(AVCodecContext *avctx, const AVFrame *frame, AMFHDRMetadata *hdrmeta)
 {
     AVFrameSideData            *sd_display;
@@ -88,20 +102,6 @@ static int amf_save_hdr_metadata(AVCodecContext *avctx, const AVFrame *frame, AM
     return 1;
 }
 
-#if CONFIG_D3D11VA
-#include <d3d11.h>
-#endif
-
-#ifdef _WIN32
-#include "compat/w32dlfcn.h"
-#else
-#include <dlfcn.h>
-#endif
-
-#define FFMPEG_AMF_WRITER_ID L"ffmpeg_amf"
-
-#define PTS_PROP L"PtsProp"
-
 const enum AVPixelFormat ff_amf_pix_fmts[] = {
     AV_PIX_FMT_NV12,
     AV_PIX_FMT_YUV420P,
@@ -111,289 +111,18 @@ const enum AVPixelFormat ff_amf_pix_fmts[] = {
 #if CONFIG_DXVA2
     AV_PIX_FMT_DXVA2_VLD,
 #endif
+    AV_PIX_FMT_AMF_SURFACE,
     AV_PIX_FMT_P010,
     AV_PIX_FMT_NONE
 };
 
-typedef struct FormatMap {
-    enum AVPixelFormat       av_format;
-    enum AMF_SURFACE_FORMAT  amf_format;
-} FormatMap;
-
-static const FormatMap format_map[] =
-{
-    { AV_PIX_FMT_NONE,       AMF_SURFACE_UNKNOWN },
-    { AV_PIX_FMT_NV12,       AMF_SURFACE_NV12 },
-    { AV_PIX_FMT_P010,       AMF_SURFACE_P010 },
-    { AV_PIX_FMT_BGR0,       AMF_SURFACE_BGRA },
-    { AV_PIX_FMT_RGB0,       AMF_SURFACE_RGBA },
-    { AV_PIX_FMT_GRAY8,      AMF_SURFACE_GRAY8 },
-    { AV_PIX_FMT_YUV420P,    AMF_SURFACE_YUV420P },
-    { AV_PIX_FMT_YUYV422,    AMF_SURFACE_YUY2 },
-};
-
-static enum AMF_SURFACE_FORMAT amf_av_to_amf_format(enum AVPixelFormat fmt)
-{
-    int i;
-    for (i = 0; i < amf_countof(format_map); i++) {
-        if (format_map[i].av_format == fmt) {
-            return format_map[i].amf_format;
-        }
-    }
-    return AMF_SURFACE_UNKNOWN;
-}
-
-static void AMF_CDECL_CALL AMFTraceWriter_Write(AMFTraceWriter *pThis,
-    const wchar_t *scope, const wchar_t *message)
-{
-    AmfTraceWriter *tracer = (AmfTraceWriter*)pThis;
-    av_log(tracer->avctx, AV_LOG_DEBUG, "%ls: %ls", scope, message); // \n is provided from AMF
-}
-
-static void AMF_CDECL_CALL AMFTraceWriter_Flush(AMFTraceWriter *pThis)
-{
-}
-
-static AMFTraceWriterVtbl tracer_vtbl =
-{
-    .Write = AMFTraceWriter_Write,
-    .Flush = AMFTraceWriter_Flush,
-};
-
-static int amf_load_library(AVCodecContext *avctx)
-{
-    AmfContext        *ctx = avctx->priv_data;
-    AMFInit_Fn         init_fun;
-    AMFQueryVersion_Fn version_fun;
-    AMF_RESULT         res;
-
-    ctx->delayed_frame = av_frame_alloc();
-    if (!ctx->delayed_frame) {
-        return AVERROR(ENOMEM);
-    }
-    // hardcoded to current HW queue size - will auto-realloc if too small
-    ctx->timestamp_list = av_fifo_alloc2(avctx->max_b_frames + 16, sizeof(int64_t),
-                                         AV_FIFO_FLAG_AUTO_GROW);
-    if (!ctx->timestamp_list) {
-        return AVERROR(ENOMEM);
-    }
-    ctx->dts_delay = 0;
-
-
-    ctx->library = dlopen(AMF_DLL_NAMEA, RTLD_NOW | RTLD_LOCAL);
-    AMF_RETURN_IF_FALSE(ctx, ctx->library != NULL,
-        AVERROR_UNKNOWN, "DLL %s failed to open\n", AMF_DLL_NAMEA);
-
-    init_fun = (AMFInit_Fn)dlsym(ctx->library, AMF_INIT_FUNCTION_NAME);
-    AMF_RETURN_IF_FALSE(ctx, init_fun != NULL, AVERROR_UNKNOWN, "DLL %s failed to find function %s\n", AMF_DLL_NAMEA, AMF_INIT_FUNCTION_NAME);
-
-    version_fun = (AMFQueryVersion_Fn)dlsym(ctx->library, AMF_QUERY_VERSION_FUNCTION_NAME);
-    AMF_RETURN_IF_FALSE(ctx, version_fun != NULL, AVERROR_UNKNOWN, "DLL %s failed to find function %s\n", AMF_DLL_NAMEA, AMF_QUERY_VERSION_FUNCTION_NAME);
-
-    res = version_fun(&ctx->version);
-    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "%s failed with error %d\n", AMF_QUERY_VERSION_FUNCTION_NAME, res);
-    res = init_fun(AMF_FULL_VERSION, &ctx->factory);
-    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "%s failed with error %d\n", AMF_INIT_FUNCTION_NAME, res);
-    res = ctx->factory->pVtbl->GetTrace(ctx->factory, &ctx->trace);
-    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "GetTrace() failed with error %d\n", res);
-    res = ctx->factory->pVtbl->GetDebug(ctx->factory, &ctx->debug);
-    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "GetDebug() failed with error %d\n", res);
-    return 0;
-}
-
-#if CONFIG_D3D11VA
-static int amf_init_from_d3d11_device(AVCodecContext *avctx, AVD3D11VADeviceContext *hwctx)
-{
-    AmfContext *ctx = avctx->priv_data;
-    AMF_RESULT res;
-
-    res = ctx->context->pVtbl->InitDX11(ctx->context, hwctx->device, AMF_DX11_1);
-    if (res != AMF_OK) {
-        if (res == AMF_NOT_SUPPORTED)
-            av_log(avctx, AV_LOG_ERROR, "AMF via D3D11 is not supported on the given device.\n");
-        else
-            av_log(avctx, AV_LOG_ERROR, "AMF failed to initialise on the given D3D11 device: %d.\n", res);
-        return AVERROR(ENODEV);
-    }
-
-    return 0;
-}
-#endif
-
-#if CONFIG_DXVA2
-static int amf_init_from_dxva2_device(AVCodecContext *avctx, AVDXVA2DeviceContext *hwctx)
-{
-    AmfContext *ctx = avctx->priv_data;
-    HANDLE device_handle;
-    IDirect3DDevice9 *device;
-    HRESULT hr;
-    AMF_RESULT res;
-    int ret;
-
-    hr = IDirect3DDeviceManager9_OpenDeviceHandle(hwctx->devmgr, &device_handle);
-    if (FAILED(hr)) {
-        av_log(avctx, AV_LOG_ERROR, "Failed to open device handle for Direct3D9 device: %lx.\n", (unsigned long)hr);
-        return AVERROR_EXTERNAL;
-    }
-
-    hr = IDirect3DDeviceManager9_LockDevice(hwctx->devmgr, device_handle, &device, FALSE);
-    if (SUCCEEDED(hr)) {
-        IDirect3DDeviceManager9_UnlockDevice(hwctx->devmgr, device_handle, FALSE);
-        ret = 0;
-    } else {
-        av_log(avctx, AV_LOG_ERROR, "Failed to lock device handle for Direct3D9 device: %lx.\n", (unsigned long)hr);
-        ret = AVERROR_EXTERNAL;
-    }
-
-    IDirect3DDeviceManager9_CloseDeviceHandle(hwctx->devmgr, device_handle);
-
-    if (ret < 0)
-        return ret;
-
-    res = ctx->context->pVtbl->InitDX9(ctx->context, device);
-
-    IDirect3DDevice9_Release(device);
-
-    if (res != AMF_OK) {
-        if (res == AMF_NOT_SUPPORTED)
-            av_log(avctx, AV_LOG_ERROR, "AMF via D3D9 is not supported on the given device.\n");
-        else
-            av_log(avctx, AV_LOG_ERROR, "AMF failed to initialise on given D3D9 device: %d.\n", res);
-        return AVERROR(ENODEV);
-    }
-
-    return 0;
-}
-#endif
-
-static int amf_init_context(AVCodecContext *avctx)
-{
-    AmfContext *ctx = avctx->priv_data;
-    AMFContext1 *context1 = NULL;
-    AMF_RESULT  res;
-    av_unused int ret;
-
-    ctx->hwsurfaces_in_queue = 0;
-    ctx->hwsurfaces_in_queue_max = 16;
-
-    // configure AMF logger
-    // the return of these functions indicates old state and do not affect behaviour
-    ctx->trace->pVtbl->EnableWriter(ctx->trace, AMF_TRACE_WRITER_DEBUG_OUTPUT, ctx->log_to_dbg != 0 );
-    if (ctx->log_to_dbg)
-        ctx->trace->pVtbl->SetWriterLevel(ctx->trace, AMF_TRACE_WRITER_DEBUG_OUTPUT, AMF_TRACE_TRACE);
-    ctx->trace->pVtbl->EnableWriter(ctx->trace, AMF_TRACE_WRITER_CONSOLE, 0);
-    ctx->trace->pVtbl->SetGlobalLevel(ctx->trace, AMF_TRACE_TRACE);
-
-    // connect AMF logger to av_log
-    ctx->tracer.vtbl = &tracer_vtbl;
-    ctx->tracer.avctx = avctx;
-    ctx->trace->pVtbl->RegisterWriter(ctx->trace, FFMPEG_AMF_WRITER_ID,(AMFTraceWriter*)&ctx->tracer, 1);
-    ctx->trace->pVtbl->SetWriterLevel(ctx->trace, FFMPEG_AMF_WRITER_ID, AMF_TRACE_TRACE);
-
-    res = ctx->factory->pVtbl->CreateContext(ctx->factory, &ctx->context);
-    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "CreateContext() failed with error %d\n", res);
-
-    // If a device was passed to the encoder, try to initialise from that.
-    if (avctx->hw_frames_ctx) {
-        AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
-
-        if (amf_av_to_amf_format(frames_ctx->sw_format) == AMF_SURFACE_UNKNOWN) {
-            av_log(avctx, AV_LOG_ERROR, "Format of input frames context (%s) is not supported by AMF.\n",
-                   av_get_pix_fmt_name(frames_ctx->sw_format));
-            return AVERROR(EINVAL);
-        }
-
-        switch (frames_ctx->device_ctx->type) {
-#if CONFIG_D3D11VA
-        case AV_HWDEVICE_TYPE_D3D11VA:
-            ret = amf_init_from_d3d11_device(avctx, frames_ctx->device_ctx->hwctx);
-            if (ret < 0)
-                return ret;
-            break;
-#endif
-#if CONFIG_DXVA2
-        case AV_HWDEVICE_TYPE_DXVA2:
-            ret = amf_init_from_dxva2_device(avctx, frames_ctx->device_ctx->hwctx);
-            if (ret < 0)
-                return ret;
-            break;
-#endif
-        default:
-            av_log(avctx, AV_LOG_ERROR, "AMF initialisation from a %s frames context is not supported.\n",
-                   av_hwdevice_get_type_name(frames_ctx->device_ctx->type));
-            return AVERROR(ENOSYS);
-        }
-
-        ctx->hw_frames_ctx = av_buffer_ref(avctx->hw_frames_ctx);
-        if (!ctx->hw_frames_ctx)
-            return AVERROR(ENOMEM);
-
-        if (frames_ctx->initial_pool_size > 0)
-            ctx->hwsurfaces_in_queue_max = frames_ctx->initial_pool_size - 1;
-
-    } else if (avctx->hw_device_ctx) {
-        AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)avctx->hw_device_ctx->data;
-
-        switch (device_ctx->type) {
-#if CONFIG_D3D11VA
-        case AV_HWDEVICE_TYPE_D3D11VA:
-            ret = amf_init_from_d3d11_device(avctx, device_ctx->hwctx);
-            if (ret < 0)
-                return ret;
-            break;
-#endif
-#if CONFIG_DXVA2
-        case AV_HWDEVICE_TYPE_DXVA2:
-            ret = amf_init_from_dxva2_device(avctx, device_ctx->hwctx);
-            if (ret < 0)
-                return ret;
-            break;
-#endif
-        default:
-            av_log(avctx, AV_LOG_ERROR, "AMF initialisation from a %s device is not supported.\n",
-                   av_hwdevice_get_type_name(device_ctx->type));
-            return AVERROR(ENOSYS);
-        }
-
-        ctx->hw_device_ctx = av_buffer_ref(avctx->hw_device_ctx);
-        if (!ctx->hw_device_ctx)
-            return AVERROR(ENOMEM);
-
-    } else {
-        res = ctx->context->pVtbl->InitDX11(ctx->context, NULL, AMF_DX11_1);
-        if (res == AMF_OK) {
-            av_log(avctx, AV_LOG_VERBOSE, "AMF initialisation succeeded via D3D11.\n");
-        } else {
-            res = ctx->context->pVtbl->InitDX9(ctx->context, NULL);
-            if (res == AMF_OK) {
-                av_log(avctx, AV_LOG_VERBOSE, "AMF initialisation succeeded via D3D9.\n");
-            } else {
-                AMFGuid guid = IID_AMFContext1();
-                res = ctx->context->pVtbl->QueryInterface(ctx->context, &guid, (void**)&context1);
-                AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "CreateContext1() failed with error %d\n", res);
-
-                res = context1->pVtbl->InitVulkan(context1, NULL);
-                context1->pVtbl->Release(context1);
-                if (res != AMF_OK) {
-                    if (res == AMF_NOT_SUPPORTED)
-                        av_log(avctx, AV_LOG_ERROR, "AMF via Vulkan is not supported on the given device.\n");
-                    else
-                        av_log(avctx, AV_LOG_ERROR, "AMF failed to initialise on the given Vulkan device: %d.\n", res);
-                    return AVERROR(ENOSYS);
-                }
-                av_log(avctx, AV_LOG_VERBOSE, "AMF initialisation succeeded via Vulkan.\n");
-            }
-        }
-    }
-    return 0;
-}
-
 static int amf_init_encoder(AVCodecContext *avctx)
 {
-    AmfContext        *ctx = avctx->priv_data;
+    AMFEncoderContext *ctx = avctx->priv_data;
     const wchar_t     *codec_id = NULL;
     AMF_RESULT         res;
     enum AVPixelFormat pix_fmt;
+    AVAMFDeviceContext* internal = ctx->amf_device_ctx;
 
     switch (avctx->codec->id) {
         case AV_CODEC_ID_H264:
@@ -410,29 +139,27 @@ static int amf_init_encoder(AVCodecContext *avctx)
     }
     AMF_RETURN_IF_FALSE(ctx, codec_id != NULL, AVERROR(EINVAL), "Codec %d is not supported\n", avctx->codec->id);
 
-    if (ctx->hw_frames_ctx)
-        pix_fmt = ((AVHWFramesContext*)ctx->hw_frames_ctx->data)->sw_format;
+    if (avctx->hw_frames_ctx)
+        pix_fmt = ((AVHWFramesContext*)avctx->hw_frames_ctx->data)->sw_format;
     else
         pix_fmt = avctx->pix_fmt;
 
-    ctx->format = amf_av_to_amf_format(pix_fmt);
+    if (avctx->pix_fmt != AV_PIX_FMT_AMF_SURFACE)
+        ctx->format = av_amf_av_to_amf_format(pix_fmt);
+    else
+        ctx->format = av_amf_av_to_amf_format(avctx->sw_pix_fmt);
+
     AMF_RETURN_IF_FALSE(ctx, ctx->format != AMF_SURFACE_UNKNOWN, AVERROR(EINVAL),
-                        "Format %s is not supported\n", av_get_pix_fmt_name(pix_fmt));
+                    "Format %s is not supported\n", av_get_pix_fmt_name(pix_fmt));
 
-    res = ctx->factory->pVtbl->CreateComponent(ctx->factory, ctx->context, codec_id, &ctx->encoder);
+    res = ctx->amf_device_ctx->factory->pVtbl->CreateComponent(ctx->amf_device_ctx->factory, internal->context, codec_id, &ctx->encoder);
     AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_ENCODER_NOT_FOUND, "CreateComponent(%ls) failed with error %d\n", codec_id, res);
-
     return 0;
 }
 
 int av_cold ff_amf_encode_close(AVCodecContext *avctx)
 {
-    AmfContext *ctx = avctx->priv_data;
-
-    if (ctx->delayed_surface) {
-        ctx->delayed_surface->pVtbl->Release(ctx->delayed_surface);
-        ctx->delayed_surface = NULL;
-    }
+    AMFEncoderContext *ctx = avctx->priv_data;
 
     if (ctx->encoder) {
         ctx->encoder->pVtbl->Terminate(ctx->encoder);
@@ -440,38 +167,50 @@ int av_cold ff_amf_encode_close(AVCodecContext *avctx)
         ctx->encoder = NULL;
     }
 
-    if (ctx->context) {
-        ctx->context->pVtbl->Terminate(ctx->context);
-        ctx->context->pVtbl->Release(ctx->context);
-        ctx->context = NULL;
+    if (ctx->amf_device_ctx && ctx->local_context) {
+        av_amf_context_free(0, (uint8_t *)ctx->amf_device_ctx);
+        av_freep(&ctx->amf_device_ctx);
     }
-    av_buffer_unref(&ctx->hw_device_ctx);
-    av_buffer_unref(&ctx->hw_frames_ctx);
+    av_fifo_freep2(&ctx->timestamp_list);
+
+    return 0;
+}
 
-    if (ctx->trace) {
-        ctx->trace->pVtbl->UnregisterWriter(ctx->trace, FFMPEG_AMF_WRITER_ID);
+static int amf_init_encoder_context(AVCodecContext *avctx)
+{
+    AMFEncoderContext *ctx = avctx->priv_data;
+    AMFContext1 *context1 = NULL;
+    int ret;
+
+    if (avctx->hw_frames_ctx) {
+        AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
+        ret = av_amf_context_derive(ctx->amf_device_ctx, frames_ctx->device_ctx, NULL, 0);
+        if (ret < 0)
+            return ret;
     }
-    if (ctx->library) {
-        dlclose(ctx->library);
-        ctx->library = NULL;
+    else if (avctx->hw_device_ctx) {
+        AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)avctx->hw_device_ctx->data;
+        ret = av_amf_context_derive(ctx->amf_device_ctx, device_ctx, NULL, 0);
+        if (ret < 0)
+            return ret;
+
+    } else {
+        ret = av_amf_context_init(ctx->amf_device_ctx, avctx);
+        if (ret != 0) {
+            return ret;
+        }
     }
-    ctx->trace = NULL;
-    ctx->debug = NULL;
-    ctx->factory = NULL;
-    ctx->version = 0;
-    ctx->delayed_drain = 0;
-    av_frame_free(&ctx->delayed_frame);
-    av_fifo_freep2(&ctx->timestamp_list);
 
-    return 0;
+
+    return ret;
 }
 
 static int amf_copy_surface(AVCodecContext *avctx, const AVFrame *frame,
     AMFSurface* surface)
 {
     AMFPlane *plane;
-    uint8_t  *dst_data[4];
-    int       dst_linesize[4];
+    uint8_t  *dst_data[4] = {0};
+    int       dst_linesize[4] = {0};
     int       planes;
     int       i;
 
@@ -492,7 +231,7 @@ static int amf_copy_surface(AVCodecContext *avctx, const AVFrame *frame,
 
 static int amf_copy_buffer(AVCodecContext *avctx, AVPacket *pkt, AMFBuffer *buffer)
 {
-    AmfContext      *ctx = avctx->priv_data;
+    AMFEncoderContext *ctx = avctx->priv_data;
     int              ret;
     AMFVariantStruct var = {0};
     int64_t          timestamp = AV_NOPTS_VALUE;
@@ -555,17 +294,47 @@ int ff_amf_encode_init(AVCodecContext *avctx)
 {
     int ret;
 
-    if ((ret = amf_load_library(avctx)) == 0) {
-        if ((ret = amf_init_context(avctx)) == 0) {
-            if ((ret = amf_init_encoder(avctx)) == 0) {
-                    if (avctx->pix_fmt == AV_PIX_FMT_P010) {
-                        AmfContext *ctx = avctx->priv_data;
-                        AMF_RETURN_IF_FALSE(ctx, ctx->version >= AMF_MAKE_FULL_VERSION(1, 4, 32, 0), AVERROR_UNKNOWN, "10-bit encoder is not supported by AMD GPU drivers versions lower than 23.30.\n");
-                    }
-                return 0;
-            }
+    AMFEncoderContext *ctx = avctx->priv_data;
+    AVHWDeviceContext   *hwdev_ctx = NULL;
+    if (avctx->hw_device_ctx) {
+        hwdev_ctx = (AVHWDeviceContext*)avctx->hw_device_ctx->data;
+    } else if (avctx->hw_frames_ctx) {
+        AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
+        hwdev_ctx = (AVHWDeviceContext*)frames_ctx->device_ctx;
+    }
+    // hardcoded to current HW queue size - will auto-realloc if too small
+    ctx->timestamp_list = av_fifo_alloc2(avctx->max_b_frames + 16, sizeof(int64_t),
+                                         AV_FIFO_FLAG_AUTO_GROW);
+    if (!ctx->timestamp_list) {
+        return AVERROR(ENOMEM);
+    }
+    ctx->dts_delay = 0;
+
+    ctx->hwsurfaces_in_queue = 0;
+    ctx->hwsurfaces_in_queue_max = 16;
+    ctx->local_context = 0;
+    if (avctx->hw_frames_ctx && hwdev_ctx && hwdev_ctx->type == AV_HWDEVICE_TYPE_AMF) {
+        AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
+        ctx->amf_device_ctx =  hwdev_ctx->hwctx;
+    }
+    else if (avctx->hw_device_ctx && hwdev_ctx && hwdev_ctx->type == AV_HWDEVICE_TYPE_AMF) {
+        ctx->amf_device_ctx = hwdev_ctx->hwctx;
+    } else {
+        ctx->amf_device_ctx = av_mallocz(sizeof(AVAMFDeviceContext));
+        ctx->local_context = 1;
+        if ((ret = av_amf_context_create(ctx->amf_device_ctx, avctx, "", NULL, 0)) != 0) {
+            ff_amf_encode_close(avctx);
+            return ret;
+        }
+        if ((ret = amf_init_encoder_context(avctx)) != 0) {
+            ff_amf_encode_close(avctx);
+            return ret;
         }
     }
+    if ((ret = amf_init_encoder(avctx)) == 0) {
+        return 0;
+    }
+
     ff_amf_encode_close(avctx);
     return ret;
 }
@@ -642,31 +411,27 @@ static void amf_release_buffer_with_frame_ref(AMFBuffer *frame_ref_storage_buffe
 
 int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
 {
-    AmfContext *ctx = avctx->priv_data;
+    AMFEncoderContext *ctx = avctx->priv_data;
     AMFSurface *surface;
     AMF_RESULT  res;
     int         ret;
     AMF_RESULT  res_query;
     AMFData    *data = NULL;
-    AVFrame    *frame = ctx->delayed_frame;
+    AVFrame    *frame = av_frame_alloc();
     int         block_and_wait;
     int         query_output_data_flag = 0;
-    AMF_RESULT  res_resubmit;
+    int count = 0;
 
     if (!ctx->encoder)
         return AVERROR(EINVAL);
 
-    if (!frame->buf[0]) {
-        ret = ff_encode_get_frame(avctx, frame);
-        if (ret < 0 && ret != AVERROR_EOF)
-            return ret;
-    }
+    ret = ff_encode_get_frame(avctx, frame);
+    if (ret < 0 && ret != AVERROR_EOF)
+        return ret;
 
     if (!frame->buf[0]) { // submit drain
         if (!ctx->eof) { // submit drain one time only
-            if (ctx->delayed_surface != NULL) {
-                ctx->delayed_drain = 1; // input queue is full: resubmit Drain() in ff_amf_receive_packet
-            } else if(!ctx->delayed_drain) {
+            if(!ctx->delayed_drain) {
                 res = ctx->encoder->pVtbl->Drain(ctx->encoder);
                 if (res == AMF_INPUT_FULL) {
                     ctx->delayed_drain = 1; // input queue is full: resubmit Drain() in ff_amf_receive_packet
@@ -678,7 +443,7 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
                 }
             }
         }
-    } else if (!ctx->delayed_surface) { // submit frame
+    } else { // submit frame
         int hw_surface = 0;
 
         // prepare surface from frame
@@ -690,12 +455,12 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
                 ID3D11Texture2D *texture = (ID3D11Texture2D*)frame->data[0]; // actual texture
                 int index = (intptr_t)frame->data[1]; // index is a slice in texture array is - set to tell AMF which slice to use
 
-                av_assert0(frame->hw_frames_ctx       && ctx->hw_frames_ctx &&
-                           frame->hw_frames_ctx->data == ctx->hw_frames_ctx->data);
+                av_assert0(frame->hw_frames_ctx       && avctx->hw_frames_ctx &&
+                           frame->hw_frames_ctx->data == avctx->hw_frames_ctx->data);
 
                 texture->lpVtbl->SetPrivateData(texture, &AMFTextureArrayIndexGUID, sizeof(index), &index);
 
-                res = ctx->context->pVtbl->CreateSurfaceFromDX11Native(ctx->context, texture, &surface, NULL); // wrap to AMF surface
+                res = ctx->amf_device_ctx->context->pVtbl->CreateSurfaceFromDX11Native(ctx->amf_device_ctx->context, texture, &surface, NULL); // wrap to AMF surface
                 AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR(ENOMEM), "CreateSurfaceFromDX11Native() failed  with error %d\n", res);
 
                 hw_surface = 1;
@@ -707,16 +472,23 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
             {
                 IDirect3DSurface9 *texture = (IDirect3DSurface9 *)frame->data[3]; // actual texture
 
-                res = ctx->context->pVtbl->CreateSurfaceFromDX9Native(ctx->context, texture, &surface, NULL); // wrap to AMF surface
+                res = ctx->amf_device_ctx->context->pVtbl->CreateSurfaceFromDX9Native(ctx->amf_device_ctx->context, texture, &surface, NULL); // wrap to AMF surface
                 AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR(ENOMEM), "CreateSurfaceFromDX9Native() failed  with error %d\n", res);
 
                 hw_surface = 1;
             }
             break;
 #endif
+        case AV_PIX_FMT_AMF_SURFACE:
+            {
+                surface = (AMFSurface*)frame->data[0];
+                surface->pVtbl->Acquire(surface);
+                hw_surface = 1;
+            }
+            break;
         default:
             {
-                res = ctx->context->pVtbl->AllocSurface(ctx->context, AMF_MEMORY_HOST, ctx->format, avctx->width, avctx->height, &surface);
+                res = ctx->amf_device_ctx->context->pVtbl->AllocSurface(ctx->amf_device_ctx->context, AMF_MEMORY_HOST, ctx->format, avctx->width, avctx->height, &surface);
                 AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR(ENOMEM), "AllocSurface() failed  with error %d\n", res);
                 amf_copy_surface(avctx, frame, surface);
             }
@@ -729,7 +501,7 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
             // input HW surfaces can be vertically aligned by 16; tell AMF the real size
             surface->pVtbl->SetCrop(surface, 0, 0, frame->width, frame->height);
 
-            frame_ref_storage_buffer = amf_create_buffer_with_frame_ref(frame, ctx->context);
+            frame_ref_storage_buffer = amf_create_buffer_with_frame_ref(frame, ctx->amf_device_ctx->context);
             AMF_RETURN_IF_FALSE(ctx, frame_ref_storage_buffer != NULL, AVERROR(ENOMEM), "create_buffer_with_frame_ref() returned NULL\n");
 
             res = amf_set_property_buffer(surface, L"av_frame_ref", frame_ref_storage_buffer);
@@ -741,7 +513,7 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
         // HDR10 metadata
         if (frame->color_trc == AVCOL_TRC_SMPTE2084) {
             AMFBuffer * hdrmeta_buffer = NULL;
-            res = ctx->context->pVtbl->AllocBuffer(ctx->context, AMF_MEMORY_HOST, sizeof(AMFHDRMetadata), &hdrmeta_buffer);
+            res = ctx->amf_device_ctx->context->pVtbl->AllocBuffer(ctx->amf_device_ctx->context, AMF_MEMORY_HOST, sizeof(AMFHDRMetadata), &hdrmeta_buffer);
             if (res == AMF_OK) {
                 AMFHDRMetadata * hdrmeta = (AMFHDRMetadata*)hdrmeta_buffer->pVtbl->GetNative(hdrmeta_buffer);
                 if (amf_save_hdr_metadata(avctx, frame, hdrmeta) == 0) {
@@ -750,8 +522,6 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
                         AMF_ASSIGN_PROPERTY_INTERFACE(res, ctx->encoder, AMF_VIDEO_ENCODER_INPUT_HDR_METADATA, hdrmeta_buffer); break;
                     case AV_CODEC_ID_HEVC:
                         AMF_ASSIGN_PROPERTY_INTERFACE(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_INPUT_HDR_METADATA, hdrmeta_buffer); break;
-                    case AV_CODEC_ID_AV1:
-                        AMF_ASSIGN_PROPERTY_INTERFACE(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_INPUT_HDR_METADATA, hdrmeta_buffer); break;
                     }
                     res = amf_set_property_buffer(surface, L"av_frame_hdrmeta", hdrmeta_buffer);
                     AMF_RETURN_IF_FALSE(avctx, res == AMF_OK, AVERROR_UNKNOWN, "SetProperty failed for \"av_frame_hdrmeta\" with error %d\n", res);
@@ -778,13 +548,11 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
         // submit surface
         res = ctx->encoder->pVtbl->SubmitInput(ctx->encoder, (AMFData*)surface);
         if (res == AMF_INPUT_FULL) { // handle full queue
-            //store surface for later submission
-            ctx->delayed_surface = surface;
+            av_usleep(1000); // wait and poll again
         } else {
             int64_t pts = frame->pts;
             surface->pVtbl->Release(surface);
             AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "SubmitInput() failed with error %d\n", res);
-
             av_frame_unref(frame);
             ret = av_fifo_write(ctx->timestamp_list, &pts, 1);
             if (ret < 0)
@@ -796,73 +564,40 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
     do {
         block_and_wait = 0;
         // poll data
-        if (!avpkt->data && !avpkt->buf) {
-            res_query = ctx->encoder->pVtbl->QueryOutput(ctx->encoder, &data);
-            if (data) {
-                // copy data to packet
-                AMFBuffer *buffer;
-                AMFGuid guid = IID_AMFBuffer();
-                query_output_data_flag = 1;
-                data->pVtbl->QueryInterface(data, &guid, (void**)&buffer); // query for buffer interface
-                ret = amf_copy_buffer(avctx, avpkt, buffer);
-
-                buffer->pVtbl->Release(buffer);
-
-                if (data->pVtbl->HasProperty(data, L"av_frame_ref")) {
-                    AMFBuffer* frame_ref_storage_buffer;
-                    res = amf_get_property_buffer(data, L"av_frame_ref", &frame_ref_storage_buffer);
-                    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "GetProperty failed for \"av_frame_ref\" with error %d\n", res);
-                    amf_release_buffer_with_frame_ref(frame_ref_storage_buffer);
-                    ctx->hwsurfaces_in_queue--;
-                }
+        res_query = ctx->encoder->pVtbl->QueryOutput(ctx->encoder, &data);
+        if (data) {
+            // copy data to packet
+            AMFBuffer* buffer;
+            AMFGuid guid = IID_AMFBuffer();
+            data->pVtbl->QueryInterface(data, &guid, (void**)&buffer); // query for buffer interface
+            ret = amf_copy_buffer(avctx, avpkt, buffer);
+
+            buffer->pVtbl->Release(buffer);
+
+            if (data->pVtbl->HasProperty(data, L"av_frame_ref")) {
+                AMFBuffer *frame_ref_storage_buffer;
+                res = amf_get_property_buffer(data, L"av_frame_ref", &frame_ref_storage_buffer);
+                AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "GetProperty failed for \"av_frame_ref\" with error %d\n", res);
+                amf_release_buffer_with_frame_ref(frame_ref_storage_buffer);
+                ctx->hwsurfaces_in_queue--;
+            }
+            data->pVtbl->Release(data);
 
-                data->pVtbl->Release(data);
+            AMF_RETURN_IF_FALSE(ctx, ret >= 0, ret, "amf_copy_buffer() failed with error %d\n", ret);
 
-                AMF_RETURN_IF_FALSE(ctx, ret >= 0, ret, "amf_copy_buffer() failed with error %d\n", ret);
-            }
-        }
-        res_resubmit = AMF_OK;
-        if (ctx->delayed_surface != NULL) { // try to resubmit frame
-            if (ctx->delayed_surface->pVtbl->HasProperty(ctx->delayed_surface, L"av_frame_hdrmeta")) {
-                AMFBuffer * hdrmeta_buffer = NULL;
-                res = amf_get_property_buffer((AMFData *)ctx->delayed_surface, L"av_frame_hdrmeta", &hdrmeta_buffer);
-                AMF_RETURN_IF_FALSE(avctx, res == AMF_OK, AVERROR_UNKNOWN, "GetProperty failed for \"av_frame_hdrmeta\" with error %d\n", res);
-                switch (avctx->codec->id) {
-                case AV_CODEC_ID_H264:
-                    AMF_ASSIGN_PROPERTY_INTERFACE(res, ctx->encoder, AMF_VIDEO_ENCODER_INPUT_HDR_METADATA, hdrmeta_buffer); break;
-                case AV_CODEC_ID_HEVC:
-                    AMF_ASSIGN_PROPERTY_INTERFACE(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_INPUT_HDR_METADATA, hdrmeta_buffer); break;
+            if (ctx->delayed_drain) { // try to resubmit drain
+                res = ctx->encoder->pVtbl->Drain(ctx->encoder);
+                if (res != AMF_INPUT_FULL) {
+                    ctx->delayed_drain = 0;
+                    ctx->eof = 1; // drain started
+                    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "Repeated Drain() failed with error %d\n", res);
+                } else {
+                    av_log(avctx, AV_LOG_WARNING, "Data acquired but delayed drain submission got AMF_INPUT_FULL- should not happen\n");
                 }
-                hdrmeta_buffer->pVtbl->Release(hdrmeta_buffer);
-            }
-            res_resubmit = ctx->encoder->pVtbl->SubmitInput(ctx->encoder, (AMFData*)ctx->delayed_surface);
-            if (res_resubmit != AMF_INPUT_FULL) {
-                int64_t pts = ctx->delayed_surface->pVtbl->GetPts(ctx->delayed_surface);
-                ctx->delayed_surface->pVtbl->Release(ctx->delayed_surface);
-                ctx->delayed_surface = NULL;
-                av_frame_unref(ctx->delayed_frame);
-                AMF_RETURN_IF_FALSE(ctx, res_resubmit == AMF_OK, AVERROR_UNKNOWN, "Repeated SubmitInput() failed with error %d\n", res_resubmit);
-
-                ret = av_fifo_write(ctx->timestamp_list, &pts, 1);
-                if (ret < 0)
-                    return ret;
-            }
-        } else if (ctx->delayed_drain) { // try to resubmit drain
-            res = ctx->encoder->pVtbl->Drain(ctx->encoder);
-            if (res != AMF_INPUT_FULL) {
-                ctx->delayed_drain = 0;
-                ctx->eof = 1; // drain started
-                AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "Repeated Drain() failed with error %d\n", res);
-            } else {
-                av_log(avctx, AV_LOG_WARNING, "Data acquired but delayed drain submission got AMF_INPUT_FULL- should not happen\n");
-            }
-        }
-
-        if (query_output_data_flag == 0) {
-            if (res_resubmit == AMF_INPUT_FULL || ctx->delayed_drain || (ctx->eof && res_query != AMF_EOF) || (ctx->hwsurfaces_in_queue >= ctx->hwsurfaces_in_queue_max)) {
-                block_and_wait = 1;
-                av_usleep(1000);
             }
+        } else if (ctx->delayed_drain || (ctx->eof && res_query != AMF_EOF) || (ctx->hwsurfaces_in_queue >= ctx->hwsurfaces_in_queue_max)) {
+            block_and_wait = 1;
+            av_usleep(1000); // wait and poll again
         }
     } while (block_and_wait);
 
@@ -920,5 +655,7 @@ const AVCodecHWConfigInternal *const ff_amfenc_hw_configs[] = {
     HW_CONFIG_ENCODER_FRAMES(DXVA2_VLD, DXVA2),
     HW_CONFIG_ENCODER_DEVICE(NONE,      DXVA2),
 #endif
+    HW_CONFIG_ENCODER_FRAMES(AMF_SURFACE,   AMF),
+    HW_CONFIG_ENCODER_DEVICE(NONE,          AMF),
     NULL,
 };
diff --git a/libavcodec/amfenc.h b/libavcodec/amfenc.h
index 1bda0136bd..8fc411d44d 100644
--- a/libavcodec/amfenc.h
+++ b/libavcodec/amfenc.h
@@ -25,54 +25,34 @@
 #include <AMF/components/VideoEncoderVCE.h>
 #include <AMF/components/VideoEncoderHEVC.h>
 #include <AMF/components/VideoEncoderAV1.h>
-
+#include "libavutil/hwcontext_amf.h"
 #include "libavutil/fifo.h"
+#include "libavutil/mem.h"
 
 #include "avcodec.h"
 #include "hwconfig.h"
 
 #define  MAX_LOOKAHEAD_DEPTH 41
 
-/**
-* AMF trace writer callback class
-* Used to capture all AMF logging
-*/
-
-typedef struct AmfTraceWriter {
-    AMFTraceWriterVtbl *vtbl;
-    AVCodecContext     *avctx;
-} AmfTraceWriter;
-
 /**
 * AMF encoder context
 */
 
-typedef struct AmfContext {
+typedef struct AMFEncoderContext {
     AVClass            *avclass;
     // access to AMF runtime
-    amf_handle          library; ///< handle to DLL library
-    AMFFactory         *factory; ///< pointer to AMF factory
-    AMFDebug           *debug;   ///< pointer to AMF debug interface
-    AMFTrace           *trace;   ///< pointer to AMF trace interface
-
-    amf_uint64          version; ///< version of AMF runtime
-    AmfTraceWriter      tracer;  ///< AMF writer registered with AMF
-    AMFContext         *context; ///< AMF context
+    AVAMFDeviceContext *amf_device_ctx;
+    int                *local_context;
     //encoder
     AMFComponent       *encoder; ///< AMF encoder object
     amf_bool            eof;     ///< flag indicating EOF happened
     AMF_SURFACE_FORMAT  format;  ///< AMF surface format
 
-    AVBufferRef        *hw_device_ctx; ///< pointer to HW accelerator (decoder)
-    AVBufferRef        *hw_frames_ctx; ///< pointer to HW accelerator (frame allocator)
-
     int                 hwsurfaces_in_queue;
     int                 hwsurfaces_in_queue_max;
 
     // helpers to handle async calls
     int                 delayed_drain;
-    AMFSurface         *delayed_surface;
-    AVFrame            *delayed_frame;
 
     // shift dts back by max_b_frames in timing
     AVFifo             *timestamp_list;
@@ -149,7 +129,7 @@ typedef struct AmfContext {
     int                 pa_adaptive_mini_gop;
 
 
-} AmfContext;
+} AMFEncoderContext;
 
 extern const AVCodecHWConfigInternal *const ff_amfenc_hw_configs[];
 
diff --git a/libavcodec/amfenc_av1.c b/libavcodec/amfenc_av1.c
index 7d37a242fc..8edde12950 100644
--- a/libavcodec/amfenc_av1.c
+++ b/libavcodec/amfenc_av1.c
@@ -17,12 +17,12 @@
  */
 
 #include "libavutil/internal.h"
-#include "libavutil/mem.h"
 #include "libavutil/opt.h"
 #include "amfenc.h"
 #include "codec_internal.h"
+#include "internal.h"
 
-#define OFFSET(x) offsetof(AmfContext, x)
+#define OFFSET(x) offsetof(AMFEncoderContext, x)
 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
 static const AVOption options[] = {
     { "usage",                  "Set the encoding usage",                   OFFSET(usage),                          AV_OPT_TYPE_INT,   {.i64 = AMF_VIDEO_ENCODER_AV1_USAGE_TRANSCODING }, AMF_VIDEO_ENCODER_AV1_USAGE_TRANSCODING, AMF_VIDEO_ENCODER_AV1_USAGE_LOW_LATENCY, VE, .unit = "usage" },
@@ -159,7 +159,7 @@ static av_cold int amf_encode_init_av1(AVCodecContext* avctx)
 {
     int                 ret = 0;
     AMF_RESULT          res = AMF_OK;
-    AmfContext* ctx = avctx->priv_data;
+    AMFEncoderContext* ctx = avctx->priv_data;
     AMFVariantStruct    var = { 0 };
     amf_int64           profile = 0;
     amf_int64           profile_level = 0;
@@ -518,7 +518,7 @@ const FFCodec ff_av1_amf_encoder = {
     .init           = amf_encode_init_av1,
     FF_CODEC_RECEIVE_PACKET_CB(ff_amf_receive_packet),
     .close          = ff_amf_encode_close,
-    .priv_data_size = sizeof(AmfContext),
+    .priv_data_size = sizeof(AMFEncoderContext),
     .p.priv_class     = &av1_amf_class,
     .defaults       = defaults,
     .p.capabilities   = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HARDWARE |
diff --git a/libavcodec/amfenc_h264.c b/libavcodec/amfenc_h264.c
index a26a6dbef8..09f1ebf9d4 100644
--- a/libavcodec/amfenc_h264.c
+++ b/libavcodec/amfenc_h264.c
@@ -18,13 +18,13 @@
 
 
 #include "libavutil/internal.h"
-#include "libavutil/mem.h"
 #include "libavutil/opt.h"
 #include "amfenc.h"
 #include "codec_internal.h"
+#include "internal.h"
 #include <AMF/components/PreAnalysis.h>
 
-#define OFFSET(x) offsetof(AmfContext, x)
+#define OFFSET(x) offsetof(AMFEncoderContext, x)
 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
 
 static const AVOption options[] = {
@@ -192,7 +192,7 @@ static av_cold int amf_encode_init_h264(AVCodecContext *avctx)
 {
     int                              ret = 0;
     AMF_RESULT                       res = AMF_OK;
-    AmfContext                      *ctx = avctx->priv_data;
+    AMFEncoderContext               *ctx = avctx->priv_data;
     AMFVariantStruct                 var = { 0 };
     amf_int64                        profile = 0;
     amf_int64                        profile_level = 0;
@@ -565,7 +565,7 @@ const FFCodec ff_h264_amf_encoder = {
     .init           = amf_encode_init_h264,
     FF_CODEC_RECEIVE_PACKET_CB(ff_amf_receive_packet),
     .close          = ff_amf_encode_close,
-    .priv_data_size = sizeof(AmfContext),
+    .priv_data_size = sizeof(AMFEncoderContext),
     .p.priv_class   = &h264_amf_class,
     .defaults       = defaults,
     .p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HARDWARE |
diff --git a/libavcodec/amfenc_hevc.c b/libavcodec/amfenc_hevc.c
index 8c26956513..35698c26c8 100644
--- a/libavcodec/amfenc_hevc.c
+++ b/libavcodec/amfenc_hevc.c
@@ -17,13 +17,13 @@
  */
 
 #include "libavutil/internal.h"
-#include "libavutil/mem.h"
 #include "libavutil/opt.h"
 #include "amfenc.h"
 #include "codec_internal.h"
+#include "internal.h"
 #include <AMF/components/PreAnalysis.h>
 
-#define OFFSET(x) offsetof(AmfContext, x)
+#define OFFSET(x) offsetof(AMFEncoderContext, x)
 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
 static const AVOption options[] = {
     { "usage",                  "Set the encoding usage",                   OFFSET(usage),                          AV_OPT_TYPE_INT,   {.i64 = AMF_VIDEO_ENCODER_HEVC_USAGE_TRANSCODING }, AMF_VIDEO_ENCODER_HEVC_USAGE_TRANSCODING, AMF_VIDEO_ENCODER_HEVC_USAGE_LOW_LATENCY_HIGH_QUALITY, VE, .unit = "usage" },
@@ -34,9 +34,9 @@ static const AVOption options[] = {
     { "high_quality",           "high quality trancoding",                  0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_HEVC_USAGE_HIGH_QUALITY              }, 0, 0, VE, .unit = "usage" },
     { "lowlatency_high_quality","low latency yet high quality trancoding",  0, AV_OPT_TYPE_CONST, {.i64 = AMF_VIDEO_ENCODER_HEVC_USAGE_LOW_LATENCY_HIGH_QUALITY  }, 0, 0, VE, .unit = "usage" },
 
-    { "profile",        "Set the profile (default main)",           OFFSET(profile),   AV_OPT_TYPE_INT,{ .i64 = AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN }, AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN, AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN_10, VE, .unit = "profile" },
-    { "main",           "", 0,                      AV_OPT_TYPE_CONST,{ .i64 = AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN }, 0, 0, VE, .unit = "profile" },
-    { "main10",         "", 0,                      AV_OPT_TYPE_CONST,{ .i64 = AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN_10 }, 0, 0, VE, .unit = "profile" },
+    { "profile",        "Set the profile (default main)",           OFFSET(profile),   AV_OPT_TYPE_INT,{ .i64 = AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN }, AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN, AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN_10, VE, "profile" },
+    { "main",           "", 0,                      AV_OPT_TYPE_CONST,{ .i64 = AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN }, 0, 0, VE, "profile" },
+    { "main10",         "", 0,                      AV_OPT_TYPE_CONST,{ .i64 = AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN_10 }, 0, 0, VE, "profile" },
 
     { "profile_tier",   "Set the profile tier (default main)",      OFFSET(tier), AV_OPT_TYPE_INT,{ .i64 = AMF_VIDEO_ENCODER_HEVC_TIER_MAIN }, AMF_VIDEO_ENCODER_HEVC_TIER_MAIN, AMF_VIDEO_ENCODER_HEVC_TIER_HIGH, VE, .unit = "tier" },
     { "main",           "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_TIER_MAIN }, 0, 0, VE, .unit = "tier" },
@@ -154,7 +154,7 @@ static av_cold int amf_encode_init_hevc(AVCodecContext *avctx)
 {
     int                 ret = 0;
     AMF_RESULT          res = AMF_OK;
-    AmfContext         *ctx = avctx->priv_data;
+    AMFEncoderContext  *ctx = avctx->priv_data;
     AMFVariantStruct    var = {0};
     amf_int64           profile = 0;
     amf_int64           profile_level = 0;
@@ -488,7 +488,7 @@ const FFCodec ff_hevc_amf_encoder = {
     .init           = amf_encode_init_hevc,
     FF_CODEC_RECEIVE_PACKET_CB(ff_amf_receive_packet),
     .close          = ff_amf_encode_close,
-    .priv_data_size = sizeof(AmfContext),
+    .priv_data_size = sizeof(AMFEncoderContext),
     .p.priv_class   = &hevc_amf_class,
     .defaults       = defaults,
     .p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HARDWARE |
-- 
2.39.3 (Apple Git-146)

_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".

^ permalink raw reply	[flat|nested] 17+ messages in thread

* [FFmpeg-devel] [PATCH 09/10, v3] avfilter/scale_amf: Add AMF VPP & super resolution filters
  2024-05-30 13:08 [FFmpeg-devel] [PATCH 01/10, v3] avutil: add hwcontext_amf Dmitrii Ovchinnikov
                   ` (6 preceding siblings ...)
  2024-05-30 13:08 ` [FFmpeg-devel] [PATCH 08/10, v3] avcodec/amfenc: redesign to use hwcontext_amf Dmitrii Ovchinnikov
@ 2024-05-30 13:08 ` Dmitrii Ovchinnikov
  2024-05-30 13:08 ` [FFmpeg-devel] [PATCH 10/10, v3] doc/filters: Add documentation for AMF filters Dmitrii Ovchinnikov
                   ` (3 subsequent siblings)
  11 siblings, 0 replies; 17+ messages in thread
From: Dmitrii Ovchinnikov @ 2024-05-30 13:08 UTC (permalink / raw)
  To: ffmpeg-devel; +Cc: Evgeny Pavlov

From: Evgeny Pavlov <lucenticus@gmail.com>

This commit adds two AMF filters: vpp_amf & sr_amf.
Both filters use AMF hardware acceleration.
vpp_amf supports simple scaling algorithms & color conversion.
sr_amf supports advanced scaling algorithms such as FSR & can
be used for upscaling only.
---
 configure                   |   1 +
 libavfilter/Makefile        |   2 +
 libavfilter/allfilters.c    |   2 +
 libavfilter/vf_amf_common.c | 516 ++++++++++++++++++++++++++++++++++++
 libavfilter/vf_amf_common.h |  73 +++++
 libavfilter/vf_sr_amf.c     | 189 +++++++++++++
 libavfilter/vf_vpp_amf.c    | 264 ++++++++++++++++++
 7 files changed, 1047 insertions(+)
 create mode 100644 libavfilter/vf_amf_common.c
 create mode 100644 libavfilter/vf_amf_common.h
 create mode 100644 libavfilter/vf_sr_amf.c
 create mode 100644 libavfilter/vf_vpp_amf.c

diff --git a/configure b/configure
index 96b181fd21..56d9bad3ee 100755
--- a/configure
+++ b/configure
@@ -3916,6 +3916,7 @@ rubberband_filter_deps="librubberband"
 sab_filter_deps="gpl swscale"
 scale2ref_filter_deps="swscale"
 scale_filter_deps="swscale"
+scale_amf_filter_deps="amf"
 scale_qsv_filter_deps="libmfx"
 scale_qsv_filter_select="qsvvpp"
 scdet_filter_select="scene_sad"
diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index 5992fd161f..8c8a9466a8 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -500,6 +500,7 @@ OBJS-$(CONFIG_SITI_FILTER)                   += vf_siti.o
 OBJS-$(CONFIG_SPLIT_FILTER)                  += split.o
 OBJS-$(CONFIG_SPP_FILTER)                    += vf_spp.o qp_table.o
 OBJS-$(CONFIG_SR_FILTER)                     += vf_sr.o
+OBJS-$(CONFIG_SR_AMF_FILTER)                 += vf_sr_amf.o scale_eval.o vf_amf_common.o
 OBJS-$(CONFIG_SSIM_FILTER)                   += vf_ssim.o framesync.o
 OBJS-$(CONFIG_SSIM360_FILTER)                += vf_ssim360.o framesync.o
 OBJS-$(CONFIG_STEREO3D_FILTER)               += vf_stereo3d.o
@@ -553,6 +554,7 @@ OBJS-$(CONFIG_VIDSTABTRANSFORM_FILTER)       += vidstabutils.o vf_vidstabtransfo
 OBJS-$(CONFIG_VIF_FILTER)                    += vf_vif.o framesync.o
 OBJS-$(CONFIG_VIGNETTE_FILTER)               += vf_vignette.o
 OBJS-$(CONFIG_VMAFMOTION_FILTER)             += vf_vmafmotion.o framesync.o
+OBJS-$(CONFIG_VPP_AMF_FILTER)                += vf_vpp_amf.o scale_eval.o vf_amf_common.o
 OBJS-$(CONFIG_VPP_QSV_FILTER)                += vf_vpp_qsv.o
 OBJS-$(CONFIG_VSTACK_FILTER)                 += vf_stack.o framesync.o
 OBJS-$(CONFIG_W3FDIF_FILTER)                 += vf_w3fdif.o
diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c
index c532682fc2..2f40fb8f6f 100644
--- a/libavfilter/allfilters.c
+++ b/libavfilter/allfilters.c
@@ -430,6 +430,8 @@ extern const AVFilter ff_vf_roberts_opencl;
 extern const AVFilter ff_vf_rotate;
 extern const AVFilter ff_vf_sab;
 extern const AVFilter ff_vf_scale;
+extern const AVFilter ff_vf_vpp_amf;
+extern const AVFilter ff_vf_sr_amf;
 extern const AVFilter ff_vf_scale_cuda;
 extern const AVFilter ff_vf_scale_npp;
 extern const AVFilter ff_vf_scale_qsv;
diff --git a/libavfilter/vf_amf_common.c b/libavfilter/vf_amf_common.c
new file mode 100644
index 0000000000..b842aae77a
--- /dev/null
+++ b/libavfilter/vf_amf_common.c
@@ -0,0 +1,516 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "vf_amf_common.h"
+
+#include "libavutil/avassert.h"
+#include "avfilter.h"
+#include "internal.h"
+#include "formats.h"
+#include "libavutil/imgutils.h"
+
+#include "libavutil/hwcontext_amf.h"
+#include "libavutil/hwcontext_amf_internal.h"
+#include "AMF/components/ColorSpace.h"
+#include "scale_eval.h"
+
+#if CONFIG_DXVA2
+#include <d3d9.h>
+#endif
+
+#if CONFIG_D3D11VA
+#include <d3d11.h>
+#endif
+
+/**
+ * Init callback shared by the AMF filters: parse the "format" option.
+ * The literal string "same" keeps the input pixel format and is stored
+ * as AV_PIX_FMT_NONE; anything else must name a known pixel format.
+ */
+int amf_filter_init(AVFilterContext *avctx)
+{
+    AMFFilterContext *ctx = avctx->priv;
+
+    if (strcmp(ctx->format_str, "same") == 0) {
+        ctx->format = AV_PIX_FMT_NONE;
+        return 0;
+    }
+
+    ctx->format = av_get_pix_fmt(ctx->format_str);
+    if (ctx->format == AV_PIX_FMT_NONE) {
+        av_log(avctx, AV_LOG_ERROR, "Unrecognized pixel format: %s\n", ctx->format_str);
+        return AVERROR(EINVAL);
+    }
+
+    return 0;
+}
+
+/**
+ * Uninit callback shared by the AMF filters: terminate and release the AMF
+ * component, free a locally-created AMF device context, and drop every
+ * hardware buffer reference held by the filter.
+ */
+void amf_filter_uninit(AVFilterContext *avctx)
+{
+    AMFFilterContext *ctx = avctx->priv;
+
+    if (ctx->component) {
+        ctx->component->pVtbl->Terminate(ctx->component);
+        ctx->component->pVtbl->Release(ctx->component);
+        ctx->component = NULL;
+    }
+    // Only free the device context when this filter allocated it itself
+    // (local_context set in amf_init_filter_config); otherwise it is owned
+    // by an external AVHWDeviceContext.
+    if (ctx->amf_device_ctx && ctx->local_context) {
+        av_amf_context_free(0, (uint8_t *)ctx->amf_device_ctx);
+        // av_freep() takes the address of the pointer to free; passing the
+        // pointer itself would free the struct's first member instead and
+        // leak/dangle the context.
+        av_freep(&ctx->amf_device_ctx);
+    }
+
+    av_buffer_unref(&ctx->amf_device_ref);
+    av_buffer_unref(&ctx->hwdevice_ref);
+    av_buffer_unref(&ctx->hwframes_in_ref);
+    av_buffer_unref(&ctx->hwframes_out_ref);
+}
+
+/**
+ * Filter one frame through the AMF component: convert/submit the input
+ * frame as an AMF surface, query the processed output surface, wrap it in
+ * an AVFrame, propagate color metadata chosen by the filter options, and
+ * push it downstream.
+ *
+ * Takes ownership of @in (freed on both success and failure paths).
+ * Returns 0 or a negative AVERROR code.
+ */
+int amf_filter_filter_frame(AVFilterLink *inlink, AVFrame *in)
+{
+    AVFilterContext             *avctx = inlink->dst;
+    AMFFilterContext             *ctx = avctx->priv;
+    AVFilterLink                *outlink = avctx->outputs[0];
+    AMF_RESULT  res;
+    AMFSurface *surface_in  = NULL;
+    AMFSurface *surface_out = NULL; // initialized: QueryOutput may yield no data
+    AMFData *data_out = NULL;
+    enum AVColorSpace out_colorspace;
+    enum AVColorRange out_color_range;
+
+    AVFrame *out = NULL;
+    int ret = 0;
+
+    if (!ctx->component)
+        return AVERROR(EINVAL);
+
+    ret = amf_avframe_to_amfsurface(avctx, in, &surface_in);
+    if (ret < 0)
+        goto fail;
+
+    // NOTE(review): surface_in is never released here; confirm whether
+    // amf_avframe_to_amfsurface() ties its lifetime to the frame or whether
+    // a Release() after SubmitInput() is required.
+    res = ctx->component->pVtbl->SubmitInput(ctx->component, (AMFData*)surface_in);
+    AMF_GOTO_FAIL_IF_FALSE(avctx, res == AMF_OK, AVERROR_UNKNOWN, "SubmitInput() failed with error %d\n", res);
+    res = ctx->component->pVtbl->QueryOutput(ctx->component, &data_out);
+    AMF_GOTO_FAIL_IF_FALSE(avctx, res == AMF_OK, AVERROR_UNKNOWN, "QueryOutput() failed with error %d\n", res);
+
+    if (data_out) {
+        AMFGuid guid = IID_AMFSurface();
+        data_out->pVtbl->QueryInterface(data_out, &guid, (void**)&surface_out); // query for surface interface
+        data_out->pVtbl->Release(data_out);
+    }
+    // Guard against a successful QueryOutput() that produced no data;
+    // previously surface_out was used uninitialized in that case.
+    AMF_GOTO_FAIL_IF_FALSE(avctx, surface_out != NULL, AVERROR_UNKNOWN, "QueryOutput() returned no surface\n");
+
+    out = amf_amfsurface_to_avframe(avctx, surface_out);
+    if (!out) { // wrap failed: we still own the surface reference
+        surface_out->pVtbl->Release(surface_out);
+        ret = AVERROR(ENOMEM);
+        goto fail;
+    }
+
+    ret = av_frame_copy_props(out, in);
+    if (ret < 0) // check before touching out's fields below
+        goto fail;
+    av_frame_unref(in);
+
+    out_colorspace = AVCOL_SPC_UNSPECIFIED;
+
+    // Map the converter's color profile option onto the output colorspace.
+    if (ctx->color_profile != AMF_VIDEO_CONVERTER_COLOR_PROFILE_UNKNOWN) {
+        switch(ctx->color_profile) {
+        case AMF_VIDEO_CONVERTER_COLOR_PROFILE_601:
+            out_colorspace = AVCOL_SPC_SMPTE170M;
+        break;
+        case AMF_VIDEO_CONVERTER_COLOR_PROFILE_709:
+            out_colorspace = AVCOL_SPC_BT709;
+        break;
+        case AMF_VIDEO_CONVERTER_COLOR_PROFILE_2020:
+            out_colorspace = AVCOL_SPC_BT2020_NCL;
+        break;
+        case AMF_VIDEO_CONVERTER_COLOR_PROFILE_JPEG:
+            out_colorspace = AVCOL_SPC_RGB;
+        break;
+        default:
+            out_colorspace = AVCOL_SPC_UNSPECIFIED;
+        break;
+        }
+        out->colorspace = out_colorspace;
+    }
+
+    out_color_range = AVCOL_RANGE_UNSPECIFIED;
+    if (ctx->color_range == AMF_COLOR_RANGE_FULL)
+        out_color_range = AVCOL_RANGE_JPEG;
+    else if (ctx->color_range == AMF_COLOR_RANGE_STUDIO)
+        out_color_range = AVCOL_RANGE_MPEG;
+
+    if (ctx->color_range != AMF_COLOR_RANGE_UNDEFINED)
+        out->color_range = out_color_range;
+
+    if (ctx->primaries != AMF_COLOR_PRIMARIES_UNDEFINED)
+        out->color_primaries = ctx->primaries;
+
+    if (ctx->trc != AMF_COLOR_TRANSFER_CHARACTERISTIC_UNDEFINED)
+        out->color_trc = ctx->trc;
+
+    out->format = outlink->format;
+    out->width  = outlink->w;
+    out->height = outlink->h;
+
+    out->hw_frames_ctx = av_buffer_ref(ctx->hwframes_out_ref);
+    if (!out->hw_frames_ctx) {
+        ret = AVERROR(ENOMEM);
+        goto fail;
+    }
+
+    // Rescale the sample aspect ratio so the display aspect ratio is kept.
+    if (inlink->sample_aspect_ratio.num) {
+        outlink->sample_aspect_ratio = av_mul_q((AVRational){outlink->h * inlink->w, outlink->w * inlink->h}, inlink->sample_aspect_ratio);
+    } else
+        outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
+
+    av_frame_free(&in);
+    return ff_filter_frame(outlink, out);
+fail:
+    av_frame_free(&in);
+    av_frame_free(&out);
+    return ret;
+}
+
+
+
+/**
+ * Register the supported input and output pixel-format lists on the
+ * filter's links. The input list is the output list plus the extra
+ * software formats in @input_pix_fmts.
+ *
+ * Returns 0 on success or a negative AVERROR code.
+ */
+int amf_setup_input_output_formats(AVFilterContext *avctx,
+                                    const enum AVPixelFormat *input_pix_fmts,
+                                    const enum AVPixelFormat *output_pix_fmts)
+{
+    int err;
+    int i;
+    AVFilterFormats *input_formats;
+
+    // If hw_device_ctx is set to a D3D11VA/DXVA2 device, replace the output
+    // list so that matching hardware format is chosen by default.
+    // The order is ignored if hw_frames_ctx is not NULL on the config_output stage.
+    if (avctx->hw_device_ctx) {
+        AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)avctx->hw_device_ctx->data;
+
+        switch (device_ctx->type) {
+    #if CONFIG_D3D11VA
+        case AV_HWDEVICE_TYPE_D3D11VA:
+            {
+                static const enum AVPixelFormat output_pix_fmts_d3d11[] = {
+                    AV_PIX_FMT_D3D11,
+                    AV_PIX_FMT_NONE,
+                };
+                output_pix_fmts = output_pix_fmts_d3d11;
+            }
+            break;
+    #endif
+    #if CONFIG_DXVA2
+        case AV_HWDEVICE_TYPE_DXVA2:
+            {
+                static const enum AVPixelFormat output_pix_fmts_dxva2[] = {
+                    AV_PIX_FMT_DXVA2_VLD,
+                    AV_PIX_FMT_NONE,
+                };
+                output_pix_fmts = output_pix_fmts_dxva2;
+            }
+            break;
+    #endif
+        default:
+            {
+                av_log(avctx, AV_LOG_ERROR, "Unsupported device : %s\n", av_hwdevice_get_type_name(device_ctx->type));
+                return AVERROR(EINVAL);
+            }
+            break;
+        }
+    }
+
+    // Input side accepts every output format plus the extra input-only ones.
+    input_formats = ff_make_format_list(output_pix_fmts);
+    if (!input_formats) {
+        return AVERROR(ENOMEM);
+    }
+
+    // NOTE(review): on ff_add_format() failure input_formats may leak unless
+    // the helper frees the list itself — TODO confirm ff_formats semantics.
+    for (i = 0; input_pix_fmts[i] != AV_PIX_FMT_NONE; i++) {
+        err = ff_add_format(&input_formats, input_pix_fmts[i]);
+        if (err < 0)
+            return err;
+    }
+
+    if ((err = ff_formats_ref(input_formats, &avctx->inputs[0]->outcfg.formats)) < 0 ||
+        (err = ff_formats_ref(ff_make_format_list(output_pix_fmts),
+                              &avctx->outputs[0]->incfg.formats)) < 0)
+        return err;
+    return 0;
+}
+
+/**
+ * Copy the pixel data of a system-memory AVFrame into the planes of an
+ * AMF surface, plane by plane. Always returns 0.
+ */
+int amf_copy_surface(AVFilterContext *avctx, const AVFrame *frame,
+    AMFSurface* surface)
+{
+    uint8_t  *dst_data[4];
+    int       dst_linesize[4];
+    int       plane_count;
+    int       p;
+
+    plane_count = surface->pVtbl->GetPlanesCount(surface);
+    av_assert0(plane_count < FF_ARRAY_ELEMS(dst_data));
+
+    // Collect the destination pointers and pitches from the AMF planes.
+    for (p = 0; p < plane_count; p++) {
+        AMFPlane *plane = surface->pVtbl->GetPlaneAt(surface, p);
+        dst_data[p]     = plane->pVtbl->GetNative(plane);
+        dst_linesize[p] = plane->pVtbl->GetHPitch(plane);
+    }
+
+    av_image_copy(dst_data, dst_linesize,
+                  (const uint8_t**)frame->data, frame->linesize, frame->format,
+                  frame->width, frame->height);
+
+    return 0;
+}
+
+/**
+ * Configure the filter's output link: evaluate the scaled dimensions,
+ * acquire (or create) an AMF device context, allocate and initialize the
+ * output hardware frames context, and report the effective input software
+ * pixel format through @in_format.
+ *
+ * Three setups are handled:
+ *   1. input has hw_frames_ctx  -> derive an AMF device from it;
+ *   2. filter has hw_device_ctx -> derive an AMF device from the device;
+ *   3. neither                  -> create a standalone local AMF context.
+ *
+ * Returns 0 on success or a negative AVERROR code.
+ */
+int amf_init_filter_config(AVFilterLink *outlink, enum AVPixelFormat *in_format)
+{
+    AVFilterContext *avctx = outlink->src;
+    AVFilterLink   *inlink = avctx->inputs[0];
+    AMFFilterContext  *ctx = avctx->priv;
+    AVHWFramesContext *hwframes_out;
+    int err;
+    AMF_RESULT res;
+
+    // Evaluate w/h expressions against the input link dimensions.
+    if ((err = ff_scale_eval_dimensions(avctx,
+                                        ctx->w_expr, ctx->h_expr,
+                                        inlink, outlink,
+                                        &ctx->width, &ctx->height)) < 0)
+        return err;
+
+    ff_scale_adjust_dimensions(inlink, &ctx->width, &ctx->height,
+                               ctx->force_original_aspect_ratio, ctx->force_divisible_by);
+
+    // Drop any state from a previous configuration before rebuilding it.
+    av_buffer_unref(&ctx->amf_device_ref);
+    av_buffer_unref(&ctx->hwframes_in_ref);
+    av_buffer_unref(&ctx->hwframes_out_ref);
+    ctx->local_context = 0;
+    if (inlink->hw_frames_ctx) {
+        // Case 1: hardware input frames; derive an AMF device from them.
+        AVHWFramesContext *frames_ctx = (AVHWFramesContext*)inlink->hw_frames_ctx->data;
+        if (frames_ctx->device_ctx->type == AV_HWDEVICE_TYPE_AMF) {
+            ctx->amf_device_ctx = frames_ctx->device_ctx->hwctx;
+        }
+        if (av_amf_av_to_amf_format(frames_ctx->sw_format) == AMF_SURFACE_UNKNOWN) {
+            av_log(avctx, AV_LOG_ERROR, "Format of input frames context (%s) is not supported by AMF.\n",
+                   av_get_pix_fmt_name(frames_ctx->sw_format));
+            return AVERROR(EINVAL);
+        }
+
+        err = av_hwdevice_ctx_create_derived(&ctx->amf_device_ref, AV_HWDEVICE_TYPE_AMF, frames_ctx->device_ref, 0);
+        if (err < 0)
+            return err;
+
+        ctx->hwframes_in_ref = av_buffer_ref(inlink->hw_frames_ctx);
+        if (!ctx->hwframes_in_ref)
+            return AVERROR(ENOMEM);
+
+        // Output frames live on the same device as the input frames.
+        ctx->hwframes_out_ref = av_hwframe_ctx_alloc(frames_ctx->device_ref);
+        if (!ctx->hwframes_out_ref)
+            return AVERROR(ENOMEM);
+
+        hwframes_out = (AVHWFramesContext*)ctx->hwframes_out_ref->data;
+        hwframes_out->format    = outlink->format;
+        hwframes_out->sw_format = frames_ctx->sw_format;
+    } else if (avctx->hw_device_ctx) {
+        // Case 2: a hardware device but software input frames.
+        AVHWDeviceContext   *hwdev_ctx;
+        err = av_hwdevice_ctx_create_derived(&ctx->amf_device_ref, AV_HWDEVICE_TYPE_AMF, avctx->hw_device_ctx, 0);
+        if (err < 0)
+            return err;
+        hwdev_ctx = (AVHWDeviceContext*)avctx->hw_device_ctx->data;
+        if (hwdev_ctx->type == AV_HWDEVICE_TYPE_AMF)
+        {
+            ctx->amf_device_ctx =  hwdev_ctx->hwctx;
+        }
+        ctx->hwdevice_ref = av_buffer_ref(avctx->hw_device_ctx);
+        if (!ctx->hwdevice_ref)
+            return AVERROR(ENOMEM);
+
+        ctx->hwframes_out_ref = av_hwframe_ctx_alloc(ctx->hwdevice_ref);
+        if (!ctx->hwframes_out_ref)
+            return AVERROR(ENOMEM);
+
+        hwframes_out = (AVHWFramesContext*)ctx->hwframes_out_ref->data;
+        hwframes_out->format    = AV_PIX_FMT_AMF_SURFACE;
+        hwframes_out->sw_format = outlink->format;
+    } else {
+        // Case 3: no hardware context at all; create a local AMF context.
+        ctx->amf_device_ctx = av_mallocz(sizeof(AVAMFDeviceContext));
+        ctx->local_context = 1;
+        // NOTE(review): av_mallocz() result is not checked, and res is an
+        // AMF_RESULT returned directly where callers expect an AVERROR code
+        // — TODO confirm/convert.
+        if ((res = av_amf_context_create(ctx->amf_device_ctx, avctx, "", NULL, 0)) != 0) {
+            return res;
+        }
+        // NOTE(review): ctx->amf_device_ref was unreferenced above and is
+        // never set in this branch, so av_hwframe_ctx_alloc(NULL) here looks
+        // like it will always fail — TODO confirm against hwcontext_amf.
+        ctx->hwframes_out_ref = av_hwframe_ctx_alloc(ctx->amf_device_ref);
+        if (!ctx->hwframes_out_ref)
+            return AVERROR(ENOMEM);
+
+        hwframes_out = (AVHWFramesContext*)ctx->hwframes_out_ref->data;
+        hwframes_out->format    = outlink->format;
+        hwframes_out->sw_format = inlink->format;
+    }
+
+    // Explicit "format" option overrides the derived software format.
+    if (ctx->format != AV_PIX_FMT_NONE) {
+        hwframes_out->sw_format = ctx->format;
+    }
+
+    // Report the effective input software format to the caller.
+    if (inlink->format == AV_PIX_FMT_AMF_SURFACE) {
+        if (!inlink->hw_frames_ctx || !inlink->hw_frames_ctx->data)
+            return AVERROR(EINVAL);
+        else
+            *in_format = ((AVHWFramesContext*)inlink->hw_frames_ctx->data)->sw_format;
+    } else
+        *in_format = inlink->format;
+
+    outlink->w = ctx->width;
+    outlink->h = ctx->height;
+
+    hwframes_out->width = outlink->w;
+    hwframes_out->height = outlink->h;
+
+    err = av_hwframe_ctx_init(ctx->hwframes_out_ref);
+    if (err < 0)
+        return err;
+
+    outlink->hw_frames_ctx = av_buffer_ref(ctx->hwframes_out_ref);
+    if (!outlink->hw_frames_ctx) {
+        return AVERROR(ENOMEM);
+    }
+    return 0;
+}
+
+/**
+ * AVBufferRef free callback: release the AMF surface reference that the
+ * buffer holds. The surface pointer travels in the buffer's data field.
+ */
+void amf_free_amfsurface(void *opaque, uint8_t *data)
+{
+    AMFSurface *surf = (AMFSurface*)data;
+    surf->pVtbl->Release(surf);
+}
+
+/**
+ * Wrap an AMFSurface into a newly allocated AVFrame.
+ *
+ * Ownership of one AMF reference passes to the returned frame: an AVBufferRef
+ * with amf_free_amfsurface() as free callback releases the surface when the
+ * frame is unreferenced.
+ *
+ * @return the new frame, or NULL on error (the surface reference is then NOT
+ *         consumed; the caller keeps responsibility for it).
+ */
+AVFrame *amf_amfsurface_to_avframe(AVFilterContext *avctx, AMFSurface* pSurface)
+{
+    AVFrame *frame = av_frame_alloc();
+    AMFFilterContext  *ctx = avctx->priv;
+
+    if (!frame)
+        return NULL;
+
+    if (ctx->hwframes_out_ref) {
+        AVHWFramesContext *hwframes_out = (AVHWFramesContext *)ctx->hwframes_out_ref->data;
+        if (hwframes_out->format == AV_PIX_FMT_AMF_SURFACE) {
+            int ret = av_hwframe_get_buffer(ctx->hwframes_out_ref, frame, 0);
+            if (ret < 0) {
+                av_log(avctx, AV_LOG_ERROR, "Get hw frame failed.\n");
+                goto fail;
+            }
+            frame->data[3] = (uint8_t *)pSurface;
+            // buf[1] carries the AMF reference; the surface must be passed as
+            // the buffer *data* because amf_free_amfsurface() releases data.
+            frame->buf[1] = av_buffer_create((uint8_t *)pSurface, sizeof(AMFSurface),
+                                            amf_free_amfsurface,
+                                            (void*)avctx,
+                                            AV_BUFFER_FLAG_READONLY);
+            if (!frame->buf[1])
+                goto fail;
+        } else { // FIXME: add processing of other hw formats
+            av_log(avctx, AV_LOG_ERROR, "Unknown pixel format\n");
+            goto fail;
+        }
+    } else {
+
+        switch (pSurface->pVtbl->GetMemoryType(pSurface))
+        {
+    #if CONFIG_D3D11VA
+            case AMF_MEMORY_DX11:
+            {
+                AMFPlane *plane0 = pSurface->pVtbl->GetPlaneAt(pSurface, 0);
+                frame->data[0] = plane0->pVtbl->GetNative(plane0);
+                frame->data[1] = (uint8_t*)(intptr_t)0; // texture array slice index
+
+                // The surface must be the buffer data (not the opaque):
+                // amf_free_amfsurface() releases its data argument. Passing
+                // NULL here would make the free callback dereference NULL.
+                frame->buf[0] = av_buffer_create((uint8_t *)pSurface,
+                                        sizeof(AMFSurface),
+                                        amf_free_amfsurface,
+                                        NULL,
+                                        AV_BUFFER_FLAG_READONLY);
+                if (!frame->buf[0])
+                    goto fail;
+            }
+            break;
+    #endif
+    #if CONFIG_DXVA2
+            case AMF_MEMORY_DX9:
+            {
+                AMFPlane *plane0 = pSurface->pVtbl->GetPlaneAt(pSurface, 0);
+                frame->data[3] = plane0->pVtbl->GetNative(plane0);
+
+                // Same as the DX11 case: pSurface rides in the buffer data so
+                // the free callback can release it.
+                frame->buf[0] = av_buffer_create((uint8_t *)pSurface,
+                                        sizeof(AMFSurface),
+                                        amf_free_amfsurface,
+                                        NULL,
+                                        AV_BUFFER_FLAG_READONLY);
+                if (!frame->buf[0])
+                    goto fail;
+            }
+            break;
+    #endif
+        default:
+            {
+                av_log(avctx, AV_LOG_ERROR, "Unsupported memory type : %d\n", pSurface->pVtbl->GetMemoryType(pSurface));
+                goto fail;
+            }
+        }
+    }
+
+    return frame;
+
+fail:
+    // Single error exit: never leak the AVFrame allocated above.
+    av_frame_free(&frame);
+    return NULL;
+}
+
+/**
+ * Obtain an AMFSurface for the given AVFrame, either by wrapping an existing
+ * hardware texture (D3D11 / DXVA2 / native AMF surface) or by allocating a
+ * host-memory surface and copying the frame data into it.
+ *
+ * @param avctx     filter context (used for logging and the AMF context)
+ * @param frame     source frame; not modified
+ * @param ppSurface receives the resulting surface on success
+ * @return 0 on success, a negative AVERROR code on failure
+ *
+ * NOTE(review): ownership is subtle — for AV_PIX_FMT_AMF_SURFACE input the
+ * surface is taken from frame->data[3] without an AddRef; this assumes the
+ * frame outlives *ppSurface. Confirm against the callers.
+ */
+int amf_avframe_to_amfsurface(AVFilterContext *avctx, const AVFrame *frame, AMFSurface** ppSurface)
+{
+    AMFFilterContext *ctx = avctx->priv;
+    AMFSurface *surface;
+    AMF_RESULT  res;
+    int hw_surface = 0;
+
+    switch (frame->format) {
+#if CONFIG_D3D11VA
+    case AV_PIX_FMT_D3D11:
+        {
+            // GUID under which AMF expects the texture-array slice index to be
+            // stored as private data on the ID3D11Texture2D.
+            static const GUID AMFTextureArrayIndexGUID = { 0x28115527, 0xe7c3, 0x4b66, { 0x99, 0xd3, 0x4f, 0x2a, 0xe6, 0xb4, 0x7f, 0xaf } };
+            ID3D11Texture2D *texture = (ID3D11Texture2D*)frame->data[0]; // actual texture
+            int index = (intptr_t)frame->data[1]; // slice index within the texture array; tells AMF which slice to use
+            texture->lpVtbl->SetPrivateData(texture, &AMFTextureArrayIndexGUID, sizeof(index), &index);
+
+            res = ctx->amf_device_ctx->context->pVtbl->CreateSurfaceFromDX11Native(ctx->amf_device_ctx->context, texture, &surface, NULL); // wrap to AMF surface
+            AMF_RETURN_IF_FALSE(avctx, res == AMF_OK, AVERROR(ENOMEM), "CreateSurfaceFromDX11Native() failed  with error %d\n", res);
+            hw_surface = 1;
+        }
+        break;
+#endif
+    case AV_PIX_FMT_AMF_SURFACE:
+        {
+            // Already an AMF surface — reuse it directly.
+            // NOTE(review): no AddRef here; see ownership note above.
+            surface = (AMFSurface*)frame->data[3]; // actual surface
+            hw_surface = 1;
+        }
+        break;
+
+#if CONFIG_DXVA2
+    case AV_PIX_FMT_DXVA2_VLD:
+        {
+            IDirect3DSurface9 *texture = (IDirect3DSurface9 *)frame->data[3]; // actual texture
+
+            res = ctx->amf_device_ctx->context->pVtbl->CreateSurfaceFromDX9Native(ctx->amf_device_ctx->context, texture, &surface, NULL); // wrap to AMF surface
+            AMF_RETURN_IF_FALSE(avctx, res == AMF_OK, AVERROR(ENOMEM), "CreateSurfaceFromDX9Native() failed  with error %d\n", res);
+            hw_surface = 1;
+        }
+        break;
+#endif
+    default:
+        {
+            // Software frame: allocate a host surface and copy the planes.
+            // NOTE(review): av_amf_av_to_amf_format() may return an "unknown"
+            // format for unsupported pixel formats (AllocSurface would then
+            // fail), and the amf_copy_surface() result is ignored — confirm.
+            AMF_SURFACE_FORMAT amf_fmt = av_amf_av_to_amf_format(frame->format);
+            res = ctx->amf_device_ctx->context->pVtbl->AllocSurface(ctx->amf_device_ctx->context, AMF_MEMORY_HOST, amf_fmt, frame->width, frame->height, &surface);
+            AMF_RETURN_IF_FALSE(avctx, res == AMF_OK, AVERROR(ENOMEM), "AllocSurface() failed  with error %d\n", res);
+            amf_copy_surface(avctx, frame, surface);
+        }
+        break;
+    }
+
+    if (hw_surface) {
+        // input HW surfaces can be vertically aligned by 16; tell AMF the real size
+        surface->pVtbl->SetCrop(surface, 0, 0, frame->width, frame->height);
+    }
+
+    surface->pVtbl->SetPts(surface, frame->pts);
+    *ppSurface = surface;
+    return 0;
+}
diff --git a/libavfilter/vf_amf_common.h b/libavfilter/vf_amf_common.h
new file mode 100644
index 0000000000..c4b5ba659a
--- /dev/null
+++ b/libavfilter/vf_amf_common.h
@@ -0,0 +1,73 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_AMF_COMMON_H
+#define AVFILTER_AMF_COMMON_H
+
+#include "avfilter.h"
+
+#include "AMF/core/Surface.h"
+#include "AMF/components/Component.h"
+#include "libavutil/hwcontext_amf.h"
+
+/**
+ * Private context shared by the AMF-based video filters (vpp_amf, sr_amf).
+ */
+typedef struct AMFFilterContext {
+    const AVClass *class;       // must be first so the priv context works with av_log()
+
+    int width, height;          // evaluated output dimensions
+    enum AVPixelFormat format;  // requested output sw format (AV_PIX_FMT_NONE = keep)
+    int scale_type;             // AMF_VIDEO_CONVERTER_SCALE_* (used by vpp_amf)
+    int color_profile;          // AMF_VIDEO_CONVERTER_COLOR_PROFILE_*
+    int color_range;            // AMF_COLOR_RANGE_*
+    int primaries;              // AMF_COLOR_PRIMARIES_*
+    int trc;                    // AMF_COLOR_TRANSFER_CHARACTERISTIC_*
+    int fill;                   // enable border fill (sr_amf)
+    int fill_color;             // border fill color — not referenced in the visible code; TODO confirm usage
+    int keep_ratio;             // keep input aspect ratio (sr_amf)
+
+    // HQScaler properties
+    int algorithm;              // AMF_HQ_SCALER_ALGORITHM_*; -1 = leave component default
+    float sharpness;            // HQ scaler sharpening; -1 = leave component default
+
+    char *w_expr;               // output width expression (e.g. "iw")
+    char *h_expr;               // output height expression (e.g. "ih")
+    char *format_str;           // output format option string ("same" = keep input)
+    int force_original_aspect_ratio;
+    int force_divisible_by;
+
+    AMFComponent        *component;      // the AMF converter / HQ scaler component
+    AVBufferRef         *amf_device_ref; // AMF hw device context reference
+
+    AVBufferRef         *hwframes_in_ref;   // input hw frames context (if any)
+    AVBufferRef         *hwframes_out_ref;  // output hw frames context
+    AVBufferRef         *hwdevice_ref;      // caller-supplied hw device (extra ref)
+
+    AVAMFDeviceContext  *amf_device_ctx; // AMF context/factory; owned only if local_context
+    int                  local_context;  // 1 if amf_device_ctx was allocated by this filter
+} AMFFilterContext;
+
+// Common entry points implemented in vf_amf_common.c and shared by all
+// AMF filters (vpp_amf, sr_amf).
+int amf_filter_init(AVFilterContext *avctx);    // common init callback
+void amf_filter_uninit(AVFilterContext *avctx); // common teardown callback
+int amf_init_filter_config(AVFilterLink *outlink, enum AVPixelFormat *in_format); // set up device/frames contexts for the output link
+int amf_copy_surface(AVFilterContext *avctx, const AVFrame *frame, AMFSurface* surface); // copy a software frame into a host AMF surface
+void amf_free_amfsurface(void *opaque, uint8_t *data); // AVBufferRef free callback releasing an AMFSurface
+AVFrame *amf_amfsurface_to_avframe(AVFilterContext *avctx, AMFSurface* pSurface); // wrap an AMF surface in an AVFrame
+int amf_avframe_to_amfsurface(AVFilterContext *avctx, const AVFrame *frame, AMFSurface** ppSurface); // wrap/copy an AVFrame into an AMF surface
+int amf_setup_input_output_formats(AVFilterContext *avctx, const enum AVPixelFormat *input_pix_fmts, const enum AVPixelFormat *output_pix_fmts); // register supported formats on both links
+int amf_filter_filter_frame(AVFilterLink *inlink, AVFrame *in); // common filter_frame callback
+
+#endif /* AVFILTER_AMF_COMMON_H */
diff --git a/libavfilter/vf_sr_amf.c b/libavfilter/vf_sr_amf.c
new file mode 100644
index 0000000000..a699a9fa47
--- /dev/null
+++ b/libavfilter/vf_sr_amf.c
@@ -0,0 +1,189 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Super resolution video filter with AMF hardware acceleration
+ */
+
+#include <stdio.h>
+#include <string.h>
+
+#include "libavutil/avassert.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/time.h"
+
+#include "libavutil/hwcontext.h"
+#include "libavutil/hwcontext_amf.h"
+#include "libavutil/hwcontext_amf_internal.h"
+
+#include "AMF/components/HQScaler.h"
+#include "AMF/components/ColorSpace.h"
+#include "vf_amf_common.h"
+
+#include "avfilter.h"
+#include "internal.h"
+#include "formats.h"
+#include "video.h"
+
+#if CONFIG_DXVA2
+#include <d3d9.h>
+#endif
+
+#if CONFIG_D3D11VA
+#include <d3d11.h>
+#endif
+
+
+/* Register the pixel formats this filter supports on its input and output. */
+static int amf_filter_query_formats(AVFilterContext *avctx)
+{
+    /* Formats accepted on the filter input. */
+    static const enum AVPixelFormat input_pix_fmts[] = {
+        AV_PIX_FMT_NV12,
+        AV_PIX_FMT_P010,
+        AV_PIX_FMT_BGRA,
+        AV_PIX_FMT_RGBA,
+        AV_PIX_FMT_AMF_SURFACE,
+        AV_PIX_FMT_RGBAF16,
+        AV_PIX_FMT_NONE,
+    };
+    /* Formats offered on the filter output. */
+    static const enum AVPixelFormat output_pix_fmts_default[] = {
+        AV_PIX_FMT_NV12,
+        AV_PIX_FMT_P010,
+        AV_PIX_FMT_BGRA,
+        AV_PIX_FMT_RGBA,
+        AV_PIX_FMT_AMF_SURFACE,
+        AV_PIX_FMT_D3D11,
+        AV_PIX_FMT_DXVA2_VLD,
+        AV_PIX_FMT_RGBAF16,
+        AV_PIX_FMT_NONE,
+    };
+
+    return amf_setup_input_output_formats(avctx, input_pix_fmts, output_pix_fmts_default);
+}
+
+/**
+ * Output-link configuration for sr_amf: create the AMF HQ scaler component
+ * and apply the user-selected options.
+ *
+ * @return 0 on success, a negative AVERROR code on failure
+ */
+static int amf_filter_config_output(AVFilterLink *outlink)
+{
+    AVFilterContext *avctx = outlink->src;
+    AVFilterLink   *inlink = avctx->inputs[0];
+    AMFFilterContext  *ctx = avctx->priv;
+    AMFSize out_size;
+    int err;
+    AMF_RESULT res;
+    enum AVPixelFormat in_format;
+
+    err = amf_init_filter_config(outlink, &in_format);
+    if (err < 0)
+        return err;
+
+    // HQ scaler should be used for upscaling only
+    if (inlink->w > outlink->w || inlink->h > outlink->h) {
+        av_log(avctx, AV_LOG_ERROR, "AMF HQ scaler should be used for upscaling only.\n");
+        return AVERROR_UNKNOWN;
+    }
+    // FIXME: add checks whether we have HW context
+    res = ctx->amf_device_ctx->factory->pVtbl->CreateComponent(ctx->amf_device_ctx->factory, ctx->amf_device_ctx->context, AMFHQScaler, &ctx->component);
+    // Log against avctx (not the priv struct) like every other check here.
+    AMF_RETURN_IF_FALSE(avctx, res == AMF_OK, AVERROR_FILTER_NOT_FOUND, "CreateComponent(%ls) failed with error %d\n", AMFHQScaler, res);
+
+    out_size.width = outlink->w;
+    out_size.height = outlink->h;
+    AMF_ASSIGN_PROPERTY_SIZE(res, ctx->component, AMF_HQ_SCALER_OUTPUT_SIZE, out_size);
+    AMF_RETURN_IF_FALSE(avctx, res == AMF_OK, AVERROR_UNKNOWN, "AMFHQScaler-SetProperty() failed with error %d\n", res);
+
+    // -1 means "not set": leave the component's default in place.
+    if (ctx->algorithm != -1) {
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->component, AMF_HQ_SCALER_ALGORITHM, ctx->algorithm);
+    }
+    if (ctx->sharpness != -1) {
+        AMF_ASSIGN_PROPERTY_DOUBLE(res, ctx->component, AMF_HQ_SCALER_SHARPNESS, ctx->sharpness);
+    }
+    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->component, AMF_HQ_SCALER_FILL, ctx->fill);
+    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->component, AMF_HQ_SCALER_KEEP_ASPECT_RATIO, ctx->keep_ratio);
+    // Setup default options to skip color conversion
+    ctx->color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_UNKNOWN;
+    ctx->color_range = AMF_COLOR_RANGE_UNDEFINED;
+    ctx->primaries = AMF_COLOR_PRIMARIES_UNDEFINED;
+    ctx->trc = AMF_COLOR_TRANSFER_CHARACTERISTIC_UNDEFINED;
+
+    res = ctx->component->pVtbl->Init(ctx->component, av_amf_av_to_amf_format(in_format), inlink->w, inlink->h);
+    AMF_RETURN_IF_FALSE(avctx, res == AMF_OK, AVERROR_UNKNOWN, "AMFHQScaler-Init() failed with error %d\n", res);
+
+    return 0;
+}
+
+#define OFFSET(x) offsetof(AMFFilterContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+// Options for the sr_amf filter. A value of -1 on "sharpness"/"algorithm"
+// means "not set" and leaves the AMF component default untouched.
+// Stray unit strings on sharpness/keep-ratio/fill (one misspelled
+// "keep_ration") were removed: no named constants referenced them.
+static const AVOption sr_amf_options[] = {
+    { "w",              "Output video width",   OFFSET(w_expr),     AV_OPT_TYPE_STRING, { .str = "iw"   }, .flags = FLAGS },
+    { "h",              "Output video height",  OFFSET(h_expr),     AV_OPT_TYPE_STRING, { .str = "ih"   }, .flags = FLAGS },
+
+    { "format",         "Output pixel format",  OFFSET(format_str), AV_OPT_TYPE_STRING, { .str = "same" }, .flags = FLAGS },
+    { "sharpness",      "Sharpness",            OFFSET(sharpness),  AV_OPT_TYPE_FLOAT,  { .dbl = -1 }, -1, 2., FLAGS },
+    { "keep-ratio",     "Keep aspect ratio",    OFFSET(keep_ratio), AV_OPT_TYPE_BOOL, { .i64 = 0  },  0, 1, FLAGS },
+    { "fill",           "Fill",                 OFFSET(fill),       AV_OPT_TYPE_BOOL, { .i64 = 0  },  0, 1, FLAGS },
+
+    { "algorithm",      "Scaling algorithm",    OFFSET(algorithm),      AV_OPT_TYPE_INT,   { .i64 = -1 }, -1, AMF_HQ_SCALER_ALGORITHM_VIDEOSR1_1, FLAGS, "algorithm" },
+    { "bilinear",       "Bilinear",             0,  AV_OPT_TYPE_CONST, { .i64 = AMF_HQ_SCALER_ALGORITHM_BILINEAR }, 0, 0, FLAGS, "algorithm" },
+    { "bicubic",        "Bicubic",              0,  AV_OPT_TYPE_CONST, { .i64 = AMF_HQ_SCALER_ALGORITHM_BICUBIC }, 0, 0, FLAGS, "algorithm" },
+    { "sr1-0",          "Video SR1.0",          0,  AV_OPT_TYPE_CONST, { .i64 = AMF_HQ_SCALER_ALGORITHM_VIDEOSR1_0 }, 0, 0, FLAGS, "algorithm" },
+    { "point",          "Point",                0,  AV_OPT_TYPE_CONST, { .i64 = AMF_HQ_SCALER_ALGORITHM_POINT }, 0, 0, FLAGS, "algorithm" },
+    { "sr1-1",          "Video SR1.1",          0,  AV_OPT_TYPE_CONST, { .i64 = AMF_HQ_SCALER_ALGORITHM_VIDEOSR1_1 }, 0, 0, FLAGS, "algorithm" },
+
+    { NULL },
+};
+
+
+AVFILTER_DEFINE_CLASS(sr_amf);
+
+// Single video input: frames are handled by the shared AMF filter_frame
+// implementation from vf_amf_common.c.
+static const AVFilterPad amf_filter_inputs[] = {
+    {
+        .name         = "default",
+        .type         = AVMEDIA_TYPE_VIDEO,
+        .filter_frame = amf_filter_filter_frame,
+    }
+};
+
+// Single video output: component creation/configuration happens when the
+// output link is configured.
+static const AVFilterPad amf_filter_outputs[] = {
+    {
+        .name         = "default",
+        .type         = AVMEDIA_TYPE_VIDEO,
+        .config_props = amf_filter_config_output,
+    }
+};
+
+// Filter definition for sr_amf (AMF HQ upscaler).
+// Declared const to match FFmpeg's `extern const AVFilter` declarations.
+// FILTER_SINGLE_PIXFMT was removed: it initializes the same formats
+// union/state as FILTER_QUERY_FUNC, so the later initializer silently
+// discarded the query callback and restricted the filter to one format.
+const AVFilter ff_vf_sr_amf = {
+    .name          = "sr_amf",
+    .description   = NULL_IF_CONFIG_SMALL("AMF HQ video upscaling"),
+
+    .init          = amf_filter_init,
+    .uninit        = amf_filter_uninit,
+    FILTER_QUERY_FUNC(&amf_filter_query_formats),
+
+    .priv_size     = sizeof(AMFFilterContext),
+    .priv_class    = &sr_amf_class,
+
+    FILTER_INPUTS(amf_filter_inputs),
+    FILTER_OUTPUTS(amf_filter_outputs),
+
+    .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
+    .flags          = AVFILTER_FLAG_HWDEVICE,
+};
\ No newline at end of file
diff --git a/libavfilter/vf_vpp_amf.c b/libavfilter/vf_vpp_amf.c
new file mode 100644
index 0000000000..c4f75023ac
--- /dev/null
+++ b/libavfilter/vf_vpp_amf.c
@@ -0,0 +1,264 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * VPP video filter with AMF hardware acceleration
+ */
+
+#include <stdio.h>
+#include <string.h>
+
+#include "libavutil/avassert.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/time.h"
+
+#include "libavutil/hwcontext.h"
+#include "libavutil/hwcontext_amf.h"
+#include "libavutil/hwcontext_amf_internal.h"
+
+#include "AMF/components/VideoConverter.h"
+#include "vf_amf_common.h"
+
+#include "avfilter.h"
+#include "formats.h"
+#include "video.h"
+#include "scale_eval.h"
+#include "internal.h"
+
+#if CONFIG_DXVA2
+#include <d3d9.h>
+#endif
+
+#if CONFIG_D3D11VA
+#include <d3d11.h>
+#endif
+
+/* Register the pixel formats this filter supports on its input and output. */
+static int amf_filter_query_formats(AVFilterContext *avctx)
+{
+    /* Formats accepted on the filter input. */
+    static const enum AVPixelFormat input_pix_fmts[] = {
+        AV_PIX_FMT_NV12,
+        AV_PIX_FMT_P010,
+        AV_PIX_FMT_0RGB,
+        AV_PIX_FMT_BGR0,
+        AV_PIX_FMT_BGRA,
+        AV_PIX_FMT_RGB0,
+        AV_PIX_FMT_RGBA,
+        AV_PIX_FMT_GRAY8,
+        AV_PIX_FMT_YUV420P,
+        AV_PIX_FMT_YUV420P10,
+        AV_PIX_FMT_YUYV422,
+        AV_PIX_FMT_AMF_SURFACE,
+        AV_PIX_FMT_NONE,
+    };
+    /* Formats offered on the filter output. */
+    static const enum AVPixelFormat output_pix_fmts_default[] = {
+        AV_PIX_FMT_AMF_SURFACE,
+        AV_PIX_FMT_D3D11,
+        AV_PIX_FMT_DXVA2_VLD,
+        AV_PIX_FMT_NV12,
+        AV_PIX_FMT_BGRA,
+        AV_PIX_FMT_YUV420P,
+        AV_PIX_FMT_NONE,
+    };
+
+    return amf_setup_input_output_formats(avctx, input_pix_fmts, output_pix_fmts_default);
+}
+
+/**
+ * Output-link configuration for vpp_amf: create the AMF VideoConverter
+ * component and apply output format, size, scaling and color options.
+ *
+ * @return 0 on success, a negative AVERROR code on failure
+ */
+static int amf_filter_config_output(AVFilterLink *outlink)
+{
+    AVFilterContext *avctx = outlink->src;
+    AVFilterLink   *inlink = avctx->inputs[0];
+    AMFFilterContext  *ctx = avctx->priv;
+    AVHWFramesContext *hwframes_out = NULL;
+    AMFSize out_size;
+    int err;
+    AMF_RESULT res;
+    // Initialized here; the switch below only overrides it for known profiles.
+    enum AMF_VIDEO_CONVERTER_COLOR_PROFILE_ENUM amf_color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_UNKNOWN;
+    enum AVPixelFormat in_format;
+
+    err = amf_init_filter_config(outlink, &in_format);
+    if (err < 0)
+        return err;
+    // FIXME: add checks whether we have HW context
+    hwframes_out = (AVHWFramesContext*)ctx->hwframes_out_ref->data;
+    res = ctx->amf_device_ctx->factory->pVtbl->CreateComponent(ctx->amf_device_ctx->factory, ctx->amf_device_ctx->context, AMFVideoConverter, &ctx->component);
+    // Log against avctx (not the priv struct) like every other check here.
+    AMF_RETURN_IF_FALSE(avctx, res == AMF_OK, AVERROR_FILTER_NOT_FOUND, "CreateComponent(%ls) failed with error %d\n", AMFVideoConverter, res);
+
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->component, AMF_VIDEO_CONVERTER_OUTPUT_FORMAT, (amf_int32)av_amf_av_to_amf_format(hwframes_out->sw_format));
+    AMF_RETURN_IF_FALSE(avctx, res == AMF_OK, AVERROR_UNKNOWN, "AMFConverter-SetProperty() failed with error %d\n", res);
+
+    out_size.width = outlink->w;
+    out_size.height = outlink->h;
+    AMF_ASSIGN_PROPERTY_SIZE(res, ctx->component, AMF_VIDEO_CONVERTER_OUTPUT_SIZE, out_size);
+    AMF_RETURN_IF_FALSE(avctx, res == AMF_OK, AVERROR_UNKNOWN, "AMFConverter-SetProperty() failed with error %d\n", res);
+
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->component, AMF_VIDEO_CONVERTER_SCALE, (amf_int32)ctx->scale_type);
+    AMF_RETURN_IF_FALSE(avctx, res == AMF_OK, AVERROR_UNKNOWN, "AMFConverter-SetProperty() failed with error %d\n", res);
+
+    // Select the full-range variant of the chosen base profile when the user
+    // asked for full range; otherwise keep the studio-range profile.
+    switch (ctx->color_profile) {
+    case AMF_VIDEO_CONVERTER_COLOR_PROFILE_601:
+        amf_color_profile = ctx->color_range == AMF_COLOR_RANGE_FULL ?
+                            AMF_VIDEO_CONVERTER_COLOR_PROFILE_FULL_601 :
+                            AMF_VIDEO_CONVERTER_COLOR_PROFILE_601;
+        break;
+    case AMF_VIDEO_CONVERTER_COLOR_PROFILE_709:
+        amf_color_profile = ctx->color_range == AMF_COLOR_RANGE_FULL ?
+                            AMF_VIDEO_CONVERTER_COLOR_PROFILE_FULL_709 :
+                            AMF_VIDEO_CONVERTER_COLOR_PROFILE_709;
+        break;
+    case AMF_VIDEO_CONVERTER_COLOR_PROFILE_2020:
+        amf_color_profile = ctx->color_range == AMF_COLOR_RANGE_FULL ?
+                            AMF_VIDEO_CONVERTER_COLOR_PROFILE_FULL_2020 :
+                            AMF_VIDEO_CONVERTER_COLOR_PROFILE_2020;
+        break;
+    default:
+        amf_color_profile = AMF_VIDEO_CONVERTER_COLOR_PROFILE_UNKNOWN;
+        break;
+    }
+
+    // Undefined/unknown values mean "not set": leave the component defaults.
+    if (amf_color_profile != AMF_VIDEO_CONVERTER_COLOR_PROFILE_UNKNOWN) {
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->component, AMF_VIDEO_CONVERTER_COLOR_PROFILE, amf_color_profile);
+    }
+
+    if (ctx->color_range != AMF_COLOR_RANGE_UNDEFINED) {
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->component, AMF_VIDEO_CONVERTER_OUTPUT_COLOR_RANGE, ctx->color_range);
+    }
+
+    if (ctx->primaries != AMF_COLOR_PRIMARIES_UNDEFINED) {
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->component, AMF_VIDEO_CONVERTER_OUTPUT_COLOR_PRIMARIES, ctx->primaries);
+    }
+
+    if (ctx->trc != AMF_COLOR_TRANSFER_CHARACTERISTIC_UNDEFINED) {
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->component, AMF_VIDEO_CONVERTER_OUTPUT_TRANSFER_CHARACTERISTIC, ctx->trc);
+    }
+
+    res = ctx->component->pVtbl->Init(ctx->component, av_amf_av_to_amf_format(in_format), inlink->w, inlink->h);
+    AMF_RETURN_IF_FALSE(avctx, res == AMF_OK, AVERROR_UNKNOWN, "AMFConverter-Init() failed with error %d\n", res);
+
+    return 0;
+}
+
+#define OFFSET(x) offsetof(AMFFilterContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
+// Options for the vpp_amf filter. "Undefined"/"unknown" defaults mean the
+// corresponding AMF converter property is left untouched (see
+// amf_filter_config_output). Constant names may repeat across different
+// units (e.g. "bt709" under both "primaries" and "trc").
+static const AVOption vpp_amf_options[] = {
+    { "w",              "Output video width",   OFFSET(w_expr),     AV_OPT_TYPE_STRING, { .str = "iw"   }, .flags = FLAGS },
+    { "h",              "Output video height",  OFFSET(h_expr),     AV_OPT_TYPE_STRING, { .str = "ih"   }, .flags = FLAGS },
+    { "format",         "Output pixel format",  OFFSET(format_str), AV_OPT_TYPE_STRING, { .str = "same" }, .flags = FLAGS },
+
+    // Scaling algorithm
+    { "scale_type",     "Scale type",           OFFSET(scale_type),      AV_OPT_TYPE_INT,   { .i64 = AMF_VIDEO_CONVERTER_SCALE_BILINEAR }, AMF_VIDEO_CONVERTER_SCALE_BILINEAR, AMF_VIDEO_CONVERTER_SCALE_BICUBIC, FLAGS, "scale_type" },
+    { "bilinear",       "Bilinear",         0,  AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_CONVERTER_SCALE_BILINEAR }, 0, 0, FLAGS, "scale_type" },
+    { "bicubic",        "Bicubic",          0,  AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_CONVERTER_SCALE_BICUBIC },  0, 0, FLAGS, "scale_type" },
+
+    // Output color profile (combined with color_range in config_output)
+    { "color_profile",  "Color profile",        OFFSET(color_profile), AV_OPT_TYPE_INT,   { .i64 = AMF_VIDEO_CONVERTER_COLOR_PROFILE_UNKNOWN }, AMF_VIDEO_CONVERTER_COLOR_PROFILE_UNKNOWN, AMF_VIDEO_CONVERTER_COLOR_PROFILE_FULL_2020, FLAGS, "color_profile" },
+    { "bt601",          "BT.601",           0,  AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_CONVERTER_COLOR_PROFILE_601 }, 0, 0, FLAGS, "color_profile" },
+    { "bt709",          "BT.709",           0,  AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_CONVERTER_COLOR_PROFILE_709 },  0, 0, FLAGS, "color_profile" },
+    { "bt2020",         "BT.2020",          0,  AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_CONVERTER_COLOR_PROFILE_2020 },  0, 0, FLAGS, "color_profile" },
+
+    // Output color range
+    { "color_range",    "Color range",          OFFSET(color_range),      AV_OPT_TYPE_INT,   { .i64 = AMF_COLOR_RANGE_UNDEFINED }, AMF_COLOR_RANGE_UNDEFINED, AMF_COLOR_RANGE_FULL, FLAGS, "color_range" },
+    { "studio",         "Studio",                   0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_RANGE_STUDIO }, 0, 0, FLAGS, "color_range" },
+    { "full",           "Full",                     0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_RANGE_FULL }, 0, 0, FLAGS, "color_range" },
+
+    // Output color primaries
+    { "primaries",      "Output color primaries",   OFFSET(primaries),  AV_OPT_TYPE_INT,   { .i64 = AMF_COLOR_PRIMARIES_UNDEFINED }, AMF_COLOR_PRIMARIES_UNDEFINED, AMF_COLOR_PRIMARIES_JEDEC_P22, FLAGS, "primaries" },
+    { "bt709",          "BT.709",                   0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_PRIMARIES_BT709 }, 0, 0, FLAGS, "primaries" },
+    { "bt470m",         "BT.470M",                  0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_PRIMARIES_BT470M }, 0, 0, FLAGS, "primaries" },
+    { "bt470bg",        "BT.470BG",                 0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_PRIMARIES_BT470BG }, 0, 0, FLAGS, "primaries" },
+    { "smpte170m",      "SMPTE170M",                0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_PRIMARIES_SMPTE170M }, 0, 0, FLAGS, "primaries" },
+    { "smpte240m",      "SMPTE240M",                0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_PRIMARIES_SMPTE240M }, 0, 0, FLAGS, "primaries" },
+    { "film",           "FILM",                     0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_PRIMARIES_FILM }, 0, 0, FLAGS, "primaries" },
+    { "bt2020",         "BT2020",                   0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_PRIMARIES_BT2020 }, 0, 0, FLAGS, "primaries" },
+    { "smpte428",       "SMPTE428",                 0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_PRIMARIES_SMPTE428 }, 0, 0, FLAGS, "primaries" },
+    { "smpte431",       "SMPTE431",                 0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_PRIMARIES_SMPTE431 }, 0, 0, FLAGS, "primaries" },
+    { "smpte432",       "SMPTE432",                 0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_PRIMARIES_SMPTE432 }, 0, 0, FLAGS, "primaries" },
+    { "jedec-p22",      "JEDEC_P22",                0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_PRIMARIES_JEDEC_P22 }, 0, 0, FLAGS, "primaries" },
+
+    // Output transfer characteristics
+    { "trc",            "Output transfer characteristics",  OFFSET(trc),  AV_OPT_TYPE_INT,   { .i64 = AMF_COLOR_TRANSFER_CHARACTERISTIC_UNDEFINED }, AMF_COLOR_TRANSFER_CHARACTERISTIC_UNDEFINED, AMF_COLOR_TRANSFER_CHARACTERISTIC_ARIB_STD_B67, FLAGS, "trc" },
+    { "bt709",          "BT.709",                   0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_TRANSFER_CHARACTERISTIC_BT709 }, 0, 0, FLAGS, "trc" },
+    { "gamma22",        "GAMMA22",                  0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_TRANSFER_CHARACTERISTIC_GAMMA22 }, 0, 0, FLAGS, "trc" },
+    { "gamma28",        "GAMMA28",                  0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_TRANSFER_CHARACTERISTIC_GAMMA28 }, 0, 0, FLAGS, "trc" },
+    { "smpte170m",      "SMPTE170M",                0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_TRANSFER_CHARACTERISTIC_SMPTE170M }, 0, 0, FLAGS, "trc" },
+    { "smpte240m",      "SMPTE240M",                0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_TRANSFER_CHARACTERISTIC_SMPTE240M }, 0, 0, FLAGS, "trc" },
+    { "linear",         "Linear",                   0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_TRANSFER_CHARACTERISTIC_LINEAR }, 0, 0, FLAGS, "trc" },
+    { "log",            "LOG",                      0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_TRANSFER_CHARACTERISTIC_LOG }, 0, 0, FLAGS, "trc" },
+    { "log-sqrt",       "LOG_SQRT",                 0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_TRANSFER_CHARACTERISTIC_LOG_SQRT }, 0, 0, FLAGS, "trc" },
+    { "iec61966-2-4",   "IEC61966_2_4",             0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_TRANSFER_CHARACTERISTIC_IEC61966_2_4 }, 0, 0, FLAGS, "trc" },
+    { "bt1361-ecg",     "BT1361_ECG",               0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_TRANSFER_CHARACTERISTIC_BT1361_ECG }, 0, 0, FLAGS, "trc" },
+    { "iec61966-2-1",   "IEC61966_2_1",             0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_TRANSFER_CHARACTERISTIC_IEC61966_2_1 }, 0, 0, FLAGS, "trc" },
+    { "bt2020-10",      "BT.2020_10",               0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_TRANSFER_CHARACTERISTIC_BT2020_10 }, 0, 0, FLAGS, "trc" },
+    { "bt2020-12",      "BT.2020-12",               0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_TRANSFER_CHARACTERISTIC_BT2020_12 }, 0, 0, FLAGS, "trc" },
+    { "smpte2084",      "SMPTE2084",                0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_TRANSFER_CHARACTERISTIC_SMPTE2084 }, 0, 0, FLAGS, "trc" },
+    { "smpte428",       "SMPTE428",                 0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_TRANSFER_CHARACTERISTIC_SMPTE428 }, 0, 0, FLAGS, "trc" },
+    { "arib-std-b67",   "ARIB_STD_B67",             0,  AV_OPT_TYPE_CONST, { .i64 = AMF_COLOR_TRANSFER_CHARACTERISTIC_ARIB_STD_B67 }, 0, 0, FLAGS, "trc" },
+
+    // Aspect-ratio handling (same semantics as in the scale filter)
+    { "force_original_aspect_ratio", "decrease or increase w/h if necessary to keep the original AR", OFFSET(force_original_aspect_ratio), AV_OPT_TYPE_INT, { .i64 = 0}, 0, 2, FLAGS, "force_oar" },
+    { "disable",  NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 0 }, 0, 0, FLAGS, "force_oar" },
+    { "decrease", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 1 }, 0, 0, FLAGS, "force_oar" },
+    { "increase", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 2 }, 0, 0, FLAGS, "force_oar" },
+    { "force_divisible_by", "enforce that the output resolution is divisible by a defined integer when force_original_aspect_ratio is used", OFFSET(force_divisible_by), AV_OPT_TYPE_INT, { .i64 = 1}, 1, 256, FLAGS },
+
+    { NULL },
+};
+
+
+AVFILTER_DEFINE_CLASS(vpp_amf);
+
+// Single video input: frames are handled by the shared AMF filter_frame
+// implementation from vf_amf_common.c.
+static const AVFilterPad amf_filter_inputs[] = {
+    {
+        .name         = "default",
+        .type         = AVMEDIA_TYPE_VIDEO,
+        .filter_frame = amf_filter_filter_frame,
+    }
+};
+
+// Single video output: converter creation/configuration happens when the
+// output link is configured.
+static const AVFilterPad amf_filter_outputs[] = {
+    {
+        .name         = "default",
+        .type         = AVMEDIA_TYPE_VIDEO,
+        .config_props = amf_filter_config_output,
+    }
+};
+
+// Filter definition for vpp_amf (AMF scaling and format conversion).
+// Declared const to match FFmpeg's `extern const AVFilter` declarations.
+// FILTER_SINGLE_PIXFMT was removed: it initializes the same formats
+// union/state as FILTER_QUERY_FUNC, so the later initializer silently
+// discarded the query callback and restricted the filter to one format.
+const AVFilter ff_vf_vpp_amf = {
+    .name          = "vpp_amf",
+    .description   = NULL_IF_CONFIG_SMALL("AMF video scaling and format conversion"),
+
+    .init          = amf_filter_init,
+    .uninit        = amf_filter_uninit,
+    FILTER_QUERY_FUNC(&amf_filter_query_formats),
+
+    .priv_size = sizeof(AMFFilterContext),
+    .priv_class = &vpp_amf_class,
+
+    FILTER_INPUTS(amf_filter_inputs),
+    FILTER_OUTPUTS(amf_filter_outputs),
+
+    .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
+    .flags          = AVFILTER_FLAG_HWDEVICE,
+};
-- 
2.39.3 (Apple Git-146)

_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".

^ permalink raw reply	[flat|nested] 17+ messages in thread

* [FFmpeg-devel] [PATCH 10/10, v3] doc/filters: Add documentation for AMF filters
  2024-05-30 13:08 [FFmpeg-devel] [PATCH 01/10, v3] avutil: add hwcontext_amf Dmitrii Ovchinnikov
                   ` (7 preceding siblings ...)
  2024-05-30 13:08 ` [FFmpeg-devel] [PATCH 09/10, v3] avfilter/scale_amf: Add AMF VPP & super resolution filters Dmitrii Ovchinnikov
@ 2024-05-30 13:08 ` Dmitrii Ovchinnikov
  2024-05-30 14:04 ` [FFmpeg-devel] [PATCH 01/10, v3] avutil: add hwcontext_amf Andreas Rheinhardt
                   ` (2 subsequent siblings)
  11 siblings, 0 replies; 17+ messages in thread
From: Dmitrii Ovchinnikov @ 2024-05-30 13:08 UTC (permalink / raw)
  To: ffmpeg-devel; +Cc: Evgeny Pavlov

From: Evgeny Pavlov <lucenticus@gmail.com>

Signed-off-by: Evgeny Pavlov <lucenticus@gmail.com>
---
 doc/filters.texi | 238 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 238 insertions(+)

diff --git a/doc/filters.texi b/doc/filters.texi
index f5bf475d13..78e87ff5f7 100644
--- a/doc/filters.texi
+++ b/doc/filters.texi
@@ -22791,6 +22791,76 @@ input upscaled using bicubic upscaling with proper scale factor.
 
 To get full functionality (such as async execution), please use the @ref{dnn_processing} filter.
 
+@anchor{sr_amf}
+@section sr_amf
+
+Upscale (increase the size of) the input video using the AMD Advanced Media Framework library for hardware acceleration.
+Uses advanced algorithms to achieve higher output quality when upscaling.
+Setting the output width and height works in the same way as for the @ref{scale} filter.
+
+The filter accepts the following options:
+@table @option
+@item w
+@item h
+Set the output video dimension expression. Default value is the input dimension.
+
+Allows for the same expressions as the @ref{scale} filter.
+
+@item algorithm
+Sets the algorithm used for scaling:
+
+@table @var
+@item bilinear
+Bilinear
+
+@item bicubic
+Bicubic
+
+@item sr1-0
+Video SR1.0
+This is the default value.
+
+@item point
+Point
+
+@item sr1-1
+Video SR1.1
+
+@end table
+
+@item sharpness
+Controls the sharpening of the HQ scaler. The value is a float in the range [0.0, 2.0].
+
+@item format
+Controls the output pixel format. By default, or if none is specified, the input
+pixel format is used.
+
+@item keep-ratio
+Force the scaler to keep the aspect ratio of the input image when the output size has a different aspect ratio.
+Default value is false.
+
+@item fill
+Specifies whether the area of the output image outside the region of interest
+(i.e. when the image does not fill the entire output surface) should be filled with a solid color.
+
+@end table
+
+@subsection Examples
+
+@itemize
+@item
+Scale input to 720p, keeping aspect ratio and ensuring the output is yuv420p.
+@example
+sr_amf=-2:720:format=yuv420p
+@end example
+
+@item
+Upscale to 4K with algorithm video SR1.1.
+@example
+sr_amf=4096:2160:algorithm=sr1-1
+@end example
+@end itemize
+
 @section ssim
 
 Obtain the SSIM (Structural SImilarity Metric) between two input videos.
@@ -25528,6 +25598,174 @@ Example:
 ffmpeg -i ref.mpg -vf vmafmotion -f null -
 @end example
 
+@anchor{vpp_amf}
+@section vpp_amf
+
+Scale (resize) and convert the colorspace, transfer characteristics or color primaries of the input video, using the AMD Advanced Media Framework library for hardware acceleration.
+Setting the output width and height works in the same way as for the @ref{scale} filter.
+
+The filter accepts the following options:
+@table @option
+@item w
+@item h
+Set the output video dimension expression. Default value is the input dimension.
+
+Allows for the same expressions as the @ref{scale} filter.
+
+@item scale_type
+Sets the algorithm used for scaling:
+
+@table @var
+@item bilinear
+Bilinear
+
+This is the default.
+
+@item bicubic
+Bicubic
+
+@end table
+
+@item format
+Controls the output pixel format. By default, or if none is specified, the input
+pixel format is used.
+
+
+@item force_original_aspect_ratio
+@item force_divisible_by
+Work the same as the identical @ref{scale} filter options.
+
+@anchor{color_profile}
+@item color_profile
+Specify all color properties at once.
+
+The accepted values are:
+@table @samp
+@item bt601
+BT.601
+
+@item bt709
+BT.709
+
+@item bt2020
+BT.2020
+
+@end table
+
+@anchor{trc}
+@item trc
+Specify output transfer characteristics.
+
+The accepted values are:
+@table @samp
+@item bt709
+BT.709
+
+@item gamma22
+Constant gamma of 2.2
+
+@item gamma28
+Constant gamma of 2.8
+
+@item smpte170m
+SMPTE-170M
+
+@item smpte240m
+SMPTE-240M
+
+@item linear
+Linear
+
+@item log
+LOG
+
+@item log-sqrt
+LOG_SQRT
+
+@item iec61966-2-4
+iec61966-2-4
+
+@item bt1361-ecg
+BT1361_ECG
+
+@item iec61966-2-1
+iec61966-2-1
+
+@item bt2020-10
+BT.2020 for 10-bit content
+
+@item bt2020-12
+BT.2020 for 12-bit content
+
+@item smpte2084
+SMPTE2084
+
+@item smpte428
+SMPTE428
+
+@item arib-std-b67
+ARIB_STD_B67
+
+@end table
+
+@anchor{primaries}
+@item primaries
+Specify output color primaries.
+
+The accepted values are:
+@table @samp
+@item bt709
+BT.709
+
+@item bt470m
+BT.470M
+
+@item bt470bg
+BT.470BG or BT.601-6 625
+
+@item smpte170m
+SMPTE-170M or BT.601-6 525
+
+@item smpte240m
+SMPTE-240M
+
+@item film
+film
+
+@item bt2020
+BT.2020
+
+@item smpte428
+SMPTE-428
+
+@item smpte431
+SMPTE-431
+
+@item smpte432
+SMPTE-432
+
+@item jedec-p22
+JEDEC P22 phosphors
+
+@end table
+@end table
+
+@subsection Examples
+
+@itemize
+@item
+Scale input to 720p, keeping aspect ratio and ensuring the output is yuv420p.
+@example
+vpp_amf=-2:720:format=yuv420p
+@end example
+
+@item
+Upscale to 4K and change color profile to bt2020.
+@example
+vpp_amf=4096:2160:color_profile=bt2020
+@end example
+@end itemize
+
 @anchor{vstack}
 @section vstack
 Stack input videos vertically.
-- 
2.39.3 (Apple Git-146)

_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [FFmpeg-devel] [PATCH 01/10, v3] avutil: add hwcontext_amf.
  2024-05-30 13:08 [FFmpeg-devel] [PATCH 01/10, v3] avutil: add hwcontext_amf Dmitrii Ovchinnikov
                   ` (8 preceding siblings ...)
  2024-05-30 13:08 ` [FFmpeg-devel] [PATCH 10/10, v3] doc/filters: Add documentation for AMF filters Dmitrii Ovchinnikov
@ 2024-05-30 14:04 ` Andreas Rheinhardt
  2024-05-30 14:34 ` Lynne via ffmpeg-devel
  2024-06-04 18:58 ` Mark Thompson
  11 siblings, 0 replies; 17+ messages in thread
From: Andreas Rheinhardt @ 2024-05-30 14:04 UTC (permalink / raw)
  To: ffmpeg-devel

Dmitrii Ovchinnikov:
> Adds hwcontext_amf, which allows the use of a shared AMF
> context for the encoder, decoder and AMF-based filters,
> without copying to host memory.
> It will also allow you to use some optimisations in
> the interaction of components (for example, SAV) and make a more
> manageable and optimal setup for using GPU devices with AMF
> in the case of a fully AMF pipeline.
> It will be a significant performance uplift when full AMF pipeline
> with filters is used.
> 
> We also plan to add a compression artefact removal filter in the near future.
> v2: cleanup header files
> v3: an unnecessary class has been removed.
> ---
>  libavutil/Makefile                 |   4 +
>  libavutil/hwcontext.c              |   4 +
>  libavutil/hwcontext.h              |   1 +
>  libavutil/hwcontext_amf.c          | 585 +++++++++++++++++++++++++++++
>  libavutil/hwcontext_amf.h          |  64 ++++
>  libavutil/hwcontext_amf_internal.h |  44 +++
>  libavutil/hwcontext_internal.h     |   1 +
>  libavutil/pixdesc.c                |   4 +
>  libavutil/pixfmt.h                 |   5 +
>  9 files changed, 712 insertions(+)
>  create mode 100644 libavutil/hwcontext_amf.c
>  create mode 100644 libavutil/hwcontext_amf.h
>  create mode 100644 libavutil/hwcontext_amf_internal.h
> 
> diff --git a/libavutil/Makefile b/libavutil/Makefile
> index 6e6fa8d800..13c318560d 100644
> --- a/libavutil/Makefile
> +++ b/libavutil/Makefile
> @@ -45,6 +45,7 @@ HEADERS = adler32.h                                                     \
>            hwcontext_d3d12va.h                                           \
>            hwcontext_drm.h                                               \
>            hwcontext_dxva2.h                                             \
> +          hwcontext_amf.h                                               \
>            hwcontext_qsv.h                                               \
>            hwcontext_mediacodec.h                                        \
>            hwcontext_opencl.h                                            \
> @@ -196,6 +197,7 @@ OBJS-$(CONFIG_CUDA)                     += hwcontext_cuda.o
>  OBJS-$(CONFIG_D3D11VA)                  += hwcontext_d3d11va.o
>  OBJS-$(CONFIG_D3D12VA)                  += hwcontext_d3d12va.o
>  OBJS-$(CONFIG_DXVA2)                    += hwcontext_dxva2.o
> +OBJS-$(CONFIG_AMF)                      += hwcontext_amf.o
>  OBJS-$(CONFIG_LIBDRM)                   += hwcontext_drm.o
>  OBJS-$(CONFIG_MACOS_KPERF)              += macos_kperf.o
>  OBJS-$(CONFIG_MEDIACODEC)               += hwcontext_mediacodec.o
> @@ -220,6 +222,8 @@ SKIPHEADERS-$(CONFIG_CUDA)             += hwcontext_cuda_internal.h     \
>  SKIPHEADERS-$(CONFIG_D3D11VA)          += hwcontext_d3d11va.h
>  SKIPHEADERS-$(CONFIG_D3D12VA)          += hwcontext_d3d12va.h
>  SKIPHEADERS-$(CONFIG_DXVA2)            += hwcontext_dxva2.h
> +SKIPHEADERS-$(CONFIG_AMF)              += hwcontext_amf.h               \
> +                                          hwcontext_amf_internal
>  SKIPHEADERS-$(CONFIG_QSV)              += hwcontext_qsv.h
>  SKIPHEADERS-$(CONFIG_OPENCL)           += hwcontext_opencl.h
>  SKIPHEADERS-$(CONFIG_VAAPI)            += hwcontext_vaapi.h
> diff --git a/libavutil/hwcontext.c b/libavutil/hwcontext.c
> index fa99a0d8a4..f06d49c45c 100644
> --- a/libavutil/hwcontext.c
> +++ b/libavutil/hwcontext.c
> @@ -65,6 +65,9 @@ static const HWContextType * const hw_table[] = {
>  #endif
>  #if CONFIG_VULKAN
>      &ff_hwcontext_type_vulkan,
> +#endif
> +#if CONFIG_AMF
> +    &ff_hwcontext_type_amf,
>  #endif
>      NULL,
>  };
> @@ -82,6 +85,7 @@ static const char *const hw_type_names[] = {
>      [AV_HWDEVICE_TYPE_VIDEOTOOLBOX] = "videotoolbox",
>      [AV_HWDEVICE_TYPE_MEDIACODEC] = "mediacodec",
>      [AV_HWDEVICE_TYPE_VULKAN] = "vulkan",
> +    [AV_HWDEVICE_TYPE_AMF] = "amf",
>  };
>  
>  typedef struct FFHWDeviceContext {
> diff --git a/libavutil/hwcontext.h b/libavutil/hwcontext.h
> index bac30debae..96042ba197 100644
> --- a/libavutil/hwcontext.h
> +++ b/libavutil/hwcontext.h
> @@ -38,6 +38,7 @@ enum AVHWDeviceType {
>      AV_HWDEVICE_TYPE_MEDIACODEC,
>      AV_HWDEVICE_TYPE_VULKAN,
>      AV_HWDEVICE_TYPE_D3D12VA,
> +    AV_HWDEVICE_TYPE_AMF,
>  };
>  
>  /**
> diff --git a/libavutil/hwcontext_amf.c b/libavutil/hwcontext_amf.c
> new file mode 100644
> index 0000000000..1c589669e1
> --- /dev/null
> +++ b/libavutil/hwcontext_amf.c
> @@ -0,0 +1,585 @@
> +/*
> + * This file is part of FFmpeg.
> + *
> + * FFmpeg is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU Lesser General Public
> + * License as published by the Free Software Foundation; either
> + * version 2.1 of the License, or (at your option) any later version.
> + *
> + * FFmpeg is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> + * Lesser General Public License for more details.
> + *
> + * You should have received a copy of the GNU Lesser General Public
> + * License along with FFmpeg; if not, write to the Free Software
> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
> + */
> +
> +#include "buffer.h"
> +#include "common.h"
> +#include "hwcontext.h"
> +#include "hwcontext_amf.h"
> +#include "hwcontext_internal.h"
> +#include "hwcontext_amf_internal.h"
> +#if CONFIG_VULKAN
> +#include "hwcontext_vulkan.h"
> +#endif
> +#if CONFIG_D3D11VA
> +#include "libavutil/hwcontext_d3d11va.h"
> +#endif
> +#if CONFIG_DXVA2
> +#define COBJMACROS
> +#include "libavutil/hwcontext_dxva2.h"
> +#endif
> +#include "mem.h"
> +#include "pixdesc.h"
> +#include "pixfmt.h"
> +#include "imgutils.h"
> +#include "libavutil/avassert.h"
> +#include <AMF/core/Surface.h>
> +#include <AMF/core/Trace.h>
> +#ifdef _WIN32
> +#include "compat/w32dlfcn.h"
> +#else
> +#include <dlfcn.h>
> +#endif
> +#define FFMPEG_AMF_WRITER_ID L"ffmpeg_amf"
> +int av_amf_load_library(AVAMFDeviceContext* amf_ctx,  void* avcl);
> +
> +typedef struct AMFFramesContext {
> +    AMFSurface * surfaces;
> +    int            nb_surfaces;
> +} AMFFramesContext;
> +
> +typedef struct AmfTraceWriter {
> +    AMFTraceWriterVtbl  *vtbl;
> +    void                *avctx;
> +} AmfTraceWriter;
> +
> +static void AMF_CDECL_CALL AMFTraceWriter_Write(AMFTraceWriter *pThis,
> +    const wchar_t *scope, const wchar_t *message)
> +{
> +    AmfTraceWriter *tracer = (AmfTraceWriter*)pThis;
> +    av_log(tracer->avctx, AV_LOG_DEBUG, "%ls: %ls", scope, message); // \n is provided from AMF
> +}
> +
> +static void AMF_CDECL_CALL AMFTraceWriter_Flush(AMFTraceWriter *pThis)
> +{
> +}
> +
> +AmfTraceWriter * amf_writer_alloc(void  *avctx)

This function should be declared as static (in fact, how does this even
compile given that we add -Werror=missing-prototypes for both GCC and
clang?). The same applies to several other functions here.

> +{
> +    AmfTraceWriter * writer = av_mallocz(sizeof(AmfTraceWriter));
> +    if (!writer)
> +        return NULL;
> +    writer->vtbl = av_mallocz(sizeof(AmfTraceWriter));

1. Wrong sizeof.
2. I don't see a reason why there should be separate allocations at all: Use
struct AmfTraceWriter {
    AMFTraceWriterVtbl *vtblp;
    void               *avctx;
    AMFTraceWriterVtbl  vtbl;
} AmfTraceWriter;
and make vtblp point to vtbl.

> +    if (writer->vtbl) {
> +        av_freep(&writer);
> +        return NULL;
> +    }
> +    writer->vtbl->Write = AMFTraceWriter_Write;
> +    writer->vtbl->Flush = AMFTraceWriter_Flush;
> +    writer->avctx = avctx;
> +    return writer;
> +}
> +
> +void amf_writer_free(void  *opaque)
> +{
> +    AmfTraceWriter *writer = (AmfTraceWriter *)opaque;
> +    av_freep(&writer->vtbl);
> +    av_freep(&writer);
> +}
> +
> +
> +typedef struct AVAMFFormatMap {
> +    enum AVPixelFormat       av_format;
> +    enum AMF_SURFACE_FORMAT  amf_format;
> +} FormatMap;
> +
> +const FormatMap format_map[] =
> +{
> +    { AV_PIX_FMT_NONE,          AMF_SURFACE_UNKNOWN },
> +    { AV_PIX_FMT_NV12,          AMF_SURFACE_NV12 },
> +    { AV_PIX_FMT_BGR0,          AMF_SURFACE_BGRA },
> +    { AV_PIX_FMT_RGB0,          AMF_SURFACE_RGBA },
> +    { AV_PIX_FMT_BGRA,          AMF_SURFACE_BGRA },
> +    { AV_PIX_FMT_ARGB,          AMF_SURFACE_ARGB },
> +    { AV_PIX_FMT_RGBA,          AMF_SURFACE_RGBA },
> +    { AV_PIX_FMT_GRAY8,         AMF_SURFACE_GRAY8 },
> +    { AV_PIX_FMT_YUV420P,       AMF_SURFACE_YUV420P },
> +    { AV_PIX_FMT_YUYV422,       AMF_SURFACE_YUY2 },
> +    { AV_PIX_FMT_P010,          AMF_SURFACE_P010 },
> +};
> +
> +enum AMF_SURFACE_FORMAT av_amf_av_to_amf_format(enum AVPixelFormat fmt)
> +{
> +    int i;
> +    for (i = 0; i < amf_countof(format_map); i++) {
> +        if (format_map[i].av_format == fmt) {
> +            return format_map[i].amf_format;
> +        }
> +    }
> +    return AMF_SURFACE_UNKNOWN;
> +}
> +
> +enum AVPixelFormat av_amf_to_av_format(enum AMF_SURFACE_FORMAT fmt)
> +{
> +    int i;
> +    for (i = 0; i < amf_countof(format_map); i++) {
> +        if (format_map[i].amf_format == fmt) {
> +            return format_map[i].av_format;
> +        }
> +    }
> +    return AMF_SURFACE_UNKNOWN;
> +}
> +
> +static const enum AVPixelFormat supported_formats[] = {
> +    AV_PIX_FMT_NV12,
> +    AV_PIX_FMT_YUV420P,
> +    AV_PIX_FMT_BGRA,
> +    AV_PIX_FMT_P010,
> +    AV_PIX_FMT_YUV420P10,
> +#if CONFIG_D3D11VA
> +    AV_PIX_FMT_D3D11,
> +#endif
> +#if CONFIG_DXVA2
> +    AV_PIX_FMT_DXVA2_VLD,
> +#endif
> +    AV_PIX_FMT_AMF_SURFACE
> +};
> +
> +static int amf_frames_get_constraints(AVHWDeviceContext *ctx,
> +                                       const void *hwconfig,
> +                                       AVHWFramesConstraints *constraints)
> +{
> +    int i;
> +
> +    constraints->valid_sw_formats = av_malloc_array(FF_ARRAY_ELEMS(supported_formats) + 1,
> +                                                    sizeof(*constraints->valid_sw_formats));
> +    if (!constraints->valid_sw_formats)
> +        return AVERROR(ENOMEM);
> +
> +    for (i = 0; i < FF_ARRAY_ELEMS(supported_formats); i++)
> +        constraints->valid_sw_formats[i] = supported_formats[i];
> +    constraints->valid_sw_formats[FF_ARRAY_ELEMS(supported_formats)] = AV_PIX_FMT_NONE;
> +
> +    constraints->valid_hw_formats = av_malloc_array(2, sizeof(*constraints->valid_hw_formats));
> +    if (!constraints->valid_hw_formats)
> +        return AVERROR(ENOMEM);
> +
> +    constraints->valid_hw_formats[0] = AV_PIX_FMT_AMF_SURFACE;
> +    constraints->valid_hw_formats[1] = AV_PIX_FMT_NONE;
> +
> +    return 0;
> +}
> +
> +static void amf_dummy_free(void *opaque, uint8_t *data)
> +{
> +
> +}
> +
> +static AVBufferRef *amf_pool_alloc(void *opaque, size_t size)
> +{
> +    AVHWFramesContext *hwfc = (AVHWFramesContext *)opaque;
> +    AVBufferRef *buf;
> +
> +    buf = av_buffer_create(NULL, NULL, amf_dummy_free, hwfc, AV_BUFFER_FLAG_READONLY);
> +    if (!buf) {
> +        av_log(hwfc, AV_LOG_ERROR, "Failed to create buffer for AMF context.\n");
> +        return NULL;
> +    }
> +    return buf;
> +}
> +
> +static int amf_frames_init(AVHWFramesContext *ctx)
> +{
> +    int i;
> +
> +    for (i = 0; i < FF_ARRAY_ELEMS(supported_formats); i++) {
> +        if (ctx->sw_format == supported_formats[i])
> +            break;
> +    }
> +    if (i == FF_ARRAY_ELEMS(supported_formats)) {
> +        av_log(ctx, AV_LOG_ERROR, "Pixel format '%s' is not supported\n",
> +               av_get_pix_fmt_name(ctx->sw_format));
> +        return AVERROR(ENOSYS);
> +    }
> +
> +    ffhwframesctx(ctx)->pool_internal =
> +            av_buffer_pool_init2(sizeof(AMFSurface), ctx,
> +                                 &amf_pool_alloc, NULL);
> +
> +    return 0;
> +}
> +
> +int amf_context_create(  AVAMFDeviceContext * amf_ctx,
> +                                void* avcl,
> +                                const char *device,
> +                                AVDictionary *opts, int flags)
> +{
> +    AMF_RESULT         res;
> +
> +    amf_ctx->trace->pVtbl->EnableWriter(amf_ctx->trace, AMF_TRACE_WRITER_CONSOLE, 0);
> +    amf_ctx->trace->pVtbl->SetGlobalLevel(amf_ctx->trace, AMF_TRACE_TRACE);
> +
> +     // connect AMF logger to av_log
> +    amf_ctx->trace_writer = amf_writer_alloc(avcl);
> +    amf_ctx->trace->pVtbl->RegisterWriter(amf_ctx->trace, FFMPEG_AMF_WRITER_ID, (AMFTraceWriter*)amf_ctx->trace_writer, 1);
> +    amf_ctx->trace->pVtbl->SetWriterLevel(amf_ctx->trace, FFMPEG_AMF_WRITER_ID, AMF_TRACE_TRACE);
> +
> +    res = amf_ctx->factory->pVtbl->CreateContext(amf_ctx->factory, &amf_ctx->context);
> +    AMF_RETURN_IF_FALSE(avcl, res == AMF_OK, AVERROR_UNKNOWN, "CreateContext() failed with error %d\n", res);
> +
> +    return 0;
> +}
> +
> +static int amf_get_buffer(AVHWFramesContext *ctx, AVFrame *frame)
> +{
> +    frame->buf[0] = av_buffer_pool_get(ctx->pool);
> +    if (!frame->buf[0])
> +        return AVERROR(ENOMEM);
> +
> +    frame->data[0] = frame->buf[0]->data;
> +    frame->format  = AV_PIX_FMT_AMF_SURFACE;
> +    frame->width   = ctx->width;
> +    frame->height  = ctx->height;
> +    return 0;
> +}
> +
> +static int amf_transfer_get_formats(AVHWFramesContext *ctx,
> +                                     enum AVHWFrameTransferDirection dir,
> +                                     enum AVPixelFormat **formats)
> +{
> +    enum AVPixelFormat *fmts;
> +
> +    fmts = av_malloc_array(2, sizeof(*fmts));
> +    if (!fmts)
> +        return AVERROR(ENOMEM);
> +
> +    fmts[0] = ctx->sw_format;
> +    fmts[1] = AV_PIX_FMT_NONE;
> +
> +    *formats = fmts;
> +
> +    return 0;
> +}
> +
> +int av_amf_transfer_data_to(AVHWFramesContext *ctx, AVFrame *dst,
> +                                 const AVFrame *src)
> +{
> +    AMFSurface* surface = (AMFSurface*)dst->data[0];
> +    AMFPlane *plane;
> +    uint8_t  *dst_data[4];
> +    int       dst_linesize[4];
> +    int       planes;
> +    int       i;
> +    int w = FFMIN(dst->width,  src->width);
> +    int h = FFMIN(dst->height, src->height);
> +
> +    planes = (int)surface->pVtbl->GetPlanesCount(surface);
> +    av_assert0(planes < FF_ARRAY_ELEMS(dst_data));
> +
> +    for (i = 0; i < planes; i++) {
> +        plane = surface->pVtbl->GetPlaneAt(surface, i);
> +        dst_data[i] = plane->pVtbl->GetNative(plane);
> +        dst_linesize[i] = plane->pVtbl->GetHPitch(plane);
> +    }
> +    av_image_copy(dst_data, dst_linesize,
> +        (const uint8_t**)src->data, src->linesize, src->format,
> +        w, h);

av_image_copy2

> +
> +    return 0;
> +}
> +int av_amf_transfer_data_from(AVHWFramesContext *ctx, AVFrame *dst,
> +                                    const AVFrame *src)
> +{
> +    AMFSurface* surface = (AMFSurface*)src->data[0];
> +    AMFPlane *plane;
> +    uint8_t  *src_data[4];
> +    int       src_linesize[4];
> +    int       planes;
> +    int       i;
> +    int w = FFMIN(dst->width,  src->width);
> +    int h = FFMIN(dst->height, src->height);
> +    int ret;
> +
> +    ret = surface->pVtbl->Convert(surface, AMF_MEMORY_HOST);
> +    AMF_RETURN_IF_FALSE(ctx, ret == AMF_OK, AVERROR_UNKNOWN, "Convert(amf::AMF_MEMORY_HOST) failed with error %d\n", AVERROR_UNKNOWN);
> +
> +    planes = (int)surface->pVtbl->GetPlanesCount(surface);
> +    av_assert0(planes < FF_ARRAY_ELEMS(src_data));
> +
> +    for (i = 0; i < planes; i++) {
> +        plane = surface->pVtbl->GetPlaneAt(surface, i);
> +        src_data[i] = plane->pVtbl->GetNative(plane);
> +        src_linesize[i] = plane->pVtbl->GetHPitch(plane);
> +    }
> +    av_image_copy(dst->data, dst->linesize,
> +                  (const uint8_t **)src_data, src_linesize, dst->format,
> +                  w, h);
> +    surface->pVtbl->Release(surface);
> +    return 0;
> +}
> +
> +
> +static void amf_device_uninit(AVHWDeviceContext *device_ctx)
> +{
> +    AVAMFDeviceContext      *amf_ctx = device_ctx->hwctx;
> +    av_amf_context_free(0, (uint8_t *)amf_ctx);
> +}
> +
> +static int amf_device_init(AVHWDeviceContext *ctx)
> +{
> +    AVAMFDeviceContext *amf_ctx = ctx->hwctx;
> +    return av_amf_context_init(amf_ctx, ctx);
> +}
> +
> +static int amf_device_create(AVHWDeviceContext *device_ctx,
> +                              const char *device,
> +                              AVDictionary *opts, int flags)
> +{
> +    AVAMFDeviceContext        *ctx = device_ctx->hwctx;
> +    int ret;
> +    if ((ret = av_amf_load_library(ctx, device_ctx)) == 0) {
> +        if ((ret = amf_context_create(ctx, device_ctx, "", opts, flags)) == 0){
> +            return 0;
> +        }
> +    }
> +    amf_device_uninit(device_ctx);
> +    return ret;
> +}
> +
> +static int amf_device_derive(AVHWDeviceContext *device_ctx,
> +                              AVHWDeviceContext *child_device_ctx, AVDictionary *opts,
> +                              int flags)
> +{
> +    AVAMFDeviceContext        *ctx = device_ctx->hwctx;
> +    int ret;
> +
> +    ret = amf_device_create(device_ctx, "", opts, flags);
> +    if(ret < 0)
> +        return ret;
> +
> +    return av_amf_context_derive(ctx, child_device_ctx, opts, flags);
> +}
> +
> +#if CONFIG_DXVA2
> +static int amf_init_from_dxva2_device(AVAMFDeviceContext * amf_ctx, AVDXVA2DeviceContext *hwctx)
> +{
> +    IDirect3DDevice9    *device;
> +    HANDLE              device_handle;
> +    HRESULT             hr;
> +    AMF_RESULT          res;
> +    int ret;
> +
> +    hr = IDirect3DDeviceManager9_OpenDeviceHandle(hwctx->devmgr, &device_handle);
> +    if (FAILED(hr)) {
> +        av_log(hwctx, AV_LOG_ERROR, "Failed to open device handle for Direct3D9 device: %lx.\n", (unsigned long)hr);
> +        return AVERROR_EXTERNAL;
> +    }
> +
> +    hr = IDirect3DDeviceManager9_LockDevice(hwctx->devmgr, device_handle, &device, FALSE);
> +    if (SUCCEEDED(hr)) {
> +        IDirect3DDeviceManager9_UnlockDevice(hwctx->devmgr, device_handle, FALSE);
> +        ret = 0;
> +    } else {
> +        av_log(hwctx, AV_LOG_ERROR, "Failed to lock device handle for Direct3D9 device: %lx.\n", (unsigned long)hr);
> +        ret = AVERROR_EXTERNAL;
> +    }
> +
> +
> +    IDirect3DDeviceManager9_CloseDeviceHandle(hwctx->devmgr, device_handle);
> +
> +    if (ret < 0)
> +        return ret;
> +
> +    res = amf_ctx->context->pVtbl->InitDX9(amf_ctx->context, device);
> +
> +    IDirect3DDevice9_Release(device);
> +
> +    if (res != AMF_OK) {
> +        if (res == AMF_NOT_SUPPORTED)
> +            av_log(hwctx, AV_LOG_ERROR, "AMF via D3D9 is not supported on the given device.\n");
> +        else
> +            av_log(hwctx, AV_LOG_ERROR, "AMF failed to initialise on given D3D9 device: %d.\n", res);
> +        return AVERROR(ENODEV);
> +    }
> +    amf_ctx->mem_type = AMF_MEMORY_DX9;
> +    return 0;
> +}
> +#endif
> +
> +#if CONFIG_D3D11VA
> +static int amf_init_from_d3d11_device(AVAMFDeviceContext* amf_ctx, AVD3D11VADeviceContext *hwctx)
> +{
> +    AMF_RESULT res;
> +    res = amf_ctx->context->pVtbl->InitDX11(amf_ctx->context, hwctx->device, AMF_DX11_1);
> +    if (res != AMF_OK) {
> +        if (res == AMF_NOT_SUPPORTED)
> +            av_log(hwctx, AV_LOG_ERROR, "AMF via D3D11 is not supported on the given device.\n");
> +        else
> +            av_log(hwctx, AV_LOG_ERROR, "AMF failed to initialise on the given D3D11 device: %d.\n", res);
> +        return AVERROR(ENODEV);
> +    }
> +    amf_ctx->mem_type = AMF_MEMORY_DX11;
> +    return 0;
> +}
> +#endif
> +
> +int av_amf_context_init(AVAMFDeviceContext* amf_ctx, void* avcl)
> +{
> +     AMFContext1 *context1 = NULL;
> +     AMF_RESULT res;
> +
> +    res = amf_ctx->context->pVtbl->InitDX11(amf_ctx->context, NULL, AMF_DX11_1);
> +    if (res == AMF_OK) {
> +        amf_ctx->mem_type = AMF_MEMORY_DX11;
> +        av_log(avcl, AV_LOG_VERBOSE, "AMF initialisation succeeded via D3D11.\n");
> +    } else {
> +        res = amf_ctx->context->pVtbl->InitDX9(amf_ctx->context, NULL);
> +        if (res == AMF_OK) {
> +            amf_ctx->mem_type = AMF_MEMORY_DX9;
> +            av_log(avcl, AV_LOG_VERBOSE, "AMF initialisation succeeded via D3D9.\n");
> +        } else {
> +            AMFGuid guid = IID_AMFContext1();
> +            res = amf_ctx->context->pVtbl->QueryInterface(amf_ctx->context, &guid, (void**)&context1);
> +            AMF_RETURN_IF_FALSE(avcl, res == AMF_OK, AVERROR_UNKNOWN, "CreateContext1() failed with error %d\n", res);
> +
> +            res = context1->pVtbl->InitVulkan(context1, NULL);
> +            context1->pVtbl->Release(context1);
> +            if (res != AMF_OK) {
> +                if (res == AMF_NOT_SUPPORTED)
> +                    av_log(avcl, AV_LOG_ERROR, "AMF via Vulkan is not supported on the given device.\n");
> +                 else
> +                    av_log(avcl, AV_LOG_ERROR, "AMF failed to initialise on the given Vulkan device: %d.\n", res);
> +                 return AVERROR(ENOSYS);
> +            }
> +            amf_ctx->mem_type = AMF_MEMORY_VULKAN;
> +            av_log(avcl, AV_LOG_VERBOSE, "AMF initialisation succeeded via Vulkan.\n");
> +         }
> +     }
> +     return 0;
> +}
> +int av_amf_load_library(AVAMFDeviceContext* amf_ctx,  void* avcl)

This function will be exported due to its name, although it is not
declared in a public header. Rename it. In fact, make it static.

> +{
> +    AMFInit_Fn         init_fun;
> +    AMFQueryVersion_Fn version_fun;
> +    AMF_RESULT         res;
> +
> +    amf_ctx->library = dlopen(AMF_DLL_NAMEA, RTLD_NOW | RTLD_LOCAL);
> +    AMF_RETURN_IF_FALSE(avcl, amf_ctx->library != NULL,
> +        AVERROR_UNKNOWN, "DLL %s failed to open\n", AMF_DLL_NAMEA);
> +
> +    init_fun = (AMFInit_Fn)dlsym(amf_ctx->library, AMF_INIT_FUNCTION_NAME);
> +    AMF_RETURN_IF_FALSE(avcl, init_fun != NULL, AVERROR_UNKNOWN, "DLL %s failed to find function %s\n", AMF_DLL_NAMEA, AMF_INIT_FUNCTION_NAME);
> +
> +    version_fun = (AMFQueryVersion_Fn)dlsym(amf_ctx->library, AMF_QUERY_VERSION_FUNCTION_NAME);
> +    AMF_RETURN_IF_FALSE(avcl, version_fun != NULL, AVERROR_UNKNOWN, "DLL %s failed to find function %s\n", AMF_DLL_NAMEA, AMF_QUERY_VERSION_FUNCTION_NAME);
> +
> +    res = version_fun(&amf_ctx->version);
> +    AMF_RETURN_IF_FALSE(avcl, res == AMF_OK, AVERROR_UNKNOWN, "%s failed with error %d\n", AMF_QUERY_VERSION_FUNCTION_NAME, res);
> +    res = init_fun(AMF_FULL_VERSION, &amf_ctx->factory);
> +    AMF_RETURN_IF_FALSE(avcl, res == AMF_OK, AVERROR_UNKNOWN, "%s failed with error %d\n", AMF_INIT_FUNCTION_NAME, res);
> +    res = amf_ctx->factory->pVtbl->GetTrace(amf_ctx->factory, &amf_ctx->trace);
> +    AMF_RETURN_IF_FALSE(avcl, res == AMF_OK, AVERROR_UNKNOWN, "GetTrace() failed with error %d\n", res);
> +    res = amf_ctx->factory->pVtbl->GetDebug(amf_ctx->factory, &amf_ctx->debug);
> +    AMF_RETURN_IF_FALSE(avcl, res == AMF_OK, AVERROR_UNKNOWN, "GetDebug() failed with error %d\n", res);
> +    return 0;
> +}
> +
> +int av_amf_context_derive(AVAMFDeviceContext * amf_ctx,
> +                               AVHWDeviceContext *child_device_ctx, AVDictionary *opts,
> +                               int flags)
> +{
> +
> +    switch (child_device_ctx->type) {
> +
> +#if CONFIG_DXVA2
> +    case AV_HWDEVICE_TYPE_DXVA2:
> +        {
> +            AVDXVA2DeviceContext *child_device_hwctx = child_device_ctx->hwctx;
> +            return amf_init_from_dxva2_device(amf_ctx, child_device_hwctx);
> +        }
> +        break;
> +#endif
> +
> +#if CONFIG_D3D11VA
> +    case AV_HWDEVICE_TYPE_D3D11VA:
> +        {
> +            AVD3D11VADeviceContext *child_device_hwctx = child_device_ctx->hwctx;
> +            return amf_init_from_d3d11_device(amf_ctx, child_device_hwctx);
> +        }
> +        break;
> +#endif
> +    default:
> +        {
> +            av_log(child_device_ctx, AV_LOG_ERROR, "AMF initialisation from a %s device is not supported.\n",
> +                av_hwdevice_get_type_name(child_device_ctx->type));
> +            return AVERROR(ENOSYS);
> +        }
> +    }
> +    return 0;
> +}
> +
> +int av_amf_context_create(AVAMFDeviceContext * context,
> +                            void* avcl,
> +                            const char *device,
> +                            AVDictionary *opts, int flags)
> +{
> +    int ret;
> +    if ((ret = av_amf_load_library(context, avcl)) == 0) {
> +        if ((ret = amf_context_create(context, avcl, "", opts, flags)) == 0){
> +            return 0;
> +        }
> +    }
> +    av_amf_context_free(0, (uint8_t *)context);
> +    return ret;
> +}
> +
> +void av_amf_context_free(void *opaque, uint8_t *data)
> +{
> +    AVAMFDeviceContext *amf_ctx = (AVAMFDeviceContext *)data;
> +    if (amf_ctx->context) {
> +        amf_ctx->context->pVtbl->Terminate(amf_ctx->context);
> +        amf_ctx->context->pVtbl->Release(amf_ctx->context);
> +        amf_ctx->context = NULL;
> +    }
> +
> +    if (amf_ctx->trace) {
> +        amf_ctx->trace->pVtbl->UnregisterWriter(amf_ctx->trace, FFMPEG_AMF_WRITER_ID);
> +    }
> +
> +    if(amf_ctx->library) {
> +        dlclose(amf_ctx->library);
> +        amf_ctx->library = NULL;
> +    }
> +    if (amf_ctx->trace_writer) {
> +        amf_writer_free(amf_ctx->trace_writer);
> +    }
> +
> +    amf_ctx->debug = NULL;
> +    amf_ctx->version = 0;
> +}
> +
> +
> +const HWContextType ff_hwcontext_type_amf = {
> +    .type                 = AV_HWDEVICE_TYPE_AMF,
> +    .name                 = "AMF",
> +
> +    .device_hwctx_size    = sizeof(AVAMFDeviceContext),
> +    .frames_hwctx_size    = sizeof(AMFFramesContext),
> +
> +    .device_create        = amf_device_create,
> +    .device_derive        = amf_device_derive,
> +    .device_init          = amf_device_init,
> +    .device_uninit        = amf_device_uninit,
> +    .frames_get_constraints = amf_frames_get_constraints,
> +    .frames_init          = amf_frames_init,
> +    .frames_get_buffer    = amf_get_buffer,
> +    .transfer_get_formats = amf_transfer_get_formats,
> +    .transfer_data_to     = av_amf_transfer_data_to,
> +    .transfer_data_from   = av_amf_transfer_data_from,
> +
> +    .pix_fmts             = (const enum AVPixelFormat[]){ AV_PIX_FMT_AMF_SURFACE, AV_PIX_FMT_NONE },
> +};
> diff --git a/libavutil/hwcontext_amf_internal.h b/libavutil/hwcontext_amf_internal.h
> new file mode 100644
> index 0000000000..b991f357a6
> --- /dev/null
> +++ b/libavutil/hwcontext_amf_internal.h
> @@ -0,0 +1,44 @@
> +/*
> + * This file is part of FFmpeg.
> + *
> + * FFmpeg is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU Lesser General Public
> + * License as published by the Free Software Foundation; either
> + * version 2.1 of the License, or (at your option) any later version.
> + *
> + * FFmpeg is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> + * Lesser General Public License for more details.
> + *
> + * You should have received a copy of the GNU Lesser General Public
> + * License along with FFmpeg; if not, write to the Free Software
> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
> + */
> +
> +
> +#ifndef AVUTIL_HWCONTEXT_AMF_INTERNAL_H
> +#define AVUTIL_HWCONTEXT_AMF_INTERNAL_H
> +#include <AMF/core/Factory.h>
> +#include <AMF/core/Context.h>
> +
> +/**
> +* Error handling helper
> +*/
> +#define AMF_RETURN_IF_FALSE(avctx, exp, ret_value, /*message,*/ ...) \
> +    if (!(exp)) { \
> +        av_log(avctx, AV_LOG_ERROR, __VA_ARGS__); \
> +        return ret_value; \
> +    }
> +
> +#define AMF_GOTO_FAIL_IF_FALSE(avctx, exp, ret_value, /*message,*/ ...) \
> +    if (!(exp)) { \
> +        av_log(avctx, AV_LOG_ERROR, __VA_ARGS__); \
> +        ret = ret_value; \
> +        goto fail; \
> +    }
> +
> +#define AMF_TIME_BASE_Q          (AVRational){1, AMF_SECOND}
> +
> +
> +#endif /* AVUTIL_HWCONTEXT_AMF_INTERNAL_H */
> \ No newline at end of file

IIRC this is against the C spec.

_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [FFmpeg-devel] [PATCH 01/10, v3] avutil: add hwcontext_amf.
  2024-05-30 13:08 [FFmpeg-devel] [PATCH 01/10, v3] avutil: add hwcontext_amf Dmitrii Ovchinnikov
                   ` (9 preceding siblings ...)
  2024-05-30 14:04 ` [FFmpeg-devel] [PATCH 01/10, v3] avutil: add hwcontext_amf Andreas Rheinhardt
@ 2024-05-30 14:34 ` Lynne via ffmpeg-devel
  2024-05-30 16:06   ` Dmitrii Ovchinnikov
  2024-06-04 18:58 ` Mark Thompson
  11 siblings, 1 reply; 17+ messages in thread
From: Lynne via ffmpeg-devel @ 2024-05-30 14:34 UTC (permalink / raw)
  To: ffmpeg-devel; +Cc: Lynne


[-- Attachment #1.1.1.1: Type: text/plain, Size: 1379 bytes --]

On 30/05/2024 15:08, Dmitrii Ovchinnikov wrote:
> Adds hwcontext_amf, which allows to use shared AMF
> context for the encoder, decoder and AMF-based filters,
> without copy to the host memory.
> It will also allow you to use some optimisations in
> the interaction of components (for example, SAV) and make a more
> manageable and optimal setup for using GPU devices with AMF
> in the case of a fully AMF pipeline.
> It will be a significant performance uplift when full AMF pipeline
> with filters is used.
> 
> We also plan to add Compression artefact removal filter in near feature.
> v2: cleanup header files
> v3: an unnecessary class has been removed.
> ---
>   libavutil/Makefile                 |   4 +
>   libavutil/hwcontext.c              |   4 +
>   libavutil/hwcontext.h              |   1 +
>   libavutil/hwcontext_amf.c          | 585 +++++++++++++++++++++++++++++
>   libavutil/hwcontext_amf.h          |  64 ++++
>   libavutil/hwcontext_amf_internal.h |  44 +++
>   libavutil/hwcontext_internal.h     |   1 +
>   libavutil/pixdesc.c                |   4 +
>   libavutil/pixfmt.h                 |   5 +
>   9 files changed, 712 insertions(+)
>   create mode 100644 libavutil/hwcontext_amf.c
>   create mode 100644 libavutil/hwcontext_amf.h
>   create mode 100644 libavutil/hwcontext_amf_internal.h

Still no answer to my question?

[-- Attachment #1.1.1.2: OpenPGP public key --]
[-- Type: application/pgp-keys, Size: 637 bytes --]

[-- Attachment #1.2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 236 bytes --]

[-- Attachment #2: Type: text/plain, Size: 251 bytes --]

_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [FFmpeg-devel] [PATCH 01/10, v3] avutil: add hwcontext_amf.
  2024-05-30 14:34 ` Lynne via ffmpeg-devel
@ 2024-05-30 16:06   ` Dmitrii Ovchinnikov
  2024-05-30 16:48     ` Lynne via ffmpeg-devel
  0 siblings, 1 reply; 17+ messages in thread
From: Dmitrii Ovchinnikov @ 2024-05-30 16:06 UTC (permalink / raw)
  To: FFmpeg development discussions and patches; +Cc: Lynne

DX12 and Vulkan native encoders will expose fewer features compared to AMF,

at least in the foreseeable future. The missing features include low latency,

PreAnalysis including look-ahead etc. AMF context on Windows allows fully

enabling SAV - the ability to utilize VCNs in dGPU and APU in a single session.

AMF components including the encoder and decoder have some internal optimizations

in the area of memory access for APUs that are not available in standard

3D APIs.

Eventually specialized multimedia AMD cards could be added seamlessly to

FFmpeg with AMF integration.

AMF FSR(VSR) includes YUV version with focus on videos which is not

available in AMD FSR aimed for gaming.

More advanced filters that are not available in standard 3D APIs are coming.
_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [FFmpeg-devel] [PATCH 01/10, v3] avutil: add hwcontext_amf.
  2024-05-30 16:06   ` Dmitrii Ovchinnikov
@ 2024-05-30 16:48     ` Lynne via ffmpeg-devel
  2024-05-30 19:51       ` Dmitrii Ovchinnikov
  0 siblings, 1 reply; 17+ messages in thread
From: Lynne via ffmpeg-devel @ 2024-05-30 16:48 UTC (permalink / raw)
  To: Dmitrii Ovchinnikov, FFmpeg development discussions and patches; +Cc: Lynne


[-- Attachment #1.1.1.1: Type: text/plain, Size: 1313 bytes --]

On 30/05/2024 18:06, Dmitrii Ovchinnikov wrote:
> DX12 and Vulkan native encoders will expose less features compare to 
> AMF,____
> 
> at least in foreseeable feature. The missing features include low 
> latency,____

That's plainly not true.

> PreAnalysis including look-ahead etc. AMF context on Windows allows 
> fully____
>
> enable SAV - ability to utilize VCNs in dGPU and APU in a single 
> session.____

You should try talking internally to learn what is in progress.

> AMF components including encoder and decoder has some internal 
> optimizations ____
> 
> in the area of memory access for APUs that are not available in standard 
> ____
> 
> 3D APIs.____

This isn't OpenGL.

> Eventually specialized multimedia AMD cards could be added seamlessly to____
> 
> FFmpeg with AMF integration.____
> 
> AMF FSR(VSR) includes YUV version with focus on videos which is not____
> 
> available in AMD FSR aimed for gaming.____

Why don't you open source it then?

> More advanced filters that are not available in standard 3D APIs are 
> coming. __
We could have them as Vulkan filters.


I'm not objecting on this patch, but I am concerned that it's more 
proprietary code which is soon going to be redundant.

I will have to review it properly at some point.

[-- Attachment #1.1.1.2: OpenPGP public key --]
[-- Type: application/pgp-keys, Size: 637 bytes --]

[-- Attachment #1.2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 236 bytes --]

[-- Attachment #2: Type: text/plain, Size: 251 bytes --]

_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [FFmpeg-devel] [PATCH 01/10, v3] avutil: add hwcontext_amf.
  2024-05-30 16:48     ` Lynne via ffmpeg-devel
@ 2024-05-30 19:51       ` Dmitrii Ovchinnikov
  0 siblings, 0 replies; 17+ messages in thread
From: Dmitrii Ovchinnikov @ 2024-05-30 19:51 UTC (permalink / raw)
  To: Lynne; +Cc: FFmpeg development discussions and patches

I would appreciate your review.
Just to clarify: The information I provided is coming from AMF and driver
developers.
_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [FFmpeg-devel] [PATCH 01/10, v3] avutil: add hwcontext_amf.
  2024-05-30 13:08 [FFmpeg-devel] [PATCH 01/10, v3] avutil: add hwcontext_amf Dmitrii Ovchinnikov
                   ` (10 preceding siblings ...)
  2024-05-30 14:34 ` Lynne via ffmpeg-devel
@ 2024-06-04 18:58 ` Mark Thompson
  11 siblings, 0 replies; 17+ messages in thread
From: Mark Thompson @ 2024-06-04 18:58 UTC (permalink / raw)
  To: ffmpeg-devel

On 30/05/2024 14:08, Dmitrii Ovchinnikov wrote:
> Adds hwcontext_amf, which allows to use shared AMF
> context for the encoder, decoder and AMF-based filters,
> without copy to the host memory.
> It will also allow you to use some optimisations in
> the interaction of components (for example, SAV) and make a more
> manageable and optimal setup for using GPU devices with AMF
> in the case of a fully AMF pipeline.
> It will be a significant performance uplift when full AMF pipeline
> with filters is used.
> 
> We also plan to add Compression artefact removal filter in near feature.
> v2: cleanup header files
> v3: an unnecessary class has been removed.
> ---
>  libavutil/Makefile                 |   4 +
>  libavutil/hwcontext.c              |   4 +
>  libavutil/hwcontext.h              |   1 +
>  libavutil/hwcontext_amf.c          | 585 +++++++++++++++++++++++++++++
>  libavutil/hwcontext_amf.h          |  64 ++++
>  libavutil/hwcontext_amf_internal.h |  44 +++
>  libavutil/hwcontext_internal.h     |   1 +
>  libavutil/pixdesc.c                |   4 +
>  libavutil/pixfmt.h                 |   5 +
>  9 files changed, 712 insertions(+)
>  create mode 100644 libavutil/hwcontext_amf.c
>  create mode 100644 libavutil/hwcontext_amf.h
>  create mode 100644 libavutil/hwcontext_amf_internal.h
> 
> ...
> +
> +static void amf_dummy_free(void *opaque, uint8_t *data)
> +{
> +
> +}
> +
> +static AVBufferRef *amf_pool_alloc(void *opaque, size_t size)
> +{
> +    AVHWFramesContext *hwfc = (AVHWFramesContext *)opaque;
> +    AVBufferRef *buf;
> +
> +    buf = av_buffer_create(NULL, NULL, amf_dummy_free, hwfc, AV_BUFFER_FLAG_READONLY);
> +    if (!buf) {
> +        av_log(hwfc, AV_LOG_ERROR, "Failed to create buffer for AMF context.\n");
> +        return NULL;
> +    }
> +    return buf;
> +}

You're still allocating nothing here?

I think what this means is that you don't actually want to implement frames context creation at all because it doesn't do anything.

If it is not possible to make an AMFSurface as anything other than an output from an AMF component then this would make sense, the decoder can allocate the internals.

Look at the DRM hwcontext for an example that works like this - the DRM objects can only be made as outputs from devices or by mapping, so there is no frame context implementation.

> +
> ...
> diff --git a/libavutil/hwcontext_amf.h b/libavutil/hwcontext_amf.h
> new file mode 100644
> index 0000000000..ef2118dd4e
> --- /dev/null
> +++ b/libavutil/hwcontext_amf.h
> @@ -0,0 +1,64 @@
> +/*
> + * This file is part of FFmpeg.
> + *
> + * FFmpeg is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU Lesser General Public
> + * License as published by the Free Software Foundation; either
> + * version 2.1 of the License, or (at your option) any later version.
> + *
> + * FFmpeg is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> + * Lesser General Public License for more details.
> + *
> + * You should have received a copy of the GNU Lesser General Public
> + * License along with FFmpeg; if not, write to the Free Software
> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
> + */
> +
> +
> +#ifndef AVUTIL_HWCONTEXT_AMF_H
> +#define AVUTIL_HWCONTEXT_AMF_H
> +
> +#include "pixfmt.h"
> +#include "hwcontext.h"
> +#include <AMF/core/Factory.h>
> +#include <AMF/core/Context.h>
> +#include <AMF/core/Trace.h>
> +#include <AMF/core/Debug.h>
> +
> +/**
> + * This struct is allocated as AVHWDeviceContext.hwctx
> + */
> +typedef struct AVAMFDeviceContext {
> +    HMODULE            library;

What is this type?  (It looks like a Windows type, but I thought this was cross-platform.)

> +    AMFFactory         *factory;
> +    AMFDebug           *debug;
> +    AMFTrace           *trace;
> +    void               *trace_writer;

Are all of these objects necessary to operation of the AMF device?  Please remove elements which are not necessary and add them to the private context if they are otherwise useful.

> +
> +    int64_t            version; ///< version of AMF runtime

Why is the version necessary to expose in the public API?  Is it not possible to call the QueryVersion function after starting?

> +    AMFContext         *context;
> +    int                mem_type;
Is mem_type really necessary to expose in the public API?  Can the user not determine this by some API call?

> +} AVAMFDeviceContext;
> +
> +enum AMF_SURFACE_FORMAT av_amf_av_to_amf_format(enum AVPixelFormat fmt);
> +enum AVPixelFormat av_amf_to_av_format(enum AMF_SURFACE_FORMAT fmt);
> +

All of the following functions should not be public symbols.  You want to implement the hwcontext functions so that these all work without needing special implementation for AMF, they should not be individually callable because that is not useful.

> +int av_amf_context_create(AVAMFDeviceContext * context,
> +                          void* avcl,
> +                          const char *device,
> +                          AVDictionary *opts, int flags);

Use device_create.

> +int av_amf_context_init(AVAMFDeviceContext* internal, void* avcl);

Use device_init.

> +void av_amf_context_free(void *opaque, uint8_t *data);

Use device_uninit (or maybe an AVBuffer destructor?).

> +int av_amf_context_derive(AVAMFDeviceContext * internal,
> +                          AVHWDeviceContext *child_device_ctx, AVDictionary *opts,
> +                          int flags);

Use device_derive.

> +
> +int av_amf_transfer_data_from(AVHWFramesContext *ctx, AVFrame *dst,
> +                                    const AVFrame *src);

Use transfer_data_from.

> +
> +int av_amf_transfer_data_to(AVHWFramesContext *ctx, AVFrame *dst,
> +                                 const AVFrame *src);

Use transfer_data_to.

> +
> +#endif /* AVUTIL_HWCONTEXT_AMF_H */
> ...

Thanks,

- Mark
_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [FFmpeg-devel] [PATCH 02/10, v3] avcodec: add amfdec.
  2024-05-30 13:08 ` [FFmpeg-devel] [PATCH 02/10, v3] avcodec: add amfdec Dmitrii Ovchinnikov
@ 2024-06-04 19:25   ` Mark Thompson
  0 siblings, 0 replies; 17+ messages in thread
From: Mark Thompson @ 2024-06-04 19:25 UTC (permalink / raw)
  To: ffmpeg-devel

On 30/05/2024 14:08, Dmitrii Ovchinnikov wrote:
> From: Evgeny Pavlov <lucenticus@gmail.com>
> 
> Added AMF based h264, hevc, av1 decoders.
> Co-authored-by: Dmitrii Ovchinnikov <ovchinnikov.dmitrii@gmail.com>
> v2: added encoder reinitialisation
> v3: use AMF_SURFACE_UNKNOWN to int decoder(ctx->output_format before)
> ---
>  libavcodec/Makefile    |   7 +-
>  libavcodec/allcodecs.c |   3 +
>  libavcodec/amfdec.c    | 696 +++++++++++++++++++++++++++++++++++++++++
>  libavcodec/amfdec.h    |  63 ++++
>  4 files changed, 767 insertions(+), 2 deletions(-)
>  create mode 100644 libavcodec/amfdec.c
>  create mode 100644 libavcodec/amfdec.h
> 
> ...
> +
> +const enum AVPixelFormat amf_dec_pix_fmts[] = {
> +    AV_PIX_FMT_YUV420P,
> +    AV_PIX_FMT_NV12,
> +    AV_PIX_FMT_BGRA,
> +    AV_PIX_FMT_ARGB,
> +    AV_PIX_FMT_RGBA,
> +    AV_PIX_FMT_GRAY8,
> +    AV_PIX_FMT_BGR0,
> +    AV_PIX_FMT_YUYV422,
> +    AV_PIX_FMT_P010,
> +    AV_PIX_FMT_P012,
> +    AV_PIX_FMT_YUV420P10,
> +    AV_PIX_FMT_YUV420P12,
> +    AV_PIX_FMT_YUV420P16,
> +#if CONFIG_D3D11VA
> +    AV_PIX_FMT_D3D11,
> +#endif
> +#if CONFIG_DXVA2
> +    AV_PIX_FMT_DXVA2_VLD,
> +#endif
> +    AV_PIX_FMT_AMF_SURFACE,
> +    AV_PIX_FMT_NONE
> +};

What is this set of formats doing?  Most of them are ignored because get_format below only ever offers two choices.

> +
> +static const AVCodecHWConfigInternal *const amf_hw_configs[] = {
> +    &(const AVCodecHWConfigInternal) {
> +        .public = {
> +            .pix_fmt     = AV_PIX_FMT_AMF_SURFACE,
> +            .methods     = AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX |

See below, I don't think it makes sense to have HW_FRAMES_CTX in this decoder.

> +                           AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX,
> +            .device_type = AV_HWDEVICE_TYPE_AMF,
> +        },
> +        .hwaccel = NULL,
> +    },
> +    NULL
> +};
> +
> ...
> +
> +static int amf_init_decoder(AVCodecContext *avctx)
> +{
> +    AMFDecoderContext     *ctx = avctx->priv_data;
> +    AVAMFDeviceContext * internal = ctx->amf_device_ctx;
> +    const wchar_t           *codec_id = NULL;
> +    AMF_RESULT              res;
> +    AMFBuffer               *buffer;
> +    amf_int64               color_profile;
> +    int                     pool_size = 36;
> +
> +    ctx->drain = 0;
> +    ctx->resolution_changed = 0;
> +
> +    switch (avctx->codec->id) {
> +        case AV_CODEC_ID_H264:
> +            codec_id = AMFVideoDecoderUVD_H264_AVC;
> +            break;
> +        case AV_CODEC_ID_HEVC: {
> +            if (avctx->profile == AV_PROFILE_HEVC_MAIN_10)

You won't know profile here?  It is an output field, the decoder has to set it once it determines it from the stream.

> +                codec_id = AMFVideoDecoderHW_H265_MAIN10;
> +            else
> +                codec_id = AMFVideoDecoderHW_H265_HEVC;
> +        } break;
> +        case AV_CODEC_ID_AV1:
> +            if (avctx->profile == AV_PROFILE_AV1_PROFESSIONAL)
> +                codec_id = AMFVideoDecoderHW_AV1_12BIT;
> +            else
> +                codec_id = AMFVideoDecoderHW_AV1;
> +            break;
> +        default:
> +            break;
> +    }
> +    AMF_RETURN_IF_FALSE(ctx, codec_id != NULL, AVERROR(EINVAL), "Codec %d is not supported\n", avctx->codec->id);
> +
> +    ...> +
> +static int amf_decode_init(AVCodecContext *avctx)
> +{
> +    AMFDecoderContext *ctx = avctx->priv_data;
> +    int ret;
> +    ctx->local_context = 0;
> +    ctx->in_pkt = av_packet_alloc();
> +    if (!ctx->in_pkt)
> +        return AVERROR(ENOMEM);
> +
> +    if (avctx->hw_frames_ctx){

This will never be set at init time because the user sets it in the get_format callback (see documentation for the field).

Even ignoring that, I don't see how this would make sense ayway?  The AMF frames context is a dummy shell containing nothing, so the AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX can't do anything useful.

(How are you testing this path?)

> +        AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
> +        if (frames_ctx->device_ctx->type == AV_HWDEVICE_TYPE_AMF) {
> +            ctx->amf_device_ctx =  frames_ctx->device_ctx->hwctx;
> +        }
> +    }
> +    else if  (avctx->hw_device_ctx && !avctx->hw_frames_ctx) {
> +        AVHWDeviceContext   *hwdev_ctx;
> +        AVHWFramesContext *hwframes_ctx;
> +        hwdev_ctx = (AVHWDeviceContext*)avctx->hw_device_ctx->data;
> +        if (hwdev_ctx->type == AV_HWDEVICE_TYPE_AMF)
> +        {
> +            ctx->amf_device_ctx =  hwdev_ctx->hwctx;
> +        }
> +
> +        avctx->hw_frames_ctx = av_hwframe_ctx_alloc(avctx->hw_device_ctx);
> +
> +        if (!avctx->hw_frames_ctx) {
> +            av_log(avctx, AV_LOG_ERROR, "av_hwframe_ctx_alloc failed\n");
> +            return AVERROR(ENOMEM);
> +        }
> +
> +        hwframes_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
> +        hwframes_ctx->width             = FFALIGN(avctx->coded_width,  32);
> +        hwframes_ctx->height            = FFALIGN(avctx->coded_height, 32);

I don't see how you can ensure that you have a correct value for the sizes here?  (See documentation for the fields; sometimes they are set from codecpar to help the user and I would guess that you are only testing with that in the ffmpeg utility rather than using the decoder standalone.)

> +        hwframes_ctx->format            = AV_PIX_FMT_AMF_SURFACE;
> +        hwframes_ctx->sw_format         = avctx->sw_pix_fmt == AV_PIX_FMT_YUV420P10 ? AV_PIX_FMT_P010 : AV_PIX_FMT_NV12;

I don't see where sw_pix_fmt would have come from either.

> +        hwframes_ctx->initial_pool_size = ctx->surface_pool_size + 8;
> +        avctx->pix_fmt = AV_PIX_FMT_AMF_SURFACE;
> +
> +        ret = av_hwframe_ctx_init(avctx->hw_frames_ctx);
> +
> +        if (ret < 0) {
> +            av_log(NULL, AV_LOG_ERROR, "Error initializing a AMF frame pool\n");
> +            av_buffer_unref(&avctx->hw_frames_ctx);
> +            return ret;
> +        }
> +    }  else {
> +        ctx->amf_device_ctx = av_mallocz(sizeof(AVAMFDeviceContext));

sizeof(AVAMFDeviceContext) is not allowed in a different library.

I think this is trying to do what making a device would do except without calling the normal functions.  Just make a device normally rather than having special functions to bypass that?

> +        ctx->local_context = 1;
> +        if ((ret = av_amf_context_create(ctx->amf_device_ctx, avctx, "", NULL, 0)) != 0) {
> +            amf_decode_close(avctx);
> +            return ret;
> +        }
> +        if ((ret = amf_init_decoder_context(avctx)) != 0) {
> +            return ret;
> +        }
> +    }
> +    if ((ret = amf_init_decoder(avctx)) == 0) {
> +        AMFVariantStruct    format_var = {0};
> +        ret = ctx->decoder->pVtbl->GetProperty(ctx->decoder, AMF_VIDEO_DECODER_OUTPUT_FORMAT, &format_var);
> +        if (ret != AMF_OK) {
> +            return AVERROR(EINVAL);
> +        }
> +        enum AVPixelFormat format = av_amf_to_av_format((AMF_SURFACE_FORMAT)format_var.int64Value);
> +        enum AVPixelFormat pix_fmts[3] = {
> +        AV_PIX_FMT_AMF_SURFACE,
> +        format,
> +        AV_PIX_FMT_NONE };
> +
> +
> +        ret = ff_get_format(avctx, pix_fmts);
> +        if (ret < 0) {
> +            avctx->pix_fmt = AV_PIX_FMT_NONE;
> +        }
> +
> +        return 0;
> +    }
> +    amf_decode_close(avctx);
> +    return ret;
> +}
> +
> ...> diff --git a/libavcodec/amfdec.h b/libavcodec/amfdec.h

For a single-file implementation with no external functions there is no need to make a header.

Thanks,

- Mark
_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".

^ permalink raw reply	[flat|nested] 17+ messages in thread

end of thread, other threads:[~2024-06-04 19:25 UTC | newest]

Thread overview: 17+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2024-05-30 13:08 [FFmpeg-devel] [PATCH 01/10, v3] avutil: add hwcontext_amf Dmitrii Ovchinnikov
2024-05-30 13:08 ` [FFmpeg-devel] [PATCH 02/10, v3] avcodec: add amfdec Dmitrii Ovchinnikov
2024-06-04 19:25   ` Mark Thompson
2024-05-30 13:08 ` [FFmpeg-devel] [PATCH 03/10, v3] avcodec/amfenc: Fixes the color information in the output Dmitrii Ovchinnikov
2024-05-30 13:08 ` [FFmpeg-devel] [PATCH 04/10, v3] avcodec/amfenc: HDR metadata Dmitrii Ovchinnikov
2024-05-30 13:08 ` [FFmpeg-devel] [PATCH 05/10, v3] avcodec/amfenc: add 10 bit encoding in av1_amf Dmitrii Ovchinnikov
2024-05-30 13:08 ` [FFmpeg-devel] [PATCH 06/10, v3] avcodec/amfenc: GPU driver version check Dmitrii Ovchinnikov
2024-05-30 13:08 ` [FFmpeg-devel] [PATCH 07/10, v3] avcodec/amfenc: add smart access video option Dmitrii Ovchinnikov
2024-05-30 13:08 ` [FFmpeg-devel] [PATCH 08/10, v3] avcodec/amfenc: redesign to use hwcontext_amf Dmitrii Ovchinnikov
2024-05-30 13:08 ` [FFmpeg-devel] [PATCH 09/10, v3] avfilter/scale_amf: Add AMF VPP & super resolution filters Dmitrii Ovchinnikov
2024-05-30 13:08 ` [FFmpeg-devel] [PATCH 10/10, v3] doc/filters: Add documentation for AMF filters Dmitrii Ovchinnikov
2024-05-30 14:04 ` [FFmpeg-devel] [PATCH 01/10, v3] avutil: add hwcontext_amf Andreas Rheinhardt
2024-05-30 14:34 ` Lynne via ffmpeg-devel
2024-05-30 16:06   ` Dmitrii Ovchinnikov
2024-05-30 16:48     ` Lynne via ffmpeg-devel
2024-05-30 19:51       ` Dmitrii Ovchinnikov
2024-06-04 18:58 ` Mark Thompson

Git Inbox Mirror of the ffmpeg-devel mailing list - see https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

This inbox may be cloned and mirrored by anyone:

	git clone --mirror https://master.gitmailbox.com/ffmpegdev/0 ffmpegdev/git/0.git

	# If you have public-inbox 1.1+ installed, you may
	# initialize and index your mirror using the following commands:
	public-inbox-init -V2 ffmpegdev ffmpegdev/ https://master.gitmailbox.com/ffmpegdev \
		ffmpegdev@gitmailbox.com
	public-inbox-index ffmpegdev

Example config snippet for mirrors.


AGPL code for this site: git clone https://public-inbox.org/public-inbox.git