Git Inbox Mirror of the ffmpeg-devel mailing list - see https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
* [FFmpeg-devel] [PATCH 1/3] lavfi/scale_qsv: simplify scale_qsv filter
@ 2023-01-17  6:20 Xiang, Haihao
  2023-01-17  6:20 ` [FFmpeg-devel] [PATCH 2/3] lavfi/scale_qsv: re-use VPPContext for " Xiang, Haihao
                   ` (2 more replies)
  0 siblings, 3 replies; 4+ messages in thread
From: Xiang, Haihao @ 2023-01-17  6:20 UTC (permalink / raw)
  To: ffmpeg-devel; +Cc: Haihao Xiang

From: Haihao Xiang <haihao.xiang@intel.com>

Use QSVVPPContext as the base context of QSVScaleContext, so that we can
re-use the functions defined for QSVVPPContext to manage the MFX session
for the scale_qsv filter.

In addition, system memory is already taken into account in
QSVVPPContext, so we may add support for non-QSV pixel formats in the
future.
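
For readers skimming the diff below, the change boils down to embedding
the shared context as the first struct member and letting the qsvvpp
helpers drive the MFX session. The snippet below is only a condensed
sketch taken from this patch (all names come from the diff); it is not a
standalone build unit outside the libavfilter tree:

    #include <mfxvideo.h>
    #include "libavfilter/qsvvpp.h"
    #include "avfilter.h"

    typedef struct QSVScaleContext {
        QSVVPPContext    qsv;         /* base context, must be the first member */
        mfxExtVPPScaling scale_conf;  /* per-filter mfx extension buffer */
        int              mode;        /* scaling mode option */
        /* width/height/format options as before */
    } QSVScaleContext;

    static int qsvscale_filter_frame(AVFilterLink *link, AVFrame *in)
    {
        AVFilterContext *ctx = link->dst;
        QSVVPPContext   *qsv = ctx->priv;  /* same pointer, viewed as the base */
        int ret;

        /* the shared helper submits the frame through the MFX session */
        ret = ff_qsvvpp_filter_frame(qsv, link, in);
        av_frame_free(&in);
        return ret;
    }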

Signed-off-by: Haihao Xiang <haihao.xiang@intel.com>
---
 configure                  |   1 +
 libavfilter/vf_scale_qsv.c | 441 +++----------------------------------
 2 files changed, 35 insertions(+), 407 deletions(-)

diff --git a/configure b/configure
index 6e88c32223..86f930024b 100755
--- a/configure
+++ b/configure
@@ -3721,6 +3721,7 @@ sab_filter_deps="gpl swscale"
 scale2ref_filter_deps="swscale"
 scale_filter_deps="swscale"
 scale_qsv_filter_deps="libmfx"
+scale_qsv_filter_select="qsvvpp"
 scdet_filter_select="scene_sad"
 select_filter_select="scene_sad"
 sharpness_vaapi_filter_deps="vaapi"
diff --git a/libavfilter/vf_scale_qsv.c b/libavfilter/vf_scale_qsv.c
index a89a3ba6e6..8eb8bc9ec1 100644
--- a/libavfilter/vf_scale_qsv.c
+++ b/libavfilter/vf_scale_qsv.c
@@ -66,35 +66,11 @@ enum var_name {
 #define MFX_IMPL_VIA_MASK(impl) (0x0f00 & (impl))
 
 typedef struct QSVScaleContext {
-    const AVClass *class;
-
-    /* a clone of the main session, used internally for scaling */
-    mfxSession   session;
-
-    mfxMemId *mem_ids_in;
-    int nb_mem_ids_in;
-
-    mfxMemId *mem_ids_out;
-    int nb_mem_ids_out;
-
-    mfxFrameSurface1 **surface_ptrs_in;
-    int             nb_surface_ptrs_in;
-
-    mfxFrameSurface1 **surface_ptrs_out;
-    int             nb_surface_ptrs_out;
-
-#if QSV_HAVE_OPAQUE
-    mfxExtOpaqueSurfaceAlloc opaque_alloc;
-#endif
+    QSVVPPContext qsv;
 
     mfxExtVPPScaling         scale_conf;
     int                      mode;
 
-    mfxExtBuffer             *ext_buffers[2];
-    int                      num_ext_buf;
-
-    int shift_width, shift_height;
-
     /**
      * New dimensions. Special values are:
      *   0 = original width/height
@@ -131,338 +107,21 @@ static av_cold int qsvscale_init(AVFilterContext *ctx)
 
 static av_cold void qsvscale_uninit(AVFilterContext *ctx)
 {
-    QSVScaleContext *s = ctx->priv;
-
-    if (s->session) {
-        MFXClose(s->session);
-        s->session = NULL;
-    }
-
-    av_freep(&s->mem_ids_in);
-    av_freep(&s->mem_ids_out);
-    s->nb_mem_ids_in  = 0;
-    s->nb_mem_ids_out = 0;
-
-    av_freep(&s->surface_ptrs_in);
-    av_freep(&s->surface_ptrs_out);
-    s->nb_surface_ptrs_in  = 0;
-    s->nb_surface_ptrs_out = 0;
-}
-
-static int init_out_pool(AVFilterContext *ctx,
-                         int out_width, int out_height)
-{
-    QSVScaleContext *s = ctx->priv;
-    AVFilterLink *outlink = ctx->outputs[0];
-
-    AVHWFramesContext *in_frames_ctx;
-    AVHWFramesContext *out_frames_ctx;
-    AVQSVFramesContext *in_frames_hwctx;
-    AVQSVFramesContext *out_frames_hwctx;
-    enum AVPixelFormat in_format;
-    enum AVPixelFormat out_format;
-    int i, ret;
-
-    /* check that we have a hw context */
-    if (!ctx->inputs[0]->hw_frames_ctx) {
-        av_log(ctx, AV_LOG_ERROR, "No hw context provided on input\n");
-        return AVERROR(EINVAL);
-    }
-    in_frames_ctx   = (AVHWFramesContext*)ctx->inputs[0]->hw_frames_ctx->data;
-    in_frames_hwctx = in_frames_ctx->hwctx;
-
-    in_format     = in_frames_ctx->sw_format;
-    out_format    = (s->format == AV_PIX_FMT_NONE) ? in_format : s->format;
-
-    outlink->hw_frames_ctx = av_hwframe_ctx_alloc(in_frames_ctx->device_ref);
-    if (!outlink->hw_frames_ctx)
-        return AVERROR(ENOMEM);
-    out_frames_ctx   = (AVHWFramesContext*)outlink->hw_frames_ctx->data;
-    out_frames_hwctx = out_frames_ctx->hwctx;
-
-    out_frames_ctx->format            = AV_PIX_FMT_QSV;
-    out_frames_ctx->width             = FFALIGN(out_width,  16);
-    out_frames_ctx->height            = FFALIGN(out_height, 16);
-    out_frames_ctx->sw_format         = out_format;
-    out_frames_ctx->initial_pool_size = 4;
-
-    out_frames_hwctx->frame_type = in_frames_hwctx->frame_type | MFX_MEMTYPE_FROM_VPPOUT;
-
-    ret = ff_filter_init_hw_frames(ctx, outlink, 32);
-    if (ret < 0)
-        return ret;
-
-    ret = av_hwframe_ctx_init(outlink->hw_frames_ctx);
-    if (ret < 0)
-        return ret;
-
-    for (i = 0; i < out_frames_hwctx->nb_surfaces; i++) {
-        mfxFrameInfo *info = &out_frames_hwctx->surfaces[i].Info;
-        info->CropW = out_width;
-        info->CropH = out_height;
-    }
-
-    return 0;
-}
-
-static mfxStatus frame_alloc(mfxHDL pthis, mfxFrameAllocRequest *req,
-                             mfxFrameAllocResponse *resp)
-{
-    AVFilterContext *ctx = pthis;
-    QSVScaleContext   *s = ctx->priv;
-
-    if (!(req->Type & MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET) ||
-        !(req->Type & (MFX_MEMTYPE_FROM_VPPIN | MFX_MEMTYPE_FROM_VPPOUT)) ||
-        !(req->Type & MFX_MEMTYPE_EXTERNAL_FRAME))
-        return MFX_ERR_UNSUPPORTED;
-
-    if (req->Type & MFX_MEMTYPE_FROM_VPPIN) {
-        resp->mids           = s->mem_ids_in;
-        resp->NumFrameActual = s->nb_mem_ids_in;
-    } else {
-        resp->mids           = s->mem_ids_out;
-        resp->NumFrameActual = s->nb_mem_ids_out;
-    }
-
-    return MFX_ERR_NONE;
-}
-
-static mfxStatus frame_free(mfxHDL pthis, mfxFrameAllocResponse *resp)
-{
-    return MFX_ERR_NONE;
-}
-
-static mfxStatus frame_lock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr)
-{
-    return MFX_ERR_UNSUPPORTED;
-}
-
-static mfxStatus frame_unlock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr)
-{
-    return MFX_ERR_UNSUPPORTED;
-}
-
-static mfxStatus frame_get_hdl(mfxHDL pthis, mfxMemId mid, mfxHDL *hdl)
-{
-    mfxHDLPair *pair_dst = (mfxHDLPair*)hdl;
-    mfxHDLPair *pair_src = (mfxHDLPair*)mid;
-
-    pair_dst->first = pair_src->first;
-
-    if (pair_src->second != (mfxMemId)MFX_INFINITE)
-        pair_dst->second = pair_src->second;
-    return MFX_ERR_NONE;
-}
-
-static int init_out_session(AVFilterContext *ctx)
-{
-
-    QSVScaleContext                   *s = ctx->priv;
-    AVHWFramesContext     *in_frames_ctx = (AVHWFramesContext*)ctx->inputs[0]->hw_frames_ctx->data;
-    AVHWFramesContext    *out_frames_ctx = (AVHWFramesContext*)ctx->outputs[0]->hw_frames_ctx->data;
-    AVQSVFramesContext  *in_frames_hwctx = in_frames_ctx->hwctx;
-    AVQSVFramesContext *out_frames_hwctx = out_frames_ctx->hwctx;
-    AVQSVDeviceContext     *device_hwctx = in_frames_ctx->device_ctx->hwctx;
-
-    int opaque = 0;
-
-    mfxHDL handle = NULL;
-    mfxHandleType handle_type;
-    mfxVersion ver;
-    mfxIMPL impl;
-    mfxVideoParam par;
-    mfxStatus err;
-    int i, ret;
-
-#if QSV_HAVE_OPAQUE
-    opaque = !!(in_frames_hwctx->frame_type & MFX_MEMTYPE_OPAQUE_FRAME);
-#endif
-    s->num_ext_buf = 0;
-
-    /* extract the properties of the "master" session given to us */
-    err = MFXQueryIMPL(device_hwctx->session, &impl);
-    if (err == MFX_ERR_NONE)
-        err = MFXQueryVersion(device_hwctx->session, &ver);
-    if (err != MFX_ERR_NONE) {
-        av_log(ctx, AV_LOG_ERROR, "Error querying the session attributes\n");
-        return AVERROR_UNKNOWN;
-    }
-
-    if (MFX_IMPL_VIA_VAAPI == MFX_IMPL_VIA_MASK(impl)) {
-        handle_type = MFX_HANDLE_VA_DISPLAY;
-    } else if (MFX_IMPL_VIA_D3D11 == MFX_IMPL_VIA_MASK(impl)) {
-        handle_type = MFX_HANDLE_D3D11_DEVICE;
-    } else if (MFX_IMPL_VIA_D3D9 == MFX_IMPL_VIA_MASK(impl)) {
-        handle_type = MFX_HANDLE_D3D9_DEVICE_MANAGER;
-    } else {
-        av_log(ctx, AV_LOG_ERROR, "Error unsupported handle type\n");
-        return AVERROR_UNKNOWN;
-    }
-
-    err = MFXVideoCORE_GetHandle(device_hwctx->session, handle_type, &handle);
-    if (err < 0)
-        return ff_qsvvpp_print_error(ctx, err, "Error getting the session handle");
-    else if (err > 0) {
-        ff_qsvvpp_print_warning(ctx, err, "Warning in getting the session handle");
-        return AVERROR_UNKNOWN;
-    }
-
-    /* create a "slave" session with those same properties, to be used for
-     * actual scaling */
-    ret = ff_qsvvpp_create_mfx_session(ctx, device_hwctx->loader, impl, &ver,
-                                       &s->session);
-    if (ret)
-        return ret;
-
-    if (handle) {
-        err = MFXVideoCORE_SetHandle(s->session, handle_type, handle);
-        if (err != MFX_ERR_NONE)
-            return AVERROR_UNKNOWN;
-    }
-
-    if (QSV_RUNTIME_VERSION_ATLEAST(ver, 1, 25)) {
-        err = MFXJoinSession(device_hwctx->session, s->session);
-            if (err != MFX_ERR_NONE)
-                return AVERROR_UNKNOWN;
-    }
-
-    memset(&par, 0, sizeof(par));
-
-    if (!opaque) {
-        mfxFrameAllocator frame_allocator = {
-            .pthis  = ctx,
-            .Alloc  = frame_alloc,
-            .Lock   = frame_lock,
-            .Unlock = frame_unlock,
-            .GetHDL = frame_get_hdl,
-            .Free   = frame_free,
-        };
-
-        s->mem_ids_in = av_calloc(in_frames_hwctx->nb_surfaces,
-                                  sizeof(*s->mem_ids_in));
-        if (!s->mem_ids_in)
-            return AVERROR(ENOMEM);
-        for (i = 0; i < in_frames_hwctx->nb_surfaces; i++)
-            s->mem_ids_in[i] = in_frames_hwctx->surfaces[i].Data.MemId;
-        s->nb_mem_ids_in = in_frames_hwctx->nb_surfaces;
-
-        s->mem_ids_out = av_calloc(out_frames_hwctx->nb_surfaces,
-                                   sizeof(*s->mem_ids_out));
-        if (!s->mem_ids_out)
-            return AVERROR(ENOMEM);
-        for (i = 0; i < out_frames_hwctx->nb_surfaces; i++)
-            s->mem_ids_out[i] = out_frames_hwctx->surfaces[i].Data.MemId;
-        s->nb_mem_ids_out = out_frames_hwctx->nb_surfaces;
-
-        err = MFXVideoCORE_SetFrameAllocator(s->session, &frame_allocator);
-        if (err != MFX_ERR_NONE)
-            return AVERROR_UNKNOWN;
-
-        par.IOPattern = MFX_IOPATTERN_IN_VIDEO_MEMORY | MFX_IOPATTERN_OUT_VIDEO_MEMORY;
-    }
-#if QSV_HAVE_OPAQUE
-    else {
-        s->surface_ptrs_in = av_calloc(in_frames_hwctx->nb_surfaces,
-                                       sizeof(*s->surface_ptrs_in));
-        if (!s->surface_ptrs_in)
-            return AVERROR(ENOMEM);
-        for (i = 0; i < in_frames_hwctx->nb_surfaces; i++)
-            s->surface_ptrs_in[i] = in_frames_hwctx->surfaces + i;
-        s->nb_surface_ptrs_in = in_frames_hwctx->nb_surfaces;
-
-        s->surface_ptrs_out = av_calloc(out_frames_hwctx->nb_surfaces,
-                                        sizeof(*s->surface_ptrs_out));
-        if (!s->surface_ptrs_out)
-            return AVERROR(ENOMEM);
-        for (i = 0; i < out_frames_hwctx->nb_surfaces; i++)
-            s->surface_ptrs_out[i] = out_frames_hwctx->surfaces + i;
-        s->nb_surface_ptrs_out = out_frames_hwctx->nb_surfaces;
-
-        s->opaque_alloc.In.Surfaces   = s->surface_ptrs_in;
-        s->opaque_alloc.In.NumSurface = s->nb_surface_ptrs_in;
-        s->opaque_alloc.In.Type       = in_frames_hwctx->frame_type;
-
-        s->opaque_alloc.Out.Surfaces   = s->surface_ptrs_out;
-        s->opaque_alloc.Out.NumSurface = s->nb_surface_ptrs_out;
-        s->opaque_alloc.Out.Type       = out_frames_hwctx->frame_type;
-
-        s->opaque_alloc.Header.BufferId = MFX_EXTBUFF_OPAQUE_SURFACE_ALLOCATION;
-        s->opaque_alloc.Header.BufferSz = sizeof(s->opaque_alloc);
-
-        s->ext_buffers[s->num_ext_buf++] = (mfxExtBuffer*)&s->opaque_alloc;
-
-        par.IOPattern = MFX_IOPATTERN_IN_OPAQUE_MEMORY | MFX_IOPATTERN_OUT_OPAQUE_MEMORY;
-    }
-#endif
-
-    memset(&s->scale_conf, 0, sizeof(mfxExtVPPScaling));
-    s->scale_conf.Header.BufferId     = MFX_EXTBUFF_VPP_SCALING;
-    s->scale_conf.Header.BufferSz     = sizeof(mfxExtVPPScaling);
-    s->scale_conf.ScalingMode         = s->mode;
-    s->ext_buffers[s->num_ext_buf++]  = (mfxExtBuffer*)&s->scale_conf;
-    av_log(ctx, AV_LOG_VERBOSE, "Scaling mode: %d\n", s->mode);
-
-    par.ExtParam    = s->ext_buffers;
-    par.NumExtParam = s->num_ext_buf;
-
-    par.AsyncDepth = 1;    // TODO async
-
-    par.vpp.In  = in_frames_hwctx->surfaces[0].Info;
-    par.vpp.Out = out_frames_hwctx->surfaces[0].Info;
-
-    /* Apparently VPP requires the frame rate to be set to some value, otherwise
-     * init will fail (probably for the framerate conversion filter). Since we
-     * are only doing scaling here, we just invent an arbitrary
-     * value */
-    par.vpp.In.FrameRateExtN  = 25;
-    par.vpp.In.FrameRateExtD  = 1;
-    par.vpp.Out.FrameRateExtN = 25;
-    par.vpp.Out.FrameRateExtD = 1;
-
-    /* Print input memory mode */
-    ff_qsvvpp_print_iopattern(ctx, par.IOPattern & 0x0F, "VPP");
-    /* Print output memory mode */
-    ff_qsvvpp_print_iopattern(ctx, par.IOPattern & 0xF0, "VPP");
-    err = MFXVideoVPP_Init(s->session, &par);
-    if (err < 0)
-        return ff_qsvvpp_print_error(ctx, err,
-                                     "Error opening the VPP for scaling");
-    else if (err > 0) {
-        ff_qsvvpp_print_warning(ctx, err,
-                                "Warning in VPP initialization");
-        return AVERROR_UNKNOWN;
-    }
-
-    return 0;
-}
-
-static int init_scale_session(AVFilterContext *ctx, int in_width, int in_height,
-                              int out_width, int out_height)
-{
-    int ret;
-
-    qsvscale_uninit(ctx);
-
-    ret = init_out_pool(ctx, out_width, out_height);
-    if (ret < 0)
-        return ret;
-
-    ret = init_out_session(ctx);
-    if (ret < 0)
-        return ret;
-
-    return 0;
+    ff_qsvvpp_close(ctx);
 }
 
 static int qsvscale_config_props(AVFilterLink *outlink)
 {
     AVFilterContext *ctx = outlink->src;
     AVFilterLink *inlink = outlink->src->inputs[0];
-    QSVScaleContext  *s = ctx->priv;
+    QSVScaleContext   *s = ctx->priv;
+    QSVVPPParam    param = { NULL };
+    mfxExtBuffer    *ext_buf[1];
     int64_t w, h;
     double var_values[VARS_NB], res;
     char *expr;
     int ret;
+    enum AVPixelFormat in_format;
 
     var_values[VAR_IN_W]  = var_values[VAR_IW] = inlink->w;
     var_values[VAR_IN_H]  = var_values[VAR_IH] = inlink->h;
@@ -518,7 +177,30 @@ static int qsvscale_config_props(AVFilterLink *outlink)
     outlink->w = w;
     outlink->h = h;
 
-    ret = init_scale_session(ctx, inlink->w, inlink->h, w, h);
+    if (inlink->format == AV_PIX_FMT_QSV) {
+        if (!inlink->hw_frames_ctx || !inlink->hw_frames_ctx->data)
+            return AVERROR(EINVAL);
+        else
+            in_format = ((AVHWFramesContext*)inlink->hw_frames_ctx->data)->sw_format;
+    } else
+        in_format = inlink->format;
+
+    if (s->format == AV_PIX_FMT_NONE)
+        s->format = in_format;
+
+    outlink->frame_rate = inlink->frame_rate;
+    outlink->time_base = av_inv_q(inlink->frame_rate);
+    param.out_sw_format = s->format;
+
+    param.ext_buf                      = ext_buf;
+    memset(&s->scale_conf, 0, sizeof(mfxExtVPPScaling));
+    s->scale_conf.Header.BufferId      = MFX_EXTBUFF_VPP_SCALING;
+    s->scale_conf.Header.BufferSz      = sizeof(mfxExtVPPScaling);
+    s->scale_conf.ScalingMode          = s->mode;
+    param.ext_buf[param.num_ext_buf++] = (mfxExtBuffer*)&s->scale_conf;
+    av_log(ctx, AV_LOG_VERBOSE, "Scaling mode: %d\n", s->mode);
+
+    ret = ff_qsvvpp_init(ctx, &param);
     if (ret < 0)
         return ret;
 
@@ -542,67 +224,12 @@ fail:
 
 static int qsvscale_filter_frame(AVFilterLink *link, AVFrame *in)
 {
-    AVFilterContext             *ctx = link->dst;
-    QSVScaleContext               *s = ctx->priv;
-    AVFilterLink            *outlink = ctx->outputs[0];
-
-    mfxSyncPoint sync = NULL;
-    mfxStatus err;
-
-    AVFrame *out = NULL;
-    int ret = 0;
-
-    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
-    if (!out) {
-        ret = AVERROR(ENOMEM);
-        goto fail;
-    }
+    int               ret = 0;
+    AVFilterContext  *ctx = link->dst;
+    QSVVPPContext    *qsv = ctx->priv;
 
-    do {
-        err = MFXVideoVPP_RunFrameVPPAsync(s->session,
-                                           (mfxFrameSurface1*)in->data[3],
-                                           (mfxFrameSurface1*)out->data[3],
-                                           NULL, &sync);
-        if (err == MFX_WRN_DEVICE_BUSY)
-            av_usleep(1);
-    } while (err == MFX_WRN_DEVICE_BUSY);
-
-    if (err < 0) {
-        ret = ff_qsvvpp_print_error(ctx, err, "Error during scaling");
-        goto fail;
-    }
-
-    if (!sync) {
-        av_log(ctx, AV_LOG_ERROR, "No sync during scaling\n");
-        ret = AVERROR_UNKNOWN;
-        goto fail;
-    }
-
-    do {
-        err = MFXVideoCORE_SyncOperation(s->session, sync, 1000);
-    } while (err == MFX_WRN_IN_EXECUTION);
-    if (err < 0) {
-        ret = ff_qsvvpp_print_error(ctx, err, "Error synchronizing the operation");
-        goto fail;
-    }
-
-    ret = av_frame_copy_props(out, in);
-    if (ret < 0)
-        goto fail;
-
-    out->width  = outlink->w;
-    out->height = outlink->h;
-
-    av_reduce(&out->sample_aspect_ratio.num, &out->sample_aspect_ratio.den,
-              (int64_t)in->sample_aspect_ratio.num * outlink->h * link->w,
-              (int64_t)in->sample_aspect_ratio.den * outlink->w * link->h,
-              INT_MAX);
-
-    av_frame_free(&in);
-    return ff_filter_frame(outlink, out);
-fail:
+    ret = ff_qsvvpp_filter_frame(qsv, link, in);
     av_frame_free(&in);
-    av_frame_free(&out);
     return ret;
 }
 
-- 
2.17.1


* [FFmpeg-devel] [PATCH 2/3] lavfi/scale_qsv: re-use VPPContext for scale_qsv filter
  2023-01-17  6:20 [FFmpeg-devel] [PATCH 1/3] lavfi/scale_qsv: simplify scale_qsv filter Xiang, Haihao
@ 2023-01-17  6:20 ` Xiang, Haihao
  2023-01-17  6:20 ` [FFmpeg-devel] [PATCH 3/3] lavfi/vpp_qsv: factor common QSV filter definition Xiang, Haihao
  2023-01-28  6:04 ` [FFmpeg-devel] [PATCH 1/3] lavfi/scale_qsv: simplify scale_qsv filter Xiang, Haihao
  2 siblings, 0 replies; 4+ messages in thread
From: Xiang, Haihao @ 2023-01-17  6:20 UTC (permalink / raw)
  To: ffmpeg-devel; +Cc: Haihao Xiang

From: Haihao Xiang <haihao.xiang@intel.com>

QSVScaleContext and VPPContext share the same base context, and all
features of scale_qsv are already implemented in the vpp_qsv filter, so
scale_qsv can be treated as a special case of vpp_qsv. We may therefore
implement scale_qsv with VPPContext plus a different option array,
preinit callback and set of supported pixel formats, and then remove
QSVScaleContext.
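
In other words, the filter keeps only its own option table and a preinit
callback; everything else comes from vpp_qsv. A condensed sketch of that
callback, taken from the diff below (vpp_preinit and has_passthrough are
the existing vpp_qsv internals):

    static av_cold int qsvscale_preinit(AVFilterContext *ctx)
    {
        VPPContext *vpp = ctx->priv;

        vpp_preinit(ctx);            /* apply the common vpp_qsv option defaults */
        vpp->has_passthrough = 0;    /* scale_qsv always runs the VPP pipeline */

        return 0;
    }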

Signed-off-by: Haihao Xiang <haihao.xiang@intel.com>
---
 libavfilter/Makefile       |   2 +-
 libavfilter/vf_scale_qsv.c | 290 -------------------------------------
 libavfilter/vf_vpp_qsv.c   | 186 ++++++++++++++++--------
 3 files changed, 124 insertions(+), 354 deletions(-)
 delete mode 100644 libavfilter/vf_scale_qsv.c

diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index 211ff4daaa..89e27fc4fa 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -446,7 +446,7 @@ OBJS-$(CONFIG_SCALE_FILTER)                  += vf_scale.o scale_eval.o
 OBJS-$(CONFIG_SCALE_CUDA_FILTER)             += vf_scale_cuda.o scale_eval.o \
                                                 vf_scale_cuda.ptx.o cuda/load_helper.o
 OBJS-$(CONFIG_SCALE_NPP_FILTER)              += vf_scale_npp.o scale_eval.o
-OBJS-$(CONFIG_SCALE_QSV_FILTER)              += vf_scale_qsv.o
+OBJS-$(CONFIG_SCALE_QSV_FILTER)              += vf_vpp_qsv.o
 OBJS-$(CONFIG_SCALE_VAAPI_FILTER)            += vf_scale_vaapi.o scale_eval.o vaapi_vpp.o
 OBJS-$(CONFIG_SCALE_VULKAN_FILTER)           += vf_scale_vulkan.o vulkan.o vulkan_filter.o
 OBJS-$(CONFIG_SCALE2REF_FILTER)              += vf_scale.o scale_eval.o
diff --git a/libavfilter/vf_scale_qsv.c b/libavfilter/vf_scale_qsv.c
deleted file mode 100644
index 8eb8bc9ec1..0000000000
--- a/libavfilter/vf_scale_qsv.c
+++ /dev/null
@@ -1,290 +0,0 @@
-/*
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-/**
- * @file
- * scale video filter - QSV
- */
-
-#include <mfxvideo.h>
-
-#include <stdio.h>
-#include <string.h>
-
-#include "libavutil/avstring.h"
-#include "libavutil/common.h"
-#include "libavutil/eval.h"
-#include "libavutil/hwcontext.h"
-#include "libavutil/hwcontext_qsv.h"
-#include "libavutil/internal.h"
-#include "libavutil/mathematics.h"
-#include "libavutil/opt.h"
-#include "libavutil/pixdesc.h"
-#include "libavutil/time.h"
-#include "libavfilter/qsvvpp.h"
-
-#include "avfilter.h"
-#include "formats.h"
-#include "internal.h"
-#include "video.h"
-
-static const char *const var_names[] = {
-    "in_w",   "iw",
-    "in_h",   "ih",
-    "out_w",  "ow",
-    "out_h",  "oh",
-    "a", "dar",
-    "sar",
-    NULL
-};
-
-enum var_name {
-    VAR_IN_W,   VAR_IW,
-    VAR_IN_H,   VAR_IH,
-    VAR_OUT_W,  VAR_OW,
-    VAR_OUT_H,  VAR_OH,
-    VAR_A, VAR_DAR,
-    VAR_SAR,
-    VARS_NB
-};
-
-#define MFX_IMPL_VIA_MASK(impl) (0x0f00 & (impl))
-
-typedef struct QSVScaleContext {
-    QSVVPPContext qsv;
-
-    mfxExtVPPScaling         scale_conf;
-    int                      mode;
-
-    /**
-     * New dimensions. Special values are:
-     *   0 = original width/height
-     *  -1 = keep original aspect
-     */
-    int w, h;
-
-    /**
-     * Output sw format. AV_PIX_FMT_NONE for no conversion.
-     */
-    enum AVPixelFormat format;
-
-    char *w_expr;               ///< width  expression string
-    char *h_expr;               ///< height expression string
-    char *format_str;
-} QSVScaleContext;
-
-static av_cold int qsvscale_init(AVFilterContext *ctx)
-{
-    QSVScaleContext *s = ctx->priv;
-
-    if (!strcmp(s->format_str, "same")) {
-        s->format = AV_PIX_FMT_NONE;
-    } else {
-        s->format = av_get_pix_fmt(s->format_str);
-        if (s->format == AV_PIX_FMT_NONE) {
-            av_log(ctx, AV_LOG_ERROR, "Unrecognized pixel format: %s\n", s->format_str);
-            return AVERROR(EINVAL);
-        }
-    }
-
-    return 0;
-}
-
-static av_cold void qsvscale_uninit(AVFilterContext *ctx)
-{
-    ff_qsvvpp_close(ctx);
-}
-
-static int qsvscale_config_props(AVFilterLink *outlink)
-{
-    AVFilterContext *ctx = outlink->src;
-    AVFilterLink *inlink = outlink->src->inputs[0];
-    QSVScaleContext   *s = ctx->priv;
-    QSVVPPParam    param = { NULL };
-    mfxExtBuffer    *ext_buf[1];
-    int64_t w, h;
-    double var_values[VARS_NB], res;
-    char *expr;
-    int ret;
-    enum AVPixelFormat in_format;
-
-    var_values[VAR_IN_W]  = var_values[VAR_IW] = inlink->w;
-    var_values[VAR_IN_H]  = var_values[VAR_IH] = inlink->h;
-    var_values[VAR_OUT_W] = var_values[VAR_OW] = NAN;
-    var_values[VAR_OUT_H] = var_values[VAR_OH] = NAN;
-    var_values[VAR_A]     = (double) inlink->w / inlink->h;
-    var_values[VAR_SAR]   = inlink->sample_aspect_ratio.num ?
-        (double) inlink->sample_aspect_ratio.num / inlink->sample_aspect_ratio.den : 1;
-    var_values[VAR_DAR]   = var_values[VAR_A] * var_values[VAR_SAR];
-
-    /* evaluate width and height */
-    av_expr_parse_and_eval(&res, (expr = s->w_expr),
-                           var_names, var_values,
-                           NULL, NULL, NULL, NULL, NULL, 0, ctx);
-    s->w = var_values[VAR_OUT_W] = var_values[VAR_OW] = res;
-    if ((ret = av_expr_parse_and_eval(&res, (expr = s->h_expr),
-                                      var_names, var_values,
-                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
-        goto fail;
-    s->h = var_values[VAR_OUT_H] = var_values[VAR_OH] = res;
-    /* evaluate again the width, as it may depend on the output height */
-    if ((ret = av_expr_parse_and_eval(&res, (expr = s->w_expr),
-                                      var_names, var_values,
-                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
-        goto fail;
-    s->w = res;
-
-    w = s->w;
-    h = s->h;
-
-    /* sanity check params */
-    if (w <  -1 || h <  -1) {
-        av_log(ctx, AV_LOG_ERROR, "Size values less than -1 are not acceptable.\n");
-        return AVERROR(EINVAL);
-    }
-    if (w == -1 && h == -1)
-        s->w = s->h = 0;
-
-    if (!(w = s->w))
-        w = inlink->w;
-    if (!(h = s->h))
-        h = inlink->h;
-    if (w == -1)
-        w = av_rescale(h, inlink->w, inlink->h);
-    if (h == -1)
-        h = av_rescale(w, inlink->h, inlink->w);
-
-    if (w > INT_MAX || h > INT_MAX ||
-        (h * inlink->w) > INT_MAX  ||
-        (w * inlink->h) > INT_MAX)
-        av_log(ctx, AV_LOG_ERROR, "Rescaled value for width or height is too big.\n");
-
-    outlink->w = w;
-    outlink->h = h;
-
-    if (inlink->format == AV_PIX_FMT_QSV) {
-        if (!inlink->hw_frames_ctx || !inlink->hw_frames_ctx->data)
-            return AVERROR(EINVAL);
-        else
-            in_format = ((AVHWFramesContext*)inlink->hw_frames_ctx->data)->sw_format;
-    } else
-        in_format = inlink->format;
-
-    if (s->format == AV_PIX_FMT_NONE)
-        s->format = in_format;
-
-    outlink->frame_rate = inlink->frame_rate;
-    outlink->time_base = av_inv_q(inlink->frame_rate);
-    param.out_sw_format = s->format;
-
-    param.ext_buf                      = ext_buf;
-    memset(&s->scale_conf, 0, sizeof(mfxExtVPPScaling));
-    s->scale_conf.Header.BufferId      = MFX_EXTBUFF_VPP_SCALING;
-    s->scale_conf.Header.BufferSz      = sizeof(mfxExtVPPScaling);
-    s->scale_conf.ScalingMode          = s->mode;
-    param.ext_buf[param.num_ext_buf++] = (mfxExtBuffer*)&s->scale_conf;
-    av_log(ctx, AV_LOG_VERBOSE, "Scaling mode: %d\n", s->mode);
-
-    ret = ff_qsvvpp_init(ctx, &param);
-    if (ret < 0)
-        return ret;
-
-    av_log(ctx, AV_LOG_VERBOSE, "w:%d h:%d -> w:%d h:%d\n",
-           inlink->w, inlink->h, outlink->w, outlink->h);
-
-    if (inlink->sample_aspect_ratio.num)
-        outlink->sample_aspect_ratio = av_mul_q((AVRational){outlink->h*inlink->w,
-                                                             outlink->w*inlink->h},
-                                                inlink->sample_aspect_ratio);
-    else
-        outlink->sample_aspect_ratio = inlink->sample_aspect_ratio;
-
-    return 0;
-
-fail:
-    av_log(ctx, AV_LOG_ERROR,
-           "Error when evaluating the expression '%s'\n", expr);
-    return ret;
-}
-
-static int qsvscale_filter_frame(AVFilterLink *link, AVFrame *in)
-{
-    int               ret = 0;
-    AVFilterContext  *ctx = link->dst;
-    QSVVPPContext    *qsv = ctx->priv;
-
-    ret = ff_qsvvpp_filter_frame(qsv, link, in);
-    av_frame_free(&in);
-    return ret;
-}
-
-#define OFFSET(x) offsetof(QSVScaleContext, x)
-#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
-static const AVOption options[] = {
-    { "w",      "Output video width",  OFFSET(w_expr),     AV_OPT_TYPE_STRING, { .str = "iw"   }, .flags = FLAGS },
-    { "h",      "Output video height", OFFSET(h_expr),     AV_OPT_TYPE_STRING, { .str = "ih"   }, .flags = FLAGS },
-    { "format", "Output pixel format", OFFSET(format_str), AV_OPT_TYPE_STRING, { .str = "same" }, .flags = FLAGS },
-
-    { "mode",      "set scaling mode",    OFFSET(mode),    AV_OPT_TYPE_INT,    { .i64 = MFX_SCALING_MODE_DEFAULT}, MFX_SCALING_MODE_DEFAULT, MFX_SCALING_MODE_QUALITY, FLAGS, "mode"},
-    { "low_power", "low power mode",        0,             AV_OPT_TYPE_CONST,  { .i64 = MFX_SCALING_MODE_LOWPOWER}, INT_MIN, INT_MAX, FLAGS, "mode"},
-    { "hq",        "high quality mode",     0,             AV_OPT_TYPE_CONST,  { .i64 = MFX_SCALING_MODE_QUALITY},  INT_MIN, INT_MAX, FLAGS, "mode"},
-
-    { NULL },
-};
-
-static const AVClass qsvscale_class = {
-    .class_name = "scale_qsv",
-    .item_name  = av_default_item_name,
-    .option     = options,
-    .version    = LIBAVUTIL_VERSION_INT,
-};
-
-static const AVFilterPad qsvscale_inputs[] = {
-    {
-        .name         = "default",
-        .type         = AVMEDIA_TYPE_VIDEO,
-        .filter_frame = qsvscale_filter_frame,
-        .get_buffer.video = ff_qsvvpp_get_video_buffer,
-    },
-};
-
-static const AVFilterPad qsvscale_outputs[] = {
-    {
-        .name         = "default",
-        .type         = AVMEDIA_TYPE_VIDEO,
-        .config_props = qsvscale_config_props,
-    },
-};
-
-const AVFilter ff_vf_scale_qsv = {
-    .name      = "scale_qsv",
-    .description = NULL_IF_CONFIG_SMALL("QuickSync video scaling and format conversion"),
-
-    .init          = qsvscale_init,
-    .uninit        = qsvscale_uninit,
-
-    .priv_size = sizeof(QSVScaleContext),
-    .priv_class = &qsvscale_class,
-
-    FILTER_INPUTS(qsvscale_inputs),
-    FILTER_OUTPUTS(qsvscale_outputs),
-
-    FILTER_SINGLE_PIXFMT(AV_PIX_FMT_QSV),
-
-    .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
-};
diff --git a/libavfilter/vf_vpp_qsv.c b/libavfilter/vf_vpp_qsv.c
index 4a1af7146a..34732bb2c9 100644
--- a/libavfilter/vf_vpp_qsv.c
+++ b/libavfilter/vf_vpp_qsv.c
@@ -23,6 +23,8 @@
 
 #include <float.h>
 
+#include "config_components.h"
+
 #include "libavutil/opt.h"
 #include "libavutil/eval.h"
 #include "libavutil/hwcontext.h"
@@ -101,48 +103,6 @@ typedef struct VPPContext{
     int has_passthrough;        /* apply pass through mode if possible */
 } VPPContext;
 
-static const AVOption options[] = {
-    { "deinterlace", "deinterlace mode: 0=off, 1=bob, 2=advanced", OFFSET(deinterlace), AV_OPT_TYPE_INT,      { .i64 = 0 }, 0, MFX_DEINTERLACING_ADVANCED, .flags = FLAGS, "deinterlace" },
-    { "bob",         "Bob deinterlace mode.",                      0,                   AV_OPT_TYPE_CONST,    { .i64 = MFX_DEINTERLACING_BOB },            .flags = FLAGS, "deinterlace" },
-    { "advanced",    "Advanced deinterlace mode. ",                0,                   AV_OPT_TYPE_CONST,    { .i64 = MFX_DEINTERLACING_ADVANCED },       .flags = FLAGS, "deinterlace" },
-
-    { "denoise",     "denoise level [0, 100]",       OFFSET(denoise),     AV_OPT_TYPE_INT,      { .i64 = 0 }, 0, 100, .flags = FLAGS },
-    { "detail",      "enhancement level [0, 100]",   OFFSET(detail),      AV_OPT_TYPE_INT,      { .i64 = 0 }, 0, 100, .flags = FLAGS },
-    { "framerate",   "output framerate",             OFFSET(framerate),   AV_OPT_TYPE_RATIONAL, { .dbl = 0.0 },0, DBL_MAX, .flags = FLAGS },
-    { "procamp",     "Enable ProcAmp",               OFFSET(procamp),     AV_OPT_TYPE_INT,      { .i64 = 0 }, 0, 1, .flags = FLAGS},
-    { "hue",         "ProcAmp hue",                  OFFSET(hue),         AV_OPT_TYPE_FLOAT,    { .dbl = 0.0 }, -180.0, 180.0, .flags = FLAGS},
-    { "saturation",  "ProcAmp saturation",           OFFSET(saturation),  AV_OPT_TYPE_FLOAT,    { .dbl = 1.0 }, 0.0, 10.0, .flags = FLAGS},
-    { "contrast",    "ProcAmp contrast",             OFFSET(contrast),    AV_OPT_TYPE_FLOAT,    { .dbl = 1.0 }, 0.0, 10.0, .flags = FLAGS},
-    { "brightness",  "ProcAmp brightness",           OFFSET(brightness),  AV_OPT_TYPE_FLOAT,    { .dbl = 0.0 }, -100.0, 100.0, .flags = FLAGS},
-
-    { "transpose",  "set transpose direction",       OFFSET(transpose),   AV_OPT_TYPE_INT,      { .i64 = -1 }, -1, 6, FLAGS, "transpose"},
-        { "cclock_hflip",  "rotate counter-clockwise with horizontal flip",  0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CCLOCK_FLIP }, .flags=FLAGS, .unit = "transpose" },
-        { "clock",         "rotate clockwise",                               0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CLOCK       }, .flags=FLAGS, .unit = "transpose" },
-        { "cclock",        "rotate counter-clockwise",                       0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CCLOCK      }, .flags=FLAGS, .unit = "transpose" },
-        { "clock_hflip",   "rotate clockwise with horizontal flip",          0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CLOCK_FLIP  }, .flags=FLAGS, .unit = "transpose" },
-        { "reversal",      "rotate by half-turn",                            0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_REVERSAL    }, .flags=FLAGS, .unit = "transpose" },
-        { "hflip",         "flip horizontally",                              0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_HFLIP       }, .flags=FLAGS, .unit = "transpose" },
-        { "vflip",         "flip vertically",                                0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_VFLIP       }, .flags=FLAGS, .unit = "transpose" },
-
-    { "cw",   "set the width crop area expression",   OFFSET(cw), AV_OPT_TYPE_STRING, { .str = "iw" }, 0, 0, FLAGS },
-    { "ch",   "set the height crop area expression",  OFFSET(ch), AV_OPT_TYPE_STRING, { .str = "ih" }, 0, 0, FLAGS },
-    { "cx",   "set the x crop area expression",       OFFSET(cx), AV_OPT_TYPE_STRING, { .str = "(in_w-out_w)/2" }, 0, 0, FLAGS },
-    { "cy",   "set the y crop area expression",       OFFSET(cy), AV_OPT_TYPE_STRING, { .str = "(in_h-out_h)/2" }, 0, 0, FLAGS },
-
-    { "w",      "Output video width(0=input video width, -1=keep input video aspect)",  OFFSET(ow), AV_OPT_TYPE_STRING, { .str="cw" }, 0, 255, .flags = FLAGS },
-    { "width",  "Output video width(0=input video width, -1=keep input video aspect)",  OFFSET(ow), AV_OPT_TYPE_STRING, { .str="cw" }, 0, 255, .flags = FLAGS },
-    { "h",      "Output video height(0=input video height, -1=keep input video aspect)", OFFSET(oh), AV_OPT_TYPE_STRING, { .str="w*ch/cw" }, 0, 255, .flags = FLAGS },
-    { "height", "Output video height(0=input video height, -1=keep input video aspect)", OFFSET(oh), AV_OPT_TYPE_STRING, { .str="w*ch/cw" }, 0, 255, .flags = FLAGS },
-    { "format", "Output pixel format", OFFSET(output_format_str), AV_OPT_TYPE_STRING, { .str = "same" }, .flags = FLAGS },
-    { "async_depth", "Internal parallelization depth, the higher the value the higher the latency.", OFFSET(qsv.async_depth), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, .flags = FLAGS },
-    { "scale_mode", "scale & format conversion mode: 0=auto, 1=low power, 2=high quality", OFFSET(scale_mode), AV_OPT_TYPE_INT, { .i64 = MFX_SCALING_MODE_DEFAULT }, MFX_SCALING_MODE_DEFAULT, MFX_SCALING_MODE_QUALITY, .flags = FLAGS, "scale mode" },
-    { "auto",      "auto mode",             0,    AV_OPT_TYPE_CONST,  { .i64 = MFX_SCALING_MODE_DEFAULT},  INT_MIN, INT_MAX, FLAGS, "scale mode"},
-    { "low_power", "low power mode",        0,    AV_OPT_TYPE_CONST,  { .i64 = MFX_SCALING_MODE_LOWPOWER}, INT_MIN, INT_MAX, FLAGS, "scale mode"},
-    { "hq",        "high quality mode",     0,    AV_OPT_TYPE_CONST,  { .i64 = MFX_SCALING_MODE_QUALITY},  INT_MIN, INT_MAX, FLAGS, "scale mode"},
-
-    { NULL }
-};
-
 static const char *const var_names[] = {
     "iw", "in_w",
     "ih", "in_h",
@@ -633,6 +593,30 @@ eof:
     return 0;
 }
 
+static av_cold void vpp_uninit(AVFilterContext *ctx)
+{
+    ff_qsvvpp_close(ctx);
+}
+
+static const AVFilterPad vpp_inputs[] = {
+    {
+        .name          = "default",
+        .type          = AVMEDIA_TYPE_VIDEO,
+        .config_props  = config_input,
+        .get_buffer.video = ff_qsvvpp_get_video_buffer,
+    },
+};
+
+static const AVFilterPad vpp_outputs[] = {
+    {
+        .name          = "default",
+        .type          = AVMEDIA_TYPE_VIDEO,
+        .config_props  = config_output,
+    },
+};
+
+#if CONFIG_VPP_QSV_FILTER
+
 static int query_formats(AVFilterContext *ctx)
 {
     int ret;
@@ -660,10 +644,47 @@ static int query_formats(AVFilterContext *ctx)
                           &ctx->outputs[0]->incfg.formats);
 }
 
-static av_cold void vpp_uninit(AVFilterContext *ctx)
-{
-    ff_qsvvpp_close(ctx);
-}
+static const AVOption options[] = {
+    { "deinterlace", "deinterlace mode: 0=off, 1=bob, 2=advanced", OFFSET(deinterlace), AV_OPT_TYPE_INT,      { .i64 = 0 }, 0, MFX_DEINTERLACING_ADVANCED, .flags = FLAGS, "deinterlace" },
+    { "bob",         "Bob deinterlace mode.",                      0,                   AV_OPT_TYPE_CONST,    { .i64 = MFX_DEINTERLACING_BOB },            .flags = FLAGS, "deinterlace" },
+    { "advanced",    "Advanced deinterlace mode. ",                0,                   AV_OPT_TYPE_CONST,    { .i64 = MFX_DEINTERLACING_ADVANCED },       .flags = FLAGS, "deinterlace" },
+
+    { "denoise",     "denoise level [0, 100]",       OFFSET(denoise),     AV_OPT_TYPE_INT,      { .i64 = 0 }, 0, 100, .flags = FLAGS },
+    { "detail",      "enhancement level [0, 100]",   OFFSET(detail),      AV_OPT_TYPE_INT,      { .i64 = 0 }, 0, 100, .flags = FLAGS },
+    { "framerate",   "output framerate",             OFFSET(framerate),   AV_OPT_TYPE_RATIONAL, { .dbl = 0.0 },0, DBL_MAX, .flags = FLAGS },
+    { "procamp",     "Enable ProcAmp",               OFFSET(procamp),     AV_OPT_TYPE_INT,      { .i64 = 0 }, 0, 1, .flags = FLAGS},
+    { "hue",         "ProcAmp hue",                  OFFSET(hue),         AV_OPT_TYPE_FLOAT,    { .dbl = 0.0 }, -180.0, 180.0, .flags = FLAGS},
+    { "saturation",  "ProcAmp saturation",           OFFSET(saturation),  AV_OPT_TYPE_FLOAT,    { .dbl = 1.0 }, 0.0, 10.0, .flags = FLAGS},
+    { "contrast",    "ProcAmp contrast",             OFFSET(contrast),    AV_OPT_TYPE_FLOAT,    { .dbl = 1.0 }, 0.0, 10.0, .flags = FLAGS},
+    { "brightness",  "ProcAmp brightness",           OFFSET(brightness),  AV_OPT_TYPE_FLOAT,    { .dbl = 0.0 }, -100.0, 100.0, .flags = FLAGS},
+
+    { "transpose",  "set transpose direction",       OFFSET(transpose),   AV_OPT_TYPE_INT,      { .i64 = -1 }, -1, 6, FLAGS, "transpose"},
+        { "cclock_hflip",  "rotate counter-clockwise with horizontal flip",  0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CCLOCK_FLIP }, .flags=FLAGS, .unit = "transpose" },
+        { "clock",         "rotate clockwise",                               0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CLOCK       }, .flags=FLAGS, .unit = "transpose" },
+        { "cclock",        "rotate counter-clockwise",                       0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CCLOCK      }, .flags=FLAGS, .unit = "transpose" },
+        { "clock_hflip",   "rotate clockwise with horizontal flip",          0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_CLOCK_FLIP  }, .flags=FLAGS, .unit = "transpose" },
+        { "reversal",      "rotate by half-turn",                            0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_REVERSAL    }, .flags=FLAGS, .unit = "transpose" },
+        { "hflip",         "flip horizontally",                              0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_HFLIP       }, .flags=FLAGS, .unit = "transpose" },
+        { "vflip",         "flip vertically",                                0, AV_OPT_TYPE_CONST, { .i64 = TRANSPOSE_VFLIP       }, .flags=FLAGS, .unit = "transpose" },
+
+    { "cw",   "set the width crop area expression",   OFFSET(cw), AV_OPT_TYPE_STRING, { .str = "iw" }, 0, 0, FLAGS },
+    { "ch",   "set the height crop area expression",  OFFSET(ch), AV_OPT_TYPE_STRING, { .str = "ih" }, 0, 0, FLAGS },
+    { "cx",   "set the x crop area expression",       OFFSET(cx), AV_OPT_TYPE_STRING, { .str = "(in_w-out_w)/2" }, 0, 0, FLAGS },
+    { "cy",   "set the y crop area expression",       OFFSET(cy), AV_OPT_TYPE_STRING, { .str = "(in_h-out_h)/2" }, 0, 0, FLAGS },
+
+    { "w",      "Output video width(0=input video width, -1=keep input video aspect)",  OFFSET(ow), AV_OPT_TYPE_STRING, { .str="cw" }, 0, 255, .flags = FLAGS },
+    { "width",  "Output video width(0=input video width, -1=keep input video aspect)",  OFFSET(ow), AV_OPT_TYPE_STRING, { .str="cw" }, 0, 255, .flags = FLAGS },
+    { "h",      "Output video height(0=input video height, -1=keep input video aspect)", OFFSET(oh), AV_OPT_TYPE_STRING, { .str="w*ch/cw" }, 0, 255, .flags = FLAGS },
+    { "height", "Output video height(0=input video height, -1=keep input video aspect)", OFFSET(oh), AV_OPT_TYPE_STRING, { .str="w*ch/cw" }, 0, 255, .flags = FLAGS },
+    { "format", "Output pixel format", OFFSET(output_format_str), AV_OPT_TYPE_STRING, { .str = "same" }, .flags = FLAGS },
+    { "async_depth", "Internal parallelization depth, the higher the value the higher the latency.", OFFSET(qsv.async_depth), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, .flags = FLAGS },
+    { "scale_mode", "scale & format conversion mode: 0=auto, 1=low power, 2=high quality", OFFSET(scale_mode), AV_OPT_TYPE_INT, { .i64 = MFX_SCALING_MODE_DEFAULT }, MFX_SCALING_MODE_DEFAULT, MFX_SCALING_MODE_QUALITY, .flags = FLAGS, "scale mode" },
+    { "auto",      "auto mode",             0,    AV_OPT_TYPE_CONST,  { .i64 = MFX_SCALING_MODE_DEFAULT},  INT_MIN, INT_MAX, FLAGS, "scale mode"},
+    { "low_power", "low power mode",        0,    AV_OPT_TYPE_CONST,  { .i64 = MFX_SCALING_MODE_LOWPOWER}, INT_MIN, INT_MAX, FLAGS, "scale mode"},
+    { "hq",        "high quality mode",     0,    AV_OPT_TYPE_CONST,  { .i64 = MFX_SCALING_MODE_QUALITY},  INT_MIN, INT_MAX, FLAGS, "scale mode"},
+
+    { NULL }
+};
 
 static const AVClass vpp_class = {
     .class_name = "vpp_qsv",
@@ -672,23 +693,6 @@ static const AVClass vpp_class = {
     .version    = LIBAVUTIL_VERSION_INT,
 };
 
-static const AVFilterPad vpp_inputs[] = {
-    {
-        .name          = "default",
-        .type          = AVMEDIA_TYPE_VIDEO,
-        .config_props  = config_input,
-        .get_buffer.video = ff_qsvvpp_get_video_buffer,
-    },
-};
-
-static const AVFilterPad vpp_outputs[] = {
-    {
-        .name          = "default",
-        .type          = AVMEDIA_TYPE_VIDEO,
-        .config_props  = config_output,
-    },
-};
-
 const AVFilter ff_vf_vpp_qsv = {
     .name          = "vpp_qsv",
     .description   = NULL_IF_CONFIG_SMALL("Quick Sync Video VPP."),
@@ -703,3 +707,59 @@ const AVFilter ff_vf_vpp_qsv = {
     .priv_class    = &vpp_class,
     .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
 };
+
+#endif
+
+#if CONFIG_SCALE_QSV_FILTER
+
+static const AVOption qsvscale_options[] = {
+    { "w",      "Output video width(0=input video width, -1=keep input video aspect)",  OFFSET(ow), AV_OPT_TYPE_STRING, { .str = "iw"   }, .flags = FLAGS },
+    { "h",      "Output video height(0=input video height, -1=keep input video aspect)", OFFSET(oh), AV_OPT_TYPE_STRING, { .str = "ih"   }, .flags = FLAGS },
+    { "format", "Output pixel format", OFFSET(output_format_str), AV_OPT_TYPE_STRING, { .str = "same" }, .flags = FLAGS },
+
+    { "mode",      "set scaling mode",    OFFSET(scale_mode),    AV_OPT_TYPE_INT,    { .i64 = MFX_SCALING_MODE_DEFAULT}, MFX_SCALING_MODE_DEFAULT, MFX_SCALING_MODE_QUALITY, FLAGS, "mode"},
+    { "low_power", "low power mode",        0,             AV_OPT_TYPE_CONST,  { .i64 = MFX_SCALING_MODE_LOWPOWER}, INT_MIN, INT_MAX, FLAGS, "mode"},
+    { "hq",        "high quality mode",     0,             AV_OPT_TYPE_CONST,  { .i64 = MFX_SCALING_MODE_QUALITY},  INT_MIN, INT_MAX, FLAGS, "mode"},
+
+    { NULL },
+};
+
+static av_cold int qsvscale_preinit(AVFilterContext *ctx)
+{
+    VPPContext  *vpp  = ctx->priv;
+
+    vpp_preinit(ctx);
+    vpp->has_passthrough = 0;
+
+    return 0;
+}
+
+static const AVClass qsvscale_class = {
+    .class_name = "scale_qsv",
+    .item_name  = av_default_item_name,
+    .option     = qsvscale_options,
+    .version    = LIBAVUTIL_VERSION_INT,
+};
+
+const AVFilter ff_vf_scale_qsv = {
+    .name               = "scale_qsv",
+    .description        = NULL_IF_CONFIG_SMALL("Quick Sync Video scaling and format conversion"),
+
+    .preinit            = qsvscale_preinit,
+    .init               = vpp_init,
+    .uninit             = vpp_uninit,
+
+    .priv_size          = sizeof(VPPContext),
+    .priv_class         = &qsvscale_class,
+
+    FILTER_INPUTS(vpp_inputs),
+    FILTER_OUTPUTS(vpp_outputs),
+
+    FILTER_SINGLE_PIXFMT(AV_PIX_FMT_QSV),
+
+    .activate           = activate,
+
+    .flags_internal     = FF_FILTER_FLAG_HWFRAME_AWARE,
+};
+
+#endif
-- 
2.17.1


* [FFmpeg-devel] [PATCH 3/3] lavfi/vpp_qsv: factor common QSV filter definition
  2023-01-17  6:20 [FFmpeg-devel] [PATCH 1/3] lavfi/scale_qsv: simplify scale_qsv filter Xiang, Haihao
  2023-01-17  6:20 ` [FFmpeg-devel] [PATCH 2/3] lavfi/scale_qsv: re-use VPPContext for " Xiang, Haihao
@ 2023-01-17  6:20 ` Xiang, Haihao
  2023-01-28  6:04 ` [FFmpeg-devel] [PATCH 1/3] lavfi/scale_qsv: simplify scale_qsv filter Xiang, Haihao
  2 siblings, 0 replies; 4+ messages in thread
From: Xiang, Haihao @ 2023-01-17  6:20 UTC (permalink / raw)
  To: ffmpeg-devel; +Cc: Haihao Xiang

From: Haihao Xiang <haihao.xiang@intel.com>

Signed-off-by: Haihao Xiang <haihao.xiang@intel.com>
---
 libavfilter/vf_vpp_qsv.c | 126 ++++++++++++++++-----------------------
 1 file changed, 51 insertions(+), 75 deletions(-)

diff --git a/libavfilter/vf_vpp_qsv.c b/libavfilter/vf_vpp_qsv.c
index 34732bb2c9..34e530e244 100644
--- a/libavfilter/vf_vpp_qsv.c
+++ b/libavfilter/vf_vpp_qsv.c
@@ -615,36 +615,31 @@ static const AVFilterPad vpp_outputs[] = {
     },
 };
 
-#if CONFIG_VPP_QSV_FILTER
-
-static int query_formats(AVFilterContext *ctx)
-{
-    int ret;
-    static const enum AVPixelFormat in_pix_fmts[] = {
-        AV_PIX_FMT_YUV420P,
-        AV_PIX_FMT_NV12,
-        AV_PIX_FMT_YUYV422,
-        AV_PIX_FMT_RGB32,
-        AV_PIX_FMT_P010,
-        AV_PIX_FMT_QSV,
-        AV_PIX_FMT_NONE
-    };
-    static const enum AVPixelFormat out_pix_fmts[] = {
-        AV_PIX_FMT_NV12,
-        AV_PIX_FMT_P010,
-        AV_PIX_FMT_QSV,
-        AV_PIX_FMT_NONE
-    };
+#define DEFINE_QSV_FILTER(x, sn, ln, fmts) \
+static const AVClass x##_class = { \
+    .class_name = #sn "_qsv", \
+    .item_name  = av_default_item_name, \
+    .option     = x##_options, \
+    .version    = LIBAVUTIL_VERSION_INT, \
+}; \
+const AVFilter ff_vf_##sn##_qsv = { \
+    .name           = #sn "_qsv", \
+    .description    = NULL_IF_CONFIG_SMALL("Quick Sync Video " #ln), \
+    .preinit        = x##_preinit, \
+    .init           = vpp_init, \
+    .uninit         = vpp_uninit, \
+    .priv_size      = sizeof(VPPContext), \
+    .priv_class     = &x##_class, \
+    FILTER_INPUTS(vpp_inputs), \
+    FILTER_OUTPUTS(vpp_outputs), \
+    fmts, \
+    .activate       = activate, \
+    .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE, \
+};
 
-    ret = ff_formats_ref(ff_make_format_list(in_pix_fmts),
-                         &ctx->inputs[0]->outcfg.formats);
-    if (ret < 0)
-        return ret;
-    return ff_formats_ref(ff_make_format_list(out_pix_fmts),
-                          &ctx->outputs[0]->incfg.formats);
-}
+#if CONFIG_VPP_QSV_FILTER
 
-static const AVOption options[] = {
+static const AVOption vpp_options[] = {
     { "deinterlace", "deinterlace mode: 0=off, 1=bob, 2=advanced", OFFSET(deinterlace), AV_OPT_TYPE_INT,      { .i64 = 0 }, 0, MFX_DEINTERLACING_ADVANCED, .flags = FLAGS, "deinterlace" },
     { "bob",         "Bob deinterlace mode.",                      0,                   AV_OPT_TYPE_CONST,    { .i64 = MFX_DEINTERLACING_BOB },            .flags = FLAGS, "deinterlace" },
     { "advanced",    "Advanced deinterlace mode. ",                0,                   AV_OPT_TYPE_CONST,    { .i64 = MFX_DEINTERLACING_ADVANCED },       .flags = FLAGS, "deinterlace" },
@@ -686,27 +681,34 @@ static const AVOption options[] = {
     { NULL }
 };
 
-static const AVClass vpp_class = {
-    .class_name = "vpp_qsv",
-    .item_name  = av_default_item_name,
-    .option     = options,
-    .version    = LIBAVUTIL_VERSION_INT,
-};
+static int vpp_query_formats(AVFilterContext *ctx)
+{
+    int ret;
+    static const enum AVPixelFormat in_pix_fmts[] = {
+        AV_PIX_FMT_YUV420P,
+        AV_PIX_FMT_NV12,
+        AV_PIX_FMT_YUYV422,
+        AV_PIX_FMT_RGB32,
+        AV_PIX_FMT_P010,
+        AV_PIX_FMT_QSV,
+        AV_PIX_FMT_NONE
+    };
+    static const enum AVPixelFormat out_pix_fmts[] = {
+        AV_PIX_FMT_NV12,
+        AV_PIX_FMT_P010,
+        AV_PIX_FMT_QSV,
+        AV_PIX_FMT_NONE
+    };
 
-const AVFilter ff_vf_vpp_qsv = {
-    .name          = "vpp_qsv",
-    .description   = NULL_IF_CONFIG_SMALL("Quick Sync Video VPP."),
-    .priv_size     = sizeof(VPPContext),
-    .preinit       = vpp_preinit,
-    .init          = vpp_init,
-    .uninit        = vpp_uninit,
-    FILTER_INPUTS(vpp_inputs),
-    FILTER_OUTPUTS(vpp_outputs),
-    FILTER_QUERY_FUNC(query_formats),
-    .activate      = activate,
-    .priv_class    = &vpp_class,
-    .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
-};
+    ret = ff_formats_ref(ff_make_format_list(in_pix_fmts),
+                         &ctx->inputs[0]->outcfg.formats);
+    if (ret < 0)
+        return ret;
+    return ff_formats_ref(ff_make_format_list(out_pix_fmts),
+                          &ctx->outputs[0]->incfg.formats);
+}
+
+DEFINE_QSV_FILTER(vpp, vpp, "VPP", FILTER_QUERY_FUNC(vpp_query_formats));
 
 #endif
 
@@ -734,32 +736,6 @@ static av_cold int qsvscale_preinit(AVFilterContext *ctx)
     return 0;
 }
 
-static const AVClass qsvscale_class = {
-    .class_name = "scale_qsv",
-    .item_name  = av_default_item_name,
-    .option     = qsvscale_options,
-    .version    = LIBAVUTIL_VERSION_INT,
-};
-
-const AVFilter ff_vf_scale_qsv = {
-    .name               = "scale_qsv",
-    .description        = NULL_IF_CONFIG_SMALL("Quick Sync Video scaling and format conversion"),
-
-    .preinit            = qsvscale_preinit,
-    .init               = vpp_init,
-    .uninit             = vpp_uninit,
-
-    .priv_size          = sizeof(VPPContext),
-    .priv_class         = &qsvscale_class,
-
-    FILTER_INPUTS(vpp_inputs),
-    FILTER_OUTPUTS(vpp_outputs),
-
-    FILTER_SINGLE_PIXFMT(AV_PIX_FMT_QSV),
-
-    .activate           = activate,
-
-    .flags_internal     = FF_FILTER_FLAG_HWFRAME_AWARE,
-};
+DEFINE_QSV_FILTER(qsvscale, scale, "scaling and format conversion", FILTER_SINGLE_PIXFMT(AV_PIX_FMT_QSV));
 
 #endif
-- 
2.17.1


* Re: [FFmpeg-devel] [PATCH 1/3] lavfi/scale_qsv: simplify scale_qsv filter
  2023-01-17  6:20 [FFmpeg-devel] [PATCH 1/3] lavfi/scale_qsv: simplify scale_qsv filter Xiang, Haihao
  2023-01-17  6:20 ` [FFmpeg-devel] [PATCH 2/3] lavfi/scale_qsv: re-use VPPContext for " Xiang, Haihao
  2023-01-17  6:20 ` [FFmpeg-devel] [PATCH 3/3] lavfi/vpp_qsv: factor common QSV filter definition Xiang, Haihao
@ 2023-01-28  6:04 ` Xiang, Haihao
  2 siblings, 0 replies; 4+ messages in thread
From: Xiang, Haihao @ 2023-01-28  6:04 UTC (permalink / raw)
  To: ffmpeg-devel

On Tue, 2023-01-17 at 14:20 +0800, Xiang, Haihao wrote:
> From: Haihao Xiang <haihao.xiang@intel.com>
> 
> Use QSVVPPContext as a base context of QSVScaleContext, hence we may
> re-use functions defined for QSVVPPContext to manage MFX session for
> scale_qsv filter.
> 
> In addition, system memory has been taken into account in
> QSVVVPPContext, we may add support for non-QSV pixel formats in the
> future.
> 
> Signed-off-by: Haihao Xiang <haihao.xiang@intel.com>
> ---
>  configure                  |   1 +
>  libavfilter/vf_scale_qsv.c | 441 +++----------------------------------
>  2 files changed, 35 insertions(+), 407 deletions(-)
> 
> diff --git a/configure b/configure
> index 6e88c32223..86f930024b 100755
> --- a/configure
> +++ b/configure
> @@ -3721,6 +3721,7 @@ sab_filter_deps="gpl swscale"
>  scale2ref_filter_deps="swscale"
>  scale_filter_deps="swscale"
>  scale_qsv_filter_deps="libmfx"
> +scale_qsv_filter_select="qsvvpp"
>  scdet_filter_select="scene_sad"
>  select_filter_select="scene_sad"
>  sharpness_vaapi_filter_deps="vaapi"
> diff --git a/libavfilter/vf_scale_qsv.c b/libavfilter/vf_scale_qsv.c
> index a89a3ba6e6..8eb8bc9ec1 100644
> --- a/libavfilter/vf_scale_qsv.c
> +++ b/libavfilter/vf_scale_qsv.c
> @@ -66,35 +66,11 @@ enum var_name {
>  #define MFX_IMPL_VIA_MASK(impl) (0x0f00 & (impl))
>  
>  typedef struct QSVScaleContext {
> -    const AVClass *class;
> -
> -    /* a clone of the main session, used internally for scaling */
> -    mfxSession   session;
> -
> -    mfxMemId *mem_ids_in;
> -    int nb_mem_ids_in;
> -
> -    mfxMemId *mem_ids_out;
> -    int nb_mem_ids_out;
> -
> -    mfxFrameSurface1 **surface_ptrs_in;
> -    int             nb_surface_ptrs_in;
> -
> -    mfxFrameSurface1 **surface_ptrs_out;
> -    int             nb_surface_ptrs_out;
> -
> -#if QSV_HAVE_OPAQUE
> -    mfxExtOpaqueSurfaceAlloc opaque_alloc;
> -#endif
> +    QSVVPPContext qsv;
>  
>      mfxExtVPPScaling         scale_conf;
>      int                      mode;
>  
> -    mfxExtBuffer             *ext_buffers[2];
> -    int                      num_ext_buf;
> -
> -    int shift_width, shift_height;
> -
>      /**
>       * New dimensions. Special values are:
>       *   0 = original width/height
> @@ -131,338 +107,21 @@ static av_cold int qsvscale_init(AVFilterContext *ctx)
>  
>  static av_cold void qsvscale_uninit(AVFilterContext *ctx)
>  {
> -    QSVScaleContext *s = ctx->priv;
> -
> -    if (s->session) {
> -        MFXClose(s->session);
> -        s->session = NULL;
> -    }
> -
> -    av_freep(&s->mem_ids_in);
> -    av_freep(&s->mem_ids_out);
> -    s->nb_mem_ids_in  = 0;
> -    s->nb_mem_ids_out = 0;
> -
> -    av_freep(&s->surface_ptrs_in);
> -    av_freep(&s->surface_ptrs_out);
> -    s->nb_surface_ptrs_in  = 0;
> -    s->nb_surface_ptrs_out = 0;
> -}
> -
> -static int init_out_pool(AVFilterContext *ctx,
> -                         int out_width, int out_height)
> -{
> -    QSVScaleContext *s = ctx->priv;
> -    AVFilterLink *outlink = ctx->outputs[0];
> -
> -    AVHWFramesContext *in_frames_ctx;
> -    AVHWFramesContext *out_frames_ctx;
> -    AVQSVFramesContext *in_frames_hwctx;
> -    AVQSVFramesContext *out_frames_hwctx;
> -    enum AVPixelFormat in_format;
> -    enum AVPixelFormat out_format;
> -    int i, ret;
> -
> -    /* check that we have a hw context */
> -    if (!ctx->inputs[0]->hw_frames_ctx) {
> -        av_log(ctx, AV_LOG_ERROR, "No hw context provided on input\n");
> -        return AVERROR(EINVAL);
> -    }
> -    in_frames_ctx   = (AVHWFramesContext*)ctx->inputs[0]->hw_frames_ctx-
> >data;
> -    in_frames_hwctx = in_frames_ctx->hwctx;
> -
> -    in_format     = in_frames_ctx->sw_format;
> -    out_format    = (s->format == AV_PIX_FMT_NONE) ? in_format : s->format;
> -
> -    outlink->hw_frames_ctx = av_hwframe_ctx_alloc(in_frames_ctx->device_ref);
> -    if (!outlink->hw_frames_ctx)
> -        return AVERROR(ENOMEM);
> -    out_frames_ctx   = (AVHWFramesContext*)outlink->hw_frames_ctx->data;
> -    out_frames_hwctx = out_frames_ctx->hwctx;
> -
> -    out_frames_ctx->format            = AV_PIX_FMT_QSV;
> -    out_frames_ctx->width             = FFALIGN(out_width,  16);
> -    out_frames_ctx->height            = FFALIGN(out_height, 16);
> -    out_frames_ctx->sw_format         = out_format;
> -    out_frames_ctx->initial_pool_size = 4;
> -
> -    out_frames_hwctx->frame_type = in_frames_hwctx->frame_type |
> MFX_MEMTYPE_FROM_VPPOUT;
> -
> -    ret = ff_filter_init_hw_frames(ctx, outlink, 32);
> -    if (ret < 0)
> -        return ret;
> -
> -    ret = av_hwframe_ctx_init(outlink->hw_frames_ctx);
> -    if (ret < 0)
> -        return ret;
> -
> -    for (i = 0; i < out_frames_hwctx->nb_surfaces; i++) {
> -        mfxFrameInfo *info = &out_frames_hwctx->surfaces[i].Info;
> -        info->CropW = out_width;
> -        info->CropH = out_height;
> -    }
> -
> -    return 0;
> -}
> -
> -static mfxStatus frame_alloc(mfxHDL pthis, mfxFrameAllocRequest *req,
> -                             mfxFrameAllocResponse *resp)
> -{
> -    AVFilterContext *ctx = pthis;
> -    QSVScaleContext   *s = ctx->priv;
> -
> -    if (!(req->Type & MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET) ||
> -        !(req->Type & (MFX_MEMTYPE_FROM_VPPIN | MFX_MEMTYPE_FROM_VPPOUT)) ||
> -        !(req->Type & MFX_MEMTYPE_EXTERNAL_FRAME))
> -        return MFX_ERR_UNSUPPORTED;
> -
> -    if (req->Type & MFX_MEMTYPE_FROM_VPPIN) {
> -        resp->mids           = s->mem_ids_in;
> -        resp->NumFrameActual = s->nb_mem_ids_in;
> -    } else {
> -        resp->mids           = s->mem_ids_out;
> -        resp->NumFrameActual = s->nb_mem_ids_out;
> -    }
> -
> -    return MFX_ERR_NONE;
> -}
> -
> -static mfxStatus frame_free(mfxHDL pthis, mfxFrameAllocResponse *resp)
> -{
> -    return MFX_ERR_NONE;
> -}
> -
> -static mfxStatus frame_lock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr)
> -{
> -    return MFX_ERR_UNSUPPORTED;
> -}
> -
> -static mfxStatus frame_unlock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr)
> -{
> -    return MFX_ERR_UNSUPPORTED;
> -}
> -
> -static mfxStatus frame_get_hdl(mfxHDL pthis, mfxMemId mid, mfxHDL *hdl)
> -{
> -    mfxHDLPair *pair_dst = (mfxHDLPair*)hdl;
> -    mfxHDLPair *pair_src = (mfxHDLPair*)mid;
> -
> -    pair_dst->first = pair_src->first;
> -
> -    if (pair_src->second != (mfxMemId)MFX_INFINITE)
> -        pair_dst->second = pair_src->second;
> -    return MFX_ERR_NONE;
> -}
> -
> -static int init_out_session(AVFilterContext *ctx)
> -{
> -
> -    QSVScaleContext                   *s = ctx->priv;
> -    AVHWFramesContext     *in_frames_ctx = (AVHWFramesContext*)ctx->inputs[0]->hw_frames_ctx->data;
> -    AVHWFramesContext    *out_frames_ctx = (AVHWFramesContext*)ctx->outputs[0]->hw_frames_ctx->data;
> -    AVQSVFramesContext  *in_frames_hwctx = in_frames_ctx->hwctx;
> -    AVQSVFramesContext *out_frames_hwctx = out_frames_ctx->hwctx;
> -    AVQSVDeviceContext     *device_hwctx = in_frames_ctx->device_ctx->hwctx;
> -
> -    int opaque = 0;
> -
> -    mfxHDL handle = NULL;
> -    mfxHandleType handle_type;
> -    mfxVersion ver;
> -    mfxIMPL impl;
> -    mfxVideoParam par;
> -    mfxStatus err;
> -    int i, ret;
> -
> -#if QSV_HAVE_OPAQUE
> -    opaque = !!(in_frames_hwctx->frame_type & MFX_MEMTYPE_OPAQUE_FRAME);
> -#endif
> -    s->num_ext_buf = 0;
> -
> -    /* extract the properties of the "master" session given to us */
> -    err = MFXQueryIMPL(device_hwctx->session, &impl);
> -    if (err == MFX_ERR_NONE)
> -        err = MFXQueryVersion(device_hwctx->session, &ver);
> -    if (err != MFX_ERR_NONE) {
> -        av_log(ctx, AV_LOG_ERROR, "Error querying the session attributes\n");
> -        return AVERROR_UNKNOWN;
> -    }
> -
> -    if (MFX_IMPL_VIA_VAAPI == MFX_IMPL_VIA_MASK(impl)) {
> -        handle_type = MFX_HANDLE_VA_DISPLAY;
> -    } else if (MFX_IMPL_VIA_D3D11 == MFX_IMPL_VIA_MASK(impl)) {
> -        handle_type = MFX_HANDLE_D3D11_DEVICE;
> -    } else if (MFX_IMPL_VIA_D3D9 == MFX_IMPL_VIA_MASK(impl)) {
> -        handle_type = MFX_HANDLE_D3D9_DEVICE_MANAGER;
> -    } else {
> -        av_log(ctx, AV_LOG_ERROR, "Error unsupported handle type\n");
> -        return AVERROR_UNKNOWN;
> -    }
> -
> -    err = MFXVideoCORE_GetHandle(device_hwctx->session, handle_type, &handle);
> -    if (err < 0)
> -        return ff_qsvvpp_print_error(ctx, err, "Error getting the session handle");
> -    else if (err > 0) {
> -        ff_qsvvpp_print_warning(ctx, err, "Warning in getting the session handle");
> -        return AVERROR_UNKNOWN;
> -    }
> -
> -    /* create a "slave" session with those same properties, to be used for
> -     * actual scaling */
> -    ret = ff_qsvvpp_create_mfx_session(ctx, device_hwctx->loader, impl, &ver,
> -                                       &s->session);
> -    if (ret)
> -        return ret;
> -
> -    if (handle) {
> -        err = MFXVideoCORE_SetHandle(s->session, handle_type, handle);
> -        if (err != MFX_ERR_NONE)
> -            return AVERROR_UNKNOWN;
> -    }
> -
> -    if (QSV_RUNTIME_VERSION_ATLEAST(ver, 1, 25)) {
> -        err = MFXJoinSession(device_hwctx->session, s->session);
> -            if (err != MFX_ERR_NONE)
> -                return AVERROR_UNKNOWN;
> -    }
> -
> -    memset(&par, 0, sizeof(par));
> -
> -    if (!opaque) {
> -        mfxFrameAllocator frame_allocator = {
> -            .pthis  = ctx,
> -            .Alloc  = frame_alloc,
> -            .Lock   = frame_lock,
> -            .Unlock = frame_unlock,
> -            .GetHDL = frame_get_hdl,
> -            .Free   = frame_free,
> -        };
> -
> -        s->mem_ids_in = av_calloc(in_frames_hwctx->nb_surfaces,
> -                                  sizeof(*s->mem_ids_in));
> -        if (!s->mem_ids_in)
> -            return AVERROR(ENOMEM);
> -        for (i = 0; i < in_frames_hwctx->nb_surfaces; i++)
> -            s->mem_ids_in[i] = in_frames_hwctx->surfaces[i].Data.MemId;
> -        s->nb_mem_ids_in = in_frames_hwctx->nb_surfaces;
> -
> -        s->mem_ids_out = av_calloc(out_frames_hwctx->nb_surfaces,
> -                                   sizeof(*s->mem_ids_out));
> -        if (!s->mem_ids_out)
> -            return AVERROR(ENOMEM);
> -        for (i = 0; i < out_frames_hwctx->nb_surfaces; i++)
> -            s->mem_ids_out[i] = out_frames_hwctx->surfaces[i].Data.MemId;
> -        s->nb_mem_ids_out = out_frames_hwctx->nb_surfaces;
> -
> -        err = MFXVideoCORE_SetFrameAllocator(s->session, &frame_allocator);
> -        if (err != MFX_ERR_NONE)
> -            return AVERROR_UNKNOWN;
> -
> -        par.IOPattern = MFX_IOPATTERN_IN_VIDEO_MEMORY | MFX_IOPATTERN_OUT_VIDEO_MEMORY;
> -    }
> -#if QSV_HAVE_OPAQUE
> -    else {
> -        s->surface_ptrs_in = av_calloc(in_frames_hwctx->nb_surfaces,
> -                                       sizeof(*s->surface_ptrs_in));
> -        if (!s->surface_ptrs_in)
> -            return AVERROR(ENOMEM);
> -        for (i = 0; i < in_frames_hwctx->nb_surfaces; i++)
> -            s->surface_ptrs_in[i] = in_frames_hwctx->surfaces + i;
> -        s->nb_surface_ptrs_in = in_frames_hwctx->nb_surfaces;
> -
> -        s->surface_ptrs_out = av_calloc(out_frames_hwctx->nb_surfaces,
> -                                        sizeof(*s->surface_ptrs_out));
> -        if (!s->surface_ptrs_out)
> -            return AVERROR(ENOMEM);
> -        for (i = 0; i < out_frames_hwctx->nb_surfaces; i++)
> -            s->surface_ptrs_out[i] = out_frames_hwctx->surfaces + i;
> -        s->nb_surface_ptrs_out = out_frames_hwctx->nb_surfaces;
> -
> -        s->opaque_alloc.In.Surfaces   = s->surface_ptrs_in;
> -        s->opaque_alloc.In.NumSurface = s->nb_surface_ptrs_in;
> -        s->opaque_alloc.In.Type       = in_frames_hwctx->frame_type;
> -
> -        s->opaque_alloc.Out.Surfaces   = s->surface_ptrs_out;
> -        s->opaque_alloc.Out.NumSurface = s->nb_surface_ptrs_out;
> -        s->opaque_alloc.Out.Type       = out_frames_hwctx->frame_type;
> -
> -        s->opaque_alloc.Header.BufferId = MFX_EXTBUFF_OPAQUE_SURFACE_ALLOCATION;
> -        s->opaque_alloc.Header.BufferSz = sizeof(s->opaque_alloc);
> -
> -        s->ext_buffers[s->num_ext_buf++] = (mfxExtBuffer*)&s->opaque_alloc;
> -
> -        par.IOPattern = MFX_IOPATTERN_IN_OPAQUE_MEMORY | MFX_IOPATTERN_OUT_OPAQUE_MEMORY;
> -    }
> -#endif
> -
> -    memset(&s->scale_conf, 0, sizeof(mfxExtVPPScaling));
> -    s->scale_conf.Header.BufferId     = MFX_EXTBUFF_VPP_SCALING;
> -    s->scale_conf.Header.BufferSz     = sizeof(mfxExtVPPScaling);
> -    s->scale_conf.ScalingMode         = s->mode;
> -    s->ext_buffers[s->num_ext_buf++]  = (mfxExtBuffer*)&s->scale_conf;
> -    av_log(ctx, AV_LOG_VERBOSE, "Scaling mode: %d\n", s->mode);
> -
> -    par.ExtParam    = s->ext_buffers;
> -    par.NumExtParam = s->num_ext_buf;
> -
> -    par.AsyncDepth = 1;    // TODO async
> -
> -    par.vpp.In  = in_frames_hwctx->surfaces[0].Info;
> -    par.vpp.Out = out_frames_hwctx->surfaces[0].Info;
> -
> -    /* Apparently VPP requires the frame rate to be set to some value, otherwise
> -     * init will fail (probably for the framerate conversion filter). Since we
> -     * are only doing scaling here, we just invent an arbitrary
> -     * value */
> -    par.vpp.In.FrameRateExtN  = 25;
> -    par.vpp.In.FrameRateExtD  = 1;
> -    par.vpp.Out.FrameRateExtN = 25;
> -    par.vpp.Out.FrameRateExtD = 1;
> -
> -    /* Print input memory mode */
> -    ff_qsvvpp_print_iopattern(ctx, par.IOPattern & 0x0F, "VPP");
> -    /* Print output memory mode */
> -    ff_qsvvpp_print_iopattern(ctx, par.IOPattern & 0xF0, "VPP");
> -    err = MFXVideoVPP_Init(s->session, &par);
> -    if (err < 0)
> -        return ff_qsvvpp_print_error(ctx, err,
> -                                     "Error opening the VPP for scaling");
> -    else if (err > 0) {
> -        ff_qsvvpp_print_warning(ctx, err,
> -                                "Warning in VPP initialization");
> -        return AVERROR_UNKNOWN;
> -    }
> -
> -    return 0;
> -}
> -
> -static int init_scale_session(AVFilterContext *ctx, int in_width, int in_height,
> -                              int out_width, int out_height)
> -{
> -    int ret;
> -
> -    qsvscale_uninit(ctx);
> -
> -    ret = init_out_pool(ctx, out_width, out_height);
> -    if (ret < 0)
> -        return ret;
> -
> -    ret = init_out_session(ctx);
> -    if (ret < 0)
> -        return ret;
> -
> -    return 0;
> +    ff_qsvvpp_close(ctx);
>  }
>  
>  static int qsvscale_config_props(AVFilterLink *outlink)
>  {
>      AVFilterContext *ctx = outlink->src;
>      AVFilterLink *inlink = outlink->src->inputs[0];
> -    QSVScaleContext  *s = ctx->priv;
> +    QSVScaleContext   *s = ctx->priv;
> +    QSVVPPParam    param = { NULL };
> +    mfxExtBuffer    *ext_buf[1];
>      int64_t w, h;
>      double var_values[VARS_NB], res;
>      char *expr;
>      int ret;
> +    enum AVPixelFormat in_format;
>  
>      var_values[VAR_IN_W]  = var_values[VAR_IW] = inlink->w;
>      var_values[VAR_IN_H]  = var_values[VAR_IH] = inlink->h;
> @@ -518,7 +177,30 @@ static int qsvscale_config_props(AVFilterLink *outlink)
>      outlink->w = w;
>      outlink->h = h;
>  
> -    ret = init_scale_session(ctx, inlink->w, inlink->h, w, h);
> +    if (inlink->format == AV_PIX_FMT_QSV) {
> +        if (!inlink->hw_frames_ctx || !inlink->hw_frames_ctx->data)
> +            return AVERROR(EINVAL);
> +        else
> +            in_format = ((AVHWFramesContext*)inlink->hw_frames_ctx->data)->sw_format;
> +    } else
> +        in_format = inlink->format;
> +
> +    if (s->format == AV_PIX_FMT_NONE)
> +        s->format = in_format;
> +
> +    outlink->frame_rate = inlink->frame_rate;
> +    outlink->time_base = av_inv_q(inlink->frame_rate);
> +    param.out_sw_format = s->format;
> +
> +    param.ext_buf                      = ext_buf;
> +    memset(&s->scale_conf, 0, sizeof(mfxExtVPPScaling));
> +    s->scale_conf.Header.BufferId      = MFX_EXTBUFF_VPP_SCALING;
> +    s->scale_conf.Header.BufferSz      = sizeof(mfxExtVPPScaling);
> +    s->scale_conf.ScalingMode          = s->mode;
> +    param.ext_buf[param.num_ext_buf++] = (mfxExtBuffer*)&s->scale_conf;
> +    av_log(ctx, AV_LOG_VERBOSE, "Scaling mode: %d\n", s->mode);
> +
> +    ret = ff_qsvvpp_init(ctx, &param);
>      if (ret < 0)
>          return ret;
>  
> @@ -542,67 +224,12 @@ fail:
>  
>  static int qsvscale_filter_frame(AVFilterLink *link, AVFrame *in)
>  {
> -    AVFilterContext             *ctx = link->dst;
> -    QSVScaleContext               *s = ctx->priv;
> -    AVFilterLink            *outlink = ctx->outputs[0];
> -
> -    mfxSyncPoint sync = NULL;
> -    mfxStatus err;
> -
> -    AVFrame *out = NULL;
> -    int ret = 0;
> -
> -    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
> -    if (!out) {
> -        ret = AVERROR(ENOMEM);
> -        goto fail;
> -    }
> +    int               ret = 0;
> +    AVFilterContext  *ctx = link->dst;
> +    QSVVPPContext    *qsv = ctx->priv;
>  
> -    do {
> -        err = MFXVideoVPP_RunFrameVPPAsync(s->session,
> -                                           (mfxFrameSurface1*)in->data[3],
> -                                           (mfxFrameSurface1*)out->data[3],
> -                                           NULL, &sync);
> -        if (err == MFX_WRN_DEVICE_BUSY)
> -            av_usleep(1);
> -    } while (err == MFX_WRN_DEVICE_BUSY);
> -
> -    if (err < 0) {
> -        ret = ff_qsvvpp_print_error(ctx, err, "Error during scaling");
> -        goto fail;
> -    }
> -
> -    if (!sync) {
> -        av_log(ctx, AV_LOG_ERROR, "No sync during scaling\n");
> -        ret = AVERROR_UNKNOWN;
> -        goto fail;
> -    }
> -
> -    do {
> -        err = MFXVideoCORE_SyncOperation(s->session, sync, 1000);
> -    } while (err == MFX_WRN_IN_EXECUTION);
> -    if (err < 0) {
> -        ret = ff_qsvvpp_print_error(ctx, err, "Error synchronizing the
> operation");
> -        goto fail;
> -    }
> -
> -    ret = av_frame_copy_props(out, in);
> -    if (ret < 0)
> -        goto fail;
> -
> -    out->width  = outlink->w;
> -    out->height = outlink->h;
> -
> -    av_reduce(&out->sample_aspect_ratio.num, &out->sample_aspect_ratio.den,
> -              (int64_t)in->sample_aspect_ratio.num * outlink->h * link->w,
> -              (int64_t)in->sample_aspect_ratio.den * outlink->w * link->h,
> -              INT_MAX);
> -
> -    av_frame_free(&in);
> -    return ff_filter_frame(outlink, out);
> -fail:
> +    ret = ff_qsvvpp_filter_frame(qsv, link, in);
>      av_frame_free(&in);
> -    av_frame_free(&out);
>      return ret;
>  }
>  
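
For a quick local check of the simplified filter, a minimal command line
might look like the following (assuming a QSV-capable device and the
h264_qsv decoder/encoder are available; input.mp4, output.mp4 and the
1280x720 target size are just placeholders):

	ffmpeg -hwaccel qsv -hwaccel_output_format qsv -c:v h264_qsv -i input.mp4 \
		-vf "scale_qsv=w=1280:h=720:mode=hq" -c:v h264_qsv output.mp4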

Will apply,

-Haihao

_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".

^ permalink raw reply	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2023-01-28  6:05 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-01-17  6:20 [FFmpeg-devel] [PATCH 1/3] lavfi/scale_qsv: simplify scale_qsv filter Xiang, Haihao
2023-01-17  6:20 ` [FFmpeg-devel] [PATCH 2/3] lavfi/scale_qsv: re-use VPPContext for " Xiang, Haihao
2023-01-17  6:20 ` [FFmpeg-devel] [PATCH 3/3] lavfi/vpp_qsv: factor common QSV filter definition Xiang, Haihao
2023-01-28  6:04 ` [FFmpeg-devel] [PATCH 1/3] lavfi/scale_qsv: simplify scale_qsv filter Xiang, Haihao


This inbox may be cloned and mirrored by anyone:

	git clone --mirror https://master.gitmailbox.com/ffmpegdev/0 ffmpegdev/git/0.git

	# If you have public-inbox 1.1+ installed, you may
	# initialize and index your mirror using the following commands:
	public-inbox-init -V2 ffmpegdev ffmpegdev/ https://master.gitmailbox.com/ffmpegdev \
		ffmpegdev@gitmailbox.com
	public-inbox-index ffmpegdev



AGPL code for this site: git clone https://public-inbox.org/public-inbox.git