Git Inbox Mirror of the ffmpeg-devel mailing list - see https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
 help / color / mirror / Atom feed
* [FFmpeg-devel] [PATCH] avcodec/quadra: add netint h264/h265 hwaccel encoders (PR #20364)
@ 2025-08-28 21:14 desmondliu via ffmpeg-devel
  0 siblings, 0 replies; only message in thread
From: desmondliu via ffmpeg-devel @ 2025-08-28 21:14 UTC (permalink / raw)
  To: ffmpeg-devel; +Cc: desmondliu

PR #20364 opened by desmondliu
URL: https://code.ffmpeg.org/FFmpeg/FFmpeg/pulls/20364
Patch URL: https://code.ffmpeg.org/FFmpeg/FFmpeg/pulls/20364.patch

Add support for NETINT Quadra h264/h265 hardware video
encoders. This commit enables compile configuration for
linking to Quadra's driver 'Libxcoder' and HW frames
support.

More information:

https://netint.com/products/quadra-t1a-video-processing-unit/
https://docs.netint.com/vpu/quadra/


>From 7489107ce8e3e4168b4048b7f0e8b9042abf247e Mon Sep 17 00:00:00 2001
From: Desmond Liu <desmond.liu@netint.ca>
Date: Mon, 28 Jul 2025 16:13:28 -0700
Subject: [PATCH] avcodec/quadra: add netint h264/h265 hwaccel encoders

Add support for NETINT Quadra h264/h265 hardware video
encoders. This commit enables compile configuration for
linking to Quadra's driver 'Libxcoder' and HW frames
support.

More information:

https://netint.com/products/quadra-t1a-video-processing-unit/
https://docs.netint.com/vpu/quadra/
---
 configure                      |   15 +-
 libavcodec/Makefile            |    3 +
 libavcodec/allcodecs.c         |    2 +
 libavcodec/nicodec.h           |   63 +
 libavcodec/nienc.c             | 3011 ++++++++++++++++++++++++++++++++
 libavcodec/nienc.h             |  161 ++
 libavcodec/nienc_h264.c        |   61 +
 libavcodec/nienc_hevc.c        |   61 +
 libavutil/Makefile             |    3 +
 libavutil/hwcontext.c          |    4 +
 libavutil/hwcontext.h          |    1 +
 libavutil/hwcontext_internal.h |    1 +
 libavutil/hwcontext_ni_quad.c  | 1257 +++++++++++++
 libavutil/hwcontext_ni_quad.h  |   99 ++
 libavutil/pixdesc.c            |   15 +
 libavutil/pixfmt.h             |    8 +
 16 files changed, 4764 insertions(+), 1 deletion(-)
 create mode 100644 libavcodec/nicodec.h
 create mode 100644 libavcodec/nienc.c
 create mode 100644 libavcodec/nienc.h
 create mode 100644 libavcodec/nienc_h264.c
 create mode 100644 libavcodec/nienc_hevc.c
 create mode 100644 libavutil/hwcontext_ni_quad.c
 create mode 100644 libavutil/hwcontext_ni_quad.h

diff --git a/configure b/configure
index 9fe28c5af4..1d40bf4787 100755
--- a/configure
+++ b/configure
@@ -357,6 +357,7 @@ External library support:
   --enable-libvpl          enable Intel oneVPL code via libvpl if libmfx is not used [no]
   --enable-libnpp          enable Nvidia Performance Primitives-based code [no]
   --enable-mmal            enable Broadcom Multi-Media Abstraction Layer (Raspberry Pi) via MMAL [no]
+  --disable-ni_quadra      disable NetInt Quadra HWaccel codecs/filters [autodetect]
   --disable-nvdec          disable Nvidia video decoding acceleration (via hwaccel) [autodetect]
   --disable-nvenc          disable Nvidia video encoding code [autodetect]
   --enable-omx             enable OpenMAX IL code [no]
@@ -2023,6 +2024,7 @@ HWACCEL_AUTODETECT_LIBRARY_LIST="
     d3d12va
     dxva2
     ffnvcodec
+    ni_quadra
     libdrm
     nvdec
     nvenc
@@ -3670,6 +3672,8 @@ libx264_encoder_select="atsc_a53 golomb"
 libx264rgb_encoder_deps="libx264"
 libx264rgb_encoder_select="libx264_encoder"
 libx265_encoder_deps="libx265"
+h264_ni_quadra_encoder_deps="ni_quadra"
+h265_ni_quadra_encoder_deps="ni_quadra"
 libx265_encoder_select="atsc_a53 dovi_rpuenc"
 libxavs_encoder_deps="libxavs"
 libxavs2_encoder_deps="libxavs2"
@@ -4139,7 +4143,7 @@ swscale_suggest="libm stdatomic"
 
 avcodec_extralibs="pthreads_extralibs iconv_extralibs dxva2_extralibs liblcevc_dec_extralibs lcms2_extralibs"
 avfilter_extralibs="pthreads_extralibs"
-avutil_extralibs="d3d11va_extralibs d3d12va_extralibs mediacodec_extralibs nanosleep_extralibs pthreads_extralibs vaapi_drm_extralibs vaapi_x11_extralibs vaapi_win32_extralibs vdpau_x11_extralibs"
+avutil_extralibs="d3d11va_extralibs d3d12va_extralibs mediacodec_extralibs nanosleep_extralibs pthreads_extralibs vaapi_drm_extralibs vaapi_x11_extralibs vaapi_win32_extralibs vdpau_x11_extralibs ni_quadra_extralibs"
 
 # programs
 ffmpeg_deps="avcodec avfilter avformat threads"
@@ -7010,6 +7014,15 @@ for func in $MATH_FUNCS; do
     eval check_mathfunc $func \${${func}_args:-1} $libm_extralibs
 done
 
+# Auto-detect ni_quadra and check libxcoder API version
+if enabled ni_quadra; then
+    if ! check_pkg_config ni_quadra xcoder ni_device_api.h ni_device_open; then
+        disable_with_reason ni_quadra "libxcoder not found"
+    elif ! check_cpp_condition xcoder ni_defs.h "LIBXCODER_API_VERSION_MAJOR == 2 && LIBXCODER_API_VERSION_MINOR >= 77"; then
+        disable_with_reason ni_quadra "libxcoder API version must be >= 2.77"
+    fi
+fi
+
 # these are off by default, so fail if requested and not available
 enabled avisynth          && { require_headers "avisynth/avisynth_c.h avisynth/avs/version.h" &&
                                { test_cpp_condition avisynth/avs/version.h "AVS_MAJOR_VER >= 3 && AVS_MINOR_VER >= 7 && AVS_BUGFIX_VER >= 3 || AVS_MAJOR_VER >= 3 && AVS_MINOR_VER > 7 || AVS_MAJOR_VER > 3" ||
diff --git a/libavcodec/Makefile b/libavcodec/Makefile
index 3d036de4b6..5ca3198d71 100644
--- a/libavcodec/Makefile
+++ b/libavcodec/Makefile
@@ -433,6 +433,7 @@ OBJS-$(CONFIG_H264_MEDIACODEC_DECODER) += mediacodecdec.o
 OBJS-$(CONFIG_H264_MEDIACODEC_ENCODER) += mediacodecenc.o
 OBJS-$(CONFIG_H264_MF_ENCODER)         += mfenc.o mf_utils.o
 OBJS-$(CONFIG_H264_MMAL_DECODER)       += mmaldec.o
+OBJS-$(CONFIG_H264_NI_QUADRA_ENCODER)  += nienc_h264.o nienc.o
 OBJS-$(CONFIG_H264_NVENC_ENCODER)      += nvenc_h264.o nvenc.o
 OBJS-$(CONFIG_H264_OH_DECODER)         += ohcodec.o ohdec.o
 OBJS-$(CONFIG_H264_OH_ENCODER)         += ohcodec.o ohenc.o
@@ -463,6 +464,7 @@ OBJS-$(CONFIG_HEVC_D3D12VA_ENCODER)    += d3d12va_encode_hevc.o h265_profile_lev
 OBJS-$(CONFIG_HEVC_MEDIACODEC_DECODER) += mediacodecdec.o
 OBJS-$(CONFIG_HEVC_MEDIACODEC_ENCODER) += mediacodecenc.o
 OBJS-$(CONFIG_HEVC_MF_ENCODER)         += mfenc.o mf_utils.o
+OBJS-$(CONFIG_H265_NI_QUADRA_ENCODER)  += nienc_hevc.o nienc.o
 OBJS-$(CONFIG_HEVC_NVENC_ENCODER)      += nvenc_hevc.o nvenc.o
 OBJS-$(CONFIG_HEVC_OH_DECODER)         += ohcodec.o ohdec.o
 OBJS-$(CONFIG_HEVC_OH_ENCODER)         += ohcodec.o ohenc.o
@@ -1326,6 +1328,7 @@ SKIPHEADERS-$(CONFIG_LIBVPX)           += libvpx.h
 SKIPHEADERS-$(CONFIG_LIBWEBP_ENCODER)  += libwebpenc_common.h
 SKIPHEADERS-$(CONFIG_MEDIACODEC)       += mediacodecdec_common.h mediacodec_surface.h mediacodec_wrapper.h mediacodec_sw_buffer.h
 SKIPHEADERS-$(CONFIG_MEDIAFOUNDATION)  += mf_utils.h
+SKIPHEADERS-$(CONFIG_NI_QUADRA)        += nicodec.h nienc.h
 SKIPHEADERS-$(CONFIG_NVDEC)            += nvdec.h
 SKIPHEADERS-$(CONFIG_NVENC)            += nvenc.h
 SKIPHEADERS-$(CONFIG_OHCODEC)          += ohcodec.h
diff --git a/libavcodec/allcodecs.c b/libavcodec/allcodecs.c
index f5ec2e01e8..be227c893b 100644
--- a/libavcodec/allcodecs.c
+++ b/libavcodec/allcodecs.c
@@ -843,6 +843,8 @@ extern const FFCodec ff_amrwb_mediacodec_decoder;
 extern const FFCodec ff_h263_v4l2m2m_encoder;
 extern const FFCodec ff_libaom_av1_decoder;
 /* hwaccel hooks only, so prefer external decoders */
+extern const FFCodec ff_h264_ni_quadra_encoder;
+extern const FFCodec ff_h265_ni_quadra_encoder;
 extern const FFCodec ff_av1_decoder;
 extern const FFCodec ff_av1_cuvid_decoder;
 extern const FFCodec ff_av1_mediacodec_decoder;
diff --git a/libavcodec/nicodec.h b/libavcodec/nicodec.h
new file mode 100644
index 0000000000..4d14dfe05d
--- /dev/null
+++ b/libavcodec/nicodec.h
@@ -0,0 +1,63 @@
+/*
+ * XCoder Codec Lib Wrapper
+ * Copyright (c) 2018 NetInt
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * XCoder codec lib wrapper header.
+ */
+
+#ifndef AVCODEC_NICODEC_H
+#define AVCODEC_NICODEC_H
+
+#include <stdbool.h>
+#include <time.h>
+#include "avcodec.h"
+#include "startcode.h"
+#include "bsf.h"
+
+#include <ni_device_api.h>
+#include "libavutil/hwcontext_ni_quad.h"
+#include "libavutil/hwcontext.h"
+
+#define NI_NAL_VPS_BIT (0x01)
+#define NI_NAL_SPS_BIT (0x01 << 1)
+#define NI_NAL_PPS_BIT (0x01 << 2)
+#define NI_GENERATE_ALL_NAL_HEADER_BIT (0x01 << 3)
+
+/* enum for specifying xcoder device/coder index; can be specified in either
+   decoder or encoder options. */
+enum {
+    BEST_DEVICE_INST = -2,
+    BEST_DEVICE_LOAD = -1
+};
+
+enum {
+    HW_FRAMES_OFF = 0,
+    HW_FRAMES_ON = 1
+};
+
+enum {
+    GEN_GLOBAL_HEADERS_AUTO = -1,
+    GEN_GLOBAL_HEADERS_OFF = 0,
+    GEN_GLOBAL_HEADERS_ON = 1
+};
+
+#endif /* AVCODEC_NICODEC_H */
diff --git a/libavcodec/nienc.c b/libavcodec/nienc.c
new file mode 100644
index 0000000000..6ffa8f5f61
--- /dev/null
+++ b/libavcodec/nienc.c
@@ -0,0 +1,3011 @@
+/*
+ * NetInt XCoder H.264/HEVC Encoder common code
+ * Copyright (c) 2018-2019 NetInt
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "nienc.h"
+#include "bytestream.h"
+#include "libavcodec/h264.h"
+#include "libavcodec/h264_sei.h"
+#include "libavcodec/hevc/hevc.h"
+#include "libavcodec/hevc/sei.h"
+#include "libavutil/mem.h"
+
+#include "libavcodec/put_bits.h"
+#include "libavutil/avstring.h"
+#include "libavutil/hdr_dynamic_metadata.h"
+#include "libavutil/hwcontext.h"
+#include "libavutil/hwcontext_ni_quad.h"
+#include "libavutil/mastering_display_metadata.h"
+#include "ni_av_codec.h"
+#include "ni_util.h"
+#include "put_bits.h"
+#include "packet_internal.h"
+
+#include <unistd.h>
+#include "encode.h"
+
+static bool free_frames_isempty(XCoderEncContext *ctx);
+
+static bool free_frames_isfull(XCoderEncContext *ctx);
+
+static int deq_free_frames(XCoderEncContext *ctx);
+
+static int enq_free_frames(XCoderEncContext *ctx, int idx);
+
+static int recycle_index_2_avframe_index(XCoderEncContext *ctx, uint32_t recycleIndex);
+
+static bool gop_params_check(AVDictionary *dict, AVCodecContext *avctx)
+{
+    XCoderEncContext *s = avctx->priv_data;
+    AVDictionaryEntry *en = NULL;
+    char *key;
+
+    while ((en = av_dict_get(dict, "", en, AV_DICT_IGNORE_SUFFIX))) {
+        key = en->key;
+        ni_gop_params_check_set(&s->api_param, key);
+    }
+    return ni_gop_params_check(&s->api_param);
+}
+
+static int xcoder_encoder_headers(AVCodecContext *avctx)
+{
+    // use a copy of encoder context, take care to restore original config
+    // cropping setting
+    XCoderEncContext *ctx = NULL;
+    ni_xcoder_params_t *p_param = NULL;
+    ni_packet_t *xpkt = NULL;
+    int orig_conf_win_right;
+    int orig_conf_win_bottom;
+    int linesize_aligned, height_aligned;
+    int ret, recv;
+
+    ctx = av_malloc(sizeof(XCoderEncContext));
+    if (!ctx) {
+        return AVERROR(ENOMEM);
+    }
+
+    memcpy(ctx, (XCoderEncContext *)(avctx->priv_data),
+           sizeof(XCoderEncContext));
+
+    p_param = (ni_xcoder_params_t *)(ctx->api_ctx.p_session_config);
+
+    orig_conf_win_right  = p_param->cfg_enc_params.conf_win_right;
+    orig_conf_win_bottom = p_param->cfg_enc_params.conf_win_bottom;
+
+    linesize_aligned = avctx->width;
+    if (linesize_aligned < NI_MIN_WIDTH) {
+        p_param->cfg_enc_params.conf_win_right +=
+            (NI_MIN_WIDTH - avctx->width) / 2 * 2;
+        linesize_aligned = NI_MIN_WIDTH;
+    } else {
+        if (avctx->sw_pix_fmt == AV_PIX_FMT_NI_QUAD_8_TILE_4X4 ||
+            avctx->sw_pix_fmt == AV_PIX_FMT_NI_QUAD_10_TILE_4X4) {
+            linesize_aligned = FFALIGN(avctx->width, 4);
+            p_param->cfg_enc_params.conf_win_right +=
+                (linesize_aligned - avctx->width) / 2 * 2;
+        } else {
+            linesize_aligned = FFALIGN(avctx->width, 2);
+            p_param->cfg_enc_params.conf_win_right +=
+                (linesize_aligned - avctx->width) / 2 * 2;
+        }
+    }
+    p_param->source_width = linesize_aligned;
+
+    height_aligned = avctx->height;
+    if (height_aligned < NI_MIN_HEIGHT) {
+        p_param->cfg_enc_params.conf_win_bottom +=
+            (NI_MIN_HEIGHT - avctx->height) / 2 * 2;
+        height_aligned = NI_MIN_HEIGHT;
+    } else {
+        if (avctx->sw_pix_fmt == AV_PIX_FMT_NI_QUAD_8_TILE_4X4 ||
+            avctx->sw_pix_fmt == AV_PIX_FMT_NI_QUAD_10_TILE_4X4) {
+            height_aligned = FFALIGN(avctx->height, 4);
+            p_param->cfg_enc_params.conf_win_bottom +=
+                (height_aligned - avctx->height) / 4 * 4;
+        } else {
+            height_aligned = FFALIGN(avctx->height, 2);
+            p_param->cfg_enc_params.conf_win_bottom +=
+                (height_aligned - avctx->height) / 2 * 2;
+        }
+    }
+    p_param->source_height = height_aligned;
+    p_param->cfg_enc_params.enable_acq_limit = 1;
+
+    ctx->api_ctx.hw_id = ctx->dev_enc_idx;
+
+    if (ctx->dev_blk_name)
+        av_strlcpy(ctx->api_ctx.blk_dev_name, ctx->dev_blk_name, NI_MAX_DEVICE_NAME_LEN);
+    if (ctx->dev_xcoder)
+        av_strlcpy(ctx->api_ctx.dev_xcoder_name, ctx->dev_xcoder, MAX_CHAR_IN_DEVICE_NAME);
+
+    ret = ni_device_session_open(&(ctx->api_ctx), NI_DEVICE_TYPE_ENCODER);
+
+    ctx->dev_xcoder_name = ctx->api_ctx.dev_xcoder_name;
+    ctx->blk_xcoder_name = ctx->api_ctx.blk_xcoder_name;
+    ctx->dev_enc_idx = ctx->api_ctx.hw_id;
+
+    switch (ret) {
+    case NI_RETCODE_SUCCESS:
+        av_log(avctx, AV_LOG_VERBOSE,
+               "XCoder %s.%d (inst: %d) opened successfully\n",
+               ctx->dev_xcoder_name, ctx->dev_enc_idx, ctx->api_ctx.session_id);
+        break;
+    case NI_RETCODE_INVALID_PARAM:
+        av_log(avctx, AV_LOG_ERROR,
+               "Failed to open encoder (status = %d), invalid parameter values "
+               "given: %s\n", ret, ctx->api_ctx.param_err_msg);
+        ret = AVERROR_EXTERNAL;
+        goto end;
+    default:
+        av_log(avctx, AV_LOG_ERROR,
+               "Failed to open encoder (status = %d), resource unavailable\n",
+               ret);
+        ret = AVERROR_EXTERNAL;
+        goto end;
+    }
+
+    xpkt = &(ctx->api_pkt.data.packet);
+    ni_packet_buffer_alloc(xpkt, NI_MAX_TX_SZ);
+
+    while (1) {
+        recv = ni_device_session_read(&(ctx->api_ctx), &(ctx->api_pkt),
+                                    NI_DEVICE_TYPE_ENCODER);
+
+        if (recv > 0) {
+            av_freep(&avctx->extradata);
+            avctx->extradata_size = recv - (int)(ctx->api_ctx.meta_size);
+            avctx->extradata =
+                av_mallocz(avctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
+            memcpy(avctx->extradata,
+                   (uint8_t *)xpkt->p_data + ctx->api_ctx.meta_size,
+                   avctx->extradata_size);
+            av_log(avctx, AV_LOG_VERBOSE, "Xcoder encoder headers len: %d\n",
+                   avctx->extradata_size);
+            break;
+        }
+    }
+
+end:
+    // close and clean up the temporary session
+    if (ret != 0) {
+        ni_device_session_close(&(ctx->api_ctx), ctx->encoder_eof,
+                               NI_DEVICE_TYPE_ENCODER);
+    } else {
+        ret = ni_device_session_close(&(ctx->api_ctx), ctx->encoder_eof,
+                                     NI_DEVICE_TYPE_ENCODER);
+    }
+#ifdef _WIN32
+    ni_device_close(ctx->api_ctx.device_handle);
+#elif __linux__
+    ni_device_close(ctx->api_ctx.device_handle);
+    ni_device_close(ctx->api_ctx.blk_io_handle);
+#endif
+    ctx->api_ctx.device_handle = NI_INVALID_DEVICE_HANDLE;
+    ctx->api_ctx.blk_io_handle = NI_INVALID_DEVICE_HANDLE;
+
+    ni_packet_buffer_free(&(ctx->api_pkt.data.packet));
+
+    ni_rsrc_free_device_context(ctx->rsrc_ctx);
+    ctx->rsrc_ctx = NULL;
+
+    p_param->cfg_enc_params.conf_win_right  = orig_conf_win_right;
+    p_param->cfg_enc_params.conf_win_bottom = orig_conf_win_bottom;
+
+    av_freep(&ctx);
+
+    return ret;
+}
+
+static int xcoder_encoder_header_check_set(AVCodecContext *avctx)
+{
+    XCoderEncContext *ctx = avctx->priv_data;
+    ni_xcoder_params_t *p_param;
+    // set color metrics
+    enum AVColorPrimaries color_primaries = avctx->color_primaries;
+    enum AVColorTransferCharacteristic color_trc = avctx->color_trc;
+    enum AVColorSpace color_space = avctx->colorspace;
+
+    p_param = (ni_xcoder_params_t *)ctx->api_ctx.p_session_config;
+
+    if (5 == p_param->dolby_vision_profile) {
+        switch (avctx->codec_id) {
+        case AV_CODEC_ID_HEVC:
+            color_primaries = AVCOL_PRI_UNSPECIFIED;
+            color_trc       = AVCOL_TRC_UNSPECIFIED;
+            color_space     = AVCOL_SPC_UNSPECIFIED;
+            p_param->cfg_enc_params.hrdEnable =
+                p_param->cfg_enc_params.EnableAUD        = 1;
+            p_param->cfg_enc_params.forced_header_enable = 1;
+            p_param->cfg_enc_params.videoFullRange       = 1;
+            break;
+        case AV_CODEC_ID_AV1:
+            av_log(avctx, AV_LOG_ERROR,
+                   "dolbyVisionProfile is not supported on av1 encoder.\n");
+            return -1;
+        case AV_CODEC_ID_MJPEG:
+            av_log(avctx, AV_LOG_ERROR,
+                   "dolbyVisionProfile is not supported on jpeg encoder.\n");
+            return -1;
+        case AV_CODEC_ID_H264:
+            av_log(avctx, AV_LOG_ERROR,
+                   "dolbyVisionProfile is not supported on h264 encoder.\n");
+            return -1;
+        default:
+            break;
+        }
+    }
+
+    if (avctx->codec_id != AV_CODEC_ID_MJPEG &&
+        ((5 == p_param->dolby_vision_profile &&
+          AV_CODEC_ID_HEVC == avctx->codec_id) ||
+         color_primaries != AVCOL_PRI_UNSPECIFIED ||
+         color_trc != AVCOL_TRC_UNSPECIFIED ||
+         color_space != AVCOL_SPC_UNSPECIFIED)) {
+        p_param->cfg_enc_params.colorDescPresent = 1;
+        p_param->cfg_enc_params.colorPrimaries   = color_primaries;
+        p_param->cfg_enc_params.colorTrc         = color_trc;
+        p_param->cfg_enc_params.colorSpace       = color_space;
+
+        av_log(avctx, AV_LOG_VERBOSE,
+               "XCoder HDR color info color_primaries: %d "
+               "color_trc: %d  color_space %d\n",
+               color_primaries, color_trc, color_space);
+    }
+    if (avctx->color_range == AVCOL_RANGE_JPEG ||
+        AV_PIX_FMT_YUVJ420P == avctx->pix_fmt ||
+        AV_PIX_FMT_YUVJ420P == avctx->sw_pix_fmt) {
+        p_param->cfg_enc_params.videoFullRange = 1;
+    }
+
+    return 0;
+}
+
+static int xcoder_setup_encoder(AVCodecContext *avctx)
+{
+    XCoderEncContext *s = avctx->priv_data;
+    int i, ret = 0;
+    uint32_t  xcoder_timeout;
+    ni_xcoder_params_t *p_param       = &s->api_param;
+    ni_xcoder_params_t *pparams       = NULL;
+    ni_session_run_state_t prev_state = s->api_ctx.session_run_state;
+
+    av_log(avctx, AV_LOG_VERBOSE, "XCoder setup device encoder\n");
+
+    if (ni_device_session_context_init(&(s->api_ctx)) < 0) {
+        av_log(avctx, AV_LOG_ERROR,
+               "Error XCoder init encoder context failure\n");
+        return AVERROR_EXTERNAL;
+    }
+
+    switch (avctx->codec_id) {
+    case AV_CODEC_ID_HEVC:
+        s->api_ctx.codec_format = NI_CODEC_FORMAT_H265;
+        break;
+    case AV_CODEC_ID_AV1:
+        s->api_ctx.codec_format = NI_CODEC_FORMAT_AV1;
+        break;
+    case AV_CODEC_ID_MJPEG:
+        s->api_ctx.codec_format = NI_CODEC_FORMAT_JPEG;
+        break;
+    default:
+        s->api_ctx.codec_format = NI_CODEC_FORMAT_H264;
+        break;
+    }
+
+    s->api_ctx.session_run_state = prev_state;
+    s->av_rois = NULL;
+    s->firstPktArrived = 0;
+    s->spsPpsArrived = 0;
+    s->spsPpsHdrLen = 0;
+    s->p_spsPpsHdr = NULL;
+    s->xcode_load_pixel = 0;
+    s->reconfigCount = 0;
+    s->latest_dts = 0;
+    s->first_frame_pts = INT_MIN;
+
+    if (SESSION_RUN_STATE_SEQ_CHANGE_DRAINING != s->api_ctx.session_run_state) {
+        av_log(avctx, AV_LOG_INFO, "Session state: %d allocate frame fifo.\n",
+               s->api_ctx.session_run_state);
+        s->fme_fifo = av_fifo_alloc2((size_t) 1, sizeof(AVFrame), 0);
+    } else {
+        av_log(avctx, AV_LOG_INFO, "Session seq change, fifo size: %lu.\n",
+               av_fifo_can_read(s->fme_fifo));
+    }
+
+    if (!s->fme_fifo) {
+        return AVERROR(ENOMEM);
+    }
+    s->eos_fme_received = 0;
+
+    //Xcoder User Configuration
+    ret = ni_encoder_init_default_params(
+        p_param, avctx->framerate.num,
+        avctx->framerate.den, avctx->bit_rate,
+        avctx->width, avctx->height, s->api_ctx.codec_format);
+    switch (ret) {
+    case NI_RETCODE_PARAM_ERROR_WIDTH_TOO_BIG:
+        if (avctx->codec_id == AV_CODEC_ID_AV1 && avctx->width < NI_PARAM_MAX_WIDTH) {
+            // AV1 resolution will be checked again when encoder session open (ni_validate_custom_template) since crop size may meet AV1 resolution constraint (E.g. AV1 tile encode)
+            av_log(avctx, AV_LOG_ERROR, "AV1 Picture Width exceeds %d - picture needs to be cropped:\n",
+                   NI_PARAM_AV1_MAX_WIDTH);
+            ret = NI_RETCODE_SUCCESS;
+        } else {
+            av_log(avctx, AV_LOG_ERROR, "Invalid Picture Width: too big\n");
+            return AVERROR_EXTERNAL;
+        }
+        break;
+    case NI_RETCODE_PARAM_ERROR_WIDTH_TOO_SMALL:
+        av_log(avctx, AV_LOG_ERROR, "Invalid Picture Width: too small\n");
+        return AVERROR_EXTERNAL;
+    case NI_RETCODE_PARAM_ERROR_HEIGHT_TOO_BIG:
+        if (avctx->codec_id == AV_CODEC_ID_AV1) {
+            // AV1 resolution will be checked again when encoder session open (ni_validate_custom_template) since crop size may meet AV1 resolution constraint (E.g. AV1 tile encode)
+            av_log(avctx, AV_LOG_ERROR, "AV1 Picture Height exceeds %d - picture needs to be cropped:\n",
+                   NI_PARAM_AV1_MAX_HEIGHT);
+            ret = NI_RETCODE_SUCCESS;
+        } else {
+            av_log(avctx, AV_LOG_ERROR, "Invalid Picture Height: too big\n");
+            return AVERROR_EXTERNAL;
+        }
+        break;
+    case NI_RETCODE_PARAM_ERROR_HEIGHT_TOO_SMALL:
+        av_log(avctx, AV_LOG_ERROR, "Invalid Picture Height: too small\n");
+        return AVERROR_EXTERNAL;
+    case NI_RETCODE_PARAM_ERROR_AREA_TOO_BIG:
+        if (avctx->codec_id == AV_CODEC_ID_AV1) {
+            // AV1 resolution will be checked again when encoder session open (ni_validate_custom_template) since crop size may meet AV1 resolution constraint (E.g. AV1 tile encode)
+            av_log(avctx, AV_LOG_ERROR, "AV1 Picture Width x Height exceeds %d - picture needs to be cropped:\n",
+                   NI_PARAM_AV1_MAX_AREA);
+            ret = NI_RETCODE_SUCCESS;
+        } else {
+            av_log(avctx, AV_LOG_ERROR,
+                   "Invalid Picture Width x Height: exceeds %d\n",
+                   NI_MAX_RESOLUTION_AREA);
+            return AVERROR_EXTERNAL;
+        }
+        break;
+    case NI_RETCODE_PARAM_ERROR_PIC_WIDTH:
+        av_log(avctx, AV_LOG_ERROR, "Invalid Picture Width\n");
+        return AVERROR_EXTERNAL;
+    case NI_RETCODE_PARAM_ERROR_PIC_HEIGHT:
+        av_log(avctx, AV_LOG_ERROR, "Invalid Picture Height\n");
+        return AVERROR_EXTERNAL;
+    default:
+        if (ret < 0) {
+            av_log(avctx, AV_LOG_ERROR, "Error setting preset or log.\n");
+            av_log(avctx, AV_LOG_INFO, "Possible presets:");
+            for (i = 0; g_xcoder_preset_names[i]; i++)
+                av_log(avctx, AV_LOG_INFO, " %s", g_xcoder_preset_names[i]);
+            av_log(avctx, AV_LOG_INFO, "\n");
+
+            av_log(avctx, AV_LOG_INFO, "Possible log:");
+            for (i = 0; g_xcoder_log_names[i]; i++)
+                av_log(avctx, AV_LOG_INFO, " %s", g_xcoder_log_names[i]);
+            av_log(avctx, AV_LOG_INFO, "\n");
+
+            return AVERROR(EINVAL);
+        }
+        break;
+    }
+
+    av_log(avctx, AV_LOG_INFO, "pix_fmt is %d, sw_pix_fmt is %d resolution %dx%d\n", avctx->pix_fmt, avctx->sw_pix_fmt, avctx->width, avctx->height);
+    if (avctx->pix_fmt != AV_PIX_FMT_NI_QUAD) {
+        av_log(avctx, AV_LOG_INFO, "sw_pix_fmt assigned to pix_fmt was %d, is now %d\n", avctx->pix_fmt, avctx->sw_pix_fmt);
+        avctx->sw_pix_fmt = avctx->pix_fmt;
+    } else {
+        if ((avctx->height >= NI_MIN_HEIGHT) && (avctx->width >= NI_MIN_WIDTH)) {
+            p_param->hwframes = 1;
+        } else if (avctx->sw_pix_fmt == AV_PIX_FMT_NI_QUAD_8_TILE_4X4 ||
+                   avctx->sw_pix_fmt == AV_PIX_FMT_NI_QUAD_10_TILE_4X4) {
+            av_log(avctx, AV_LOG_ERROR, "Invalid Picture Height or Width: too small\n");
+            return AVERROR_EXTERNAL;
+        }
+
+        if (avctx->codec_id == AV_CODEC_ID_MJPEG) {
+            if (avctx->sw_pix_fmt == AV_PIX_FMT_YUVJ420P) {
+                av_log(avctx, AV_LOG_DEBUG, "Pixfmt %s supported in %s encoder\n",
+                       av_get_pix_fmt_name(avctx->sw_pix_fmt), avctx->codec->name);
+            } else if ((avctx->color_range == AVCOL_RANGE_JPEG || avctx->color_range == AVCOL_RANGE_UNSPECIFIED) &&
+                      (avctx->sw_pix_fmt == AV_PIX_FMT_YUV420P || avctx->sw_pix_fmt == AV_PIX_FMT_YUV420P10LE ||
+                       avctx->sw_pix_fmt == AV_PIX_FMT_NV12 || avctx->sw_pix_fmt == AV_PIX_FMT_P010LE)) {
+                av_log(avctx, AV_LOG_DEBUG, "Pixfmt %s supported in %s encoder when color_range is AVCOL_RANGE_JPEG\n",
+                       av_get_pix_fmt_name(avctx->sw_pix_fmt), avctx->codec->name);
+            } else {
+                av_log(avctx, AV_LOG_ERROR, "Pixfmt %s not supported in %s encoder when color_range is %d\n",
+                       av_get_pix_fmt_name(avctx->sw_pix_fmt), avctx->codec->name, avctx->color_range);
+                return AVERROR_INVALIDDATA;
+            }
+        }
+    }
+
+    switch (avctx->sw_pix_fmt) {
+    case AV_PIX_FMT_YUV420P:
+    case AV_PIX_FMT_YUVJ420P:
+        s->api_ctx.pixel_format = NI_PIX_FMT_YUV420P;
+        break;
+    case AV_PIX_FMT_YUV420P10LE:
+        s->api_ctx.pixel_format = NI_PIX_FMT_YUV420P10LE;
+        break;
+    case AV_PIX_FMT_NV12:
+        s->api_ctx.pixel_format = NI_PIX_FMT_NV12;
+        break;
+    case AV_PIX_FMT_P010LE:
+        s->api_ctx.pixel_format = NI_PIX_FMT_P010LE;
+        break;
+    case AV_PIX_FMT_NI_QUAD_8_TILE_4X4:
+        s->api_ctx.pixel_format = NI_PIX_FMT_8_TILED4X4;
+        break;
+    case AV_PIX_FMT_NI_QUAD_10_TILE_4X4:
+        s->api_ctx.pixel_format = NI_PIX_FMT_10_TILED4X4;
+        break;
+    case AV_PIX_FMT_ARGB:
+        s->api_ctx.pixel_format = NI_PIX_FMT_ARGB;
+        break;
+    case AV_PIX_FMT_ABGR:
+        s->api_ctx.pixel_format = NI_PIX_FMT_ABGR;
+        break;
+    case AV_PIX_FMT_RGBA:
+        s->api_ctx.pixel_format = NI_PIX_FMT_RGBA;
+        break;
+    case AV_PIX_FMT_BGRA:
+        s->api_ctx.pixel_format = NI_PIX_FMT_BGRA;
+        break;
+    default:
+        av_log(avctx, AV_LOG_ERROR, "Pixfmt %s not supported in Quadra encoder\n",
+               av_get_pix_fmt_name(avctx->sw_pix_fmt));
+        return AVERROR_INVALIDDATA;
+    }
+
+    if (s->xcoder_opts) {
+        AVDictionary *dict = NULL;
+        AVDictionaryEntry *en = NULL;
+
+        if (!av_dict_parse_string(&dict, s->xcoder_opts, "=", ":", 0)) {
+            while ((en = av_dict_get(dict, "", en, AV_DICT_IGNORE_SUFFIX))) {
+                int parse_ret = ni_encoder_params_set_value(p_param, en->key, en->value);
+                if (parse_ret != NI_RETCODE_SUCCESS) {
+                    switch (parse_ret) {
+                    case NI_RETCODE_PARAM_INVALID_NAME:
+                        av_log(avctx, AV_LOG_ERROR, "Unknown option: %s.\n", en->key);
+                        av_dict_free(&dict);
+                        return AVERROR_EXTERNAL;
+                    case NI_RETCODE_PARAM_ERROR_TOO_BIG:
+                        av_log(avctx, AV_LOG_ERROR, "Invalid %s: too big\n", en->key);
+                        av_dict_free(&dict);
+                        return AVERROR_EXTERNAL;
+                    case NI_RETCODE_PARAM_ERROR_TOO_SMALL:
+                        av_log(avctx, AV_LOG_ERROR, "Invalid %s: too small\n", en->key);
+                        av_dict_free(&dict);
+                        return AVERROR_EXTERNAL;
+                    case NI_RETCODE_PARAM_ERROR_OOR:
+                        av_log(avctx, AV_LOG_ERROR, "Invalid %s: out of range\n",
+                               en->key);
+                        av_dict_free(&dict);
+                        return AVERROR_EXTERNAL;
+                    case NI_RETCODE_PARAM_ERROR_ZERO:
+                        av_log(avctx, AV_LOG_ERROR,
+                               "Error setting option %s to value 0\n", en->key);
+                        av_dict_free(&dict);
+                        return AVERROR_EXTERNAL;
+                    case NI_RETCODE_PARAM_INVALID_VALUE:
+                        av_log(avctx, AV_LOG_ERROR, "Invalid value for %s: %s.\n",
+                               en->key, en->value);
+                        av_dict_free(&dict);
+                        return AVERROR_EXTERNAL;
+                    case NI_RETCODE_PARAM_WARNING_DEPRECATED:
+                        av_log(avctx, AV_LOG_WARNING, "Parameter %s is deprecated\n",
+                               en->key);
+                        break;
+                    default:
+                        av_log(avctx, AV_LOG_ERROR, "Invalid %s: ret %d\n", en->key,
+                               parse_ret);
+                        av_dict_free(&dict);
+                        return AVERROR_EXTERNAL;
+                    }
+                }
+            }
+            av_dict_free(&dict);
+        }
+    }
+
+    if (p_param->enable_vfr) {
+        // in the vfr mode, if the initial framerate is out of [5-120]
+        // think the initial framerate is incorrect, set it to default 30 fps
+        if (p_param->cfg_enc_params.frame_rate < 5 ||
+            p_param->cfg_enc_params.frame_rate > 120) {
+            p_param->cfg_enc_params.frame_rate = 30;
+            s->api_ctx.prev_fps                = 30;
+        } else {
+            s->api_ctx.prev_fps = p_param->cfg_enc_params.frame_rate;
+        }
+        s->api_ctx.last_change_framenum    = 0;
+        s->api_ctx.fps_change_detect_count = 0;
+    }
+
+    av_log(avctx, AV_LOG_DEBUG, "p_param->hwframes = %d\n", p_param->hwframes);
+    if (s->xcoder_gop) {
+        AVDictionary *dict = NULL;
+        AVDictionaryEntry *en = NULL;
+
+        if (!av_dict_parse_string(&dict, s->xcoder_gop, "=", ":", 0)) {
+            if (!gop_params_check(dict, avctx)) {
+                av_dict_free(&dict);
+                return AVERROR_EXTERNAL;
+            }
+
+            while ((en = av_dict_get(dict, "", en, AV_DICT_IGNORE_SUFFIX))) {
+                int parse_ret = ni_encoder_gop_params_set_value(p_param, en->key, en->value);
+                if (parse_ret != NI_RETCODE_SUCCESS) {
+                    switch (parse_ret) {
+                    case NI_RETCODE_PARAM_INVALID_NAME:
+                        av_log(avctx, AV_LOG_ERROR, "Unknown option: %s.\n", en->key);
+                        av_dict_free(&dict);
+                        return AVERROR_EXTERNAL;
+                    case NI_RETCODE_PARAM_ERROR_TOO_BIG:
+                        av_log(avctx, AV_LOG_ERROR,
+                               "Invalid custom GOP parameters: %s too big\n", en->key);
+                        av_dict_free(&dict);
+                        return AVERROR_EXTERNAL;
+                    case NI_RETCODE_PARAM_ERROR_TOO_SMALL:
+                        av_log(avctx, AV_LOG_ERROR,
+                               "Invalid custom GOP parameters: %s too small\n",
+                               en->key);
+                        av_dict_free(&dict);
+                        return AVERROR_EXTERNAL;
+                    case NI_RETCODE_PARAM_ERROR_OOR:
+                        av_log(avctx, AV_LOG_ERROR,
+                               "Invalid custom GOP parameters: %s out of range \n",
+                               en->key);
+                        av_dict_free(&dict);
+                        return AVERROR_EXTERNAL;
+                    case NI_RETCODE_PARAM_ERROR_ZERO:
+                        av_log(avctx, AV_LOG_ERROR,
+                               "Invalid custom GOP paramaters: Error setting option %s "
+                               "to value 0 \n",
+                               en->key);
+                        av_dict_free(&dict);
+                        return AVERROR_EXTERNAL;
+                    case NI_RETCODE_PARAM_INVALID_VALUE:
+                        av_log(avctx, AV_LOG_ERROR,
+                               "Invalid value for GOP param %s: %s.\n", en->key,
+                               en->value);
+                        av_dict_free(&dict);
+                        return AVERROR_EXTERNAL;
+                    case NI_RETCODE_PARAM_WARNING_DEPRECATED:
+                        av_log(avctx, AV_LOG_WARNING, "Parameter %s is deprecated\n",
+                               en->key);
+                        break;
+                    default:
+                        av_log(avctx, AV_LOG_ERROR, "Invalid %s: ret %d\n", en->key,
+                               parse_ret);
+                        av_dict_free(&dict);
+                        return AVERROR_EXTERNAL;
+                    }
+                }
+            }
+            av_dict_free(&dict);
+        }
+    }
+    if (s->nvme_io_size > 0 && s->nvme_io_size % 4096 != 0) {
+        av_log(avctx, AV_LOG_ERROR, "Error XCoder iosize is not 4KB aligned!\n");
+        return AVERROR_EXTERNAL;
+    }
+
+    s->api_ctx.p_session_config = &s->api_param;
+    pparams = (ni_xcoder_params_t *)s->api_ctx.p_session_config;
+    switch (pparams->cfg_enc_params.gop_preset_index) {
+    /* dtsOffset is the max number of non-reference frames in a GOP
+     * (derived from x264/5 algo) In case of IBBBP the first dts of the I
+     * frame should be input_pts-(3*ticks_per_frame) In case of IBP the
+     * first dts of the I frame should be input_pts-(1*ticks_per_frame)
+     * thus we ensure pts>dts in all cases */
+    case 1:
+    case 9:
+    case 10:
+        s->dtsOffset = 0;
+        break;
+    /* ts requires dts/pts of I frame not same when there are B frames in
+       streams */
+    case 3:
+    case 4:
+    case 7:
+        s->dtsOffset = 1;
+        break;
+    case 5:
+        s->dtsOffset = 2;
+        break;
+    case -1: // adaptive GOP
+    case 8:
+        s->dtsOffset = 3;
+        break;
+    default:
+        s->dtsOffset = 7;
+        break;
+    }
+
+    if (pparams->cfg_enc_params.custom_gop_params.custom_gop_size) {
+        int dts_offset = 0;
+        s->dtsOffset   = 0;
+        bool has_b_frame = false;
+        for (int idx = 0;
+             idx < pparams->cfg_enc_params.custom_gop_params.custom_gop_size;
+             idx++) {
+            if (pparams->cfg_enc_params.custom_gop_params.pic_param[idx].poc_offset <
+                idx + 1) {
+                dts_offset = (idx + 1) -
+                            pparams->cfg_enc_params.custom_gop_params.pic_param[idx].
+                            poc_offset;
+                if (s->dtsOffset < dts_offset) {
+                    s->dtsOffset = dts_offset;
+                }
+            }
+
+            if (!has_b_frame &&
+                (pparams->cfg_enc_params.custom_gop_params.pic_param[idx].pic_type ==
+                 PIC_TYPE_B)) {
+                has_b_frame = true;
+            }
+        }
+
+        if (has_b_frame && !s->dtsOffset) {
+            s->dtsOffset = 1;
+        }
+    }
+    av_log(avctx, AV_LOG_VERBOSE, "dts offset set to %ld\n", s->dtsOffset);
+
+    s->total_frames_received = 0;
+    s->gop_offset_count = 0;
+    av_log(avctx, AV_LOG_INFO, "dts offset: %ld, gop_offset_count: %d\n",
+           s->dtsOffset, s->gop_offset_count);
+
+    //overwrite the nvme io size here with a custom value if it was provided
+    if (s->nvme_io_size > 0) {
+        s->api_ctx.max_nvme_io_size = s->nvme_io_size;
+        av_log(avctx, AV_LOG_VERBOSE, "Custom NVME IO Size set to = %u\n",
+               s->api_ctx.max_nvme_io_size);
+        av_log(avctx, AV_LOG_INFO, "Encoder user specified NVMe IO Size set to: %u\n",
+               s->api_ctx.max_nvme_io_size);
+    }
+
+    // overwrite keep alive timeout value here with a custom value if it was
+    // provided
+    // if xcoder option is set then overwrite the (legacy) decoder option
+    xcoder_timeout = s->api_param.cfg_enc_params.keep_alive_timeout;
+    if (xcoder_timeout != NI_DEFAULT_KEEP_ALIVE_TIMEOUT) {
+        s->api_ctx.keep_alive_timeout = xcoder_timeout;
+    } else {
+        s->api_ctx.keep_alive_timeout = s->keep_alive_timeout;
+    }
+    av_log(avctx, AV_LOG_VERBOSE, "Custom NVME Keep Alive Timeout set to = %d\n",
+           s->api_ctx.keep_alive_timeout);
+
+    s->encoder_eof = 0;
+    avctx->bit_rate = pparams->bitrate;
+
+    s->api_ctx.src_bit_depth = 8;
+    s->api_ctx.src_endian = NI_FRAME_LITTLE_ENDIAN;
+    s->api_ctx.roi_len = 0;
+    s->api_ctx.roi_avg_qp = 0;
+    s->api_ctx.bit_depth_factor = 1;
+    if (AV_PIX_FMT_YUV420P10BE == avctx->sw_pix_fmt ||
+        AV_PIX_FMT_YUV420P10LE == avctx->sw_pix_fmt ||
+        AV_PIX_FMT_P010LE == avctx->sw_pix_fmt ||
+        AV_PIX_FMT_NI_QUAD_10_TILE_4X4 == avctx->sw_pix_fmt) {
+        s->api_ctx.bit_depth_factor = 2;
+        s->api_ctx.src_bit_depth = 10;
+        if (AV_PIX_FMT_YUV420P10BE == avctx->sw_pix_fmt) {
+            s->api_ctx.src_endian = NI_FRAME_BIG_ENDIAN;
+        }
+    }
+    switch (avctx->sw_pix_fmt) {
+        case AV_PIX_FMT_NV12:
+        case AV_PIX_FMT_P010LE:
+            pparams->cfg_enc_params.planar = NI_PIXEL_PLANAR_FORMAT_SEMIPLANAR;
+            break;
+        case AV_PIX_FMT_NI_QUAD_8_TILE_4X4:
+        case AV_PIX_FMT_NI_QUAD_10_TILE_4X4:
+            pparams->cfg_enc_params.planar = NI_PIXEL_PLANAR_FORMAT_TILED4X4;
+            break;
+        default:
+            pparams->cfg_enc_params.planar = NI_PIXEL_PLANAR_FORMAT_PLANAR;
+            break;
+    }
+
+    if (1) {
+        s->freeHead = 0;
+        s->freeTail = 0;
+        for (i = 0; i < MAX_NUM_FRAMEPOOL_HWAVFRAME; i++) {
+            s->sframe_pool[i] = av_frame_alloc();
+            if (!s->sframe_pool[i]) {
+                return AVERROR(ENOMEM);
+            }
+            s->aFree_Avframes_list[i] = i;
+            s->freeTail++;
+        }
+        s->aFree_Avframes_list[i] = -1;
+    }
+
+    // init HDR SEI stuff
+    s->api_ctx.sei_hdr_content_light_level_info_len =
+        s->api_ctx.light_level_data_len =
+        s->api_ctx.sei_hdr_mastering_display_color_vol_len =
+        s->api_ctx.mdcv_max_min_lum_data_len = 0;
+    s->api_ctx.p_master_display_meta_data = NULL;
+
+    memset( &(s->api_fme), 0, sizeof(ni_session_data_io_t) );
+    memset( &(s->api_pkt), 0, sizeof(ni_session_data_io_t) );
+
+    s->api_pkt.data.packet.av1_buffer_index = 0;
+
+    //validate encoded bitstream headers struct for encoder open
+    if (xcoder_encoder_header_check_set(avctx) < 0) {
+        return AVERROR_EXTERNAL;
+    }
+
+    // aspect ratio
+    // Use the value passed in from FFmpeg if aspect ratio from xcoder-params have default values
+    if ((p_param->cfg_enc_params.aspectRatioWidth == 0) && (p_param->cfg_enc_params.aspectRatioHeight == 1)) {
+        p_param->cfg_enc_params.aspectRatioWidth  = avctx->sample_aspect_ratio.num;
+        p_param->cfg_enc_params.aspectRatioHeight = avctx->sample_aspect_ratio.den;
+    }
+
+    // generate encoded bitstream headers in advance if configured to do so
+    if ((avctx->codec_id != AV_CODEC_ID_MJPEG) &&
+        (s->gen_global_headers == 1 ||
+         ((avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) &&
+          (s->gen_global_headers == GEN_GLOBAL_HEADERS_AUTO)))) {
+        ret = xcoder_encoder_headers(avctx);
+    }
+
+    // original resolution this stream started with, this is used by encoder sequence change
+    s->api_ctx.ori_width = avctx->width;
+    s->api_ctx.ori_height = avctx->height;
+    s->api_ctx.ori_bit_depth_factor = s->api_ctx.bit_depth_factor;
+    s->api_ctx.ori_pix_fmt = s->api_ctx.pixel_format;
+
+    av_log(avctx, AV_LOG_INFO, "xcoder_setup_encoder "
+           "sw_pix_fmt %d ori_pix_fmt %d\n",
+           avctx->sw_pix_fmt, s->api_ctx.ori_pix_fmt);
+
+    s->api_ctx.ori_luma_linesize = 0;
+    s->api_ctx.ori_chroma_linesize = 0;
+
+    return ret;
+}
+
+/**
+ * Encoder init entry point: configure libxcoder logging, set up the encoder
+ * parameters, and derive a HW device context from the frames context when
+ * the caller supplied only avctx->hw_frames_ctx.
+ *
+ * Returns 0 on success, a negative AVERROR on failure (the session is
+ * closed before returning an error).
+ */
+av_cold int ff_xcoder_encode_init(AVCodecContext *avctx)
+{
+    XCoderEncContext *ctx = avctx->priv_data;
+    AVHWFramesContext *avhwf_ctx;
+    int ret;
+
+    // Propagate FFmpeg's log level to libxcoder so both layers log consistently.
+    ni_log_set_level(ff_to_ni_log_level(av_log_get_level()));
+
+    av_log(avctx, AV_LOG_VERBOSE, "XCoder encode init\n");
+
+    // During a sequence-change re-init restore the device index that was in
+    // use before draining; otherwise remember the user-selected index so a
+    // later re-init can restore it.
+    if (ctx->api_ctx.session_run_state == SESSION_RUN_STATE_SEQ_CHANGE_DRAINING) {
+        ctx->dev_enc_idx = ctx->orig_dev_enc_idx;
+    } else {
+        ctx->orig_dev_enc_idx = ctx->dev_enc_idx;
+    }
+
+    if ((ret = xcoder_setup_encoder(avctx)) < 0) {
+        ff_xcoder_encode_close(avctx);
+        return ret;
+    }
+
+    if (!avctx->hw_device_ctx) {
+        if (avctx->hw_frames_ctx) {
+            avhwf_ctx = (AVHWFramesContext *)avctx->hw_frames_ctx->data;
+            avctx->hw_device_ctx = av_buffer_ref(avhwf_ctx->device_ref);
+            if (!avctx->hw_device_ctx) {
+                // av_buffer_ref() can fail on ENOMEM; do not continue with a
+                // half-initialized encoder and a NULL device context.
+                ff_xcoder_encode_close(avctx);
+                return AVERROR(ENOMEM);
+            }
+        }
+    }
+
+    return 0;
+}
+
+/**
+ * Close the encoder session and release all resources held by the context.
+ *
+ * Also called from the ff_xcoder_encode_init() failure path, so every free
+ * here must tolerate members that were never allocated. During a sequence
+ * change (SESSION_RUN_STATE_SEQ_CHANGE_DRAINING) the device handles and the
+ * input frame fifo are deliberately kept alive for the re-init.
+ *
+ * Always returns 0.
+ */
+int ff_xcoder_encode_close(AVCodecContext *avctx)
+{
+    XCoderEncContext *ctx = avctx->priv_data;
+    ni_retcode_t ret = NI_RETCODE_FAILURE;
+    int i;
+
+    // Any pooled AVFrames that were never unreferenced are released here.
+    for (i = 0; i < MAX_NUM_FRAMEPOOL_HWAVFRAME; i++) {
+        av_frame_free(&(ctx->sframe_pool[i]));
+        ctx->sframe_pool[i] = NULL;
+    }
+
+    ret = ni_device_session_close(&ctx->api_ctx, ctx->encoder_eof,
+                                  NI_DEVICE_TYPE_ENCODER);
+    if (NI_RETCODE_SUCCESS != ret) {
+        av_log(avctx, AV_LOG_ERROR, "Failed to close Encoder Session (status = %d)\n", ret);
+    }
+
+    av_log(avctx, AV_LOG_VERBOSE, "XCoder encode close: session_run_state %d\n", ctx->api_ctx.session_run_state);
+    if (ctx->api_ctx.session_run_state != SESSION_RUN_STATE_SEQ_CHANGE_DRAINING) {
+        av_log(avctx, AV_LOG_VERBOSE, "XCoder encode close: close blk_io_handle %d device_handle %d\n", ctx->api_ctx.blk_io_handle, ctx->api_ctx.device_handle);
+#ifdef _WIN32
+        ni_device_close(ctx->api_ctx.device_handle);
+#elif __linux__
+        // On Linux the char and block device handles are distinct and both
+        // need closing.
+        ni_device_close(ctx->api_ctx.device_handle);
+        ni_device_close(ctx->api_ctx.blk_io_handle);
+#endif
+        ctx->api_ctx.device_handle = NI_INVALID_DEVICE_HANDLE;
+        ctx->api_ctx.blk_io_handle = NI_INVALID_DEVICE_HANDLE;
+        ctx->api_ctx.auto_dl_handle = NI_INVALID_DEVICE_HANDLE;
+        ctx->api_ctx.sender_handle = NI_INVALID_DEVICE_HANDLE;
+    }
+
+    av_log(avctx, AV_LOG_VERBOSE, "XCoder encode close (status = %d)\n", ret);
+
+    if (ctx->api_fme.data.frame.buffer_size
+        || ctx->api_fme.data.frame.metadata_buffer_size
+        || ctx->api_fme.data.frame.start_buffer_size) {
+        ni_frame_buffer_free(&(ctx->api_fme.data.frame));
+    }
+    ni_packet_buffer_free(&(ctx->api_pkt.data.packet));
+    if (AV_CODEC_ID_AV1 == avctx->codec_id &&
+        ctx->api_pkt.data.packet.av1_buffer_index)
+        ni_packet_buffer_free_av1(&(ctx->api_pkt.data.packet));
+
+    // Guard against a NULL fifo (close may run before setup allocated it);
+    // av_fifo_can_read() returns size_t, so log with %zu.
+    av_log(avctx, AV_LOG_DEBUG, "fifo num frames: %zu\n",
+           ctx->fme_fifo ? av_fifo_can_read(ctx->fme_fifo) : (size_t)0);
+    if (ctx->api_ctx.session_run_state != SESSION_RUN_STATE_SEQ_CHANGE_DRAINING) {
+        av_fifo_freep2(&ctx->fme_fifo);
+        av_log(avctx, AV_LOG_DEBUG, " , freed.\n");
+    } else {
+        // Keep queued frames across the sequence-change re-init.
+        av_log(avctx, AV_LOG_DEBUG, " , kept.\n");
+    }
+
+    ni_device_session_context_clear(&ctx->api_ctx);
+
+    ni_rsrc_free_device_context(ctx->rsrc_ctx);
+    ctx->rsrc_ctx = NULL;
+
+    ni_memfree(ctx->av_rois);
+    av_freep(&ctx->p_spsPpsHdr);
+
+    if (avctx->hw_device_ctx) {
+        av_buffer_unref(&avctx->hw_device_ctx);
+    }
+    ctx->started = 0;
+
+    return 0;
+}
+
+/*
+ * Notify the encoder session of an input resolution / bit-depth change and
+ * re-derive the per-session format state (endianness, planar layout,
+ * frame/packet counters) for the new stream parameters.
+ *
+ * Returns NI_RETCODE_SUCCESS (0) on success, a libxcoder error code if the
+ * device rejects the sequence change, or AVERROR_INVALIDDATA for an
+ * unsupported sw_pix_fmt.
+ */
+static int xcoder_encode_sequence_change(AVCodecContext *avctx, int width, int height, int bit_depth_factor)
+{
+    XCoderEncContext *ctx = avctx->priv_data;
+    ni_retcode_t ret = NI_RETCODE_FAILURE;
+    ni_xcoder_params_t *p_param = &ctx->api_param;
+    ni_xcoder_params_t *pparams = (ni_xcoder_params_t *)ctx->api_ctx.p_session_config;
+
+    av_log(avctx, AV_LOG_VERBOSE, "XCoder encode sequence change: session_run_state %d\n", ctx->api_ctx.session_run_state);
+
+    // Tell the device first; nothing below should run if the device refuses.
+    ret = ni_device_session_sequence_change(&ctx->api_ctx, width, height, bit_depth_factor, NI_DEVICE_TYPE_ENCODER);
+
+    if (NI_RETCODE_SUCCESS != ret) {
+        av_log(avctx, AV_LOG_ERROR, "Failed to send Sequence Change to Encoder Session (status = %d)\n", ret);
+        return ret;
+    }
+
+    // update AvCodecContext
+    if (avctx->pix_fmt != AV_PIX_FMT_NI_QUAD) {
+        // Software frames: sw_pix_fmt tracks pix_fmt directly.
+        av_log(avctx, AV_LOG_INFO, "sw_pix_fmt assigned to pix_fmt was %d, is now %d\n", avctx->pix_fmt, avctx->sw_pix_fmt);
+        avctx->sw_pix_fmt = avctx->pix_fmt;
+    } else {
+        // HW frames: mark hwframes usage once the new size meets the
+        // device minimums.
+        if ((avctx->height >= NI_MIN_HEIGHT) && (avctx->width >= NI_MIN_WIDTH)) {
+            p_param->hwframes = 1;
+        }
+    }
+
+    // Accept only the pixel formats the encoder supports; everything else
+    // (including the explicitly listed 12-bit/422/444/GBRP/GRAY formats)
+    // is rejected.
+    switch (avctx->sw_pix_fmt) {
+        case AV_PIX_FMT_YUV420P:
+        case AV_PIX_FMT_YUVJ420P:
+        case AV_PIX_FMT_YUV420P10LE:
+        case AV_PIX_FMT_NV12:
+        case AV_PIX_FMT_P010LE:
+        case AV_PIX_FMT_NI_QUAD_8_TILE_4X4:
+        case AV_PIX_FMT_NI_QUAD_10_TILE_4X4:
+        case AV_PIX_FMT_ARGB:
+        case AV_PIX_FMT_ABGR:
+        case AV_PIX_FMT_RGBA:
+        case AV_PIX_FMT_BGRA:
+            break;
+        case AV_PIX_FMT_YUV420P12:
+        case AV_PIX_FMT_YUV422P:
+        case AV_PIX_FMT_YUV422P10:
+        case AV_PIX_FMT_YUV422P12:
+        case AV_PIX_FMT_GBRP:
+        case AV_PIX_FMT_GBRP10:
+        case AV_PIX_FMT_GBRP12:
+        case AV_PIX_FMT_YUV444P:
+        case AV_PIX_FMT_YUV444P10:
+        case AV_PIX_FMT_YUV444P12:
+        case AV_PIX_FMT_GRAY8:
+        case AV_PIX_FMT_GRAY10:
+        case AV_PIX_FMT_GRAY12:
+        default:
+            return AVERROR_INVALIDDATA;
+            break;
+    }
+
+    // update session context
+    ctx->api_ctx.bit_depth_factor = bit_depth_factor;
+    ctx->api_ctx.src_bit_depth = (bit_depth_factor == 1) ? 8 : 10;
+    ctx->api_ctx.src_endian = (AV_PIX_FMT_YUV420P10BE == avctx->sw_pix_fmt) ? NI_FRAME_BIG_ENDIAN : NI_FRAME_LITTLE_ENDIAN;
+    ctx->api_ctx.ready_to_close = 0;
+    ctx->api_ctx.frame_num = 0; // need to reset frame_num because pkt_num is set to 1 when header received after sequnce change, and low delay mode compares frame_num and pkt_num
+    ctx->api_ctx.pkt_num = 0; // also need to reset pkt_num because before header received, pkt_num > frame_num will also cause low delay mode stuck
+    ctx->api_pkt.data.packet.end_of_stream = 0;
+
+    // Re-derive the planar layout for the (possibly changed) pixel format.
+    switch (avctx->sw_pix_fmt) {
+        case AV_PIX_FMT_NV12:
+        case AV_PIX_FMT_P010LE:
+            pparams->cfg_enc_params.planar = NI_PIXEL_PLANAR_FORMAT_SEMIPLANAR;
+            break;
+        case AV_PIX_FMT_NI_QUAD_8_TILE_4X4:
+        case AV_PIX_FMT_NI_QUAD_10_TILE_4X4:
+            pparams->cfg_enc_params.planar = NI_PIXEL_PLANAR_FORMAT_TILED4X4;
+            break;
+        default:
+            pparams->cfg_enc_params.planar = NI_PIXEL_PLANAR_FORMAT_PLANAR;
+            break;
+    }
+    return ret;
+}
+
+/* Hard reset: tear the encoder session down and bring it back up.
+ * Returns the result of ff_xcoder_encode_init(). */
+static int xcoder_encode_reset(AVCodecContext *avctx)
+{
+    int ret;
+
+    av_log(avctx, AV_LOG_WARNING, "XCoder encode reset\n");
+    ff_xcoder_encode_close(avctx);
+    ret = ff_xcoder_encode_init(avctx);
+    return ret;
+}
+
+// frame fifo operations
+static int is_input_fifo_empty(XCoderEncContext *s)
+{
+    if (!s->fme_fifo) {
+        return 1;
+    }
+    return av_fifo_can_read(s->fme_fifo) ? 0 : 1;
+}
+
+/**
+ * Queue an input frame into the encoder's frame fifo, growing the fifo by
+ * one slot when it is full (up to NI_MAX_FIFO_CAPACITY).
+ *
+ * The internally buffered frame is written as-is; an external frame is
+ * re-referenced first so the caller's copy and the queued copy do not share
+ * ownership (which would risk a double free).
+ *
+ * Returns 0 on success, a negative AVERROR on failure.
+ */
+static int enqueue_frame(AVCodecContext *avctx, const AVFrame *inframe)
+{
+    XCoderEncContext *ctx = avctx->priv_data;
+    size_t nb_elems;
+    int ret = 0;
+
+    // expand frame buffer fifo if not enough space
+    if (!av_fifo_can_write(ctx->fme_fifo)) {
+        if (av_fifo_can_read(ctx->fme_fifo) >= NI_MAX_FIFO_CAPACITY) {
+            // av_fifo_can_read() returns size_t -> log with %zu.
+            av_log(avctx, AV_LOG_ERROR, "Encoder frame buffer fifo capacity (%zu) reached maximum (%d)\n",
+                   av_fifo_can_read(ctx->fme_fifo), NI_MAX_FIFO_CAPACITY);
+            return AVERROR_EXTERNAL;
+        }
+
+        ret = av_fifo_grow2(ctx->fme_fifo, (size_t) 1);
+        if (ret < 0) {
+            av_log(avctx, AV_LOG_ERROR, "Cannot grow FIFO: out of memory\n");
+            return ret;
+        }
+
+        nb_elems = av_fifo_can_read(ctx->fme_fifo) + av_fifo_can_write(ctx->fme_fifo);
+        if ((nb_elems % 100) == 0) {
+            av_log(avctx, AV_LOG_INFO, "Enc fifo being extended to: %zu\n", nb_elems);
+        }
+    }
+
+    if (inframe == &ctx->buffered_fme) {
+        // Already our own buffered copy; move it into the fifo directly.
+        av_fifo_write(ctx->fme_fifo, (void *)inframe, (size_t) 1);
+    } else {
+        AVFrame temp_frame;
+        memset(&temp_frame, 0, sizeof(AVFrame));
+        // In case double free for external input frame and our buffered frame.
+        ret = av_frame_ref(&temp_frame, inframe);
+        if (ret < 0) {
+            // Fix: do not queue a zeroed/partial frame when ref fails.
+            av_log(avctx, AV_LOG_ERROR, "enqueue_frame: av_frame_ref failed (%d)\n",
+                   ret);
+            return ret;
+        }
+        av_fifo_write(ctx->fme_fifo, &temp_frame, 1);
+    }
+
+    av_log(avctx, AV_LOG_DEBUG, "fme queued, fifo num frames: %zu\n",
+           av_fifo_can_read(ctx->fme_fifo));
+    return ret;
+}
+
+static int xcoder_send_frame(AVCodecContext *avctx, const AVFrame *frame)
+{
+    XCoderEncContext *ctx = avctx->priv_data;
+    bool ishwframe;
+    bool isnv12frame;
+    bool alignment_2pass_wa;
+    int format_in_use;
+    int ret = 0;
+    int sent;
+    int orig_avctx_width = avctx->width;
+    int orig_avctx_height = avctx->height;
+    ni_xcoder_params_t *p_param;
+    int need_to_copy = 1;
+    AVHWFramesContext *avhwf_ctx;
+    AVNIFramesContext *nif_src_ctx;
+    AVFrameSideData *side_data;
+    const AVFrame *first_frame = NULL;
+    // employ a ni_frame_t as a data holder to convert/prepare for side data
+    // of the passed in frame
+    ni_frame_t dec_frame    = {0};
+    ni_aux_data_t *aux_data = NULL;
+    // data buffer for various SEI: HDR mastering display color volume, HDR
+    // content light level, close caption, User data unregistered, HDR10+ etc.
+    int send_sei_with_idr;
+    uint8_t mdcv_data[NI_MAX_SEI_DATA];
+    uint8_t cll_data[NI_MAX_SEI_DATA];
+    uint8_t cc_data[NI_MAX_SEI_DATA];
+    uint8_t udu_data[NI_MAX_SEI_DATA];
+    uint8_t hdrp_data[NI_MAX_SEI_DATA];
+
+    av_log(avctx, AV_LOG_VERBOSE, "XCoder send frame\n");
+
+    p_param = (ni_xcoder_params_t *) ctx->api_ctx.p_session_config;
+    alignment_2pass_wa = ((p_param->cfg_enc_params.lookAheadDepth ||
+                           p_param->cfg_enc_params.crf >= 0 ||
+                           p_param->cfg_enc_params.crfFloat >= 0) &&
+                          (avctx->codec_id == AV_CODEC_ID_HEVC ||
+                           avctx->codec_id == AV_CODEC_ID_AV1));
+
+    // leave encoder instance open to when the first frame buffer arrives so that
+    // its stride size is known and handled accordingly.
+    if (ctx->started == 0) {
+        if (!is_input_fifo_empty(ctx)) {
+            av_log(avctx, AV_LOG_VERBOSE, "first frame: use fme from fifo peek\n");
+            av_fifo_peek(ctx->fme_fifo, &ctx->buffered_fme, 1, 0);
+            ctx->buffered_fme.extended_data = ctx->buffered_fme.data;
+            first_frame = &ctx->buffered_fme;
+
+        } else if (frame) {
+            av_log(avctx, AV_LOG_VERBOSE, "first frame: use input frame\n");
+            first_frame = frame;
+        } else {
+            av_log(avctx, AV_LOG_ERROR, "first frame: NULL is unexpected!\n");
+        }
+    } else if (ctx->api_ctx.session_run_state == SESSION_RUN_STATE_SEQ_CHANGE_OPENING) {
+        if (!is_input_fifo_empty(ctx)) {
+            av_log(avctx, AV_LOG_VERBOSE, "first frame: use fme from fifo peek\n");
+            av_fifo_peek(ctx->fme_fifo, &ctx->buffered_fme, 1, 0);
+            ctx->buffered_fme.extended_data = ctx->buffered_fme.data;
+            first_frame = &ctx->buffered_fme;
+        } else {
+            av_log(avctx, AV_LOG_ERROR, "No buffered frame - Sequence Change Fail");
+            ret = AVERROR_EXTERNAL;
+            return ret;
+        }
+    }
+
+    if (first_frame && ctx->started == 0) {
+        // if frame stride size is not as we expect it,
+        // adjust using xcoder-params conf_win_right
+        int linesize_aligned = first_frame->width;
+        int height_aligned = first_frame->height;
+        ishwframe = first_frame->format == AV_PIX_FMT_NI_QUAD;
+
+        if (linesize_aligned < NI_MIN_WIDTH) {
+            p_param->cfg_enc_params.conf_win_right +=
+                (NI_MIN_WIDTH - first_frame->width) / 2 * 2;
+            linesize_aligned = NI_MIN_WIDTH;
+        } else {
+            if (avctx->sw_pix_fmt == AV_PIX_FMT_NI_QUAD_8_TILE_4X4 ||
+                avctx->sw_pix_fmt == AV_PIX_FMT_NI_QUAD_10_TILE_4X4) {
+                linesize_aligned = FFALIGN(first_frame->width, 4);
+                p_param->cfg_enc_params.conf_win_right +=
+                    (linesize_aligned - first_frame->width) / 2 * 2;
+            } else {
+                linesize_aligned = FFALIGN(first_frame->width, 2);
+                p_param->cfg_enc_params.conf_win_right +=
+                    (linesize_aligned - first_frame->width) / 2 * 2;
+            }
+        }
+        p_param->source_width = linesize_aligned;
+
+        if (height_aligned < NI_MIN_HEIGHT) {
+            p_param->cfg_enc_params.conf_win_bottom +=
+                (NI_MIN_HEIGHT - first_frame->height) / 2 * 2;
+            height_aligned = NI_MIN_HEIGHT;
+        } else {
+            if (avctx->sw_pix_fmt == AV_PIX_FMT_NI_QUAD_8_TILE_4X4 ||
+                avctx->sw_pix_fmt == AV_PIX_FMT_NI_QUAD_10_TILE_4X4) {
+                height_aligned = FFALIGN(first_frame->height, 4);
+                p_param->cfg_enc_params.conf_win_bottom +=
+                    (height_aligned - first_frame->height) / 4 * 4;
+            } else {
+                height_aligned = FFALIGN(first_frame->height, 2);
+                p_param->cfg_enc_params.conf_win_bottom +=
+                    (height_aligned - first_frame->height) / 2 * 2;
+            }
+        }
+        p_param->source_height = height_aligned;
+
+        av_log(avctx, AV_LOG_DEBUG,
+               "color primaries (%u %u) colorspace (%u %u) color_range (%u %u)\n",
+               avctx->color_primaries, first_frame->color_primaries,
+               avctx->colorspace, first_frame->colorspace,
+               avctx->color_range, first_frame->color_range);
+
+        if (avctx->color_primaries == AVCOL_PRI_UNSPECIFIED) {
+            avctx->color_primaries = first_frame->color_primaries;
+        }
+        if (avctx->color_trc == AVCOL_TRC_UNSPECIFIED) {
+            avctx->color_trc = first_frame->color_trc;
+        }
+        if (avctx->colorspace == AVCOL_SPC_UNSPECIFIED) {
+            avctx->colorspace = first_frame->colorspace;
+        }
+        avctx->color_range = first_frame->color_range;
+
+        if (xcoder_encoder_header_check_set(avctx) < 0) {
+            return AVERROR_EXTERNAL;
+        }
+
+        av_log(avctx, AV_LOG_VERBOSE,
+               "XCoder frame->linesize: %d/%d/%d frame width/height %dx%d"
+               " conf_win_right %d  conf_win_bottom %d , color primaries %u trc %u "
+               "space %u format %d\n",
+               first_frame->linesize[0], first_frame->linesize[1],
+               first_frame->linesize[2], first_frame->width, first_frame->height,
+               p_param->cfg_enc_params.conf_win_right,
+               p_param->cfg_enc_params.conf_win_bottom,
+               first_frame->color_primaries, first_frame->color_trc,
+               first_frame->colorspace, first_frame->format);
+
+        if (SESSION_RUN_STATE_SEQ_CHANGE_OPENING != ctx->api_ctx.session_run_state) {
+            // sequence change backup / restore encoder device handles, hw_id and
+            // block device name, so no need to overwrite hw_id/blk_dev_name to user
+            // set values
+            ctx->api_ctx.hw_id = ctx->dev_enc_idx;
+            if (ctx->dev_xcoder)
+                av_strlcpy(ctx->api_ctx.dev_xcoder_name, ctx->dev_xcoder, MAX_CHAR_IN_DEVICE_NAME);
+            if (ctx->dev_blk_name)
+                av_strlcpy(ctx->api_ctx.blk_dev_name, ctx->dev_blk_name, NI_MAX_DEVICE_NAME_LEN);
+        }
+
+        p_param->cfg_enc_params.enable_acq_limit = 1;
+        p_param->rootBufId = (ishwframe) ? ((niFrameSurface1_t*)((uint8_t*)first_frame->data[3]))->ui16FrameIdx : 0;
+        if (ishwframe) {
+            ctx->api_ctx.hw_action = NI_CODEC_HW_ENABLE;
+            ctx->api_ctx.sender_handle = (ni_device_handle_t)(
+                (int64_t)(((niFrameSurface1_t *)((uint8_t *)first_frame->data[3]))
+                            ->device_handle));
+        }
+
+        if (first_frame->hw_frames_ctx && ctx->api_ctx.hw_id == -1 &&
+            0 == strcmp(ctx->api_ctx.blk_dev_name, "")) {
+            ctx->api_ctx.hw_id = ni_get_cardno(first_frame);
+            av_log(avctx, AV_LOG_VERBOSE,
+                   "xcoder_send_frame: hw_id -1, empty blk_dev_name, collocated "
+                   "to %d\n",
+                   ctx->api_ctx.hw_id);
+        }
+
+        // AUD insertion has to be handled differently in the firmware
+        // if it is global header
+        if (p_param->cfg_enc_params.EnableAUD) {
+            if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
+                p_param->cfg_enc_params.EnableAUD = NI_ENABLE_AUD_FOR_GLOBAL_HEADER;
+            }
+
+            av_log(avctx, AV_LOG_VERBOSE,
+                   "%s: EnableAUD %d global header flag %d\n", __FUNCTION__,
+                   (p_param->cfg_enc_params.EnableAUD),
+                   (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) ? 1 : 0);
+        }
+
+        // config linesize for zero copy (if input resolution is zero copy compatible)
+        ni_encoder_frame_zerocopy_check(&ctx->api_ctx,
+            p_param, first_frame->width, first_frame->height,
+            first_frame->linesize, true);
+
+        ret = ni_device_session_open(&ctx->api_ctx, NI_DEVICE_TYPE_ENCODER);
+
+        // As the file handle may change we need to assign back
+        ctx->dev_xcoder_name = ctx->api_ctx.dev_xcoder_name;
+        ctx->blk_xcoder_name = ctx->api_ctx.blk_xcoder_name;
+        ctx->dev_enc_idx = ctx->api_ctx.hw_id;
+
+        switch (ret) {
+        case NI_RETCODE_SUCCESS:
+            av_log(avctx, AV_LOG_VERBOSE,
+                   "XCoder %s.%d (inst: %d) opened successfully\n",
+                   ctx->dev_xcoder_name, ctx->dev_enc_idx, ctx->api_ctx.session_id);
+            break;
+        case NI_RETCODE_INVALID_PARAM:
+            av_log(avctx, AV_LOG_ERROR,
+                   "Failed to open encoder (status = %d), invalid parameter values "
+                   "given: %s\n",
+                   ret, ctx->api_ctx.param_err_msg);
+            ret = AVERROR_EXTERNAL;
+            return ret;
+        default:
+            av_log(avctx, AV_LOG_ERROR,
+                   "Failed to open encoder (status = %d), "
+                   "resource unavailable\n",
+                   ret);
+            // for FFmpeg >= 6.1 sequence change session open fail: unlike previous
+            // FFmpeg versions which terminate streams and codecs right away
+            // by calling exit_program after submit_encode_frame returns error,
+            // FFmpeg 6.1 calls enc_flush after error, which enters this function again,
+            // but the buffered frame would have been unref'ed by then, and therefore
+            // we must remove buffered frame upon session open fail to prevent
+            // accessing or unref'ing invalid frame
+            if (SESSION_RUN_STATE_SEQ_CHANGE_DRAINING !=
+                ctx->api_ctx.session_run_state) {
+                if (! is_input_fifo_empty(ctx)) {
+                    av_fifo_drain2(ctx->fme_fifo, (size_t) 1);
+                    av_log(avctx, AV_LOG_DEBUG, "fme popped, fifo num frames: %lu\n",
+                           av_fifo_can_read(ctx->fme_fifo));
+                }
+            }
+            ret = AVERROR_EXTERNAL;
+            return ret;
+        }
+
+        // set up ROI map if in ROI demo mode
+        // Note: this is for demo purpose, and its direct access to QP map in
+        //       session context is not the usual way to do ROI; the normal way is
+        //       through side data of AVFrame in libavcodec, or aux data of ni_frame
+        //       in libxcoder
+        if (p_param->cfg_enc_params.roi_enable &&
+            (1 == p_param->roi_demo_mode || 2 == p_param->roi_demo_mode)) {
+            if (ni_set_demo_roi_map(&ctx->api_ctx) < 0) {
+                return AVERROR(ENOMEM);
+            }
+        }
+    } //end if (first_frame && ctx->started == 0)
+
+    if (ctx->encoder_flushing) {
+        if (! frame && is_input_fifo_empty(ctx)) {
+            av_log(avctx, AV_LOG_DEBUG, "XCoder EOF: null frame && fifo empty\n");
+            return AVERROR_EOF;
+        }
+    }
+
+    if (! frame) {
+        if (is_input_fifo_empty(ctx)) {
+            ctx->eos_fme_received = 1;
+            av_log(avctx, AV_LOG_DEBUG, "null frame, eos_fme_received = 1\n");
+        } else {
+            avctx->internal->draining = 0;
+            av_log(avctx, AV_LOG_DEBUG, "null frame, but fifo not empty, clear draining = 0\n");
+        }
+    } else {
+        av_log(avctx, AV_LOG_DEBUG, "XCoder send frame #%"PRIu64"\n",
+               ctx->api_ctx.frame_num);
+
+        // queue up the frame if fifo is NOT empty, or: sequence change ongoing !
+        if (! is_input_fifo_empty(ctx) ||
+            SESSION_RUN_STATE_SEQ_CHANGE_DRAINING == ctx->api_ctx.session_run_state) {
+            ret = enqueue_frame(avctx, frame);
+            if (ret < 0) {
+                return ret;
+            }
+
+            if (SESSION_RUN_STATE_SEQ_CHANGE_DRAINING ==
+                ctx->api_ctx.session_run_state) {
+                av_log(avctx, AV_LOG_TRACE, "XCoder doing sequence change, frame #%"PRIu64" "
+                       "queued and return 0 !\n", ctx->api_ctx.frame_num);
+                return 0;
+            }
+        } else if (frame != &ctx->buffered_fme) {
+            ret = av_frame_ref(&ctx->buffered_fme, frame);
+        }
+    }
+
+resend:
+
+    if (ctx->started == 0) {
+        ctx->api_fme.data.frame.start_of_stream = 1;
+        ctx->started = 1;
+    } else if (ctx->api_ctx.session_run_state == SESSION_RUN_STATE_SEQ_CHANGE_OPENING) {
+        ctx->api_fme.data.frame.start_of_stream = 1;
+    } else {
+        ctx->api_fme.data.frame.start_of_stream = 0;
+    }
+
+    if (is_input_fifo_empty(ctx)) {
+        av_log(avctx, AV_LOG_DEBUG,
+               "no frame in fifo to send, just send/receive ..\n");
+        if (ctx->eos_fme_received) {
+            av_log(avctx, AV_LOG_DEBUG,
+                   "no frame in fifo to send, send eos ..\n");
+        }
+    } else {
+        av_log(avctx, AV_LOG_DEBUG, "fifo peek fme\n");
+        av_fifo_peek(ctx->fme_fifo, &ctx->buffered_fme, 1, 0);
+        ctx->buffered_fme.extended_data = ctx->buffered_fme.data;
+    }
+
+    if (!ctx->eos_fme_received) {
+        int8_t bit_depth = 1;
+        ishwframe        = ctx->buffered_fme.format == AV_PIX_FMT_NI_QUAD;
+        if (ishwframe) {
+            // Superframe early cleanup of unused outputs
+            niFrameSurface1_t *pOutExtra;
+            if (ctx->buffered_fme.buf[1]) {
+                // NOLINTNEXTLINE(clang-diagnostic-incompatible-pointer-types)
+                pOutExtra= (niFrameSurface1_t *)ctx->buffered_fme.buf[1]->data;
+                if (pOutExtra->ui16FrameIdx != 0) {
+                    av_log(avctx, AV_LOG_DEBUG, "Unref unused index %d\n",
+                           pOutExtra->ui16FrameIdx);
+                } else {
+                    av_log(avctx, AV_LOG_ERROR,
+                           "ERROR: Should not be getting superframe with dead "
+                           "outputs\n");
+                }
+                av_buffer_unref(&ctx->buffered_fme.buf[1]);
+                if (ctx->buffered_fme.buf[2]) {
+                    // NOLINTNEXTLINE(clang-diagnostic-incompatible-pointer-types)
+                    pOutExtra = (niFrameSurface1_t *)ctx->buffered_fme.buf[2]->data;
+                    if (pOutExtra->ui16FrameIdx != 0) {
+                        av_log(avctx, AV_LOG_DEBUG, "Unref unused index %d\n",
+                               pOutExtra->ui16FrameIdx);
+                    } else {
+                        av_log(
+                            avctx, AV_LOG_ERROR,
+                            "ERROR: Should not be getting superframe with dead "
+                            "outputs\n");
+                    }
+                    av_buffer_unref(&ctx->buffered_fme.buf[2]);
+                }
+            }
+            pOutExtra = (niFrameSurface1_t *)ctx->buffered_fme.data[3];
+            if (ctx->api_ctx.pixel_format == NI_PIX_FMT_ARGB
+                || ctx->api_ctx.pixel_format == NI_PIX_FMT_ABGR
+                || ctx->api_ctx.pixel_format == NI_PIX_FMT_RGBA
+                || ctx->api_ctx.pixel_format == NI_PIX_FMT_BGRA) {
+                bit_depth = 1;
+            } else {
+                bit_depth = pOutExtra->bit_depth;
+            }
+
+            switch (bit_depth) {
+            case 1:
+            case 2:
+                break;
+            default:
+                av_log(avctx, AV_LOG_ERROR, "ERROR: Unknown bit depth %d!\n", bit_depth);
+                return AVERROR_INVALIDDATA;
+            }
+        } else {
+            if (AV_PIX_FMT_YUV420P10BE == ctx->buffered_fme.format ||
+                AV_PIX_FMT_YUV420P10LE == ctx->buffered_fme.format ||
+                AV_PIX_FMT_P010LE == ctx->buffered_fme.format) {
+                bit_depth = 2;
+            }
+        }
+
+        if ((ctx->buffered_fme.height && ctx->buffered_fme.width &&
+             (ctx->buffered_fme.height != avctx->height ||
+              ctx->buffered_fme.width != avctx->width)) ||
+            bit_depth != ctx->api_ctx.bit_depth_factor) {
+            av_log(avctx, AV_LOG_INFO,
+                   "xcoder_send_frame resolution change %dx%d "
+                   "-> %dx%d or bit depth change %d -> %d\n",
+                   avctx->width, avctx->height, ctx->buffered_fme.width,
+                   ctx->buffered_fme.height, ctx->api_ctx.bit_depth_factor,
+                   bit_depth);
+
+            ctx->api_ctx.session_run_state =
+                SESSION_RUN_STATE_SEQ_CHANGE_DRAINING;
+            ctx->eos_fme_received = 1;
+
+            // have to queue this frame if not done so: an empty queue
+            if (is_input_fifo_empty(ctx)) {
+                av_log(avctx, AV_LOG_TRACE,
+                       "resolution change when fifo empty, frame "
+                       "#%" PRIu64 " being queued ..\n",
+                       ctx->api_ctx.frame_num);
+                // unref buffered frame (this buffered frame is taken from input
+                // AVFrame) because we are going to send EOS (instead of sending
+                // buffered frame)
+                if (frame != &ctx->buffered_fme) {
+                    av_frame_unref(&ctx->buffered_fme);
+                }
+                ret = enqueue_frame(avctx, frame);
+                if (ret < 0) {
+                    return ret;
+                }
+            }
+        }
+    }
+
+    ctx->api_fme.data.frame.preferred_characteristics_data_len = 0;
+    ctx->api_fme.data.frame.end_of_stream                      = 0;
+    ctx->api_fme.data.frame.force_key_frame =
+        ctx->api_fme.data.frame.use_cur_src_as_long_term_pic =
+            ctx->api_fme.data.frame.use_long_term_ref = 0;
+
+    ctx->api_fme.data.frame.sei_total_len =
+        ctx->api_fme.data.frame.sei_cc_offset = ctx->api_fme.data.frame
+                                                    .sei_cc_len =
+            ctx->api_fme.data.frame.sei_hdr_mastering_display_color_vol_offset =
+                ctx->api_fme.data.frame
+                    .sei_hdr_mastering_display_color_vol_len =
+                    ctx->api_fme.data.frame
+                        .sei_hdr_content_light_level_info_offset =
+                        ctx->api_fme.data.frame
+                            .sei_hdr_content_light_level_info_len =
+                            ctx->api_fme.data.frame.sei_hdr_plus_offset =
+                                ctx->api_fme.data.frame.sei_hdr_plus_len = 0;
+
+    ctx->api_fme.data.frame.roi_len      = 0;
+    ctx->api_fme.data.frame.reconf_len   = 0;
+    ctx->api_fme.data.frame.force_pic_qp = 0;
+
+    if (SESSION_RUN_STATE_SEQ_CHANGE_DRAINING ==
+            ctx->api_ctx.session_run_state ||
+        (ctx->eos_fme_received && is_input_fifo_empty(ctx))) {
+        av_log(avctx, AV_LOG_VERBOSE, "XCoder start flushing\n");
+        ctx->api_fme.data.frame.end_of_stream = 1;
+        ctx->encoder_flushing                 = 1;
+    } else {
+        format_in_use = ctx->buffered_fme.format;
+
+        // extra data starts with metadata header, various aux data sizes
+        // have been reset above
+        ctx->api_fme.data.frame.extra_data_len =
+            NI_APP_ENC_FRAME_META_DATA_SIZE;
+
+        ctx->api_fme.data.frame.ni_pict_type    = 0;
+
+        ret = ni_enc_prep_reconf_demo_data(&ctx->api_ctx, &dec_frame);
+        if (ret < 0) {
+            return ret;
+        }
+
+        // support VFR
+        if (ctx->api_param.enable_vfr) {
+            int cur_fps = 0, pre_fps = 0;
+
+            pre_fps = ctx->api_ctx.prev_fps;
+
+            if (ctx->buffered_fme.pts > ctx->api_ctx.prev_pts) {
+                ctx->api_ctx.passed_time_in_timebase_unit += ctx->buffered_fme.pts - ctx->api_ctx.prev_pts;
+                ctx->api_ctx.count_frame_num_in_sec++;
+                //change the FrameRate for VFR
+                //1. Only when the fps changes, set the new bitrate
+                //2. The interval between two framerate change settings shall be greater than 1 second,
+                //   or be at the start of the transcoding
+                if (ctx->api_ctx.passed_time_in_timebase_unit >= (avctx->time_base.den / avctx->time_base.num)) {
+                    //this is a workaround for small resolution vfr mode
+                    //when detect framerate change, the reconfig framerate will trigger bitrate params to reset
+                    //the cost related to bitrate estimate is all tuned with downsample flow
+                    //but for small resolution, the lookahead won't downsample
+                    int slow_down_vfr = 0;
+                    cur_fps = ctx->api_ctx.count_frame_num_in_sec;
+                    if (ctx->buffered_fme.width < 288 || ctx->buffered_fme.height < 256) {
+                        slow_down_vfr = 1;
+                    }
+                    if ((ctx->api_ctx.frame_num != 0) && (pre_fps != cur_fps) && (slow_down_vfr ? (abs(cur_fps - pre_fps) > 2) : 1) &&
+                        ((ctx->api_ctx.frame_num < ctx->api_param.cfg_enc_params.frame_rate) ||
+                         (ctx->api_ctx.frame_num - ctx->api_ctx.last_change_framenum >= ctx->api_param.cfg_enc_params.frame_rate))) {
+                        aux_data = ni_frame_new_aux_data(&dec_frame, NI_FRAME_AUX_DATA_FRAMERATE, sizeof(ni_framerate_t));
+                        if (aux_data) {
+                            ni_framerate_t *framerate = (ni_framerate_t *)aux_data->data;
+                            framerate->framerate_num = cur_fps;
+                            framerate->framerate_denom = 1;
+                        }
+
+                        ctx->api_ctx.last_change_framenum = ctx->api_ctx.frame_num;
+                        ctx->api_ctx.prev_fps = cur_fps;
+                    }
+                    ctx->api_ctx.count_frame_num_in_sec = 0;
+                    ctx->api_ctx.passed_time_in_timebase_unit = 0;
+                }
+                ctx->api_ctx.prev_pts = ctx->buffered_fme.pts;
+            } else if (ctx->buffered_fme.pts < ctx->api_ctx.prev_pts) {
+                //error handling for the case where the PTS jumps back
+                //this may cause a little error in the bitrate setting; this little error is acceptable.
+                //As long as the subsequent PTS is normal, it will be repaired quickly.
+                ctx->api_ctx.prev_pts = ctx->buffered_fme.pts;
+            } else {
+                //do nothing when the PTS values of two adjacent frames are the same
+                //this may cause a little error in the bitrate setting; this little error is acceptable.
+                //As long as the subsequent PTS is normal, it will be repaired quickly.
+            }
+        }
+
+        // force pic qp demo mode: initial QP (200 frames) -> QP value specified by
+        // ForcePicQpDemoMode (100 frames) -> initial QP (remaining frames)
+        if (p_param->force_pic_qp_demo_mode) {
+            if (ctx->api_ctx.frame_num >= 300) {
+                ctx->api_fme.data.frame.force_pic_qp =
+                    p_param->cfg_enc_params.rc.intra_qp;
+            } else if (ctx->api_ctx.frame_num >= 200) {
+                ctx->api_fme.data.frame.force_pic_qp = p_param->force_pic_qp_demo_mode;
+            }
+        }
+
+        // supply QP map if ROI enabled and if ROIs passed in
+        // Note: ROI demo mode takes higher priority over side data !
+        side_data = av_frame_get_side_data(&ctx->buffered_fme, AV_FRAME_DATA_REGIONS_OF_INTEREST);
+
+        if (!p_param->roi_demo_mode && p_param->cfg_enc_params.roi_enable &&
+            side_data) {
+            aux_data = ni_frame_new_aux_data(
+                &dec_frame, NI_FRAME_AUX_DATA_REGIONS_OF_INTEREST, side_data->size);
+            if (aux_data) {
+                memcpy(aux_data->data, side_data->data, side_data->size);
+            }
+        }
+
+        // Note: when ROI demo modes enabled, supply ROI map for the specified range
+        //       frames, and 0 map for others
+        if (QUADRA && p_param->roi_demo_mode &&
+            p_param->cfg_enc_params.roi_enable) {
+            if (ctx->api_ctx.frame_num > 90 && ctx->api_ctx.frame_num < 300) {
+                ctx->api_fme.data.frame.roi_len = ctx->api_ctx.roi_len;
+            } else {
+                ctx->api_fme.data.frame.roi_len = 0;
+            }
+            // when ROI enabled, always have a data buffer for ROI
+            // Note: this is handled separately from ROI through side/aux data
+            ctx->api_fme.data.frame.extra_data_len += ctx->api_ctx.roi_len;
+        }
+
+        if (!p_param->cfg_enc_params.enable_all_sei_passthru) {
+            // SEI (HDR)
+            // content light level info
+            if (!(p_param->cfg_enc_params.HDR10CLLEnable)) { // not user set
+                side_data = av_frame_get_side_data(&ctx->buffered_fme, AV_FRAME_DATA_CONTENT_LIGHT_LEVEL);
+
+                if (side_data && side_data->size == sizeof(AVContentLightMetadata)) {
+                    aux_data = ni_frame_new_aux_data(
+                        &dec_frame, NI_FRAME_AUX_DATA_CONTENT_LIGHT_LEVEL,
+                        sizeof(ni_content_light_level_t));
+                    if (aux_data) {
+                        memcpy(aux_data->data, side_data->data, side_data->size);
+                    }
+                }
+            } else if ((AV_CODEC_ID_H264 == avctx->codec_id ||
+                        ctx->api_ctx.bit_depth_factor == 1) &&
+                      ctx->api_ctx.light_level_data_len == 0) {
+                // User input maxCLL so create SEIs for h264 and don't touch for (h265 &&
+                // hdr10) since that is conveyed in config step
+                // Quadra autoset only for hdr10 format with hevc
+                aux_data = ni_frame_new_aux_data(&dec_frame,
+                                                NI_FRAME_AUX_DATA_CONTENT_LIGHT_LEVEL,
+                                                sizeof(ni_content_light_level_t));
+                if (aux_data) {
+                    ni_content_light_level_t *cll =
+                        (ni_content_light_level_t *)(aux_data->data);
+                    cll->max_cll  = p_param->cfg_enc_params.HDR10MaxLight;
+                    cll->max_fall = p_param->cfg_enc_params.HDR10AveLight;
+                }
+            }
+
+            // mastering display color volume
+            if (!(p_param->cfg_enc_params.HDR10Enable)) { // not user set
+                side_data = av_frame_get_side_data(&ctx->buffered_fme, AV_FRAME_DATA_MASTERING_DISPLAY_METADATA);
+                if (side_data && side_data->size == sizeof(AVMasteringDisplayMetadata)) {
+                    aux_data = ni_frame_new_aux_data(
+                        &dec_frame, NI_FRAME_AUX_DATA_MASTERING_DISPLAY_METADATA,
+                        sizeof(ni_mastering_display_metadata_t));
+                    if (aux_data) {
+                        memcpy(aux_data->data, side_data->data, side_data->size);
+                    }
+                }
+            } else if ((AV_CODEC_ID_H264 == avctx->codec_id ||
+                       ctx->api_ctx.bit_depth_factor == 1) &&
+                      ctx->api_ctx.sei_hdr_mastering_display_color_vol_len == 0) {
+                // User input masterDisplay so create SEIs for h264 and don't touch for (h265 &&
+                // hdr10) since that is conveyed in config step
+                // Quadra autoset only for hdr10 format with hevc
+                aux_data = ni_frame_new_aux_data(&dec_frame,
+                    NI_FRAME_AUX_DATA_MASTERING_DISPLAY_METADATA,
+                    sizeof(ni_mastering_display_metadata_t));
+                if (aux_data) {
+                    ni_mastering_display_metadata_t *mst_dsp =
+                        (ni_mastering_display_metadata_t *)(aux_data->data);
+
+                    //X, Y display primaries for RGB channels and white point(WP) in units of 0.00002
+                    //and max, min luminance(L) values in units of 0.0001 nits
+                    //xy are denom = 50000 num = HDR10dx0/y
+                    mst_dsp->display_primaries[0][0].den = MASTERING_DISP_CHROMA_DEN;
+                    mst_dsp->display_primaries[0][1].den = MASTERING_DISP_CHROMA_DEN;
+                    mst_dsp->display_primaries[1][0].den = MASTERING_DISP_CHROMA_DEN;
+                    mst_dsp->display_primaries[1][1].den = MASTERING_DISP_CHROMA_DEN;
+                    mst_dsp->display_primaries[2][0].den = MASTERING_DISP_CHROMA_DEN;
+                    mst_dsp->display_primaries[2][1].den = MASTERING_DISP_CHROMA_DEN;
+                    mst_dsp->white_point[0].den = MASTERING_DISP_CHROMA_DEN;
+                    mst_dsp->white_point[1].den = MASTERING_DISP_CHROMA_DEN;
+                    mst_dsp->min_luminance.den = MASTERING_DISP_LUMA_DEN;
+                    mst_dsp->max_luminance.den = MASTERING_DISP_LUMA_DEN;
+                    // ni_mastering_display_metadata_t has to be filled with R,G,B
+                    // values, in that order, while HDR10d is filled in order of G,B,R,
+                    // so do the conversion here.
+                    mst_dsp->display_primaries[0][0].num = p_param->cfg_enc_params.HDR10dx2;
+                    mst_dsp->display_primaries[0][1].num = p_param->cfg_enc_params.HDR10dy2;
+                    mst_dsp->display_primaries[1][0].num = p_param->cfg_enc_params.HDR10dx0;
+                    mst_dsp->display_primaries[1][1].num = p_param->cfg_enc_params.HDR10dy0;
+                    mst_dsp->display_primaries[2][0].num = p_param->cfg_enc_params.HDR10dx1;
+                    mst_dsp->display_primaries[2][1].num = p_param->cfg_enc_params.HDR10dy1;
+                    mst_dsp->white_point[0].num = p_param->cfg_enc_params.HDR10wx;
+                    mst_dsp->white_point[1].num = p_param->cfg_enc_params.HDR10wy;
+                    mst_dsp->min_luminance.num = p_param->cfg_enc_params.HDR10minluma;
+                    mst_dsp->max_luminance.num = p_param->cfg_enc_params.HDR10maxluma;
+                    mst_dsp->has_primaries = 1;
+                    mst_dsp->has_luminance = 1;
+                }
+            }
+
+            // SEI (HDR10+)
+            side_data = av_frame_get_side_data(&ctx->buffered_fme, AV_FRAME_DATA_DYNAMIC_HDR_PLUS);
+            if (side_data && side_data->size == sizeof(AVDynamicHDRPlus)) {
+                aux_data = ni_frame_new_aux_data(&dec_frame, NI_FRAME_AUX_DATA_HDR_PLUS,
+                                                sizeof(ni_dynamic_hdr_plus_t));
+                if (aux_data) {
+                    memcpy(aux_data->data, side_data->data, side_data->size);
+                }
+            } // hdr10+
+
+            // SEI (close caption)
+            side_data = av_frame_get_side_data(&ctx->buffered_fme, AV_FRAME_DATA_A53_CC);
+
+            if (side_data && side_data->size > 0) {
+                aux_data = ni_frame_new_aux_data(&dec_frame, NI_FRAME_AUX_DATA_A53_CC,
+                                                side_data->size);
+                if (aux_data) {
+                    memcpy(aux_data->data, side_data->data, side_data->size);
+                }
+            }
+
+            // User data unregistered SEI
+            side_data = av_frame_get_side_data(&ctx->buffered_fme, AV_FRAME_DATA_SEI_UNREGISTERED);
+            if (ctx->udu_sei && side_data && side_data->size > 0) {
+                aux_data = ni_frame_new_aux_data(&dec_frame, NI_FRAME_AUX_DATA_UDU_SEI,
+                                                 side_data->size);
+                if (aux_data) {
+                    memcpy(aux_data->data, (uint8_t *)side_data->data, side_data->size);
+                }
+            }
+        }
+        if (ctx->api_ctx.force_frame_type) {
+            switch (ctx->buffered_fme.pict_type) {
+            case AV_PICTURE_TYPE_I:
+                ctx->api_fme.data.frame.ni_pict_type = PIC_TYPE_IDR;
+                break;
+            case AV_PICTURE_TYPE_P:
+                ctx->api_fme.data.frame.ni_pict_type = PIC_TYPE_P;
+                break;
+            default:
+                ;
+            }
+        } else if (ctx->buffered_fme.pict_type == AV_PICTURE_TYPE_I) {
+            ctx->api_fme.data.frame.force_key_frame = 1;
+            ctx->api_fme.data.frame.ni_pict_type = PIC_TYPE_IDR;
+        }
+
+        av_log(avctx, AV_LOG_TRACE,
+               "xcoder_send_frame: #%" PRIu64 " ni_pict_type %d"
+               " forced_header_enable %d intraPeriod %d\n",
+               ctx->api_ctx.frame_num, ctx->api_fme.data.frame.ni_pict_type,
+               p_param->cfg_enc_params.forced_header_enable,
+               p_param->cfg_enc_params.intra_period);
+
+        // whether should send SEI with this frame
+        send_sei_with_idr = ni_should_send_sei_with_frame(
+            &ctx->api_ctx, ctx->api_fme.data.frame.ni_pict_type, p_param);
+
+        // prep for auxiliary data (various SEI, ROI) in encode frame, based on the
+        // data returned in decoded frame
+        ni_enc_prep_aux_data(&ctx->api_ctx, &ctx->api_fme.data.frame, &dec_frame,
+                             ctx->api_ctx.codec_format, send_sei_with_idr,
+                             mdcv_data, cll_data, cc_data, udu_data, hdrp_data);
+
+        if (ctx->api_fme.data.frame.sei_total_len > NI_ENC_MAX_SEI_BUF_SIZE) {
+            av_log(avctx, AV_LOG_ERROR, "xcoder_send_frame: sei total length %u exceeds maximum sei size %u.\n",
+                   ctx->api_fme.data.frame.sei_total_len, NI_ENC_MAX_SEI_BUF_SIZE);
+            ret = AVERROR(EINVAL);
+            return ret;
+        }
+
+        ctx->api_fme.data.frame.extra_data_len += ctx->api_fme.data.frame.sei_total_len;
+
+        // data layout requirement: leave space for reconfig data if at least one of
+        // reconfig, SEI or ROI is present
+        // Note: ROI is present when enabled, so use encode config flag instead of
+        //       frame's roi_len as it can be 0 indicating a 0'd ROI map setting !
+        if (ctx->api_fme.data.frame.reconf_len ||
+            ctx->api_fme.data.frame.sei_total_len ||
+            p_param->cfg_enc_params.roi_enable) {
+            ctx->api_fme.data.frame.extra_data_len +=
+                sizeof(ni_encoder_change_params_t);
+        }
+
+        ctx->api_fme.data.frame.pts = ctx->buffered_fme.pts;
+        ctx->api_fme.data.frame.dts = ctx->buffered_fme.pkt_dts;
+
+        ctx->api_fme.data.frame.video_width = avctx->width;
+        ctx->api_fme.data.frame.video_height = avctx->height;
+
+        ishwframe = ctx->buffered_fme.format == AV_PIX_FMT_NI_QUAD;
+        if (ctx->api_ctx.auto_dl_handle != 0 || (avctx->height < NI_MIN_HEIGHT) ||
+            (avctx->width < NI_MIN_WIDTH)) {
+            format_in_use          = avctx->sw_pix_fmt;
+            ctx->api_ctx.hw_action = 0;
+            ishwframe              = 0;
+        }
+        isnv12frame = (format_in_use == AV_PIX_FMT_NV12 || format_in_use == AV_PIX_FMT_P010LE);
+
+        if (ishwframe) {
+            ret = sizeof(niFrameSurface1_t);
+        } else {
+            ret = av_image_get_buffer_size(format_in_use,
+                                         ctx->buffered_fme.width, ctx->buffered_fme.height, 1);
+        }
+
+        av_log(avctx, AV_LOG_TRACE, "xcoder_send_frame: frame->format=%d, frame->width=%d, frame->height=%d, frame->pict_type=%d, size=%d\n", format_in_use, ctx->buffered_fme.width, ctx->buffered_fme.height, ctx->buffered_fme.pict_type, ret);
+        if (ret < 0) {
+            return ret;
+        }
+
+        int dst_stride[NI_MAX_NUM_DATA_POINTERS]     = {0};
+        int height_aligned[NI_MAX_NUM_DATA_POINTERS] = {0};
+        int src_height[NI_MAX_NUM_DATA_POINTERS]     = {0};
+
+        src_height[0] = ctx->buffered_fme.height;
+        src_height[1] = ctx->buffered_fme.height / 2;
+        src_height[2] = (isnv12frame) ? 0 : (ctx->buffered_fme.height / 2);
+        if (avctx->sw_pix_fmt == AV_PIX_FMT_ARGB ||
+            avctx->sw_pix_fmt == AV_PIX_FMT_RGBA ||
+            avctx->sw_pix_fmt == AV_PIX_FMT_ABGR ||
+            avctx->sw_pix_fmt == AV_PIX_FMT_BGRA) {
+            src_height[0] = ctx->buffered_fme.height;
+            src_height[1] = 0;
+            src_height[2] = 0;
+            alignment_2pass_wa = 0;
+        }
+        /* The main reason for the problem is that when using out=sw and noautoscale=0,
+        * the incoming stream initially conforms to zerocopy, so the firmware will allocate memory according to zerocopy.
+        * The resolution after this changes, but the software inserts autoscale,
+        * so the encoder cannot detect the change in resolution and cannot reopen.
+        * However, due to ffmpeg being 64 bit aligned, So the linesize is not consistent with the linesize we initially decoded,
+        * so the encoding will take the path of non-zero copy. At this time, due to lookahead>0,
+        * the software will send the firmware a size larger than the originally requested size,
+        * so it will not be able to send it. Moreover, at this point,
+        * the firmware is unable to perceive changes in linesize and respond accordingly.
+        * To fix this, we can check the linesize and disable the 2-pass workaround.
+        */
+        if (ctx->api_param.luma_linesize) {
+            alignment_2pass_wa = false;
+        }
+
+        ni_get_min_frame_dim(ctx->buffered_fme.width,
+                             ctx->buffered_fme.height,
+                             ctx->api_ctx.pixel_format,
+                             dst_stride, height_aligned);
+
+        av_log(avctx, AV_LOG_TRACE,
+               "xcoder_send_frame frame->width %d "
+               "ctx->api_ctx.bit_depth_factor %d dst_stride[0/1/2] %d/%d/%d sw_pix_fmt %d\n",
+               ctx->buffered_fme.width, ctx->api_ctx.bit_depth_factor,
+               dst_stride[0], dst_stride[1], dst_stride[2], avctx->sw_pix_fmt);
+
+        if (alignment_2pass_wa && !ishwframe) {
+            if (isnv12frame) {
+                // for 2-pass encode output mismatch WA, need to extend (and
+                // pad) CbCr plane height, because 1st pass assume input 32
+                // align
+                height_aligned[1] = FFALIGN(height_aligned[0], 32) / 2;
+            } else {
+                // for 2-pass encode output mismatch WA, need to extend (and
+                // pad) Cr plane height, because 1st pass assume input 32 align
+                height_aligned[2] = FFALIGN(height_aligned[0], 32) / 2;
+            }
+        }
+
+        // alignment(16) extra padding for H.264 encoding
+        if (ishwframe) {
+            uint8_t *dsthw;
+            const uint8_t *srchw;
+
+            ni_frame_buffer_alloc_hwenc(
+                &(ctx->api_fme.data.frame), ctx->buffered_fme.width,
+                ctx->buffered_fme.height,
+                (int)ctx->api_fme.data.frame.extra_data_len);
+            if (!ctx->api_fme.data.frame.p_data[3]) {
+                return AVERROR(ENOMEM);
+            }
+            dsthw       = ctx->api_fme.data.frame.p_data[3];
+            srchw = (const uint8_t *)ctx->buffered_fme.data[3];
+            av_log(avctx, AV_LOG_TRACE, "dst=%p src=%p len=%d\n", dsthw, srchw,
+                   ctx->api_fme.data.frame.data_len[3]);
+            memcpy(dsthw, srchw, ctx->api_fme.data.frame.data_len[3]);
+            av_log(avctx, AV_LOG_TRACE,
+                   "ctx->buffered_fme.data[3] %p memcpy to %p\n",
+                   ctx->buffered_fme.data[3], dsthw);
+        } else {  // traditional yuv transfer
+            av_log(avctx, AV_LOG_TRACE, "%s %s %d buffered_fme.data[0] %p data[3] %p wxh %u %u dst_stride[0] %d %d  linesize[0] %d %d data[1] %p %p data[2] %p %p data[3] %p buf[0] %p crop(t:b:l:r) %lu:%lu:%lu:%lu avctx(w:h:cw:ch) %u:%u:%u:%u\n",
+              __FILE__, __FUNCTION__, __LINE__,
+              ctx->buffered_fme.data[0], ctx->buffered_fme.data[3],
+              ctx->buffered_fme.width, ctx->buffered_fme.height,
+              dst_stride[0], dst_stride[1],
+              ctx->buffered_fme.linesize[0], ctx->buffered_fme.linesize[1],
+              ctx->buffered_fme.data[1], ctx->buffered_fme.data[0] + dst_stride[0] * ctx->buffered_fme.height,
+              ctx->buffered_fme.data[2], ctx->buffered_fme.data[1] + dst_stride[1] * ctx->buffered_fme.height / 2,
+              ctx->buffered_fme.data[3],
+              ctx->buffered_fme.buf[0],
+              ctx->buffered_fme.crop_top, ctx->buffered_fme.crop_bottom, ctx->buffered_fme.crop_left, ctx->buffered_fme.crop_right,
+              avctx->width, avctx->height,
+              avctx->coded_width, avctx->coded_height);
+
+            // check input resolution zero copy compatible or not
+            if (ni_encoder_frame_zerocopy_check(&ctx->api_ctx,
+                p_param, ctx->buffered_fme.width, ctx->buffered_fme.height,
+                (const int *)ctx->buffered_fme.linesize, false) == NI_RETCODE_SUCCESS) {
+                need_to_copy = 0;
+                // alloc metadata buffer etc. (if needed)
+                ret = ni_encoder_frame_zerocopy_buffer_alloc(
+                    &(ctx->api_fme.data.frame), ctx->buffered_fme.width,
+                    ctx->buffered_fme.height, (const int *)ctx->buffered_fme.linesize, (const uint8_t **)ctx->buffered_fme.data,
+                    (int)ctx->api_fme.data.frame.extra_data_len);
+                if (ret != NI_RETCODE_SUCCESS)
+                    return AVERROR(ENOMEM);
+            } else {
+                // if linesize changes (while resolution remains the same), copy to previously configured linesizes
+                if (p_param->luma_linesize && p_param->chroma_linesize) {
+                    dst_stride[0] = p_param->luma_linesize;
+                    dst_stride[1] = p_param->chroma_linesize;
+                    dst_stride[2] = isnv12frame ? 0 : p_param->chroma_linesize;
+                }
+                ni_encoder_sw_frame_buffer_alloc(
+                    !isnv12frame, &(ctx->api_fme.data.frame), ctx->buffered_fme.width,
+                    height_aligned[0], dst_stride, (avctx->codec_id == AV_CODEC_ID_H264),
+                    (int)ctx->api_fme.data.frame.extra_data_len, alignment_2pass_wa);
+            }
+            av_log(avctx, AV_LOG_TRACE, "%p need_to_copy %d! pts = %ld\n", ctx->api_fme.data.frame.p_buffer, need_to_copy, ctx->buffered_fme.pts);
+            if (!ctx->api_fme.data.frame.p_data[0]) {
+                return AVERROR(ENOMEM);
+            }
+
+            // if this is indeed sw frame, do the YUV data layout, otherwise may need
+            // to do frame download
+            if (ctx->buffered_fme.format != AV_PIX_FMT_NI_QUAD) {
+                av_log(
+                    avctx, AV_LOG_TRACE,
+                    "xcoder_send_frame: fme.data_len[0]=%d, "
+                    "buf_fme->linesize=%d/%d/%d, dst alloc linesize = %d/%d/%d, "
+                    "src height = %d/%d/%d, dst height aligned = %d/%d/%d, "
+                    "force_key_frame=%d, extra_data_len=%d sei_size=%d "
+                    "(hdr_content_light_level %u hdr_mastering_display_color_vol %u "
+                    "hdr10+ %u cc %u udu %u prefC %u) roi_size=%u reconf_size=%u "
+                    "force_pic_qp=%u "
+                    "use_cur_src_as_long_term_pic %u use_long_term_ref %u\n",
+                    ctx->api_fme.data.frame.data_len[0],
+                    ctx->buffered_fme.linesize[0], ctx->buffered_fme.linesize[1],
+                    ctx->buffered_fme.linesize[2], dst_stride[0], dst_stride[1],
+                    dst_stride[2], src_height[0], src_height[1], src_height[2],
+                    height_aligned[0], height_aligned[1], height_aligned[2],
+                    ctx->api_fme.data.frame.force_key_frame,
+                    ctx->api_fme.data.frame.extra_data_len,
+                    ctx->api_fme.data.frame.sei_total_len,
+                    ctx->api_fme.data.frame.sei_hdr_content_light_level_info_len,
+                    ctx->api_fme.data.frame.sei_hdr_mastering_display_color_vol_len,
+                    ctx->api_fme.data.frame.sei_hdr_plus_len,
+                    ctx->api_fme.data.frame.sei_cc_len,
+                    ctx->api_fme.data.frame.sei_user_data_unreg_len,
+                    ctx->api_fme.data.frame.preferred_characteristics_data_len,
+                    (p_param->cfg_enc_params.roi_enable ? ctx->api_ctx.roi_len : 0),
+                    ctx->api_fme.data.frame.reconf_len,
+                    ctx->api_fme.data.frame.force_pic_qp,
+                    ctx->api_fme.data.frame.use_cur_src_as_long_term_pic,
+                    ctx->api_fme.data.frame.use_long_term_ref);
+
+                // YUV part of the encoder input data layout
+                if (need_to_copy) {
+                    ni_copy_frame_data(
+                        (uint8_t **)(ctx->api_fme.data.frame.p_data),
+                        ctx->buffered_fme.data, ctx->buffered_fme.width,
+                        ctx->buffered_fme.height, ctx->api_ctx.bit_depth_factor,
+                        ctx->api_ctx.pixel_format, p_param->cfg_enc_params.conf_win_right, dst_stride,
+                        height_aligned, ctx->buffered_fme.linesize, src_height);
+                }
+            } else {
+                ni_session_data_io_t *p_session_data;
+                ni_session_data_io_t niframe;
+                niFrameSurface1_t *src_surf;
+
+                av_log(avctx, AV_LOG_DEBUG,
+                       "xcoder_send_frame:Autodownload to be run: hdl: %d w: %d h: %d\n",
+                       ctx->api_ctx.auto_dl_handle, avctx->width, avctx->height);
+                avhwf_ctx =
+                    (AVHWFramesContext *)ctx->buffered_fme.hw_frames_ctx->data;
+                nif_src_ctx = (AVNIFramesContext*) avhwf_ctx->hwctx;
+
+                src_surf = (niFrameSurface1_t *)ctx->buffered_fme.data[3];
+
+                if (avctx->height < NI_MIN_HEIGHT || avctx->width < NI_MIN_WIDTH) {
+                    int bit_depth;
+                    int is_planar;
+
+                    p_session_data = &niframe;
+                    memset(&niframe, 0, sizeof(niframe));
+                    bit_depth = ((avctx->sw_pix_fmt == AV_PIX_FMT_YUV420P10LE) ||
+                               (avctx->sw_pix_fmt == AV_PIX_FMT_P010LE))
+                                  ? 2
+                                  : 1;
+                    is_planar = (avctx->sw_pix_fmt == AV_PIX_FMT_YUV420P) ||
+                              (avctx->sw_pix_fmt == AV_PIX_FMT_YUV420P10LE);
+
+                    /* Allocate a minimal frame */
+                    ni_enc_frame_buffer_alloc(&niframe.data.frame, avctx->width,
+                                        avctx->height, 0, /* alignment */
+                                        1,                /* metadata */
+                                        bit_depth, 0,     /* hw_frame_count */
+                                        is_planar, ctx->api_ctx.pixel_format);
+                } else {
+                    p_session_data = &(ctx->api_fme);
+                }
+
+                nif_src_ctx->api_ctx.is_auto_dl = true;
+                ret = ni_device_session_hwdl(&nif_src_ctx->api_ctx, p_session_data,
+                                           src_surf);
+                ishwframe = false;
+                if (ret <= 0) {
+                    av_log(avctx, AV_LOG_ERROR,
+                           "nienc.c:ni_hwdl_frame() failed to retrieve frame\n");
+                    return AVERROR_EXTERNAL;
+                }
+
+                if ((avctx->height < NI_MIN_HEIGHT) ||
+                    (avctx->width < NI_MIN_WIDTH)) {
+                    int nb_planes = av_pix_fmt_count_planes(avctx->sw_pix_fmt);
+                    int ni_fmt = ctx->api_ctx.pixel_format;
+                    ni_expand_frame(&ctx->api_fme.data.frame,
+                                  &p_session_data->data.frame, dst_stride,
+                                  avctx->width, avctx->height, ni_fmt, nb_planes);
+
+                    ni_frame_buffer_free(&niframe.data.frame);
+                }
+            }
+        } // end if hwframe else
+
+        // auxiliary data part of the encoder input data layout
+        ni_enc_copy_aux_data(&ctx->api_ctx, &ctx->api_fme.data.frame, &dec_frame,
+                             ctx->api_ctx.codec_format, mdcv_data, cll_data,
+                             cc_data, udu_data, hdrp_data, ishwframe, isnv12frame);
+
+        ni_frame_buffer_free(&dec_frame);
+    } // end non seq change
+
+    sent = ni_device_session_write(&ctx->api_ctx, &ctx->api_fme, NI_DEVICE_TYPE_ENCODER);
+
+    av_log(avctx, AV_LOG_DEBUG, "xcoder_send_frame: size %d sent to xcoder\n", sent);
+
+    // return EIO at error
+    if (NI_RETCODE_ERROR_VPU_RECOVERY == sent) {
+        sent = xcoder_encode_reset(avctx);
+        if (sent < 0) {
+            av_log(avctx, AV_LOG_ERROR, "xcoder_send_frame(): VPU recovery failed:%d, returning EIO\n", sent);
+            ret = AVERROR(EIO);
+        }
+    } else if (sent < 0) {
+        av_log(avctx, AV_LOG_ERROR, "xcoder_send_frame(): failure sent (%d) , "
+               "returning EIO\n", sent);
+        ret = AVERROR(EIO);
+
+        // if rejected due to sequence change in progress, revert resolution
+        // setting and will do it again next time.
+        if (ctx->api_fme.data.frame.start_of_stream &&
+            (avctx->width != orig_avctx_width ||
+             avctx->height != orig_avctx_height)) {
+            avctx->width = orig_avctx_width;
+            avctx->height = orig_avctx_height;
+        }
+        return ret;
+    } else {
+        av_log(avctx, AV_LOG_DEBUG, "xcoder_send_frame(): sent (%d)\n", sent);
+        if (sent == 0) {
+            // case of sequence change in progress
+            if (ctx->api_fme.data.frame.start_of_stream &&
+                (avctx->width != orig_avctx_width ||
+                 avctx->height != orig_avctx_height)) {
+                avctx->width = orig_avctx_width;
+                avctx->height = orig_avctx_height;
+            }
+
+            // when buffer_full, drop the frame and return EAGAIN if in strict timeout
+            // mode, otherwise buffer the frame and it is to be sent out using encode2
+            // API: queue the frame only if not done so yet, i.e. queue is empty
+            // *and* it's a valid frame.
+            if (ctx->api_ctx.status == NI_RETCODE_NVME_SC_WRITE_BUFFER_FULL) {
+                ishwframe = ctx->buffered_fme.format == AV_PIX_FMT_NI_QUAD;
+                if (ishwframe) {
+                    // Do not queue frames to avoid FFmpeg stuck when multiple HW frames are queued up in nienc, causing decoder unable to acquire buffer, which led to FFmpeg stuck
+                    av_log(avctx, AV_LOG_ERROR, "xcoder_send_frame(): device WRITE_BUFFER_FULL cause HW frame drop! (approx. Frame num #%" PRIu64 "\n", ctx->api_ctx.frame_num);
+                    av_frame_unref(&ctx->buffered_fme);
+                    ret = 1;
+                } else {
+                    av_log(avctx, AV_LOG_DEBUG, "xcoder_send_frame(): Write buffer full, enqueue frame and return 0\n");
+                    ret = 0;
+
+                    if (frame && is_input_fifo_empty(ctx)) {
+                        ret = enqueue_frame(avctx, frame);
+                        if (ret < 0) {
+                            return ret;
+                        }
+                    }
+                }
+            }
+        } else {
+            ishwframe = (ctx->buffered_fme.format == AV_PIX_FMT_NI_QUAD) &&
+                        (ctx->api_ctx.auto_dl_handle == 0) &&
+                        (avctx->height >= NI_MIN_HEIGHT) &&
+                        (avctx->width >= NI_MIN_WIDTH);
+
+            if (!ctx->eos_fme_received && ishwframe) {
+                av_log(avctx, AV_LOG_TRACE, "AVframe_index = %d at head %d\n",
+                       ctx->aFree_Avframes_list[ctx->freeHead], ctx->freeHead);
+                av_frame_ref(
+                    ctx->sframe_pool[ctx->aFree_Avframes_list[ctx->freeHead]],
+                    &ctx->buffered_fme);
+                av_log(avctx, AV_LOG_TRACE,
+                       "AVframe_index = %d popped from free head %d\n",
+                       ctx->aFree_Avframes_list[ctx->freeHead], ctx->freeHead);
+                av_log(avctx, AV_LOG_TRACE,
+                       "ctx->buffered_fme.data[3] %p sframe_pool[%d]->data[3] %p\n",
+                       ctx->buffered_fme.data[3],
+                       ctx->aFree_Avframes_list[ctx->freeHead],
+                       ctx->sframe_pool[ctx->aFree_Avframes_list[ctx->freeHead]]
+                           ->data[3]);
+                if (ctx->sframe_pool[ctx->aFree_Avframes_list[ctx->freeHead]]
+                        ->data[3]) {
+                    av_log(avctx, AV_LOG_DEBUG,
+                           "nienc.c sframe_pool[%d] trace ui16FrameIdx = [%u] sent\n",
+                           ctx->aFree_Avframes_list[ctx->freeHead],
+                           ((niFrameSurface1_t
+                                 *)((uint8_t *)ctx
+                                        ->sframe_pool
+                                            [ctx->aFree_Avframes_list[ctx->freeHead]]
+                                        ->data[3]))
+                               ->ui16FrameIdx);
+                    av_log(
+                        avctx, AV_LOG_TRACE,
+                        "xcoder_send_frame: after ref sframe_pool, hw frame "
+                        "av_buffer_get_ref_count=%d, data[3]=%p\n",
+                        av_buffer_get_ref_count(
+                            ctx->sframe_pool[ctx->aFree_Avframes_list[ctx->freeHead]]
+                                ->buf[0]),
+                        ctx->sframe_pool[ctx->aFree_Avframes_list[ctx->freeHead]]
+                            ->data[3]);
+                }
+                if (deq_free_frames(ctx) != 0) {
+                    ret = AVERROR_EXTERNAL;
+                    return ret;
+                }
+            }
+
+            // only if it's NOT sequence change flushing (in which case only the eos
+            // was sent and not the first sc pkt) AND
+            // only after successful sending will it be removed from fifo
+            if (SESSION_RUN_STATE_SEQ_CHANGE_DRAINING !=
+                ctx->api_ctx.session_run_state) {
+                if (!is_input_fifo_empty(ctx)) {
+                    av_fifo_drain2(ctx->fme_fifo, (size_t) 1);
+                    av_log(avctx, AV_LOG_DEBUG, "fme popped, fifo num frames: %lu\n",
+                           av_fifo_can_read(ctx->fme_fifo));
+                }
+                av_frame_unref(&ctx->buffered_fme);
+                ishwframe = (ctx->buffered_fme.format == AV_PIX_FMT_NI_QUAD) &&
+                            (ctx->api_ctx.auto_dl_handle == 0);
+                if (ishwframe) {
+                    if (ctx->buffered_fme.buf[0])
+                        av_log(avctx, AV_LOG_TRACE, "xcoder_send_frame: after unref buffered_fme, hw frame av_buffer_get_ref_count=%d\n", av_buffer_get_ref_count(ctx->buffered_fme.buf[0]));
+                    else
+                        av_log(avctx, AV_LOG_TRACE, "xcoder_send_frame: after unref buffered_fme, hw frame av_buffer_get_ref_count=0 (buf[0] is NULL)\n");
+                }
+            } else {
+                av_log(avctx, AV_LOG_TRACE, "XCoder frame(eos) sent, sequence changing!"
+                       " NO fifo pop !\n");
+            }
+
+            // pushing input pts in circular FIFO
+            ctx->api_ctx.enc_pts_list[ctx->api_ctx.enc_pts_w_idx % NI_FIFO_SZ] = ctx->api_fme.data.frame.pts;
+            ctx->api_ctx.enc_pts_w_idx++;
+
+            // have another check before return: if no more frames in fifo to send and
+            // we've got eos (NULL) frame from upper stream, flag for flushing
+            if (ctx->eos_fme_received && is_input_fifo_empty(ctx)) {
+                av_log(avctx, AV_LOG_DEBUG, "Upper stream EOS frame received, fifo "
+                       "empty, start flushing ..\n");
+                ctx->encoder_flushing = 1;
+            }
+
+            ret = 0;
+        }
+    }
+
+    // try to flush encoder input fifo if it's not in seqchange draining state.
+    // Sending a frame before seqchange done may lead to stuck because the new frame's
+    // resolution could be different from that of the last sequence. Need to flush the
+    // fifo because its size increases with seqchange.
+    if (ret == 0 && frame && !is_input_fifo_empty(ctx) &&
+        SESSION_RUN_STATE_SEQ_CHANGE_DRAINING != ctx->api_ctx.session_run_state) {
+        av_log(avctx, AV_LOG_DEBUG, "try to flush encoder input fifo. Fifo num frames: %lu\n",
+               av_fifo_can_read(ctx->fme_fifo));
+        goto resend;
+    }
+
+    if (ctx->encoder_flushing) {
+        av_log(avctx, AV_LOG_DEBUG, "xcoder_send_frame flushing ..\n");
+        ret = ni_device_session_flush(&ctx->api_ctx, NI_DEVICE_TYPE_ENCODER);
+    }
+
+    av_log(avctx, AV_LOG_VERBOSE, "XCoder send frame return %d \n", ret);
+    return ret;
+}
+
+/*
+ * Re-initialize the encoder after an input sequence change, i.e. when the
+ * next frame queued in the input fifo has a different resolution, pixel
+ * format and/or bit depth than the current session.
+ *
+ * Chooses between two paths:
+ *   - full close + re-open (ff_xcoder_encode_close / ff_xcoder_encode_init)
+ *     when the new sequence needs a larger buffer, a different pixel format,
+ *     a "small picture", MJPEG, or adaptive buffers are disabled;
+ *   - a fast in-place change (xcoder_encode_sequence_change) otherwise.
+ *
+ * avctx->width/height/pix_fmt are updated to the new sequence's values.
+ * Device handle, block I/O handle, hw_id and block device name are saved on
+ * entry and restored on exit so they survive a close/re-open.
+ *
+ * Returns the result of the chosen re-init path (negative AVERROR on
+ * failure).
+ */
+static int xcoder_encode_reinit(AVCodecContext *avctx)
+{
+    int ret = 0;
+    XCoderEncContext *ctx = avctx->priv_data;
+    bool ishwframe;
+    // save device identity so it can be restored after a close/re-open below
+    ni_device_handle_t device_handle = ctx->api_ctx.device_handle;
+    ni_device_handle_t blk_io_handle = ctx->api_ctx.blk_io_handle;
+    int hw_id = ctx->api_ctx.hw_id;
+    char tmp_blk_dev_name[NI_MAX_DEVICE_NAME_LEN];
+    int bit_depth = 1;                     // bytes per sample (1 = 8-bit, 2 = 10-bit)
+    int pix_fmt = AV_PIX_FMT_YUV420P;      // NI_PIX_FMT_* value after mapping below
+    int stride, ori_stride;
+    bool bIsSmallPicture = false;
+    AVFrame temp_frame;
+    ni_xcoder_params_t *p_param = &ctx->api_param;
+
+    av_strlcpy(tmp_blk_dev_name, ctx->api_ctx.blk_dev_name, NI_MAX_DEVICE_NAME_LEN);
+
+    // re-init avctx's resolution to the changed one that is
+    // stored in the first frame of the fifo
+    av_fifo_peek(ctx->fme_fifo, &temp_frame, 1, 0);
+    // temp_frame is a shallow struct copy; re-point extended_data at its own
+    // data[] array rather than the original frame's
+    temp_frame.extended_data = temp_frame.data;
+
+    ishwframe = temp_frame.format == AV_PIX_FMT_NI_QUAD;
+
+    if (ishwframe) {
+        // HW frame: actual bit depth comes from the frame surface descriptor,
+        // which may differ from the current session's bit_depth_factor
+        bit_depth = (uint8_t)((niFrameSurface1_t*)((uint8_t*)temp_frame.data[3]))->bit_depth;
+        av_log(avctx, AV_LOG_INFO, "xcoder_receive_packet hw frame bit depth "
+               "changing %d -> %d\n",
+               ctx->api_ctx.bit_depth_factor, bit_depth);
+
+        // map avctx->sw_pix_fmt to an NI pixel format, flipping sw_pix_fmt
+        // between the 8-bit and 10-bit variant when the surface's bit depth
+        // disagrees with it
+        switch (avctx->sw_pix_fmt) {
+            case AV_PIX_FMT_YUV420P:
+            case AV_PIX_FMT_YUVJ420P:
+                if (bit_depth == 2) {
+                    avctx->sw_pix_fmt = AV_PIX_FMT_YUV420P10LE;
+                    pix_fmt = NI_PIX_FMT_YUV420P10LE;
+                } else {
+                    pix_fmt = NI_PIX_FMT_YUV420P;
+                }
+                break;
+            case AV_PIX_FMT_YUV420P10LE:
+                if (bit_depth == 1) {
+                    avctx->sw_pix_fmt = AV_PIX_FMT_YUV420P;
+                    pix_fmt = NI_PIX_FMT_YUV420P;
+                } else {
+                    pix_fmt = NI_PIX_FMT_YUV420P10LE;
+                }
+                break;
+            case AV_PIX_FMT_NV12:
+                if (bit_depth == 2) {
+                    avctx->sw_pix_fmt = AV_PIX_FMT_P010LE;
+                    pix_fmt = NI_PIX_FMT_P010LE;
+                } else {
+                    pix_fmt = NI_PIX_FMT_NV12;
+                }
+                break;
+            case AV_PIX_FMT_P010LE:
+                if (bit_depth == 1) {
+                    avctx->sw_pix_fmt = AV_PIX_FMT_NV12;
+                    pix_fmt = NI_PIX_FMT_NV12;
+                } else {
+                    pix_fmt = NI_PIX_FMT_P010LE;
+                }
+                break;
+            case AV_PIX_FMT_NI_QUAD_10_TILE_4X4:
+                if (bit_depth == 1) {
+                    avctx->sw_pix_fmt = AV_PIX_FMT_NI_QUAD_8_TILE_4X4;
+                    pix_fmt = NI_PIX_FMT_8_TILED4X4;
+                } else {
+                    pix_fmt = NI_PIX_FMT_10_TILED4X4;
+                }
+                break;
+            case AV_PIX_FMT_NI_QUAD_8_TILE_4X4:
+                if (bit_depth == 2) {
+                    avctx->sw_pix_fmt = AV_PIX_FMT_NI_QUAD_10_TILE_4X4;
+                    pix_fmt = NI_PIX_FMT_10_TILED4X4;
+                } else {
+                    pix_fmt = NI_PIX_FMT_8_TILED4X4;
+                }
+                break;
+            case AV_PIX_FMT_ARGB:
+                pix_fmt = NI_PIX_FMT_ARGB;
+                break;
+            case AV_PIX_FMT_ABGR:
+                pix_fmt = NI_PIX_FMT_ABGR;
+                break;
+            case AV_PIX_FMT_RGBA:
+                pix_fmt = NI_PIX_FMT_RGBA;
+                break;
+            case AV_PIX_FMT_BGRA:
+                pix_fmt = NI_PIX_FMT_BGRA;
+                break;
+            default:
+                pix_fmt = NI_PIX_FMT_NONE;
+                break;
+        }
+    } else {
+        // SW frame: pixel format and bit depth come straight from the frame
+        switch (temp_frame.format) {
+            case AV_PIX_FMT_YUV420P:
+            case AV_PIX_FMT_YUVJ420P:
+                pix_fmt = NI_PIX_FMT_YUV420P;
+                bit_depth = 1;
+                break;
+            case AV_PIX_FMT_NV12:
+                pix_fmt = NI_PIX_FMT_NV12;
+                bit_depth = 1;
+                break;
+            case AV_PIX_FMT_YUV420P10LE:
+                pix_fmt = NI_PIX_FMT_YUV420P10LE;
+                bit_depth = 2;
+                break;
+            case AV_PIX_FMT_P010LE:
+                pix_fmt = NI_PIX_FMT_P010LE;
+                bit_depth = 2;
+                break;
+            default:
+                pix_fmt = NI_PIX_FMT_NONE;
+                break;
+        }
+    }
+
+    // reset per-sequence encoder state (flush/EOS flags and header cache)
+    ctx->eos_fme_received = 0;
+    ctx->encoder_eof = 0;
+    ctx->encoder_flushing = 0;
+    ctx->firstPktArrived = 0;
+    ctx->spsPpsArrived = 0;
+    ctx->spsPpsHdrLen = 0;
+    av_freep(&ctx->p_spsPpsHdr);
+    ctx->seqChangeCount++;
+
+    // check if resolution is zero copy compatible and set linesize according to new resolution
+    if (ni_encoder_frame_zerocopy_check(&ctx->api_ctx,
+        p_param, temp_frame.width, temp_frame.height,
+        (const int *)temp_frame.linesize, true) == NI_RETCODE_SUCCESS) {
+        stride = p_param->luma_linesize; // new sequence is zero copy compatible
+    } else {
+        stride = FFALIGN(temp_frame.width*bit_depth, 128);
+    }
+
+    if (ctx->api_ctx.ori_luma_linesize && ctx->api_ctx.ori_chroma_linesize) {
+        ori_stride = ctx->api_ctx.ori_luma_linesize; // previous sequence was zero copy compatible
+    } else {
+        ori_stride = FFALIGN(ctx->api_ctx.ori_width*bit_depth, 128);
+    }
+
+    // packed RGB formats use the raw width as stride instead of the 128-byte
+    // aligned luma stride — presumably a libxcoder convention; TODO confirm
+    if (pix_fmt == NI_PIX_FMT_ARGB
+       || pix_fmt == NI_PIX_FMT_ABGR
+       || pix_fmt == NI_PIX_FMT_RGBA
+       || pix_fmt == NI_PIX_FMT_BGRA) {
+        stride = temp_frame.width;
+        ori_stride = ctx->api_ctx.ori_width;
+    }
+
+    // "small picture" forces the close/re-open path below; the minimum size
+    // is stricter when 2-pass (lookahead/CRF) or multicore joint mode is on
+    if (ctx->api_param.cfg_enc_params.lookAheadDepth
+        || ctx->api_param.cfg_enc_params.crf >= 0
+        || ctx->api_param.cfg_enc_params.crfFloat >= 0) {
+        av_log(avctx, AV_LOG_DEBUG, "xcoder_encode_reinit 2-pass "
+               "lookaheadDepth  %d and/or CRF %d and/or CRFFloat %f\n",
+               ctx->api_param.cfg_enc_params.lookAheadDepth,
+               ctx->api_param.cfg_enc_params.crf,
+               ctx->api_param.cfg_enc_params.crfFloat);
+        if ((temp_frame.width < NI_2PASS_ENCODE_MIN_WIDTH) ||
+            (temp_frame.height < NI_2PASS_ENCODE_MIN_HEIGHT)) {
+            bIsSmallPicture = true;
+        }
+    } else {
+        if ((temp_frame.width < NI_MIN_WIDTH) ||
+           (temp_frame.height < NI_MIN_HEIGHT)) {
+            bIsSmallPicture = true;
+        }
+    }
+
+    if (ctx->api_param.cfg_enc_params.multicoreJointMode) {
+        av_log(avctx, AV_LOG_DEBUG, "xcoder_encode_reinit multicore "
+               "joint mode\n");
+        if ((temp_frame.width < 256) ||
+           (temp_frame.height < 256)) {
+            bIsSmallPicture = true;
+        }
+    }
+
+    // configured crop cannot be carried across a fast sequence change
+    if (ctx->api_param.cfg_enc_params.crop_width || ctx->api_param.cfg_enc_params.crop_height) {
+        av_log(avctx, AV_LOG_DEBUG, "xcoder_encode_reinit needs to close and re-open "
+               " due to crop width x height \n");
+        bIsSmallPicture = true;
+    }
+
+    av_log(avctx, AV_LOG_INFO, "%s resolution "
+           "changing %dx%d -> %dx%d "
+           "format %d -> %d "
+           "original stride %d height %d pix fmt %d "
+           "new stride %d height %d pix fmt %d \n",
+           __func__, avctx->width, avctx->height,
+           temp_frame.width, temp_frame.height,
+           avctx->pix_fmt, temp_frame.format,
+           ori_stride, ctx->api_ctx.ori_height, ctx->api_ctx.ori_pix_fmt,
+           stride, temp_frame.height, pix_fmt);
+
+    avctx->width = temp_frame.width;
+    avctx->height = temp_frame.height;
+    avctx->pix_fmt = temp_frame.format;
+
+    // fast sequence change without close / open only if new resolution < original resolution
+    if ((ori_stride*ctx->api_ctx.ori_height < stride*temp_frame.height) ||
+        (ctx->api_ctx.ori_pix_fmt != pix_fmt) ||
+        bIsSmallPicture ||
+        (avctx->codec_id == AV_CODEC_ID_MJPEG) ||
+        ctx->api_param.cfg_enc_params.disable_adaptive_buffers) {
+        ff_xcoder_encode_close(avctx);
+        ret = ff_xcoder_encode_init(avctx);
+        // clear crop parameters upon sequence change because cropping values may not be compatible to new resolution
+        // (except for Motion Constrained mode 2, for which we crop to 64x64 alignment)
+        if (ctx->api_param.cfg_enc_params.motionConstrainedMode == MOTION_CONSTRAINED_QUALITY_MODE && avctx->codec_id == AV_CODEC_ID_HEVC) {
+            ctx->api_param.cfg_enc_params.crop_width = (temp_frame.width / 64 * 64);
+            ctx->api_param.cfg_enc_params.crop_height = (temp_frame.height / 64 * 64);
+            ctx->api_param.cfg_enc_params.hor_offset = ctx->api_param.cfg_enc_params.ver_offset = 0;
+            av_log(avctx, AV_LOG_DEBUG, "xcoder_encode_reinit sets "
+                   "crop width x height to %d x %d for Motion Constrained mode 2\n",
+                   ctx->api_param.cfg_enc_params.crop_width,
+                   ctx->api_param.cfg_enc_params.crop_height);
+        } else {
+            ctx->api_param.cfg_enc_params.crop_width = ctx->api_param.cfg_enc_params.crop_height = 0;
+            ctx->api_param.cfg_enc_params.hor_offset = ctx->api_param.cfg_enc_params.ver_offset = 0;
+        }
+    } else {
+        if (avctx->codec_id == AV_CODEC_ID_AV1) {
+            // AV1 8x8 alignment HW limitation is now worked around by FW cropping input resolution
+            if (temp_frame.width % NI_PARAM_AV1_ALIGN_WIDTH_HEIGHT)
+                av_log(avctx, AV_LOG_ERROR,
+                       "resolution change: AV1 Picture Width not aligned to %d - picture will be cropped\n",
+                       NI_PARAM_AV1_ALIGN_WIDTH_HEIGHT);
+
+            if (temp_frame.height % NI_PARAM_AV1_ALIGN_WIDTH_HEIGHT)
+                av_log(avctx, AV_LOG_ERROR,
+                       "resolution change: AV1 Picture Height not aligned to %d - picture will be cropped\n",
+                       NI_PARAM_AV1_ALIGN_WIDTH_HEIGHT);
+        }
+        ret = xcoder_encode_sequence_change(avctx, temp_frame.width, temp_frame.height, bit_depth);
+    }
+
+    // keep device handle(s) open during sequence change to fix mem bin buffer not recycled
+    ctx->api_ctx.device_handle  = device_handle;
+    ctx->api_ctx.blk_io_handle = blk_io_handle;
+    ctx->api_ctx.hw_id = hw_id;
+    av_strlcpy(ctx->api_ctx.blk_dev_name, tmp_blk_dev_name, NI_MAX_DEVICE_NAME_LEN);
+    ctx->api_ctx.session_run_state = SESSION_RUN_STATE_SEQ_CHANGE_OPENING; // this state is referenced when sending first frame after sequence change
+
+    return ret;
+}
+
+static int xcoder_receive_packet(AVCodecContext *avctx, AVPacket *pkt)
+{
+    XCoderEncContext *ctx = avctx->priv_data;
+    int i, ret = 0;
+    int recv;
+    ni_packet_t *xpkt = &ctx->api_pkt.data.packet;
+    bool av1_output_frame = 0;
+
+    av_log(avctx, AV_LOG_VERBOSE, "XCoder receive packet\n");
+
+    if (ctx->encoder_eof) {
+        av_log(avctx, AV_LOG_VERBOSE, "xcoder_receive_packet: EOS\n");
+        return AVERROR_EOF;
+    }
+
+    if (ni_packet_buffer_alloc(xpkt, NI_MAX_TX_SZ)) {
+        av_log(avctx, AV_LOG_ERROR,
+               "xcoder_receive_packet: packet buffer size %d allocation failed\n",
+               NI_MAX_TX_SZ);
+        return AVERROR(ENOMEM);
+    }
+
+    if (avctx->codec_id == AV_CODEC_ID_MJPEG && (!ctx->spsPpsArrived)) {
+        ctx->spsPpsArrived = 1;
+        // for Jpeg, start pkt_num counter from 1, because unlike video codecs
+        // (1st packet is header), there is no header for Jpeg
+        ctx->api_ctx.pkt_num = 1;
+    }
+
+    while (1) {
+        xpkt->recycle_index = -1;
+        recv = ni_device_session_read(&ctx->api_ctx, &(ctx->api_pkt), NI_DEVICE_TYPE_ENCODER);
+
+        av_log(avctx, AV_LOG_TRACE,
+               "XCoder receive packet: xpkt.end_of_stream=%d, xpkt.data_len=%d, "
+               "xpkt.frame_type=%d, recv=%d, encoder_flushing=%d, encoder_eof=%d\n",
+               xpkt->end_of_stream, xpkt->data_len, xpkt->frame_type, recv,
+               ctx->encoder_flushing, ctx->encoder_eof);
+
+        if (recv <= 0) {
+            ctx->encoder_eof = xpkt->end_of_stream;
+            if (ctx->encoder_eof || xpkt->end_of_stream) {
+                if (SESSION_RUN_STATE_SEQ_CHANGE_DRAINING ==
+                    ctx->api_ctx.session_run_state) {
+                    // after sequence change completes, reset codec state
+                    av_log(avctx, AV_LOG_INFO, "xcoder_receive_packet 1: sequence "
+                           "change completed, return AVERROR(EAGAIN) and will reopen "
+                           "codec!\n");
+
+                    ret = xcoder_encode_reinit(avctx);
+                    av_log(avctx, AV_LOG_DEBUG, "xcoder_receive_packet: xcoder_encode_reinit ret %d\n", ret);
+                    if (ret >= 0) {
+                        ret = AVERROR(EAGAIN);
+
+                        xcoder_send_frame(avctx, NULL);
+
+                        ctx->api_ctx.session_run_state = SESSION_RUN_STATE_NORMAL;
+                    }
+                    break;
+                }
+
+                ret = AVERROR_EOF;
+                av_log(avctx, AV_LOG_VERBOSE, "xcoder_receive_packet: got encoder_eof, return AVERROR_EOF\n");
+                break;
+            } else {
+                bool bIsReset = false;
+                if (NI_RETCODE_ERROR_VPU_RECOVERY == recv) {
+                    xcoder_encode_reset(avctx);
+                    bIsReset = true;
+                } else if (NI_RETCODE_ERROR_INVALID_SESSION == recv) {
+                    av_log(ctx, AV_LOG_ERROR, "encoder read retval %d\n", recv);
+                    ret = AVERROR(EIO);
+                    break;
+                }
+                ret = AVERROR(EAGAIN);
+                if ((!ctx->encoder_flushing && !ctx->eos_fme_received) || bIsReset) { // if encode session was reset, can't read again with invalid session, must break out first
+                    av_log(avctx, AV_LOG_TRACE, "xcoder_receive_packet: NOT encoder_"
+                           "flushing, NOT eos_fme_received, return AVERROR(EAGAIN)\n");
+                    break;
+                }
+            }
+        } else {
+            /* got encoded data back */
+            uint8_t *p_src, *p_end;
+            int64_t local_pts;
+            ni_custom_sei_set_t *p_custom_sei_set;
+            int meta_size = ctx->api_ctx.meta_size;
+            uint32_t copy_len = 0;
+            uint32_t data_len = 0;
+            int total_custom_sei_size = 0;
+            int custom_sei_count = 0;
+
+            if (avctx->pix_fmt == AV_PIX_FMT_NI_QUAD && xpkt->recycle_index >= 0 &&
+                avctx->height >= NI_MIN_HEIGHT && avctx->width >= NI_MIN_WIDTH &&
+                xpkt->recycle_index < NI_GET_MAX_HWDESC_P2P_BUF_ID(ctx->api_ctx.ddr_config)) {
+                int avframe_index =
+                    recycle_index_2_avframe_index(ctx, xpkt->recycle_index);
+                av_log(avctx, AV_LOG_VERBOSE, "UNREF trace ui16FrameIdx = [%d].\n",
+                       xpkt->recycle_index);
+                if (avframe_index >= 0 && ctx->sframe_pool[avframe_index]) {
+                    av_frame_unref(ctx->sframe_pool[avframe_index]);
+                    av_log(avctx, AV_LOG_DEBUG,
+                           "AVframe_index = %d pushed to free tail %d\n",
+                           avframe_index, ctx->freeTail);
+                    enq_free_frames(ctx, avframe_index);
+                    // enqueue the index back to free
+                    xpkt->recycle_index = -1;
+                } else {
+                    av_log(avctx, AV_LOG_DEBUG,
+                           "can't push to tail - avframe_index %d sframe_pool %p\n",
+                           avframe_index, ctx->sframe_pool[avframe_index]);
+                }
+            }
+
+            if (!ctx->spsPpsArrived) {
+                ret = AVERROR(EAGAIN);
+                ctx->spsPpsArrived = 1;
+                ctx->spsPpsHdrLen = recv - meta_size;
+                ctx->p_spsPpsHdr = av_malloc(ctx->spsPpsHdrLen);
+                if (!ctx->p_spsPpsHdr) {
+                    ret = AVERROR(ENOMEM);
+                    break;
+                }
+
+                memcpy(ctx->p_spsPpsHdr, (uint8_t *)xpkt->p_data + meta_size,
+                       xpkt->data_len - meta_size);
+
+                // start pkt_num counter from 1 to get the real first frame
+                ctx->api_ctx.pkt_num = 1;
+                // for low-latency mode, keep reading until the first frame is back
+                if (ctx->api_param.low_delay_mode) {
+                    av_log(avctx, AV_LOG_TRACE, "XCoder receive packet: low delay mode,"
+                           " keep reading until 1st pkt arrives\n");
+                    continue;
+                }
+                break;
+            }
+
+            // handle pic skip
+            if (xpkt->frame_type == 3) { // 0=I, 1=P, 2=B, 3=not coded / skip
+                ret = AVERROR(EAGAIN);
+                if (ctx->first_frame_pts == INT_MIN)
+                    ctx->first_frame_pts = xpkt->pts;
+                if (AV_CODEC_ID_AV1 == avctx->codec_id) {
+                    ctx->latest_dts = xpkt->pts;
+                } else if (ctx->total_frames_received < ctx->dtsOffset) {
+                    // guess dts
+                    ctx->latest_dts = ctx->first_frame_pts +
+                                      ctx->gop_offset_count - ctx->dtsOffset;
+                    ctx->gop_offset_count++;
+                } else {
+                    // get dts from pts FIFO
+                    ctx->latest_dts =
+                        ctx->api_ctx
+                            .enc_pts_list[ctx->api_ctx.enc_pts_r_idx % NI_FIFO_SZ];
+                    ctx->api_ctx.enc_pts_r_idx++;
+                }
+                if (ctx->latest_dts > xpkt->pts) {
+                    ctx->latest_dts = xpkt->pts;
+                }
+                ctx->total_frames_received++;
+
+                if (!ctx->encoder_flushing && !ctx->eos_fme_received) {
+                    av_log(avctx, AV_LOG_TRACE, "xcoder_receive_packet: skip"
+                           " picture output, return AVERROR(EAGAIN)\n");
+                    break;
+                } else {
+                    continue;
+                }
+            }
+
+            // store av1 packets to be merged & sent along with future packet
+            if (avctx->codec_id == AV_CODEC_ID_AV1) {
+                av_log(
+                    avctx, AV_LOG_TRACE,
+                    "xcoder_receive_packet: AV1 xpkt buf %p size %d show_frame %d\n",
+                    xpkt->p_data, xpkt->data_len, xpkt->av1_show_frame);
+                if (!xpkt->av1_show_frame) {
+                    // store AV1 packets
+                    xpkt->av1_p_buffer[xpkt->av1_buffer_index] = xpkt->p_buffer;
+                    xpkt->av1_p_data[xpkt->av1_buffer_index] = xpkt->p_data;
+                    xpkt->av1_buffer_size[xpkt->av1_buffer_index] = xpkt->buffer_size;
+                    xpkt->av1_data_len[xpkt->av1_buffer_index] = xpkt->data_len;
+                    xpkt->av1_buffer_index++;
+                    xpkt->p_buffer = NULL;
+                    xpkt->p_data = NULL;
+                    xpkt->buffer_size = 0;
+                    xpkt->data_len = 0;
+                    if (xpkt->av1_buffer_index >= MAX_AV1_ENCODER_GOP_NUM) {
+                        av_log(avctx, AV_LOG_ERROR,
+                               "xcoder_receive_packet: recv AV1 not shown frame "
+                               "number %d >= %d, return AVERROR_EXTERNAL\n",
+                               xpkt->av1_buffer_index, MAX_AV1_ENCODER_GOP_NUM);
+                        ret = AVERROR_EXTERNAL;
+                        break;
+                    } else if (!ctx->encoder_flushing && !ctx->eos_fme_received) {
+                        av_log(avctx, AV_LOG_TRACE,
+                               "xcoder_receive_packet: recv AV1 not shown frame, "
+                               "return AVERROR(EAGAIN)\n");
+                        ret = AVERROR(EAGAIN);
+                        break;
+                    } else {
+                        if (ni_packet_buffer_alloc(xpkt, NI_MAX_TX_SZ)) {
+                            av_log(avctx, AV_LOG_ERROR,
+                                   "xcoder_receive_packet: AV1 packet buffer size %d "
+                                   "allocation failed during flush\n",
+                                   NI_MAX_TX_SZ);
+                            ret = AVERROR(ENOMEM);
+                            break;
+                        }
+                        av_log(avctx, AV_LOG_TRACE,
+                               "xcoder_receive_packet: recv AV1 not shown frame "
+                               "during flush, continue..\n");
+                        continue;
+                    }
+                } else {
+                    // calculate length of previously received AV1 packets pending for merge
+                    av1_output_frame = 1;
+                    for (i = 0; i < xpkt->av1_buffer_index; i++) {
+                        data_len += xpkt->av1_data_len[i] - meta_size;
+                    }
+                }
+            }
+
+            p_src = (uint8_t*)xpkt->p_data + meta_size;
+            p_end = p_src + (xpkt->data_len - meta_size);
+            local_pts = xpkt->pts;
+
+            p_custom_sei_set = ctx->api_ctx.pkt_custom_sei_set[local_pts % NI_FIFO_SZ];
+            if (p_custom_sei_set != NULL) {
+                custom_sei_count = p_custom_sei_set->count;
+                for (i = 0; i < p_custom_sei_set->count; i++) {
+                    total_custom_sei_size += p_custom_sei_set->custom_sei[i].size;
+                }
+            }
+
+            if (custom_sei_count) {
+                // if HRD or custom sei enabled, search for pic_timing or custom SEI insertion point by
+                // skipping non-VCL until video data is found.
+                uint32_t nalu_type = 0;
+                const uint8_t *p_start_code = p_src;
+                uint32_t stc = -1;
+                if (AV_CODEC_ID_HEVC == avctx->codec_id) {
+                    do {
+                        stc = -1;
+                        p_start_code = avpriv_find_start_code(p_start_code, p_end, &stc);
+                        nalu_type = (stc >> 1) & 0x3F;
+                    } while (nalu_type > HEVC_NAL_RSV_VCL31);
+
+                    // calc. length to copy
+                    copy_len = p_start_code - 5 - p_src;
+                } else if (AV_CODEC_ID_H264 == avctx->codec_id) {
+                    do {
+                        stc = -1;
+                        p_start_code = avpriv_find_start_code(p_start_code, p_end, &stc);
+                        nalu_type = stc & 0x1F;
+                    } while (nalu_type > H264_NAL_IDR_SLICE);
+
+                    // calc. length to copy
+                    copy_len = p_start_code - 5 - p_src;
+                } else {
+                    av_log(avctx, AV_LOG_ERROR, "xcoder_receive packet: codec %d not "
+                           "supported for SEI !\n", avctx->codec_id);
+                }
+            }
+
+            if (avctx->codec_id == AV_CODEC_ID_MJPEG && !ctx->firstPktArrived) {
+                // there is no header for Jpeg, so skip header copy
+                ctx->firstPktArrived = 1;
+                if (ctx->first_frame_pts == INT_MIN) {
+                    ctx->first_frame_pts = xpkt->pts;
+                }
+            }
+
+            if (!ctx->firstPktArrived) {
+                int sizeof_spspps_attached_to_idr = ctx->spsPpsHdrLen;
+                if ((avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) &&
+                    (avctx->codec_id != AV_CODEC_ID_AV1) &&
+                    (ctx->seqChangeCount == 0)) {
+                    sizeof_spspps_attached_to_idr = 0;
+                }
+                ctx->firstPktArrived = 1;
+                if (ctx->first_frame_pts == INT_MIN) {
+                    ctx->first_frame_pts = xpkt->pts;
+                }
+
+                data_len += xpkt->data_len - meta_size + sizeof_spspps_attached_to_idr + total_custom_sei_size;
+                if (avctx->codec_id == AV_CODEC_ID_AV1)
+                    av_log(avctx, AV_LOG_TRACE, "xcoder_receive_packet: AV1 first output pkt size %d\n", data_len);
+
+                ret = ff_get_encode_buffer(avctx, pkt, data_len, 0);
+
+                if (!ret) {
+                    uint8_t *p_dst, *p_side_data;
+
+                    // fill in AVC/HEVC sidedata
+                    if ((avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) &&
+                        (avctx->extradata_size != ctx->spsPpsHdrLen ||
+                         (memcmp(avctx->extradata, ctx->p_spsPpsHdr, ctx->spsPpsHdrLen) !=
+                          0))) {
+                        avctx->extradata_size = ctx->spsPpsHdrLen;
+                        av_freep(&avctx->extradata);
+                        avctx->extradata = av_mallocz(avctx->extradata_size +
+                                                  AV_INPUT_BUFFER_PADDING_SIZE);
+                        if (!avctx->extradata) {
+                            av_log(avctx, AV_LOG_ERROR,
+                                   "Cannot allocate AVC/HEVC header of size %d.\n",
+                                   avctx->extradata_size);
+                            return AVERROR(ENOMEM);
+                        }
+                        memcpy(avctx->extradata, ctx->p_spsPpsHdr, avctx->extradata_size);
+                    }
+
+                    p_side_data = av_packet_new_side_data(
+                        pkt, AV_PKT_DATA_NEW_EXTRADATA, ctx->spsPpsHdrLen);
+                    if (p_side_data) {
+                        memcpy(p_side_data, ctx->p_spsPpsHdr, ctx->spsPpsHdrLen);
+                    }
+
+                    p_dst = pkt->data;
+                    if (sizeof_spspps_attached_to_idr) {
+                        memcpy(p_dst, ctx->p_spsPpsHdr, ctx->spsPpsHdrLen);
+                        p_dst += ctx->spsPpsHdrLen;
+                    }
+
+                    if (custom_sei_count && avctx->codec_id != AV_CODEC_ID_AV1) {
+                        // copy buf_period
+                        memcpy(p_dst, p_src, copy_len);
+                        p_dst += copy_len;
+
+                        for (i = 0; i < custom_sei_count; i++) {
+                            // copy custom sei
+                            ni_custom_sei_t *p_custom_sei = &p_custom_sei_set->custom_sei[i];
+                            if (p_custom_sei->location == NI_CUSTOM_SEI_LOC_AFTER_VCL) {
+                                break;
+                            }
+                            memcpy(p_dst, &p_custom_sei->data[0], p_custom_sei->size);
+                            p_dst += p_custom_sei->size;
+                        }
+
+                        // copy the IDR data
+                        memcpy(p_dst, p_src + copy_len,
+                               xpkt->data_len - meta_size - copy_len);
+                        p_dst += xpkt->data_len - meta_size - copy_len;
+
+                        // copy custom sei after slice
+                        for (; i < custom_sei_count; i++) {
+                            ni_custom_sei_t *p_custom_sei = &p_custom_sei_set->custom_sei[i];
+                            memcpy(p_dst, &p_custom_sei->data[0], p_custom_sei->size);
+                            p_dst += p_custom_sei->size;
+                        }
+                    } else {
+                        // merge AV1 packets
+                        if (avctx->codec_id == AV_CODEC_ID_AV1) {
+                            for (i = 0; i < xpkt->av1_buffer_index; i++) {
+                                memcpy(p_dst, (uint8_t *)xpkt->av1_p_data[i] + meta_size,
+                                       xpkt->av1_data_len[i] - meta_size);
+                                p_dst += (xpkt->av1_data_len[i] - meta_size);
+                            }
+                        }
+
+                        memcpy(p_dst, (uint8_t*)xpkt->p_data + meta_size,
+                               xpkt->data_len - meta_size);
+                    }
+                }
+            } else {
+                data_len += xpkt->data_len - meta_size + total_custom_sei_size;
+                if (avctx->codec_id == AV_CODEC_ID_AV1)
+                    av_log(avctx, AV_LOG_TRACE, "xcoder_receive_packet: AV1 output pkt size %d\n", data_len);
+
+                ret = ff_get_encode_buffer(avctx, pkt, data_len, 0);
+
+                if (!ret) {
+                    uint8_t *p_dst = pkt->data;
+
+                    if (custom_sei_count && avctx->codec_id != AV_CODEC_ID_AV1) {
+                        // copy buf_period
+                        memcpy(p_dst, p_src, copy_len);
+                        p_dst += copy_len;
+
+                        for (i = 0; i < custom_sei_count; i++) {
+                            // copy custom sei
+                            ni_custom_sei_t *p_custom_sei = &p_custom_sei_set->custom_sei[i];
+                            if (p_custom_sei->location == NI_CUSTOM_SEI_LOC_AFTER_VCL) {
+                                break;
+                            }
+                            memcpy(p_dst, &p_custom_sei->data[0], p_custom_sei->size);
+                            p_dst += p_custom_sei->size;
+                        }
+
+                        // copy the packet data
+                        memcpy(p_dst, p_src + copy_len,
+                               xpkt->data_len - meta_size - copy_len);
+                        p_dst += xpkt->data_len - meta_size - copy_len;
+
+                        // copy custom sei after slice
+                        for (; i < custom_sei_count; i++) {
+                            ni_custom_sei_t *p_custom_sei = &p_custom_sei_set->custom_sei[i];
+                            memcpy(p_dst, &p_custom_sei->data[0], p_custom_sei->size);
+                            p_dst += p_custom_sei->size;
+                        }
+                    } else {
+                        // merge AV1 packets
+                        if (avctx->codec_id == AV_CODEC_ID_AV1) {
+                            for (i = 0; i < xpkt->av1_buffer_index; i++) {
+                                memcpy(p_dst, (uint8_t *)xpkt->av1_p_data[i] + meta_size,
+                                       xpkt->av1_data_len[i] - meta_size);
+                                p_dst += (xpkt->av1_data_len[i] - meta_size);
+                            }
+                        }
+
+                        memcpy(p_dst, (uint8_t *)xpkt->p_data + meta_size,
+                               xpkt->data_len - meta_size);
+                    }
+                }
+            }
+
+            // free buffer
+            if (custom_sei_count) {
+                ni_memfree(p_custom_sei_set);
+                ctx->api_ctx.pkt_custom_sei_set[local_pts % NI_FIFO_SZ] = NULL;
+            }
+
+            if (!ret) {
+                if (xpkt->frame_type == 0) {
+                    pkt->flags |= AV_PKT_FLAG_KEY;
+                }
+
+                pkt->pts = xpkt->pts;
+                /* to ensure pts>dts for all frames, we assign a guess pts for the first 'dtsOffset' frames and then the pts from input stream
+                 * is extracted from input pts FIFO.
+                 * if GOP = IBBBP and PTSs = 0 1 2 3 4 5 .. then out DTSs = -3 -2 -1 0 1 ... and -3 -2 -1 are the guessed values
+                 * if GOP = IBPBP and PTSs = 0 1 2 3 4 5 .. then out DTSs = -1 0 1 2 3 ... and -1 is the guessed value
+                 * the number of guessed values is equal to dtsOffset
+                 */
+                if (AV_CODEC_ID_AV1 == avctx->codec_id) {
+                    pkt->dts = pkt->pts;
+                    av_log(avctx, AV_LOG_TRACE, "Packet dts (av1): %ld\n", pkt->dts);
+                } else if (ctx->total_frames_received < ctx->dtsOffset) {
+                    // guess dts
+                    pkt->dts = ctx->first_frame_pts + ctx->gop_offset_count - ctx->dtsOffset;
+                    ctx->gop_offset_count++;
+                    av_log(avctx, AV_LOG_TRACE, "Packet dts (guessed): %ld\n",
+                           pkt->dts);
+                } else {
+                    // get dts from pts FIFO
+                    pkt->dts =
+                        ctx->api_ctx
+                            .enc_pts_list[ctx->api_ctx.enc_pts_r_idx % NI_FIFO_SZ];
+                    ctx->api_ctx.enc_pts_r_idx++;
+                    av_log(avctx, AV_LOG_TRACE, "Packet dts: %ld\n", pkt->dts);
+                }
+                if (ctx->total_frames_received >= 1) {
+                    if (pkt->dts < ctx->latest_dts) {
+                        av_log(NULL, AV_LOG_WARNING, "dts: %ld < latest_dts: %ld.\n",
+                                pkt->dts, ctx->latest_dts);
+                    }
+                }
+                if (pkt->pts < ctx->first_frame_pts) {
+                    av_log(NULL, AV_LOG_WARNING, "pts %ld less than first frame pts %ld. Force it to first frame pts\n",
+                            pkt->pts, ctx->first_frame_pts);
+                    pkt->pts = ctx->first_frame_pts;
+                }
+                if (pkt->dts > pkt->pts) {
+                    av_log(NULL, AV_LOG_WARNING, "dts: %ld, pts: %ld. Forcing dts = pts \n",
+                            pkt->dts, pkt->pts);
+                    pkt->dts = pkt->pts;
+                    av_log(avctx, AV_LOG_TRACE, "Force dts to: %ld\n", pkt->dts);
+                }
+                ctx->total_frames_received++;
+                ctx->latest_dts = pkt->dts;
+                av_log(avctx, AV_LOG_DEBUG, "XCoder recv pkt #%" PRId64 ""
+                       " pts %" PRId64 "  dts %" PRId64 "  size %d  st_index %d frame_type %u avg qp %u\n",
+                       ctx->api_ctx.pkt_num - 1, pkt->pts, pkt->dts, pkt->size,
+                       pkt->stream_index, xpkt->frame_type, xpkt->avg_frame_qp);
+
+                enum AVPictureType pict_type = AV_PICTURE_TYPE_NONE;
+                switch (xpkt->frame_type) {
+                case 0:
+                  pict_type = AV_PICTURE_TYPE_I;
+                  break;
+                case 1:
+                  pict_type = AV_PICTURE_TYPE_P;
+                  break;
+                case 2:
+                  pict_type = AV_PICTURE_TYPE_B;
+                  break;
+                default:
+                  break;
+                }
+
+                int frame_qp = 0;
+                switch (avctx->codec_id) {
+                case AV_CODEC_ID_H264:
+                case AV_CODEC_ID_HEVC:
+                  frame_qp = xpkt->avg_frame_qp;
+                  break;
+                default:
+                  break;
+                }
+
+                ff_side_data_set_encoder_stats(pkt, frame_qp * FF_QP2LAMBDA, NULL, 0, pict_type);
+            }
+            ctx->encoder_eof = xpkt->end_of_stream;
+            if (ctx->encoder_eof &&
+                SESSION_RUN_STATE_SEQ_CHANGE_DRAINING ==
+                ctx->api_ctx.session_run_state) {
+                // after sequence change completes, reset codec state
+                av_log(avctx, AV_LOG_DEBUG, "xcoder_receive_packet 2: sequence change "
+                    "completed, return 0 and will reopen codec !\n");
+                ret = xcoder_encode_reinit(avctx);
+                av_log(avctx, AV_LOG_DEBUG, "xcoder_receive_packet: xcoder_encode_reinit ret %d\n", ret);
+                if (ret >= 0) {
+                    xcoder_send_frame(avctx, NULL);
+                    ctx->api_ctx.session_run_state = SESSION_RUN_STATE_NORMAL;
+                }
+            }
+            break;
+        }
+    }
+
+    if ((AV_CODEC_ID_AV1 == avctx->codec_id) && xpkt->av1_buffer_index &&
+        av1_output_frame) {
+        av_log(avctx, AV_LOG_TRACE,
+               "xcoder_receive_packet: ni_packet_buffer_free_av1 %d packtes\n",
+               xpkt->av1_buffer_index);
+        ni_packet_buffer_free_av1(xpkt);
+    }
+
+    av_log(avctx, AV_LOG_VERBOSE, "xcoder_receive_packet: return %d\n", ret);
+    return ret;
+}
+
+// for FFmpeg 4.4+
+int ff_xcoder_receive_packet(AVCodecContext *avctx, AVPacket *pkt)
+{
+    XCoderEncContext *ctx = avctx->priv_data;
+    AVFrame *frame = &ctx->buffered_fme;
+    int ret;
+
+    ret = ff_encode_get_frame(avctx, frame);
+    if (!ctx->encoder_flushing && ret >= 0 || ret == AVERROR_EOF) {
+        ret = xcoder_send_frame(avctx, (ret == AVERROR_EOF ? NULL : frame));
+        if (ret < 0 && ret != AVERROR_EOF) {
+            av_frame_unref(frame);
+            return ret;
+        }
+    }
+    // Once send_frame returns EOF go on receiving packets until EOS is met.
+    return xcoder_receive_packet(avctx, pkt);
+}
+
+// Return true when the free-frame index ring buffer holds no entries,
+// i.e. the head and tail cursors coincide.
+static bool free_frames_isempty(XCoderEncContext *ctx)
+{
+    return (ctx->freeHead == ctx->freeTail);
+}
+
+// Return true when the ring buffer is full: the slot after freeTail
+// (wrapping at MAX_NUM_FRAMEPOOL_HWAVFRAME) is freeHead. The backing
+// array has MAX_NUM_FRAMEPOOL_HWAVFRAME + 1 slots so one slot stays
+// unused, which is what lets full be distinguished from empty.
+static bool free_frames_isfull(XCoderEncContext *ctx)
+{
+    return (ctx->freeHead == ((ctx->freeTail == MAX_NUM_FRAMEPOOL_HWAVFRAME) ? 0 : ctx->freeTail + 1));
+}
+
+// Pop the head entry of the free-frame ring buffer. The stored index is
+// not returned here — presumably the caller reads
+// aFree_Avframes_list[freeHead] before calling; TODO confirm against the
+// call sites. The vacated slot is poisoned with -1.
+// Returns 0 on success, -1 if the ring buffer is empty.
+static int deq_free_frames(XCoderEncContext *ctx)
+{
+    if (free_frames_isempty(ctx)) {
+        return -1;
+    }
+    ctx->aFree_Avframes_list[ctx->freeHead] = -1;
+    // Advance head with wrap-around at MAX_NUM_FRAMEPOOL_HWAVFRAME.
+    ctx->freeHead = (ctx->freeHead == MAX_NUM_FRAMEPOOL_HWAVFRAME) ? 0 : ctx->freeHead + 1;
+    return 0;
+}
+
+// Push sframe_pool index 'idx' onto the tail of the free-frame ring
+// buffer. Returns 0 on success, -1 if the ring buffer is full.
+static int enq_free_frames(XCoderEncContext *ctx, int idx)
+{
+    if (free_frames_isfull(ctx)) {
+        return -1;
+    }
+    ctx->aFree_Avframes_list[ctx->freeTail] = idx;
+    // Advance tail with wrap-around at MAX_NUM_FRAMEPOOL_HWAVFRAME.
+    ctx->freeTail = (ctx->freeTail == MAX_NUM_FRAMEPOOL_HWAVFRAME) ? 0 : ctx->freeTail + 1;
+    return 0;
+}
+
+// Map a hardware recycle index back to its sframe_pool slot: scan the
+// pool for the frame whose niFrameSurface1_t (carried in data[3]) has a
+// matching ui16FrameIdx. Returns the pool index, or -1 if no pooled
+// frame currently holds that surface.
+static int recycle_index_2_avframe_index(XCoderEncContext *ctx, uint32_t recycleIndex)
+{
+    int i;
+    for (i = 0; i < MAX_NUM_FRAMEPOOL_HWAVFRAME; i++) {
+        if (ctx->sframe_pool[i]->data[3] &&
+            ((niFrameSurface1_t *)(ctx->sframe_pool[i]->data[3]))->ui16FrameIdx == recycleIndex) {
+            return i;
+        }
+    }
+    return -1;
+}
+
+// Hardware configurations advertised by the Quadra encoders: NI_QUAD
+// hardware frames, plus device-backed setups for each supported software
+// pixel format. NULL-terminated, as required by FFCodec.hw_configs.
+const AVCodecHWConfigInternal *ff_ni_enc_hw_configs[] = {
+    HW_CONFIG_ENCODER_FRAMES(NI_QUAD,  NI_QUADRA),
+    HW_CONFIG_ENCODER_DEVICE(NV12, NI_QUADRA),
+    HW_CONFIG_ENCODER_DEVICE(P010LE, NI_QUADRA),
+    HW_CONFIG_ENCODER_DEVICE(YUV420P, NI_QUADRA),
+    HW_CONFIG_ENCODER_DEVICE(YUV420P10LE, NI_QUADRA),
+    NULL,
+};
diff --git a/libavcodec/nienc.h b/libavcodec/nienc.h
new file mode 100644
index 0000000000..0998b06a61
--- /dev/null
+++ b/libavcodec/nienc.h
@@ -0,0 +1,161 @@
+/*
+ * NetInt XCoder H.264/HEVC Encoder common code header
+ * Copyright (c) 2018-2019 NetInt
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_NIENC_H
+#define AVCODEC_NIENC_H
+
+#include <ni_rsrc_api.h>
+#include <ni_device_api.h>
+#include <ni_util.h>
+
+#include "libavutil/internal.h"
+
+#include "avcodec.h"
+#include "codec_internal.h"
+#include "internal.h"
+#include "libavutil/opt.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/fifo.h"
+
+#include "hwconfig.h"
+#include "nicodec.h"
+
+// Private context shared by all NETINT Quadra encoder wrappers.
+typedef struct XCoderEncContext {
+    AVClass *avclass;
+
+    /* from the command line, which resource allocation method we use */
+    char *dev_xcoder;
+    char *dev_xcoder_name;          /* dev name of the xcoder card to use */
+    char *blk_xcoder_name;          /* blk name of the xcoder card to use */
+    int dev_enc_idx;                /* user-specified encoder index */
+    char *dev_blk_name;             /* user-specified encoder block device name */
+    int nvme_io_size;               /* custom nvme io size */
+    int keep_alive_timeout;         /* keep alive timeout setting */
+    ni_device_context_t *rsrc_ctx;  /* resource management context */
+    uint64_t xcode_load_pixel; /* xcode load in pixels by this encode task */
+
+    AVFifo *fme_fifo;               /* presumably a queue of pending input frames — confirm against nienc.c */
+    int eos_fme_received;           /* nonzero once the EOS input frame was seen */
+    AVFrame buffered_fme; // buffered frame for sequence change handling
+
+    ni_session_data_io_t  api_pkt; /* used for receiving bitstream from xcoder */
+    ni_session_data_io_t   api_fme; /* used for sending YUV data to xcoder */
+    ni_session_context_t api_ctx;  /* libxcoder session (pts FIFO, pkt counters, SEI sets) */
+    ni_xcoder_params_t api_param;  /* encoder parameters passed to libxcoder */
+
+    int started;
+    uint8_t *p_spsPpsHdr;          /* cached SPS/PPS header bytes, copied into extradata and first packet */
+    int spsPpsHdrLen;              /* length of p_spsPpsHdr */
+    int spsPpsArrived;
+    int firstPktArrived;           /* set once the first encoded packet has been emitted */
+    int64_t dtsOffset;             /* number of leading packets that get a guessed DTS */
+    int gop_offset_count; /* counts the guessed DTS values emitted for the first dtsOffset packets */
+    uint64_t total_frames_received;
+    int64_t first_frame_pts;       /* PTS of the first frame; INT_MIN until known */
+    int64_t latest_dts;            /* last DTS emitted, used to warn on non-monotonic DTS */
+
+    int encoder_flushing;
+    int encoder_eof;
+
+    // ROI
+    int roi_side_data_size;
+    AVRegionOfInterest *av_rois;  // last passed in AVRegionOfInterest
+    int nb_rois;
+
+    /* backup copy of original values of -enc command line option */
+    int  orig_dev_enc_idx;
+
+    /* pool of hardware AVFrames plus a ring buffer of free pool indices
+     * (one spare slot so full != empty; see free_frames_isfull()) */
+    AVFrame *sframe_pool[MAX_NUM_FRAMEPOOL_HWAVFRAME];
+    int aFree_Avframes_list[MAX_NUM_FRAMEPOOL_HWAVFRAME + 1];
+    int freeHead;
+    int freeTail;
+
+    /* below are all command line options */
+    char *xcoder_opts;
+    char *xcoder_gop;
+    int gen_global_headers;
+    int udu_sei;
+
+    int reconfigCount;
+    int seqChangeCount;           /* number of input sequence changes handled so far */
+    // actual enc_change_params is in ni_session_context !
+
+} XCoderEncContext;
+
+// Helpers for the AVOption tables below: field offset into
+// XCoderEncContext and the common video-encoding option flags.
+#define OFFSETENC(x) offsetof(XCoderEncContext, x)
+#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
+
+// Common Netint encoder options
+// NOTE(review): "iosize" uses BEST_DEVICE_LOAD as its default .i64 —
+// looks copy-pasted from "ni_enc_idx"; confirm the intended default.
+#define NI_ENC_OPTIONS\
+    { "xcoder", "Select which XCoder card to use.", OFFSETENC(dev_xcoder), \
+      AV_OPT_TYPE_STRING, { .str = NI_BEST_MODEL_LOAD_STR }, CHAR_MIN, CHAR_MAX, VE, "xcoder" }, \
+    {     "bestmodelload", "Pick the least model load XCoder/encoder available.", 0, AV_OPT_TYPE_CONST, \
+          { .str = NI_BEST_MODEL_LOAD_STR }, 0, 0, VE, "xcoder" }, \
+    {     "bestload", "Pick the least real load XCoder/encoder available.", 0, AV_OPT_TYPE_CONST, \
+          { .str = NI_BEST_REAL_LOAD_STR }, 0, 0, VE, "xcoder" }, \
+    \
+    { "ni_enc_idx", "Select which encoder to use by index. First is 0, second is 1, and so on.", \
+      OFFSETENC(dev_enc_idx), AV_OPT_TYPE_INT, { .i64 = BEST_DEVICE_LOAD }, -1, INT_MAX, VE }, \
+    \
+    { "ni_enc_name", "Select which encoder to use by NVMe block device name, e.g. /dev/nvme0n1.", \
+      OFFSETENC(dev_blk_name), AV_OPT_TYPE_STRING, { 0 }, 0, 0, VE }, \
+    \
+    { "encname", "Select which encoder to use by NVMe block device name, e.g. /dev/nvme0n1.", \
+      OFFSETENC(dev_blk_name), AV_OPT_TYPE_STRING, { 0 }, 0, 0, VE }, \
+    \
+    { "iosize", "Specify a custom NVMe IO transfer size (multiples of 4096 only).", \
+      OFFSETENC(nvme_io_size), AV_OPT_TYPE_INT, { .i64 = BEST_DEVICE_LOAD }, -1, INT_MAX, VE }, \
+    \
+    { "xcoder-params", "Set the XCoder configuration using a :-separated list of key=value parameters.", \
+      OFFSETENC(xcoder_opts), AV_OPT_TYPE_STRING, { 0 }, 0, 0, VE }, \
+    \
+    { "xcoder-gop", "Set the XCoder custom gop using a :-separated list of key=value parameters.", \
+      OFFSETENC(xcoder_gop), AV_OPT_TYPE_STRING, { 0 }, 0, 0, VE }, \
+    \
+    { "keep_alive_timeout", "Specify a custom session keep alive timeout in seconds.", \
+      OFFSETENC(keep_alive_timeout), AV_OPT_TYPE_INT, { .i64 = NI_DEFAULT_KEEP_ALIVE_TIMEOUT }, \
+      NI_MIN_KEEP_ALIVE_TIMEOUT, NI_MAX_KEEP_ALIVE_TIMEOUT, VE }
+
+// "gen_global_headers" encoder options
+#define NI_ENC_OPTION_GEN_GLOBAL_HEADERS\
+    { "gen_global_headers", "Generate SPS and PPS headers during codec initialization.", \
+      OFFSETENC(gen_global_headers), AV_OPT_TYPE_INT, { .i64 = GEN_GLOBAL_HEADERS_AUTO }, \
+      GEN_GLOBAL_HEADERS_AUTO, GEN_GLOBAL_HEADERS_ON, VE, "gen_global_headers" }, \
+    {     "auto", NULL, 0, AV_OPT_TYPE_CONST, \
+          { .i64 = GEN_GLOBAL_HEADERS_AUTO }, 0, 0, VE, "gen_global_headers" }, \
+    {     "off", NULL, 0, AV_OPT_TYPE_CONST, \
+          { .i64 = GEN_GLOBAL_HEADERS_OFF }, 0, 0, VE, "gen_global_headers" }, \
+    {     "on", NULL, 0, AV_OPT_TYPE_CONST, \
+          { .i64 = GEN_GLOBAL_HEADERS_ON }, 0, 0, VE, "gen_global_headers" }
+
+// "udu_sei" user-data-unregistered SEI pass-through option
+#define NI_ENC_OPTION_UDU_SEI \
+    { "udu_sei", "Pass through user data unregistered SEI if available", OFFSETENC(udu_sei), \
+      AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }
+
+// Common init/close/receive entry points implemented in nienc.c and
+// shared by the per-codec wrappers (nienc_h264.c, nienc_hevc.c).
+av_cold int ff_xcoder_encode_init(AVCodecContext *avctx);
+
+int ff_xcoder_encode_close(AVCodecContext *avctx);
+
+int ff_xcoder_receive_packet(AVCodecContext *avctx, AVPacket *pkt);
+
+// Shared hardware-configuration table (defined in nienc.c).
+extern const AVCodecHWConfigInternal *ff_ni_enc_hw_configs[];
+
+#endif /* AVCODEC_NIENC_H */
diff --git a/libavcodec/nienc_h264.c b/libavcodec/nienc_h264.c
new file mode 100644
index 0000000000..0b65c43585
--- /dev/null
+++ b/libavcodec/nienc_h264.c
@@ -0,0 +1,61 @@
+/*
+ * NetInt XCoder H.264 Encoder
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "nienc.h"
+
+// Input pixel formats accepted by the H.264 encoder: 8/10-bit planar and
+// semi-planar software formats, plus AV_PIX_FMT_NI_QUAD hardware frames.
+static const enum AVPixelFormat ni_quadra_enc_h264_pix_fmts[] = {
+    AV_PIX_FMT_YUV420P,
+    AV_PIX_FMT_YUVJ420P,
+    AV_PIX_FMT_YUV420P10LE,
+    AV_PIX_FMT_NV12,
+    AV_PIX_FMT_P010LE,
+    AV_PIX_FMT_NI_QUAD,
+    AV_PIX_FMT_NONE
+};
+
+// AVOption table: common NETINT options plus the gen_global_headers and
+// udu_sei extras (macros defined in nienc.h).
+static const AVOption enc_options[] = {
+    NI_ENC_OPTIONS,
+    NI_ENC_OPTION_GEN_GLOBAL_HEADERS,
+    NI_ENC_OPTION_UDU_SEI,
+    {NULL}
+};
+
+// AVClass exposing the private options above through the option system.
+static const AVClass h264_xcoderenc_class = {
+    .class_name = "h264_ni_quadra_enc",
+    .item_name  = av_default_item_name,
+    .option     = enc_options,
+    .version    = LIBAVUTIL_VERSION_INT,
+};
+
+// H.264 encoder registration: wires the shared XCoder entry points from
+// nienc.c into the FFCodec receive_packet encoder API.
+const FFCodec ff_h264_ni_quadra_encoder = {
+    .p.name           = "h264_ni_quadra_enc",
+    CODEC_LONG_NAME("H.264 NETINT Quadra encoder v" NI_XCODER_REVISION),
+    .p.type           = AVMEDIA_TYPE_VIDEO,
+    .p.id             = AV_CODEC_ID_H264,
+    .p.priv_class     = &h264_xcoderenc_class,
+    .p.capabilities   = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HARDWARE,
+    .p.wrapper_name   = "libxcoder_quadra",
+    CODEC_PIXFMTS_ARRAY(ni_quadra_enc_h264_pix_fmts),
+    FF_CODEC_RECEIVE_PACKET_CB(ff_xcoder_receive_packet),
+    .init             = ff_xcoder_encode_init,
+    .close            = ff_xcoder_encode_close,
+    .priv_data_size   = sizeof(XCoderEncContext),
+    .color_ranges     = AVCOL_RANGE_MPEG | AVCOL_RANGE_JPEG,
+    .hw_configs       = ff_ni_enc_hw_configs,
+};
diff --git a/libavcodec/nienc_hevc.c b/libavcodec/nienc_hevc.c
new file mode 100644
index 0000000000..9b16d163c9
--- /dev/null
+++ b/libavcodec/nienc_hevc.c
@@ -0,0 +1,61 @@
+/*
+ * NetInt XCoder HEVC Encoder
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "nienc.h"
+
+// Input pixel formats accepted by the HEVC encoder: 8/10-bit planar and
+// semi-planar software formats, plus AV_PIX_FMT_NI_QUAD hardware frames.
+static const enum AVPixelFormat ni_quadra_enc_h265_pix_fmts[] = {
+    AV_PIX_FMT_YUV420P,
+    AV_PIX_FMT_YUVJ420P,
+    AV_PIX_FMT_YUV420P10LE,
+    AV_PIX_FMT_NV12,
+    AV_PIX_FMT_P010LE,
+    AV_PIX_FMT_NI_QUAD,
+    AV_PIX_FMT_NONE
+};
+
+// AVOption table: common NETINT options plus the gen_global_headers and
+// udu_sei extras (macros defined in nienc.h).
+static const AVOption enc_options[] = {
+    NI_ENC_OPTIONS,
+    NI_ENC_OPTION_GEN_GLOBAL_HEADERS,
+    NI_ENC_OPTION_UDU_SEI,
+    {NULL}
+};
+
+// AVClass exposing the private options above through the option system.
+static const AVClass h265_xcoderenc_class = {
+    .class_name = "h265_ni_quadra_enc",
+    .item_name  = av_default_item_name,
+    .option     = enc_options,
+    .version    = LIBAVUTIL_VERSION_INT,
+};
+
+// HEVC encoder registration: wires the shared XCoder entry points from
+// nienc.c into the FFCodec receive_packet encoder API.
+const FFCodec ff_h265_ni_quadra_encoder = {
+    .p.name           = "h265_ni_quadra_enc",
+    CODEC_LONG_NAME("H.265 NETINT Quadra encoder v" NI_XCODER_REVISION),
+    .p.type           = AVMEDIA_TYPE_VIDEO,
+    // Use the canonical codec ID: AV_CODEC_ID_H265 is only a
+    // compatibility alias for AV_CODEC_ID_HEVC in codec_id.h, and the
+    // shared nienc.c code compares against AV_CODEC_ID_HEVC.
+    .p.id             = AV_CODEC_ID_HEVC,
+    .p.priv_class     = &h265_xcoderenc_class,
+    .p.capabilities   = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HARDWARE,
+    .p.wrapper_name   = "libxcoder_quadra",
+    CODEC_PIXFMTS_ARRAY(ni_quadra_enc_h265_pix_fmts),
+    FF_CODEC_RECEIVE_PACKET_CB(ff_xcoder_receive_packet),
+    .init             = ff_xcoder_encode_init,
+    .close            = ff_xcoder_encode_close,
+    .priv_data_size   = sizeof(XCoderEncContext),
+    .color_ranges     = AVCOL_RANGE_MPEG | AVCOL_RANGE_JPEG,
+    .hw_configs       = ff_ni_enc_hw_configs,
+};
diff --git a/libavutil/Makefile b/libavutil/Makefile
index ee77e51c08..be85dc49ab 100644
--- a/libavutil/Makefile
+++ b/libavutil/Makefile
@@ -50,6 +50,7 @@ HEADERS = adler32.h                                                     \
           hwcontext_amf.h                                               \
           hwcontext_qsv.h                                               \
           hwcontext_mediacodec.h                                        \
+          hwcontext_ni_quad.h                                           \
           hwcontext_opencl.h                                            \
           hwcontext_oh.h                                                \
           hwcontext_vaapi.h                                             \
@@ -211,6 +212,7 @@ OBJS-$(CONFIG_AMF)                      += hwcontext_amf.o
 OBJS-$(CONFIG_LIBDRM)                   += hwcontext_drm.o
 OBJS-$(CONFIG_MACOS_KPERF)              += macos_kperf.o
 OBJS-$(CONFIG_MEDIACODEC)               += hwcontext_mediacodec.o
+OBJS-$(CONFIG_NI_QUADRA)                += hwcontext_ni_quad.o
 OBJS-$(CONFIG_OHCODEC)                  += hwcontext_oh.o
 OBJS-$(CONFIG_OPENCL)                   += hwcontext_opencl.o
 OBJS-$(CONFIG_QSV)                      += hwcontext_qsv.o
@@ -243,6 +245,7 @@ SKIPHEADERS-$(CONFIG_DXVA2)            += hwcontext_dxva2.h
 SKIPHEADERS-$(CONFIG_AMF)              += hwcontext_amf.h               \
                                           hwcontext_amf_internal.h
 SKIPHEADERS-$(CONFIG_QSV)              += hwcontext_qsv.h
+SKIPHEADERS-$(CONFIG_NI_QUADRA)        += hwcontext_ni_quad.h
 SKIPHEADERS-$(CONFIG_OPENCL)           += hwcontext_opencl.h
 SKIPHEADERS-$(CONFIG_VAAPI)            += hwcontext_vaapi.h
 SKIPHEADERS-$(CONFIG_VIDEOTOOLBOX)     += hwcontext_videotoolbox.h
diff --git a/libavutil/hwcontext.c b/libavutil/hwcontext.c
index 83bd7457e8..84de532b02 100644
--- a/libavutil/hwcontext.c
+++ b/libavutil/hwcontext.c
@@ -54,6 +54,9 @@ static const HWContextType * const hw_table[] = {
 #if CONFIG_VAAPI
     &ff_hwcontext_type_vaapi,
 #endif
+#if CONFIG_NI_QUADRA
+    &ff_hwcontext_type_ni_quadra,
+#endif
 #if CONFIG_VDPAU
     &ff_hwcontext_type_vdpau,
 #endif
@@ -83,6 +86,7 @@ static const char *const hw_type_names[] = {
     [AV_HWDEVICE_TYPE_D3D12VA] = "d3d12va",
     [AV_HWDEVICE_TYPE_OPENCL] = "opencl",
     [AV_HWDEVICE_TYPE_QSV]    = "qsv",
+    [AV_HWDEVICE_TYPE_NI_QUADRA] = "ni_quadra",
     [AV_HWDEVICE_TYPE_VAAPI]  = "vaapi",
     [AV_HWDEVICE_TYPE_VDPAU]  = "vdpau",
     [AV_HWDEVICE_TYPE_VIDEOTOOLBOX] = "videotoolbox",
diff --git a/libavutil/hwcontext.h b/libavutil/hwcontext.h
index 29374cf0a7..3107c911ef 100644
--- a/libavutil/hwcontext.h
+++ b/libavutil/hwcontext.h
@@ -30,6 +30,7 @@ enum AVHWDeviceType {
     AV_HWDEVICE_TYPE_CUDA,
     AV_HWDEVICE_TYPE_VAAPI,
     AV_HWDEVICE_TYPE_DXVA2,
+    AV_HWDEVICE_TYPE_NI_QUADRA,
     AV_HWDEVICE_TYPE_QSV,
     AV_HWDEVICE_TYPE_VIDEOTOOLBOX,
     AV_HWDEVICE_TYPE_D3D11VA,
diff --git a/libavutil/hwcontext_internal.h b/libavutil/hwcontext_internal.h
index dcfdc2016a..b2193d3936 100644
--- a/libavutil/hwcontext_internal.h
+++ b/libavutil/hwcontext_internal.h
@@ -159,6 +159,7 @@ extern const HWContextType ff_hwcontext_type_dxva2;
 extern const HWContextType ff_hwcontext_type_opencl;
 extern const HWContextType ff_hwcontext_type_qsv;
 extern const HWContextType ff_hwcontext_type_vaapi;
+extern const HWContextType ff_hwcontext_type_ni_quadra;
 extern const HWContextType ff_hwcontext_type_vdpau;
 extern const HWContextType ff_hwcontext_type_videotoolbox;
 extern const HWContextType ff_hwcontext_type_mediacodec;
diff --git a/libavutil/hwcontext_ni_quad.c b/libavutil/hwcontext_ni_quad.c
new file mode 100644
index 0000000000..903ee0a253
--- /dev/null
+++ b/libavutil/hwcontext_ni_quad.c
@@ -0,0 +1,1257 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+
+#include <fcntl.h>
+#include <string.h>
+#if HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
+#include "avassert.h"
+#include "buffer.h"
+#include "common.h"
+#include "hwcontext.h"
+#include "hwcontext_internal.h"
+#include "hwcontext_ni_quad.h"
+#include "libavutil/imgutils.h"
+#include "mem.h"
+#include "pixdesc.h"
+#include "pixfmt.h"
+
+// Software pixel formats the Quadra uploader accepts; copied into
+// AVHWFramesConstraints.valid_sw_formats by ni_frames_get_constraints().
+// Read-only table, hence const.
+static const enum AVPixelFormat supported_pixel_formats[] = {
+    AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUYV422, AV_PIX_FMT_UYVY422,
+    AV_PIX_FMT_NV12,    AV_PIX_FMT_ARGB,    AV_PIX_FMT_RGBA,
+    AV_PIX_FMT_ABGR,    AV_PIX_FMT_BGRA,    AV_PIX_FMT_YUV420P10LE,
+    AV_PIX_FMT_NV16,    AV_PIX_FMT_BGR0,    AV_PIX_FMT_P010LE
+};
+
+// AVBuffer free callback for Quadra hardware frames. "data" is the host-side
+// niFrameSurface1_t descriptor; a non-zero frame index means a device-side
+// frame is still held and must be recycled back to the card's pool before
+// the host descriptor memory is released.
+static inline void ni_frame_free(void *opaque, uint8_t *data)
+{
+    if (data) {
+        niFrameSurface1_t* p_data3 = (niFrameSurface1_t*)data;
+        if (p_data3->ui16FrameIdx != 0) {
+            // return the HW frame to the device-side pool
+            ni_hwframe_buffer_recycle(p_data3, p_data3->device_handle);
+        }
+        ni_aligned_free(p_data3);
+    }
+}
+
+/*
+ * HWContextType.device_create: enumerate NETINT cards on the host and open
+ * one handle per encoder card, stored in cards[] indexed by module id.
+ *
+ * "device", when given, is the uploader device ID as a decimal string:
+ * -1 selects load balancing by pixel rate, IDs below -1 are rejected.
+ * Returns 0 on success or a negative AVERROR code.
+ */
+static int ni_device_create(AVHWDeviceContext *ctx, const char *device,
+                            AVDictionary *opts, int flags)
+{
+    AVNIDeviceContext *ni_hw_ctx;
+    char *blk_name;
+    int i, module_id = 0, ret = 0;
+    ni_device_handle_t fd;
+    uint32_t max_io_size = NI_INVALID_IO_SIZE;
+    ni_device_t *p_ni_devices = NULL;
+
+    p_ni_devices = av_calloc(1, sizeof(ni_device_t));
+    if(p_ni_devices == NULL) {
+        av_log(ctx, AV_LOG_ERROR, "could not allocate memory for p_ni_devices in %s\n", __func__);
+        return AVERROR(ENOMEM); // allocation failure: report ENOMEM, not "unknown"
+    }
+
+    ni_hw_ctx = (AVNIDeviceContext *)ctx->hwctx;
+    ni_hw_ctx->uploader_handle = NI_INVALID_DEVICE_HANDLE;
+    ni_hw_ctx->uploader_ID = -2; // -1 is load balance by pixel rate,
+                                 // default -2 invalid
+
+    if (device) {
+        /* parse device string and fail if incorrect */
+        av_log(ctx, AV_LOG_VERBOSE, "%s %s\n", __func__, device);
+        ni_hw_ctx->uploader_ID = atoi(device);
+        av_log(ctx, AV_LOG_DEBUG, "%s: given uploader ID %d\n", __func__,
+               ni_hw_ctx->uploader_ID);
+        if (ni_hw_ctx->uploader_ID < -1) {
+            av_log(ctx, AV_LOG_ERROR, "%s: uploader ID %d must be >= -1.\n",
+                   __func__, ni_hw_ctx->uploader_ID);
+            ret =  AVERROR_UNKNOWN;
+            LRETURN;
+        }
+    }
+
+    for (i = 0; i < NI_MAX_DEVICE_CNT; i++) {
+        ni_hw_ctx->cards[i] = NI_INVALID_DEVICE_HANDLE;
+    }
+
+    /* Scan all cards on the host, only look at NETINT cards */
+    if (ni_rsrc_list_all_devices(p_ni_devices) == NI_RETCODE_SUCCESS) {
+        // Note: this only checks for Netint encoders
+        for (i = 0; i < p_ni_devices->xcoder_cnt[NI_DEVICE_TYPE_ENCODER]; i++) {
+            blk_name =
+                &(p_ni_devices->xcoders[NI_DEVICE_TYPE_ENCODER][i].blk_name[0]);
+            // one-to-one correspondence between card index and module_id
+            module_id = p_ni_devices->xcoders[NI_DEVICE_TYPE_ENCODER][i].module_id;
+            av_log(ctx, AV_LOG_DEBUG, "%s blk name %s\n", __func__, blk_name);
+            fd = ni_device_open(blk_name, &max_io_size);
+            if (fd != NI_INVALID_DEVICE_HANDLE) {
+                // NB: slots whose open fails stay NI_INVALID_DEVICE_HANDLE,
+                // so cards[] may end up sparse.
+                ni_hw_ctx->cards[module_id] = fd;
+            }
+        }
+    } else {
+        ret = AVERROR_UNKNOWN;
+    }
+END:
+    av_freep(&p_ni_devices);
+    return ret;
+}
+
+/*
+ * HWContextType.device_uninit: close the uploader session handle and every
+ * per-card handle opened by ni_device_create().
+ */
+static void ni_device_uninit(AVHWDeviceContext *ctx)
+{
+    AVNIDeviceContext *ni_hw_ctx;
+    int i;
+
+    ni_hw_ctx = (AVNIDeviceContext *)ctx->hwctx;
+
+    av_log(ctx, AV_LOG_VERBOSE, "%s\n", __func__);
+
+    if (ni_hw_ctx->uploader_handle != NI_INVALID_DEVICE_HANDLE) {
+        ni_device_close(ni_hw_ctx->uploader_handle);
+        ni_hw_ctx->uploader_handle = NI_INVALID_DEVICE_HANDLE;
+    }
+
+    // cards[] is indexed by module_id and may contain gaps (slots whose
+    // ni_device_open() failed remain invalid), so scan the entire array
+    // instead of stopping at the first invalid handle — breaking early
+    // would leak any handles stored past a gap.
+    for (i = 0; i < NI_MAX_DEVICE_CNT; i++) {
+        ni_device_handle_t fd = ni_hw_ctx->cards[i];
+        if (fd != NI_INVALID_DEVICE_HANDLE) {
+            ni_hw_ctx->cards[i] = NI_INVALID_DEVICE_HANDLE;
+            ni_device_close(fd);
+        }
+    }
+}
+
+/*
+ * HWContextType.frames_get_constraints: report the supported software
+ * formats (from supported_pixel_formats[]) and the single hardware format
+ * AV_PIX_FMT_NI_QUAD. Both lists are AV_PIX_FMT_NONE-terminated.
+ */
+static int ni_frames_get_constraints(AVHWDeviceContext *ctx,
+                                     const void *hwconfig,
+                                     AVHWFramesConstraints *constraints)
+{
+    const int nb_sw_fmts = FF_ARRAY_ELEMS(supported_pixel_formats);
+    int i;
+
+    constraints->valid_sw_formats = av_malloc_array(nb_sw_fmts + 1,
+                                                    sizeof(*constraints->valid_sw_formats));
+    if (!constraints->valid_sw_formats)
+        return AVERROR(ENOMEM);
+
+    for (i = 0; i < nb_sw_fmts; i++)
+        constraints->valid_sw_formats[i] = supported_pixel_formats[i];
+    constraints->valid_sw_formats[nb_sw_fmts] = AV_PIX_FMT_NONE;
+
+    constraints->valid_hw_formats = av_malloc_array(2, sizeof(*constraints->valid_hw_formats));
+    if (!constraints->valid_hw_formats)
+        return AVERROR(ENOMEM);
+
+    constraints->valid_hw_formats[0] = AV_PIX_FMT_NI_QUAD;
+    constraints->valid_hw_formats[1] = AV_PIX_FMT_NONE;
+
+    return 0;
+}
+
+/*
+ * HWFramesContext.get_buffer: allocate a host-side frame descriptor buffer
+ * for one hardware frame. frame->buf[0] wraps the libxcoder-allocated data
+ * (freed/recycled via ni_frame_free) and frame->data[3] points at the
+ * trailing niFrameSurface1_t descriptor region, per AV_PIX_FMT_NI_QUAD
+ * conventions.
+ */
+static int ni_get_buffer(AVHWFramesContext *ctx, AVFrame *frame)
+{
+    int ret = 0;
+    uint8_t *buf;
+    uint32_t buf_size;
+    ni_frame_t *xfme;
+    AVNIFramesContext *f_hwctx = (AVNIFramesContext*) ctx->hwctx;
+    ni_session_data_io_t dst_session_io_data;
+    ni_session_data_io_t * p_dst_session_data = &dst_session_io_data;
+    bool isnv12frame = (ctx->sw_format == AV_PIX_FMT_NV12 ||
+                        ctx->sw_format == AV_PIX_FMT_P010LE);
+
+    av_log(ctx, AV_LOG_TRACE, "hwcontext_ni.c:ni_get_buffer()\n");
+
+    // alloc dest avframe buff
+    memset(p_dst_session_data, 0, sizeof(dst_session_io_data));
+    ret = ni_frame_buffer_alloc(&p_dst_session_data->data.frame, ctx->width,
+                                ctx->height, 0, 1, // codec type does not matter, metadata exists
+                                f_hwctx->api_ctx.bit_depth_factor, 1, !isnv12frame);
+    if (ret != 0) {
+        return AVERROR(ENOMEM);
+    }
+
+    xfme = &p_dst_session_data->data.frame;
+    buf_size = xfme->data_len[0] + xfme->data_len[1] +
+               xfme->data_len[2] + xfme->data_len[3];
+    buf = xfme->p_data[0];
+    memset(buf, 0, buf_size);
+    frame->buf[0] = av_buffer_create(buf, buf_size, ni_frame_free, NULL, 0);
+    if (!frame->buf[0]) {
+        // av_buffer_create() did not take ownership: release the libxcoder
+        // frame buffer here or it leaks.
+        ni_frame_buffer_free(&p_dst_session_data->data.frame);
+        return AVERROR(ENOMEM);
+    }
+    // surface descriptor lives after the plane data in the same buffer
+    frame->data[3] = xfme->p_buffer + xfme->data_len[0] + xfme->data_len[1] +
+                     xfme->data_len[2];
+    frame->format = AV_PIX_FMT_NI_QUAD;
+    frame->width = ctx->width;
+    frame->height = ctx->height;
+
+    return 0;
+}
+
+/*
+ * HWFramesContext.transfer_get_formats: transfers in either direction only
+ * support the frames context's own software format. Returns an
+ * AV_PIX_FMT_NONE-terminated list owned by the caller.
+ */
+static int ni_transfer_get_formats(AVHWFramesContext *ctx,
+                                   enum AVHWFrameTransferDirection dir,
+                                   enum AVPixelFormat **formats)
+{
+    enum AVPixelFormat *pix_fmts = av_malloc_array(2, sizeof(*pix_fmts));
+
+    if (!pix_fmts)
+        return AVERROR(ENOMEM);
+
+    pix_fmts[0] = ctx->sw_format;
+    pix_fmts[1] = AV_PIX_FMT_NONE;
+    *formats = pix_fmts;
+
+    return 0;
+}
+
+/*
+ * HWFramesContext.uninit: tear down the upload session and its resources.
+ * Only frames contexts created for upload (uploader_device_id set and a
+ * non-negative pool size) own a session, a source frame buffer and the
+ * internal surface arrays; decoder-owned contexts only clear the session
+ * context struct.
+ */
+static void ni_frames_uninit(AVHWFramesContext *ctx)
+{
+    AVNIFramesContext *f_hwctx = (AVNIFramesContext*) ctx->hwctx;
+    int dev_dec_idx = f_hwctx->uploader_device_id; //Supplied by init_hw_device ni=<name>:<id> or ni_hwupload=<id>
+
+    av_log(ctx, AV_LOG_DEBUG, "%s: only close if upload instance, poolsize=%d "
+                              "devid=%d\n",
+                              __func__, ctx->initial_pool_size, dev_dec_idx);
+
+    // -2 means "no uploader selected" (see ni_frames_init), so this branch
+    // runs only for upload-owned contexts.
+    if (dev_dec_idx != -2 && ctx->initial_pool_size >= 0) {
+        // free the staging frame buffer if any of its sub-buffers was allocated
+        if (f_hwctx->src_session_io_data.data.frame.buffer_size
+            || f_hwctx->src_session_io_data.data.frame.metadata_buffer_size
+            || f_hwctx->src_session_io_data.data.frame.start_buffer_size) {
+            av_log(ctx, AV_LOG_DEBUG, "%s:free upload src frame buffer\n",
+                 __func__);
+            ni_frame_buffer_free(&f_hwctx->src_session_io_data.data.frame);
+        }
+        av_log(ctx, AV_LOG_VERBOSE, "SessionID = %d!\n", f_hwctx->api_ctx.session_id);
+        if (f_hwctx->api_ctx.session_id != NI_INVALID_SESSION_ID) {
+            ni_device_session_close(&f_hwctx->api_ctx, 1, NI_DEVICE_TYPE_UPLOAD);
+        }
+        ni_device_session_context_clear(&f_hwctx->api_ctx);
+
+        //only upload frames init allocates these ones
+        av_freep(&f_hwctx->surface_ptrs);
+        av_freep(&f_hwctx->surfaces_internal);
+    } else {
+        ni_device_session_context_clear(&f_hwctx->api_ctx);
+    }
+
+    if (f_hwctx->suspended_device_handle != NI_INVALID_DEVICE_HANDLE) {
+        av_log(ctx, AV_LOG_DEBUG, "%s: close file handle, =%d\n",
+               __func__, f_hwctx->suspended_device_handle);
+        ni_device_close(f_hwctx->suspended_device_handle);
+        f_hwctx->suspended_device_handle = NI_INVALID_DEVICE_HANDLE;
+    }
+}
+
+/*
+ * AVBufferPool alloc callback: hand out entries from the fixed,
+ * preallocated surfaces_internal[] array. The returned buffer does not own
+ * its memory (no free callback) — the array is released in
+ * ni_frames_uninit(). Returns NULL once the pool is exhausted.
+ */
+static AVBufferRef *ni_pool_alloc(void *opaque, size_t size)
+{
+    AVHWFramesContext *hwfc = (AVHWFramesContext*)opaque;
+    AVNIFramesContext *hwctx = (AVNIFramesContext*) hwfc->hwctx;
+    niFrameSurface1_t *surf;
+
+    if (hwctx->nb_surfaces_used >= hwctx->nb_surfaces)
+        return NULL;
+
+    surf = &hwctx->surfaces_internal[hwctx->nb_surfaces_used++];
+    return av_buffer_create((uint8_t*)surf, sizeof(*hwctx->surfaces),
+                            NULL, NULL, 0);
+}
+
+/*
+ * Initialize one pool surface descriptor with placeholder values; the real
+ * contents are filled in when a frame is actually uploaded. Always
+ * succeeds (returns 0).
+ */
+static int ni_init_surface(AVHWFramesContext *ctx, niFrameSurface1_t *p_surf)
+{
+    /* Dummy values — this data is never read before being overwritten. */
+    p_surf->src_cpu         = 0;
+    p_surf->output_idx      = 0;
+    p_surf->encoding_type   = 0;
+    p_surf->bit_depth       = 0;
+    p_surf->device_handle   = 0;
+    p_surf->ui32nodeAddress = 0;
+    p_surf->ui16session_ID  = 0;
+    p_surf->ui16FrameIdx    = 0;
+
+    return 0;
+}
+
+/*
+ * Create the internal fixed-size surface pool backing upload frames.
+ * Requires a positive ctx->initial_pool_size. surfaces_internal is owned
+ * by the frames context and freed in ni_frames_uninit(); partial-failure
+ * paths rely on that for cleanup.
+ * Returns 0 on success or a negative AVERROR code.
+ */
+static int ni_init_pool(AVHWFramesContext *ctx)
+{
+    AVNIFramesContext *f_hwctx = (AVNIFramesContext*) ctx->hwctx;
+    int i, ret;
+
+    av_log(ctx, AV_LOG_VERBOSE, "ctx->initial_pool_size = %d\n", ctx->initial_pool_size);
+
+    if (ctx->initial_pool_size <= 0) {
+        av_log(ctx, AV_LOG_ERROR, "NI requires a fixed frame pool size\n");
+        return AVERROR(EINVAL);
+    }
+
+    f_hwctx->surfaces_internal = av_calloc(ctx->initial_pool_size,
+                                           sizeof(*f_hwctx->surfaces_internal));
+    if (!f_hwctx->surfaces_internal) {
+        return AVERROR(ENOMEM);
+    }
+
+    for (i = 0; i < ctx->initial_pool_size; i++) {
+        ret = ni_init_surface(ctx, &f_hwctx->surfaces_internal[i]);
+        if (ret < 0) {
+            return ret;
+        }
+    }
+
+    // pool entries are slices of surfaces_internal handed out by ni_pool_alloc
+    ffhwframesctx(ctx)->pool_internal =
+        av_buffer_pool_init2(sizeof(niFrameSurface1_t), ctx, ni_pool_alloc, NULL);
+    if (!ffhwframesctx(ctx)->pool_internal) {
+        return AVERROR(ENOMEM);
+    }
+
+    f_hwctx->surfaces = f_hwctx->surfaces_internal;
+    f_hwctx->nb_surfaces = ctx->initial_pool_size;
+
+    return 0;
+}
+
+/*
+ * Initialize the libxcoder session context embedded in the frames context
+ * and propagate FFmpeg's log level to libxcoder.
+ * Returns 0 on success or a negative AVERROR code.
+ */
+static int ni_init_internal_session(AVHWFramesContext *ctx)
+{
+    AVNIFramesContext *f_hwctx = (AVNIFramesContext*) ctx->hwctx;
+    ni_log_set_level(ff_to_ni_log_level(av_log_get_level()));
+    av_log(ctx, AV_LOG_INFO, "hwcontext_ni:ni_init_internal_session()\n");
+    if (ni_device_session_context_init(&(f_hwctx->api_ctx)) < 0) {
+        av_log(ctx, AV_LOG_ERROR, "ni init context failure\n");
+        // FFmpeg convention: return an AVERROR code, not a bare -1
+        return AVERROR_EXTERNAL;
+    }
+
+    return 0;
+}
+
+/*
+ * Reset the split (multi-output) context: every output slot starts with
+ * the full frame dimensions and an invalid (-1) format marker.
+ */
+static void init_split_rsrc(AVNIFramesContext *f_hwctx, int w, int h)
+{
+    ni_split_context_t *sctx = &f_hwctx->split_ctx;
+    int idx;
+
+    memset(sctx, 0, sizeof(*sctx));
+    for (idx = 0; idx < 3; idx++) {
+        sctx->w[idx] = w;
+        sctx->h[idx] = h;
+        sctx->f[idx] = -1;
+    }
+}
+
+/*
+ * HWFramesContext.init: configure the session context from sw_format and
+ * dimensions, open an upload session on the selected Quadra device, create
+ * the device-side frame pool and (if needed) the host-side surface pool.
+ * Decoder-owned contexts (initial_pool_size < 0) return after session
+ * context init without opening an upload session.
+ * Returns 0 on success or a negative AVERROR code.
+ */
+static int ni_frames_init(AVHWFramesContext *ctx) //hwupload runs this on hwupload_config_output
+{
+    AVNIFramesContext *f_hwctx = (AVNIFramesContext*) ctx->hwctx;
+    AVNIDeviceContext *device_hwctx = (AVNIDeviceContext*) ctx->device_ctx->hwctx;
+    int linesize_aligned,height_aligned;
+    int pool_size,ret;
+
+    av_log(ctx, AV_LOG_INFO, "%s: Enter, supplied poolsize = %d, devid=%d\n",
+           __func__, ctx->initial_pool_size, device_hwctx->uploader_ID);
+
+    f_hwctx->suspended_device_handle = NI_INVALID_DEVICE_HANDLE;
+    f_hwctx->uploader_device_id = -2; // -1 is load balance by pixel rate,
+                                      // default -2 invalid
+    pool_size = ctx->initial_pool_size;
+    if (device_hwctx->uploader_ID < -1) {
+        if (pool_size > -1) { // ffmpeg does not specify init_hw_device for decoder
+                              // - so decoder device_hwctx->uploader_ID is always -1
+            av_log(ctx, AV_LOG_INFO, "%s no uploader device selected!\n",
+                   __func__);
+            return AVERROR(EINVAL);
+        }
+    }
+
+    ret = ni_init_internal_session(ctx);
+    if (ret < 0) {
+        return AVERROR(EINVAL);
+    }
+
+    init_split_rsrc(f_hwctx, ctx->width, ctx->height);
+    if (pool_size <= -1) { // None upload init returns here
+        av_log(ctx, AV_LOG_INFO, "%s: poolsize code %d, this code requires no host pool\n",
+               __func__, pool_size);
+        return ret;
+    } else if (pool_size == 0) {
+        pool_size = ctx->initial_pool_size = 3;
+        av_log(ctx, AV_LOG_INFO, "%s: Pool_size autoset to %d\n", __func__, pool_size);
+    }
+
+    /*Kept in AVNIFramesContext for future reference, the AVNIDeviceContext data member gets overwritten*/
+    f_hwctx->uploader_device_id = device_hwctx->uploader_ID;
+
+    if ((ctx->width & 0x1) || (ctx->height & 0x1)) {
+        av_log(ctx, AV_LOG_ERROR, "Odd resolution %dx%d not permitted\n",
+               ctx->width, ctx->height);
+        return AVERROR(EINVAL);
+    }
+
+    // device requires even dimensions; round up width and height
+    linesize_aligned = NI_VPU_CEIL(ctx->width, 2);
+    ctx->width = linesize_aligned;
+
+    height_aligned = ctx->height;
+    ctx->height = NI_VPU_CEIL(height_aligned, 2);
+
+    f_hwctx->api_ctx.active_video_width = ctx->width;
+    f_hwctx->api_ctx.active_video_height = ctx->height;
+
+    // map sw_format to the libxcoder pixel format plus bit-depth parameters
+    switch (ctx->sw_format) {
+        case AV_PIX_FMT_YUV420P:
+            f_hwctx->api_ctx.bit_depth_factor = 1;
+            f_hwctx->api_ctx.src_bit_depth = 8;
+            f_hwctx->api_ctx.pixel_format = NI_PIX_FMT_YUV420P;
+            break;
+        case AV_PIX_FMT_YUV420P10LE:
+            f_hwctx->api_ctx.bit_depth_factor = 2;
+            f_hwctx->api_ctx.src_bit_depth = 10;
+            f_hwctx->api_ctx.src_endian = NI_FRAME_LITTLE_ENDIAN;
+            f_hwctx->api_ctx.pixel_format = NI_PIX_FMT_YUV420P10LE;
+            break;
+        case AV_PIX_FMT_NV12:
+            f_hwctx->api_ctx.bit_depth_factor = 1;
+            f_hwctx->api_ctx.src_bit_depth = 8;
+            f_hwctx->api_ctx.pixel_format = NI_PIX_FMT_NV12;
+            break;
+        case AV_PIX_FMT_P010LE:
+            f_hwctx->api_ctx.bit_depth_factor = 2;
+            f_hwctx->api_ctx.src_bit_depth = 10;
+            f_hwctx->api_ctx.pixel_format = NI_PIX_FMT_P010LE;
+            f_hwctx->api_ctx.src_endian = NI_FRAME_LITTLE_ENDIAN;
+            break;
+        case AV_PIX_FMT_RGBA:
+            f_hwctx->api_ctx.bit_depth_factor = 4;
+            f_hwctx->api_ctx.src_bit_depth    = 32;
+            f_hwctx->api_ctx.src_endian       = NI_FRAME_LITTLE_ENDIAN;
+            f_hwctx->api_ctx.pixel_format     = NI_PIX_FMT_RGBA;
+            break;
+        case AV_PIX_FMT_BGRA:
+            f_hwctx->api_ctx.bit_depth_factor = 4;
+            f_hwctx->api_ctx.src_bit_depth    = 32;
+            f_hwctx->api_ctx.src_endian       = NI_FRAME_LITTLE_ENDIAN;
+            f_hwctx->api_ctx.pixel_format     = NI_PIX_FMT_BGRA;
+            break;
+        case AV_PIX_FMT_ABGR:
+            f_hwctx->api_ctx.bit_depth_factor = 4;
+            f_hwctx->api_ctx.src_bit_depth    = 32;
+            f_hwctx->api_ctx.src_endian       = NI_FRAME_LITTLE_ENDIAN;
+            f_hwctx->api_ctx.pixel_format     = NI_PIX_FMT_ABGR;
+            break;
+        case AV_PIX_FMT_ARGB:
+            f_hwctx->api_ctx.bit_depth_factor = 4;
+            f_hwctx->api_ctx.src_bit_depth    = 32;
+            f_hwctx->api_ctx.src_endian       = NI_FRAME_LITTLE_ENDIAN;
+            f_hwctx->api_ctx.pixel_format     = NI_PIX_FMT_ARGB;
+            break;
+        case AV_PIX_FMT_BGR0:
+            f_hwctx->api_ctx.bit_depth_factor = 4;
+            f_hwctx->api_ctx.src_bit_depth    = 32;
+            f_hwctx->api_ctx.src_endian       = NI_FRAME_LITTLE_ENDIAN;
+            f_hwctx->api_ctx.pixel_format     = NI_PIX_FMT_BGR0;
+            break;
+        case AV_PIX_FMT_YUYV422:
+            f_hwctx->api_ctx.bit_depth_factor = 1;
+            f_hwctx->api_ctx.src_bit_depth    = 8;
+            f_hwctx->api_ctx.src_endian       = NI_FRAME_LITTLE_ENDIAN;
+            f_hwctx->api_ctx.pixel_format     = NI_PIX_FMT_YUYV422;
+            break;
+        case AV_PIX_FMT_UYVY422:
+            f_hwctx->api_ctx.bit_depth_factor = 1;
+            f_hwctx->api_ctx.src_bit_depth    = 8;
+            f_hwctx->api_ctx.src_endian       = NI_FRAME_LITTLE_ENDIAN;
+            f_hwctx->api_ctx.pixel_format     = NI_PIX_FMT_UYVY422;
+            break;
+        case AV_PIX_FMT_NV16:
+            f_hwctx->api_ctx.bit_depth_factor = 1;
+            f_hwctx->api_ctx.src_bit_depth    = 8;
+            f_hwctx->api_ctx.src_endian       = NI_FRAME_LITTLE_ENDIAN;
+            f_hwctx->api_ctx.pixel_format     = NI_PIX_FMT_NV16;
+            break;
+        default:
+            av_log(ctx, AV_LOG_ERROR, "Pixel format not supported by device.\n");
+            return AVERROR(EINVAL);
+    }
+
+    if (ctx->width > NI_MAX_RESOLUTION_WIDTH ||
+        ctx->height > NI_MAX_RESOLUTION_HEIGHT ||
+        ctx->width * ctx->height > NI_MAX_RESOLUTION_AREA) {
+        av_log(ctx, AV_LOG_ERROR, "Error XCoder resolution %dx%d not supported\n",
+               ctx->width, ctx->height);
+        av_log(ctx, AV_LOG_ERROR, "Max Supported Width: %d Height %d Area %d\n",
+               NI_MAX_RESOLUTION_WIDTH, NI_MAX_RESOLUTION_HEIGHT, NI_MAX_RESOLUTION_AREA);
+        return AVERROR_EXTERNAL;
+    } else if (f_hwctx->uploader_device_id >= -1) {
+        // leave it to ni_device_session_open to handle uploader session open
+        // based on api_ctx.hw_id set to proper value
+    } else {
+        av_log(ctx, AV_LOG_ERROR, "Error XCoder command line options\n");
+        return AVERROR(EINVAL);
+    }
+
+    av_log(ctx, AV_LOG_VERBOSE,
+           "pixel sw_format=%d width = %d height = %d outformat=%d "
+           "uploader_device_id=%d\n",
+           ctx->sw_format, ctx->width, ctx->height, ctx->format,
+           f_hwctx->uploader_device_id);
+
+    f_hwctx->api_ctx.hw_id = f_hwctx->uploader_device_id;
+    f_hwctx->api_ctx.keep_alive_timeout = f_hwctx->keep_alive_timeout;
+    if (0 == f_hwctx->api_ctx.keep_alive_timeout) {
+        f_hwctx->api_ctx.keep_alive_timeout = NI_DEFAULT_KEEP_ALIVE_TIMEOUT;
+    }
+
+    f_hwctx->api_ctx.framerate.framerate_num = f_hwctx->framerate.num;
+    f_hwctx->api_ctx.framerate.framerate_denom = f_hwctx->framerate.den;
+
+    ret = ni_device_session_open(&f_hwctx->api_ctx, NI_DEVICE_TYPE_UPLOAD);
+    if (ret != NI_RETCODE_SUCCESS) {
+        av_log(ctx, AV_LOG_ERROR, "Error Something wrong in xcoder open\n");
+        ni_frames_uninit(ctx);
+        return AVERROR_EXTERNAL;
+    } else {
+        av_log(ctx, AV_LOG_VERBOSE,
+               "XCoder %s.%d (inst: %d) opened successfully\n",
+               f_hwctx->api_ctx.dev_xcoder_name, f_hwctx->api_ctx.hw_id,
+               f_hwctx->api_ctx.session_id);
+#ifndef _WIN32
+        // replace device_handle with blk_io_handle
+        ni_device_close(f_hwctx->api_ctx.device_handle);
+        f_hwctx->api_ctx.device_handle = f_hwctx->api_ctx.blk_io_handle;
+#endif
+        // save blk_io_handle for track
+        device_hwctx->uploader_handle = f_hwctx->api_ctx.blk_io_handle;
+    }
+    memset(&f_hwctx->src_session_io_data, 0, sizeof(ni_session_data_io_t));
+
+    // create the device-side frame pool for this upload session
+    ret = ni_device_session_init_framepool(&f_hwctx->api_ctx, pool_size, NI_UPLOADER_FLAG_LM);
+    if (ret < 0) {
+        return ret;
+    }
+
+    if (!ctx->pool) {
+        ret = ni_init_pool(ctx);
+        if (ret < 0) {
+            av_log(ctx, AV_LOG_ERROR, "Error creating an internal frame pool\n");
+            return ret;
+        }
+    }
+    return 0;
+}
+
+/*
+ * Copy a downloaded libxcoder frame into an AVFrame, plane by plane.
+ * The source strides/heights are reconstructed from the device's alignment
+ * rules per pixel format (e.g. 128-byte aligned planar YUV rows); each row
+ * copy is clamped to the smaller of src and dst linesize.
+ * Returns 0 on success or AVERROR(EINVAL) for unsupported formats.
+ */
+static int ni_to_avframe_copy(AVHWFramesContext *hwfc, AVFrame *dst,
+                              const ni_frame_t *src)
+{
+    int src_linesize[4], src_height[4];
+    int i, h, nb_planes;
+    uint8_t *src_line, *dst_line;
+
+    nb_planes = av_pix_fmt_count_planes(hwfc->sw_format);
+
+    // Device-side layout per format: linesizes follow the card's row
+    // alignment, heights follow the chroma subsampling.
+    switch (hwfc->sw_format) {
+    case AV_PIX_FMT_YUV420P:
+        src_linesize[0] = FFALIGN(dst->width, 128);
+        src_linesize[1] = FFALIGN(dst->width / 2, 128);
+        src_linesize[2] = src_linesize[1];
+        src_linesize[3] = 0;
+
+        src_height[0] = dst->height;
+        src_height[1] = FFALIGN(dst->height, 2) / 2;
+        src_height[2] = src_height[1];
+        src_height[3] = 0;
+        break;
+
+    case AV_PIX_FMT_YUV420P10LE:
+        src_linesize[0] = FFALIGN(dst->width * 2, 128);
+        src_linesize[1] = FFALIGN(dst->width, 128);
+        src_linesize[2] = src_linesize[1];
+        src_linesize[3] = 0;
+
+        src_height[0] = dst->height;
+        src_height[1] = FFALIGN(dst->height, 2) / 2;
+        src_height[2] = src_height[1];
+        src_height[3] = 0;
+        break;
+
+    case AV_PIX_FMT_NV12:
+        src_linesize[0] = FFALIGN(dst->width, 128);
+        src_linesize[1] = FFALIGN(dst->width, 128);
+        src_linesize[2] = 0;
+        src_linesize[3] = 0;
+
+        src_height[0] = dst->height;
+        src_height[1] = FFALIGN(dst->height, 2) / 2;
+        src_height[2] = 0;
+        src_height[3] = 0;
+        break;
+
+    case AV_PIX_FMT_NV16:
+        src_linesize[0] = FFALIGN(dst->width, 64);
+        src_linesize[1] = FFALIGN(dst->width, 64);
+        src_linesize[2] = 0;
+        src_linesize[3] = 0;
+
+        src_height[0] = dst->height;
+        src_height[1] = dst->height;
+        src_height[2] = 0;
+        src_height[3] = 0;
+        break;
+
+    case AV_PIX_FMT_YUYV422:
+    case AV_PIX_FMT_UYVY422:
+        // packed 4:2:2 — single plane, 2 bytes per pixel
+        src_linesize[0] = FFALIGN(dst->width, 16) * 2;
+        src_linesize[1] = 0;
+        src_linesize[2] = 0;
+        src_linesize[3] = 0;
+
+        src_height[0] = dst->height;
+        src_height[1] = 0;
+        src_height[2] = 0;
+        src_height[3] = 0;
+        break;
+
+    case AV_PIX_FMT_P010LE:
+        src_linesize[0] = FFALIGN(dst->width * 2, 128);
+        src_linesize[1] = FFALIGN(dst->width * 2, 128);
+        src_linesize[2] = 0;
+        src_linesize[3] = 0;
+
+        src_height[0] = dst->height;
+        src_height[1] = FFALIGN(dst->height, 2) / 2;
+        src_height[2] = 0;
+        src_height[3] = 0;
+        break;
+
+    case AV_PIX_FMT_RGBA:
+    case AV_PIX_FMT_BGRA:
+    case AV_PIX_FMT_ABGR:
+    case AV_PIX_FMT_ARGB:
+    case AV_PIX_FMT_BGR0:
+        // packed RGB — single plane, 4 bytes per pixel
+        src_linesize[0] = FFALIGN(dst->width, 16) * 4;
+        src_linesize[1] = 0;
+        src_linesize[2] = 0;
+        src_linesize[3] = 0;
+
+        src_height[0] = dst->height;
+        src_height[1] = 0;
+        src_height[2] = 0;
+        src_height[3] = 0;
+        break;
+
+    default:
+        av_log(hwfc, AV_LOG_ERROR, "Unsupported pixel format %s\n",
+               av_get_pix_fmt_name(hwfc->sw_format));
+        return AVERROR(EINVAL);
+    }
+
+    // row-by-row copy, clamped to the narrower of the two linesizes
+    for (i = 0; i < nb_planes; i++) {
+        dst_line = dst->data[i];
+        src_line = src->p_data[i];
+
+        for (h = 0; h < src_height[i]; h++) {
+            memcpy(dst_line, src_line,
+                   FFMIN(src_linesize[i], dst->linesize[i]));
+            dst_line += dst->linesize[i];
+            src_line += src_linesize[i];
+        }
+    }
+
+    return 0;
+}
+
+/*
+ * Copy an AVFrame into a libxcoder upload frame, padding each row to the
+ * device stride (by replicating the last sample) and extending the height
+ * to an even number of lines (by cloning the last line), as the hardware
+ * expects. Returns 0 on success or AVERROR(EINVAL) for unsupported formats.
+ *
+ * NOTE(review): nb_planes is derived from hwfc->sw_format while the switch
+ * keys on src->format — callers appear to guarantee these match; confirm.
+ */
+static int av_to_niframe_copy(AVHWFramesContext *hwfc, const int dst_stride[4],
+                              ni_frame_t *dst, const AVFrame *src) {
+    int src_height[4], hpad[4], vpad[4];
+    int i, j, h, nb_planes;
+    uint8_t *src_line, *dst_line, YUVsample, *sample, *dest;
+    uint16_t lastidx;
+    bool tenBit;
+
+    nb_planes = av_pix_fmt_count_planes(hwfc->sw_format);
+
+    switch (src->format) {
+    case AV_PIX_FMT_YUV420P:
+        hpad[0] = FFMAX(dst_stride[0] - src->linesize[0], 0);
+        hpad[1] = FFMAX(dst_stride[1] - src->linesize[1], 0);
+        hpad[2] = FFMAX(dst_stride[2] - src->linesize[2], 0);
+        hpad[3] = 0;
+
+        src_height[0] = src->height;
+        src_height[1] = FFALIGN(src->height, 2) / 2;
+        src_height[2] = FFALIGN(src->height, 2) / 2;
+        src_height[3] = 0;
+
+        vpad[0] = FFALIGN(src_height[0], 2) - src_height[0];
+        vpad[1] = FFALIGN(src_height[1], 2) - src_height[1];
+        vpad[2] = FFALIGN(src_height[2], 2) - src_height[2];
+        vpad[3] = 0;
+
+        tenBit = false;
+        break;
+
+    case AV_PIX_FMT_YUV420P10LE:
+        hpad[0] = FFMAX(dst_stride[0] - src->linesize[0], 0);
+        hpad[1] = FFMAX(dst_stride[1] - src->linesize[1], 0);
+        hpad[2] = FFMAX(dst_stride[2] - src->linesize[2], 0);
+        hpad[3] = 0;
+
+        src_height[0] = src->height;
+        src_height[1] = FFALIGN(src->height, 2) / 2;
+        src_height[2] = FFALIGN(src->height, 2) / 2;
+        src_height[3] = 0;
+
+        vpad[0] = FFALIGN(src_height[0], 2) - src_height[0];
+        vpad[1] = FFALIGN(src_height[1], 2) - src_height[1];
+        vpad[2] = FFALIGN(src_height[2], 2) - src_height[2];
+        vpad[3] = 0;
+
+        tenBit = true;
+        break;
+
+    case AV_PIX_FMT_NV12:
+        hpad[0] = FFMAX(dst_stride[0] - src->linesize[0], 0);
+        hpad[1] = FFMAX(dst_stride[1] - src->linesize[1], 0);
+        hpad[2] = 0;
+        hpad[3] = 0;
+
+        src_height[0] = src->height;
+        src_height[1] = FFALIGN(src->height, 2) / 2;
+        src_height[2] = 0;
+        src_height[3] = 0;
+
+        vpad[0] = FFALIGN(src_height[0], 2) - src_height[0];
+        vpad[1] = FFALIGN(src_height[1], 2) - src_height[1];
+        vpad[2] = 0;
+        vpad[3] = 0;
+
+        tenBit = false;
+        break;
+    case AV_PIX_FMT_NV16:
+        hpad[0] = 0;
+        hpad[1] = 0;
+        hpad[2] = 0;
+        hpad[3] = 0;
+
+        src_height[0] = src->height;
+        src_height[1] = src->height;
+        src_height[2] = 0;
+        src_height[3] = 0;
+
+        vpad[0] = 0;
+        vpad[1] = 0;
+        vpad[2] = 0;
+        vpad[3] = 0;
+
+        tenBit = false;
+        break;
+
+    case AV_PIX_FMT_P010LE:
+        hpad[0] = FFMAX(dst_stride[0] - src->linesize[0], 0);
+        hpad[1] = FFMAX(dst_stride[1] - src->linesize[1], 0);
+        hpad[2] = 0;
+        hpad[3] = 0;
+
+        src_height[0] = src->height;
+        src_height[1] = FFALIGN(src->height, 2) / 2;
+        src_height[2] = 0;
+        src_height[3] = 0;
+
+        vpad[0] = FFALIGN(src_height[0], 2) - src_height[0];
+        vpad[1] = FFALIGN(src_height[1], 2) - src_height[1];
+        vpad[2] = 0;
+        vpad[3] = 0;
+
+        tenBit = true;
+        break;
+
+    case AV_PIX_FMT_RGBA:
+    case AV_PIX_FMT_BGRA:
+    case AV_PIX_FMT_ABGR:
+    case AV_PIX_FMT_ARGB:
+    case AV_PIX_FMT_BGR0:
+        hpad[0] = FFMAX(dst_stride[0] - src->linesize[0], 0);
+        hpad[1] = 0;
+        hpad[2] = 0;
+        hpad[3] = 0;
+
+        src_height[0] = src->height;
+        src_height[1] = 0;
+        src_height[2] = 0;
+        src_height[3] = 0;
+
+        vpad[0] = 0;
+        vpad[1] = 0;
+        vpad[2] = 0;
+        vpad[3] = 0;
+
+        tenBit = false;
+        break;
+
+    case AV_PIX_FMT_YUYV422:
+    case AV_PIX_FMT_UYVY422:
+        hpad[0] = FFMAX(dst_stride[0] - src->linesize[0], 0);
+        hpad[1] = 0;
+        hpad[2] = 0;
+        hpad[3] = 0;
+
+        src_height[0] = src->height;
+        src_height[1] = 0;
+        src_height[2] = 0;
+        src_height[3] = 0;
+
+        vpad[0] = 0;
+        vpad[1] = 0;
+        vpad[2] = 0;
+        vpad[3] = 0;
+
+        tenBit = false;
+        break;
+
+    default:
+        av_log(hwfc, AV_LOG_ERROR, "Pixel format %s not supported\n",
+               av_get_pix_fmt_name(src->format));
+        /* Must bail out here: falling through would run the copy loops
+         * below with uninitialized src_height/hpad/vpad/tenBit. */
+        return AVERROR(EINVAL);
+    }
+
+    for (i = 0; i < nb_planes; i++) {
+        dst_line = dst->p_data[i];
+        src_line = src->data[i];
+
+        for (h = 0; h < src_height[i]; h++) {
+            memcpy(dst_line, src_line, FFMIN(src->linesize[i], dst_stride[i]));
+
+            if (hpad[i]) {
+                lastidx = src->linesize[i];
+
+                if (tenBit) {
+                    /* replicate the last 16-bit sample across the padding */
+                    sample = &src_line[lastidx - 2];
+                    dest   = &dst_line[lastidx];
+
+                    /* two bytes per sample */
+                    for (j = 0; j < hpad[i] / 2; j++) {
+                        memcpy(dest, sample, 2);
+                        dest += 2;
+                    }
+                } else {
+                    /* replicate the last byte across the padding */
+                    YUVsample = dst_line[lastidx - 1];
+                    memset(&dst_line[lastidx], YUVsample, hpad[i]);
+                }
+            }
+
+            src_line += src->linesize[i];
+            dst_line += dst_stride[i];
+        }
+
+        /* Extend the height by cloning the last line */
+        src_line = dst_line - dst_stride[i];
+        for (h = 0; h < vpad[i]; h++) {
+            memcpy(dst_line, src_line, dst_stride[i]);
+            dst_line += dst_stride[i];
+        }
+    }
+
+    return 0;
+}
+
+/* Download (hwdl) a single hardware surface from the Quadra device into a
+ * software AVFrame. src->data[3] carries the niFrameSurface1_t descriptor;
+ * dst receives sw_format pixels plus the copied frame properties.
+ * Returns 0 on success or a negative AVERROR code. */
+static int ni_hwdl_frame(AVHWFramesContext *hwfc, AVFrame *dst,
+                         const AVFrame *src)
+{
+    AVNIFramesContext *frames_hwctx = (AVNIFramesContext*) hwfc->hwctx;
+    niFrameSurface1_t *surface = (niFrameSurface1_t *)src->data[3];
+    ni_session_data_io_t io_data;
+    int pixel_format;
+    int rc;
+
+    memset(&io_data, 0, sizeof(io_data));
+
+    av_log(hwfc, AV_LOG_VERBOSE,
+           "%s handle %d trace ui16FrameIdx = [%d] SID %d\n", __func__,
+           surface->device_handle, surface->ui16FrameIdx,
+           surface->ui16session_ID);
+
+    av_log(hwfc, AV_LOG_DEBUG, "%s hwdl processed h/w = %d/%d\n", __func__,
+           src->height, src->width);
+
+    /* Map the frames context sw_format onto the libxcoder pixel format. */
+    switch (hwfc->sw_format) {
+    case AV_PIX_FMT_YUV420P:     pixel_format = NI_PIX_FMT_YUV420P;     break;
+    case AV_PIX_FMT_YUV420P10LE: pixel_format = NI_PIX_FMT_YUV420P10LE; break;
+    case AV_PIX_FMT_NV12:        pixel_format = NI_PIX_FMT_NV12;        break;
+    case AV_PIX_FMT_NV16:        pixel_format = NI_PIX_FMT_NV16;        break;
+    case AV_PIX_FMT_YUYV422:     pixel_format = NI_PIX_FMT_YUYV422;     break;
+    case AV_PIX_FMT_UYVY422:     pixel_format = NI_PIX_FMT_UYVY422;     break;
+    case AV_PIX_FMT_P010LE:      pixel_format = NI_PIX_FMT_P010LE;      break;
+    case AV_PIX_FMT_RGBA:        pixel_format = NI_PIX_FMT_RGBA;        break;
+    case AV_PIX_FMT_BGRA:        pixel_format = NI_PIX_FMT_BGRA;        break;
+    case AV_PIX_FMT_ABGR:        pixel_format = NI_PIX_FMT_ABGR;        break;
+    case AV_PIX_FMT_ARGB:        pixel_format = NI_PIX_FMT_ARGB;        break;
+    case AV_PIX_FMT_BGR0:        pixel_format = NI_PIX_FMT_BGR0;        break;
+    default:
+        av_log(hwfc, AV_LOG_ERROR, "Pixel format %s not supported\n",
+               av_get_pix_fmt_name(hwfc->sw_format));
+        return AVERROR(EINVAL);
+    }
+
+    rc = ni_frame_buffer_alloc_dl(&io_data.data.frame, src->width,
+                                  src->height, pixel_format);
+    if (rc != NI_RETCODE_SUCCESS) {
+        av_log(hwfc, AV_LOG_ERROR, "%s Cannot allocate ni_frame\n", __func__);
+        return AVERROR(ENOMEM);
+    }
+
+    /* Request an explicit (non-automatic) download of this surface. */
+    frames_hwctx->api_ctx.is_auto_dl = false;
+    rc = ni_device_session_hwdl(&frames_hwctx->api_ctx, &io_data, surface);
+    if (rc <= 0) {
+        av_log(hwfc, AV_LOG_DEBUG, "%s failed to retrieve frame\n", __func__);
+        ni_frame_buffer_free(&io_data.data.frame);
+        return AVERROR_EXTERNAL;
+    }
+
+    rc = ni_to_avframe_copy(hwfc, dst, &io_data.data.frame);
+    if (rc < 0) {
+        av_log(hwfc, AV_LOG_ERROR, "Can't copy frame %d\n", rc);
+        ni_frame_buffer_free(&io_data.data.frame);
+        return rc;
+    }
+
+    dst->format = hwfc->sw_format;
+    av_frame_copy_props(dst, src);
+    ni_frame_buffer_free(&io_data.data.frame);
+
+    return 0;
+}
+
+/**
+ * Upload (hwup) a software frame to the Quadra device.
+ *
+ * dst must be a hardware frame whose data[3] holds the target
+ * niFrameSurface1_t; src is the software frame to send. When the source
+ * layout already matches the device's zero-copy requirements the frame data
+ * is referenced directly, otherwise it is copied into a padded staging
+ * buffer first (av_to_niframe_copy).
+ *
+ * Returns 0 on success, a negative AVERROR code on failure.
+ */
+static int ni_hwup_frame(AVHWFramesContext *hwfc, AVFrame *dst, const AVFrame *src)
+{
+    AVNIFramesContext *f_hwctx = (AVNIFramesContext*) hwfc->hwctx;
+    ni_session_data_io_t *p_src_session_data;
+    niFrameSurface1_t *dst_surf;
+    int ret = 0;
+    int dst_stride[4];
+    int pixel_format;
+    bool isSemiPlanar;
+    int need_to_copy = 1;
+    size_t crop_right = 0, crop_bottom = 0;
+
+    dst_surf = (niFrameSurface1_t *)dst->data[3];
+
+    if (dst_surf == NULL || dst->hw_frames_ctx == NULL) {
+        av_log(hwfc, AV_LOG_ERROR, "Invalid hw frame\n");
+        return AVERROR(EINVAL);
+    }
+
+    p_src_session_data = &f_hwctx->src_session_io_data;
+
+    /* Per-format device strides (device alignment requirements) and the
+     * libxcoder pixel format / planar layout flag. */
+    switch (src->format) {
+    /* 8-bit YUV420 planar */
+    case AV_PIX_FMT_YUV420P:
+        dst_stride[0] = FFALIGN(src->width, 128);
+        dst_stride[1] = FFALIGN((src->width / 2), 128);
+        dst_stride[2] = dst_stride[1];
+        dst_stride[3] = 0;
+
+        pixel_format = NI_PIX_FMT_YUV420P;
+        isSemiPlanar = false;
+        break;
+
+    /* 10-bit YUV420 planar, little-endian, least significant bits */
+    case AV_PIX_FMT_YUV420P10LE:
+        dst_stride[0] = FFALIGN(src->width * 2, 128);
+        dst_stride[1] = FFALIGN(src->width, 128);
+        dst_stride[2] = dst_stride[1];
+        dst_stride[3] = 0;
+
+        pixel_format = NI_PIX_FMT_YUV420P10LE;
+        isSemiPlanar = false;
+        break;
+
+    /* 8-bit YUV420 semi-planar */
+    case AV_PIX_FMT_NV12:
+        dst_stride[0] = FFALIGN(src->width, 128);
+        dst_stride[1] = dst_stride[0];
+        dst_stride[2] = 0;
+        dst_stride[3] = 0;
+
+        pixel_format = NI_PIX_FMT_NV12;
+        isSemiPlanar = true;
+        break;
+
+    /* 8-bit YUV422 semi-planar */
+    case AV_PIX_FMT_NV16:
+        dst_stride[0] = FFALIGN(src->width, 64);
+        dst_stride[1] = dst_stride[0];
+        dst_stride[2] = 0;
+        dst_stride[3] = 0;
+
+        pixel_format = NI_PIX_FMT_NV16;
+        /* NOTE(review): NV16 is a semi-planar layout but the original code
+         * sets isSemiPlanar = false here (i.e. encoding_type PLANAR) —
+         * confirm against libxcoder expectations before changing. */
+        isSemiPlanar = false;
+        break;
+
+    /* 8-bit YUV422 packed, 2 bytes per pixel */
+    case AV_PIX_FMT_YUYV422:
+    case AV_PIX_FMT_UYVY422:
+        dst_stride[0] = FFALIGN(src->width, 16) * 2;
+        dst_stride[1] = 0;
+        dst_stride[2] = 0;
+        dst_stride[3] = 0;
+
+        pixel_format = (src->format == AV_PIX_FMT_YUYV422) ? NI_PIX_FMT_YUYV422
+                                                           : NI_PIX_FMT_UYVY422;
+        isSemiPlanar = false;
+        break;
+
+    /* 10-bit YUV420 semi-planar, little endian, most significant bits */
+    case AV_PIX_FMT_P010LE:
+        dst_stride[0] = FFALIGN(src->width * 2, 128);
+        dst_stride[1] = dst_stride[0];
+        dst_stride[2] = 0;
+        dst_stride[3] = 0;
+
+        pixel_format = NI_PIX_FMT_P010LE;
+        isSemiPlanar = true;
+        break;
+
+    /* 32-bit packed RGB; the scaler has a 16-pixel width / 64-byte stride
+     * alignment requirement */
+    case AV_PIX_FMT_RGBA:
+    case AV_PIX_FMT_BGRA:
+    case AV_PIX_FMT_ABGR:
+    case AV_PIX_FMT_ARGB:
+    case AV_PIX_FMT_BGR0:
+        dst_stride[0] = FFALIGN(src->width, 16) * 4;
+        dst_stride[1] = 0;
+        dst_stride[2] = 0;
+        dst_stride[3] = 0;
+
+        switch (src->format) {
+        case AV_PIX_FMT_RGBA: pixel_format = NI_PIX_FMT_RGBA; break;
+        case AV_PIX_FMT_BGRA: pixel_format = NI_PIX_FMT_BGRA; break;
+        case AV_PIX_FMT_ABGR: pixel_format = NI_PIX_FMT_ABGR; break;
+        case AV_PIX_FMT_ARGB: pixel_format = NI_PIX_FMT_ARGB; break;
+        default:              pixel_format = NI_PIX_FMT_BGR0; break;
+        }
+        isSemiPlanar = false;
+        break;
+
+    default:
+        av_log(hwfc, AV_LOG_ERROR, "Pixel format %s not supported by device %s\n",
+               av_get_pix_fmt_name(src->format), ffhwframesctx(hwfc)->hw_type->name);
+        return AVERROR(EINVAL);
+    }
+
+    // check input resolution zero copy compatible or not
+    if (ni_uploader_frame_zerocopy_check(&f_hwctx->api_ctx,
+        src->width, src->height,
+        (const int *)src->linesize, pixel_format) == NI_RETCODE_SUCCESS) {
+        need_to_copy = 0;
+        p_src_session_data->data.frame.extra_data_len =
+            NI_APP_ENC_FRAME_META_DATA_SIZE;
+        // alloc metadata buffer etc. (if needed); frame data is referenced,
+        // not copied
+        ret = ni_encoder_frame_zerocopy_buffer_alloc(
+            &p_src_session_data->data.frame, src->width,
+            src->height, (const int *)src->linesize, (const uint8_t **)src->data,
+            (int)p_src_session_data->data.frame.extra_data_len);
+        if (ret != NI_RETCODE_SUCCESS) {
+            return AVERROR(ENOMEM);
+        }
+    } else {
+        // allocate only once per upload Session when we have frame info
+        p_src_session_data->data.frame.extra_data_len =
+            NI_APP_ENC_FRAME_META_DATA_SIZE;
+
+        ret = ni_frame_buffer_alloc_pixfmt(&p_src_session_data->data.frame,
+                                           pixel_format, src->width,
+                                           src->height, dst_stride,
+                                           1, // force to av_codec_id_h264 for max compat
+                                           (int)p_src_session_data->data.frame.extra_data_len);
+        if (ret < 0) {
+            av_log(hwfc, AV_LOG_ERROR, "Cannot allocate ni_frame %d\n", ret);
+            return ret;
+        }
+    }
+
+    if (need_to_copy) {
+        ret = av_to_niframe_copy(hwfc, dst_stride, &p_src_session_data->data.frame, src);
+        if (ret < 0) {
+            av_log(hwfc, AV_LOG_ERROR, "%s can't copy frame\n", __func__);
+            return AVERROR(EINVAL);
+        }
+    }
+
+    ret = ni_device_session_hwup(&f_hwctx->api_ctx, p_src_session_data, dst_surf);
+    if (ret < 0) {
+        av_log(hwfc, AV_LOG_ERROR, "%s failed to upload frame %d\n",
+               __func__, ret);
+        return AVERROR_EXTERNAL;
+    }
+
+    dst_surf->ui16width = f_hwctx->split_ctx.w[0] = src->width;
+    dst_surf->ui16height = f_hwctx->split_ctx.h[0] = src->height;
+    dst_surf->ui32nodeAddress = 0; // always 0 offset for upload
+    dst_surf->encoding_type = isSemiPlanar ? NI_PIXEL_PLANAR_FORMAT_SEMIPLANAR
+                                           : NI_PIXEL_PLANAR_FORMAT_PLANAR;
+
+    av_log(hwfc, AV_LOG_VERBOSE, "%s trace ui16FrameIdx = [%u] hdl %d SID%d\n",
+           __func__, dst_surf->ui16FrameIdx, dst_surf->device_handle,
+           dst_surf->ui16session_ID);
+
+    // Update frames context
+    f_hwctx->split_ctx.f[0] = (int)dst_surf->encoding_type;
+
+    /* Set the hw_id/card number in AVNIFramesContext */
+    ((AVNIFramesContext*)((AVHWFramesContext*)dst->hw_frames_ctx->data)->hwctx)->hw_id = f_hwctx->api_ctx.hw_id;
+
+    /* Remember dst's crop before av_frame_copy_props() overwrites it with
+     * src's properties, so the original values can still be logged. */
+    crop_right  = dst->crop_right;
+    crop_bottom = dst->crop_bottom;
+
+    av_frame_copy_props(dst, src); // should get the metadata right
+    /* %zu: crop_right/crop_bottom are size_t; %lu is UB where
+     * unsigned long != size_t (e.g. 64-bit Windows). */
+    av_log(hwfc, AV_LOG_DEBUG, "%s Upload frame w/h %d/%d crop r/b %zu/%zu\n",
+           __func__, dst->width, dst->height, crop_right, crop_bottom);
+
+    /* ni_device_session_hwup() may return a positive byte count; normalize
+     * the success path to 0 so callers can treat any non-zero as failure. */
+    return 0;
+}
+
+/**
+ * HWContextType.transfer_data_to callback: upload the software frame src
+ * into the hardware frame dst. Returns 0 on success, negative AVERROR on
+ * failure.
+ */
+static int ni_transfer_data_to(AVHWFramesContext *hwfc, AVFrame *dst,
+                               const AVFrame *src)
+{
+    int err;
+    niFrameSurface1_t *dst_surf;
+
+    /* Log the rejection like ni_transfer_data_from() does instead of
+     * failing silently. */
+    if (src->width > hwfc->width || src->height > hwfc->height) {
+        av_log(hwfc, AV_LOG_ERROR, "Invalid frame dimensions\n");
+        return AVERROR(EINVAL);
+    }
+
+    /* should check against MAX frame size */
+    err = ni_hwup_frame(hwfc, dst, src);
+    if (err) {
+        return err;
+    }
+
+    dst_surf = (niFrameSurface1_t *)(dst->data[3]);
+
+    /* Use __func__: the previous message claimed hwcontext.c:ni_hwup_frame(),
+     * which is neither this file nor this function. */
+    av_log(hwfc, AV_LOG_VERBOSE,
+           "%s dst_surf FID %d %d\n", __func__,
+           dst_surf->ui16FrameIdx, dst_surf->ui16session_ID);
+
+    return 0;
+}
+
+/* HWContextType.transfer_data_from callback: download the hardware frame
+ * src into the software frame dst, rejecting frames larger than the
+ * frames context geometry. */
+static int ni_transfer_data_from(AVHWFramesContext *hwfc, AVFrame *dst,
+                                 const AVFrame *src)
+{
+    if (dst->width <= hwfc->width && dst->height <= hwfc->height)
+        return ni_hwdl_frame(hwfc, dst, src);
+
+    av_log(hwfc, AV_LOG_ERROR, "Invalid frame dimensions\n");
+    return AVERROR(EINVAL);
+}
+
+// HWContextType descriptor registering the NETINT Quadra device type with
+// the libavutil hwcontext framework.
+const HWContextType ff_hwcontext_type_ni_quadra = {
+    // QUADRA
+    .type = AV_HWDEVICE_TYPE_NI_QUADRA,
+    .name = "NI_QUADRA",
+
+    // Sizes of the public hwctx structs (see hwcontext_ni_quad.h).
+    .device_hwctx_size = sizeof(AVNIDeviceContext),
+    .frames_hwctx_size = sizeof(AVNIFramesContext),
+
+    .device_create = ni_device_create,
+    .device_uninit = ni_device_uninit,
+
+    .frames_get_constraints = ni_frames_get_constraints,
+
+    .frames_init   = ni_frames_init,
+    .frames_uninit = ni_frames_uninit,
+
+    .frames_get_buffer = ni_get_buffer,
+
+    // Software <-> hardware frame transfer hooks.
+    .transfer_get_formats = ni_transfer_get_formats,
+    .transfer_data_to     = ni_transfer_data_to,
+    .transfer_data_from   = ni_transfer_data_from,
+
+    // The only hardware pixel format exposed by this device type.
+    .pix_fmts =
+        (const enum AVPixelFormat[]){AV_PIX_FMT_NI_QUAD, AV_PIX_FMT_NONE},
+};
diff --git a/libavutil/hwcontext_ni_quad.h b/libavutil/hwcontext_ni_quad.h
new file mode 100644
index 0000000000..a8795398d7
--- /dev/null
+++ b/libavutil/hwcontext_ni_quad.h
@@ -0,0 +1,99 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_HWCONTEXT_NI_QUAD_H
+#define AVUTIL_HWCONTEXT_NI_QUAD_H
+
+#include "hwcontext.h"
+#include <ni_device_api.h>
+#include <ni_rsrc_api.h>
+#include <ni_util.h>
+
+// Video memory type tags for Quadra hardware frames.
+enum
+{
+    NI_MEMTYPE_VIDEO_MEMORY_NONE,
+    NI_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET,
+    NI_MEMTYPE_VIDEO_MEMORY_HWUPLOAD_TARGET,
+};
+
+// Negative sentinel IDs identifying which Quadra filter/component a frame
+// pool belongs to (real pool sizes are non-negative, so these cannot
+// collide with them).
+typedef enum _ni_filter_poolsize_code {
+    NI_DECODER_ID       = -1,
+    NI_SCALE_ID         = -2,
+    NI_PAD_ID           = -3,
+    NI_CROP_ID          = -4,
+    NI_OVERLAY_ID       = -5,
+    NI_ROI_ID           = -6,
+    NI_BG_ID            = -7,
+    NI_STACK_ID         = -8,
+    NI_ROTATE_ID        = -9,
+    NI_DRAWBOX_ID       = -10,
+    NI_BGR_ID           = -11,
+    NI_DRAWTEXT_ID      = -12,
+    NI_AI_PREPROCESS_ID = -13,
+    NI_DELOGO_ID        = -14,
+    NI_HVSPLUS_ID       = -15,
+    NI_MERGE_ID         = -15,
+    NI_FLIP_ID          = -16,
+    NI_HVSPLUS_ID       = -17,
+} ni_filter_poolsize_code;
+
+/**
+ * This struct is allocated as AVHWDeviceContext.hwctx
+ */
+typedef struct AVNIDeviceContext {
+    int uploader_ID;                      // ID of the card used for uploads
+    ni_device_handle_t uploader_handle;   // open handle to that uploader device
+
+    // Open handles for each detected Quadra card, indexed by card number.
+    ni_device_handle_t cards[NI_MAX_DEVICE_CNT];
+} AVNIDeviceContext;
+
+/**
+ * This struct is allocated as AVHWFramesContext.hwctx
+ */
+typedef struct AVNIFramesContext {
+    niFrameSurface1_t *surfaces;          // surface pool exposed to the API user
+    int               nb_surfaces;        // number of entries in surfaces
+    int               keep_alive_timeout; // session keep-alive timeout
+    int               frame_type;         // NI_MEMTYPE_VIDEO_MEMORY_* value
+    AVRational        framerate;                  /* used for modelling hwupload */
+    int               hw_id;              // card number this context is bound to
+    ni_session_context_t api_ctx; // for down/uploading frames
+    ni_split_context_t   split_ctx;
+    ni_device_handle_t   suspended_device_handle;
+    int                  uploader_device_id; // same one passed to libxcoder session open
+
+    // Accessed only by hwcontext_ni_quad.c
+    niFrameSurface1_t    *surfaces_internal;  // backing store for the pool
+    int                  nb_surfaces_used;    // how many pool slots are taken
+    niFrameSurface1_t    **surface_ptrs;      // per-surface pointer table
+    ni_session_data_io_t src_session_io_data; // for upload frame to be sent up
+} AVNIFramesContext;
+
+/* Return the Quadra card number (hw_id) backing the given hardware frame. */
+static inline int ni_get_cardno(const AVFrame *frame) {
+    AVHWFramesContext *hwfc = (AVHWFramesContext*)frame->hw_frames_ctx->data;
+    return ((AVNIFramesContext*)hwfc->hwctx)->hw_id;
+}
+
+// copy hwctx specific data from one AVHWFramesContext to another.
+// NOTE: this is a shallow memcpy — pointer members (surfaces,
+// surfaces_internal, surface_ptrs) are aliased between the two contexts,
+// not duplicated; both contexts must not free them independently.
+static inline void ni_cpy_hwframe_ctx(AVHWFramesContext *in_frames_ctx,
+                                      AVHWFramesContext *out_frames_ctx)
+{
+    memcpy(out_frames_ctx->hwctx, in_frames_ctx->hwctx, sizeof(AVNIFramesContext));
+}
+
+#endif /* AVUTIL_HWCONTEXT_NI_QUAD_H */
diff --git a/libavutil/pixdesc.c b/libavutil/pixdesc.c
index f0be20d749..fda9274d7e 100644
--- a/libavutil/pixdesc.c
+++ b/libavutil/pixdesc.c
@@ -2266,6 +2266,21 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
         .name = "qsv",
         .flags = AV_PIX_FMT_FLAG_HWACCEL,
     },
+    // NETINT: AV_PIX_FMT_NI_QUAD pixel format for Quadra HW frame
+    [AV_PIX_FMT_NI_QUAD] = {
+        .name = "ni_quadra",
+        .flags = AV_PIX_FMT_FLAG_HWACCEL,
+    },
+    // NETINT: AV_PIX_FMT_NI_QUAD_8_TILE_4X4 pixel format for Quadra internally compressed frame
+    [AV_PIX_FMT_NI_QUAD_8_TILE_4X4] = {
+        .name = "ni_quadra_8_tile4x4",
+        .flags = AV_PIX_FMT_FLAG_HWACCEL,
+    },
+    // NETINT: AV_PIX_FMT_NI_QUAD_10_TILE_4X4 pixel format for Quadra internally compressed frame
+    [AV_PIX_FMT_NI_QUAD_10_TILE_4X4] = {
+        .name = "ni_quadra_10_tile4x4",
+        .flags = AV_PIX_FMT_FLAG_HWACCEL,
+    },
     [AV_PIX_FMT_MEDIACODEC] = {
         .name = "mediacodec",
         .flags = AV_PIX_FMT_FLAG_HWACCEL,
diff --git a/libavutil/pixfmt.h b/libavutil/pixfmt.h
index 823ea8edab..a7fbf4ca7d 100644
--- a/libavutil/pixfmt.h
+++ b/libavutil/pixfmt.h
@@ -499,6 +499,14 @@ enum AVPixelFormat {
 
     AV_PIX_FMT_OHCODEC, /// hardware decoding through openharmony
 
+    /**
+     * HW acceleration through NI, data[3] contains a pointer to the
+     * niFrameSurface1_t structure, for Netint Quadra.
+     */
+    AV_PIX_FMT_NI_QUAD,
+    AV_PIX_FMT_NI_QUAD_8_TILE_4X4,  ///< 8-bit tiled 4x4 compression format within QUADRA
+    AV_PIX_FMT_NI_QUAD_10_TILE_4X4, ///< 10-bit tiled 4x4 compression format within QUADRA
+
     AV_PIX_FMT_NB         ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions
 };
 
-- 
2.49.1

_______________________________________________
ffmpeg-devel mailing list -- ffmpeg-devel@ffmpeg.org
To unsubscribe send an email to ffmpeg-devel-leave@ffmpeg.org

^ permalink raw reply	[flat|nested] only message in thread

only message in thread, other threads:[~2025-08-28 21:14 UTC | newest]

Thread overview: (only message) (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2025-08-28 21:14 [FFmpeg-devel] [PATCH] avcodec/quadra: add netint h264/h265 hwaccel encoders (PR #20364) desmondliu via ffmpeg-devel

Git Inbox Mirror of the ffmpeg-devel mailing list - see https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

This inbox may be cloned and mirrored by anyone:

	git clone --mirror https://master.gitmailbox.com/ffmpegdev/0 ffmpegdev/git/0.git

	# If you have public-inbox 1.1+ installed, you may
	# initialize and index your mirror using the following commands:
	public-inbox-init -V2 ffmpegdev ffmpegdev/ https://master.gitmailbox.com/ffmpegdev \
		ffmpegdev@gitmailbox.com
	public-inbox-index ffmpegdev

Example config snippet for mirrors.


AGPL code for this site: git clone https://public-inbox.org/public-inbox.git