From: "jianfeng.zheng" <ggjogh@gmail.com>
To: ffmpeg-devel@ffmpeg.org
Cc: "jianfeng.zheng" <jianfeng.zheng@mthreads.com>
Subject: [FFmpeg-devel] [PATCH v1 2/2] vaapi: add vaapi_avs2 support
Date: Fri, 19 Jan 2024 23:53:00 +0800
Message-ID: <20240119155300.445106-1-jianfeng.zheng@mthreads.com> (raw)
see https://github.com/intel/libva/pull/738
Moore Threads (https://www.mthreads.com), Mthreads for short, is a
Chinese GPU manufacturer. All of our products, such as the MTT S70 and
MTT S80, support AVS2 8-bit/10-bit hardware decoding at up to 8K
resolution.
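Hardware-accelerated decoding can be exercised with a command along
these lines (a sketch only; the render node path and input name are
illustrative and depend on the system):

    ffmpeg -hwaccel vaapi -hwaccel_device /dev/dri/renderD128 \
           -i input_avs2.ts -f null -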
Signed-off-by: jianfeng.zheng <jianfeng.zheng@mthreads.com>
---
configure | 7 +
libavcodec/Makefile | 2 +
libavcodec/allcodecs.c | 1 +
libavcodec/avs2.c | 345 ++++++++++++++-
libavcodec/avs2.h | 460 +++++++++++++++++++-
libavcodec/avs2_parser.c | 5 +-
libavcodec/avs2dec.c | 569 +++++++++++++++++++++++++
libavcodec/avs2dec.h | 48 +++
libavcodec/avs2dec_headers.c | 787 +++++++++++++++++++++++++++++++++++
libavcodec/codec_desc.c | 5 +-
libavcodec/defs.h | 4 +
libavcodec/hwaccels.h | 1 +
libavcodec/libdavs2.c | 2 +-
libavcodec/profiles.c | 6 +
libavcodec/profiles.h | 1 +
libavcodec/vaapi_avs2.c | 227 ++++++++++
libavcodec/vaapi_decode.c | 5 +
libavformat/matroska.c | 1 +
libavformat/mpeg.h | 1 +
19 files changed, 2450 insertions(+), 27 deletions(-)
create mode 100644 libavcodec/avs2dec.c
create mode 100644 libavcodec/avs2dec.h
create mode 100644 libavcodec/avs2dec_headers.c
create mode 100644 libavcodec/vaapi_avs2.c
diff --git a/configure b/configure
index 89759eda5d..bde3217241 100755
--- a/configure
+++ b/configure
@@ -2464,6 +2464,7 @@ HAVE_LIST="
zlib_gzip
openvino2
va_profile_avs
+ va_profile_avs2
"
# options emitted with CONFIG_ prefix but not available on the command line
@@ -3204,6 +3205,7 @@ wmv3_nvdec_hwaccel_select="vc1_nvdec_hwaccel"
wmv3_vaapi_hwaccel_select="vc1_vaapi_hwaccel"
wmv3_vdpau_hwaccel_select="vc1_vdpau_hwaccel"
cavs_vaapi_hwaccel_deps="vaapi va_profile_avs VAPictureParameterBufferAVS"
+avs2_vaapi_hwaccel_deps="vaapi va_profile_avs2 VAPictureParameterBufferAVS2"
# hardware-accelerated codecs
mediafoundation_deps="mftransform_h MFCreateAlignedMemoryBuffer"
@@ -7189,6 +7191,11 @@ if enabled vaapi; then
test_code cc va/va.h "VAProfile p1 = VAProfileAVSJizhun, p2 = VAProfileAVSGuangdian;" &&
enable va_profile_avs
enabled va_profile_avs && check_type "va/va.h va/va_dec_avs.h" "VAPictureParameterBufferAVS"
+
+ disable va_profile_avs2 &&
+ test_code cc va/va.h "VAProfile p1 = VAProfileAVS2Main, p2 = VAProfileAVS2Main10;" &&
+ enable va_profile_avs2
+ enabled va_profile_avs2 && check_type "va/va.h va/va_dec_avs2.h" "VAPictureParameterBufferAVS2"
fi
if enabled_all opencl libdrm ; then
diff --git a/libavcodec/Makefile b/libavcodec/Makefile
index 7d92375fed..ac3925ed57 100644
--- a/libavcodec/Makefile
+++ b/libavcodec/Makefile
@@ -285,6 +285,7 @@ OBJS-$(CONFIG_BRENDER_PIX_DECODER) += brenderpix.o
OBJS-$(CONFIG_C93_DECODER) += c93.o
OBJS-$(CONFIG_CAVS_DECODER) += cavs.o cavsdec.o cavsdsp.o \
cavsdata.o
+OBJS-$(CONFIG_AVS2_DECODER) += avs2.o avs2dec.o avs2dec_headers.o
OBJS-$(CONFIG_CBD2_DECODER) += dpcm.o
OBJS-$(CONFIG_CCAPTION_DECODER) += ccaption_dec.o ass.o
OBJS-$(CONFIG_CDGRAPHICS_DECODER) += cdgraphics.o
@@ -1056,6 +1057,7 @@ OBJS-$(CONFIG_VP9_VDPAU_HWACCEL) += vdpau_vp9.o
OBJS-$(CONFIG_VP9_VIDEOTOOLBOX_HWACCEL) += videotoolbox_vp9.o
OBJS-$(CONFIG_VP8_QSV_HWACCEL) += qsvdec.o
OBJS-$(CONFIG_CAVS_VAAPI_HWACCEL) += vaapi_cavs.o
+OBJS-$(CONFIG_AVS2_VAAPI_HWACCEL) += vaapi_avs2.o
# Objects duplicated from other libraries for shared builds
SHLIBOBJS += log2_tab.o reverse.o
diff --git a/libavcodec/allcodecs.c b/libavcodec/allcodecs.c
index 93ce8e3224..5900e71804 100644
--- a/libavcodec/allcodecs.c
+++ b/libavcodec/allcodecs.c
@@ -76,6 +76,7 @@ extern const FFCodec ff_bmv_video_decoder;
extern const FFCodec ff_brender_pix_decoder;
extern const FFCodec ff_c93_decoder;
extern const FFCodec ff_cavs_decoder;
+extern const FFCodec ff_avs2_decoder;
extern const FFCodec ff_cdgraphics_decoder;
extern const FFCodec ff_cdtoons_decoder;
extern const FFCodec ff_cdxl_decoder;
diff --git a/libavcodec/avs2.c b/libavcodec/avs2.c
index ead8687d0a..c235708fad 100644
--- a/libavcodec/avs2.c
+++ b/libavcodec/avs2.c
@@ -1,7 +1,9 @@
/*
+ * Chinese AVS2-Video (GY/T 299.1-2016 or IEEE 1857.4-2018) decoder.
* AVS2 related definitions
*
* Copyright (C) 2022 Zhao Zhili, <zhilizhao@tencent.com>
+ * Copyright (c) 2022 JianfengZheng <jianfeng.zheng@mthreads.com>
*
* This file is part of FFmpeg.
*
@@ -20,23 +22,332 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+/**
+ * @file
+ * Chinese AVS2-Video (GY/T 299.1-2016 or IEEE 1857.4-2018) definitions
+ * @author JianfengZheng <jianfeng.zheng@mthreads.com>
+ */
+
+#include "libavcodec/internal.h"
+#include "avcodec.h"
+#include "get_bits.h"
+#include "bytestream.h"
#include "avs2.h"
+#include "startcode.h"
+
+const AVS2LevelLimit *ff_avs2_get_level_limits(int level)
+{
+ static const AVS2LevelLimit level_limits[] = {
+ /* level, w, h, fr, slc, sr, br, bbv */
+ { AVS2_LEVEL_FORBIDDEN, 0, 0, 0, 0, 0, 0, 0 },
+
+ { AVS2_LEVEL_2_0_15, 352, 288, 15, 16, 1520640, 1500000, 1507328 },
+ { AVS2_LEVEL_2_0_30, 352, 288, 30, 16, 3041280, 2000000, 2015232 },
+ { AVS2_LEVEL_2_0_60, 352, 288, 60, 16, 6082560, 2500000, 2506752 },
+
+ { AVS2_LEVEL_4_0_30, 720, 576, 30, 32, 12441600, 6000000, 6012928 },
+ { AVS2_LEVEL_4_0_60, 720, 576, 60, 32, 24883200, 10000000, 10010624 },
+
+ { AVS2_LEVEL_6_0_30, 2048, 1152, 30, 64, 66846720, 12000000, 12009472 },
+ { AVS2_LEVEL_6_2_30, 2048, 1152, 30, 64, 66846720, 30000000, 30015488 },
+ { AVS2_LEVEL_6_0_60, 2048, 1152, 60, 64, 133693440, 20000000, 20004864 },
+ { AVS2_LEVEL_6_2_60, 2048, 1152, 60, 64, 133693440, 50000000, 50003968 },
+ { AVS2_LEVEL_6_0_120, 2048, 1152, 120, 64, 267386880, 25000000, 25001984 },
+ { AVS2_LEVEL_6_2_120, 2048, 1152, 120, 64, 267386880, 100000000, 100007936 },
+
+ { AVS2_LEVEL_8_0_30, 4096, 2304, 30, 128, 283115520, 25000000, 25001984 },
+ { AVS2_LEVEL_8_2_30, 4096, 2304, 30, 128, 283115520, 100000000, 100007936 },
+ { AVS2_LEVEL_8_0_60, 4096, 2304, 60, 128, 566231040, 40000000, 40009728 },
+ { AVS2_LEVEL_8_2_60, 4096, 2304, 60, 128, 566231040, 160000000, 160006144 },
+ { AVS2_LEVEL_8_0_120, 4096, 2304, 120, 128, 1132462080, 60000000, 60014592 },
+ { AVS2_LEVEL_8_2_120, 4096, 2304, 120, 128, 1132462080, 240000000, 240009216 },
+
+ { AVS2_LEVEL_10_0_30, 8192, 4608, 30, 256, 1069547520, 60000000, 60014592 },
+ { AVS2_LEVEL_10_2_30, 8192, 4608, 30, 256, 1069547520, 240000000, 240009216 },
+ { AVS2_LEVEL_10_0_60, 8192, 4608, 60, 256, 2139095040, 120000000, 120012800 },
+ { AVS2_LEVEL_10_2_60, 8192, 4608, 60, 256, 2139095040, 480000000, 480002048 },
+ { AVS2_LEVEL_10_0_120, 8192, 4608, 120, 256, 4278190080, 240000000, 240009216 },
+ { AVS2_LEVEL_10_2_120, 8192, 4608, 120, 256, 4278190080, 800000000, 800014336 },
+ };
+ int nb_limits = FF_ARRAY_ELEMS(level_limits);
+ for (int i = 0; i < nb_limits; i++) {
+ if (level == level_limits[i].level) {
+ return &level_limits[i];
+ }
+ }
+ return NULL;
+}
+
+void ff_avs_get_cu_align_size(AVS2SeqHeader *seq, int *w, int *h)
+{
+ int mini_size = AVS2_MINI_SIZE;
+ int align_w = (seq->width + mini_size - 1) / mini_size * mini_size;
+ int align_h = (seq->height + mini_size - 1) / mini_size * mini_size;
+ if (w) *w = align_w;
+ if (h) *h = align_h;
+}
-const AVRational ff_avs2_frame_rate_tab[16] = {
- { 0 , 0 }, // forbid
- { 24000, 1001},
- { 24 , 1 },
- { 25 , 1 },
- { 30000, 1001},
- { 30 , 1 },
- { 50 , 1 },
- { 60000, 1001},
- { 60 , 1 },
- { 100 , 1 },
- { 120 , 1 },
- { 200 , 1 },
- { 240 , 1 },
- { 300 , 1 },
- { 0 , 0 }, // reserved
- { 0 , 0 } // reserved
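+/**
+ * Derive the maximum DPB size from the sequence level: a per-tier
+ * sample budget divided by the CU-aligned picture area, capped at
+ * 16 frames, minus one for the current frame (cf. the level limits
+ * in Annex B of GY/T 299.1-2016).
+ */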
+int ff_avs2_get_max_dpb_size(AVS2SeqHeader *seq)
+{
+ int ret = 16;
+ int aw, ah;
+ ff_avs_get_cu_align_size(seq, &aw, &ah);
+
+ if (seq->level_id <= AVS2_LEVEL_4_0_60) {
+ return 15;
+ } else if (seq->level_id <= AVS2_LEVEL_6_2_120) {
+ ret = 13369344 / (aw * ah);
+ } else if (seq->level_id <= AVS2_LEVEL_8_2_120) {
+ ret = 56623104 / (aw * ah);
+ } else if (seq->level_id <= AVS2_LEVEL_10_2_120) {
+ ret = 213909504 / (aw * ah);
+ }
+
+ return (ret < 16 ? ret : 16) - 1;
+}
+
+static const AVS2WQMatrix avs2_default_wqm = {
+ .m44 = {
+ 64, 64, 64, 68,
+ 64, 64, 68, 72,
+ 64, 68, 76, 80,
+ 72, 76, 84, 96
+ },
+ .m88 = {
+ 64, 64, 64, 64, 68, 68, 72, 76,
+ 64, 64, 64, 68, 72, 76, 84, 92,
+ 64, 64, 68, 72, 76, 80, 88, 100,
+ 64, 68, 72, 80, 84, 92, 100, 112,
+ 68, 72, 80, 84, 92, 104, 112, 128,
+ 76, 80, 84, 92, 104, 116, 132, 152,
+ 96, 100, 104, 116, 124, 140, 164, 188,
+ 104, 108, 116, 128, 152, 172, 192, 216
+ }
};
+
+void ff_avs2_set_default_wqm(AVS2WQMatrix* wqm) {
+ memcpy(wqm, &avs2_default_wqm, sizeof(AVS2WQMatrix));
+}
+
+void ff_avs2_set_default_seq_header(AVS2SeqHeader *seq)
+{
+ memset(seq, 0, sizeof(AVS2SeqHeader));
+
+ ff_avs2_set_default_wqm(&seq->wqm);
+}
+
+void ff_avs2_set_default_pic_header(AVS2SeqHeader *seq, AVS2PicHeader *pic, int b_intra)
+{
+ memset(pic, 0, sizeof(AVS2PicHeader));
+ pic->b_intra = b_intra;
+
+ pic->b_picture_structure = AVS2_FIELD_INTERLEAVED;
+ pic->b_random_access = 1;
+
+ ff_avs2_set_default_wqm(&pic->wqm);
+}
+
+/* frame rate code 2 rational value */
+AVRational ff_avs2_frame_rate_c2q(int fr_code)
+{
+ switch (fr_code)
+ {
+ case AVS2_FR_23_976 : return av_make_q(24000, 1001);
+ case AVS2_FR_24 : return av_make_q(24, 1);
+ case AVS2_FR_25 : return av_make_q(25, 1);
+ case AVS2_FR_29_970 : return av_make_q(30000, 1001);
+ case AVS2_FR_30 : return av_make_q(30, 1);
+ case AVS2_FR_50 : return av_make_q(50, 1);
+ case AVS2_FR_59_940 : return av_make_q(60000, 1001);
+ case AVS2_FR_60 : return av_make_q(60, 1);
+ case AVS2_FR_100 : return av_make_q(100, 1);
+ case AVS2_FR_120 : return av_make_q(120, 1);
+ case AVS2_FR_200 : return av_make_q(200, 1);
+ case AVS2_FR_240 : return av_make_q(240, 1);
+ case AVS2_FR_300 : return av_make_q(300, 1);
+ default:
+ return av_make_q(0, 1);
+ }
+}
+
+AVRational ff_avs2_get_sar(AVS2SeqHeader* seq)
+{
+ AVRational sar = av_make_q(1, 1);
+ switch (seq->aspect_ratio_code)
+ {
+ case AVS2_DAR_4_3:
+ sar = av_make_q(4 * seq->height, 3 * seq->width);
+ break;
+ case AVS2_DAR_16_9:
+ sar = av_make_q(16 * seq->height, 9 * seq->width);
+ break;
+ case AVS2_DAR_221_100:
+ sar = av_make_q(221 * seq->height, 100 * seq->width);
+ break;
+ default:
+ break;
+ }
+ av_reduce(&sar.num, &sar.den, sar.num, sar.den, 1 << 30);
+ return sar;
+}
+
+int ff_avs2_get_pic_type(AVS2PicHeader *pic)
+{
+ if (pic->b_intra) {
+ return pic->b_scene_pic ? (pic->b_scene_pic_output ? AVS2_PIC_G : AVS2_PIC_GB)
+ : AVS2_PIC_I;
+ } else {
+ switch (pic->pic_coding_type)
+ {
+ case AVS2_PCT_P: return pic->b_scene_pred ? AVS2_PIC_S : AVS2_PIC_P;
+ case AVS2_PCT_B: return AVS2_PIC_B;
+ case AVS2_PCT_F: return AVS2_PIC_F;
+ default: return AVS2_PIC_UNKNOWN;
+ }
+ }
+}
+
+const char* ff_avs2_pic_type_to_str(int type)
+{
+ static const char* type_str[] = {
+ "I", "P", "B", "F", "S", "G", "GB"
+ };
+ if (type >= AVS2_PIC_I && type <= AVS2_PIC_GB) {
+ return type_str[type];
+ }
+ return "unknown";
+}
+
+const char* ff_avs2_get_pic_type_str(AVS2PicHeader *pic)
+{
+ int type = ff_avs2_get_pic_type(pic);
+ return ff_avs2_pic_type_to_str(type);
+}
+
+int ff_avs2_packet_split(AVS2PacketSplit *pkt, const uint8_t *data, int size, void *logctx)
+{
+ GetByteContext _bs, *bs=&_bs;
+ bytestream2_init(bs, data, size);
+
+ memset(pkt, 0, sizeof(*pkt));
+ while (bytestream2_get_bytes_left(bs) >= 4) {
+ AVS2EsUnit *unit = 0;
+ int valid_slice = 0;
+ uint32_t stc = -1;
+ bs->buffer = avpriv_find_start_code(bs->buffer, bs->buffer_end, &stc);
+ if (bs->buffer <= bs->buffer_end && (stc & 0xFFFFFF00) == 0x100) {
+ if (!ff_avs2_valid_start_code(stc)) {
+ av_log(logctx, AV_LOG_ERROR, "Invalid startcode 0x%08x @%d !!!\n",
+ stc, bytestream2_tell(bs));
+ return AVERROR_INVALIDDATA;
+ }
+
+ valid_slice = ff_avs2_valid_slice_stc(stc);
+
+ if (pkt->nb_alloc < pkt->nb_units + 1) {
+ int new_space = pkt->nb_units + 4;
+ void *tmp = av_realloc_array(pkt->units, new_space, sizeof(*pkt->units));
+ if (!tmp)
+ return AVERROR(ENOMEM);
+
+ pkt->units = tmp;
+ memset(pkt->units + pkt->nb_alloc, 0,
+ (new_space - pkt->nb_alloc) * sizeof(*pkt->units));
+ pkt->nb_alloc = new_space;
+ }
+
+ unit = &pkt->units[pkt->nb_units];
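+ /* for slice units, step back one byte so the unit payload keeps the
+ * last start-code byte needed by slice header parsing / the hwaccel */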
+ if (valid_slice)
+ bytestream2_seek(bs, -1, SEEK_CUR);
+
+ unit->start_code = stc;
+ unit->data_start = bytestream2_tell(bs);
+ unit->data_len = bytestream2_get_bytes_left(bs);
+
+ // amend previous data_len
+ if (pkt->nb_units > 0) {
+ unit[-1].data_len -= 4 + unit->data_len - valid_slice;
+ }
+
+ pkt->nb_units += 1;
+ } else {
+ break;
+ }
+ }
+
+ av_log(logctx, AV_LOG_DEBUG, "pkt size=%d, nalu=%d:", size, pkt->nb_units);
+
+ if (pkt->nb_units == 0) {
+ av_log(logctx, AV_LOG_ERROR, "No NALU found in this packet !!!");
+ return AVERROR_INVALIDDATA;
+ } else {
+ int first_stc_pos = pkt->units[0].data_start - 4;
+ if (first_stc_pos > 0) {
+ av_log(logctx, AV_LOG_WARNING, "First NALU @%d dons't start from pos 0!",
+ first_stc_pos);
+ }
+ }
+
+ for (int i = 0; i < pkt->nb_units; i++) {
+ AVS2EsUnit *unit = &pkt->units[i];
+ av_log(logctx, AV_LOG_DEBUG, " [%02X] ..%ld..", unit->start_code & 0xff, unit->data_len);
+ }
+ av_log(logctx, AV_LOG_DEBUG, "\n");
+ return 0;
+}
+
+/**
+ * Free all the allocated memory in the packet.
+ */
+void ff_avs2_packet_uninit(AVS2PacketSplit *pkt) {
+ av_freep(&pkt->units);
+ pkt->nb_units = 0;
+ pkt->nb_alloc = 0;
+}
+
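+/**
+ * Remove AVS2 pseudo start-code emulation. Unlike the byte-oriented
+ * escape in H.26x, a 0x02 byte following two zero bytes carries only
+ * its 6 most significant payload bits; the low '10' bits are the
+ * inserted prevention pattern, so the output is reassembled bitwise.
+ */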
+int ff_avs2_remove_pseudo_code(uint8_t *dst, const uint8_t *src, int size)
+{
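+ /* BITMASK[n] keeps the n most significant bits of a byte (only even n are used) */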
+ static const uint8_t BITMASK[] = { 0x00, 0x00, 0xc0, 0x00, 0xf0, 0x00, 0xfc, 0x00 };
+ int src_pos = 0;
+ int dst_pos = 0;
+ int cur_bit = 0;
+ int last_bit = 0;
+
+ uint8_t cur_byte = 0;
+ uint8_t last_byte = 0;
+
+
+ while (src_pos < 2 && src_pos < size){
+ dst[dst_pos++] = src[src_pos++];
+ }
+
+ while (src_pos < size){
+ cur_bit = 8;
+ if (src[src_pos-2] == 0 && src[src_pos-1] == 0 && src[src_pos] == 0x02)
+ cur_bit = 6;
+ cur_byte = src[src_pos++];
+
+ if (cur_bit == 8) {
+ if (last_bit == 0) {
+ dst[dst_pos++] = cur_byte;
+ } else {
+ dst[dst_pos++] = ((last_byte & BITMASK[last_bit]) | ((cur_byte & BITMASK[8 - last_bit]) >> last_bit));
+ last_byte = (cur_byte << (8 - last_bit)) & BITMASK[last_bit];
+ }
+ } else {
+ if (last_bit == 0) {
+ last_byte = cur_byte;
+ last_bit = cur_bit;
+ } else {
+ dst[dst_pos++] = ((last_byte & BITMASK[last_bit]) | ((cur_byte & BITMASK[8 - last_bit]) >> last_bit));
+ last_byte = (cur_byte << (8 - last_bit)) & BITMASK[last_bit - 2];
+ last_bit = last_bit - 2;
+ }
+ }
+ }
+
+ if (last_bit != 0 && last_byte != 0) {
+ dst[dst_pos++] = last_byte;
+ }
+ return dst_pos; // dst size
+}
diff --git a/libavcodec/avs2.h b/libavcodec/avs2.h
index 544cf502d7..f569a5aada 100644
--- a/libavcodec/avs2.h
+++ b/libavcodec/avs2.h
@@ -1,7 +1,9 @@
/*
+ * Chinese AVS2-Video (GY/T 299.1-2016 or IEEE 1857.4-2018) decoder.
* AVS2 related definitions
*
* Copyright (C) 2022 Zhao Zhili, <zhilizhao@tencent.com>
+ * Copyright (c) 2022 JianfengZheng <jianfeng.zheng@mthreads.com>
*
* This file is part of FFmpeg.
*
@@ -20,12 +22,26 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+
+/**
+ * @file
+ * Chinese AVS2-Video (GY/T 299.1-2016 or IEEE 1857.4-2018) definitions
+ * @author JianfengZheng <jianfeng.zheng@mthreads.com>
+ */
+
#ifndef AVCODEC_AVS2_H
#define AVCODEC_AVS2_H
+#include "libavutil/frame.h"
+#include "libavutil/mem_internal.h"
#include "libavutil/rational.h"
+#include "avcodec.h"
-#define AVS2_SLICE_MAX_START_CODE 0x000001AF
+
+#define AVS2_MAX_REF_COUNT 7 /* max reference frame number */
+#define AVS2_MAX_DPB_COUNT 16 /* max DPB count including current frame */
+#define AVS2_MAX_RCS_COUNT 32 /* max number of RCS */
+#define AVS2_MINI_SIZE 8 /* Annex B.2 of GY/T 299.1-2016 */
enum {
AVS2_SEQ_START_CODE = 0xB0,
@@ -40,12 +56,450 @@ enum {
#define AVS2_ISPIC(x) ((x) == AVS2_INTRA_PIC_START_CODE || (x) == AVS2_INTER_PIC_START_CODE)
#define AVS2_ISUNIT(x) ((x) == AVS2_SEQ_START_CODE || AVS2_ISPIC(x))
+enum AVS2StartCode {
+ AVS2_STC_SEQ_HEADER = 0x000001B0, /* sequence header start code */
+ AVS2_STC_SEQ_END = 0x000001B1, /* sequence end start code */
+ AVS2_STC_USER_DATA = 0x000001B2, /* user data start code */
+ AVS2_STC_INTRA_PIC = 0x000001B3, /* intra picture start code */
+ AVS2_STC_EXTENSION = 0x000001B5, /* extension start code */
+ AVS2_STC_INTER_PIC = 0x000001B6, /* inter picture start code */
+ AVS2_STC_VIDEO_EDIT = 0x000001B7, /* video edit start code */
+ AVS2_STC_SLICE_MIN = 0x00000100, /* min slice start code */
+ AVS2_STC_SLICE_MAX = 0x0000018F /* max slice start code */
+};
+
+static inline int ff_avs2_valid_slice_stc(uint32_t stc) {
+ return (stc >= AVS2_STC_SLICE_MIN && stc <= AVS2_STC_SLICE_MAX);
+}
+
+static inline int ff_avs2_valid_start_code(uint32_t stc) {
+ return (stc >= AVS2_STC_SEQ_HEADER && stc <= AVS2_STC_VIDEO_EDIT) ||
+ ff_avs2_valid_slice_stc(stc);
+}
+
+enum AVS2ExtType {
+ AVS2_EXT_SEQ_DISPLAY = 0x2,
+ AVS2_EXT_TEMPORAL_SCALE = 0x3,
+ AVS2_EXT_COPYRIGHT = 0x4,
+ AVS2_EXT_PIC_DISPLAY = 0x7,
+ AVS2_EXT_MASTERING = 0xa, /* mastering_display_and_content_metadata_extension */
+ AVS2_EXT_CAMERA_PARAM = 0xb,
+ AVS2_EXT_ROI_PARAM = 0xc,
+};
+
enum AVS2Profile {
AVS2_PROFILE_MAIN_PIC = 0x12,
AVS2_PROFILE_MAIN = 0x20,
AVS2_PROFILE_MAIN10 = 0x22,
};
-extern const AVRational ff_avs2_frame_rate_tab[16];
+enum AVS2Level {
+ AVS2_LEVEL_FORBIDDEN = 0x00,
+
+ AVS2_LEVEL_2_0_15 = 0x10, /* 352x288, 1500Kbps */
+ AVS2_LEVEL_2_0_30 = 0x12, /* 352x288, 2000Kbps */
+ AVS2_LEVEL_2_0_60 = 0x14, /* 352x288, 2500Kbps */
+
+ AVS2_LEVEL_4_0_30 = 0x20, /* 720x576, 6Mbps, 30fps */
+ AVS2_LEVEL_4_0_60 = 0x22, /* 720x576, 10Mbps, 60fps */
+
+ AVS2_LEVEL_6_0_30 = 0x40, /* 2048x1152, 12Mbps, 30fps */
+ AVS2_LEVEL_6_2_30 = 0x42, /* 2048x1152, 30Mbps, 30fps */
+ AVS2_LEVEL_6_0_60 = 0x44, /* 2048x1152, 20Mbps, 60fps */
+ AVS2_LEVEL_6_2_60 = 0x46, /* 2048x1152, 50Mbps, 60fps */
+ AVS2_LEVEL_6_0_120 = 0x48, /* 2048x1152, 25Mbps, 120fps */
+ AVS2_LEVEL_6_2_120 = 0x4a, /* 2048x1152, 100Mbps, 120fps */
+
+ AVS2_LEVEL_8_0_30 = 0x50, /* 4096x2304, 25Mbps, 30fps */
+ AVS2_LEVEL_8_2_30 = 0x52, /* 4096x2304, 100Mbps, 30fps */
+ AVS2_LEVEL_8_0_60 = 0x54, /* 4096x2304, 40Mbps, 60fps */
+ AVS2_LEVEL_8_2_60 = 0x56, /* 4096x2304, 160Mbps, 60fps */
+ AVS2_LEVEL_8_0_120 = 0x58, /* 4096x2304, 60Mbps, 120fps */
+ AVS2_LEVEL_8_2_120 = 0x5a, /* 4096x2304, 240Mbps, 120fps */
+
+ AVS2_LEVEL_10_0_30 = 0x60, /* 8192x4608, 60Mbps, 30fps */
+ AVS2_LEVEL_10_2_30 = 0x62, /* 8192x4608, 240Mbps, 30fps */
+ AVS2_LEVEL_10_0_60 = 0x64, /* 8192x4608, 120Mbps, 60fps */
+ AVS2_LEVEL_10_2_60 = 0x66, /* 8192x4608, 480Mbps, 60fps */
+ AVS2_LEVEL_10_0_120 = 0x68, /* 8192x4608, 240Mbps, 120fps */
+ AVS2_LEVEL_10_2_120 = 0x6a, /* 8192x4608, 800Mbps, 120fps */
+};
+
+typedef struct AVS2LevelLimit {
+ enum AVS2Level level;
+
+ int width;
+ int height;
+ int frame_rate;
+ int nb_slice;
+ uint64_t sample_rate;
+ uint64_t bit_rate;
+ uint64_t bbv_size;
+} AVS2LevelLimit;
+
+enum AVS2ChromaFormat {
+ AVS2_CHROMA_YUV_400 = 0, /* not supported */
+ AVS2_CHROMA_YUV_420 = 1,
+ AVS2_CHROMA_YUV_422 = 2, /* not supported */
+};
+
+enum AVS2AspectRatio {
+ AVS2_SAR_1_1 = 1, /* SAR 1:1 */
+ AVS2_DAR_4_3 = 2, /* DAR 4:3 */
+ AVS2_DAR_16_9 = 3, /* DAR 16:9 */
+ AVS2_DAR_221_100 = 4, /* DAR 2.21:1 */
+};
+
+enum AVS2FrameRate {
+ AVS2_FR_23_976 = 1, /* 24000/1001=23.976 */
+ AVS2_FR_24 = 2,
+ AVS2_FR_25 = 3,
+ AVS2_FR_29_970 = 4, /* 30000/1001=29.970 */
+ AVS2_FR_30 = 5,
+ AVS2_FR_50 = 6,
+ AVS2_FR_59_940 = 7, /* 60000/1001=59.940 */
+ AVS2_FR_60 = 8,
+ AVS2_FR_100 = 9,
+ AVS2_FR_120 = 10,
+ AVS2_FR_200 = 11,
+ AVS2_FR_240 = 12,
+ AVS2_FR_300 = 13,
+
+ AVS2_FR_MIN = AVS2_FR_23_976,
+ AVS2_FR_MAX = AVS2_FR_300,
+};
+
+enum AVS2PicCodingType {
+ AVS2_PCT_P = 1,
+ AVS2_PCT_B = 2,
+ AVS2_PCT_F = 3,
+};
+
+enum AVS2PicType {
+ AVS2_PIC_UNKNOWN = -1,
+ AVS2_PIC_I = 0, // AVS2_PCT_I, ScenePicFlag:0
+ AVS2_PIC_G = 5, // AVS2_PCT_I, ScenePicFlag:1, SceneOutFlag:1
+ AVS2_PIC_GB = 6, // AVS2_PCT_I, ScenePicFlag:1, SceneOutFlag:0
+
+ AVS2_PIC_P = 1, // AVS2_PCT_P, ScenePredFlag:0
+ AVS2_PIC_S = 4, // AVS2_PCT_P, ScenePredFlag:1
+
+ AVS2_PIC_B = 2, // AVS2_PCT_B
+ AVS2_PIC_F = 3, // AVS2_PCT_F
+};
+
+enum AVS2FrameStructure {
+ AVS2_FIELD_SEPARATED = 0,
+ AVS2_FIELD_INTERLEAVED = 1,
+};
+
+/* Weight Quant Matrix */
+typedef struct AVS2WQMatrix {
+ uint8_t m44[16];
+ uint8_t m88[64];
+} AVS2WQMatrix;
+
+/* reference configuration set */
+typedef struct AVS2RefCfgSet {
+ int b_ref_by_others; /* referenced by others */
+ int n_ref; /* number of reference picture */
+ int ref_delta_doi[8]; /* delta doi (decode_order_index) of ref pic */
+ int n_rm; /* number of removed picture */
+ int rm_delta_doi[8]; /* delta doi (decode_order_index) of removed pic */
+} AVS2RefCfgSet;
+
+
+typedef struct AVS2SeqDisplayExt {
+ uint32_t extension_id;
+ int video_format;
+
+ uint32_t b_full_range : 4;
+ uint32_t b_color_desc : 4;
+ uint32_t color_primaries : 8;
+ uint32_t color_transfer : 8;
+ uint32_t color_matrix : 8;
+
+ uint32_t display_w : 16;
+ uint32_t display_h : 16;
+
+ int b_td_mode;
+ int td_packing_mode;
+ int b_view_reverse;
+} AVS2SeqDisplayExt;
+
+typedef struct AVS2TemporalScaleExt {
+ uint32_t extension_id;
+ int n_level;
+ struct {
+ AVRational framerate;
+ int64_t bitrate;
+ } level[8];
+} AVS2TemporalScaleExt;
+
+typedef struct AVS2CopyrightExt {
+ uint32_t extension_id;
+ unsigned b_flag : 1;
+ unsigned b_original : 1;
+ int copy_id;
+ uint64_t copy_number;
+} AVS2CopyrightExt;
+
+typedef struct AVS2PicDisplayExt {
+ uint32_t extension_id;
+ int n_offset;
+ int32_t offset[3][2]; /* offset[][0:h, 1:v]*/
+} AVS2PicDisplayExt;
+
+typedef struct AVS2SeqHeader {
+ int profile_id; /* profile ID, enum AVS2Profile */
+ int level_id; /* level ID, enum AVS2Level */
+ int b_progressive; /* progressive sequence (0: interlace, 1: progressive) */
+ int b_field_coding; /* field coded sequence */
+ int width; /* image width */
+ int height; /* image height */
+ int chroma_format; /* chroma format(1: 4:2:0, 2: 4:2:2) */
+ int sample_bit_depth; /* sample precision, 8 / 10 */
+ int output_bit_depth; /* encoded precision, 8 / 10 */
+ int aspect_ratio_code; /* enum AVS2AspectRatio */
+ int frame_rate_code; /* frame rate code, mpeg12 [1...8] */
+ int64_t bitrate; /* bitrate (bps) */
+ int b_low_delay; /* has no b frames */
+ int b_has_temporal_id; /* temporal id exist flag */
+ int bbv_buffer_size;
+ int log2_lcu_size; /* largest coding block size */
+ int b_enable_wq; /* weight quant enable flag */
+ AVS2WQMatrix wqm; /* weighted quantization matrix */
+
+ int b_disable_scene_pic;
+ int b_multi_hypothesis_skip;
+ int b_dual_hypothesis_prediction;
+ int b_weighted_skip;
+ int b_amp; /* enable asymmetric_motion_partitions */
+ int b_nsqt; /* enable nonsquare_quadtree_transform */
+ int b_nsip; /* enable nonsquare_intra_prediction */
+ int b_2nd_transform; /* enable secondary_transform */
+ int b_sao; /* enable sample_adaptive_offset */
+ int b_alf; /* enable adaptive_loop_filter */
+ int b_pmvr;
+ int n_rcs; /* num of reference_configuration_set */
+ AVS2RefCfgSet rcs[AVS2_MAX_RCS_COUNT+1];
+ int output_reorder_delay;
+ int b_cross_slice_loop_filter;
+} AVS2SeqHeader;
+
+typedef struct AVS2AlfParam {
+ uint8_t b_enable[3]; // for YUV separate
+ struct {
+ int n_filter;
+ int region_distance[16];
+ int16_t coeff[16][9];
+ } luma;
+ struct {
+ int16_t coeff[9];
+ } chroma[2];
+} AVS2AlfParam;
+
+typedef struct AVS2PicHeader {
+ int b_intra;
+
+ uint32_t bbv_delay;
+
+ union {
+ struct /* intra_data */ {
+ uint32_t b_time_code : 1;
+ uint32_t time_code_hh : 5; /* time code hours */
+ uint32_t time_code_mm : 6; /* time code minutes */
+ uint32_t time_code_ss : 6; /* time code seconds */
+ uint32_t time_code_ff : 6; /* time code frames */
+
+ uint16_t b_scene_pic;
+ uint16_t b_scene_pic_output;
+ };
+ struct /* inter_data */ {
+ int pic_coding_type;
+ int b_scene_pred;
+ int b_scene_ref;
+ int b_random_access; /* random access decodable */
+ };
+ };
+
+ int doi; /* decode_order_index */
+ int temporal_id;
+ int output_delay; /* picture_output_delay */
+ int b_use_rcs;
+ int rcs_index;
+ int bbv_check_times;
+
+ int b_progressive_frame;
+ int b_picture_structure; /* enum AVS2FrameStructure */
+ int b_top_field_first;
+ int b_repeat_first_field;
+ int b_top_field_picture;
+
+ int b_fixed_qp;
+ int pic_qp;
+
+ /* loop filter */
+ int b_disable_lf;
+ int b_lf_param;
+ int lf_alpha_offset;
+ int lf_beta_offset;
+
+ /* quant param */
+ int b_no_chroma_quant_param;
+ int cb_quant_delta;
+ int cr_quant_delta;
+
+ int b_enable_pic_wq;
+ int wq_data_index;
+ int wq_param_index;
+ int wq_model;
+ int wq_param_delta[2][6];
+ AVS2WQMatrix wqm;
+
+ /**
+ * @brief processed alf coeff: 0-15:luma, 16:cb, 17:cr
+ * @see ff_avs2_process_alf_param()
+ */
+ int8_t alf_coeff[18][9];
+ int8_t b_alf_enable[3]; // 0:Y, 1:U, 2:V
+} AVS2PicHeader;
+
+typedef struct AVS2SliceHeader {
+ int lcu_x;
+ int lcu_y;
+ uint32_t b_fixed_qp : 16;
+ uint32_t slice_qp : 16;
+ uint8_t b_sao[3];
+
+ int aec_byte_offset; /* aec data offset in AVS2EsUnit */
+} AVS2SlcHeader;
+
+/* elementary stream unit */
+typedef struct AVS2EsUnit {
+ uint32_t start_code;
+ size_t data_start; /* position right after start code */
+ size_t data_len;
+} AVS2EsUnit;
+
+
+typedef struct AVS2PacketSplit {
+ AVS2EsUnit *units;
+ int nb_units;
+ int nb_alloc;
+} AVS2PacketSplit;
+
+#define AVS2_DPB_MARK_NULL (0)
+#define AVS2_DPB_MARK_USED (1 << 1)
+#define AVS2_DPB_MARK_DECODED (1 << 2)
+/// the frame may be referenced by others
+#define AVS2_DPB_MARK_REF (1 << 3)
+/// the frame is no longer referenced
+#define AVS2_DPB_MARK_UNREF (1 << 4)
+/// the frame can be output
+#define AVS2_DPB_MARK_OUTPUTABLE (1 << 5)
+/// the frame has been output
+#define AVS2_DPB_MARK_OUTPUTED (1 << 6)
+
+#define IS_DPB_FRAME_UNUSED(frm) ((frm)->dpb_marks == AVS2_DPB_MARK_NULL)
+#define IS_DPB_FRAME_INUSE(frm) ((frm)->dpb_marks != AVS2_DPB_MARK_NULL)
+#define IS_DPB_FRAME_MARK_AS_REF(frm) ((frm)->dpb_marks & AVS2_DPB_MARK_REF)
+#define IS_DPB_FRAME_OUTPUTABLE(frm) (((frm)->dpb_marks & AVS2_DPB_MARK_OUTPUTABLE) \
+ && !((frm)->dpb_marks & AVS2_DPB_MARK_OUTPUTED))
+#define IS_DPB_FRAME_REMOVABLE(frm) (((frm)->dpb_marks & AVS2_DPB_MARK_OUTPUTED) \
+ && ((frm)->dpb_marks & AVS2_DPB_MARK_UNREF))
+
+typedef struct AVS2Frame AVS2Frame;
+struct AVS2Frame {
+ AVS2PicHeader pic_header;
+
+ int poi;
+ int pic_type; // enum AVS2_PIC_TYPE
+ int n_slice;
+
+ AVFrame *frame;
+ AVBufferRef *hwaccel_priv_buf;
+ void *hwaccel_picture_private;
+
+ uint32_t dpb_marks; // A combination of AVS2_DPB_MARK_XXX
+
+ /**
+ * reference picture list
+ */
+ int b_ref;
+ int n_ref;
+ int16_t ref_doi[AVS2_MAX_REF_COUNT];
+ int16_t ref_poi[AVS2_MAX_REF_COUNT];
+};
+
+typedef struct AVS2Context
+{
+ const AVClass *class;
+ AVCodecContext *avctx;
+ enum AVPixelFormat pix_fmt;
+
+ int width;
+ int height;
+ int b_got_seq;
+ AVS2SeqHeader seq;
+ AVS2PicHeader pic;
+ AVS2SlcHeader slc;
+ AVS2PacketSplit pkt_split;
+
+ /**
+ * @brief Decoding picture buffer. See ff_avs2_dpb_xxx()
+ */
+ AVS2Frame DPB[AVS2_MAX_DPB_COUNT];
+ AVS2Frame *curr_frame;
+
+ AVS2SeqDisplayExt seq_display_ext;
+ AVS2TemporalScaleExt tempo_scale_ext;
+ AVS2CopyrightExt seq_copyright_ext;
+ AVS2PicDisplayExt pic_display_ext;
+ AVS2CopyrightExt pic_copyright_ext;
+} AVS2Context;
+
+static inline int ff_avs2_get_min_cu_width(AVS2SeqHeader *seq) {
+ return (seq->width + AVS2_MINI_SIZE - 1) / AVS2_MINI_SIZE;
+}
+static inline int ff_avs2_get_min_cu_height(AVS2SeqHeader *seq) {
+ return (seq->height + AVS2_MINI_SIZE - 1) / AVS2_MINI_SIZE;
+}
+void ff_avs_get_cu_align_size(AVS2SeqHeader *seq, int *w, int *h);
+int ff_avs2_get_max_dpb_size(AVS2SeqHeader *seq);
+const AVS2LevelLimit *ff_avs2_get_level_limits(int level);
+void ff_avs2_set_default_wqm(AVS2WQMatrix* wqm);
+void ff_avs2_set_default_seq_header(AVS2SeqHeader *seq);
+void ff_avs2_set_default_pic_header(AVS2SeqHeader *seq, AVS2PicHeader *pic, int b_intra);
+AVRational ff_avs2_frame_rate_c2q(int fr_code);
+AVRational ff_avs2_get_sar(AVS2SeqHeader* seq);
+
+static inline int ff_avs2_is_valid_qp(AVS2SeqHeader *seq, int qp) {
+ return qp >= 0 && qp <= (63 + 8 * (seq->sample_bit_depth - 8));
+}
+static inline int ff_avs2_get_pic_poi(AVS2SeqHeader* seq, AVS2PicHeader *pic) {
+ /* Spec. 9.2.2 */
+ return pic->doi + pic->output_delay - seq->output_reorder_delay;
+}
+
+int ff_avs2_get_pic_type(AVS2PicHeader *pic);
+const char* ff_avs2_pic_type_to_str(int /*enum AVS2PicType*/ type);
+const char* ff_avs2_get_pic_type_str(AVS2PicHeader *pic);
+
+/**
+ * Split an input packet into elementary stream units.
+ */
+int ff_avs2_packet_split(AVS2PacketSplit *pkt, const uint8_t *data, int size, void *logctx);
+/**
+ * Free all the allocated memory in the packet.
+ */
+void ff_avs2_packet_uninit(AVS2PacketSplit *pkt);
+
+int ff_avs2_remove_pseudo_code(uint8_t *dst, const uint8_t *src, int size);
+
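+/* NB: relies on an 'AVS2Context *h' being in scope at the call site for logging */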
+#define AVS2_CHECK_RET(ret) do { if ((ret) < 0) { \
+ av_log(h, AV_LOG_ERROR, "AVS2_CHECK_RET(%d) at line:%d, file:%s\n", ret, __LINE__, __FILE__); \
+ return ret; \
+} } while (0)
-#endif
+#endif /* AVCODEC_AVS2_H */
diff --git a/libavcodec/avs2_parser.c b/libavcodec/avs2_parser.c
index 200134f91d..5214b98983 100644
--- a/libavcodec/avs2_parser.c
+++ b/libavcodec/avs2_parser.c
@@ -112,10 +112,7 @@ static void parse_avs2_seq_header(AVCodecParserContext *s, const uint8_t *buf,
s->height = height;
s->coded_width = FFALIGN(width, 8);
s->coded_height = FFALIGN(height, 8);
- avctx->framerate.num =
- ff_avs2_frame_rate_tab[frame_rate_code].num;
- avctx->framerate.den =
- ff_avs2_frame_rate_tab[frame_rate_code].den;
+ avctx->framerate = ff_avs2_frame_rate_c2q(frame_rate_code);
avctx->has_b_frames = FFMAX(avctx->has_b_frames, !low_delay);
av_log(avctx, AV_LOG_DEBUG,
diff --git a/libavcodec/avs2dec.c b/libavcodec/avs2dec.c
new file mode 100644
index 0000000000..ad5abf5a6a
--- /dev/null
+++ b/libavcodec/avs2dec.c
@@ -0,0 +1,569 @@
+/*
+ * Chinese AVS2-Video (GY/T 299.1-2016 or IEEE 1857.4-2018) decoder.
+ * Copyright (c) 2022 JianfengZheng <jianfeng.zheng@mthreads.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file avs2dec.c
+ * @author JianfengZheng <jianfeng.zheng@mthreads.com>
+ * @brief Chinese AVS2-Video (GY/T 299.1-2016 or IEEE 1857.4-2018) decoder
+ */
+
+#include "config_components.h"
+#include "avcodec.h"
+#include "codec_internal.h"
+#include "internal.h"
+#include "decode.h"
+#include "get_bits.h"
+#include "libavutil/avassert.h"
+#include "libavutil/pixdesc.h"
+#include "mpeg12data.h"
+#include "hwaccel_internal.h"
+#include "hwconfig.h"
+#include "profiles.h"
+#include "avs2dec.h"
+
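+/* Fill the frame with a gray pattern plus a moving bright bar so that
+ * streams decoded without a hwaccel still produce visibly fake output. */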
+static void avs2_fake_output(AVS2Context *h, AVS2Frame *output) {
+ int y, i, l;
+ AVFrame *frame = output->frame;
+ int npl = av_pix_fmt_count_planes(frame->format);
+ l = (output->poi % (frame->height / 16)) * 16;
+ /* Y */
+ for (y = 0; y < frame->height; y++) {
+ int v = (!(y & 0xf) || (y >= l && y < l + 16)) ? 0xe0 : 0x10;
+ memset(frame->data[0] + y * frame->linesize[0], v, frame->linesize[0]);
+ }
+
+ /* Cb and Cr */
+ for (i = 1; i < npl; i++) {
+ for (y = 0; y < frame->height / 2; y++) {
+ memset(frame->data[i] + y * frame->linesize[i], 0x80, frame->linesize[i]);
+ }
+ }
+
+ av_log(h, AV_LOG_WARNING, "DPB debug fake frame outputed, poi=%d <%s>\n",
+ output->poi, ff_avs2_pic_type_to_str(output->pic_type));
+}
+
+static void ff_avs2_dpb_trace(AVS2Context *h, const char* hint)
+{
+ int i, n = 0;
+ av_log(h, AV_LOG_TRACE, "[DPB Trace] %s\n", hint);
+ for (i = 0; i < AVS2_MAX_DPB_COUNT; i++) {
+ AVS2Frame* iter = &h->DPB[i];
+ if (iter && IS_DPB_FRAME_INUSE(iter)) {
+ av_log(h, AV_LOG_TRACE, " #%d: doi=%d, poi=%d, marks=%x\n",
+ i, iter->pic_header.doi, iter->poi, iter->dpb_marks);
+ ++n;
+ }
+ }
+ if (n==0) {
+ av_log(h, AV_LOG_TRACE, " #-: none\n");
+ }
+}
+
+static int ff_avs2_dpb_init(AVS2Context *h)
+{
+ int i;
+ for (i = 0; i < AVS2_MAX_DPB_COUNT; i++) {
+ h->DPB[i].frame = av_frame_alloc();
+ if (!h->DPB[i].frame)
+ return AVERROR(ENOMEM);
+ h->DPB[i].dpb_marks = AVS2_DPB_MARK_NULL;
+ }
+ return 0;
+}
+
+/**
+ * @brief remove frame out of dpb
+ * @param frame one of frame in h->DPB[]
+ */
+static inline void ff_avs2_dpb_remove_frame(AVS2Context *h, AVS2Frame *frame)
+{
+ /* frame->frame can be NULL if context init failed */
+ if (!frame->frame || !frame->frame->buf[0])
+ return;
+
+ av_buffer_unref(&frame->hwaccel_priv_buf);
+ frame->hwaccel_picture_private = NULL;
+
+ av_frame_unref(frame->frame);
+ frame->dpb_marks = AVS2_DPB_MARK_NULL;
+}
+
+static void ff_avs2_dpb_uninit(AVS2Context *h)
+{
+ int i;
+ for (i = 0; i < AVS2_MAX_DPB_COUNT; i++) {
+ ff_avs2_dpb_remove_frame(h, &h->DPB[i]);
+ av_frame_free(&h->DPB[i].frame);
+ }
+}
+
+static void ff_avs2_dpb_output_frame(AVS2Context *h, AVFrame *dst_frame, int *got_frame)
+{
+ int i;
+ int min_poi = INT_MAX;
+ AVS2Frame *out_frame = NULL;
+ for (i = 0; i < AVS2_MAX_DPB_COUNT; i++) {
+ AVS2Frame* iter = &h->DPB[i];
+ if (IS_DPB_FRAME_OUTPUTABLE(iter)) {
+ if (iter->poi < min_poi) {
+ out_frame = iter;
+ min_poi = iter->poi;
+ }
+ }
+ }
+ if (out_frame) {
+ *got_frame = 1;
+ av_frame_ref(dst_frame, out_frame->frame);
+ out_frame->dpb_marks |= AVS2_DPB_MARK_OUTPUTED;
+ av_log(h, AV_LOG_TRACE, "[DPB Trace] output poi=%d\n", min_poi);
+ } else {
+ *got_frame = 0;
+ }
+ return;
+}
+
+AVS2Frame* ff_avs2_dpb_get_frame_by_doi(AVS2Context *h, int doi)
+{
+ int i;
+ AVS2Frame* iter;
+ for (i = 0; i < AVS2_MAX_DPB_COUNT; i++) {
+ iter = &h->DPB[i];
+ if (IS_DPB_FRAME_INUSE(iter) && iter->pic_header.doi == doi) {
+ return iter;
+ }
+ }
+ return NULL;
+}
+
+static inline AVS2Frame* ff_avs2_dpb_get_unused_frame(AVS2Context *h)
+{
+ int i;
+ AVS2Frame* iter;
+ for (i = 0; i < AVS2_MAX_DPB_COUNT; i++) {
+ iter = &h->DPB[i];
+ if (IS_DPB_FRAME_UNUSED(iter))
+ return iter;
+ }
+ return NULL;
+}
+
+static int ff_avs2_dpb_get_current_frame(AVS2Context *h)
+{
+ int i, ret;
+ AVS2PicHeader *pic = &h->pic;
+ AVS2RefCfgSet *rcs = &h->seq.rcs[pic->rcs_index];
+ h->curr_frame = ff_avs2_dpb_get_unused_frame(h);
+ if (!h->curr_frame) {
+ av_log(h, AV_LOG_ERROR, "Can't get unused frame buffer for decoding !!!\n");
+ return AVERROR(ENOBUFS);
+ }
+
+ h->curr_frame->dpb_marks |= AVS2_DPB_MARK_USED;
+ memcpy(&h->curr_frame->pic_header, &h->pic, sizeof(AVS2PicHeader));
+ h->curr_frame->pic_type = ff_avs2_get_pic_type(&h->pic);
+ h->curr_frame->poi = ff_avs2_get_pic_poi(&h->seq, &h->pic);
+ h->curr_frame->n_slice = 0;
+
+ h->curr_frame->b_ref = rcs->b_ref_by_others;
+ h->curr_frame->n_ref = rcs->n_ref;
+ for (i = 0; i < rcs->n_ref; i++) {
+ uint8_t ref_doi = pic->doi - rcs->ref_delta_doi[i];
+ AVS2Frame* ref_frame = ff_avs2_dpb_get_frame_by_doi(h, ref_doi);
+ if (!ref_frame) {
+ av_log(h, AV_LOG_ERROR, "Can't get ref frame with doi=%d in dpb, "
+ "curr_doi=%d !!!\n", ref_doi, pic->doi);
+ return AVERROR_INVALIDDATA;
+ }
+ h->curr_frame->ref_doi[i] = ref_doi;
+ h->curr_frame->ref_poi[i] = ref_frame->poi;
+ }
+
+ ret = ff_get_buffer(h->avctx, h->curr_frame->frame, AV_GET_BUFFER_FLAG_REF);
+ AVS2_CHECK_RET(ret);
+
+ if (h->avctx->hwaccel) {
+ const FFHWAccel *hwaccel = ffhwaccel(h->avctx->hwaccel);
+ av_assert0(!h->curr_frame->hwaccel_picture_private);
+ if (hwaccel->frame_priv_data_size) {
+ h->curr_frame->hwaccel_priv_buf = av_buffer_allocz(hwaccel->frame_priv_data_size);
+ if (!h->curr_frame->hwaccel_priv_buf)
+ return AVERROR(ENOMEM);
+ h->curr_frame->hwaccel_picture_private = h->curr_frame->hwaccel_priv_buf->data;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * @brief Chapter 9.2.4 of GY/T 299.1-2016
+ * Update dpb marks for all buffered dpb frames. After update, dpb frames
+ * could be output or removed from dpb.
+ */
+static void ff_avs2_dpb_marks_update(AVS2Context *h)
+{
+ int i;
+ AVS2Frame* iter;
+ AVS2Frame* curr = h->curr_frame;
+ AVS2PicHeader *pic = NULL;
+ AVS2RefCfgSet *rcs = NULL;
+
+ if (!h->curr_frame)
+ return;
+ pic = &curr->pic_header;
+ rcs = &h->seq.rcs[pic->rcs_index];
+
+ /**
+ * mark unref pictures
+ */
+ for (i = 0; i < rcs->n_rm; i++) {
+ uint8_t rm_doi = pic->doi - rcs->rm_delta_doi[i];
+ iter = ff_avs2_dpb_get_frame_by_doi(h, rm_doi);
+ if (!iter) {
+ if (rcs->n_rm == 1 && rcs->rm_delta_doi[i] == 1) {
+ av_log(h, AV_LOG_TRACE, "[DPB Trace] Sliding window DPB update.\n");
+ } else {
+ av_log(h, AV_LOG_WARNING, "Can't get ref frame with doi=%d in dpb, "
+ "curr_doi=%d !!!\n", rm_doi, pic->doi);
+ }
+ continue;
+ }
+
+ iter->dpb_marks |= AVS2_DPB_MARK_UNREF;
+ }
+
+ /**
+ * mark current picture
+ */
+ if (curr->pic_type == AVS2_PIC_GB) {
+ curr->dpb_marks |= AVS2_DPB_MARK_REF;
+ curr->dpb_marks |= AVS2_DPB_MARK_OUTPUTED;
+ } else if (rcs->b_ref_by_others) {
+ curr->dpb_marks |= AVS2_DPB_MARK_REF;
+ } else {
+ curr->dpb_marks |= AVS2_DPB_MARK_UNREF;
+ }
+
+ /**
+ * mark outputable pictures
+ */
+ for (i = 0; i < AVS2_MAX_DPB_COUNT; i++) {
+ iter = &h->DPB[i];
+ if (IS_DPB_FRAME_UNUSED(iter) || iter->pic_type == AVS2_PIC_GB)
+ continue;
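+ /* DOI wraps modulo 256 in the bitstream; the uint8_t casts keep
+ * the comparison correct across the wraparound */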
+ if ((uint8_t)(iter->pic_header.doi + iter->pic_header.output_delay) <= (uint8_t)pic->doi) {
+ iter->dpb_marks |= AVS2_DPB_MARK_OUTPUTABLE;
+ }
+ }
+}
+
+static void ff_avs2_dpb_mark_eos(AVS2Context *h)
+{
+ int i;
+ AVS2Frame* iter;
+ for (i = 0; i < AVS2_MAX_DPB_COUNT; i++) {
+ iter = &h->DPB[i];
+ if (IS_DPB_FRAME_UNUSED(iter))
+ continue;
+ iter->dpb_marks |= AVS2_DPB_MARK_UNREF;
+ iter->dpb_marks |= AVS2_DPB_MARK_OUTPUTABLE;
+ }
+}
+
+static void ff_avs2_dpb_remove_all_removable(AVS2Context *h)
+{
+ int i;
+ AVS2Frame* iter;
+ for (i = 0; i < AVS2_MAX_DPB_COUNT; i++) {
+ iter = &h->DPB[i];
+ if (IS_DPB_FRAME_REMOVABLE(iter)) {
+ ff_avs2_dpb_remove_frame(h, iter);
+ }
+ }
+}
+
+static int ff_avs2_get_pixel_format(AVCodecContext *avctx)
+{
+ AVS2Context *h = avctx->priv_data;
+ AVS2SeqHeader *seq = &h->seq;
+
+ int ret;
+ enum AVPixelFormat pix_fmt = AV_PIX_FMT_YUV420P;
+#define HWACCEL_MAX (CONFIG_AVS2_VAAPI_HWACCEL)
+ enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmtp = pix_fmts;
+
+ if (seq->output_bit_depth == 10) {
+ pix_fmt = AV_PIX_FMT_YUV420P10;
+ }
+
+ av_log(avctx, AV_LOG_DEBUG, "AVS2 decode get format: %s.\n",
+ av_get_pix_fmt_name(pix_fmt));
+ h->pix_fmt = pix_fmt;
+
+ switch (h->pix_fmt) {
+ case AV_PIX_FMT_YUV420P:
+ case AV_PIX_FMT_YUV420P10:
+#if CONFIG_AVS2_VAAPI_HWACCEL
+ *fmtp++ = AV_PIX_FMT_VAAPI;
+#endif
+ break;
+ }
+
+ *fmtp++ = h->pix_fmt;
+ *fmtp = AV_PIX_FMT_NONE;
+
+ ret = ff_get_format(avctx, pix_fmts);
+ if (ret < 0)
+ return ret;
+
+ avctx->pix_fmt = ret;
+ /**
+ * TODO: Native decoding is not supported yet. Remove this once implemented.
+ */
+ if (!avctx->hwaccel) {
+ av_log(avctx, AV_LOG_WARNING, "Your platform doesn't suppport hardware"
+ " accelerated AVS2 decoding. If you still want to decode this"
+ " stream, build FFmpeg with 'https://github.com/pkuvcl/davs2'.\n");
+ }
+
+ return 0;
+}
+
+static void ff_avs2_set_context_with_seq_header(AVCodecContext *avctx, AVS2SeqHeader* seq)
+{
+ avctx->coded_width = seq->width;
+ avctx->coded_height = seq->height;
+ avctx->width = seq->width;
+ avctx->height = seq->height;
+ avctx->has_b_frames = !seq->b_low_delay;
+ avctx->profile = seq->profile_id;
+ avctx->level = seq->level_id;
+ avctx->framerate = ff_avs2_frame_rate_c2q(seq->frame_rate_code);
+ ff_set_sar(avctx, ff_avs2_get_sar(seq));
+
+ //TODO: set color properties
+}
+
+static av_cold int ff_avs2_decode_init(AVCodecContext *avctx)
+{
+ int ret;
+ AVS2Context *h = avctx->priv_data;
+ h->avctx = avctx;
+ h->pix_fmt = AV_PIX_FMT_NONE;
+
+ ret = ff_avs2_dpb_init(h);
+ AVS2_CHECK_RET(ret);
+
+ if (!avctx->internal->is_copy) {
+ if (avctx->extradata_size > 0 && avctx->extradata) {
+ ret = ff_avs2_decode_extradata(h, avctx->extradata, avctx->extradata_size, &h->seq);
+ AVS2_CHECK_RET(ret);
+
+ ff_avs2_set_context_with_seq_header(avctx, &h->seq);
+ }
+ }
+
+ return 0;
+}
+
+static av_cold int ff_avs2_decode_end(AVCodecContext *avctx)
+{
+ AVS2Context *h = avctx->priv_data;
+ ff_avs2_packet_uninit(&h->pkt_split);
+ ff_avs2_dpb_uninit(h);
+ return 0;
+}
+
+static int ff_avs2_decode_frame_data(AVS2Context *h, const uint8_t *data, int size)
+{
+ AVCodecContext *const avctx = h->avctx;
+ int ret = 0;
+ int i_unit = 0;
+ int b_got_pic_hdr = 0;
+
+ ret = ff_avs2_packet_split(&h->pkt_split, data, size, h);
+ AVS2_CHECK_RET(ret);
+
+ for (i_unit = 0; i_unit < h->pkt_split.nb_units; i_unit++) {
+ AVS2EsUnit* unit = &h->pkt_split.units[i_unit];
+ const uint8_t *unit_data = data + unit->data_start;
+ const int unit_size = unit->data_len;
+ GetByteContext _bs, *bs=&_bs;
+ bytestream2_init(bs, unit_data, unit_size);
+
+ switch (unit->start_code)
+ {
+ case AVS2_STC_SEQ_HEADER:
+ if (b_got_pic_hdr) {
+ av_log(h, AV_LOG_ERROR, "Sequence header should come before picture header !!!\n");
+ return AVERROR_INVALIDDATA;
+ }
+ ret = ff_avs2_decode_seq_header(h, bs, &h->seq);
+ AVS2_CHECK_RET(ret);
+
+ if (!h->b_got_seq) {
+ ff_avs2_set_context_with_seq_header(avctx, &h->seq);
+ h->b_got_seq = 1;
+ }
+
+ if (h->pix_fmt == AV_PIX_FMT_NONE) {
+ ret = ff_avs2_get_pixel_format(avctx);
+ AVS2_CHECK_RET(ret);
+ }
+ break;
+ case AVS2_STC_EXTENSION:
+ ret = ff_avs2_decode_ext(h, bs, !b_got_pic_hdr);
+ break;
+ case AVS2_STC_USER_DATA:
+ ret = ff_avs2_decode_user_data(h, bs);
+ break;
+ case AVS2_STC_INTRA_PIC:
+ case AVS2_STC_INTER_PIC:
+ if (!h->b_got_seq) {
+ av_log(h, AV_LOG_ERROR, "No sequence header before picture header !!!\n");
+ return AVERROR_INVALIDDATA;
+ }
+ ret = ff_avs2_decode_pic_header(h, unit->start_code, bs, &h->pic);
+ AVS2_CHECK_RET(ret);
+ b_got_pic_hdr = 1;
+
+ ff_avs2_dpb_trace(h, "start of pic");
+ ret = ff_avs2_dpb_get_current_frame(h);
+ AVS2_CHECK_RET(ret);
+
+ if (h->avctx->hwaccel) {
+ ret = FF_HW_CALL(h->avctx, start_frame, unit_data, unit_size);
+ AVS2_CHECK_RET(ret);
+ }
+ break;
+ case AVS2_STC_SEQ_END:
+ case AVS2_STC_VIDEO_EDIT:
+ break;
+ default:
+ /**
+ * Slice Data
+ */
+ if (!b_got_pic_hdr) {
+ av_log(h, AV_LOG_ERROR, "No picture header before slice data !!!\n");
+ return AVERROR_INVALIDDATA;
+ }
+ h->curr_frame->n_slice += 1;
+ ret = ff_avs2_decode_slice_header(h, unit->start_code, bs);
+ AVS2_CHECK_RET(ret);
+ if (h->avctx->hwaccel) {
+ ret = FF_HW_CALL(h->avctx, decode_slice, unit_data, unit_size);
+ AVS2_CHECK_RET(ret);
+ } else {
+ //TODO: Native decoding is not supported yet. Remove this once implemented.
+ av_log(h, AV_LOG_WARNING, "AVS2 SW decoding is not supported yet !!!"
+ " Decode this stream using libdavs2 ('https://github.com/pkuvcl/davs2')."
+ " Otherwise FFmpeg will just output meaningless fake frames !!!\n");
+ if (h->curr_frame->n_slice == 1)
+ avs2_fake_output(h, h->curr_frame);
+ }
+ break;
+ }
+
+ AVS2_CHECK_RET(ret);
+ }
+
+ if (h->curr_frame && h->curr_frame->n_slice > 0 && h->avctx->hwaccel) {
+ ret = FF_HW_SIMPLE_CALL(h->avctx, end_frame);
+ AVS2_CHECK_RET(ret);
+ }
+
+ return size;
+}
+
+static int ff_avs2_decode_frame(AVCodecContext *avctx, AVFrame *out_frame,
+ int *got_output, AVPacket *pkt)
+{
+ int ret;
+ size_t new_extradata_size;
+ uint8_t *new_extradata;
+ AVS2Context *h = avctx->priv_data;
+
+ if (!pkt || pkt->size <= 0) {
+ ff_avs2_dpb_mark_eos(h);
+ ff_avs2_dpb_output_frame(h, out_frame, got_output);
+ ff_avs2_dpb_remove_all_removable(h);
+ ff_avs2_dpb_trace(h, "end of stream");
+ return 0;
+ }
+
+ new_extradata = av_packet_get_side_data(pkt, AV_PKT_DATA_NEW_EXTRADATA,
+ &new_extradata_size);
+ if (new_extradata && new_extradata_size > 0) {
+ av_log(avctx, AV_LOG_DEBUG, "new_extradata found\n");
+ ret = ff_avs2_decode_extradata(h, new_extradata, new_extradata_size, &h->seq);
+ AVS2_CHECK_RET(ret);
+ }
+
+ ret = ff_avs2_decode_frame_data(h, pkt->data, pkt->size);
+
+ ff_avs2_dpb_marks_update(h);
+ ff_avs2_dpb_output_frame(h, out_frame, got_output);
+ ff_avs2_dpb_remove_all_removable(h);
+ ff_avs2_dpb_trace(h, "end of pic");
+ h->curr_frame = NULL;
+
+ return pkt->size;
+}
+
+static void ff_avs2_decode_flush(AVCodecContext *avctx)
+{
+ int i;
+ AVS2Context *h = avctx->priv_data;
+
+ h->curr_frame = NULL;
+ for (i = 0; i < AVS2_MAX_DPB_COUNT; i++)
+ ff_avs2_dpb_remove_frame(h, &h->DPB[i]);
+
+ ff_avs2_dpb_trace(h, "decode flush");
+}
+
+static const AVClass avs2_class = {
+ .class_name = "AVS2 video decoder",
+ .item_name = av_default_item_name,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
+const FFCodec ff_avs2_decoder = {
+ .p.name = "avs2",
+ CODEC_LONG_NAME("AVS2-Video; Chinese GY/T 299.1-2016 or GB/T 33475.2-2016; IEEE 1857.4-2018"),
+ .p.type = AVMEDIA_TYPE_VIDEO,
+ .p.id = AV_CODEC_ID_AVS2,
+ .priv_data_size = sizeof(AVS2Context),
+ .init = ff_avs2_decode_init,
+ .close = ff_avs2_decode_end,
+ FF_CODEC_DECODE_CB(ff_avs2_decode_frame),
+ .flush = ff_avs2_decode_flush,
+ .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
+ .hw_configs = (const AVCodecHWConfigInternal *const []) {
+#if CONFIG_AVS2_VAAPI_HWACCEL
+ HWACCEL_VAAPI(avs2),
+#endif
+ NULL
+ },
+ .p.priv_class = &avs2_class,
+ .p.profiles = NULL_IF_CONFIG_SMALL(ff_avs2_profiles),
+};
diff --git a/libavcodec/avs2dec.h b/libavcodec/avs2dec.h
new file mode 100644
index 0000000000..1d902e2033
--- /dev/null
+++ b/libavcodec/avs2dec.h
@@ -0,0 +1,48 @@
+/*
+ * Chinese AVS2-Video (GY/T 299.1-2016 or IEEE 1857.4-2018) decoder.
+ * Copyright (c) 2022 JianfengZheng <jianfeng.zheng@mthreads.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Chinese AVS2-Video (GY/T 299.1-2016 or IEEE 1857.4-2018) decoder
+ * @author JianfengZheng <jianfeng.zheng@mthreads.com>
+ */
+
+#ifndef AVCODEC_AVS2DEC_H
+#define AVCODEC_AVS2DEC_H
+
+#include "avcodec.h"
+#include "internal.h"
+#include "bytestream.h"
+#include "avs2.h"
+
+int ff_avs2_decode_ext(AVS2Context *h, GetByteContext* bs, int b_seq_ext);
+int ff_avs2_decode_user_data(AVS2Context *h, GetByteContext* bs);
+int ff_avs2_decode_extradata(AVS2Context *h, const uint8_t *data, int size,
+ AVS2SeqHeader *seq);
+int ff_avs2_decode_seq_header(AVS2Context *h, GetByteContext* bs,
+ AVS2SeqHeader *seq);
+int ff_avs2_decode_pic_header(AVS2Context *h, uint32_t stc,
+ GetByteContext* bs, AVS2PicHeader *pic);
+int ff_avs2_decode_slice_header(AVS2Context *h, uint32_t stc, GetByteContext *bs);
+
+AVS2Frame* ff_avs2_dpb_get_frame_by_doi(AVS2Context *h, int doi);
+
+#endif /* AVCODEC_AVS2DEC_H */
diff --git a/libavcodec/avs2dec_headers.c b/libavcodec/avs2dec_headers.c
new file mode 100644
index 0000000000..edbdd7a16d
--- /dev/null
+++ b/libavcodec/avs2dec_headers.c
@@ -0,0 +1,787 @@
+/*
+ * Chinese AVS2-Video (GY/T 299.1-2016 or IEEE 1857.4-2018) decoder.
+ * Copyright (c) 2022 JianfengZheng <jianfeng.zheng@mthreads.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file avs2dec_headers.c
+ * @author JianfengZheng <jianfeng.zheng@mthreads.com>
+ * @brief Chinese AVS2-Video (GY/T 299.1-2016) headers decoding
+ */
+
+#include <ctype.h>
+#include "libavutil/avassert.h"
+#include "libavutil/pixdesc.h"
+#include "get_bits.h"
+#include "golomb.h"
+#include "profiles.h"
+#include "mpegvideodec.h"
+#include "avs2dec.h"
+
+
+static const uint8_t avs2_wq_model88[4][64] = {
+ // l a b c d h
+ // 0 1 2 3 4 5
+ {
+ // Mode 0
+ 0,0,0,4,4,4,5,5,
+ 0,0,3,3,3,3,5,5,
+ 0,3,2,2,1,1,5,5,
+ 4,3,2,2,1,5,5,5,
+ 4,3,1,1,5,5,5,5,
+ 4,3,1,5,5,5,5,5,
+ 5,5,5,5,5,5,5,5,
+ 5,5,5,5,5,5,5,5 },
+ {
+ // Mode 1
+ 0,0,0,4,4,4,5,5,
+ 0,0,4,4,4,4,5,5,
+ 0,3,2,2,2,1,5,5,
+ 3,3,2,2,1,5,5,5,
+ 3,3,2,1,5,5,5,5,
+ 3,3,1,5,5,5,5,5,
+ 5,5,5,5,5,5,5,5,
+ 5,5,5,5,5,5,5,5 },
+ {
+ // Mode 2
+ 0,0,0,4,4,3,5,5,
+ 0,0,4,4,3,2,5,5,
+ 0,4,4,3,2,1,5,5,
+ 4,4,3,2,1,5,5,5,
+ 4,3,2,1,5,5,5,5,
+ 3,2,1,5,5,5,5,5,
+ 5,5,5,5,5,5,5,5,
+ 5,5,5,5,5,5,5,5 },
+ {
+ // Mode 3
+ 0,0,0,3,2,1,5,5,
+ 0,0,4,3,2,1,5,5,
+ 0,4,4,3,2,1,5,5,
+ 3,3,3,3,2,5,5,5,
+ 2,2,2,2,5,5,5,5,
+ 1,1,1,5,5,5,5,5,
+ 5,5,5,5,5,5,5,5,
+ 5,5,5,5,5,5,5,5 }
+};
+
+static const uint8_t avs2_wq_model44[4][16] = {
+ // l a b c d h
+ // 0 1 2 3 4 5
+ {
+ // Mode 0
+ 0, 4, 3, 5,
+ 4, 2, 1, 5,
+ 3, 1, 1, 5,
+ 5, 5, 5, 5 },
+ {
+ // Mode 1
+ 0, 4, 4, 5,
+ 3, 2, 2, 5,
+ 3, 2, 1, 5,
+ 5, 5, 5, 5 },
+ {
+ // Mode 2
+ 0, 4, 3, 5,
+ 4, 3, 2, 5,
+ 3, 2, 1, 5,
+ 5, 5, 5, 5 },
+ {
+ // Mode 3
+ 0, 3, 1, 5,
+ 3, 4, 2, 5,
+ 1, 2, 2, 5,
+ 5, 5, 5, 5 }
+};
+
+static const uint8_t avs2_default_wq_param[2][6]=
+{
+ { 67,71,71,80,80,106 },
+ { 64,49,53,58,58,64 }
+};
+
+
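+/* Parse one reference configuration set: the delta-DOI list of
+ * reference pictures and the list of pictures to remove from the DPB */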
+static int ff_avs2_decode_rcs(GetBitContext* gb, AVS2RefCfgSet* rcs, void* logctx) {
+ int j = 0;
+ rcs->b_ref_by_others = get_bits1(gb);
+ rcs->n_ref = get_bits(gb, 3);
+ for (j = 0; j < rcs->n_ref; j++) {
+ rcs->ref_delta_doi[j] = get_bits(gb, 6);
+ }
+ rcs->n_rm = get_bits(gb, 3);
+ for (j = 0; j < rcs->n_rm; j++) {
+ rcs->rm_delta_doi[j] = get_bits(gb, 6);
+ }
+ if(check_marker(logctx, gb, "[end of 'rcs[i]']")==0)
+ return AVERROR_INVALIDDATA;
+ return 0;
+}
+
+static int ff_avs2_decode_wqm(GetBitContext* gb, AVS2WQMatrix *wqm) {
+ int i;
+ for (i = 0; i < 16; i++) {
+ wqm->m44[i] = get_ue_golomb(gb);
+ }
+ for (i = 0; i < 64; i++) {
+ wqm->m88[i] = get_ue_golomb(gb);
+ }
+ return 0;
+}
+
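+/* Clip the 8 symmetric ALF taps to [-64, 63] and derive the center tap
+ * from the remainder (part of the Chapter 9.12.2 derivation below) */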
+static void ff_avs2_amend_alf_coeff(int8_t *dst, const int16_t *src)
+{
+ int i, sum = src[8] + 64;
+ for (i = 0; i < 8; i++) {
+ dst[i] = av_clip(src[i], -64, 63);
+ sum -= 2 * dst[i];
+ }
+ dst[8] = av_clip(sum, 0, 127);
+}
+
+/**
+ * Chapter 9.12.2 of GY/T 299.1-2016
+ */
+static void ff_avs2_process_alf_param(int8_t (*coeff)[9], const AVS2AlfParam *alf)
+{
+ int i, j, c = 0;
+ int tab[16] = { 0 };
+ if(alf->b_enable[0]){
+ // distance:[0,2,3,5] -> tab:[0,0, 1,1,1, 2,2,2,2,2, 3,3,3,3,3,3]
+ for (i = 1; i < alf->luma.n_filter; i++) {
+ for (j = 0; j < alf->luma.region_distance[i]; j++) {
+ tab[c+1] = tab[c];
+ c += 1;
+ }
+ tab[c] += 1;
+ }
+ for (i = c; i < 16; i++) {
+ tab[i] = tab[c];
+ }
+
+ for (i = 0; i < 16; i++) {
+ ff_avs2_amend_alf_coeff(coeff[i], alf->luma.coeff[tab[i]]);
+ }
+ }
+ for (i = 0; i < 2; i++) {
+ if(alf->b_enable[i+1])
+ ff_avs2_amend_alf_coeff(coeff[16 + i], alf->chroma[i].coeff);
+ }
+}
+
+static int ff_avs2_decode_alf_param(GetBitContext* gb, AVS2PicHeader *pic)
+{
+ int i, j, s;
+ AVS2AlfParam _alf, *alf = &_alf;
+ memset(alf, 0, sizeof(AVS2AlfParam));
+ for (i = 0; i < 16; i++) {
+ alf->luma.region_distance[i] = i > 0;
+ }
+
+ /**
+ * Chapter 7.1.8 of GY/T 299.1-2016
+ */
+ for (i = 0; i < 3; i++) {
+ alf->b_enable[i] = get_bits1(gb);
+ }
+
+ if (alf->b_enable[0]) {
+ alf->luma.n_filter = get_ue_golomb(gb) + 1;
+ for (i = 0; i < alf->luma.n_filter; i++) {
+ if (i > 0 && alf->luma.n_filter != 16) {
+ alf->luma.region_distance[i] = get_ue_golomb(gb);
+ }
+ for (j = 0; j < 9; j++) {
+ alf->luma.coeff[i][j] = get_se_golomb(gb);
+ }
+ }
+ }
+ for (i = 0; i < 2; i++) {
+ if (alf->b_enable[i+1]) {
+ for (j = 0; j < 9; j++) {
+ alf->chroma[i].coeff[j] = get_se_golomb(gb);
+ }
+ }
+ }
+
+ /**
+ * Chapter 7.2.8 of GY/T 299.1-2016
+ */
+ for (s = 0, i = 0; i < alf->luma.n_filter; i++) {
+ s += alf->luma.region_distance[i];
+ }
+ if (s > 15) {
+ return AVERROR_INVALIDDATA;
+ }
+
+ pic->b_alf_enable[0] = alf->b_enable[0];
+ pic->b_alf_enable[1] = alf->b_enable[1];
+ pic->b_alf_enable[2] = alf->b_enable[2];
+ ff_avs2_process_alf_param(pic->alf_coeff, alf);
+
+ return 0;
+}
+
+int ff_avs2_decode_seq_header(AVS2Context *h, GetByteContext* bs, AVS2SeqHeader *seq)
+{
+ int i;
+ unsigned int br_lower, br_upper;
+ GetBitContext _gb, *gb = &_gb;
+ init_get_bits8(gb, bs->buffer, bs->buffer_end - bs->buffer);
+
+ ff_avs2_set_default_seq_header(&h->seq);
+ seq->profile_id = get_bits(gb, 8);
+ seq->level_id = get_bits(gb, 8);
+ seq->b_progressive = get_bits1(gb);
+ seq->b_field_coding = get_bits1(gb);
+
+ seq->width = get_bits(gb, 14);
+ seq->height = get_bits(gb, 14);
+ if (seq->width < 16 || seq->height < 16) {
+ return AVERROR_INVALIDDATA;
+ }
+
+ seq->chroma_format = get_bits(gb, 2);
+ if (seq->chroma_format != AVS2_CHROMA_YUV_420) {
+ av_log(h, AV_LOG_ERROR, "AVS2 don't support chroma format other than YUV420 !!!\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ /* sample_precision seems useless */
+ if (seq->profile_id == AVS2_PROFILE_MAIN10) {
+ seq->output_bit_depth = 6 + (get_bits(gb, 3) << 1);
+ seq->sample_bit_depth = 6 + (get_bits(gb, 3) << 1);
+ } else {
+ seq->output_bit_depth = 6 + (get_bits(gb, 3) << 1);
+ seq->sample_bit_depth = 8;
+ }
+ if (seq->sample_bit_depth != 8 && seq->sample_bit_depth != 10) {
+ av_log(h, AV_LOG_ERROR, "Invalid sample_precision : %d !!!\n", seq->sample_bit_depth);
+ return AVERROR_INVALIDDATA;
+ }
+ if (seq->output_bit_depth != 8 && seq->output_bit_depth != 10) {
+ av_log(h, AV_LOG_ERROR, "Invalid encoding_precision : %d !!!\n", seq->output_bit_depth);
+ return AVERROR_INVALIDDATA;
+ }
+ if (seq->sample_bit_depth < seq->output_bit_depth) {
+ av_log(h, AV_LOG_ERROR, "encoding_precision smaller than sample_precision !!!\n");
+ return AVERROR_INVALIDDATA;
+ }
+ seq->aspect_ratio_code = get_bits(gb, 4);
+ seq->frame_rate_code = get_bits(gb, 4);
+
+ br_lower = get_bits(gb, 18);
+ if(check_marker(h, gb, "[before 'bit_rate_upper']")==0)
+ return AVERROR_INVALIDDATA;
+ br_upper = get_bits(gb, 12);
+ seq->bitrate = ((br_upper << 18) + br_lower) * (int64_t)400;
+
+ seq->b_low_delay = get_bits1(gb);
+ if(check_marker(h, gb, "[before 'temporal_id_enable_flag']")==0)
+ return AVERROR_INVALIDDATA;
+ seq->b_has_temporal_id = get_bits1(gb);
+ seq->bbv_buffer_size = get_bits(gb, 18);
+ seq->log2_lcu_size = get_bits(gb, 3);
+
+ if (seq->log2_lcu_size < 4 || seq->log2_lcu_size > 6) {
+ av_log(h, AV_LOG_ERROR, "Invalid LCU size: %d\n", seq->log2_lcu_size);
+ return AVERROR_INVALIDDATA;
+ }
+
+ seq->b_enable_wq = get_bits1(gb);
+ if (seq->b_enable_wq) {
+ if (get_bits1(gb)) {
+ ff_avs2_decode_wqm(gb, &seq->wqm);
+ }
+ }
+
+ seq->b_disable_scene_pic = get_bits1(gb);
+ seq->b_multi_hypothesis_skip = get_bits1(gb);
+ seq->b_dual_hypothesis_prediction = get_bits1(gb);
+ seq->b_weighted_skip = get_bits1(gb);
+
+ seq->b_amp = get_bits1(gb);
+ seq->b_nsqt = get_bits1(gb);
+ seq->b_nsip = get_bits1(gb);
+ seq->b_2nd_transform = get_bits1(gb);
+ seq->b_sao = get_bits1(gb);
+ seq->b_alf = get_bits1(gb);
+ seq->b_pmvr = get_bits1(gb);
+
+ if(check_marker(h, gb, "[before 'num_of_rcs']")==0)
+ return AVERROR_INVALIDDATA;
+ seq->n_rcs = get_bits(gb, 6);
+ if (seq->n_rcs > AVS2_MAX_RCS_COUNT) {
+ av_log(h, AV_LOG_ERROR, "num_of_rcs(%d) should not exceed 32\n", seq->n_rcs);
+ return AVERROR_INVALIDDATA;
+ }
+ for (i = 0; i < seq->n_rcs; i++) {
+ AVS2RefCfgSet* rcs = &seq->rcs[i];
+ if (ff_avs2_decode_rcs(gb, rcs, h) < 0) {
+ return AVERROR_INVALIDDATA;
+ }
+ }
+
+ if (seq->b_low_delay == 0) {
+ seq->output_reorder_delay = get_bits(gb, 5);
+ }
+ seq->b_cross_slice_loop_filter = get_bits1(gb);
+
+ skip_bits(gb, 2);
+ align_get_bits(gb);
+
+ av_log(h, AV_LOG_INFO, "Got seq header: %dx%d, lcu:%d, profile=%d, level=%d\n",
+ seq->width, seq->height, seq->log2_lcu_size,
+ seq->profile_id, seq->level_id);
+ return 0;
+}
+
+int ff_avs2_decode_user_data(AVS2Context *h, GetByteContext* bs)
+{
+ const uint8_t *p;
+ const uint8_t *log_fmt;
+ int log_level;
+
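+    /* if the whole payload is printable, log it as text at DEBUG; otherwise dump hex bytes at TRACE */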
+ for (p = bs->buffer; p < bs->buffer_end && isprint(*p); p++) {}
+ if (p == bs->buffer_end) {
+ log_level = AV_LOG_DEBUG;
+ log_fmt = "%c";
+ } else {
+ log_level = AV_LOG_TRACE;
+ log_fmt = "%02x ";
+ }
+
+ av_log(h, log_level, "Got user Data: ");
+ for (p = bs->buffer; p < bs->buffer_end; p++)
+ av_log(h, log_level, log_fmt, *p);
+ av_log(h, log_level, "\n");
+
+ return 0;
+}
+
+static int ff_avs2_decode_seq_display_ext(AVS2Context *h, GetBitContext* gb)
+{
+ AVS2SeqDisplayExt* ext = &h->seq_display_ext;
+ ext->extension_id = AVS2_EXT_SEQ_DISPLAY;
+
+ ext->video_format = get_bits(gb, 3);
+ ext->b_full_range = get_bits1(gb);
+ ext->b_color_desc = get_bits1(gb);
+ if (ext->b_color_desc) {
+ ext->color_primaries = get_bits(gb, 8);
+ ext->color_transfer = get_bits(gb, 8);
+ ext->color_matrix = get_bits(gb, 8);
+ }
+ ext->display_h = get_bits(gb, 14);
+    if (!check_marker(h, gb, "[sequence_display_extension]"))
+        return AVERROR_INVALIDDATA;
+ ext->display_w = get_bits(gb, 14);
+
+ ext->b_td_mode = get_bits1(gb);
+ if (ext->b_td_mode) {
+ ext->td_packing_mode = get_bits(gb, 8);
+ ext->b_view_reverse = get_bits1(gb);
+ }
+
+ av_log(h, AV_LOG_INFO, "Got sequence_display_extension\n");
+ return 0;
+}
+
+static int ff_avs2_decode_temporal_scale_ext(AVS2Context *h, GetBitContext* gb)
+{
+ int i, fr_code, br_lower, br_upper;
+ AVS2TemporalScaleExt* ext = &h->tempo_scale_ext;
+ ext->extension_id = AVS2_EXT_TEMPORAL_SCALE;
+
+ av_log(h, AV_LOG_INFO, "got temporal_scalability_extension()\n");
+
+ ext->n_level = get_bits(gb, 3);
+    if (get_bits_left(gb) < 35 * ext->n_level) {
+        av_log(h, AV_LOG_ERROR, "Not enough data for temporal_scalability_extension()\n");
+        return AVERROR_INVALIDDATA;
+ }
+
+ for (i = 0; i < ext->n_level; i++) {
+ fr_code = get_bits(gb, 4);
+ br_lower = get_bits(gb, 18);
+        if (!check_marker(h, gb, "[temporal_scale_ext]"))
+            return AVERROR_INVALIDDATA;
+ br_upper = get_bits(gb, 12);
+ ext->level[i].framerate = ff_avs2_frame_rate_c2q(fr_code);
+ ext->level[i].bitrate = ((br_upper << 18) + br_lower) * (int64_t)400;
+ }
+
+ av_log(h, AV_LOG_INFO, "Got temporal_scalability_extension: %d level\n", ext->n_level);
+ for (i = 0; i < ext->n_level; i++) {
+ av_log(h, AV_LOG_INFO, "level[%d] : framerate=%f, bitrate=%" PRId64 "\n",
+ i, av_q2d(ext->level[i].framerate), ext->level[i].bitrate);
+ }
+ return 0;
+}
+
+static int ff_avs2_decode_copyright_ext(AVS2Context *h, GetBitContext* gb, AVS2CopyrightExt* ext)
+{
+    if (get_bits_left(gb) < 1 + 8 + 1 + 7 + (1 + 20) + (1 + 22) + (1 + 22)) {
+        av_log(h, AV_LOG_ERROR, "Not enough data for copyright_extension()\n");
+        return AVERROR_INVALIDDATA;
+ }
+ ext->extension_id = AVS2_EXT_COPYRIGHT;
+
+ ext->b_flag = get_bits1(gb);
+ ext->copy_id = get_bits(gb, 8);
+ ext->b_original = get_bits1(gb);
+ skip_bits(gb, 7);
+
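+    /* copyright_number: 64 bits sent as 20 + 22 + 22, each chunk preceded by a marker bit */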
+ if(check_marker(h, gb, "copyright_number_1")==0)
+ return AVERROR_INVALIDDATA;
+ ext->copy_number = (uint64_t)get_bits(gb, 20) << 44;
+
+ if(check_marker(h, gb, "copyright_number_2")==0)
+ return AVERROR_INVALIDDATA;
+ ext->copy_number += (uint64_t)get_bits(gb, 22) << 22;
+
+ if(check_marker(h, gb, "copyright_number_3")==0)
+ return AVERROR_INVALIDDATA;
+ ext->copy_number += (uint64_t)get_bits(gb, 22);
+
+ av_log(h, AV_LOG_INFO, "Got copyright_extension: original:%d, id:%d, number%" PRId64 "\n",
+ ext->b_original, ext->copy_id, ext->copy_number);
+ return 0;
+}
+
+static int ff_avs2_decode_pic_display_ext(AVS2Context *h, GetBitContext* gb)
+{
+ int i = 0;
+ AVS2SeqHeader *seq = &h->seq;
+ AVS2PicHeader *pic = &h->pic;
+ AVS2PicDisplayExt *ext = &h->pic_display_ext;
+ ext->extension_id = AVS2_EXT_PIC_DISPLAY;
+
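+    /* the offset count follows the frame/field repeat structure, analogous to MPEG-2 picture_display_extension */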
+ if (seq->b_progressive) {
+ if (pic->b_repeat_first_field) {
+ ext->n_offset = pic->b_top_field_first ? 3 : 2;
+ } else {
+ ext->n_offset = 1;
+ }
+ } else {
+ if (pic->b_picture_structure == 0) {
+ ext->n_offset = 1;
+ } else {
+ ext->n_offset = pic->b_repeat_first_field ? 3 : 2;
+ }
+ }
+
+ if (get_bits_left(gb) < 34 * ext->n_offset) {
+ av_log(h, AV_LOG_ERROR, "NOT enough data for picture_display_extension()\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ for (i = 0; i < ext->n_offset; i++) {
+ ext->offset[i][0] = (int16_t)get_bits(gb, 16);
+        if (!check_marker(h, gb, "picture_centre_horizontal_offset"))
+            return AVERROR_INVALIDDATA;
+
+ ext->offset[i][1] = (int16_t)get_bits(gb, 16);
+        if (!check_marker(h, gb, "picture_centre_vertical_offset"))
+            return AVERROR_INVALIDDATA;
+ }
+
+ av_log(h, AV_LOG_INFO, "Got picture_display_extension\n");
+ return 0;
+}
+
+int ff_avs2_decode_ext(AVS2Context *h, GetByteContext* bs, int b_seq_ext)
+{
+ int ret = 0;
+ int ext_type = 0;
+ GetBitContext _gb, *gb = &_gb;
+ init_get_bits8(gb, bs->buffer, bs->buffer_end - bs->buffer);
+
+ ext_type = get_bits(gb, 4);
+ if (b_seq_ext) {
+ if (ext_type == AVS2_EXT_SEQ_DISPLAY) {
+ ret = ff_avs2_decode_seq_display_ext(h, gb);
+ } else if (ext_type == AVS2_EXT_TEMPORAL_SCALE) {
+ ret = ff_avs2_decode_temporal_scale_ext(h, gb);
+ } else if (ext_type == AVS2_EXT_COPYRIGHT) {
+ ret = ff_avs2_decode_copyright_ext(h, gb, &h->seq_copyright_ext);
+        } else if (ext_type == AVS2_EXT_MASTERING) {
+            av_log(h, AV_LOG_WARNING, "Skip mastering_display_and_content_metadata_extension()\n");
+        } else if (ext_type == AVS2_EXT_CAMERA_PARAM) {
+            av_log(h, AV_LOG_WARNING, "Skip seq camera_parameters_extension()\n");
+        } else {
+            av_log(h, AV_LOG_WARNING, "Skip seq reserved_extension_data_byte\n");
+        }
+ } else {
+ if (ext_type == AVS2_EXT_COPYRIGHT) {
+ ret = ff_avs2_decode_copyright_ext(h, gb, &h->pic_copyright_ext);
+ } else if (ext_type == AVS2_EXT_PIC_DISPLAY) {
+ ret = ff_avs2_decode_pic_display_ext(h, gb);
+        } else if (ext_type == AVS2_EXT_CAMERA_PARAM) {
+            av_log(h, AV_LOG_WARNING, "Skip pic camera_parameters_extension()\n");
+        } else if (ext_type == AVS2_EXT_ROI_PARAM) {
+            av_log(h, AV_LOG_WARNING, "Skip roi_parameters_extension()\n");
+        } else {
+            av_log(h, AV_LOG_WARNING, "Skip pic reserved_extension_data_byte\n");
+        }
+ }
+ AVS2_CHECK_RET(ret);
+
+ return 0;
+}
+
+int ff_avs2_decode_extradata(AVS2Context *h, const uint8_t *data, int size,
+ AVS2SeqHeader *seq)
+{
+ int ret = 0;
+ int i_unit = 0;
+
+ ret = ff_avs2_packet_split(&h->pkt_split, data, size, h);
+ AVS2_CHECK_RET(ret);
+
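+    /* extradata may only carry leading units: sequence header, sequence-level extensions, user data */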
+ for (i_unit = 0; i_unit < h->pkt_split.nb_units; i_unit++) {
+ AVS2EsUnit* unit = &h->pkt_split.units[i_unit];
+ GetByteContext _bs, *bs=&_bs;
+ bytestream2_init(bs, data + unit->data_start, unit->data_len);
+
+ switch (unit->start_code)
+ {
+ case AVS2_STC_SEQ_HEADER:
+ ret = ff_avs2_decode_seq_header(h, bs, &h->seq);
+ break;
+ case AVS2_STC_EXTENSION:
+ ret = ff_avs2_decode_ext(h, bs, 1);
+ break;
+ case AVS2_STC_USER_DATA:
+ ret = ff_avs2_decode_user_data(h, bs);
+ break;
+ default:
+ av_log(h, AV_LOG_ERROR, "Extradata contain un-supported start code 0x%08x !!!\n",
+ unit->start_code);
+ return AVERROR_INVALIDDATA;
+ }
+
+ AVS2_CHECK_RET(ret);
+ }
+
+ return 0;
+}
+
+int ff_avs2_decode_pic_header(AVS2Context *h, uint32_t stc,
+ GetByteContext* bs, AVS2PicHeader *pic)
+{
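+    /* strip start-code emulation-prevention ("pseudo code") bytes before bit-level parsing */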
+    int i, ret = 0, buf_size;
+    AVS2SeqHeader *seq = &h->seq;
+    GetBitContext _gb, *gb = &_gb;
+
+    uint8_t *rm_pseudo_buffer = av_mallocz(bs->buffer_end - bs->buffer);
+    if (!rm_pseudo_buffer)
+        return AVERROR(ENOMEM);
+
+    buf_size = ff_avs2_remove_pseudo_code(rm_pseudo_buffer, bs->buffer, bs->buffer_end - bs->buffer);
+
+    init_get_bits8(gb, rm_pseudo_buffer, buf_size);
+
+ ff_avs2_set_default_pic_header(&h->seq, &h->pic, stc == AVS2_STC_INTRA_PIC);
+ pic->bbv_delay = get_bits_long(gb, 32);
+
+ if (pic->b_intra) {
+ pic->b_time_code = get_bits1(gb);
+ if (pic->b_time_code) {
+ skip_bits1(gb);
+ pic->time_code_hh = get_bits(gb, 5);
+ pic->time_code_mm = get_bits(gb, 6);
+ pic->time_code_ss = get_bits(gb, 6);
+ pic->time_code_ff = get_bits(gb, 6);
+ }
+ if (seq->b_disable_scene_pic == 0) {
+ pic->b_scene_pic = get_bits1(gb);
+ if (pic->b_scene_pic) {
+ pic->b_scene_pic_output = get_bits1(gb);
+ }
+ }
+ } else {
+ pic->pic_coding_type = get_bits(gb, 2);
+ if (seq->b_disable_scene_pic == 0) {
+ if (pic->pic_coding_type == AVS2_PCT_P) {
+ pic->b_scene_pred = get_bits1(gb);
+ }
+ if (pic->pic_coding_type != AVS2_PCT_B && pic->b_scene_pred == 0) {
+ pic->b_scene_ref = get_bits1(gb);
+ }
+ }
+ }
+
+ pic->doi = get_bits(gb, 8);
+ if (seq->b_has_temporal_id) {
+ pic->temporal_id = get_bits(gb, 3);
+ }
+
+ if (seq->b_low_delay == 0) {
+ if (pic->b_intra) {
+ if (pic->b_scene_pic == 0 || pic->b_scene_pic_output == 1) {
+ pic->output_delay = get_ue_golomb(gb);
+ }
+ } else {
+ pic->output_delay = get_ue_golomb(gb);
+ }
+ }
+
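+    /* either pick an RCS from the sequence table by index, or parse an inline RCS into the spare slot seq->rcs[n_rcs] */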
+    pic->b_use_rcs = get_bits1(gb);
+    if (pic->b_use_rcs) {
+        pic->rcs_index = get_bits(gb, 5);
+    } else {
+        pic->rcs_index = seq->n_rcs;
+        ret = ff_avs2_decode_rcs(gb, &seq->rcs[seq->n_rcs], h);
+        if (ret < 0)
+            goto error; /* don't leak rm_pseudo_buffer on error */
+    }
+
+ if (seq->b_low_delay) {
+ pic->bbv_check_times = get_ue_golomb(gb);
+ }
+
+ pic->b_progressive_frame = get_bits1(gb);
+ if (pic->b_progressive_frame == 0) {
+ pic->b_picture_structure = get_bits1(gb);
+ }
+ pic->b_top_field_first = get_bits1(gb);
+ pic->b_repeat_first_field = get_bits1(gb);
+ if (seq->b_field_coding) {
+ pic->b_top_field_picture = get_bits1(gb);
+ skip_bits1(gb);
+ }
+
+ pic->b_fixed_qp = get_bits1(gb);
+ pic->pic_qp = get_bits(gb, 7);
+
+ if (!pic->b_intra) {
+ if (!(pic->pic_coding_type == AVS2_PCT_B && pic->b_picture_structure)) {
+ skip_bits1(gb);
+ }
+ pic->b_random_access = get_bits1(gb);
+ }
+
+ pic->b_disable_lf = get_bits1(gb);
+ if (!pic->b_disable_lf) {
+ pic->b_lf_param = get_bits1(gb);
+ if (pic->b_lf_param) {
+ pic->lf_alpha_offset = get_se_golomb(gb);
+ pic->lf_beta_offset = get_se_golomb(gb);
+ }
+ }
+
+ pic->b_no_chroma_quant_param = get_bits1(gb);
+ if (pic->b_no_chroma_quant_param == 0) {
+ pic->cb_quant_delta = get_se_golomb(gb);
+ pic->cr_quant_delta = get_se_golomb(gb);
+ }
+
+ pic->b_enable_pic_wq = seq->b_enable_wq && get_bits1(gb);
+ if (pic->b_enable_pic_wq) {
+ pic->wq_data_index = get_bits(gb, 2);
+        if (pic->wq_data_index == 1) {
+            int8_t wq_param[6];
+            skip_bits1(gb);
+            pic->wq_param_index = get_bits(gb, 2);
+            pic->wq_model = get_bits(gb, 2);
+            if (pic->wq_param_index == 0) {
+                for (i = 0; i < 6; i++) {
+                    wq_param[i] = avs2_default_wq_param[1][i];
+                }
+            } else if (pic->wq_param_index == 1 || pic->wq_param_index == 2) {
+                int *wq_param_delta = pic->wq_param_delta[pic->wq_param_index - 1];
+                for (i = 0; i < 6; i++) {
+                    wq_param_delta[i] = get_se_golomb(gb);
+                    wq_param[i] = wq_param_delta[i] + avs2_default_wq_param[pic->wq_param_index - 1][i];
+                }
+            } else {
+                /* 3 is reserved; bail out instead of reading wq_param uninitialized */
+                av_log(h, AV_LOG_ERROR, "Invalid wq_param_index: %d\n", pic->wq_param_index);
+                ret = AVERROR_INVALIDDATA;
+                goto error;
+            }
+
+            for (i = 0; i < 64; i++)
+                pic->wqm.m88[i] = wq_param[avs2_wq_model88[pic->wq_model][i]];
+            for (i = 0; i < 16; i++)
+                pic->wqm.m44[i] = wq_param[avs2_wq_model44[pic->wq_model][i]];
+        } else if (pic->wq_data_index == 2) {
+            ff_avs2_decode_wqm(gb, &pic->wqm);
+        }
+    }
+
+    if (seq->b_alf) {
+        ret = ff_avs2_decode_alf_param(gb, &h->pic);
+        if (ret < 0)
+            goto error;
+    }
+
+ align_get_bits(gb);
+
+ av_log(h, AV_LOG_DEBUG, "<%s>, ra:%d, tid=%d, doi=%d, poi=%d \n",
+ ff_avs2_get_pic_type_str(&h->pic), h->pic.b_random_access,
+ h->pic.temporal_id, h->pic.doi,
+ ff_avs2_get_pic_poi(&h->seq, &h->pic));
+error:
+    av_free(rm_pseudo_buffer);
+    return ret;
+}
+
+int ff_avs2_decode_slice_header(AVS2Context *h, uint32_t stc, GetByteContext *bs)
+{
+ AVS2SeqHeader *seq = &h->seq;
+ AVS2PicHeader *pic = &h->pic;
+ AVS2SlcHeader *slc = &h->slc;
+ GetBitContext _gb, *gb = &_gb;
+
+    const int MAX_SLICE_HEADER_BYTES = 5;
+ int buf_size;
+
+    uint8_t *rm_pseudo_buffer = av_mallocz(MAX_SLICE_HEADER_BYTES);
+    if (!rm_pseudo_buffer)
+        return AVERROR(ENOMEM);
+
+ buf_size = ff_avs2_remove_pseudo_code(rm_pseudo_buffer, bs->buffer, MAX_SLICE_HEADER_BYTES);
+
+ init_get_bits8(gb, rm_pseudo_buffer, buf_size);
+
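+    /* slice position in LCUs: 8-bit row plus a 3-bit extension for tall pictures, 8-bit column plus a 2-bit extension for wide pictures */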
+ slc->lcu_y = get_bits(gb, 8);
+ if (seq->height > (144 << seq->log2_lcu_size)) {
+        slc->lcu_y += get_bits(gb, 3) << 7;
+ }
+ slc->lcu_x = get_bits(gb, 8);
+ if (seq->width > (255 << seq->log2_lcu_size)) {
+ slc->lcu_x += get_bits(gb, 2) << 8;
+ }
+ if (!pic->b_fixed_qp) {
+ slc->b_fixed_qp = get_bits1(gb);
+ slc->slice_qp = get_bits(gb, 7);
+ } else {
+ slc->b_fixed_qp = 1;
+ slc->slice_qp = pic->pic_qp;
+ }
+ if (seq->b_sao) {
+ slc->b_sao[0] = get_bits1(gb);
+ slc->b_sao[1] = get_bits1(gb);
+ slc->b_sao[2] = get_bits1(gb);
+ }
+
+ align_get_bits(gb); // aec_byte_alignment_bit
+ slc->aec_byte_offset = get_bits_count(gb) >> 3;
+
+ av_log(h, AV_LOG_TRACE, "slice[%d, %d]\n", slc->lcu_x, slc->lcu_y);
+
+ av_free(rm_pseudo_buffer);
+ return 0;
+}
\ No newline at end of file
diff --git a/libavcodec/codec_desc.c b/libavcodec/codec_desc.c
index 033344304c..cdaf184f47 100644
--- a/libavcodec/codec_desc.c
+++ b/libavcodec/codec_desc.c
@@ -1411,8 +1411,9 @@ static const AVCodecDescriptor codec_descriptors[] = {
.type = AVMEDIA_TYPE_VIDEO,
.name = "avs2",
.long_name = NULL_IF_CONFIG_SMALL("AVS2-P2/IEEE1857.4"),
- .props = AV_CODEC_PROP_LOSSY,
- },
+ .props = AV_CODEC_PROP_LOSSY | AV_CODEC_PROP_REORDER,
+ .profiles = NULL_IF_CONFIG_SMALL(ff_avs2_profiles),
+ },
{
.id = AV_CODEC_ID_PGX,
.type = AVMEDIA_TYPE_VIDEO,
diff --git a/libavcodec/defs.h b/libavcodec/defs.h
index d59816a70f..fd75b0f905 100644
--- a/libavcodec/defs.h
+++ b/libavcodec/defs.h
@@ -195,6 +195,10 @@
#define AV_PROFILE_CAVS_JIZHUN 0x20
#define AV_PROFILE_CAVS_GUANGDIAN 0x48
+#define AV_PROFILE_AVS2_PIC 0x12
+#define AV_PROFILE_AVS2_MAIN 0x20
+#define AV_PROFILE_AVS2_MAIN_10 0x22
+
#define AV_LEVEL_UNKNOWN -99
diff --git a/libavcodec/hwaccels.h b/libavcodec/hwaccels.h
index a1a973b460..cfe681fced 100644
--- a/libavcodec/hwaccels.h
+++ b/libavcodec/hwaccels.h
@@ -90,5 +90,6 @@ extern const struct FFHWAccel ff_wmv3_nvdec_hwaccel;
extern const struct FFHWAccel ff_wmv3_vaapi_hwaccel;
extern const struct FFHWAccel ff_wmv3_vdpau_hwaccel;
extern const struct FFHWAccel ff_cavs_vaapi_hwaccel;
+extern const struct FFHWAccel ff_avs2_vaapi_hwaccel;
#endif /* AVCODEC_HWACCELS_H */
diff --git a/libavcodec/libdavs2.c b/libavcodec/libdavs2.c
index 179d2f4e4b..615f11a49e 100644
--- a/libavcodec/libdavs2.c
+++ b/libavcodec/libdavs2.c
@@ -87,7 +87,7 @@ static int davs2_dump_frames(AVCodecContext *avctx, davs2_picture_t *pic, int *g
avctx->has_b_frames = FFMAX(avctx->has_b_frames, !headerset->low_delay);
if (headerset->frame_rate_id < 16)
- avctx->framerate = ff_avs2_frame_rate_tab[headerset->frame_rate_id];
+ avctx->framerate = ff_avs2_frame_rate_c2q(headerset->frame_rate_id);
*got_frame = 0;
return 0;
}
diff --git a/libavcodec/profiles.c b/libavcodec/profiles.c
index b312f12281..b225e07db1 100644
--- a/libavcodec/profiles.c
+++ b/libavcodec/profiles.c
@@ -206,4 +206,10 @@ const AVProfile ff_cavs_profiles[] = {
{ AV_PROFILE_UNKNOWN },
};
+const AVProfile ff_avs2_profiles[] = {
+ { AV_PROFILE_AVS2_MAIN, "Main" },
+ { AV_PROFILE_AVS2_MAIN_10, "Main 10" },
+ { AV_PROFILE_UNKNOWN },
+};
+
#endif /* !CONFIG_SMALL */
diff --git a/libavcodec/profiles.h b/libavcodec/profiles.h
index 9a2b348ad4..22765b93c5 100644
--- a/libavcodec/profiles.h
+++ b/libavcodec/profiles.h
@@ -76,5 +76,6 @@ extern const AVProfile ff_mjpeg_profiles[];
extern const AVProfile ff_arib_caption_profiles[];
extern const AVProfile ff_evc_profiles[];
extern const AVProfile ff_cavs_profiles[];
+extern const AVProfile ff_avs2_profiles[];
#endif /* AVCODEC_PROFILES_H */
diff --git a/libavcodec/vaapi_avs2.c b/libavcodec/vaapi_avs2.c
new file mode 100644
index 0000000000..fcb39130b8
--- /dev/null
+++ b/libavcodec/vaapi_avs2.c
@@ -0,0 +1,227 @@
+/*
+ * AVS2 (Chinese GY/T 299.1-2016) HW decode acceleration through VA API
+ * Copyright (c) 2022 JianfengZheng <jianfeng.zheng@mthreads.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "hwconfig.h"
+#include "hwaccel_internal.h"
+#include "vaapi_decode.h"
+#include "avs2dec.h"
+
+/**
+ * @file
+ * This file implements the glue code between FFmpeg's and VA API's
+ * structures for AVS2 (Chinese GY/T 299.1-2016) decoding.
+ */
+
+static int vaapi_avs2_pic_type_cvt(int avs2_pic_type)
+{
+ switch (avs2_pic_type)
+ {
+ case AVS2_PIC_I: return VA_AVS2_I_IMG;
+ case AVS2_PIC_P: return VA_AVS2_P_IMG;
+ case AVS2_PIC_B: return VA_AVS2_B_IMG;
+ case AVS2_PIC_F: return VA_AVS2_F_IMG;
+ case AVS2_PIC_S: return VA_AVS2_S_IMG;
+ case AVS2_PIC_G: return VA_AVS2_G_IMG;
+ case AVS2_PIC_GB: return VA_AVS2_GB_IMG;
+ default: return VA_AVS2_I_IMG;
+    }
+}
+
+static void vaapi_avs2_init_pic(VAPictureAVS2 *va_pic)
+{
+ va_pic->surface_id = VA_INVALID_SURFACE;
+ va_pic->doi = -1;
+ va_pic->poi = -1;
+ va_pic->num_ref = 0;
+}
+
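+/* translate a decoded AVS2Frame into VA's picture descriptor: surface id, DOI/POI, and each reference's DOI/POI */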
+static void vaapi_avs2_fill_pic(VAPictureAVS2 *va_pic, const AVS2Frame *frame)
+{
+ int i;
+ va_pic->surface_id = ff_vaapi_get_surface_id(frame->frame);
+ va_pic->doi = frame->pic_header.doi;
+ va_pic->poi = frame->poi;
+ va_pic->num_ref = frame->n_ref;
+ for (i = 0; i < frame->n_ref; i++) {
+ va_pic->ref_doi[i] = frame->ref_doi[i];
+ va_pic->ref_poi[i] = frame->ref_poi[i];
+ }
+}
+
+/** Initialize and start decoding a frame with VA API. */
+static int vaapi_avs2_start_frame(AVCodecContext *avctx,
+ av_unused const uint8_t *buffer,
+ av_unused uint32_t size)
+{
+ int i, err;
+ AVS2Frame *ref_frame;
+ AVS2Context *h = avctx->priv_data;
+ AVS2SeqHeader *seq = &h->seq;
+ AVS2PicHeader *pic = &h->pic;
+
+ VAPictureParameterBufferAVS2 pic_param;
+
+ VAAPIDecodePicture *vapic = h->curr_frame->hwaccel_picture_private;
+ vapic->output_surface = ff_vaapi_get_surface_id(h->curr_frame->frame);
+
+    /* see avs2_dec_gen_pic_param() in avs2_dec_pic.c */
+ pic_param = (VAPictureParameterBufferAVS2) {
+ .width = seq->width,
+ .height = seq->height,
+
+ .log2_lcu_size_minus4 = seq->log2_lcu_size - 4,
+ .chroma_format = seq->chroma_format,
+ .output_bit_depth_minus8 = seq->output_bit_depth - 8,
+ .weighted_skip_enable = seq->b_weighted_skip,
+ .multi_hypothesis_skip_enable = seq->b_multi_hypothesis_skip,
+ .nonsquare_intra_prediction_enable = seq->b_nsip,
+ .dph_enable = seq->b_dual_hypothesis_prediction,
+ .encoding_bit_depth_minus8 = seq->sample_bit_depth - 8,
+ .field_coded_sequence = seq->b_field_coding,
+ .pmvr_enable = seq->b_pmvr,
+ .nonsquare_quadtree_transform_enable = seq->b_nsqt,
+ .inter_amp_enable = seq->b_amp,
+ .secondary_transform_enable_flag = seq->b_2nd_transform,
+ .fixed_pic_qp = pic->b_fixed_qp,
+ .pic_qp = pic->pic_qp,
+ .picture_structure = pic->b_picture_structure,
+ .top_field_picture_flag = pic->b_top_field_picture,
+ .scene_picture_disable = seq->b_disable_scene_pic,
+ .scene_reference_enable = pic->b_intra ? 0 : pic->b_scene_ref,
+ .pic_type = vaapi_avs2_pic_type_cvt(h->curr_frame->pic_type),
+
+ .lf_cross_slice_enable_flag = seq->b_cross_slice_loop_filter,
+ .lf_pic_dbk_disable_flag = pic->b_disable_lf,
+ .sao_enable = seq->b_sao,
+ .alf_enable = seq->b_alf,
+ .alpha_c_offset = pic->lf_alpha_offset,
+ .beta_offset = pic->lf_beta_offset,
+ .pic_alf_on_Y = pic->b_alf_enable[0],
+ .pic_alf_on_U = pic->b_alf_enable[1],
+ .pic_alf_on_V = pic->b_alf_enable[2],
+ .pic_weight_quant_enable = pic->b_enable_pic_wq,
+ .pic_weight_quant_data_index = pic->wq_data_index,
+ .chroma_quant_param_delta_cb = pic->cb_quant_delta,
+ .chroma_quant_param_delta_cr = pic->cr_quant_delta,
+
+ .non_ref_flag = !h->curr_frame->b_ref,
+ .num_of_ref = h->curr_frame->n_ref,
+ };
+
+ vaapi_avs2_fill_pic(&pic_param.CurrPic, h->curr_frame);
+ for (i = 0; i < VA_AVS2_MAX_REF_COUNT; i++) {
+ vaapi_avs2_init_pic(&pic_param.ref_list[i]);
+ }
+ for (i = 0; i < h->curr_frame->n_ref; i++) {
+ ref_frame = ff_avs2_dpb_get_frame_by_doi(h, h->curr_frame->ref_doi[i]);
+ if (!ref_frame) {
+ av_log(avctx, AV_LOG_ERROR, "Can't get ref frame with doi=%d in dpb, "
+ "curr_doi=%d !!!\n", h->curr_frame->ref_doi[i], pic->doi);
+ return AVERROR_INVALIDDATA;
+ }
+ vaapi_avs2_fill_pic(&pic_param.ref_list[i], ref_frame);
+ }
+    if (pic->wq_data_index == 0) {
+        memcpy(pic_param.wq_mat, seq->wqm.m44, 16);
+        memcpy(pic_param.wq_mat + 16, seq->wqm.m88, 64);
+    } else {
+        memcpy(pic_param.wq_mat, pic->wqm.m44, 16);
+        memcpy(pic_param.wq_mat + 16, pic->wqm.m88, 64);
+    }
+ memcpy(pic_param.alf_coeff[0], pic->alf_coeff[0], sizeof(pic_param.alf_coeff));
+
+ err = ff_vaapi_decode_make_param_buffer(avctx, vapic,
+ VAPictureParameterBufferType,
+ &pic_param, sizeof(pic_param));
+ if (err < 0)
+ goto fail;
+
+ return 0;
+fail:
+ ff_vaapi_decode_cancel(avctx, vapic);
+ return err;
+}
+
+/** End a hardware decoding based frame. */
+static int vaapi_avs2_end_frame(AVCodecContext *avctx)
+{
+ AVS2Context *h = avctx->priv_data;
+ VAAPIDecodePicture *vapic = h->curr_frame->hwaccel_picture_private;
+ return ff_vaapi_decode_issue(avctx, vapic);
+}
+
+/** Decode the given AVS2 slice with VA API. */
+static int vaapi_avs2_decode_slice(AVCodecContext *avctx,
+ const uint8_t *buffer,
+ uint32_t size)
+{
+ int err;
+ AVS2Context *h = avctx->priv_data;
+ AVS2SlcHeader *slc = &h->slc;
+ VAAPIDecodePicture *vapic = h->curr_frame->hwaccel_picture_private;
+
+ VASliceParameterBufferAVS2 slice_param;
+ slice_param = (VASliceParameterBufferAVS2) {
+ .slice_data_size = size,
+ .slice_data_offset = 0,
+ .slice_data_flag = VA_SLICE_DATA_FLAG_ALL,
+ .lcu_start_x = slc->lcu_x,
+ .lcu_start_y = slc->lcu_y,
+
+ .fixed_slice_qp = slc->b_fixed_qp,
+ .slice_qp = slc->slice_qp,
+ .slice_sao_enable_Y = slc->b_sao[0],
+ .slice_sao_enable_U = slc->b_sao[1],
+ .slice_sao_enable_V = slc->b_sao[2],
+
+ .vlc_byte_offset = slc->aec_byte_offset & 0xf,
+ };
+
+ err = ff_vaapi_decode_make_slice_buffer(avctx, vapic,
+ &slice_param, sizeof(slice_param),
+ buffer, size);
+ if (err < 0)
+ goto fail;
+
+ return 0;
+
+fail:
+ ff_vaapi_decode_cancel(avctx, vapic);
+ return err;
+}
+
+const FFHWAccel ff_avs2_vaapi_hwaccel = {
+ .p.name = "avs2_vaapi",
+ .p.type = AVMEDIA_TYPE_VIDEO,
+ .p.id = AV_CODEC_ID_AVS2,
+ .p.pix_fmt = AV_PIX_FMT_VAAPI,
+ .start_frame = &vaapi_avs2_start_frame,
+ .end_frame = &vaapi_avs2_end_frame,
+ .decode_slice = &vaapi_avs2_decode_slice,
+ .frame_priv_data_size = sizeof(VAAPIDecodePicture),
+ .init = &ff_vaapi_decode_init,
+ .uninit = &ff_vaapi_decode_uninit,
+ .frame_params = &ff_vaapi_common_frame_params,
+ .priv_data_size = sizeof(VAAPIDecodeContext),
+ .caps_internal = HWACCEL_CAP_ASYNC_SAFE,
+};
diff --git a/libavcodec/vaapi_decode.c b/libavcodec/vaapi_decode.c
index 13a3f6aa42..fb25a7c57b 100644
--- a/libavcodec/vaapi_decode.c
+++ b/libavcodec/vaapi_decode.c
@@ -412,6 +412,10 @@ static const struct {
MAP(CAVS, CAVS_JIZHUN, AVSJizhun ),
MAP(CAVS, CAVS_GUANGDIAN, AVSGuangdian),
#endif
+#if HAVE_VA_PROFILE_AVS2
+ MAP(AVS2, AVS2_MAIN, AVS2Main ),
+ MAP(AVS2, AVS2_MAIN_10, AVS2Main10 ),
+#endif
#if VA_CHECK_VERSION(0, 37, 0)
MAP(HEVC, HEVC_MAIN, HEVCMain ),
MAP(HEVC, HEVC_MAIN_10, HEVCMain10 ),
@@ -611,6 +615,7 @@ static int vaapi_decode_make_config(AVCodecContext *avctx,
case AV_CODEC_ID_H264:
case AV_CODEC_ID_HEVC:
case AV_CODEC_ID_AV1:
+ case AV_CODEC_ID_AVS2:
frames->initial_pool_size += 16;
break;
case AV_CODEC_ID_VP9:
diff --git a/libavformat/matroska.c b/libavformat/matroska.c
index 5878594e68..4de23f4cc0 100644
--- a/libavformat/matroska.c
+++ b/libavformat/matroska.c
@@ -100,6 +100,7 @@ const CodecTags ff_mkv_codec_tags[]={
{"V_UNCOMPRESSED" , AV_CODEC_ID_RAWVIDEO},
{"V_VP8" , AV_CODEC_ID_VP8},
{"V_VP9" , AV_CODEC_ID_VP9},
+ {"V_AVS2" , AV_CODEC_ID_AVS2},
{"" , AV_CODEC_ID_NONE}
};
diff --git a/libavformat/mpeg.h b/libavformat/mpeg.h
index b635295776..5d294e6f8e 100644
--- a/libavformat/mpeg.h
+++ b/libavformat/mpeg.h
@@ -57,6 +57,7 @@
#define STREAM_TYPE_VIDEO_H264 0x1b
#define STREAM_TYPE_VIDEO_HEVC 0x24
#define STREAM_TYPE_VIDEO_CAVS 0x42
+#define STREAM_TYPE_VIDEO_AVS2 0xd2
#define STREAM_TYPE_AUDIO_AC3 0x81
--
2.25.1