From: Andreas Rheinhardt <ffmpegagent-at-gmail.com@ffmpeg.org>
To: ffmpeg-devel@ffmpeg.org
Cc: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
Subject: [FFmpeg-devel] [PATCH 30/48] avcodec/mpeg12dec: Add Mpeg12SliceContext
Date: Mon, 23 Jun 2025 13:36:30 +0000
Message-ID: <e550205048533f2642a756ac9c01ea68df7a910e.1750685809.git.ffmpegagent@gmail.com>
In-Reply-To: <pull.102.ffstaging.FFmpeg.1750685808.ffmpegagent@gmail.com>

From: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>

This is in preparation for removing the GetBitContext
from MPVContext.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
---
 libavcodec/mpeg12dec.c | 863 +++++++++++++++++++++--------------------
 libavcodec/mpegvideo.c |   2 +-
 libavcodec/mpegvideo.h |   1 +
 3 files changed, 436 insertions(+), 430 deletions(-)
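
Note for reviewers: a minimal, self-contained sketch of the access-pattern
change this patch makes. The *_Stub types below are simplified stand-ins, not
the real FFmpeg structs; only the idea of embedding the generic context as
member "c" mirrors the actual Mpeg12SliceContext, so slice code switches from
s->gb / s->qscale to s->c.gb / s->c.qscale.

#include <stdio.h>

/* Stand-ins for GetBitContext and MPVContext; fields are illustrative only. */
typedef struct GetBitContextStub { int index; } GetBitContextStub;
typedef struct MPVContextStub    { GetBitContextStub gb; int qscale; } MPVContextStub;

/* New slice context: the shared context becomes member "c". */
typedef struct Mpeg12SliceContextStub {
    MPVContextStub c;
} Mpeg12SliceContextStub;

static void decode_one_block(Mpeg12SliceContextStub *const s)
{
    s->c.qscale    = 2;  /* was: s->qscale          */
    s->c.gb.index += 8;  /* was: ... via &s->gb ... */
}

int main(void)
{
    Mpeg12SliceContextStub s = { 0 };
    decode_one_block(&s);
    printf("qscale=%d, bit index=%d\n", s.c.qscale, s.c.gb.index);
    return 0;
}
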

diff --git a/libavcodec/mpeg12dec.c b/libavcodec/mpeg12dec.c
index 15bfab9623..1ef581ff99 100644
--- a/libavcodec/mpeg12dec.c
+++ b/libavcodec/mpeg12dec.c
@@ -70,8 +70,12 @@ enum Mpeg2ClosedCaptionsFormat {
     CC_FORMAT_DISH
 };
 
+typedef struct Mpeg12SliceContext {
+    MPVContext c;
+} Mpeg12SliceContext;
+
 typedef struct Mpeg1Context {
-    MpegEncContext mpeg_enc_ctx;
+    Mpeg12SliceContext slice;
     AVPanScan pan_scan;         /* some temporary storage for the panscan */
     enum AVStereo3DType stereo3d_type;
     int has_stereo3d;
@@ -95,22 +99,22 @@ typedef struct Mpeg1Context {
 } Mpeg1Context;
 
 /* as H.263, but only 17 codes */
-static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred)
+static int mpeg_decode_motion(Mpeg12SliceContext *const s, int fcode, int pred)
 {
     int code, sign, val, shift;
 
-    code = get_vlc2(&s->gb, ff_mv_vlc, MV_VLC_BITS, 2);
+    code = get_vlc2(&s->c.gb, ff_mv_vlc, MV_VLC_BITS, 2);
     if (code == 0)
         return pred;
     if (code < 0)
         return 0xffff;
 
-    sign  = get_bits1(&s->gb);
+    sign  = get_bits1(&s->c.gb);
     shift = fcode - 1;
     val   = code;
     if (shift) {
         val  = (val - 1) << shift;
-        val |= get_bits(&s->gb, shift);
+        val |= get_bits(&s->c.gb, shift);
         val++;
     }
     if (sign)
@@ -125,39 +129,39 @@ static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred)
 #define check_scantable_index(ctx, x)                                         \
     do {                                                                      \
         if ((x) > MAX_INDEX) {                                                \
-            av_log(ctx->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n",     \
-                   ctx->mb_x, ctx->mb_y);                                     \
+            av_log(ctx->c.avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n",   \
+                   ctx->c.mb_x, ctx->c.mb_y);                                 \
             return AVERROR_INVALIDDATA;                                       \
         }                                                                     \
     } while (0)
 
-static inline int mpeg1_decode_block_inter(MpegEncContext *s,
+static inline int mpeg1_decode_block_inter(Mpeg12SliceContext *const s,
                                            int16_t *block, int n)
 {
     int level, i, j, run;
-    const uint8_t *const scantable = s->intra_scantable.permutated;
-    const uint16_t *quant_matrix = s->inter_matrix;
-    const int qscale             = s->qscale;
+    const uint8_t *const scantable = s->c.intra_scantable.permutated;
+    const uint16_t *quant_matrix = s->c.inter_matrix;
+    const int qscale             = s->c.qscale;
 
     {
-        OPEN_READER(re, &s->gb);
+        OPEN_READER(re, &s->c.gb);
         i = -1;
         // special case for first coefficient, no need to add second VLC table
-        UPDATE_CACHE(re, &s->gb);
-        if (((int32_t) GET_CACHE(re, &s->gb)) < 0) {
+        UPDATE_CACHE(re, &s->c.gb);
+        if (((int32_t) GET_CACHE(re, &s->c.gb)) < 0) {
             level = (3 * qscale * quant_matrix[0]) >> 5;
             level = (level - 1) | 1;
-            if (GET_CACHE(re, &s->gb) & 0x40000000)
+            if (GET_CACHE(re, &s->c.gb) & 0x40000000)
                 level = -level;
             block[0] = level;
             i++;
-            SKIP_BITS(re, &s->gb, 2);
-            if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
+            SKIP_BITS(re, &s->c.gb, 2);
+            if (((int32_t) GET_CACHE(re, &s->c.gb)) <= (int32_t) 0xBFFFFFFF)
                 goto end;
         }
         /* now quantify & encode AC coefficients */
         for (;;) {
-            GET_RL_VLC(level, run, re, &s->gb, ff_mpeg1_rl_vlc,
+            GET_RL_VLC(level, run, re, &s->c.gb, ff_mpeg1_rl_vlc,
                        TEX_VLC_BITS, 2, 0);
 
             if (level != 0) {
@@ -167,22 +171,22 @@ static inline int mpeg1_decode_block_inter(MpegEncContext *s,
                 j = scantable[i];
                 level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
                 level = (level - 1) | 1;
-                level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
-                        SHOW_SBITS(re, &s->gb, 1);
-                SKIP_BITS(re, &s->gb, 1);
+                level = (level ^ SHOW_SBITS(re, &s->c.gb, 1)) -
+                        SHOW_SBITS(re, &s->c.gb, 1);
+                SKIP_BITS(re, &s->c.gb, 1);
             } else {
                 /* escape */
-                run = SHOW_UBITS(re, &s->gb, 6) + 1;
-                LAST_SKIP_BITS(re, &s->gb, 6);
-                UPDATE_CACHE(re, &s->gb);
-                level = SHOW_SBITS(re, &s->gb, 8);
-                SKIP_BITS(re, &s->gb, 8);
+                run = SHOW_UBITS(re, &s->c.gb, 6) + 1;
+                LAST_SKIP_BITS(re, &s->c.gb, 6);
+                UPDATE_CACHE(re, &s->c.gb);
+                level = SHOW_SBITS(re, &s->c.gb, 8);
+                SKIP_BITS(re, &s->c.gb, 8);
                 if (level == -128) {
-                    level = SHOW_UBITS(re, &s->gb, 8) - 256;
-                    SKIP_BITS(re, &s->gb, 8);
+                    level = SHOW_UBITS(re, &s->c.gb, 8) - 256;
+                    SKIP_BITS(re, &s->c.gb, 8);
                 } else if (level == 0) {
-                    level = SHOW_UBITS(re, &s->gb, 8);
-                    SKIP_BITS(re, &s->gb, 8);
+                    level = SHOW_UBITS(re, &s->c.gb, 8);
+                    SKIP_BITS(re, &s->c.gb, 8);
                 }
                 i += run;
                 if (i > MAX_INDEX)
@@ -200,57 +204,57 @@ static inline int mpeg1_decode_block_inter(MpegEncContext *s,
             }
 
             block[j] = level;
-            if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
+            if (((int32_t) GET_CACHE(re, &s->c.gb)) <= (int32_t) 0xBFFFFFFF)
                 break;
-            UPDATE_CACHE(re, &s->gb);
+            UPDATE_CACHE(re, &s->c.gb);
         }
 end:
-        LAST_SKIP_BITS(re, &s->gb, 2);
-        CLOSE_READER(re, &s->gb);
+        LAST_SKIP_BITS(re, &s->c.gb, 2);
+        CLOSE_READER(re, &s->c.gb);
     }
 
     check_scantable_index(s, i);
 
-    s->block_last_index[n] = i;
+    s->c.block_last_index[n] = i;
     return 0;
 }
 
-static inline int mpeg2_decode_block_non_intra(MpegEncContext *s,
+static inline int mpeg2_decode_block_non_intra(Mpeg12SliceContext *const s,
                                                int16_t *block, int n)
 {
     int level, i, j, run;
-    const uint8_t *const scantable = s->intra_scantable.permutated;
+    const uint8_t *const scantable = s->c.intra_scantable.permutated;
     const uint16_t *quant_matrix;
-    const int qscale = s->qscale;
+    const int qscale = s->c.qscale;
     int mismatch;
 
     mismatch = 1;
 
     {
-        OPEN_READER(re, &s->gb);
+        OPEN_READER(re, &s->c.gb);
         i = -1;
         if (n < 4)
-            quant_matrix = s->inter_matrix;
+            quant_matrix = s->c.inter_matrix;
         else
-            quant_matrix = s->chroma_inter_matrix;
+            quant_matrix = s->c.chroma_inter_matrix;
 
         // Special case for first coefficient, no need to add second VLC table.
-        UPDATE_CACHE(re, &s->gb);
-        if (((int32_t) GET_CACHE(re, &s->gb)) < 0) {
+        UPDATE_CACHE(re, &s->c.gb);
+        if (((int32_t) GET_CACHE(re, &s->c.gb)) < 0) {
             level = (3 * qscale * quant_matrix[0]) >> 5;
-            if (GET_CACHE(re, &s->gb) & 0x40000000)
+            if (GET_CACHE(re, &s->c.gb) & 0x40000000)
                 level = -level;
             block[0]  = level;
             mismatch ^= level;
             i++;
-            SKIP_BITS(re, &s->gb, 2);
-            if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
+            SKIP_BITS(re, &s->c.gb, 2);
+            if (((int32_t) GET_CACHE(re, &s->c.gb)) <= (int32_t) 0xBFFFFFFF)
                 goto end;
         }
 
         /* now quantify & encode AC coefficients */
         for (;;) {
-            GET_RL_VLC(level, run, re, &s->gb, ff_mpeg1_rl_vlc,
+            GET_RL_VLC(level, run, re, &s->c.gb, ff_mpeg1_rl_vlc,
                        TEX_VLC_BITS, 2, 0);
 
             if (level != 0) {
@@ -259,16 +263,16 @@ static inline int mpeg2_decode_block_non_intra(MpegEncContext *s,
                     break;
                 j = scantable[i];
                 level = ((level * 2 + 1) * qscale * quant_matrix[j]) >> 5;
-                level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
-                        SHOW_SBITS(re, &s->gb, 1);
-                SKIP_BITS(re, &s->gb, 1);
+                level = (level ^ SHOW_SBITS(re, &s->c.gb, 1)) -
+                        SHOW_SBITS(re, &s->c.gb, 1);
+                SKIP_BITS(re, &s->c.gb, 1);
             } else {
                 /* escape */
-                run = SHOW_UBITS(re, &s->gb, 6) + 1;
-                LAST_SKIP_BITS(re, &s->gb, 6);
-                UPDATE_CACHE(re, &s->gb);
-                level = SHOW_SBITS(re, &s->gb, 12);
-                SKIP_BITS(re, &s->gb, 12);
+                run = SHOW_UBITS(re, &s->c.gb, 6) + 1;
+                LAST_SKIP_BITS(re, &s->c.gb, 6);
+                UPDATE_CACHE(re, &s->c.gb);
+                level = SHOW_SBITS(re, &s->c.gb, 12);
+                SKIP_BITS(re, &s->c.gb, 12);
 
                 i += run;
                 if (i > MAX_INDEX)
@@ -284,60 +288,60 @@ static inline int mpeg2_decode_block_non_intra(MpegEncContext *s,
 
             mismatch ^= level;
             block[j]  = level;
-            if (((int32_t) GET_CACHE(re, &s->gb)) <= (int32_t) 0xBFFFFFFF)
+            if (((int32_t) GET_CACHE(re, &s->c.gb)) <= (int32_t) 0xBFFFFFFF)
                 break;
-            UPDATE_CACHE(re, &s->gb);
+            UPDATE_CACHE(re, &s->c.gb);
         }
 end:
-        LAST_SKIP_BITS(re, &s->gb, 2);
-        CLOSE_READER(re, &s->gb);
+        LAST_SKIP_BITS(re, &s->c.gb, 2);
+        CLOSE_READER(re, &s->c.gb);
     }
     block[63] ^= (mismatch & 1);
 
     check_scantable_index(s, i);
 
-    s->block_last_index[n] = i;
+    s->c.block_last_index[n] = i;
     return 0;
 }
 
-static inline int mpeg2_decode_block_intra(MpegEncContext *s,
+static inline int mpeg2_decode_block_intra(Mpeg12SliceContext *const s,
                                            int16_t *block, int n)
 {
     int level, dc, diff, i, j, run;
     int component;
     const RL_VLC_ELEM *rl_vlc;
-    const uint8_t *const scantable = s->intra_scantable.permutated;
+    const uint8_t *const scantable = s->c.intra_scantable.permutated;
     const uint16_t *quant_matrix;
-    const int qscale = s->qscale;
+    const int qscale = s->c.qscale;
     int mismatch;
 
     /* DC coefficient */
     if (n < 4) {
-        quant_matrix = s->intra_matrix;
+        quant_matrix = s->c.intra_matrix;
         component    = 0;
     } else {
-        quant_matrix = s->chroma_intra_matrix;
+        quant_matrix = s->c.chroma_intra_matrix;
         component    = (n & 1) + 1;
     }
-    diff = decode_dc(&s->gb, component);
-    dc  = s->last_dc[component];
+    diff = decode_dc(&s->c.gb, component);
+    dc  = s->c.last_dc[component];
     dc += diff;
-    s->last_dc[component] = dc;
-    block[0] = dc * (1 << (3 - s->intra_dc_precision));
-    ff_tlog(s->avctx, "dc=%d\n", block[0]);
+    s->c.last_dc[component] = dc;
+    block[0] = dc * (1 << (3 - s->c.intra_dc_precision));
+    ff_tlog(s->c.avctx, "dc=%d\n", block[0]);
     mismatch = block[0] ^ 1;
     i = 0;
-    if (s->intra_vlc_format)
+    if (s->c.intra_vlc_format)
         rl_vlc = ff_mpeg2_rl_vlc;
     else
         rl_vlc = ff_mpeg1_rl_vlc;
 
     {
-        OPEN_READER(re, &s->gb);
+        OPEN_READER(re, &s->c.gb);
         /* now quantify & encode AC coefficients */
         for (;;) {
-            UPDATE_CACHE(re, &s->gb);
-            GET_RL_VLC(level, run, re, &s->gb, rl_vlc,
+            UPDATE_CACHE(re, &s->c.gb);
+            GET_RL_VLC(level, run, re, &s->c.gb, rl_vlc,
                        TEX_VLC_BITS, 2, 0);
 
             if (level == 127) {
@@ -348,15 +352,15 @@ static inline int mpeg2_decode_block_intra(MpegEncContext *s,
                     break;
                 j = scantable[i];
                 level = (level * qscale * quant_matrix[j]) >> 4;
-                level = (level ^ SHOW_SBITS(re, &s->gb, 1)) -
-                        SHOW_SBITS(re, &s->gb, 1);
-                LAST_SKIP_BITS(re, &s->gb, 1);
+                level = (level ^ SHOW_SBITS(re, &s->c.gb, 1)) -
+                        SHOW_SBITS(re, &s->c.gb, 1);
+                LAST_SKIP_BITS(re, &s->c.gb, 1);
             } else {
                 /* escape */
-                run = SHOW_UBITS(re, &s->gb, 6) + 1;
-                SKIP_BITS(re, &s->gb, 6);
-                level = SHOW_SBITS(re, &s->gb, 12);
-                LAST_SKIP_BITS(re, &s->gb, 12);
+                run = SHOW_UBITS(re, &s->c.gb, 6) + 1;
+                SKIP_BITS(re, &s->c.gb, 6);
+                level = SHOW_SBITS(re, &s->c.gb, 12);
+                LAST_SKIP_BITS(re, &s->c.gb, 12);
                 i += run;
                 if (i > MAX_INDEX)
                     break;
@@ -372,7 +376,7 @@ static inline int mpeg2_decode_block_intra(MpegEncContext *s,
             mismatch ^= level;
             block[j]  = level;
         }
-        CLOSE_READER(re, &s->gb);
+        CLOSE_READER(re, &s->c.gb);
     }
     block[63] ^= mismatch & 1;
 
@@ -381,10 +385,10 @@ static inline int mpeg2_decode_block_intra(MpegEncContext *s,
     return 0;
 }
 
-static inline int get_dmv(MpegEncContext *s)
+static inline int get_dmv(Mpeg12SliceContext *const s)
 {
-    if (get_bits1(&s->gb))
-        return 1 - (get_bits1(&s->gb) << 1);
+    if (get_bits1(&s->c.gb))
+        return 1 - (get_bits1(&s->c.gb) << 1);
     else
         return 0;
 }
@@ -395,51 +399,51 @@ static inline int get_dmv(MpegEncContext *s)
 #define MT_16X8  2
 #define MT_DMV   3
 
-static int mpeg_decode_mb(MPVContext *const s)
+static int mpeg_decode_mb(Mpeg12SliceContext *const s)
 {
     int i, j, k, cbp, val, mb_type, motion_type;
-    const int mb_block_count = 4 + (1 << s->chroma_format);
+    const int mb_block_count = 4 + (1 << s->c.chroma_format);
     int ret;
 
-    ff_tlog(s->avctx, "decode_mb: x=%d y=%d\n", s->mb_x, s->mb_y);
+    ff_tlog(s->c.avctx, "decode_mb: x=%d y=%d\n", s->c.mb_x, s->c.mb_y);
 
-    av_assert2(s->mb_skipped == 0);
+    av_assert2(s->c.mb_skipped == 0);
 
-    if (s->mb_skip_run-- != 0) {
-        if (s->pict_type == AV_PICTURE_TYPE_P) {
-            s->mb_skipped = 1;
-            s->cur_pic.mb_type[s->mb_x + s->mb_y * s->mb_stride] =
+    if (s->c.mb_skip_run-- != 0) {
+        if (s->c.pict_type == AV_PICTURE_TYPE_P) {
+            s->c.mb_skipped = 1;
+            s->c.cur_pic.mb_type[s->c.mb_x + s->c.mb_y * s->c.mb_stride] =
                 MB_TYPE_SKIP | MB_TYPE_FORWARD_MV | MB_TYPE_16x16;
         } else {
             int mb_type;
 
-            if (s->mb_x)
-                mb_type = s->cur_pic.mb_type[s->mb_x + s->mb_y * s->mb_stride - 1];
+            if (s->c.mb_x)
+                mb_type = s->c.cur_pic.mb_type[s->c.mb_x + s->c.mb_y * s->c.mb_stride - 1];
             else
                 // FIXME not sure if this is allowed in MPEG at all
-                mb_type = s->cur_pic.mb_type[s->mb_width + (s->mb_y - 1) * s->mb_stride - 1];
+                mb_type = s->c.cur_pic.mb_type[s->c.mb_width + (s->c.mb_y - 1) * s->c.mb_stride - 1];
             if (IS_INTRA(mb_type)) {
-                av_log(s->avctx, AV_LOG_ERROR, "skip with previntra\n");
+                av_log(s->c.avctx, AV_LOG_ERROR, "skip with previntra\n");
                 return AVERROR_INVALIDDATA;
             }
-            s->cur_pic.mb_type[s->mb_x + s->mb_y * s->mb_stride] =
+            s->c.cur_pic.mb_type[s->c.mb_x + s->c.mb_y * s->c.mb_stride] =
                 mb_type | MB_TYPE_SKIP;
 
-            if ((s->mv[0][0][0] | s->mv[0][0][1] | s->mv[1][0][0] | s->mv[1][0][1]) == 0)
-                s->mb_skipped = 1;
+            if ((s->c.mv[0][0][0] | s->c.mv[0][0][1] | s->c.mv[1][0][0] | s->c.mv[1][0][1]) == 0)
+                s->c.mb_skipped = 1;
         }
 
         return 0;
     }
 
-    switch (s->pict_type) {
+    switch (s->c.pict_type) {
     default:
     case AV_PICTURE_TYPE_I:
-        if (get_bits1(&s->gb) == 0) {
-            if (get_bits1(&s->gb) == 0) {
-                av_log(s->avctx, AV_LOG_ERROR,
+        if (get_bits1(&s->c.gb) == 0) {
+            if (get_bits1(&s->c.gb) == 0) {
+                av_log(s->c.avctx, AV_LOG_ERROR,
                        "Invalid mb type in I-frame at %d %d\n",
-                       s->mb_x, s->mb_y);
+                       s->c.mb_x, s->c.mb_y);
                 return AVERROR_INVALIDDATA;
             }
             mb_type = MB_TYPE_QUANT | MB_TYPE_INTRA;
@@ -448,74 +452,74 @@ static int mpeg_decode_mb(MPVContext *const s)
         }
         break;
     case AV_PICTURE_TYPE_P:
-        mb_type = get_vlc2(&s->gb, ff_mb_ptype_vlc, MB_PTYPE_VLC_BITS, 1);
+        mb_type = get_vlc2(&s->c.gb, ff_mb_ptype_vlc, MB_PTYPE_VLC_BITS, 1);
         if (mb_type < 0) {
-            av_log(s->avctx, AV_LOG_ERROR,
-                   "Invalid mb type in P-frame at %d %d\n", s->mb_x, s->mb_y);
+            av_log(s->c.avctx, AV_LOG_ERROR,
+                   "Invalid mb type in P-frame at %d %d\n", s->c.mb_x, s->c.mb_y);
             return AVERROR_INVALIDDATA;
         }
         break;
     case AV_PICTURE_TYPE_B:
-        mb_type = get_vlc2(&s->gb, ff_mb_btype_vlc, MB_BTYPE_VLC_BITS, 1);
+        mb_type = get_vlc2(&s->c.gb, ff_mb_btype_vlc, MB_BTYPE_VLC_BITS, 1);
         if (mb_type < 0) {
-            av_log(s->avctx, AV_LOG_ERROR,
-                   "Invalid mb type in B-frame at %d %d\n", s->mb_x, s->mb_y);
+            av_log(s->c.avctx, AV_LOG_ERROR,
+                   "Invalid mb type in B-frame at %d %d\n", s->c.mb_x, s->c.mb_y);
             return AVERROR_INVALIDDATA;
         }
         break;
     }
-    ff_tlog(s->avctx, "mb_type=%x\n", mb_type);
+    ff_tlog(s->c.avctx, "mb_type=%x\n", mb_type);
 //    motion_type = 0; /* avoid warning */
     if (IS_INTRA(mb_type)) {
-        s->bdsp.clear_blocks(s->block[0]);
+        s->c.bdsp.clear_blocks(s->c.block[0]);
 
-        if (!s->chroma_y_shift)
-            s->bdsp.clear_blocks(s->block[6]);
+        if (!s->c.chroma_y_shift)
+            s->c.bdsp.clear_blocks(s->c.block[6]);
 
         /* compute DCT type */
         // FIXME: add an interlaced_dct coded var?
-        if (s->picture_structure == PICT_FRAME &&
-            !s->frame_pred_frame_dct)
-            s->interlaced_dct = get_bits1(&s->gb);
+        if (s->c.picture_structure == PICT_FRAME &&
+            !s->c.frame_pred_frame_dct)
+            s->c.interlaced_dct = get_bits1(&s->c.gb);
 
         if (IS_QUANT(mb_type))
-            s->qscale = mpeg_get_qscale(&s->gb, s->q_scale_type);
+            s->c.qscale = mpeg_get_qscale(&s->c.gb, s->c.q_scale_type);
 
-        if (s->concealment_motion_vectors) {
+        if (s->c.concealment_motion_vectors) {
             /* just parse them */
-            if (s->picture_structure != PICT_FRAME)
-                skip_bits1(&s->gb);  /* field select */
-
-            s->mv[0][0][0]      =
-            s->last_mv[0][0][0] =
-            s->last_mv[0][1][0] = mpeg_decode_motion(s, s->mpeg_f_code[0][0],
-                                                     s->last_mv[0][0][0]);
-            s->mv[0][0][1]      =
-            s->last_mv[0][0][1] =
-            s->last_mv[0][1][1] = mpeg_decode_motion(s, s->mpeg_f_code[0][1],
-                                                     s->last_mv[0][0][1]);
-
-            check_marker(s->avctx, &s->gb, "after concealment_motion_vectors");
+            if (s->c.picture_structure != PICT_FRAME)
+                skip_bits1(&s->c.gb);  /* field select */
+
+            s->c.mv[0][0][0]      =
+            s->c.last_mv[0][0][0] =
+            s->c.last_mv[0][1][0] = mpeg_decode_motion(s, s->c.mpeg_f_code[0][0],
+                                                       s->c.last_mv[0][0][0]);
+            s->c.mv[0][0][1]      =
+            s->c.last_mv[0][0][1] =
+            s->c.last_mv[0][1][1] = mpeg_decode_motion(s, s->c.mpeg_f_code[0][1],
+                                                       s->c.last_mv[0][0][1]);
+
+            check_marker(s->c.avctx, &s->c.gb, "after concealment_motion_vectors");
         } else {
             /* reset mv prediction */
-            memset(s->last_mv, 0, sizeof(s->last_mv));
+            memset(s->c.last_mv, 0, sizeof(s->c.last_mv));
         }
-        s->mb_intra = 1;
+        s->c.mb_intra = 1;
 
-        if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
+        if (s->c.codec_id == AV_CODEC_ID_MPEG2VIDEO) {
             for (i = 0; i < mb_block_count; i++)
-                if ((ret = mpeg2_decode_block_intra(s, s->block[i], i)) < 0)
+                if ((ret = mpeg2_decode_block_intra(s, s->c.block[i], i)) < 0)
                     return ret;
         } else {
             for (i = 0; i < 6; i++) {
-                ret = ff_mpeg1_decode_block_intra(&s->gb,
-                                                  s->intra_matrix,
-                                                  s->intra_scantable.permutated,
-                                                  s->last_dc, s->block[i],
-                                                  i, s->qscale);
+                ret = ff_mpeg1_decode_block_intra(&s->c.gb,
+                                                  s->c.intra_matrix,
+                                                  s->c.intra_scantable.permutated,
+                                                  s->c.last_dc, s->c.block[i],
+                                                  i, s->c.qscale);
                 if (ret < 0) {
-                    av_log(s->avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n",
-                           s->mb_x, s->mb_y);
+                    av_log(s->c.avctx, AV_LOG_ERROR, "ac-tex damaged at %d %d\n",
+                           s->c.mb_x, s->c.mb_y);
                     return ret;
                 }
             }
@@ -524,83 +528,83 @@ static int mpeg_decode_mb(MPVContext *const s)
         if (mb_type & MB_TYPE_ZERO_MV) {
             av_assert2(mb_type & MB_TYPE_CBP);
 
-            s->mv_dir = MV_DIR_FORWARD;
-            if (s->picture_structure == PICT_FRAME) {
-                if (s->picture_structure == PICT_FRAME
-                    && !s->frame_pred_frame_dct)
-                    s->interlaced_dct = get_bits1(&s->gb);
-                s->mv_type = MV_TYPE_16X16;
+            s->c.mv_dir = MV_DIR_FORWARD;
+            if (s->c.picture_structure == PICT_FRAME) {
+                if (s->c.picture_structure == PICT_FRAME
+                    && !s->c.frame_pred_frame_dct)
+                    s->c.interlaced_dct = get_bits1(&s->c.gb);
+                s->c.mv_type = MV_TYPE_16X16;
             } else {
-                s->mv_type            = MV_TYPE_FIELD;
+                s->c.mv_type            = MV_TYPE_FIELD;
                 mb_type              |= MB_TYPE_INTERLACED;
-                s->field_select[0][0] = s->picture_structure - 1;
+                s->c.field_select[0][0] = s->c.picture_structure - 1;
             }
 
             if (IS_QUANT(mb_type))
-                s->qscale = mpeg_get_qscale(&s->gb, s->q_scale_type);
-
-            s->last_mv[0][0][0] = 0;
-            s->last_mv[0][0][1] = 0;
-            s->last_mv[0][1][0] = 0;
-            s->last_mv[0][1][1] = 0;
-            s->mv[0][0][0]      = 0;
-            s->mv[0][0][1]      = 0;
+                s->c.qscale = mpeg_get_qscale(&s->c.gb, s->c.q_scale_type);
+
+            s->c.last_mv[0][0][0] = 0;
+            s->c.last_mv[0][0][1] = 0;
+            s->c.last_mv[0][1][0] = 0;
+            s->c.last_mv[0][1][1] = 0;
+            s->c.mv[0][0][0]      = 0;
+            s->c.mv[0][0][1]      = 0;
         } else {
             av_assert2(mb_type & MB_TYPE_BIDIR_MV);
             // FIXME decide if MBs in field pictures are MB_TYPE_INTERLACED
             /* get additional motion vector type */
-            if (s->picture_structure == PICT_FRAME && s->frame_pred_frame_dct) {
+            if (s->c.picture_structure == PICT_FRAME && s->c.frame_pred_frame_dct) {
                 motion_type = MT_FRAME;
             } else {
-                motion_type = get_bits(&s->gb, 2);
-                if (s->picture_structure == PICT_FRAME && HAS_CBP(mb_type))
-                    s->interlaced_dct = get_bits1(&s->gb);
+                motion_type = get_bits(&s->c.gb, 2);
+                if (s->c.picture_structure == PICT_FRAME && HAS_CBP(mb_type))
+                    s->c.interlaced_dct = get_bits1(&s->c.gb);
             }
 
             if (IS_QUANT(mb_type))
-                s->qscale = mpeg_get_qscale(&s->gb, s->q_scale_type);
+                s->c.qscale = mpeg_get_qscale(&s->c.gb, s->c.q_scale_type);
 
             /* motion vectors */
-            s->mv_dir = MB_TYPE_MV_2_MV_DIR(mb_type);
-            ff_tlog(s->avctx, "motion_type=%d\n", motion_type);
+            s->c.mv_dir = MB_TYPE_MV_2_MV_DIR(mb_type);
+            ff_tlog(s->c.avctx, "motion_type=%d\n", motion_type);
             switch (motion_type) {
             case MT_FRAME: /* or MT_16X8 */
-                if (s->picture_structure == PICT_FRAME) {
+                if (s->c.picture_structure == PICT_FRAME) {
                     mb_type   |= MB_TYPE_16x16;
-                    s->mv_type = MV_TYPE_16X16;
+                    s->c.mv_type = MV_TYPE_16X16;
                     for (i = 0; i < 2; i++) {
                         if (HAS_MV(mb_type, i)) {
                             /* MT_FRAME */
-                            s->mv[i][0][0]      =
-                            s->last_mv[i][0][0] =
-                            s->last_mv[i][1][0] =
-                                mpeg_decode_motion(s, s->mpeg_f_code[i][0],
-                                                   s->last_mv[i][0][0]);
-                            s->mv[i][0][1]      =
-                            s->last_mv[i][0][1] =
-                            s->last_mv[i][1][1] =
-                                mpeg_decode_motion(s, s->mpeg_f_code[i][1],
-                                                   s->last_mv[i][0][1]);
+                            s->c.mv[i][0][0]      =
+                            s->c.last_mv[i][0][0] =
+                            s->c.last_mv[i][1][0] =
+                                mpeg_decode_motion(s, s->c.mpeg_f_code[i][0],
+                                                   s->c.last_mv[i][0][0]);
+                            s->c.mv[i][0][1]      =
+                            s->c.last_mv[i][0][1] =
+                            s->c.last_mv[i][1][1] =
+                                mpeg_decode_motion(s, s->c.mpeg_f_code[i][1],
+                                                   s->c.last_mv[i][0][1]);
                             /* full_pel: only for MPEG-1 */
-                            if (s->full_pel[i]) {
-                                s->mv[i][0][0] *= 2;
-                                s->mv[i][0][1] *= 2;
+                            if (s->c.full_pel[i]) {
+                                s->c.mv[i][0][0] *= 2;
+                                s->c.mv[i][0][1] *= 2;
                             }
                         }
                     }
                 } else {
                     mb_type   |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
-                    s->mv_type = MV_TYPE_16X8;
+                    s->c.mv_type = MV_TYPE_16X8;
                     for (i = 0; i < 2; i++) {
                         if (HAS_MV(mb_type, i)) {
                             /* MT_16X8 */
                             for (j = 0; j < 2; j++) {
-                                s->field_select[i][j] = get_bits1(&s->gb);
+                                s->c.field_select[i][j] = get_bits1(&s->c.gb);
                                 for (k = 0; k < 2; k++) {
-                                    val = mpeg_decode_motion(s, s->mpeg_f_code[i][k],
-                                                             s->last_mv[i][j][k]);
-                                    s->last_mv[i][j][k] = val;
-                                    s->mv[i][j][k]      = val;
+                                    val = mpeg_decode_motion(s, s->c.mpeg_f_code[i][k],
+                                                             s->c.last_mv[i][j][k]);
+                                    s->c.last_mv[i][j][k] = val;
+                                    s->c.mv[i][j][k]      = val;
                                 }
                             }
                         }
@@ -608,151 +612,151 @@ static int mpeg_decode_mb(MPVContext *const s)
                 }
                 break;
             case MT_FIELD:
-                s->mv_type = MV_TYPE_FIELD;
-                if (s->picture_structure == PICT_FRAME) {
+                s->c.mv_type = MV_TYPE_FIELD;
+                if (s->c.picture_structure == PICT_FRAME) {
                     mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
                     for (i = 0; i < 2; i++) {
                         if (HAS_MV(mb_type, i)) {
                             for (j = 0; j < 2; j++) {
-                                s->field_select[i][j] = get_bits1(&s->gb);
-                                val = mpeg_decode_motion(s, s->mpeg_f_code[i][0],
-                                                         s->last_mv[i][j][0]);
-                                s->last_mv[i][j][0] = val;
-                                s->mv[i][j][0]      = val;
-                                ff_tlog(s->avctx, "fmx=%d\n", val);
-                                val = mpeg_decode_motion(s, s->mpeg_f_code[i][1],
-                                                         s->last_mv[i][j][1] >> 1);
-                                s->last_mv[i][j][1] = 2 * val;
-                                s->mv[i][j][1]      = val;
-                                ff_tlog(s->avctx, "fmy=%d\n", val);
+                                s->c.field_select[i][j] = get_bits1(&s->c.gb);
+                                val = mpeg_decode_motion(s, s->c.mpeg_f_code[i][0],
+                                                         s->c.last_mv[i][j][0]);
+                                s->c.last_mv[i][j][0] = val;
+                                s->c.mv[i][j][0]      = val;
+                                ff_tlog(s->c.avctx, "fmx=%d\n", val);
+                                val = mpeg_decode_motion(s, s->c.mpeg_f_code[i][1],
+                                                         s->c.last_mv[i][j][1] >> 1);
+                                s->c.last_mv[i][j][1] = 2 * val;
+                                s->c.mv[i][j][1]      = val;
+                                ff_tlog(s->c.avctx, "fmy=%d\n", val);
                             }
                         }
                     }
                 } else {
-                    av_assert0(!s->progressive_sequence);
+                    av_assert0(!s->c.progressive_sequence);
                     mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED;
                     for (i = 0; i < 2; i++) {
                         if (HAS_MV(mb_type, i)) {
-                            s->field_select[i][0] = get_bits1(&s->gb);
+                            s->c.field_select[i][0] = get_bits1(&s->c.gb);
                             for (k = 0; k < 2; k++) {
-                                val = mpeg_decode_motion(s, s->mpeg_f_code[i][k],
-                                                         s->last_mv[i][0][k]);
-                                s->last_mv[i][0][k] = val;
-                                s->last_mv[i][1][k] = val;
-                                s->mv[i][0][k]      = val;
+                                val = mpeg_decode_motion(s, s->c.mpeg_f_code[i][k],
+                                                         s->c.last_mv[i][0][k]);
+                                s->c.last_mv[i][0][k] = val;
+                                s->c.last_mv[i][1][k] = val;
+                                s->c.mv[i][0][k]      = val;
                             }
                         }
                     }
                 }
                 break;
             case MT_DMV:
-                if (s->progressive_sequence){
-                    av_log(s->avctx, AV_LOG_ERROR, "MT_DMV in progressive_sequence\n");
+                if (s->c.progressive_sequence){
+                    av_log(s->c.avctx, AV_LOG_ERROR, "MT_DMV in progressive_sequence\n");
                     return AVERROR_INVALIDDATA;
                 }
-                s->mv_type = MV_TYPE_DMV;
+                s->c.mv_type = MV_TYPE_DMV;
                 for (i = 0; i < 2; i++) {
                     if (HAS_MV(mb_type, i)) {
                         int dmx, dmy, mx, my, m;
-                        const int my_shift = s->picture_structure == PICT_FRAME;
+                        const int my_shift = s->c.picture_structure == PICT_FRAME;
 
-                        mx = mpeg_decode_motion(s, s->mpeg_f_code[i][0],
-                                                s->last_mv[i][0][0]);
-                        s->last_mv[i][0][0] = mx;
-                        s->last_mv[i][1][0] = mx;
+                        mx = mpeg_decode_motion(s, s->c.mpeg_f_code[i][0],
+                                                s->c.last_mv[i][0][0]);
+                        s->c.last_mv[i][0][0] = mx;
+                        s->c.last_mv[i][1][0] = mx;
                         dmx = get_dmv(s);
-                        my  = mpeg_decode_motion(s, s->mpeg_f_code[i][1],
-                                                 s->last_mv[i][0][1] >> my_shift);
+                        my  = mpeg_decode_motion(s, s->c.mpeg_f_code[i][1],
+                                                 s->c.last_mv[i][0][1] >> my_shift);
                         dmy = get_dmv(s);
 
 
-                        s->last_mv[i][0][1] = my * (1 << my_shift);
-                        s->last_mv[i][1][1] = my * (1 << my_shift);
+                        s->c.last_mv[i][0][1] = my * (1 << my_shift);
+                        s->c.last_mv[i][1][1] = my * (1 << my_shift);
 
-                        s->mv[i][0][0] = mx;
-                        s->mv[i][0][1] = my;
-                        s->mv[i][1][0] = mx; // not used
-                        s->mv[i][1][1] = my; // not used
+                        s->c.mv[i][0][0] = mx;
+                        s->c.mv[i][0][1] = my;
+                        s->c.mv[i][1][0] = mx; // not used
+                        s->c.mv[i][1][1] = my; // not used
 
-                        if (s->picture_structure == PICT_FRAME) {
+                        if (s->c.picture_structure == PICT_FRAME) {
                             mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED;
 
-                            // m = 1 + 2 * s->top_field_first;
-                            m = s->top_field_first ? 1 : 3;
+                            // m = 1 + 2 * s->c.top_field_first;
+                            m = s->c.top_field_first ? 1 : 3;
 
                             /* top -> top pred */
-                            s->mv[i][2][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
-                            s->mv[i][2][1] = ((my * m + (my > 0)) >> 1) + dmy - 1;
+                            s->c.mv[i][2][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
+                            s->c.mv[i][2][1] = ((my * m + (my > 0)) >> 1) + dmy - 1;
                             m = 4 - m;
-                            s->mv[i][3][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
-                            s->mv[i][3][1] = ((my * m + (my > 0)) >> 1) + dmy + 1;
+                            s->c.mv[i][3][0] = ((mx * m + (mx > 0)) >> 1) + dmx;
+                            s->c.mv[i][3][1] = ((my * m + (my > 0)) >> 1) + dmy + 1;
                         } else {
                             mb_type |= MB_TYPE_16x16;
 
-                            s->mv[i][2][0] = ((mx + (mx > 0)) >> 1) + dmx;
-                            s->mv[i][2][1] = ((my + (my > 0)) >> 1) + dmy;
-                            if (s->picture_structure == PICT_TOP_FIELD)
-                                s->mv[i][2][1]--;
+                            s->c.mv[i][2][0] = ((mx + (mx > 0)) >> 1) + dmx;
+                            s->c.mv[i][2][1] = ((my + (my > 0)) >> 1) + dmy;
+                            if (s->c.picture_structure == PICT_TOP_FIELD)
+                                s->c.mv[i][2][1]--;
                             else
-                                s->mv[i][2][1]++;
+                                s->c.mv[i][2][1]++;
                         }
                     }
                 }
                 break;
             default:
-                av_log(s->avctx, AV_LOG_ERROR,
-                       "00 motion_type at %d %d\n", s->mb_x, s->mb_y);
+                av_log(s->c.avctx, AV_LOG_ERROR,
+                       "00 motion_type at %d %d\n", s->c.mb_x, s->c.mb_y);
                 return AVERROR_INVALIDDATA;
             }
         }
 
-        s->mb_intra = 0;
-        s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 128 << s->intra_dc_precision;
+        s->c.mb_intra = 0;
+        s->c.last_dc[0] = s->c.last_dc[1] = s->c.last_dc[2] = 128 << s->c.intra_dc_precision;
         if (HAS_CBP(mb_type)) {
-            s->bdsp.clear_blocks(s->block[0]);
+            s->c.bdsp.clear_blocks(s->c.block[0]);
 
-            cbp = get_vlc2(&s->gb, ff_mb_pat_vlc, MB_PAT_VLC_BITS, 1);
+            cbp = get_vlc2(&s->c.gb, ff_mb_pat_vlc, MB_PAT_VLC_BITS, 1);
             if (mb_block_count > 6) {
                 cbp *= 1 << mb_block_count - 6;
-                cbp  |= get_bits(&s->gb, mb_block_count - 6);
-                s->bdsp.clear_blocks(s->block[6]);
+                cbp  |= get_bits(&s->c.gb, mb_block_count - 6);
+                s->c.bdsp.clear_blocks(s->c.block[6]);
             }
             if (cbp <= 0) {
-                av_log(s->avctx, AV_LOG_ERROR,
-                       "invalid cbp %d at %d %d\n", cbp, s->mb_x, s->mb_y);
+                av_log(s->c.avctx, AV_LOG_ERROR,
+                       "invalid cbp %d at %d %d\n", cbp, s->c.mb_x, s->c.mb_y);
                 return AVERROR_INVALIDDATA;
             }
 
-            if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
+            if (s->c.codec_id == AV_CODEC_ID_MPEG2VIDEO) {
                 cbp <<= 12 - mb_block_count;
 
                 for (i = 0; i < mb_block_count; i++) {
                     if (cbp & (1 << 11)) {
-                        if ((ret = mpeg2_decode_block_non_intra(s, s->block[i], i)) < 0)
+                        if ((ret = mpeg2_decode_block_non_intra(s, s->c.block[i], i)) < 0)
                             return ret;
                     } else {
-                        s->block_last_index[i] = -1;
+                        s->c.block_last_index[i] = -1;
                     }
                     cbp += cbp;
                 }
             } else {
                 for (i = 0; i < 6; i++) {
                     if (cbp & 32) {
-                        if ((ret = mpeg1_decode_block_inter(s, s->block[i], i)) < 0)
+                        if ((ret = mpeg1_decode_block_inter(s, s->c.block[i], i)) < 0)
                             return ret;
                     } else {
-                        s->block_last_index[i] = -1;
+                        s->c.block_last_index[i] = -1;
                     }
                     cbp += cbp;
                 }
             }
         } else {
             for (i = 0; i < 12; i++)
-                s->block_last_index[i] = -1;
+                s->c.block_last_index[i] = -1;
         }
     }
 
-    s->cur_pic.mb_type[s->mb_x + s->mb_y * s->mb_stride] = mb_type;
+    s->c.cur_pic.mb_type[s->c.mb_x + s->c.mb_y * s->c.mb_stride] = mb_type;
 
     return 0;
 }
@@ -760,9 +764,10 @@ static int mpeg_decode_mb(MPVContext *const s)
 static av_cold int mpeg_decode_init(AVCodecContext *avctx)
 {
     Mpeg1Context *s    = avctx->priv_data;
-    MpegEncContext *s2 = &s->mpeg_enc_ctx;
+    MPVContext *const s2 = &s->slice.c;
     int ret;
 
+    s2->slice_ctx_size = sizeof(s->slice);
     s2->out_format = FMT_MPEG1;
 
     if (   avctx->codec_tag != AV_RL32("VCR2")
@@ -830,7 +835,7 @@ static const enum AVPixelFormat mpeg12_pixfmt_list_444[] = {
 static enum AVPixelFormat mpeg_get_pixelformat(AVCodecContext *avctx)
 {
     Mpeg1Context *s1  = avctx->priv_data;
-    MpegEncContext *s = &s1->mpeg_enc_ctx;
+    MPVContext *const s = &s1->slice.c;
     const enum AVPixelFormat *pix_fmts;
 
     if (CONFIG_GRAY && (avctx->flags & AV_CODEC_FLAG_GRAY))
@@ -853,7 +858,7 @@ static enum AVPixelFormat mpeg_get_pixelformat(AVCodecContext *avctx)
 static int mpeg_decode_postinit(AVCodecContext *avctx)
 {
     Mpeg1Context *s1  = avctx->priv_data;
-    MpegEncContext *s = &s1->mpeg_enc_ctx;
+    MPVContext *const s = &s1->slice.c;
     int ret;
 
     if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
@@ -969,7 +974,7 @@ static int mpeg1_decode_picture(AVCodecContext *avctx, const uint8_t *buf,
                                 int buf_size)
 {
     Mpeg1Context *s1  = avctx->priv_data;
-    MpegEncContext *s = &s1->mpeg_enc_ctx;
+    MPVContext *const s = &s1->slice.c;
     GetBitContext gb0, *const gb = &gb0;
     int ref, f_code, vbv_delay, ret;
 
@@ -1014,7 +1019,7 @@ static int mpeg1_decode_picture(AVCodecContext *avctx, const uint8_t *buf,
 static void mpeg_decode_sequence_extension(Mpeg1Context *const s1,
                                            GetBitContext *const gb)
 {
-    MpegEncContext *s = &s1->mpeg_enc_ctx;
+    MPVContext *const s = &s1->slice.c;
     int horiz_size_ext, vert_size_ext;
     int bit_rate_ext;
 
@@ -1058,7 +1063,7 @@ static void mpeg_decode_sequence_extension(Mpeg1Context *const s1,
 static void mpeg_decode_sequence_display_extension(Mpeg1Context *const s1,
                                                    GetBitContext *const gb)
 {
-    MpegEncContext *s = &s1->mpeg_enc_ctx;
+    MPVContext *const s = &s1->slice.c;
     int color_description, w, h;
 
     skip_bits(gb, 3); /* video format */
@@ -1083,7 +1088,7 @@ static void mpeg_decode_sequence_display_extension(Mpeg1Context *const s1,
 static void mpeg_decode_picture_display_extension(Mpeg1Context *const s1,
                                                   GetBitContext *const gb)
 {
-    MpegEncContext *s = &s1->mpeg_enc_ctx;
+    MPVContext *const s = &s1->slice.c;
     int i, nofco;
 
     nofco = 1;
@@ -1156,7 +1161,7 @@ static void mpeg_decode_quant_matrix_extension(MPVContext *const s,
 static int mpeg_decode_picture_coding_extension(Mpeg1Context *const s1,
                                                 GetBitContext *const gb)
 {
-    MpegEncContext *s = &s1->mpeg_enc_ctx;
+    MPVContext *const s = &s1->slice.c;
 
     s->full_pel[0]       = s->full_pel[1] = 0;
     s->mpeg_f_code[0][0] = get_bits(gb, 4);
@@ -1214,7 +1219,7 @@ static int mpeg_decode_picture_coding_extension(Mpeg1Context *const s1,
 
 static int mpeg_field_start(Mpeg1Context *s1, const uint8_t *buf, int buf_size)
 {
-    MpegEncContext *s = &s1->mpeg_enc_ctx;
+    MPVContext *const s = &s1->slice.c;
     AVCodecContext *avctx = s->avctx;
     int second_field = 0;
     int ret;
@@ -1343,69 +1348,69 @@ static int mpeg_field_start(Mpeg1Context *s1, const uint8_t *buf, int buf_size)
 
 /**
  * Decode a slice.
- * MpegEncContext.mb_y must be set to the MB row from the startcode.
+ * Mpeg12SliceContext.c.mb_y must be set to the MB row from the startcode.
  * @return DECODE_SLICE_ERROR if the slice is damaged,
  *         DECODE_SLICE_OK if this slice is OK
  */
-static int mpeg_decode_slice(MpegEncContext *s, int mb_y,
+static int mpeg_decode_slice(Mpeg12SliceContext *const s, int mb_y,
                              const uint8_t **buf, int buf_size)
 {
-    AVCodecContext *avctx = s->avctx;
-    const int lowres      = s->avctx->lowres;
-    const int field_pic   = s->picture_structure != PICT_FRAME;
+    AVCodecContext *avctx = s->c.avctx;
+    const int lowres      = s->c.avctx->lowres;
+    const int field_pic   = s->c.picture_structure != PICT_FRAME;
     int ret;
 
-    s->resync_mb_x =
-    s->resync_mb_y = -1;
+    s->c.resync_mb_x =
+    s->c.resync_mb_y = -1;
 
-    av_assert0(mb_y < s->mb_height);
+    av_assert0(mb_y < s->c.mb_height);
 
-    ret = init_get_bits8(&s->gb, *buf, buf_size);
+    ret = init_get_bits8(&s->c.gb, *buf, buf_size);
     if (ret < 0)
         return ret;
 
-    if (s->codec_id != AV_CODEC_ID_MPEG1VIDEO && s->mb_height > 2800/16)
-        skip_bits(&s->gb, 3);
+    if (s->c.codec_id != AV_CODEC_ID_MPEG1VIDEO && s->c.mb_height > 2800/16)
+        skip_bits(&s->c.gb, 3);
 
-    ff_mpeg1_clean_buffers(s);
-    s->interlaced_dct = 0;
+    ff_mpeg1_clean_buffers(&s->c);
+    s->c.interlaced_dct = 0;
 
-    s->qscale = mpeg_get_qscale(&s->gb, s->q_scale_type);
+    s->c.qscale = mpeg_get_qscale(&s->c.gb, s->c.q_scale_type);
 
-    if (s->qscale == 0) {
-        av_log(s->avctx, AV_LOG_ERROR, "qscale == 0\n");
+    if (s->c.qscale == 0) {
+        av_log(s->c.avctx, AV_LOG_ERROR, "qscale == 0\n");
         return AVERROR_INVALIDDATA;
     }
 
     /* extra slice info */
-    if (skip_1stop_8data_bits(&s->gb) < 0)
+    if (skip_1stop_8data_bits(&s->c.gb) < 0)
         return AVERROR_INVALIDDATA;
 
-    s->mb_x = 0;
+    s->c.mb_x = 0;
 
-    if (mb_y == 0 && s->codec_tag == AV_RL32("SLIF")) {
-        skip_bits1(&s->gb);
+    if (mb_y == 0 && s->c.codec_tag == AV_RL32("SLIF")) {
+        skip_bits1(&s->c.gb);
     } else {
-        while (get_bits_left(&s->gb) > 0) {
-            int code = get_vlc2(&s->gb, ff_mbincr_vlc,
+        while (get_bits_left(&s->c.gb) > 0) {
+            int code = get_vlc2(&s->c.gb, ff_mbincr_vlc,
                                 MBINCR_VLC_BITS, 2);
             if (code < 0) {
-                av_log(s->avctx, AV_LOG_ERROR, "first mb_incr damaged\n");
+                av_log(s->c.avctx, AV_LOG_ERROR, "first mb_incr damaged\n");
                 return AVERROR_INVALIDDATA;
             }
             if (code >= 33) {
                 if (code == 33)
-                    s->mb_x += 33;
+                    s->c.mb_x += 33;
                 /* otherwise, stuffing, nothing to do */
             } else {
-                s->mb_x += code;
+                s->c.mb_x += code;
                 break;
             }
         }
     }
 
-    if (s->mb_x >= (unsigned) s->mb_width) {
-        av_log(s->avctx, AV_LOG_ERROR, "initial skip overflow\n");
+    if (s->c.mb_x >= (unsigned) s->c.mb_width) {
+        av_log(s->c.avctx, AV_LOG_ERROR, "initial skip overflow\n");
         return AVERROR_INVALIDDATA;
     }
 
@@ -1415,36 +1420,36 @@ static int mpeg_decode_slice(MpegEncContext *s, int mb_y,
         buf_end = avpriv_find_start_code(buf_start + 2, *buf + buf_size, &start_code);
         if (buf_end < *buf + buf_size)
             buf_end -= 4;
-        s->mb_y = mb_y;
+        s->c.mb_y = mb_y;
         if (FF_HW_CALL(avctx, decode_slice, buf_start, buf_end - buf_start) < 0)
             return DECODE_SLICE_ERROR;
         *buf = buf_end;
         return DECODE_SLICE_OK;
     }
 
-    s->resync_mb_x = s->mb_x;
-    s->resync_mb_y = s->mb_y = mb_y;
-    s->mb_skip_run = 0;
-    ff_init_block_index(s);
+    s->c.resync_mb_x = s->c.mb_x;
+    s->c.resync_mb_y = s->c.mb_y = mb_y;
+    s->c.mb_skip_run = 0;
+    ff_init_block_index(&s->c);
 
-    if (s->mb_y == 0 && s->mb_x == 0 && (s->first_field || s->picture_structure == PICT_FRAME)) {
-        if (s->avctx->debug & FF_DEBUG_PICT_INFO) {
-            av_log(s->avctx, AV_LOG_DEBUG,
+    if (s->c.mb_y == 0 && s->c.mb_x == 0 && (s->c.first_field || s->c.picture_structure == PICT_FRAME)) {
+        if (s->c.avctx->debug & FF_DEBUG_PICT_INFO) {
+            av_log(s->c.avctx, AV_LOG_DEBUG,
                    "qp:%d fc:%2d%2d%2d%2d %c %s %s %s %s dc:%d pstruct:%d fdct:%d cmv:%d qtype:%d ivlc:%d rff:%d %s\n",
-                   s->qscale,
-                   s->mpeg_f_code[0][0], s->mpeg_f_code[0][1],
-                   s->mpeg_f_code[1][0], s->mpeg_f_code[1][1],
-                   s->pict_type  == AV_PICTURE_TYPE_I ? 'I' :
-                   (s->pict_type == AV_PICTURE_TYPE_P ? 'P' :
-                   (s->pict_type == AV_PICTURE_TYPE_B ? 'B' : 'S')),
-                   s->progressive_sequence ? "ps"  : "",
-                   s->progressive_frame    ? "pf"  : "",
-                   s->alternate_scan       ? "alt" : "",
-                   s->top_field_first      ? "top" : "",
-                   s->intra_dc_precision, s->picture_structure,
-                   s->frame_pred_frame_dct, s->concealment_motion_vectors,
-                   s->q_scale_type, s->intra_vlc_format,
-                   s->repeat_first_field, s->chroma_420_type ? "420" : "");
+                   s->c.qscale,
+                   s->c.mpeg_f_code[0][0], s->c.mpeg_f_code[0][1],
+                   s->c.mpeg_f_code[1][0], s->c.mpeg_f_code[1][1],
+                   s->c.pict_type  == AV_PICTURE_TYPE_I ? 'I' :
+                   (s->c.pict_type == AV_PICTURE_TYPE_P ? 'P' :
+                   (s->c.pict_type == AV_PICTURE_TYPE_B ? 'B' : 'S')),
+                   s->c.progressive_sequence ? "ps"  : "",
+                   s->c.progressive_frame    ? "pf"  : "",
+                   s->c.alternate_scan       ? "alt" : "",
+                   s->c.top_field_first      ? "top" : "",
+                   s->c.intra_dc_precision, s->c.picture_structure,
+                   s->c.frame_pred_frame_dct, s->c.concealment_motion_vectors,
+                   s->c.q_scale_type, s->c.intra_vlc_format,
+                   s->c.repeat_first_field, s->c.chroma_420_type ? "420" : "");
         }
     }
 
@@ -1454,67 +1459,67 @@ static int mpeg_decode_slice(MpegEncContext *s, int mb_y,
             return ret;
 
         // Note motion_val is normally NULL unless we want to extract the MVs.
-        if (s->cur_pic.motion_val[0]) {
-            const int wrap = s->b8_stride;
-            int xy         = s->mb_x * 2 + s->mb_y * 2 * wrap;
-            int b8_xy      = 4 * (s->mb_x + s->mb_y * s->mb_stride);
+        if (s->c.cur_pic.motion_val[0]) {
+            const int wrap = s->c.b8_stride;
+            int xy         = s->c.mb_x * 2 + s->c.mb_y * 2 * wrap;
+            int b8_xy      = 4 * (s->c.mb_x + s->c.mb_y * s->c.mb_stride);
             int motion_x, motion_y, dir, i;
 
             for (i = 0; i < 2; i++) {
                 for (dir = 0; dir < 2; dir++) {
-                    if (s->mb_intra ||
-                        (dir == 1 && s->pict_type != AV_PICTURE_TYPE_B)) {
+                    if (s->c.mb_intra ||
+                        (dir == 1 && s->c.pict_type != AV_PICTURE_TYPE_B)) {
                         motion_x = motion_y = 0;
-                    } else if (s->mv_type == MV_TYPE_16X16 ||
-                               (s->mv_type == MV_TYPE_FIELD && field_pic)) {
-                        motion_x = s->mv[dir][0][0];
-                        motion_y = s->mv[dir][0][1];
-                    } else { /* if ((s->mv_type == MV_TYPE_FIELD) || (s->mv_type == MV_TYPE_16X8)) */
-                        motion_x = s->mv[dir][i][0];
-                        motion_y = s->mv[dir][i][1];
+                    } else if (s->c.mv_type == MV_TYPE_16X16 ||
+                               (s->c.mv_type == MV_TYPE_FIELD && field_pic)) {
+                        motion_x = s->c.mv[dir][0][0];
+                        motion_y = s->c.mv[dir][0][1];
+                    } else { /* if ((s->c.mv_type == MV_TYPE_FIELD) || (s->c.mv_type == MV_TYPE_16X8)) */
+                        motion_x = s->c.mv[dir][i][0];
+                        motion_y = s->c.mv[dir][i][1];
                     }
 
-                    s->cur_pic.motion_val[dir][xy][0]     = motion_x;
-                    s->cur_pic.motion_val[dir][xy][1]     = motion_y;
-                    s->cur_pic.motion_val[dir][xy + 1][0] = motion_x;
-                    s->cur_pic.motion_val[dir][xy + 1][1] = motion_y;
-                    s->cur_pic.ref_index [dir][b8_xy]     =
-                    s->cur_pic.ref_index [dir][b8_xy + 1] = s->field_select[dir][i];
-                    av_assert2(s->field_select[dir][i] == 0 ||
-                               s->field_select[dir][i] == 1);
+                    s->c.cur_pic.motion_val[dir][xy][0]     = motion_x;
+                    s->c.cur_pic.motion_val[dir][xy][1]     = motion_y;
+                    s->c.cur_pic.motion_val[dir][xy + 1][0] = motion_x;
+                    s->c.cur_pic.motion_val[dir][xy + 1][1] = motion_y;
+                    s->c.cur_pic.ref_index [dir][b8_xy]     =
+                    s->c.cur_pic.ref_index [dir][b8_xy + 1] = s->c.field_select[dir][i];
+                    av_assert2(s->c.field_select[dir][i] == 0 ||
+                               s->c.field_select[dir][i] == 1);
                 }
                 xy    += wrap;
                 b8_xy += 2;
             }
         }
 
-        s->dest[0] += 16 >> lowres;
-        s->dest[1] +=(16 >> lowres) >> s->chroma_x_shift;
-        s->dest[2] +=(16 >> lowres) >> s->chroma_x_shift;
+        s->c.dest[0] += 16 >> lowres;
+        s->c.dest[1] +=(16 >> lowres) >> s->c.chroma_x_shift;
+        s->c.dest[2] +=(16 >> lowres) >> s->c.chroma_x_shift;
 
-        ff_mpv_reconstruct_mb(s, s->block);
+        ff_mpv_reconstruct_mb(&s->c, s->c.block);
 
-        if (++s->mb_x >= s->mb_width) {
-            const int mb_size = 16 >> s->avctx->lowres;
+        if (++s->c.mb_x >= s->c.mb_width) {
+            const int mb_size = 16 >> s->c.avctx->lowres;
             int left;
 
-            ff_mpeg_draw_horiz_band(s, mb_size * (s->mb_y >> field_pic), mb_size);
+            ff_mpeg_draw_horiz_band(&s->c, mb_size * (s->c.mb_y >> field_pic), mb_size);
 
-            s->mb_x  = 0;
-            s->mb_y += 1 << field_pic;
+            s->c.mb_x  = 0;
+            s->c.mb_y += 1 << field_pic;
 
-            if (s->mb_y >= s->mb_height) {
-                int left   = get_bits_left(&s->gb);
-                int is_d10 = s->chroma_format == CHROMA_422 &&
-                             s->pict_type == AV_PICTURE_TYPE_I &&
+            if (s->c.mb_y >= s->c.mb_height) {
+                int left   = get_bits_left(&s->c.gb);
+                int is_d10 = s->c.chroma_format == CHROMA_422 &&
+                             s->c.pict_type == AV_PICTURE_TYPE_I &&
                              avctx->profile == 0 && avctx->level == 5 &&
-                             s->intra_dc_precision == 2 &&
-                             s->q_scale_type == 1 && s->alternate_scan == 0 &&
-                             s->progressive_frame == 0
+                             s->c.intra_dc_precision == 2 &&
+                             s->c.q_scale_type == 1 && s->c.alternate_scan == 0 &&
+                             s->c.progressive_frame == 0
                              /* vbv_delay == 0xBBB || 0xE10 */;
 
                 if (left >= 32 && !is_d10) {
-                    GetBitContext gb = s->gb;
+                    GetBitContext gb = s->c.gb;
                     align_get_bits(&gb);
                     if (show_bits(&gb, 24) == 0x060E2B) {
                         av_log(avctx, AV_LOG_DEBUG, "Invalid MXF data found in video stream\n");
@@ -1527,10 +1532,10 @@ static int mpeg_decode_slice(MpegEncContext *s, int mb_y,
                 }
 
                 if (left < 0 ||
-                    (left && show_bits(&s->gb, FFMIN(left, 23)) && !is_d10) ||
+                    (left && show_bits(&s->c.gb, FFMIN(left, 23)) && !is_d10) ||
                     ((avctx->err_recognition & (AV_EF_BITSTREAM | AV_EF_AGGRESSIVE)) && left > 8)) {
                     av_log(avctx, AV_LOG_ERROR, "end mismatch left=%d %0X at %d %d\n",
-                           left, left>0 ? show_bits(&s->gb, FFMIN(left, 23)) : 0, s->mb_x, s->mb_y);
+                           left, left>0 ? show_bits(&s->c.gb, FFMIN(left, 23)) : 0, s->c.mb_x, s->c.mb_y);
                     return AVERROR_INVALIDDATA;
                 } else
                     goto eos;
@@ -1539,136 +1544,136 @@ static int mpeg_decode_slice(MpegEncContext *s, int mb_y,
             // in cases where the slice is completely outside the visible
             // area, we detect this here instead of running into the end expecting
             // more data
-            left = get_bits_left(&s->gb);
-            if (s->mb_y >= ((s->height + 15) >> 4) &&
-                !s->progressive_sequence &&
+            left = get_bits_left(&s->c.gb);
+            if (s->c.mb_y >= ((s->c.height + 15) >> 4) &&
+                !s->c.progressive_sequence &&
                 left <= 25 &&
                 left >= 0 &&
-                s->mb_skip_run == -1 &&
-                (!left || show_bits(&s->gb, left) == 0))
+                s->c.mb_skip_run == -1 &&
+                (!left || show_bits(&s->c.gb, left) == 0))
                 goto eos;
 
-            ff_init_block_index(s);
+            ff_init_block_index(&s->c);
         }
 
         /* skip mb handling */
-        if (s->mb_skip_run == -1) {
+        if (s->c.mb_skip_run == -1) {
             /* read increment again */
-            s->mb_skip_run = 0;
+            s->c.mb_skip_run = 0;
             for (;;) {
-                int code = get_vlc2(&s->gb, ff_mbincr_vlc,
+                int code = get_vlc2(&s->c.gb, ff_mbincr_vlc,
                                     MBINCR_VLC_BITS, 2);
                 if (code < 0) {
-                    av_log(s->avctx, AV_LOG_ERROR, "mb incr damaged\n");
+                    av_log(s->c.avctx, AV_LOG_ERROR, "mb incr damaged\n");
                     return AVERROR_INVALIDDATA;
                 }
                 if (code >= 33) {
                     if (code == 33) {
-                        s->mb_skip_run += 33;
+                        s->c.mb_skip_run += 33;
                     } else if (code == 35) {
-                        if (s->mb_skip_run != 0 || show_bits(&s->gb, 15) != 0) {
-                            av_log(s->avctx, AV_LOG_ERROR, "slice mismatch\n");
+                        if (s->c.mb_skip_run != 0 || show_bits(&s->c.gb, 15) != 0) {
+                            av_log(s->c.avctx, AV_LOG_ERROR, "slice mismatch\n");
                             return AVERROR_INVALIDDATA;
                         }
                         goto eos; /* end of slice */
                     }
                     /* otherwise, stuffing, nothing to do */
                 } else {
-                    s->mb_skip_run += code;
+                    s->c.mb_skip_run += code;
                     break;
                 }
             }
-            if (s->mb_skip_run) {
+            if (s->c.mb_skip_run) {
                 int i;
-                if (s->pict_type == AV_PICTURE_TYPE_I) {
-                    av_log(s->avctx, AV_LOG_ERROR,
-                           "skipped MB in I-frame at %d %d\n", s->mb_x, s->mb_y);
+                if (s->c.pict_type == AV_PICTURE_TYPE_I) {
+                    av_log(s->c.avctx, AV_LOG_ERROR,
+                           "skipped MB in I-frame at %d %d\n", s->c.mb_x, s->c.mb_y);
                     return AVERROR_INVALIDDATA;
                 }
 
                 /* skip mb */
-                s->mb_intra = 0;
+                s->c.mb_intra = 0;
                 for (i = 0; i < 12; i++)
-                    s->block_last_index[i] = -1;
-                s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 128 << s->intra_dc_precision;
-                if (s->picture_structure == PICT_FRAME)
-                    s->mv_type = MV_TYPE_16X16;
+                    s->c.block_last_index[i] = -1;
+                s->c.last_dc[0] = s->c.last_dc[1] = s->c.last_dc[2] = 128 << s->c.intra_dc_precision;
+                if (s->c.picture_structure == PICT_FRAME)
+                    s->c.mv_type = MV_TYPE_16X16;
                 else
-                    s->mv_type = MV_TYPE_FIELD;
-                if (s->pict_type == AV_PICTURE_TYPE_P) {
+                    s->c.mv_type = MV_TYPE_FIELD;
+                if (s->c.pict_type == AV_PICTURE_TYPE_P) {
                     /* if P type, zero motion vector is implied */
-                    s->mv_dir             = MV_DIR_FORWARD;
-                    s->mv[0][0][0]        = s->mv[0][0][1]      = 0;
-                    s->last_mv[0][0][0]   = s->last_mv[0][0][1] = 0;
-                    s->last_mv[0][1][0]   = s->last_mv[0][1][1] = 0;
-                    s->field_select[0][0] = (s->picture_structure - 1) & 1;
+                    s->c.mv_dir             = MV_DIR_FORWARD;
+                    s->c.mv[0][0][0]        = s->c.mv[0][0][1]      = 0;
+                    s->c.last_mv[0][0][0]   = s->c.last_mv[0][0][1] = 0;
+                    s->c.last_mv[0][1][0]   = s->c.last_mv[0][1][1] = 0;
+                    s->c.field_select[0][0] = (s->c.picture_structure - 1) & 1;
                 } else {
                     /* if B type, reuse previous vectors and directions */
-                    s->mv[0][0][0] = s->last_mv[0][0][0];
-                    s->mv[0][0][1] = s->last_mv[0][0][1];
-                    s->mv[1][0][0] = s->last_mv[1][0][0];
-                    s->mv[1][0][1] = s->last_mv[1][0][1];
-                    s->field_select[0][0] = (s->picture_structure - 1) & 1;
-                    s->field_select[1][0] = (s->picture_structure - 1) & 1;
+                    s->c.mv[0][0][0] = s->c.last_mv[0][0][0];
+                    s->c.mv[0][0][1] = s->c.last_mv[0][0][1];
+                    s->c.mv[1][0][0] = s->c.last_mv[1][0][0];
+                    s->c.mv[1][0][1] = s->c.last_mv[1][0][1];
+                    s->c.field_select[0][0] = (s->c.picture_structure - 1) & 1;
+                    s->c.field_select[1][0] = (s->c.picture_structure - 1) & 1;
                 }
             }
         }
     }
 eos: // end of slice
-    if (get_bits_left(&s->gb) < 0) {
-        av_log(s->avctx, AV_LOG_ERROR, "overread %d\n", -get_bits_left(&s->gb));
+    if (get_bits_left(&s->c.gb) < 0) {
+        av_log(s->c.avctx, AV_LOG_ERROR, "overread %d\n", -get_bits_left(&s->c.gb));
         return AVERROR_INVALIDDATA;
     }
-    *buf += (get_bits_count(&s->gb) - 1) / 8;
-    ff_dlog(s->avctx, "Slice start:%d %d  end:%d %d\n", s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y);
+    *buf += (get_bits_count(&s->c.gb) - 1) / 8;
+    ff_dlog(s->c.avctx, "Slice start:%d %d  end:%d %d\n", s->c.resync_mb_x, s->c.resync_mb_y, s->c.mb_x, s->c.mb_y);
     return 0;
 }
 
 static int slice_decode_thread(AVCodecContext *c, void *arg)
 {
-    MpegEncContext *s   = *(void **) arg;
-    const uint8_t *buf  = s->gb.buffer;
-    int mb_y            = s->start_mb_y;
-    const int field_pic = s->picture_structure != PICT_FRAME;
+    Mpeg12SliceContext *const s = *(void **) arg;
+    const uint8_t *buf  = s->c.gb.buffer;
+    int mb_y            = s->c.start_mb_y;
+    const int field_pic = s->c.picture_structure != PICT_FRAME;
 
-    s->er.error_count = (3 * (s->end_mb_y - s->start_mb_y) * s->mb_width) >> field_pic;
+    s->c.er.error_count = (3 * (s->c.end_mb_y - s->c.start_mb_y) * s->c.mb_width) >> field_pic;
 
     for (;;) {
         uint32_t start_code;
         int ret;
 
-        ret = mpeg_decode_slice(s, mb_y, &buf, s->gb.buffer_end - buf);
+        ret = mpeg_decode_slice(s, mb_y, &buf, s->c.gb.buffer_end - buf);
         emms_c();
         ff_dlog(c, "ret:%d resync:%d/%d mb:%d/%d ts:%d/%d ec:%d\n",
-                ret, s->resync_mb_x, s->resync_mb_y, s->mb_x, s->mb_y,
-                s->start_mb_y, s->end_mb_y, s->er.error_count);
+                ret, s->c.resync_mb_x, s->c.resync_mb_y, s->c.mb_x, s->c.mb_y,
+                s->c.start_mb_y, s->c.end_mb_y, s->c.er.error_count);
         if (ret < 0) {
             if (c->err_recognition & AV_EF_EXPLODE)
                 return ret;
-            if (s->resync_mb_x >= 0 && s->resync_mb_y >= 0)
-                ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
-                                s->mb_x, s->mb_y,
+            if (s->c.resync_mb_x >= 0 && s->c.resync_mb_y >= 0)
+                ff_er_add_slice(&s->c.er, s->c.resync_mb_x, s->c.resync_mb_y,
+                                s->c.mb_x, s->c.mb_y,
                                 ER_AC_ERROR | ER_DC_ERROR | ER_MV_ERROR);
         } else {
-            ff_er_add_slice(&s->er, s->resync_mb_x, s->resync_mb_y,
-                            s->mb_x - 1, s->mb_y,
+            ff_er_add_slice(&s->c.er, s->c.resync_mb_x, s->c.resync_mb_y,
+                            s->c.mb_x - 1, s->c.mb_y,
                             ER_AC_END | ER_DC_END | ER_MV_END);
         }
 
-        if (s->mb_y == s->end_mb_y)
+        if (s->c.mb_y == s->c.end_mb_y)
             return 0;
 
         start_code = -1;
-        buf        = avpriv_find_start_code(buf, s->gb.buffer_end, &start_code);
+        buf        = avpriv_find_start_code(buf, s->c.gb.buffer_end, &start_code);
         if (start_code < SLICE_MIN_START_CODE || start_code > SLICE_MAX_START_CODE)
             return AVERROR_INVALIDDATA;
         mb_y       = start_code - SLICE_MIN_START_CODE;
-        if (s->codec_id != AV_CODEC_ID_MPEG1VIDEO && s->mb_height > 2800/16)
+        if (s->c.codec_id != AV_CODEC_ID_MPEG1VIDEO && s->c.mb_height > 2800/16)
             mb_y += (*buf&0xE0)<<2;
         mb_y <<= field_pic;
-        if (s->picture_structure == PICT_BOTTOM_FIELD)
+        if (s->c.picture_structure == PICT_BOTTOM_FIELD)
             mb_y++;
-        if (mb_y >= s->end_mb_y)
+        if (mb_y >= s->c.end_mb_y)
             return AVERROR_INVALIDDATA;
     }
 }
@@ -1680,7 +1685,7 @@ static int slice_decode_thread(AVCodecContext *c, void *arg)
 static int slice_end(AVCodecContext *avctx, AVFrame *pict, int *got_output)
 {
     Mpeg1Context *s1  = avctx->priv_data;
-    MpegEncContext *s = &s1->mpeg_enc_ctx;
+    MPVContext *const s = &s1->slice.c;
 
     if (!s->context_initialized || !s->cur_pic.ptr)
         return 0;
@@ -1731,7 +1736,7 @@ static int mpeg1_decode_sequence(AVCodecContext *avctx,
                                  const uint8_t *buf, int buf_size)
 {
     Mpeg1Context *s1  = avctx->priv_data;
-    MpegEncContext *s = &s1->mpeg_enc_ctx;
+    MPVContext *const s = &s1->slice.c;
     GetBitContext gb0, *const gb = &gb0;
     int width, height;
     int i, v, j;
@@ -1820,7 +1825,7 @@ static int mpeg1_decode_sequence(AVCodecContext *avctx,
 static int vcr2_init_sequence(AVCodecContext *avctx)
 {
     Mpeg1Context *s1  = avctx->priv_data;
-    MpegEncContext *s = &s1->mpeg_enc_ctx;
+    MPVContext *const s = &s1->slice.c;
     int i, v, ret;
 
     /* start new MPEG-1 context decoding */
@@ -1951,7 +1956,7 @@ static int mpeg_decode_a53_cc(AVCodecContext *avctx,
                         cap[0] = cap[1] = cap[2] = 0x00;
                     } else {
                         field = (field == 2 ? 1 : 0);
-                        if (!s1->mpeg_enc_ctx.top_field_first) field = !field;
+                        if (!s1->slice.c.top_field_first) field = !field;
                         cap[0] = 0x04 | field;
                         cap[1] = ff_reverse[cc1];
                         cap[2] = ff_reverse[cc2];
@@ -2165,7 +2170,7 @@ static int mpeg_decode_gop(AVCodecContext *avctx,
                             const uint8_t *buf, int buf_size)
 {
     Mpeg1Context *s1  = avctx->priv_data;
-    MpegEncContext *s = &s1->mpeg_enc_ctx;
+    MPVContext *const s = &s1->slice.c;
     GetBitContext gb0, *const gb = &gb0;
     int broken_link;
     int64_t tc;
@@ -2198,12 +2203,12 @@ static void mpeg12_execute_slice_threads(AVCodecContext *avctx,
 {
     if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE) &&
         !avctx->hwaccel) {
-        MpegEncContext *const s2 = &s->mpeg_enc_ctx;
+        MPVContext *const s2 = &s->slice.c;
         int error_count = 0;
 
         avctx->execute(avctx, slice_decode_thread,
-                       s2->thread_context, NULL,
-                       s->slice_count, sizeof(void *));
+                       s2->mpeg12_contexts, NULL,
+                       s->slice_count, sizeof(s2->mpeg12_contexts[0]));
 
         for (int i = 0; i < s->slice_count; i++) {
             MpegEncContext *const slice = s2->thread_context[i];
@@ -2226,7 +2231,7 @@ static int decode_chunks(AVCodecContext *avctx, AVFrame *picture,
                          int *got_output, const uint8_t *buf, int buf_size)
 {
     Mpeg1Context *s = avctx->priv_data;
-    MpegEncContext *s2 = &s->mpeg_enc_ctx;
+    MPVContext *const s2 = &s->slice.c;
     const uint8_t *buf_ptr = buf;
     const uint8_t *buf_end = buf + buf_size;
     int ret, input_size;
@@ -2501,24 +2506,24 @@ static int decode_chunks(AVCodecContext *avctx, AVFrame *picture,
                                      s2->slice_context_count / 2) /
                                     s2->slice_context_count;
                     if (threshold <= mb_y) {
-                        MpegEncContext *thread_context = s2->thread_context[s->slice_count];
+                        Mpeg12SliceContext *const thread_context = s2->mpeg12_contexts[s->slice_count];
 
-                        thread_context->start_mb_y = mb_y;
-                        thread_context->end_mb_y   = s2->mb_height;
+                        thread_context->c.start_mb_y = mb_y;
+                        thread_context->c.end_mb_y   = s2->mb_height;
                         if (s->slice_count) {
                             s2->thread_context[s->slice_count - 1]->end_mb_y = mb_y;
-                            ret = ff_update_duplicate_context(thread_context, s2);
+                            ret = ff_update_duplicate_context(&thread_context->c, s2);
                             if (ret < 0)
                                 return ret;
                         }
-                        ret = init_get_bits8(&thread_context->gb, buf_ptr, input_size);
+                        ret = init_get_bits8(&thread_context->c.gb, buf_ptr, input_size);
                         if (ret < 0)
                             return ret;
                         s->slice_count++;
                     }
                     buf_ptr += 2; // FIXME add minimum number of bytes per slice
                 } else {
-                    ret = mpeg_decode_slice(s2, mb_y, &buf_ptr, input_size);
+                    ret = mpeg_decode_slice(&s->slice, mb_y, &buf_ptr, input_size);
                     emms_c();
 
                     if (ret < 0) {
@@ -2547,7 +2552,7 @@ static int mpeg_decode_frame(AVCodecContext *avctx, AVFrame *picture,
     int ret;
     int buf_size = avpkt->size;
     Mpeg1Context *s = avctx->priv_data;
-    MpegEncContext *s2 = &s->mpeg_enc_ctx;
+    MPVContext *const s2 = &s->slice.c;
 
     if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == SEQ_END_CODE)) {
         /* special case for last picture */
@@ -2746,7 +2751,7 @@ const FFCodec ff_mpegvideo_decoder = {
 };
 
 typedef struct IPUContext {
-    MpegEncContext m;
+    Mpeg12SliceContext m;
 
     int flags;
     DECLARE_ALIGNED(32, int16_t, block)[6][64];
@@ -2756,7 +2761,7 @@ static int ipu_decode_frame(AVCodecContext *avctx, AVFrame *frame,
                             int *got_frame, AVPacket *avpkt)
 {
     IPUContext *s = avctx->priv_data;
-    MpegEncContext *m = &s->m;
+    MPVContext *const m = &s->m.c;
     GetBitContext *gb = &m->gb;
     int ret;
 
@@ -2817,7 +2822,7 @@ static int ipu_decode_frame(AVCodecContext *avctx, AVFrame *frame,
                                                       m->last_dc, s->block[n],
                                                       n, m->qscale);
                 } else {
-                    ret = mpeg2_decode_block_intra(m, s->block[n], n);
+                    ret = mpeg2_decode_block_intra(&s->m, s->block[n], n);
                 }
 
                 if (ret < 0)
@@ -2851,7 +2856,7 @@ static int ipu_decode_frame(AVCodecContext *avctx, AVFrame *frame,
 static av_cold int ipu_decode_init(AVCodecContext *avctx)
 {
     IPUContext *s = avctx->priv_data;
-    MpegEncContext *m = &s->m;
+    MPVContext *const m = &s->m.c;
 
     avctx->pix_fmt = AV_PIX_FMT_YUV420P;
     m->avctx       = avctx;
diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c
index 3cbd686558..ae294e9dd8 100644
--- a/libavcodec/mpegvideo.c
+++ b/libavcodec/mpegvideo.c
@@ -128,7 +128,7 @@ static av_cold int init_duplicate_context(MpegEncContext *s)
 av_cold int ff_mpv_init_duplicate_contexts(MpegEncContext *s)
 {
     int nb_slices = s->slice_context_count, ret;
-    size_t slice_size = s->slice_ctx_size ? s->slice_ctx_size : sizeof(*s);
+    const size_t slice_size = s->slice_ctx_size;
 
     /* We initialize the copies before the original so that
      * fields allocated in init_duplicate_context are NULL after
diff --git a/libavcodec/mpegvideo.h b/libavcodec/mpegvideo.h
index 8b94cc1ff0..2581641d0c 100644
--- a/libavcodec/mpegvideo.h
+++ b/libavcodec/mpegvideo.h
@@ -116,6 +116,7 @@ typedef struct MpegEncContext {
     int end_mb_y;              ///< end   mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
     union {
         struct MpegEncContext *thread_context[MAX_THREADS];
+        struct Mpeg12SliceContext *mpeg12_contexts[MAX_THREADS];
         struct MPVEncContext  *enc_contexts[MAX_THREADS];
     };
     int slice_context_count;   ///< number of used thread_contexts
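
Not part of the patch: a minimal standalone C sketch of the pattern the hunks above rely on, assuming nothing about FFmpeg internals beyond what is visible here. The codec-specific slice context embeds the shared base context as its first member, an anonymous union of pointer arrays (as in the mpegvideo.h hunk) lets generic and codec-specific code look at the same per-thread pointers, and the worker recovers its context through the same *(void **) arg convention used in slice_decode_thread(). All names below (ToyBaseContext, ToySliceContext, ToyDispatcher, run_slices, decode_slice_worker) are invented for illustration; run_slices() merely stands in for avctx->execute().

/* toy_slice_contexts.c -- illustrative sketch only, not FFmpeg code */
#include <stdio.h>
#include <stddef.h>

#define MAX_THREADS 4

typedef struct ToyBaseContext {      /* stands in for the shared base context */
    int start_mb_y;
    int end_mb_y;
} ToyBaseContext;

typedef struct ToySliceContext {     /* codec-specific context; base must be first member */
    ToyBaseContext c;
    int codec_private_state;
} ToySliceContext;

typedef struct ToyDispatcher {
    union {                          /* two typed views of the same pointer storage */
        ToyBaseContext  *thread_context[MAX_THREADS];
        ToySliceContext *slice_contexts[MAX_THREADS];
    };
    int count;
} ToyDispatcher;

/* worker mirrors the *(void **) arg convention: arg points into the pointer array */
static int decode_slice_worker(void *arg)
{
    ToySliceContext *const s = *(void **) arg;
    printf("slice rows %d..%d, private=%d\n",
           s->c.start_mb_y, s->c.end_mb_y, s->codec_private_state);
    return 0;
}

/* stands in for avctx->execute(): walk an array of per-slice arguments */
static void run_slices(int (*func)(void *), void *args, int count, size_t size)
{
    for (int i = 0; i < count; i++)
        func((char *)args + i * size);
}

int main(void)
{
    ToySliceContext slices[2] = {
        { .c = { .start_mb_y = 0,  .end_mb_y = 16 }, .codec_private_state = 1 },
        { .c = { .start_mb_y = 16, .end_mb_y = 32 }, .codec_private_state = 2 },
    };
    ToyDispatcher d = { .count = 2 };

    for (int i = 0; i < d.count; i++)
        d.slice_contexts[i] = &slices[i];

    /* generic code can still reach the same objects through the base-typed view */
    printf("generic view: first slice starts at row %d\n",
           d.thread_context[0]->start_mb_y);

    run_slices(decode_slice_worker, d.slice_contexts, d.count,
               sizeof(d.slice_contexts[0]));
    return 0;
}

Built with any C11 compiler this prints the generic view first and then one line per slice; the point is that keeping the base as the first member keeps both typed views of the pointer array consistent, which is what lets the patch swap MpegEncContext pointers for Mpeg12SliceContext pointers without touching the generic threading code.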
-- 
ffmpeg-codebot

_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".

Thread overview: 49+ messages
2025-06-23 13:36 [FFmpeg-devel] [PATCH 00/48] H263DecContext ffmpegagent
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 01/48] avcodec/ituh263dec: Use correct logcontext Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 02/48] avcodec/rl: Avoid branch in index lookup Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 03/48] avcodec/ituh263enc: Simplify creating LUT Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 04/48] avcodec/ituh263dec: Only initialize ff_h263_rl_inter when needed Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 05/48] avcodec/mpegvideoenc: Allocate blocks as part of MPVEncContext Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 06/48] avcodec/mpegvideo: Add MPVContext typedef Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 07/48] avcodec/mpegvideo_dec: Factor debugging dct coefficients out Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 08/48] avcodec/mpegvideo_dec: Reindent after the previous commit Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 09/48] avcodec/mpeg_er: Don't use MpegEncContext.block Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 10/48] avcodec/mpegvideodec: Remove size expectation from ff_mpv_reconstruct_mb Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 11/48] avcodec/h261dec: Stop using MpegEncContext.gb Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 12/48] avcodec/h261dec: Don't use MpegEncContext.block Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 13/48] avcodec/mpeg12dec: Put GetBitContext on the stack where advantageous Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 14/48] avcodec/mpeg12dec: Remove unused function parameter Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 15/48] avcodec/rv34: Don't use MpegEncContext.gb Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 16/48] avcodec/rv34: Don't use MpegEncContext.block Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 17/48] avcodec/intrax8: Don't pretend to need more than one int16_t[64] Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 18/48] avcodec/vc1: Stop using MpegEncContext.gb Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 19/48] avcodec/vc1: Don't use MpegEncContext.block Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 20/48] avcodec/mpeg12dec: Deduplicate variables Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 21/48] avcodec/mpegvideo: Move flipflop_rounding to {MSMPEG4Dec, MPVEnc}Context Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 22/48] avcodec/mpegvideo: Move unrestricted_mv to MotionEstContext Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 23/48] avcodec/mpeg4videodec: Avoid unnecessary indirections Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 24/48] avcodec/{h263, mpeg4video}dec: Pass MPVContext*, not Mpeg4DecContext* Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 25/48] avcodec/mpegvideo: Move dct_precision to Mpeg4DecContext Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 26/48] avcodec/h263dec: Add H263DecContext Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 27/48] avcodec/h263dec: Remove redundant block parameter from decode_mb Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 28/48] avcodec/h263dec: Don't use MpegEncContext.block Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 29/48] avcodec/h263dec: Stop using MpegEncContext.gb Andreas Rheinhardt
2025-06-23 13:36 ` Andreas Rheinhardt [this message]
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 31/48] avcodec/mpegvideo: Add missing headers Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 32/48] avcodec/mpeg12dec: Move MpegEncContext.gb to Mpeg12SliceContext Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 33/48] avcodec/mpeg12dec: Don't use MPVContext.block Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 34/48] avcodec/mpegvideo: Move fields only used by H.263 decoders to H263DecCtx Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 35/48] avcodec/mpegvideo: Move mb_num_left to {H263, RV34}DecContext Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 36/48] avcodec/mpeg12dec: Put mb_skip_run on the stack Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 37/48] avcodec/mpegvideo: Move mb_skip_run to {RV34Dec, MPVEnc}Context Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 38/48] avcodec/mpegvideo: Move SLICE_* defs to h263dec.h, h261dec.c Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 39/48] avcodec/msmpeg4dec: Move ff_msmpeg4_decode_init() down Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 40/48] avcodec/h263dec: Use function ptr for decode_picture_header Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 41/48] avcodec/ituh263enc: Inline value of h263_flv Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 42/48] avcodec/flvdec: Binarize h263_flv Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 43/48] avcodec/mpegvideo: Move fields to {H263Dec, MPVEnc}Context when possible Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 44/48] avcodec/mpeg_er: Allow to skip setting partitioned_frame, p[pb]_time Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 45/48] avcodec/mpegvideo: Move partitioned_frame to {H263Dec, MPVEnc}Context Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 46/48] avcodec/mpegvideo: Move loop_filter to {H263Dec, MPVEnc, VC1}Context Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 47/48] avcodec/rv34: Don't report progress unnecessarily Andreas Rheinhardt
2025-06-23 13:36 ` [FFmpeg-devel] [PATCH 48/48] avcodec/rv34: Fix spelling mistake Andreas Rheinhardt
