Git Inbox Mirror of the ffmpeg-devel mailing list - see https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
 help / color / mirror / Atom feed
From: Lynne via ffmpeg-devel <ffmpeg-devel@ffmpeg.org>
To: ffmpeg-devel@ffmpeg.org
Cc: Lynne <code@ffmpeg.org>
Subject: [FFmpeg-devel] [PATCH] Hardware resizing fixes, DPX decoder cleanups (PR #20854)
Date: Thu, 06 Nov 2025 21:28:48 -0000
Message-ID: <176246452880.25.7236117879275360565@2cb04c0e5124> (raw)

PR #20854 opened by Lynne
URL: https://code.ffmpeg.org/FFmpeg/FFmpeg/pulls/20854
Patch URL: https://code.ffmpeg.org/FFmpeg/FFmpeg/pulls/20854.patch

Decoders have to manually check if the frame size has changed, and call ff_get_format if it has. H.264, HEVC, and most decoders did this, but this was not done for the recently added hooks.

This also adds minor DPX decoder cleanups, in preparation for a Vulkan DPX hardware unpacker.


>From 4e33e8171905f70c455e451cf4d349f8af77df6f Mon Sep 17 00:00:00 2001
From: Lynne <dev@lynne.ee>
Date: Wed, 5 Nov 2025 20:25:31 +0100
Subject: [PATCH 1/9] prores: call ff_get_format if width and height change

The issue is that hardware decoders may have some state they depend on,
which would get broken if the dimensions change.
---
 libavcodec/proresdec.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/libavcodec/proresdec.c b/libavcodec/proresdec.c
index 40c15a0c85..5c6b505527 100644
--- a/libavcodec/proresdec.c
+++ b/libavcodec/proresdec.c
@@ -185,7 +185,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
 static int decode_frame_header(ProresContext *ctx, const uint8_t *buf,
                                const int data_size, AVCodecContext *avctx)
 {
-    int hdr_size, width, height, flags;
+    int hdr_size, width, height, flags, dimensions_changed = 0;
     int version;
     const uint8_t *ptr;
     enum AVPixelFormat pix_fmt;
@@ -214,6 +214,7 @@ static int decode_frame_header(ProresContext *ctx, const uint8_t *buf,
                avctx->width, avctx->height, width, height);
         if ((ret = ff_set_dimensions(avctx, width, height)) < 0)
             return ret;
+        dimensions_changed = 1;
     }
 
     ctx->frame_type = (buf[12] >> 2) & 3;
@@ -250,7 +251,7 @@ static int decode_frame_header(ProresContext *ctx, const uint8_t *buf,
         }
     }
 
-    if (pix_fmt != ctx->pix_fmt) {
+    if (pix_fmt != ctx->pix_fmt || dimensions_changed) {
 #define HWACCEL_MAX (CONFIG_PRORES_VIDEOTOOLBOX_HWACCEL + CONFIG_PRORES_VULKAN_HWACCEL)
 #if HWACCEL_MAX
         enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmtp = pix_fmts;
-- 
2.49.1


>From 12942f1210db38fa35c4eadfd7c3e9323208945b Mon Sep 17 00:00:00 2001
From: Lynne <dev@lynne.ee>
Date: Wed, 5 Nov 2025 20:29:33 +0100
Subject: [PATCH 2/9] prores_raw: call ff_get_format if width and height change

---
 libavcodec/prores_raw.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/libavcodec/prores_raw.c b/libavcodec/prores_raw.c
index 0298956efd..65e0576619 100644
--- a/libavcodec/prores_raw.c
+++ b/libavcodec/prores_raw.c
@@ -329,7 +329,7 @@ static int decode_frame(AVCodecContext *avctx,
                         AVFrame *frame, int *got_frame_ptr,
                         AVPacket *avpkt)
 {
-    int ret;
+    int ret, dimensions_changed = 0;
     ProResRAWContext *s = avctx->priv_data;
     DECLARE_ALIGNED(32, uint8_t, qmat)[64];
     memset(qmat, 1, 64);
@@ -390,13 +390,14 @@ static int decode_frame(AVCodecContext *avctx,
                avctx->width, avctx->height, w, h);
         if ((ret = ff_set_dimensions(avctx, w, h)) < 0)
             return ret;
+        dimensions_changed = 1;
     }
 
     avctx->coded_width  = FFALIGN(w, 16);
     avctx->coded_height = FFALIGN(h, 16);
 
     enum AVPixelFormat pix_fmt = AV_PIX_FMT_BAYER_RGGB16;
-    if (pix_fmt != s->pix_fmt) {
+    if (pix_fmt != s->pix_fmt || dimensions_changed) {
         s->pix_fmt = pix_fmt;
 
         ret = get_pixel_format(avctx, pix_fmt);
-- 
2.49.1


>From 6c348e34dba88b9628b5ebcf5be8637b7f14a4c2 Mon Sep 17 00:00:00 2001
From: Lynne <dev@lynne.ee>
Date: Wed, 5 Nov 2025 20:31:23 +0100
Subject: [PATCH 3/9] ffv1dec: call ff_get_format if width and height change

---
 libavcodec/ffv1.h    | 1 +
 libavcodec/ffv1dec.c | 6 +++++-
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/libavcodec/ffv1.h b/libavcodec/ffv1.h
index d6f25737f5..adf76b0644 100644
--- a/libavcodec/ffv1.h
+++ b/libavcodec/ffv1.h
@@ -139,6 +139,7 @@ typedef struct FFV1Context {
     uint32_t crcref;
     enum AVPixelFormat pix_fmt;
     enum AVPixelFormat configured_pix_fmt;
+    int configured_width, configured_height;
 
     const AVFrame *cur_enc_frame;
     int plane_count;
diff --git a/libavcodec/ffv1dec.c b/libavcodec/ffv1dec.c
index 3d67798961..a70cd74af4 100644
--- a/libavcodec/ffv1dec.c
+++ b/libavcodec/ffv1dec.c
@@ -507,11 +507,15 @@ static int read_header(FFV1Context *f, RangeCoder *c)
     if (ret < 0)
         return ret;
 
-    if (f->configured_pix_fmt != f->pix_fmt) {
+    if (f->configured_pix_fmt != f->pix_fmt ||
+        f->configured_width != f->avctx->width ||
+        f->configured_height != f->avctx->height) {
         f->avctx->pix_fmt = get_pixel_format(f);
         if (f->avctx->pix_fmt < 0)
             return AVERROR(EINVAL);
         f->configured_pix_fmt = f->pix_fmt;
+        f->configured_width = f->avctx->width;
+        f->configured_height = f->avctx->height;
     }
 
     ff_dlog(f->avctx, "%d %d %d\n",
-- 
2.49.1


>From edfec408662554950169ffd96a247a0c995e14fb Mon Sep 17 00:00:00 2001
From: Lynne <dev@lynne.ee>
Date: Tue, 4 Nov 2025 11:40:07 +0100
Subject: [PATCH 4/9] vulkan/ffv1: use u32vec2 for slice offsets

Simplifies calculations slightly.
---
 libavcodec/vulkan/ffv1_dec_setup.comp | 4 ++--
 libavcodec/vulkan_ffv1.c              | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/libavcodec/vulkan/ffv1_dec_setup.comp b/libavcodec/vulkan/ffv1_dec_setup.comp
index 5da09df21c..fd297cb70a 100644
--- a/libavcodec/vulkan/ffv1_dec_setup.comp
+++ b/libavcodec/vulkan/ffv1_dec_setup.comp
@@ -114,8 +114,8 @@ void main(void)
 {
     const uint slice_idx = gl_WorkGroupID.y*gl_NumWorkGroups.x + gl_WorkGroupID.x;
 
-    u8buf bs = u8buf(slice_data + slice_offsets[2*slice_idx + 0]);
-    uint32_t slice_size = slice_offsets[2*slice_idx + 1];
+    u8buf bs = u8buf(slice_data + slice_offsets[slice_idx].x);
+    uint32_t slice_size = slice_offsets[slice_idx].y;
 
     rac_init_dec(slice_ctx[slice_idx].c,
                  bs, slice_size);
diff --git a/libavcodec/vulkan_ffv1.c b/libavcodec/vulkan_ffv1.c
index b02bc71683..1ed9d7dd6c 100644
--- a/libavcodec/vulkan_ffv1.c
+++ b/libavcodec/vulkan_ffv1.c
@@ -697,7 +697,7 @@ static int init_setup_shader(FFV1Context *f, FFVulkanContext *s,
             .type        = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
             .stages      = VK_SHADER_STAGE_COMPUTE_BIT,
             .mem_quali   = "readonly",
-            .buf_content = "uint32_t slice_offsets",
+            .buf_content = "u32vec2 slice_offsets",
             .buf_elems   = 2*f->max_slice_count,
         },
         {
-- 
2.49.1


>From 1ecc34a7d80b4ad2f1702e97e474b6214463d3e6 Mon Sep 17 00:00:00 2001
From: Lynne <dev@lynne.ee>
Date: Tue, 4 Nov 2025 21:02:43 +0100
Subject: [PATCH 5/9] vulkan/ffv1: use uniform buffer for slice offsets

Microoptimization.
---
 libavcodec/vulkan_ffv1.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/libavcodec/vulkan_ffv1.c b/libavcodec/vulkan_ffv1.c
index 1ed9d7dd6c..9ed35f6f37 100644
--- a/libavcodec/vulkan_ffv1.c
+++ b/libavcodec/vulkan_ffv1.c
@@ -208,7 +208,7 @@ static int vk_ffv1_start_frame(AVCodecContext          *avctx,
     /* Allocate slice offsets buffer */
     err = ff_vk_get_pooled_buffer(&ctx->s, &fv->slice_offset_pool,
                                   &fp->slice_offset_buf,
-                                  VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
+                                  VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT |
                                   VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT,
                                   NULL, 2*f->slice_count*sizeof(uint32_t),
                                   VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
@@ -694,7 +694,7 @@ static int init_setup_shader(FFV1Context *f, FFVulkanContext *s,
         },
         {
             .name        = "slice_offsets_buf",
-            .type        = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
+            .type        = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
             .stages      = VK_SHADER_STAGE_COMPUTE_BIT,
             .mem_quali   = "readonly",
             .buf_content = "u32vec2 slice_offsets",
-- 
2.49.1


>From 06b7bd64f990afa1849ddd515241fbc33b2cd810 Mon Sep 17 00:00:00 2001
From: Lynne <dev@lynne.ee>
Date: Wed, 5 Nov 2025 15:29:57 +0100
Subject: [PATCH 6/9] vulkan: allow arrays of buffers

Could be useful.
---
 libavutil/vulkan.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/libavutil/vulkan.c b/libavutil/vulkan.c
index ef755ad6f7..0f168e017b 100644
--- a/libavutil/vulkan.c
+++ b/libavutil/vulkan.c
@@ -2548,10 +2548,11 @@ print:
                 GLSLA("%s", desc[i].buf_content);
             }
             GLSLA("\n}");
-        } else if (desc[i].elems > 0) {
-            GLSLA("[%i]", desc[i].elems);
         }
 
+        if (desc[i].elems > 0)
+            GLSLA("[%i]", desc[i].elems);
+
         GLSLA(";");
         GLSLA("\n");
     }
-- 
2.49.1


>From 817c6fee1230f7cfa028354916662380e5ed1869 Mon Sep 17 00:00:00 2001
From: Lynne <dev@lynne.ee>
Date: Wed, 29 Oct 2025 13:30:00 +0100
Subject: [PATCH 7/9] dpxdec: add support for frame threading

May help a little.
---
 libavcodec/dpx.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/libavcodec/dpx.c b/libavcodec/dpx.c
index 1b1ada316a..63d5b2ef20 100644
--- a/libavcodec/dpx.c
+++ b/libavcodec/dpx.c
@@ -28,6 +28,7 @@
 #include "codec_internal.h"
 #include "decode.h"
 #include "dpx.h"
+#include "thread.h"
 
 static unsigned int read16(const uint8_t **ptr, int is_big)
 {
@@ -594,7 +595,8 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *p,
 
     ff_set_sar(avctx, avctx->sample_aspect_ratio);
 
-    if ((ret = ff_get_buffer(avctx, p, 0)) < 0)
+    ret = ff_thread_get_buffer(avctx, p, 0);
+    if (ret < 0)
         return ret;
 
     // Move pointer to offset from start of file
@@ -739,5 +741,6 @@ const FFCodec ff_dpx_decoder = {
     .p.type         = AVMEDIA_TYPE_VIDEO,
     .p.id           = AV_CODEC_ID_DPX,
     FF_CODEC_DECODE_CB(decode_frame),
-    .p.capabilities = AV_CODEC_CAP_DR1,
+    UPDATE_THREAD_CONTEXT(update_thread_context),
+    .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
 };
-- 
2.49.1


>From 811b5e3c27938379a3123bd1890b05776be1fe0c Mon Sep 17 00:00:00 2001
From: Lynne <dev@lynne.ee>
Date: Wed, 29 Oct 2025 13:44:51 +0100
Subject: [PATCH 8/9] dpx: share DPX context between encoder and decoder

This allows implementing hardware decoding hooks for the decoder.
---
 libavcodec/dpx.c    | 187 +++++++++++++++++++++++++++-----------------
 libavcodec/dpx.h    |  12 +++
 libavcodec/dpxenc.c |   8 --
 3 files changed, 128 insertions(+), 79 deletions(-)

diff --git a/libavcodec/dpx.c b/libavcodec/dpx.c
index 63d5b2ef20..35781c26af 100644
--- a/libavcodec/dpx.c
+++ b/libavcodec/dpx.c
@@ -119,9 +119,22 @@ static uint16_t read12in32(const uint8_t **ptr, uint32_t *lbuf,
     }
 }
 
+static enum AVPixelFormat get_pixel_format(AVCodecContext *avctx,
+                                           enum AVPixelFormat pix_fmt)
+{
+    enum AVPixelFormat pix_fmts[] = {
+        pix_fmt,
+        AV_PIX_FMT_NONE,
+    };
+
+    return ff_get_format(avctx, pix_fmts);
+}
+
 static int decode_frame(AVCodecContext *avctx, AVFrame *p,
                         int *got_frame, AVPacket *avpkt)
 {
+    DPXContext *s = avctx->priv_data;
+
     const uint8_t *buf = avpkt->data;
     int buf_size       = avpkt->size;
     uint8_t *ptr[AV_NUM_DATA_POINTERS];
@@ -130,11 +143,12 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *p,
     char input_device[33] = { 0 };
 
     unsigned int offset;
-    int magic_num, endian;
+    int magic_num;
     int x, y, stride, i, j, ret;
-    int w, h, bits_per_color, descriptor, elements, packing;
+    int w, h, elements, packing;
     int yuv, color_trc, color_spec;
     int encoding, need_align = 0, unpadded_10bit = 0;
+    enum AVPixelFormat pix_fmt;
 
     unsigned int rgbBuffer = 0;
     int n_datum = 0;
@@ -150,15 +164,15 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *p,
     /* Check if the files "magic number" is "SDPX" which means it uses
      * big-endian or XPDS which is for little-endian files */
     if (magic_num == AV_RL32("SDPX")) {
-        endian = 0;
+        s->big_endian = 0;
     } else if (magic_num == AV_RB32("SDPX")) {
-        endian = 1;
+        s->big_endian = 1;
     } else {
         av_log(avctx, AV_LOG_ERROR, "DPX marker not found\n");
         return AVERROR_INVALIDDATA;
     }
 
-    offset = read32(&buf, endian);
+    offset = read32(&buf, s->big_endian);
     if (avpkt->size <= offset) {
         av_log(avctx, AV_LOG_ERROR, "Invalid data start offset\n");
         return AVERROR_INVALIDDATA;
@@ -175,7 +189,7 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *p,
 
     // Check encryption
     buf = avpkt->data + 660;
-    ret = read32(&buf, endian);
+    ret = read32(&buf, s->big_endian);
     if (ret != 0xFFFFFFFF) {
         avpriv_report_missing_feature(avctx, "Encryption");
         av_log(avctx, AV_LOG_WARNING, "The image is encrypted and may "
@@ -184,37 +198,37 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *p,
 
     // Need to end in 0x304 offset from start of file
     buf = avpkt->data + 0x304;
-    w = read32(&buf, endian);
-    h = read32(&buf, endian);
+    w = read32(&buf, s->big_endian);
+    h = read32(&buf, s->big_endian);
 
     if ((ret = ff_set_dimensions(avctx, w, h)) < 0)
         return ret;
 
     // Need to end in 0x320 to read the descriptor
     buf += 20;
-    descriptor = buf[0];
+    s->descriptor = buf[0];
     color_trc = buf[1];
     color_spec = buf[2];
 
     // Need to end in 0x323 to read the bits per color
     buf += 3;
     avctx->bits_per_raw_sample =
-    bits_per_color = buf[0];
+    s->bits_per_component = buf[0];
     buf++;
-    packing = read16(&buf, endian);
-    encoding = read16(&buf, endian);
+    packing = read16(&buf, s->big_endian);
+    encoding = read16(&buf, s->big_endian);
 
     if (encoding) {
         avpriv_report_missing_feature(avctx, "Encoding %d", encoding);
         return AVERROR_PATCHWELCOME;
     }
 
-    if (bits_per_color > 31)
+    if (s->bits_per_component > 31)
         return AVERROR_INVALIDDATA;
 
     buf += 820;
-    avctx->sample_aspect_ratio.num = read32(&buf, endian);
-    avctx->sample_aspect_ratio.den = read32(&buf, endian);
+    avctx->sample_aspect_ratio.num = read32(&buf, s->big_endian);
+    avctx->sample_aspect_ratio.den = read32(&buf, s->big_endian);
     if (avctx->sample_aspect_ratio.num > 0 && avctx->sample_aspect_ratio.den > 0)
         av_reduce(&avctx->sample_aspect_ratio.num, &avctx->sample_aspect_ratio.den,
                    avctx->sample_aspect_ratio.num,  avctx->sample_aspect_ratio.den,
@@ -225,7 +239,7 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *p,
     /* preferred frame rate from Motion-picture film header */
     if (offset >= 1724 + 4) {
         buf = avpkt->data + 1724;
-        i = read32(&buf, endian);
+        i = read32(&buf, s->big_endian);
         if(i && i != 0xFFFFFFFF) {
             AVRational q = av_d2q(av_int2float(i), 4096);
             if (q.num > 0 && q.den > 0)
@@ -237,7 +251,7 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *p,
     if (offset >= 1940 + 4 &&
         !(avctx->framerate.num && avctx->framerate.den)) {
         buf = avpkt->data + 1940;
-        i = read32(&buf, endian);
+        i = read32(&buf, s->big_endian);
         if(i && i != 0xFFFFFFFF) {
             AVRational q = av_d2q(av_int2float(i), 4096);
             if (q.num > 0 && q.den > 0)
@@ -254,7 +268,7 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *p,
         buf = avpkt->data + 1920;
         // read32 to native endian, av_bswap32 to opposite of native for
         // compatibility with av_timecode_make_smpte_tc_string2 etc
-        tc = av_bswap32(read32(&buf, endian));
+        tc = av_bswap32(read32(&buf, s->big_endian));
 
         if (i != 0xFFFFFFFF) {
             AVFrameSideData *tcside;
@@ -278,27 +292,27 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *p,
     /* color range from television header */
     if (offset >= 1964 + 4) {
         buf = avpkt->data + 1952;
-        i = read32(&buf, endian);
+        i = read32(&buf, s->big_endian);
 
         buf = avpkt->data + 1964;
-        j = read32(&buf, endian);
+        j = read32(&buf, s->big_endian);
 
         if (i != 0xFFFFFFFF && j != 0xFFFFFFFF) {
             float minCV, maxCV;
             minCV = av_int2float(i);
             maxCV = av_int2float(j);
-            if (bits_per_color >= 1 &&
-                minCV == 0.0f && maxCV == ((1U<<bits_per_color) - 1)) {
+            if (s->bits_per_component >= 1 &&
+                minCV == 0.0f && maxCV == ((1U<<s->bits_per_component) - 1)) {
                 avctx->color_range = AVCOL_RANGE_JPEG;
-            } else if (bits_per_color >= 8 &&
-                       minCV == (1  <<(bits_per_color - 4)) &&
-                       maxCV == (235<<(bits_per_color - 8))) {
+            } else if (s->bits_per_component >= 8 &&
+                       minCV == (1  <<(s->bits_per_component - 4)) &&
+                       maxCV == (235<<(s->bits_per_component - 8))) {
                 avctx->color_range = AVCOL_RANGE_MPEG;
             }
         }
     }
 
-    switch (descriptor) {
+    switch (s->descriptor) {
     case 1:  // R
     case 2:  // G
     case 3:  // B
@@ -329,11 +343,11 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *p,
         yuv = 1;
         break;
     default:
-        avpriv_report_missing_feature(avctx, "Descriptor %d", descriptor);
+        avpriv_report_missing_feature(avctx, "Descriptor %d", s->descriptor);
         return AVERROR_PATCHWELCOME;
     }
 
-    switch (bits_per_color) {
+    switch (s->bits_per_component) {
     case 8:
         stride = avctx->width * elements;
         break;
@@ -366,7 +380,7 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *p,
         break;
     case 1:
     case 64:
-        avpriv_report_missing_feature(avctx, "Depth %d", bits_per_color);
+        avpriv_report_missing_feature(avctx, "Depth %d", s->bits_per_component);
         return AVERROR_PATCHWELCOME;
     default:
         return AVERROR_INVALIDDATA;
@@ -459,7 +473,7 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *p,
     // Some devices do not pad 10bit samples to whole 32bit words per row
     if (!memcmp(input_device, "Scanity", 7) ||
         !memcmp(creator, "Lasergraphics Inc.", 18)) {
-        if (bits_per_color == 10)
+        if (s->bits_per_component == 10)
             unpadded_10bit = 1;
     }
 
@@ -485,7 +499,7 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *p,
         stride = FFALIGN(stride, 4);
     }
 
-    switch (1000 * descriptor + 10 * bits_per_color + endian) {
+    switch (1000 * s->descriptor + 10 * s->bits_per_component + s->big_endian) {
     case 1081:
     case 1080:
     case 2081:
@@ -496,103 +510,113 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *p,
     case 4080:
     case 6081:
     case 6080:
-        avctx->pix_fmt = AV_PIX_FMT_GRAY8;
+        pix_fmt = AV_PIX_FMT_GRAY8;
         break;
     case 6121:
     case 6120:
-        avctx->pix_fmt = AV_PIX_FMT_GRAY12;
+        pix_fmt = AV_PIX_FMT_GRAY12;
         break;
     case 1320:
     case 2320:
     case 3320:
     case 4320:
     case 6320:
-        avctx->pix_fmt = AV_PIX_FMT_GRAYF32LE;
+        pix_fmt = AV_PIX_FMT_GRAYF32LE;
         break;
     case 1321:
     case 2321:
     case 3321:
     case 4321:
     case 6321:
-        avctx->pix_fmt = AV_PIX_FMT_GRAYF32BE;
+        pix_fmt = AV_PIX_FMT_GRAYF32BE;
         break;
     case 50081:
     case 50080:
-        avctx->pix_fmt = AV_PIX_FMT_RGB24;
+        pix_fmt = AV_PIX_FMT_RGB24;
         break;
     case 52081:
     case 52080:
-        avctx->pix_fmt = AV_PIX_FMT_ABGR;
+        pix_fmt = AV_PIX_FMT_ABGR;
         break;
     case 51081:
     case 51080:
-        avctx->pix_fmt = AV_PIX_FMT_RGBA;
+        pix_fmt = AV_PIX_FMT_RGBA;
         break;
     case 50100:
     case 50101:
-        avctx->pix_fmt = AV_PIX_FMT_GBRP10;
+        pix_fmt = AV_PIX_FMT_GBRP10;
         break;
     case 51100:
     case 51101:
-        avctx->pix_fmt = AV_PIX_FMT_GBRAP10;
+        pix_fmt = AV_PIX_FMT_GBRAP10;
         break;
     case 50120:
     case 50121:
-        avctx->pix_fmt = AV_PIX_FMT_GBRP12;
+        pix_fmt = AV_PIX_FMT_GBRP12;
         break;
     case 51120:
     case 51121:
-        avctx->pix_fmt = AV_PIX_FMT_GBRAP12;
+        pix_fmt = AV_PIX_FMT_GBRAP12;
         break;
     case 6100:
     case 6101:
-        avctx->pix_fmt = AV_PIX_FMT_GRAY10;
+        pix_fmt = AV_PIX_FMT_GRAY10;
         break;
     case 6161:
-        avctx->pix_fmt = AV_PIX_FMT_GRAY16BE;
+        pix_fmt = AV_PIX_FMT_GRAY16BE;
         break;
     case 6160:
-        avctx->pix_fmt = AV_PIX_FMT_GRAY16LE;
+        pix_fmt = AV_PIX_FMT_GRAY16LE;
         break;
     case 50161:
-        avctx->pix_fmt = AV_PIX_FMT_RGB48BE;
+        pix_fmt = AV_PIX_FMT_RGB48BE;
         break;
     case 50160:
-        avctx->pix_fmt = AV_PIX_FMT_RGB48LE;
+        pix_fmt = AV_PIX_FMT_RGB48LE;
         break;
     case 51161:
-        avctx->pix_fmt = AV_PIX_FMT_RGBA64BE;
+        pix_fmt = AV_PIX_FMT_RGBA64BE;
         break;
     case 51160:
-        avctx->pix_fmt = AV_PIX_FMT_RGBA64LE;
+        pix_fmt = AV_PIX_FMT_RGBA64LE;
         break;
     case 50320:
-        avctx->pix_fmt = AV_PIX_FMT_GBRPF32LE;
+        pix_fmt = AV_PIX_FMT_GBRPF32LE;
         break;
     case 50321:
-        avctx->pix_fmt = AV_PIX_FMT_GBRPF32BE;
+        pix_fmt = AV_PIX_FMT_GBRPF32BE;
         break;
     case 51320:
-        avctx->pix_fmt = AV_PIX_FMT_GBRAPF32LE;
+        pix_fmt = AV_PIX_FMT_GBRAPF32LE;
         break;
     case 51321:
-        avctx->pix_fmt = AV_PIX_FMT_GBRAPF32BE;
+        pix_fmt = AV_PIX_FMT_GBRAPF32BE;
         break;
     case 100081:
-        avctx->pix_fmt = AV_PIX_FMT_UYVY422;
+        pix_fmt = AV_PIX_FMT_UYVY422;
         break;
     case 102081:
-        avctx->pix_fmt = AV_PIX_FMT_YUV444P;
+        pix_fmt = AV_PIX_FMT_YUV444P;
         break;
     case 103081:
-        avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
+        pix_fmt = AV_PIX_FMT_YUVA444P;
         break;
     default:
         av_log(avctx, AV_LOG_ERROR, "Unsupported format %d\n",
-               1000 * descriptor + 10 * bits_per_color + endian);
+               1000 * s->descriptor + 10 * s->bits_per_component + s->big_endian);
         return AVERROR_PATCHWELCOME;
     }
 
+    if (pix_fmt != s->pix_fmt) {
+        s->pix_fmt = pix_fmt;
+
+        ret = get_pixel_format(avctx, pix_fmt);
+        if (ret < 0)
+            return ret;
+
+        avctx->pix_fmt = ret;
+    }
+
     ff_set_sar(avctx, avctx->sample_aspect_ratio);
 
     ret = ff_thread_get_buffer(avctx, p, 0);
@@ -605,7 +629,7 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *p,
     for (i=0; i<AV_NUM_DATA_POINTERS; i++)
         ptr[i] = p->data[i];
 
-    switch (bits_per_color) {
+    switch (s->bits_per_component) {
     case 10:
         for (x = 0; x < avctx->height; x++) {
             uint16_t *dst[4] = {(uint16_t*)ptr[0],
@@ -616,20 +640,20 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *p,
             for (y = 0; y < avctx->width; y++) {
                 if (elements >= 3)
                     *dst[2]++ = read10in32(&buf, &rgbBuffer,
-                                           &n_datum, endian, shift);
+                                           &n_datum, s->big_endian, shift);
                 if (elements == 1)
                     *dst[0]++ = read10in32_gray(&buf, &rgbBuffer,
-                                                &n_datum, endian, shift);
+                                                &n_datum, s->big_endian, shift);
                 else
                     *dst[0]++ = read10in32(&buf, &rgbBuffer,
-                                           &n_datum, endian, shift);
+                                           &n_datum, s->big_endian, shift);
                 if (elements >= 2)
                     *dst[1]++ = read10in32(&buf, &rgbBuffer,
-                                           &n_datum, endian, shift);
+                                           &n_datum, s->big_endian, shift);
                 if (elements == 4)
                     *dst[3]++ =
                     read10in32(&buf, &rgbBuffer,
-                               &n_datum, endian, shift);
+                               &n_datum, s->big_endian, shift);
             }
             if (!unpadded_10bit)
                 n_datum = 0;
@@ -647,24 +671,24 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *p,
             for (y = 0; y < avctx->width; y++) {
                 if (packing) {
                     if (elements >= 3)
-                        *dst[2]++ = read16(&buf, endian) >> shift & 0xFFF;
-                    *dst[0]++ = read16(&buf, endian) >> shift & 0xFFF;
+                        *dst[2]++ = read16(&buf, s->big_endian) >> shift & 0xFFF;
+                    *dst[0]++ = read16(&buf, s->big_endian) >> shift & 0xFFF;
                     if (elements >= 2)
-                        *dst[1]++ = read16(&buf, endian) >> shift & 0xFFF;
+                        *dst[1]++ = read16(&buf, s->big_endian) >> shift & 0xFFF;
                     if (elements == 4)
-                        *dst[3]++ = read16(&buf, endian) >> shift & 0xFFF;
+                        *dst[3]++ = read16(&buf, s->big_endian) >> shift & 0xFFF;
                 } else {
                     if (elements >= 3)
                         *dst[2]++ = read12in32(&buf, &rgbBuffer,
-                                               &n_datum, endian);
+                                               &n_datum, s->big_endian);
                     *dst[0]++ = read12in32(&buf, &rgbBuffer,
-                                           &n_datum, endian);
+                                           &n_datum, s->big_endian);
                     if (elements >= 2)
                         *dst[1]++ = read12in32(&buf, &rgbBuffer,
-                                               &n_datum, endian);
+                                               &n_datum, s->big_endian);
                     if (elements == 4)
                         *dst[3]++ = read12in32(&buf, &rgbBuffer,
-                                               &n_datum, endian);
+                                               &n_datum, s->big_endian);
                 }
             }
             n_datum = 0;
@@ -735,11 +759,32 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *p,
     return buf_size;
 }
 
+#if HAVE_THREADS
+static int update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
+{
+    DPXContext *ssrc = src->priv_data;
+    DPXContext *sdst = dst->priv_data;
+
+    sdst->pix_fmt = ssrc->pix_fmt;
+
+    return 0;
+}
+#endif
+
+static av_cold int decode_init(AVCodecContext *avctx)
+{
+    DPXContext *s = avctx->priv_data;
+    s->pix_fmt = AV_PIX_FMT_NONE;
+    return 0;
+}
+
 const FFCodec ff_dpx_decoder = {
     .p.name         = "dpx",
     CODEC_LONG_NAME("DPX (Digital Picture Exchange) image"),
+    .priv_data_size = sizeof(DPXContext),
     .p.type         = AVMEDIA_TYPE_VIDEO,
     .p.id           = AV_CODEC_ID_DPX,
+    .init           = decode_init,
     FF_CODEC_DECODE_CB(decode_frame),
     UPDATE_THREAD_CONTEXT(update_thread_context),
     .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
diff --git a/libavcodec/dpx.h b/libavcodec/dpx.h
index 800c651e5a..701208e09d 100644
--- a/libavcodec/dpx.h
+++ b/libavcodec/dpx.h
@@ -22,6 +22,8 @@
 #ifndef AVCODEC_DPX_H
 #define AVCODEC_DPX_H
 
+#include "libavutil/pixfmt.h"
+
 enum DPX_TRC {
     DPX_TRC_USER_DEFINED       = 0,
     DPX_TRC_PRINTING_DENSITY   = 1,
@@ -54,4 +56,14 @@ enum DPX_COL_SPEC {
     /* 12 = N/A */
 };
 
+typedef struct DPXContext {
+    enum AVPixelFormat pix_fmt;
+
+    int big_endian;
+    int bits_per_component;
+    int num_components;
+    int descriptor;
+    int planar;
+} DPXContext;
+
 #endif /* AVCODEC_DPX_H */
diff --git a/libavcodec/dpxenc.c b/libavcodec/dpxenc.c
index e136c7a249..08b5b330cb 100644
--- a/libavcodec/dpxenc.c
+++ b/libavcodec/dpxenc.c
@@ -28,14 +28,6 @@
 #include "version.h"
 #include "dpx.h"
 
-typedef struct DPXContext {
-    int big_endian;
-    int bits_per_component;
-    int num_components;
-    int descriptor;
-    int planar;
-} DPXContext;
-
 static av_cold int encode_init(AVCodecContext *avctx)
 {
     DPXContext *s = avctx->priv_data;
-- 
2.49.1


>From 32445c434380f1fcbcd9905d40b8430777dda1f2 Mon Sep 17 00:00:00 2001
From: Lynne <dev@lynne.ee>
Date: Wed, 29 Oct 2025 14:16:44 +0100
Subject: [PATCH 9/9] dpxdec: move data copying/unpacking into a separate
 function

---
 libavcodec/dpx.c | 340 ++++++++++++++++++++++++-----------------------
 libavcodec/dpx.h |   5 +
 2 files changed, 179 insertions(+), 166 deletions(-)

diff --git a/libavcodec/dpx.c b/libavcodec/dpx.c
index 35781c26af..5fd05b69bc 100644
--- a/libavcodec/dpx.c
+++ b/libavcodec/dpx.c
@@ -119,6 +119,145 @@ static uint16_t read12in32(const uint8_t **ptr, uint32_t *lbuf,
     }
 }
 
+static void read_frame_data(AVCodecContext *avctx, AVFrame *p, const uint8_t *buf)
+{
+    DPXContext *s = avctx->priv_data;
+
+    int i, x, y;
+    uint8_t *ptr[AV_NUM_DATA_POINTERS];
+    unsigned int rgbBuffer = 0;
+    int n_datum = 0;
+
+    for (i=0; i<AV_NUM_DATA_POINTERS; i++)
+        ptr[i] = p->data[i];
+
+    switch (s->bits_per_component) {
+    case 10:
+        for (x = 0; x < avctx->height; x++) {
+            uint16_t *dst[4] = {(uint16_t*)ptr[0],
+                                (uint16_t*)ptr[1],
+                                (uint16_t*)ptr[2],
+                                (uint16_t*)ptr[3]};
+            int shift = s->num_components > 1 ? s->packing == 1 ? 22 : 20 :
+                                                s->packing == 1 ?  2 :  0;
+            for (y = 0; y < avctx->width; y++) {
+                if (s->num_components >= 3)
+                    *dst[2]++ = read10in32(&buf, &rgbBuffer,
+                                           &n_datum, s->big_endian, shift);
+                if (s->num_components == 1)
+                    *dst[0]++ = read10in32_gray(&buf, &rgbBuffer,
+                                                &n_datum, s->big_endian, shift);
+                else
+                    *dst[0]++ = read10in32(&buf, &rgbBuffer,
+                                           &n_datum, s->big_endian, shift);
+                if (s->num_components >= 2)
+                    *dst[1]++ = read10in32(&buf, &rgbBuffer,
+                                           &n_datum, s->big_endian, shift);
+                if (s->num_components == 4)
+                    *dst[3]++ =
+                    read10in32(&buf, &rgbBuffer,
+                               &n_datum, s->big_endian, shift);
+            }
+            if (!s->unpadded_10bit)
+                n_datum = 0;
+            for (i = 0; i < s->num_components; i++)
+                ptr[i] += p->linesize[i];
+        }
+        break;
+    case 12:
+        for (x = 0; x < avctx->height; x++) {
+            uint16_t *dst[4] = {(uint16_t*)ptr[0],
+                                (uint16_t*)ptr[1],
+                                (uint16_t*)ptr[2],
+                                (uint16_t*)ptr[3]};
+            int shift = s->packing == 1 ? 4 : 0;
+            for (y = 0; y < avctx->width; y++) {
+                if (s->packing) {
+                    if (s->num_components >= 3)
+                        *dst[2]++ = read16(&buf, s->big_endian) >> shift & 0xFFF;
+                    *dst[0]++ = read16(&buf, s->big_endian) >> shift & 0xFFF;
+                    if (s->num_components >= 2)
+                        *dst[1]++ = read16(&buf, s->big_endian) >> shift & 0xFFF;
+                    if (s->num_components == 4)
+                        *dst[3]++ = read16(&buf, s->big_endian) >> shift & 0xFFF;
+                } else {
+                    if (s->num_components >= 3)
+                        *dst[2]++ = read12in32(&buf, &rgbBuffer,
+                                               &n_datum, s->big_endian);
+                    *dst[0]++ = read12in32(&buf, &rgbBuffer,
+                                           &n_datum, s->big_endian);
+                    if (s->num_components >= 2)
+                        *dst[1]++ = read12in32(&buf, &rgbBuffer,
+                                               &n_datum, s->big_endian);
+                    if (s->num_components == 4)
+                        *dst[3]++ = read12in32(&buf, &rgbBuffer,
+                                               &n_datum, s->big_endian);
+                }
+            }
+            n_datum = 0;
+            for (i = 0; i < s->num_components; i++)
+                ptr[i] += p->linesize[i];
+            // Jump to next aligned position
+            buf += s->need_align;
+        }
+        break;
+    case 32:
+        if (s->num_components == 1) {
+            av_image_copy_plane(ptr[0], p->linesize[0],
+                                buf, s->stride,
+                                s->num_components * avctx->width * 4, avctx->height);
+        } else {
+            for (y = 0; y < avctx->height; y++) {
+                ptr[0] = p->data[0] + y * p->linesize[0];
+                ptr[1] = p->data[1] + y * p->linesize[1];
+                ptr[2] = p->data[2] + y * p->linesize[2];
+                ptr[3] = p->data[3] + y * p->linesize[3];
+                for (x = 0; x < avctx->width; x++) {
+                    AV_WN32(ptr[2], AV_RN32(buf));
+                    AV_WN32(ptr[0], AV_RN32(buf + 4));
+                    AV_WN32(ptr[1], AV_RN32(buf + 8));
+                    if (avctx->pix_fmt == AV_PIX_FMT_GBRAPF32BE ||
+                        avctx->pix_fmt == AV_PIX_FMT_GBRAPF32LE) {
+                        AV_WN32(ptr[3], AV_RN32(buf + 12));
+                        buf += 4;
+                        ptr[3] += 4;
+                    }
+
+                    buf += 12;
+                    ptr[2] += 4;
+                    ptr[0] += 4;
+                    ptr[1] += 4;
+                }
+            }
+        }
+        break;
+    case 16:
+        s->num_components *= 2;
+    case 8:
+        if (   avctx->pix_fmt == AV_PIX_FMT_YUVA444P
+            || avctx->pix_fmt == AV_PIX_FMT_YUV444P) {
+            for (x = 0; x < avctx->height; x++) {
+                ptr[0] = p->data[0] + x * p->linesize[0];
+                ptr[1] = p->data[1] + x * p->linesize[1];
+                ptr[2] = p->data[2] + x * p->linesize[2];
+                ptr[3] = p->data[3] + x * p->linesize[3];
+                for (y = 0; y < avctx->width; y++) {
+                    *ptr[1]++ = *buf++;
+                    *ptr[0]++ = *buf++;
+                    *ptr[2]++ = *buf++;
+                    if (avctx->pix_fmt == AV_PIX_FMT_YUVA444P)
+                        *ptr[3]++ = *buf++;
+                }
+            }
+        } else {
+        av_image_copy_plane(ptr[0], p->linesize[0],
+                            buf, s->stride,
+                            s->num_components * avctx->width, avctx->height);
+        }
+        break;
+    }
+}
+
 static enum AVPixelFormat get_pixel_format(AVCodecContext *avctx,
                                            enum AVPixelFormat pix_fmt)
 {
@@ -137,22 +276,17 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *p,
 
     const uint8_t *buf = avpkt->data;
     int buf_size       = avpkt->size;
-    uint8_t *ptr[AV_NUM_DATA_POINTERS];
     uint32_t header_version, version = 0;
     char creator[101] = { 0 };
     char input_device[33] = { 0 };
 
     unsigned int offset;
     int magic_num;
-    int x, y, stride, i, j, ret;
-    int w, h, elements, packing;
+    int i, j, ret;
+    int w, h;
     int yuv, color_trc, color_spec;
-    int encoding, need_align = 0, unpadded_10bit = 0;
     enum AVPixelFormat pix_fmt;
 
-    unsigned int rgbBuffer = 0;
-    int n_datum = 0;
-
     if (avpkt->size <= 1634) {
         av_log(avctx, AV_LOG_ERROR, "Packet too small for DPX header\n");
         return AVERROR_INVALIDDATA;
@@ -215,11 +349,11 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *p,
     avctx->bits_per_raw_sample =
     s->bits_per_component = buf[0];
     buf++;
-    packing = read16(&buf, s->big_endian);
-    encoding = read16(&buf, s->big_endian);
+    s->packing = read16(&buf, s->big_endian);
+    s->encoding = read16(&buf, s->big_endian);
 
-    if (encoding) {
-        avpriv_report_missing_feature(avctx, "Encoding %d", encoding);
+    if (s->encoding) {
+        avpriv_report_missing_feature(avctx, "Encoding %d", s->encoding);
         return AVERROR_PATCHWELCOME;
     }
 
@@ -318,28 +452,28 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *p,
     case 3:  // B
     case 4:  // A
     case 6:  // Y
-        elements = 1;
+        s->num_components = 1;
         yuv = 1;
         break;
     case 50: // RGB
-        elements = 3;
+        s->num_components = 3;
         yuv = 0;
         break;
     case 52: // ABGR
     case 51: // RGBA
-        elements = 4;
+        s->num_components = 4;
         yuv = 0;
         break;
     case 100: // UYVY422
-        elements = 2;
+        s->num_components = 2;
         yuv = 1;
         break;
     case 102: // UYV444
-        elements = 3;
+        s->num_components = 3;
         yuv = 1;
         break;
     case 103: // UYVA4444
-        elements = 4;
+        s->num_components = 4;
         yuv = 1;
         break;
     default:
@@ -349,34 +483,34 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *p,
 
     switch (s->bits_per_component) {
     case 8:
-        stride = avctx->width * elements;
+        s->stride = avctx->width * s->num_components;
         break;
     case 10:
-        if (!packing) {
+        if (!s->packing) {
             av_log(avctx, AV_LOG_ERROR, "Packing to 32bit required\n");
             return -1;
         }
-        stride = (avctx->width * elements + 2) / 3 * 4;
+        s->stride = (avctx->width * s->num_components + 2) / 3 * 4;
         break;
     case 12:
-        stride = avctx->width * elements;
-        if (packing) {
-            stride *= 2;
+        s->stride = avctx->width * s->num_components;
+        if (s->packing) {
+            s->stride *= 2;
         } else {
-            stride *= 3;
-            if (stride % 8) {
-                stride /= 8;
-                stride++;
-                stride *= 8;
+            s->stride *= 3;
+            if (s->stride % 8) {
+                s->stride /= 8;
+                s->stride++;
+                s->stride *= 8;
             }
-            stride /= 2;
+            s->stride /= 2;
         }
         break;
     case 16:
-        stride = 2 * avctx->width * elements;
+        s->stride = 2 * avctx->width * s->num_components;
         break;
     case 32:
-        stride = 4 * avctx->width * elements;
+        s->stride = 4 * avctx->width * s->num_components;
         break;
     case 1:
     case 64:
@@ -474,7 +608,7 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *p,
     if (!memcmp(input_device, "Scanity", 7) ||
         !memcmp(creator, "Lasergraphics Inc.", 18)) {
         if (s->bits_per_component == 10)
-            unpadded_10bit = 1;
+            s->unpadded_10bit = 1;
     }
 
     // Table 3c: Runs will always break at scan line boundaries. Packing
@@ -482,21 +616,21 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *p,
     // Unfortunately, the encoder produced invalid files, so attempt
     // to detect it
     // Also handle special case with unpadded content
-    need_align = FFALIGN(stride, 4);
-    if (need_align*avctx->height + (int64_t)offset > avpkt->size &&
-        (!unpadded_10bit || (avctx->width * avctx->height * elements + 2) / 3 * 4 + (int64_t)offset > avpkt->size)) {
+    s->need_align = FFALIGN(s->stride, 4);
+    if (s->need_align*avctx->height + (int64_t)offset > avpkt->size &&
+        (!s->unpadded_10bit || (avctx->width * avctx->height * s->num_components + 2) / 3 * 4 + (int64_t)offset > avpkt->size)) {
         // Alignment seems unappliable, try without
-        if (stride*avctx->height + (int64_t)offset > avpkt->size || unpadded_10bit) {
+        if (s->stride*avctx->height + (int64_t)offset > avpkt->size || s->unpadded_10bit) {
             av_log(avctx, AV_LOG_ERROR, "Overread buffer. Invalid header?\n");
             return AVERROR_INVALIDDATA;
         } else {
             av_log(avctx, AV_LOG_INFO, "Decoding DPX without scanline "
                    "alignment.\n");
-            need_align = 0;
+            s->need_align = 0;
         }
     } else {
-        need_align -= stride;
-        stride = FFALIGN(stride, 4);
+        s->need_align -= s->stride;
+        s->stride = FFALIGN(s->stride, 4);
     }
 
     switch (1000 * s->descriptor + 10 * s->bits_per_component + s->big_endian) {
@@ -626,133 +760,7 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *p,
     // Move pointer to offset from start of file
     buf =  avpkt->data + offset;
 
-    for (i=0; i<AV_NUM_DATA_POINTERS; i++)
-        ptr[i] = p->data[i];
-
-    switch (s->bits_per_component) {
-    case 10:
-        for (x = 0; x < avctx->height; x++) {
-            uint16_t *dst[4] = {(uint16_t*)ptr[0],
-                                (uint16_t*)ptr[1],
-                                (uint16_t*)ptr[2],
-                                (uint16_t*)ptr[3]};
-            int shift = elements > 1 ? packing == 1 ? 22 : 20 : packing == 1 ? 2 : 0;
-            for (y = 0; y < avctx->width; y++) {
-                if (elements >= 3)
-                    *dst[2]++ = read10in32(&buf, &rgbBuffer,
-                                           &n_datum, s->big_endian, shift);
-                if (elements == 1)
-                    *dst[0]++ = read10in32_gray(&buf, &rgbBuffer,
-                                                &n_datum, s->big_endian, shift);
-                else
-                    *dst[0]++ = read10in32(&buf, &rgbBuffer,
-                                           &n_datum, s->big_endian, shift);
-                if (elements >= 2)
-                    *dst[1]++ = read10in32(&buf, &rgbBuffer,
-                                           &n_datum, s->big_endian, shift);
-                if (elements == 4)
-                    *dst[3]++ =
-                    read10in32(&buf, &rgbBuffer,
-                               &n_datum, s->big_endian, shift);
-            }
-            if (!unpadded_10bit)
-                n_datum = 0;
-            for (i = 0; i < elements; i++)
-                ptr[i] += p->linesize[i];
-        }
-        break;
-    case 12:
-        for (x = 0; x < avctx->height; x++) {
-            uint16_t *dst[4] = {(uint16_t*)ptr[0],
-                                (uint16_t*)ptr[1],
-                                (uint16_t*)ptr[2],
-                                (uint16_t*)ptr[3]};
-            int shift = packing == 1 ? 4 : 0;
-            for (y = 0; y < avctx->width; y++) {
-                if (packing) {
-                    if (elements >= 3)
-                        *dst[2]++ = read16(&buf, s->big_endian) >> shift & 0xFFF;
-                    *dst[0]++ = read16(&buf, s->big_endian) >> shift & 0xFFF;
-                    if (elements >= 2)
-                        *dst[1]++ = read16(&buf, s->big_endian) >> shift & 0xFFF;
-                    if (elements == 4)
-                        *dst[3]++ = read16(&buf, s->big_endian) >> shift & 0xFFF;
-                } else {
-                    if (elements >= 3)
-                        *dst[2]++ = read12in32(&buf, &rgbBuffer,
-                                               &n_datum, s->big_endian);
-                    *dst[0]++ = read12in32(&buf, &rgbBuffer,
-                                           &n_datum, s->big_endian);
-                    if (elements >= 2)
-                        *dst[1]++ = read12in32(&buf, &rgbBuffer,
-                                               &n_datum, s->big_endian);
-                    if (elements == 4)
-                        *dst[3]++ = read12in32(&buf, &rgbBuffer,
-                                               &n_datum, s->big_endian);
-                }
-            }
-            n_datum = 0;
-            for (i = 0; i < elements; i++)
-                ptr[i] += p->linesize[i];
-            // Jump to next aligned position
-            buf += need_align;
-        }
-        break;
-    case 32:
-        if (elements == 1) {
-            av_image_copy_plane(ptr[0], p->linesize[0],
-                                buf, stride,
-                                elements * avctx->width * 4, avctx->height);
-        } else {
-            for (y = 0; y < avctx->height; y++) {
-                ptr[0] = p->data[0] + y * p->linesize[0];
-                ptr[1] = p->data[1] + y * p->linesize[1];
-                ptr[2] = p->data[2] + y * p->linesize[2];
-                ptr[3] = p->data[3] + y * p->linesize[3];
-                for (x = 0; x < avctx->width; x++) {
-                    AV_WN32(ptr[2], AV_RN32(buf));
-                    AV_WN32(ptr[0], AV_RN32(buf + 4));
-                    AV_WN32(ptr[1], AV_RN32(buf + 8));
-                    if (avctx->pix_fmt == AV_PIX_FMT_GBRAPF32BE ||
-                        avctx->pix_fmt == AV_PIX_FMT_GBRAPF32LE) {
-                        AV_WN32(ptr[3], AV_RN32(buf + 12));
-                        buf += 4;
-                        ptr[3] += 4;
-                    }
-
-                    buf += 12;
-                    ptr[2] += 4;
-                    ptr[0] += 4;
-                    ptr[1] += 4;
-                }
-            }
-        }
-        break;
-    case 16:
-        elements *= 2;
-    case 8:
-        if (   avctx->pix_fmt == AV_PIX_FMT_YUVA444P
-            || avctx->pix_fmt == AV_PIX_FMT_YUV444P) {
-            for (x = 0; x < avctx->height; x++) {
-                ptr[0] = p->data[0] + x * p->linesize[0];
-                ptr[1] = p->data[1] + x * p->linesize[1];
-                ptr[2] = p->data[2] + x * p->linesize[2];
-                ptr[3] = p->data[3] + x * p->linesize[3];
-                for (y = 0; y < avctx->width; y++) {
-                    *ptr[1]++ = *buf++;
-                    *ptr[0]++ = *buf++;
-                    *ptr[2]++ = *buf++;
-                    if (avctx->pix_fmt == AV_PIX_FMT_YUVA444P)
-                        *ptr[3]++ = *buf++;
-                }
-            }
-        } else {
-        av_image_copy_plane(ptr[0], p->linesize[0],
-                            buf, stride,
-                            elements * avctx->width, avctx->height);
-        }
-        break;
-    }
+    read_frame_data(avctx, p, buf);
 
     *got_frame = 1;
 
diff --git a/libavcodec/dpx.h b/libavcodec/dpx.h
index 701208e09d..e92a9a6f4c 100644
--- a/libavcodec/dpx.h
+++ b/libavcodec/dpx.h
@@ -58,6 +58,11 @@ enum DPX_COL_SPEC {
 
 typedef struct DPXContext {
     enum AVPixelFormat pix_fmt;
+    int unpadded_10bit;
+    int need_align;
+    int encoding;
+    int packing;
+    int stride;
 
     int big_endian;
     int bits_per_component;
-- 
2.49.1

_______________________________________________
ffmpeg-devel mailing list -- ffmpeg-devel@ffmpeg.org
To unsubscribe send an email to ffmpeg-devel-leave@ffmpeg.org

                 reply	other threads:[~2025-11-06 21:29 UTC|newest]

Thread overview: [no followups] expand[flat|nested]  mbox.gz  Atom feed

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=176246452880.25.7236117879275360565@2cb04c0e5124 \
    --to=ffmpeg-devel@ffmpeg.org \
    --cc=code@ffmpeg.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link

Git Inbox Mirror of the ffmpeg-devel mailing list - see https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

This inbox may be cloned and mirrored by anyone:

	git clone --mirror https://master.gitmailbox.com/ffmpegdev/0 ffmpegdev/git/0.git

	# If you have public-inbox 1.1+ installed, you may
	# initialize and index your mirror using the following commands:
	public-inbox-init -V2 ffmpegdev ffmpegdev/ https://master.gitmailbox.com/ffmpegdev \
		ffmpegdev@gitmailbox.com
	public-inbox-index ffmpegdev

Example config snippet for mirrors.


AGPL code for this site: git clone https://public-inbox.org/public-inbox.git