From: James Almer <jamrial@gmail.com>
To: ffmpeg-devel@ffmpeg.org
Subject: [FFmpeg-devel] [PATCH v3 3/3] avcodec/exr: use luma+alpha float pixel formats
Date: Thu, 6 Mar 2025 10:21:33 -0300
Message-ID: <20250306132133.1084-1-jamrial@gmail.com>
In-Reply-To: <20250305204413.36950-3-jamrial@gmail.com>

Signed-off-by: James Almer <jamrial@gmail.com>
---
 libavcodec/exr.c                             | 70 +++++++-------------
 tests/ref/fate/exr-ya-scanline-zip-half-12x8 |  2 +-
 2 files changed, 25 insertions(+), 47 deletions(-)

diff --git a/libavcodec/exr.c b/libavcodec/exr.c
index b25e9ef397..4482f104d0 100644
--- a/libavcodec/exr.c
+++ b/libavcodec/exr.c
@@ -1352,77 +1352,61 @@ static int decode_block(AVCodecContext *avctx, void *tdata,
     data_yoffset = FFABS(FFMIN(0, line));
     data_window_offset = (data_yoffset * td->channel_line_size) + data_xoffset;
 
+    if (s->channel_offsets[3] >= 0)
+        channel_buffer[3] = src + (td->xsize * s->channel_offsets[3]) + data_window_offset;
     if (!s->is_luma) {
         channel_buffer[0] = src + (td->xsize * s->channel_offsets[0]) + data_window_offset;
         channel_buffer[1] = src + (td->xsize * s->channel_offsets[1]) + data_window_offset;
         channel_buffer[2] = src + (td->xsize * s->channel_offsets[2]) + data_window_offset;
         rgb_channel_count = 3;
-    } else { /* put y data in the first channel_buffer */
+    } else { /* put y data in the first channel_buffer and if needed, alpha in the second */
         channel_buffer[0] = src + (td->xsize * s->channel_offsets[1]) + data_window_offset;
+        if (!(s->desc->flags & AV_PIX_FMT_FLAG_PLANAR))
+            channel_buffer[1] = channel_buffer[3];
         rgb_channel_count = 1;
     }
-    if (s->channel_offsets[3] >= 0)
-        channel_buffer[3] = src + (td->xsize * s->channel_offsets[3]) + data_window_offset;
 
-    if (s->desc->flags & AV_PIX_FMT_FLAG_PLANAR || s->desc->nb_components == 1 ) {
-        /* todo: change this when a floating point pixel format with luma with alpha is implemented */
-        int channel_count = s->channel_offsets[3] >= 0 ? 4 : rgb_channel_count;
-        if (s->is_luma) {
-            channel_buffer[1] = channel_buffer[0];
-            channel_buffer[2] = channel_buffer[0];
-        }
-
-        for (c = 0; c < channel_count; c++) {
+    if (s->desc->flags & AV_PIX_FMT_FLAG_FLOAT) {
+        for (c = 0; c < s->desc->nb_components; c++) {
             int plane = s->desc->comp[c].plane;
-            ptr = p->data[plane] + window_ymin * p->linesize[plane] + (window_xmin * step);
+            ptr = p->data[plane] + window_ymin * p->linesize[plane] + (window_xmin * step) + s->desc->comp[c].offset;
 
             for (i = 0; i < ysize; i++, ptr += p->linesize[plane]) {
-                const uint8_t *src;
+                const uint8_t *src = channel_buffer[c];
+                uint8_t *ptr_x = ptr + window_xoffset * step;
+
+                // Zero out the start if xmin is not 0
+                if (s->desc->flags & AV_PIX_FMT_FLAG_PLANAR || !c)
+                    memset(ptr, 0, bxmin);
 
                 if (s->pixel_type == EXR_FLOAT ||
                     s->compression == EXR_DWAA ||
                     s->compression == EXR_DWAB) {
                     // 32-bit
-                    uint8_t *ptr_x = ptr;
-
-                    src = channel_buffer[c];
-
-                    // Zero out the start if xmin is not 0
-                    memset(ptr_x, 0, bxmin);
-                    ptr_x += 4 * window_xoffset;
-
-                    if (trc_func && c < 3) {
-                        for (int x = 0; x < xsize; x++, ptr_x += 4) {
+                    if (trc_func && (!c || (c < 3 && s->desc->flags & AV_PIX_FMT_FLAG_PLANAR))) {
+                        for (int x = 0; x < xsize; x++, ptr_x += step) {
                             float f = av_int2float(bytestream_get_le32(&src));
                             AV_WN32A(ptr_x, av_float2int(trc_func(f)));
                         }
                     } else if (one_gamma != 1.f) {
-                        for (int x = 0; x < xsize; x++, ptr_x += 4) {
+                        for (int x = 0; x < xsize; x++, ptr_x += step) {
                             float f = av_int2float(bytestream_get_le32(&src));
                             if (f > 0.0f && c < 3) /* avoid negative values */
                                 f = powf(f, one_gamma);
                             AV_WN32A(ptr_x, av_float2int(f));
                         }
                     } else {
-                        for (int x = 0; x < xsize; x++, ptr_x += 4)
+                        for (int x = 0; x < xsize; x++, ptr_x += step)
                             AV_WN32A(ptr_x, bytestream_get_le32(&src));
                     }
-                    memset(ptr_x, 0, axmax);
                 } else if (s->pixel_type == EXR_HALF) {
-                    src = channel_buffer[c];
-
-                    // Zero out the start if xmin is not 0
-                    memset(ptr, 0, bxmin);
-
                     // 16-bit
-                    for (x = window_xoffset; x < xsize + window_xoffset; x++) {
-                        int v = bytestream_get_le16(&src);
-                        AV_WN16(ptr + x * sizeof(uint16_t), v);
-                    }
-                    memset(ptr + x * sizeof(uint16_t), 0, axmax);
+                    for (int x = 0; x < xsize; x++, ptr_x += step)
+                        AV_WN16A(ptr_x, bytestream_get_le16(&src));
                 }
 
                 // Zero out the end if xmax+1 is not w
+                memset(ptr_x, 0, axmax);
                 channel_buffer[c] += td->channel_line_size;
             }
         }
@@ -2054,8 +2038,7 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *picture,
             if (!s->is_luma) {
                 avctx->pix_fmt = AV_PIX_FMT_GBRAPF16;
             } else {
-                /* todo: change this when a floating point pixel format with luma with alpha is implemented */
-                avctx->pix_fmt = AV_PIX_FMT_GBRAPF16;
+                avctx->pix_fmt = AV_PIX_FMT_YAF16;
             }
         } else {
             if (!s->is_luma) {
@@ -2071,8 +2054,7 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *picture,
             if (!s->is_luma) {
                 avctx->pix_fmt = AV_PIX_FMT_GBRAPF32;
             } else {
-                /* todo: change this when a floating point pixel format with luma with alpha is implemented */
-                avctx->pix_fmt = AV_PIX_FMT_GBRAPF32;
+                avctx->pix_fmt = AV_PIX_FMT_YAF32;
             }
         } else {
             if (!s->is_luma) {
@@ -2151,11 +2133,7 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *picture,
     if (!s->desc)
         return AVERROR_INVALIDDATA;
 
-    if (s->desc->flags & AV_PIX_FMT_FLAG_PLANAR) {
-        planes = s->desc->nb_components;
-    } else {
-        planes = 1;
-    }
+    planes = av_pix_fmt_count_planes(avctx->pix_fmt);
     out_line_size = avctx->width * s->desc->comp[0].step;
 
     if (s->is_tile) {
diff --git a/tests/ref/fate/exr-ya-scanline-zip-half-12x8 b/tests/ref/fate/exr-ya-scanline-zip-half-12x8
index 97ea715962..6c07f19846 100644
--- a/tests/ref/fate/exr-ya-scanline-zip-half-12x8
+++ b/tests/ref/fate/exr-ya-scanline-zip-half-12x8
@@ -3,4 +3,4 @@
 #codec_id 0: rawvideo
 #dimensions 0: 12x8
 #sar 0: 1/1
-0,          0,          0,        1,     1536, 0x2b457bd2
+0,          0,          0,        1,     1536, 0xa33f5f69
-- 
2.48.1

_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".