* [FFmpeg-devel] [PATCH 4/6] avcodec/threadframe: Add ff_thread_(get|release)_ext_buffer()
2022-02-07 0:37 [FFmpeg-devel] [PATCH 1/6] avcodec/pthread_frame: Don't prematurely presume frame-threading Andreas Rheinhardt
2022-02-07 0:58 ` [FFmpeg-devel] [PATCH 2/6] avcodec/hapdec: Remove always-false check Andreas Rheinhardt
2022-02-07 0:58 ` [FFmpeg-devel] [PATCH 3/6] avcodec/thread: Move ff_thread_(await|report)_progress to new header Andreas Rheinhardt
@ 2022-02-07 0:58 ` Andreas Rheinhardt
2022-02-07 0:58 ` [FFmpeg-devel] [PATCH 5/6] avcodec/thread: Don't use ThreadFrame when unnecessary Andreas Rheinhardt
2022-02-07 0:58 ` [FFmpeg-devel] [PATCH 6/6] avcodec/pthread_frame: Properly unref frame in case of decoding failure Andreas Rheinhardt
4 siblings, 0 replies; 6+ messages in thread
From: Andreas Rheinhardt @ 2022-02-07 0:58 UTC (permalink / raw)
To: ffmpeg-devel; +Cc: Andreas Rheinhardt
These will be used by the codecs that need allocated progress
and are in preparation for no longer using ThreadFrame by the codecs
that don't.
Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
---
libavcodec/ffv1.c | 5 +++--
libavcodec/ffv1dec.c | 9 +++++----
libavcodec/h264_picture.c | 4 ++--
libavcodec/h264_slice.c | 6 +++---
libavcodec/hevc_refs.c | 6 +++---
libavcodec/mimic.c | 12 ++++++------
libavcodec/mpegpicture.c | 7 ++++---
libavcodec/pngdec.c | 17 +++++++++--------
libavcodec/pthread_frame.c | 10 ++++++++++
libavcodec/threadframe.h | 24 ++++++++++++++++++++++++
libavcodec/utils.c | 15 ++++++++++++++-
libavcodec/vp3.c | 23 ++++++++++++-----------
libavcodec/vp8.c | 8 ++++----
libavcodec/vp9.c | 18 +++++++++---------
libavcodec/wavpack.c | 18 ++++++++++--------
15 files changed, 118 insertions(+), 64 deletions(-)
diff --git a/libavcodec/ffv1.c b/libavcodec/ffv1.c
index d8a0c39254..6bde984815 100644
--- a/libavcodec/ffv1.c
+++ b/libavcodec/ffv1.c
@@ -37,6 +37,7 @@
#include "rangecoder.h"
#include "mathops.h"
#include "ffv1.h"
+#include "threadframe.h"
av_cold int ff_ffv1_common_init(AVCodecContext *avctx)
{
@@ -204,11 +205,11 @@ av_cold int ff_ffv1_close(AVCodecContext *avctx)
int i, j;
if (s->picture.f)
- ff_thread_release_buffer(avctx, &s->picture);
+ ff_thread_release_ext_buffer(avctx, &s->picture);
av_frame_free(&s->picture.f);
if (s->last_picture.f)
- ff_thread_release_buffer(avctx, &s->last_picture);
+ ff_thread_release_ext_buffer(avctx, &s->last_picture);
av_frame_free(&s->last_picture.f);
for (j = 0; j < s->max_slice_count; j++) {
diff --git a/libavcodec/ffv1dec.c b/libavcodec/ffv1dec.c
index 3f90a8fb55..45ecf3152e 100644
--- a/libavcodec/ffv1dec.c
+++ b/libavcodec/ffv1dec.c
@@ -842,7 +842,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
AVFrame *p;
if (f->last_picture.f)
- ff_thread_release_buffer(avctx, &f->last_picture);
+ ff_thread_release_ext_buffer(avctx, &f->last_picture);
FFSWAP(ThreadFrame, f->picture, f->last_picture);
f->cur = p = f->picture.f;
@@ -874,7 +874,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
p->key_frame = 0;
}
- if ((ret = ff_thread_get_buffer(avctx, &f->picture, AV_GET_BUFFER_FLAG_REF)) < 0)
+ ret = ff_thread_get_ext_buffer(avctx, &f->picture, AV_GET_BUFFER_FLAG_REF);
+ if (ret < 0)
return ret;
if (avctx->debug & FF_DEBUG_PICT_INFO)
@@ -966,7 +967,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
ff_thread_report_progress(&f->picture, INT_MAX, 0);
if (f->last_picture.f)
- ff_thread_release_buffer(avctx, &f->last_picture);
+ ff_thread_release_ext_buffer(avctx, &f->last_picture);
if ((ret = av_frame_ref(data, f->picture.f)) < 0)
return ret;
@@ -1037,7 +1038,7 @@ static int update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
av_assert1(fdst->max_slice_count == fsrc->max_slice_count);
- ff_thread_release_buffer(dst, &fdst->picture);
+ ff_thread_release_ext_buffer(dst, &fdst->picture);
if (fsrc->picture.f->data[0]) {
if ((ret = ff_thread_ref_frame(&fdst->picture, &fsrc->picture)) < 0)
return ret;
diff --git a/libavcodec/h264_picture.c b/libavcodec/h264_picture.c
index 50a21c260f..c7f5b64b99 100644
--- a/libavcodec/h264_picture.c
+++ b/libavcodec/h264_picture.c
@@ -40,7 +40,7 @@ void ff_h264_unref_picture(H264Context *h, H264Picture *pic)
if (!pic->f || !pic->f->buf[0])
return;
- ff_thread_release_buffer(h->avctx, &pic->tf);
+ ff_thread_release_ext_buffer(h->avctx, &pic->tf);
ff_thread_release_buffer(h->avctx, &pic->tf_grain);
av_buffer_unref(&pic->hwaccel_priv_buf);
@@ -155,7 +155,7 @@ int ff_h264_replace_picture(H264Context *h, H264Picture *dst, const H264Picture
av_assert0(src->tf.f == src->f);
dst->tf.f = dst->f;
- ff_thread_release_buffer(h->avctx, &dst->tf);
+ ff_thread_release_ext_buffer(h->avctx, &dst->tf);
ret = ff_thread_ref_frame(&dst->tf, &src->tf);
if (ret < 0)
goto fail;
diff --git a/libavcodec/h264_slice.c b/libavcodec/h264_slice.c
index 600a109889..32d2e090d5 100644
--- a/libavcodec/h264_slice.c
+++ b/libavcodec/h264_slice.c
@@ -191,8 +191,8 @@ static int alloc_picture(H264Context *h, H264Picture *pic)
av_assert0(!pic->f->data[0]);
pic->tf.f = pic->f;
- ret = ff_thread_get_buffer(h->avctx, &pic->tf, pic->reference ?
- AV_GET_BUFFER_FLAG_REF : 0);
+ ret = ff_thread_get_ext_buffer(h->avctx, &pic->tf,
+ pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
if (ret < 0)
goto fail;
@@ -1699,7 +1699,7 @@ static int h264_field_start(H264Context *h, const H264SliceContext *sl,
ff_thread_await_progress(&prev->tf, INT_MAX, 0);
if (prev->field_picture)
ff_thread_await_progress(&prev->tf, INT_MAX, 1);
- ff_thread_release_buffer(h->avctx, &h->short_ref[0]->tf);
+ ff_thread_release_ext_buffer(h->avctx, &h->short_ref[0]->tf);
h->short_ref[0]->tf.f = h->short_ref[0]->f;
ret = ff_thread_ref_frame(&h->short_ref[0]->tf, &prev->tf);
if (ret < 0)
diff --git a/libavcodec/hevc_refs.c b/libavcodec/hevc_refs.c
index d0a7888e21..0dd456bb92 100644
--- a/libavcodec/hevc_refs.c
+++ b/libavcodec/hevc_refs.c
@@ -38,7 +38,7 @@ void ff_hevc_unref_frame(HEVCContext *s, HEVCFrame *frame, int flags)
frame->flags &= ~flags;
if (!frame->flags) {
- ff_thread_release_buffer(s->avctx, &frame->tf);
+ ff_thread_release_ext_buffer(s->avctx, &frame->tf);
ff_thread_release_buffer(s->avctx, &frame->tf_grain);
frame->needs_fg = 0;
@@ -90,8 +90,8 @@ static HEVCFrame *alloc_frame(HEVCContext *s)
if (frame->frame->buf[0])
continue;
- ret = ff_thread_get_buffer(s->avctx, &frame->tf,
- AV_GET_BUFFER_FLAG_REF);
+ ret = ff_thread_get_ext_buffer(s->avctx, &frame->tf,
+ AV_GET_BUFFER_FLAG_REF);
if (ret < 0)
return NULL;
diff --git a/libavcodec/mimic.c b/libavcodec/mimic.c
index 6a51da5748..d3343b5da7 100644
--- a/libavcodec/mimic.c
+++ b/libavcodec/mimic.c
@@ -112,7 +112,7 @@ static av_cold int mimic_decode_end(AVCodecContext *avctx)
for (i = 0; i < FF_ARRAY_ELEMS(ctx->frames); i++) {
if (ctx->frames[i].f)
- ff_thread_release_buffer(avctx, &ctx->frames[i]);
+ ff_thread_release_ext_buffer(avctx, &ctx->frames[i]);
av_frame_free(&ctx->frames[i].f);
}
@@ -164,7 +164,7 @@ static int mimic_decode_update_thread_context(AVCodecContext *avctx, const AVCod
dst->prev_index = src->next_prev_index;
for (i = 0; i < FF_ARRAY_ELEMS(dst->frames); i++) {
- ff_thread_release_buffer(avctx, &dst->frames[i]);
+ ff_thread_release_ext_buffer(avctx, &dst->frames[i]);
if (i != src->next_cur_index && src->frames[i].f->data[0]) {
ret = ff_thread_ref_frame(&dst->frames[i], &src->frames[i]);
if (ret < 0)
@@ -395,11 +395,11 @@ static int mimic_decode_frame(AVCodecContext *avctx, void *data,
return AVERROR_INVALIDDATA;
}
- ff_thread_release_buffer(avctx, &ctx->frames[ctx->cur_index]);
+ ff_thread_release_ext_buffer(avctx, &ctx->frames[ctx->cur_index]);
ctx->frames[ctx->cur_index].f->pict_type = is_pframe ? AV_PICTURE_TYPE_P :
AV_PICTURE_TYPE_I;
- if ((res = ff_thread_get_buffer(avctx, &ctx->frames[ctx->cur_index],
- AV_GET_BUFFER_FLAG_REF)) < 0)
+ if ((res = ff_thread_get_ext_buffer(avctx, &ctx->frames[ctx->cur_index],
+ AV_GET_BUFFER_FLAG_REF)) < 0)
return res;
ctx->next_prev_index = ctx->cur_index;
@@ -420,7 +420,7 @@ static int mimic_decode_frame(AVCodecContext *avctx, void *data,
ff_thread_report_progress(&ctx->frames[ctx->cur_index], INT_MAX, 0);
if (res < 0) {
if (!(avctx->active_thread_type & FF_THREAD_FRAME))
- ff_thread_release_buffer(avctx, &ctx->frames[ctx->cur_index]);
+ ff_thread_release_ext_buffer(avctx, &ctx->frames[ctx->cur_index]);
return res;
}
diff --git a/libavcodec/mpegpicture.c b/libavcodec/mpegpicture.c
index 27e497c404..be6e6967e0 100644
--- a/libavcodec/mpegpicture.c
+++ b/libavcodec/mpegpicture.c
@@ -29,6 +29,7 @@
#include "motion_est.h"
#include "mpegpicture.h"
#include "mpegutils.h"
+#include "threadframe.h"
static void av_noinline free_picture_tables(Picture *pic)
{
@@ -130,8 +131,8 @@ static int alloc_frame_buffer(AVCodecContext *avctx, Picture *pic,
pic->f->height = avctx->height + 2 * EDGE_WIDTH;
}
- r = ff_thread_get_buffer(avctx, &pic->tf,
- pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
+ r = ff_thread_get_ext_buffer(avctx, &pic->tf,
+ pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
} else {
pic->f->width = avctx->width;
pic->f->height = avctx->height;
@@ -321,7 +322,7 @@ void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *pic)
if (avctx->codec_id != AV_CODEC_ID_WMV3IMAGE &&
avctx->codec_id != AV_CODEC_ID_VC1IMAGE &&
avctx->codec_id != AV_CODEC_ID_MSS2)
- ff_thread_release_buffer(avctx, &pic->tf);
+ ff_thread_release_ext_buffer(avctx, &pic->tf);
else if (pic->f)
av_frame_unref(pic->f);
diff --git a/libavcodec/pngdec.c b/libavcodec/pngdec.c
index 3b12f4057a..7571def907 100644
--- a/libavcodec/pngdec.c
+++ b/libavcodec/pngdec.c
@@ -721,8 +721,9 @@ static int decode_idat_chunk(AVCodecContext *avctx, PNGDecContext *s,
s->bpp += byte_depth;
}
- ff_thread_release_buffer(avctx, &s->picture);
- if ((ret = ff_thread_get_buffer(avctx, &s->picture, AV_GET_BUFFER_FLAG_REF)) < 0)
+ ff_thread_release_ext_buffer(avctx, &s->picture);
+ if ((ret = ff_thread_get_ext_buffer(avctx, &s->picture,
+ AV_GET_BUFFER_FLAG_REF)) < 0)
return ret;
p->pict_type = AV_PICTURE_TYPE_I;
@@ -1560,7 +1561,7 @@ static int decode_frame_png(AVCodecContext *avctx,
goto the_end;
if (!(avctx->active_thread_type & FF_THREAD_FRAME)) {
- ff_thread_release_buffer(avctx, &s->last_picture);
+ ff_thread_release_ext_buffer(avctx, &s->last_picture);
FFSWAP(ThreadFrame, s->picture, s->last_picture);
}
@@ -1624,9 +1625,9 @@ static int decode_frame_apng(AVCodecContext *avctx,
if (!(avctx->active_thread_type & FF_THREAD_FRAME)) {
if (s->dispose_op == APNG_DISPOSE_OP_PREVIOUS) {
- ff_thread_release_buffer(avctx, &s->picture);
+ ff_thread_release_ext_buffer(avctx, &s->picture);
} else {
- ff_thread_release_buffer(avctx, &s->last_picture);
+ ff_thread_release_ext_buffer(avctx, &s->last_picture);
FFSWAP(ThreadFrame, s->picture, s->last_picture);
}
}
@@ -1677,7 +1678,7 @@ static int update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
src_frame = psrc->dispose_op == APNG_DISPOSE_OP_PREVIOUS ?
&psrc->last_picture : &psrc->picture;
- ff_thread_release_buffer(dst, &pdst->last_picture);
+ ff_thread_release_ext_buffer(dst, &pdst->last_picture);
if (src_frame && src_frame->f->data[0]) {
ret = ff_thread_ref_frame(&pdst->last_picture, src_frame);
if (ret < 0)
@@ -1712,9 +1713,9 @@ static av_cold int png_dec_end(AVCodecContext *avctx)
{
PNGDecContext *s = avctx->priv_data;
- ff_thread_release_buffer(avctx, &s->last_picture);
+ ff_thread_release_ext_buffer(avctx, &s->last_picture);
av_frame_free(&s->last_picture.f);
- ff_thread_release_buffer(avctx, &s->picture);
+ ff_thread_release_ext_buffer(avctx, &s->picture);
av_frame_free(&s->picture.f);
av_freep(&s->buffer);
s->buffer_size = 0;
diff --git a/libavcodec/pthread_frame.c b/libavcodec/pthread_frame.c
index 73dc4d3669..f405622ca1 100644
--- a/libavcodec/pthread_frame.c
+++ b/libavcodec/pthread_frame.c
@@ -1057,6 +1057,11 @@ int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
return ret;
}
+int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
+{
+ return ff_thread_get_buffer(avctx, f, flags);
+}
+
void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
{
#if FF_API_THREAD_SAFE_CALLBACKS
@@ -1126,3 +1131,8 @@ fail:
}
#endif
}
+
+void ff_thread_release_ext_buffer(AVCodecContext *avctx, ThreadFrame *f)
+{
+ ff_thread_release_buffer(avctx, f);
+}
diff --git a/libavcodec/threadframe.h b/libavcodec/threadframe.h
index c2caf511b9..be63e62435 100644
--- a/libavcodec/threadframe.h
+++ b/libavcodec/threadframe.h
@@ -50,4 +50,28 @@ void ff_thread_report_progress(ThreadFrame *f, int progress, int field);
*/
void ff_thread_await_progress(ThreadFrame *f, int progress, int field);
+/**
+ * Wrapper around ff_get_buffer() for frame-multithreaded codecs.
+ * Call this function instead of ff_get_buffer() if you might need
+ * to wait for progress on this frame.
+ * Cannot be called after the codec has called ff_thread_finish_setup().
+ *
+ * @param avctx The current context.
+ * @param f The frame to write into.
+ * @note: It is fine to call this with codecs that do not support
+ * frame threading.
+ */
+int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags);
+
+/**
+ * Unref a ThreadFrame.
+ *
+ * This is basically a wrapper around av_frame_unref() and should
+ * be called instead of it.
+ *
+ * @param avctx The current context.
+ * @param f The picture being released.
+ */
+void ff_thread_release_ext_buffer(AVCodecContext *avctx, ThreadFrame *f);
+
#endif
diff --git a/libavcodec/utils.c b/libavcodec/utils.c
index 0fd4469a74..bc16424f91 100644
--- a/libavcodec/utils.c
+++ b/libavcodec/utils.c
@@ -879,7 +879,7 @@ int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src)
if (src->progress &&
!(dst->progress = av_buffer_ref(src->progress))) {
- ff_thread_release_buffer(dst->owner[0], dst);
+ ff_thread_release_ext_buffer(dst->owner[0], dst);
return AVERROR(ENOMEM);
}
@@ -899,12 +899,25 @@ int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
return ff_get_buffer(avctx, f->f, flags);
}
+int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
+{
+ f->owner[0] = f->owner[1] = avctx;
+ return ff_get_buffer(avctx, f->f, flags);
+}
+
void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
{
if (f->f)
av_frame_unref(f->f);
}
+void ff_thread_release_ext_buffer(AVCodecContext *avctx, ThreadFrame *f)
+{
+ f->owner[0] = f->owner[1] = NULL;
+ if (f->f)
+ av_frame_unref(f->f);
+}
+
void ff_thread_finish_setup(AVCodecContext *avctx)
{
}
diff --git a/libavcodec/vp3.c b/libavcodec/vp3.c
index 06b0e873b6..e9ab54d736 100644
--- a/libavcodec/vp3.c
+++ b/libavcodec/vp3.c
@@ -333,11 +333,11 @@ static void vp3_decode_flush(AVCodecContext *avctx)
Vp3DecodeContext *s = avctx->priv_data;
if (s->golden_frame.f)
- ff_thread_release_buffer(avctx, &s->golden_frame);
+ ff_thread_release_ext_buffer(avctx, &s->golden_frame);
if (s->last_frame.f)
- ff_thread_release_buffer(avctx, &s->last_frame);
+ ff_thread_release_ext_buffer(avctx, &s->last_frame);
if (s->current_frame.f)
- ff_thread_release_buffer(avctx, &s->current_frame);
+ ff_thread_release_ext_buffer(avctx, &s->current_frame);
}
static av_cold int vp3_decode_end(AVCodecContext *avctx)
@@ -2507,25 +2507,25 @@ static int update_frames(AVCodecContext *avctx)
int ret = 0;
/* shuffle frames (last = current) */
- ff_thread_release_buffer(avctx, &s->last_frame);
+ ff_thread_release_ext_buffer(avctx, &s->last_frame);
ret = ff_thread_ref_frame(&s->last_frame, &s->current_frame);
if (ret < 0)
goto fail;
if (s->keyframe) {
- ff_thread_release_buffer(avctx, &s->golden_frame);
+ ff_thread_release_ext_buffer(avctx, &s->golden_frame);
ret = ff_thread_ref_frame(&s->golden_frame, &s->current_frame);
}
fail:
- ff_thread_release_buffer(avctx, &s->current_frame);
+ ff_thread_release_ext_buffer(avctx, &s->current_frame);
return ret;
}
#if HAVE_THREADS
static int ref_frame(Vp3DecodeContext *s, ThreadFrame *dst, ThreadFrame *src)
{
- ff_thread_release_buffer(s->avctx, dst);
+ ff_thread_release_ext_buffer(s->avctx, dst);
if (src->f->data[0])
return ff_thread_ref_frame(dst, src);
return 0;
@@ -2675,7 +2675,8 @@ static int vp3_decode_frame(AVCodecContext *avctx,
s->current_frame.f->pict_type = s->keyframe ? AV_PICTURE_TYPE_I
: AV_PICTURE_TYPE_P;
s->current_frame.f->key_frame = s->keyframe;
- if ((ret = ff_thread_get_buffer(avctx, &s->current_frame, AV_GET_BUFFER_FLAG_REF)) < 0)
+ if ((ret = ff_thread_get_ext_buffer(avctx, &s->current_frame,
+ AV_GET_BUFFER_FLAG_REF)) < 0)
goto error;
if (!s->edge_emu_buffer)
@@ -2734,10 +2735,10 @@ static int vp3_decode_frame(AVCodecContext *avctx,
"vp3: first frame not a keyframe\n");
s->golden_frame.f->pict_type = AV_PICTURE_TYPE_I;
- if ((ret = ff_thread_get_buffer(avctx, &s->golden_frame,
- AV_GET_BUFFER_FLAG_REF)) < 0)
+ if ((ret = ff_thread_get_ext_buffer(avctx, &s->golden_frame,
+ AV_GET_BUFFER_FLAG_REF)) < 0)
goto error;
- ff_thread_release_buffer(avctx, &s->last_frame);
+ ff_thread_release_ext_buffer(avctx, &s->last_frame);
if ((ret = ff_thread_ref_frame(&s->last_frame,
&s->golden_frame)) < 0)
goto error;
diff --git a/libavcodec/vp8.c b/libavcodec/vp8.c
index 6aba0c5feb..40a7e13683 100644
--- a/libavcodec/vp8.c
+++ b/libavcodec/vp8.c
@@ -72,8 +72,8 @@ static void free_buffers(VP8Context *s)
static int vp8_alloc_frame(VP8Context *s, VP8Frame *f, int ref)
{
int ret;
- if ((ret = ff_thread_get_buffer(s->avctx, &f->tf,
- ref ? AV_GET_BUFFER_FLAG_REF : 0)) < 0)
+ if ((ret = ff_thread_get_ext_buffer(s->avctx, &f->tf,
+ ref ? AV_GET_BUFFER_FLAG_REF : 0)) < 0)
return ret;
if (!(f->seg_map = av_buffer_allocz(s->mb_width * s->mb_height)))
goto fail;
@@ -90,7 +90,7 @@ static int vp8_alloc_frame(VP8Context *s, VP8Frame *f, int ref)
fail:
av_buffer_unref(&f->seg_map);
- ff_thread_release_buffer(s->avctx, &f->tf);
+ ff_thread_release_ext_buffer(s->avctx, &f->tf);
return AVERROR(ENOMEM);
}
@@ -99,7 +99,7 @@ static void vp8_release_frame(VP8Context *s, VP8Frame *f)
av_buffer_unref(&f->seg_map);
av_buffer_unref(&f->hwaccel_priv_buf);
f->hwaccel_picture_private = NULL;
- ff_thread_release_buffer(s->avctx, &f->tf);
+ ff_thread_release_ext_buffer(s->avctx, &f->tf);
}
#if CONFIG_VP8_DECODER
diff --git a/libavcodec/vp9.c b/libavcodec/vp9.c
index 1250425dce..7ef10f7a80 100644
--- a/libavcodec/vp9.c
+++ b/libavcodec/vp9.c
@@ -93,7 +93,7 @@ static void vp9_tile_data_free(VP9TileData *td)
static void vp9_frame_unref(AVCodecContext *avctx, VP9Frame *f)
{
- ff_thread_release_buffer(avctx, &f->tf);
+ ff_thread_release_ext_buffer(avctx, &f->tf);
av_buffer_unref(&f->extradata);
av_buffer_unref(&f->hwaccel_priv_buf);
f->segmentation_map = NULL;
@@ -105,7 +105,7 @@ static int vp9_frame_alloc(AVCodecContext *avctx, VP9Frame *f)
VP9Context *s = avctx->priv_data;
int ret, sz;
- ret = ff_thread_get_buffer(avctx, &f->tf, AV_GET_BUFFER_FLAG_REF);
+ ret = ff_thread_get_ext_buffer(avctx, &f->tf, AV_GET_BUFFER_FLAG_REF);
if (ret < 0)
return ret;
@@ -1237,9 +1237,9 @@ static av_cold int vp9_decode_free(AVCodecContext *avctx)
}
av_buffer_pool_uninit(&s->frame_extradata_pool);
for (i = 0; i < 8; i++) {
- ff_thread_release_buffer(avctx, &s->s.refs[i]);
+ ff_thread_release_ext_buffer(avctx, &s->s.refs[i]);
av_frame_free(&s->s.refs[i].f);
- ff_thread_release_buffer(avctx, &s->next_refs[i]);
+ ff_thread_release_ext_buffer(avctx, &s->next_refs[i]);
av_frame_free(&s->next_refs[i].f);
}
@@ -1571,7 +1571,7 @@ static int vp9_decode_frame(AVCodecContext *avctx, void *frame,
((AVFrame *)frame)->pkt_dts = pkt->dts;
for (i = 0; i < 8; i++) {
if (s->next_refs[i].f->buf[0])
- ff_thread_release_buffer(avctx, &s->next_refs[i]);
+ ff_thread_release_ext_buffer(avctx, &s->next_refs[i]);
if (s->s.refs[i].f->buf[0] &&
(ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.refs[i])) < 0)
return ret;
@@ -1611,7 +1611,7 @@ static int vp9_decode_frame(AVCodecContext *avctx, void *frame,
// ref frame setup
for (i = 0; i < 8; i++) {
if (s->next_refs[i].f->buf[0])
- ff_thread_release_buffer(avctx, &s->next_refs[i]);
+ ff_thread_release_ext_buffer(avctx, &s->next_refs[i]);
if (s->s.h.refreshrefmask & (1 << i)) {
ret = ff_thread_ref_frame(&s->next_refs[i], &s->s.frames[CUR_FRAME].tf);
} else if (s->s.refs[i].f->buf[0]) {
@@ -1760,7 +1760,7 @@ finish:
// ref frame setup
for (i = 0; i < 8; i++) {
if (s->s.refs[i].f->buf[0])
- ff_thread_release_buffer(avctx, &s->s.refs[i]);
+ ff_thread_release_ext_buffer(avctx, &s->s.refs[i]);
if (s->next_refs[i].f->buf[0] &&
(ret = ff_thread_ref_frame(&s->s.refs[i], &s->next_refs[i])) < 0)
return ret;
@@ -1783,7 +1783,7 @@ static void vp9_decode_flush(AVCodecContext *avctx)
for (i = 0; i < 3; i++)
vp9_frame_unref(avctx, &s->s.frames[i]);
for (i = 0; i < 8; i++)
- ff_thread_release_buffer(avctx, &s->s.refs[i]);
+ ff_thread_release_ext_buffer(avctx, &s->s.refs[i]);
}
static av_cold int vp9_decode_init(AVCodecContext *avctx)
@@ -1832,7 +1832,7 @@ static int vp9_decode_update_thread_context(AVCodecContext *dst, const AVCodecCo
}
for (i = 0; i < 8; i++) {
if (s->s.refs[i].f->buf[0])
- ff_thread_release_buffer(dst, &s->s.refs[i]);
+ ff_thread_release_ext_buffer(dst, &s->s.refs[i]);
if (ssrc->next_refs[i].f->buf[0]) {
if ((ret = ff_thread_ref_frame(&s->s.refs[i], &ssrc->next_refs[i])) < 0)
return ret;
diff --git a/libavcodec/wavpack.c b/libavcodec/wavpack.c
index 5a338d7658..30790ef16e 100644
--- a/libavcodec/wavpack.c
+++ b/libavcodec/wavpack.c
@@ -1019,7 +1019,7 @@ static int update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
if (dst == src)
return 0;
- ff_thread_release_buffer(dst, &fdst->curr_frame);
+ ff_thread_release_ext_buffer(dst, &fdst->curr_frame);
if (fsrc->curr_frame.f->data[0]) {
if ((ret = ff_thread_ref_frame(&fdst->curr_frame, &fsrc->curr_frame)) < 0)
return ret;
@@ -1066,10 +1066,10 @@ static av_cold int wavpack_decode_end(AVCodecContext *avctx)
av_freep(&s->fdec[i]);
s->fdec_num = 0;
- ff_thread_release_buffer(avctx, &s->curr_frame);
+ ff_thread_release_ext_buffer(avctx, &s->curr_frame);
av_frame_free(&s->curr_frame.f);
- ff_thread_release_buffer(avctx, &s->prev_frame);
+ ff_thread_release_ext_buffer(avctx, &s->prev_frame);
av_frame_free(&s->prev_frame.f);
av_buffer_unref(&s->dsd_ref);
@@ -1539,7 +1539,7 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
av_log(avctx, AV_LOG_ERROR, "Error reinitializing the DSD context\n");
return ret;
}
- ff_thread_release_buffer(avctx, &wc->curr_frame);
+ ff_thread_release_ext_buffer(avctx, &wc->curr_frame);
}
avctx->channels = new_channels;
avctx->channel_layout = new_chmask;
@@ -1547,12 +1547,14 @@ static int wavpack_decode_block(AVCodecContext *avctx, int block_no,
avctx->sample_fmt = sample_fmt;
avctx->bits_per_raw_sample = orig_bpp;
- ff_thread_release_buffer(avctx, &wc->prev_frame);
+ ff_thread_release_ext_buffer(avctx, &wc->prev_frame);
FFSWAP(ThreadFrame, wc->curr_frame, wc->prev_frame);
/* get output buffer */
wc->curr_frame.f->nb_samples = s->samples;
- if ((ret = ff_thread_get_buffer(avctx, &wc->curr_frame, AV_GET_BUFFER_FLAG_REF)) < 0)
+ ret = ff_thread_get_ext_buffer(avctx, &wc->curr_frame,
+ AV_GET_BUFFER_FLAG_REF);
+ if (ret < 0)
return ret;
wc->frame = wc->curr_frame.f;
@@ -1676,7 +1678,7 @@ static int wavpack_decode_frame(AVCodecContext *avctx, void *data,
}
ff_thread_await_progress(&s->prev_frame, INT_MAX, 0);
- ff_thread_release_buffer(avctx, &s->prev_frame);
+ ff_thread_release_ext_buffer(avctx, &s->prev_frame);
if (s->modulation == MODULATION_DSD)
avctx->execute2(avctx, dsd_channel, s->frame, NULL, avctx->channels);
@@ -1693,7 +1695,7 @@ static int wavpack_decode_frame(AVCodecContext *avctx, void *data,
error:
if (s->frame) {
ff_thread_await_progress(&s->prev_frame, INT_MAX, 0);
- ff_thread_release_buffer(avctx, &s->prev_frame);
+ ff_thread_release_ext_buffer(avctx, &s->prev_frame);
ff_thread_report_progress(&s->curr_frame, INT_MAX, 0);
}
--
2.32.0
_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
^ permalink raw reply [flat|nested] 6+ messages in thread
* [FFmpeg-devel] [PATCH 5/6] avcodec/thread: Don't use ThreadFrame when unnecessary
2022-02-07 0:37 [FFmpeg-devel] [PATCH 1/6] avcodec/pthread_frame: Don't prematurely presume frame-threading Andreas Rheinhardt
` (2 preceding siblings ...)
2022-02-07 0:58 ` [FFmpeg-devel] [PATCH 4/6] avcodec/threadframe: Add ff_thread_(get|release)_ext_buffer() Andreas Rheinhardt
@ 2022-02-07 0:58 ` Andreas Rheinhardt
2022-02-07 0:58 ` [FFmpeg-devel] [PATCH 6/6] avcodec/pthread_frame: Properly unref frame in case of decoding failure Andreas Rheinhardt
4 siblings, 0 replies; 6+ messages in thread
From: Andreas Rheinhardt @ 2022-02-07 0:58 UTC (permalink / raw)
To: ffmpeg-devel; +Cc: Andreas Rheinhardt
The majority of frame-threaded decoders (mainly the intra-only)
need exactly one part of ThreadFrame: The AVFrame. They don't
need the owners nor the progress, yet they had to use it because
ff_thread_(get|release)_buffer() requires it.
This commit changes this and makes these functions work with ordinary
AVFrames; the decoders that need the extra fields for progress
use ff_thread_(get|release)_ext_buffer() which work exactly
as ff_thread_(get|release)_buffer() used to do.
This also avoids some unnecessary allocations of progress AVBuffers,
namely for H.264 and HEVC film grain frames: These frames are not
used for synchronization and therefore don't need a ThreadFrame.
Also move the ThreadFrame structure as well as ff_thread_ref_frame()
to threadframe.h, the header for frame-threaded decoders with
inter-frame dependencies.
Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
---
In case anyone is wondering why ff_thread_get_ext_buffer() still
checks for FF_CODEC_CAP_ALLOCATE_PROGRESS:
It is due to weirdness in the WebP decoder. The WebP decoder
(which has the AV_CODEC_CAP_FRAME_THREADS set, but not
FF_CODEC_CAP_ALLOCATE_PROGRESS) directly uses the VP8 decoder's
init, decode_frame and close function. According to the spec [1],
this is only to be used with VP8 key frames, yet I don't see a check
that enforces this. If the data would contain non-keyframes,
it would probably (haven't tested it) lead to very broken results
when using frame threading, because there is no synchronisation
whatsoever between the underlying VP8 decode contexts.
Always flushing the VP8 decoder after every frame should be enough
to make it consistent (and error out in case there are non-keyframes).
[1]: https://developers.google.com/speed/webp/docs/riff_container
libavcodec/aic.c | 3 +-
libavcodec/alac.c | 3 +-
libavcodec/av1dec.c | 35 ++++++++--------
libavcodec/av1dec.h | 4 +-
libavcodec/bitpacked_dec.c | 3 +-
libavcodec/cfhd.c | 13 +++---
libavcodec/cllc.c | 7 ++--
libavcodec/cri.c | 3 +-
libavcodec/dnxhddec.c | 3 +-
libavcodec/dvdec.c | 22 +++++-----
libavcodec/dxtory.c | 15 +++----
libavcodec/dxv.c | 11 +++--
libavcodec/dxva2_av1.c | 8 ++--
libavcodec/error_resilience.h | 2 +-
libavcodec/exr.c | 3 +-
libavcodec/ffv1.h | 2 +-
libavcodec/ffv1dec.c | 1 +
libavcodec/flacdec.c | 3 +-
libavcodec/fraps.c | 3 +-
libavcodec/h264_picture.c | 15 +++----
libavcodec/h264_slice.c | 4 +-
libavcodec/h264dec.c | 1 +
libavcodec/h264dec.h | 1 -
libavcodec/hapdec.c | 14 +++----
libavcodec/hevc_refs.c | 2 +-
libavcodec/hevcdec.c | 6 +--
libavcodec/hevcdec.h | 3 +-
libavcodec/hqx.c | 4 +-
libavcodec/huffyuvdec.c | 3 +-
libavcodec/jpeg2000dec.c | 3 +-
libavcodec/lagarith.c | 11 +++--
libavcodec/lcldec.c | 3 +-
libavcodec/libopenjpegdec.c | 3 +-
libavcodec/magicyuv.c | 3 +-
libavcodec/mdec.c | 11 +++--
libavcodec/mpegpicture.h | 2 +-
libavcodec/notchlc.c | 7 ++--
libavcodec/nvdec_av1.c | 6 +--
libavcodec/photocd.c | 3 +-
libavcodec/pixlet.c | 9 ++--
libavcodec/proresdec2.c | 3 +-
libavcodec/pthread_frame.c | 79 +++++++++++++++++++----------------
libavcodec/rv34.c | 1 +
libavcodec/sheervideo.c | 3 +-
libavcodec/takdec.c | 3 +-
libavcodec/thread.h | 14 +------
libavcodec/threadframe.h | 12 +++++-
libavcodec/tiff.c | 9 ++--
libavcodec/tta.c | 3 +-
libavcodec/utils.c | 11 +++--
libavcodec/utvideodec.c | 76 ++++++++++++++++-----------------
libavcodec/v210dec.c | 3 +-
libavcodec/v410dec.c | 3 +-
libavcodec/vaapi_av1.c | 44 +++++++++----------
libavcodec/vble.c | 3 +-
libavcodec/vp8.h | 2 +-
libavcodec/vp9shared.h | 2 +-
libavcodec/webp.c | 3 +-
libavcodec/ylc.c | 3 +-
59 files changed, 252 insertions(+), 283 deletions(-)
diff --git a/libavcodec/aic.c b/libavcodec/aic.c
index c95bdae1ed..552e7e9c10 100644
--- a/libavcodec/aic.c
+++ b/libavcodec/aic.c
@@ -391,7 +391,6 @@ static int aic_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
uint32_t off;
int x, y, ret;
int slice_size;
- ThreadFrame frame = { .f = data };
ctx->frame = data;
ctx->frame->pict_type = AV_PICTURE_TYPE_I;
@@ -410,7 +409,7 @@ static int aic_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
return ret;
}
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, ctx->frame, 0)) < 0)
return ret;
bytestream2_init(&gb, buf + AIC_HDR_SIZE,
diff --git a/libavcodec/alac.c b/libavcodec/alac.c
index 67fc2a3e41..bac7543020 100644
--- a/libavcodec/alac.c
+++ b/libavcodec/alac.c
@@ -270,10 +270,9 @@ static int decode_element(AVCodecContext *avctx, AVFrame *frame, int ch_index,
return AVERROR_INVALIDDATA;
}
if (!alac->nb_samples) {
- ThreadFrame tframe = { .f = frame };
/* get output buffer */
frame->nb_samples = output_samples;
- if ((ret = ff_thread_get_buffer(avctx, &tframe, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
return ret;
} else if (output_samples != alac->nb_samples) {
av_log(avctx, AV_LOG_ERROR, "sample count mismatch: %"PRIu32" != %d\n",
diff --git a/libavcodec/av1dec.c b/libavcodec/av1dec.c
index 62d459cded..b354d8d03c 100644
--- a/libavcodec/av1dec.c
+++ b/libavcodec/av1dec.c
@@ -27,6 +27,7 @@
#include "hwconfig.h"
#include "internal.h"
#include "profiles.h"
+#include "thread.h"
/**< same with Div_Lut defined in spec 7.11.3.7 */
static const uint16_t div_lut[AV1_DIV_LUT_NUM] = {
@@ -569,7 +570,7 @@ static int get_pixel_format(AVCodecContext *avctx)
static void av1_frame_unref(AVCodecContext *avctx, AV1Frame *f)
{
- ff_thread_release_buffer(avctx, &f->tf);
+ ff_thread_release_buffer(avctx, f->f);
av_buffer_unref(&f->hwaccel_priv_buf);
f->hwaccel_picture_private = NULL;
av_buffer_unref(&f->header_ref);
@@ -591,10 +592,10 @@ static int av1_frame_ref(AVCodecContext *avctx, AV1Frame *dst, const AV1Frame *s
dst->raw_frame_header = src->raw_frame_header;
- if (!src->tf.f->buf[0])
+ if (!src->f->buf[0])
return 0;
- ret = ff_thread_ref_frame(&dst->tf, &src->tf);
+ ret = av_frame_ref(dst->f, src->f);
if (ret < 0)
goto fail;
@@ -637,10 +638,10 @@ static av_cold int av1_decode_free(AVCodecContext *avctx)
for (int i = 0; i < FF_ARRAY_ELEMS(s->ref); i++) {
av1_frame_unref(avctx, &s->ref[i]);
- av_frame_free(&s->ref[i].tf.f);
+ av_frame_free(&s->ref[i].f);
}
av1_frame_unref(avctx, &s->cur_frame);
- av_frame_free(&s->cur_frame.tf.f);
+ av_frame_free(&s->cur_frame.f);
av_buffer_unref(&s->seq_ref);
av_buffer_unref(&s->header_ref);
@@ -741,16 +742,16 @@ static av_cold int av1_decode_init(AVCodecContext *avctx)
s->pix_fmt = AV_PIX_FMT_NONE;
for (int i = 0; i < FF_ARRAY_ELEMS(s->ref); i++) {
- s->ref[i].tf.f = av_frame_alloc();
- if (!s->ref[i].tf.f) {
+ s->ref[i].f = av_frame_alloc();
+ if (!s->ref[i].f) {
av_log(avctx, AV_LOG_ERROR,
"Failed to allocate reference frame buffer %d.\n", i);
return AVERROR(ENOMEM);
}
}
- s->cur_frame.tf.f = av_frame_alloc();
- if (!s->cur_frame.tf.f) {
+ s->cur_frame.f = av_frame_alloc();
+ if (!s->cur_frame.f) {
av_log(avctx, AV_LOG_ERROR,
"Failed to allocate current frame buffer.\n");
return AVERROR(ENOMEM);
@@ -803,10 +804,10 @@ static int av1_frame_alloc(AVCodecContext *avctx, AV1Frame *f)
return ret;
}
- if ((ret = ff_thread_get_buffer(avctx, &f->tf, AV_GET_BUFFER_FLAG_REF)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, f->f, AV_GET_BUFFER_FLAG_REF)) < 0)
goto fail;
- frame = f->tf.f;
+ frame = f->f;
frame->key_frame = header->frame_type == AV1_FRAME_KEY;
switch (header->frame_type) {
@@ -905,7 +906,7 @@ static int set_output_frame(AVCodecContext *avctx, AVFrame *frame,
const AVPacket *pkt, int *got_frame)
{
AV1DecContext *s = avctx->priv_data;
- const AVFrame *srcframe = s->cur_frame.tf.f;
+ const AVFrame *srcframe = s->cur_frame.f;
int ret;
// TODO: all layers
@@ -1101,7 +1102,7 @@ static int av1_decode_frame(AVCodecContext *avctx, void *frame,
goto end;
}
- if (s->cur_frame.tf.f->buf[0]) {
+ if (s->cur_frame.f->buf[0]) {
ret = set_output_frame(avctx, frame, pkt, got_frame);
if (ret < 0)
av_log(avctx, AV_LOG_ERROR, "Set output frame error.\n");
@@ -1121,7 +1122,7 @@ static int av1_decode_frame(AVCodecContext *avctx, void *frame,
s->cur_frame.spatial_id = header->spatial_id;
s->cur_frame.temporal_id = header->temporal_id;
- if (avctx->hwaccel && s->cur_frame.tf.f->buf[0]) {
+ if (avctx->hwaccel && s->cur_frame.f->buf[0]) {
ret = avctx->hwaccel->start_frame(avctx, unit->data,
unit->data_size);
if (ret < 0) {
@@ -1148,7 +1149,7 @@ static int av1_decode_frame(AVCodecContext *avctx, void *frame,
if (ret < 0)
goto end;
- if (avctx->hwaccel && s->cur_frame.tf.f->buf[0]) {
+ if (avctx->hwaccel && s->cur_frame.f->buf[0]) {
ret = avctx->hwaccel->decode_slice(avctx,
raw_tile_group->tile_data.data,
raw_tile_group->tile_data.data_size);
@@ -1171,7 +1172,7 @@ static int av1_decode_frame(AVCodecContext *avctx, void *frame,
}
if (raw_tile_group && (s->tile_num == raw_tile_group->tg_end + 1)) {
- if (avctx->hwaccel && s->cur_frame.tf.f->buf[0]) {
+ if (avctx->hwaccel && s->cur_frame.f->buf[0]) {
ret = avctx->hwaccel->end_frame(avctx);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "HW accel end frame fail.\n");
@@ -1185,7 +1186,7 @@ static int av1_decode_frame(AVCodecContext *avctx, void *frame,
goto end;
}
- if (s->raw_frame_header->show_frame && s->cur_frame.tf.f->buf[0]) {
+ if (s->raw_frame_header->show_frame && s->cur_frame.f->buf[0]) {
ret = set_output_frame(avctx, frame, pkt, got_frame);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "Set output frame error\n");
diff --git a/libavcodec/av1dec.h b/libavcodec/av1dec.h
index 4e140588b9..82c7084e99 100644
--- a/libavcodec/av1dec.h
+++ b/libavcodec/av1dec.h
@@ -24,14 +24,14 @@
#include <stdint.h>
#include "libavutil/buffer.h"
+#include "libavutil/frame.h"
#include "libavutil/pixfmt.h"
#include "avcodec.h"
#include "cbs.h"
#include "cbs_av1.h"
-#include "thread.h"
typedef struct AV1Frame {
- ThreadFrame tf;
+ AVFrame *f;
AVBufferRef *hwaccel_priv_buf;
void *hwaccel_picture_private;
diff --git a/libavcodec/bitpacked_dec.c b/libavcodec/bitpacked_dec.c
index 69fea9c366..fa554d99af 100644
--- a/libavcodec/bitpacked_dec.c
+++ b/libavcodec/bitpacked_dec.c
@@ -65,12 +65,11 @@ static int bitpacked_decode_yuv422p10(AVCodecContext *avctx, AVFrame *frame,
{
uint64_t frame_size = (uint64_t)avctx->width * (uint64_t)avctx->height * 20;
uint64_t packet_size = (uint64_t)avpkt->size * 8;
- ThreadFrame tframe = { .f = frame };
GetBitContext bc;
uint16_t *y, *u, *v;
int ret, i, j;
- ret = ff_thread_get_buffer(avctx, &tframe, 0);
+ ret = ff_thread_get_buffer(avctx, frame, 0);
if (ret < 0)
return ret;
diff --git a/libavcodec/cfhd.c b/libavcodec/cfhd.c
index ac7826250f..ff38106fac 100644
--- a/libavcodec/cfhd.c
+++ b/libavcodec/cfhd.c
@@ -378,8 +378,7 @@ static int cfhd_decode(AVCodecContext *avctx, void *data, int *got_frame,
CFHDContext *s = avctx->priv_data;
CFHDDSPContext *dsp = &s->dsp;
GetByteContext gb;
- ThreadFrame frame = { .f = data };
- AVFrame *pic = data;
+ AVFrame *const pic = data;
int ret = 0, i, j, plane, got_buffer = 0;
int16_t *coeff_data;
@@ -681,10 +680,9 @@ static int cfhd_decode(AVCodecContext *avctx, void *data, int *got_frame,
return AVERROR_INVALIDDATA;
avctx->height = height;
}
- frame.f->width =
- frame.f->height = 0;
+ pic->width = pic->height = 0;
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
return ret;
s->coded_width = 0;
@@ -692,10 +690,9 @@ static int cfhd_decode(AVCodecContext *avctx, void *data, int *got_frame,
s->coded_format = AV_PIX_FMT_NONE;
got_buffer = 1;
} else if (tag == FrameIndex && data == 1 && s->sample_type == 1 && s->frame_type == 2) {
- frame.f->width =
- frame.f->height = 0;
+ pic->width = pic->height = 0;
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
return ret;
s->coded_width = 0;
s->coded_height = 0;
diff --git a/libavcodec/cllc.c b/libavcodec/cllc.c
index 41b3a4ef83..2ad5d77adc 100644
--- a/libavcodec/cllc.c
+++ b/libavcodec/cllc.c
@@ -360,7 +360,6 @@ static int cllc_decode_frame(AVCodecContext *avctx, void *data,
{
CLLCContext *ctx = avctx->priv_data;
AVFrame *pic = data;
- ThreadFrame frame = { .f = data };
uint8_t *src = avpkt->data;
uint32_t info_tag, info_offset;
int data_size;
@@ -424,7 +423,7 @@ static int cllc_decode_frame(AVCodecContext *avctx, void *data,
avctx->pix_fmt = AV_PIX_FMT_YUV422P;
avctx->bits_per_raw_sample = 8;
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
return ret;
ret = decode_yuv_frame(ctx, &gb, pic);
@@ -437,7 +436,7 @@ static int cllc_decode_frame(AVCodecContext *avctx, void *data,
avctx->pix_fmt = AV_PIX_FMT_RGB24;
avctx->bits_per_raw_sample = 8;
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
return ret;
ret = decode_rgb24_frame(ctx, &gb, pic);
@@ -449,7 +448,7 @@ static int cllc_decode_frame(AVCodecContext *avctx, void *data,
avctx->pix_fmt = AV_PIX_FMT_ARGB;
avctx->bits_per_raw_sample = 8;
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
return ret;
ret = decode_argb_frame(ctx, &gb, pic);
diff --git a/libavcodec/cri.c b/libavcodec/cri.c
index dfea24d979..3b2c955dd8 100644
--- a/libavcodec/cri.c
+++ b/libavcodec/cri.c
@@ -174,7 +174,6 @@ static int cri_decode_frame(AVCodecContext *avctx, void *data,
{
CRIContext *s = avctx->priv_data;
GetByteContext *gb = &s->gb;
- ThreadFrame frame = { .f = data };
int ret, bps, hflip = 0, vflip = 0;
AVFrameSideData *rotation;
int compressed = 0;
@@ -318,7 +317,7 @@ skip:
if (!s->data || !s->data_size)
return AVERROR_INVALIDDATA;
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, p, 0)) < 0)
return ret;
avctx->bits_per_raw_sample = bps;
diff --git a/libavcodec/dnxhddec.c b/libavcodec/dnxhddec.c
index 9ecd220e7f..78c7b72730 100644
--- a/libavcodec/dnxhddec.c
+++ b/libavcodec/dnxhddec.c
@@ -618,7 +618,6 @@ static int dnxhd_decode_frame(AVCodecContext *avctx, void *data,
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
DNXHDContext *ctx = avctx->priv_data;
- ThreadFrame frame = { .f = data };
AVFrame *picture = data;
int first_field = 1;
int ret, i;
@@ -650,7 +649,7 @@ decode_coding_unit:
return ret;
if (first_field) {
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, picture, 0)) < 0)
return ret;
picture->pict_type = AV_PICTURE_TYPE_I;
picture->key_frame = 1;
diff --git a/libavcodec/dvdec.c b/libavcodec/dvdec.c
index b72a67d01c..03249d6fa3 100644
--- a/libavcodec/dvdec.c
+++ b/libavcodec/dvdec.c
@@ -612,7 +612,7 @@ static int dvvideo_decode_frame(AVCodecContext *avctx, void *data,
uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
DVVideoContext *s = avctx->priv_data;
- ThreadFrame frame = { .f = data };
+ AVFrame *const frame = data;
const uint8_t *vsc_pack;
int apt, is16_9, ret;
const AVDVProfile *sys;
@@ -633,9 +633,9 @@ static int dvvideo_decode_frame(AVCodecContext *avctx, void *data,
s->sys = sys;
}
- s->frame = frame.f;
- frame.f->key_frame = 1;
- frame.f->pict_type = AV_PICTURE_TYPE_I;
+ s->frame = frame;
+ frame->key_frame = 1;
+ frame->pict_type = AV_PICTURE_TYPE_I;
avctx->pix_fmt = s->sys->pix_fmt;
avctx->framerate = av_inv_q(s->sys->time_base);
@@ -652,20 +652,20 @@ static int dvvideo_decode_frame(AVCodecContext *avctx, void *data,
ff_set_sar(avctx, s->sys->sar[is16_9]);
}
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
return ret;
/* Determine the codec's field order from the packet */
if ( *vsc_pack == dv_video_control ) {
if (avctx->height == 720) {
- frame.f->interlaced_frame = 0;
- frame.f->top_field_first = 0;
+ frame->interlaced_frame = 0;
+ frame->top_field_first = 0;
} else if (avctx->height == 1080) {
- frame.f->interlaced_frame = 1;
- frame.f->top_field_first = (vsc_pack[3] & 0x40) == 0x40;
+ frame->interlaced_frame = 1;
+ frame->top_field_first = (vsc_pack[3] & 0x40) == 0x40;
} else {
- frame.f->interlaced_frame = (vsc_pack[3] & 0x10) == 0x10;
- frame.f->top_field_first = !(vsc_pack[3] & 0x40);
+ frame->interlaced_frame = (vsc_pack[3] & 0x10) == 0x10;
+ frame->top_field_first = !(vsc_pack[3] & 0x40);
}
}
diff --git a/libavcodec/dxtory.c b/libavcodec/dxtory.c
index 914131e742..ff40f29b86 100644
--- a/libavcodec/dxtory.c
+++ b/libavcodec/dxtory.c
@@ -93,7 +93,6 @@ static int dxtory_decode_v1_rgb(AVCodecContext *avctx, AVFrame *pic,
const uint8_t *src, int src_size,
int id, int bpp, uint32_t vflipped)
{
- ThreadFrame frame = { .f = pic };
int h;
uint8_t *dst;
int ret;
@@ -104,7 +103,7 @@ static int dxtory_decode_v1_rgb(AVCodecContext *avctx, AVFrame *pic,
}
avctx->pix_fmt = id;
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
return ret;
do_vflip(avctx, pic, vflipped);
@@ -125,7 +124,6 @@ static int dxtory_decode_v1_410(AVCodecContext *avctx, AVFrame *pic,
const uint8_t *src, int src_size,
uint32_t vflipped)
{
- ThreadFrame frame = { .f = pic };
int h, w;
uint8_t *Y1, *Y2, *Y3, *Y4, *U, *V;
int height, width, hmargin, vmargin;
@@ -138,7 +136,7 @@ static int dxtory_decode_v1_410(AVCodecContext *avctx, AVFrame *pic,
}
avctx->pix_fmt = AV_PIX_FMT_YUV410P;
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
return ret;
do_vflip(avctx, pic, vflipped);
@@ -221,7 +219,6 @@ static int dxtory_decode_v1_420(AVCodecContext *avctx, AVFrame *pic,
const uint8_t *src, int src_size,
uint32_t vflipped)
{
- ThreadFrame frame = { .f = pic };
int h, w;
uint8_t *Y1, *Y2, *U, *V;
int height, width, hmargin, vmargin;
@@ -234,7 +231,7 @@ static int dxtory_decode_v1_420(AVCodecContext *avctx, AVFrame *pic,
}
avctx->pix_fmt = AV_PIX_FMT_YUV420P;
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
return ret;
do_vflip(avctx, pic, vflipped);
@@ -294,7 +291,6 @@ static int dxtory_decode_v1_444(AVCodecContext *avctx, AVFrame *pic,
const uint8_t *src, int src_size,
uint32_t vflipped)
{
- ThreadFrame frame = { .f = pic };
int h, w;
uint8_t *Y, *U, *V;
int ret;
@@ -305,7 +301,7 @@ static int dxtory_decode_v1_444(AVCodecContext *avctx, AVFrame *pic,
}
avctx->pix_fmt = AV_PIX_FMT_YUV444P;
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
return ret;
do_vflip(avctx, pic, vflipped);
@@ -430,7 +426,6 @@ static int dxtory_decode_v2(AVCodecContext *avctx, AVFrame *pic,
enum AVPixelFormat fmt,
uint32_t vflipped)
{
- ThreadFrame frame = { .f = pic };
GetByteContext gb, gb_check;
GetBitContext gb2;
int nslices, slice, line = 0;
@@ -457,7 +452,7 @@ static int dxtory_decode_v2(AVCodecContext *avctx, AVFrame *pic,
return AVERROR_INVALIDDATA;
avctx->pix_fmt = fmt;
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
return ret;
do_vflip(avctx, pic, vflipped);
diff --git a/libavcodec/dxv.c b/libavcodec/dxv.c
index 5f17347913..e2c0cde85e 100644
--- a/libavcodec/dxv.c
+++ b/libavcodec/dxv.c
@@ -1042,7 +1042,7 @@ static int dxv_decode(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
DXVContext *ctx = avctx->priv_data;
- ThreadFrame tframe;
+ AVFrame *const frame = data;
GetByteContext *gbc = &ctx->gbc;
int (*decompress_tex)(AVCodecContext *avctx);
const char *msgcomp, *msgtext;
@@ -1211,18 +1211,17 @@ static int dxv_decode(AVCodecContext *avctx, void *data,
return AVERROR_INVALIDDATA;
}
- tframe.f = data;
- ret = ff_thread_get_buffer(avctx, &tframe, 0);
+ ret = ff_thread_get_buffer(avctx, frame, 0);
if (ret < 0)
return ret;
/* Now decompress the texture with the standard functions. */
avctx->execute2(avctx, decompress_texture_thread,
- tframe.f, NULL, ctx->slice_count);
+ frame, NULL, ctx->slice_count);
/* Frame is ready to be output. */
- tframe.f->pict_type = AV_PICTURE_TYPE_I;
- tframe.f->key_frame = 1;
+ frame->pict_type = AV_PICTURE_TYPE_I;
+ frame->key_frame = 1;
*got_frame = 1;
return avpkt->size;
diff --git a/libavcodec/dxva2_av1.c b/libavcodec/dxva2_av1.c
index 8a912bf6c1..7b4483f855 100644
--- a/libavcodec/dxva2_av1.c
+++ b/libavcodec/dxva2_av1.c
@@ -72,7 +72,7 @@ static int fill_picture_parameters(const AVCodecContext *avctx, AVDXVAContext *c
pp->max_width = seq->max_frame_width_minus_1 + 1;
pp->max_height = seq->max_frame_height_minus_1 + 1;
- pp->CurrPicTextureIndex = ff_dxva2_get_surface_index(avctx, ctx, h->cur_frame.tf.f);
+ pp->CurrPicTextureIndex = ff_dxva2_get_surface_index(avctx, ctx, h->cur_frame.f);
pp->superres_denom = frame_header->use_superres ? frame_header->coded_denom + AV1_SUPERRES_DENOM_MIN : AV1_SUPERRES_NUM;
pp->bitdepth = get_bit_depth_from_seq(seq);
pp->seq_profile = seq->seq_profile;
@@ -132,7 +132,7 @@ static int fill_picture_parameters(const AVCodecContext *avctx, AVDXVAContext *c
memset(pp->RefFrameMapTextureIndex, 0xFF, sizeof(pp->RefFrameMapTextureIndex));
for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
int8_t ref_idx = frame_header->ref_frame_idx[i];
- AVFrame *ref_frame = h->ref[ref_idx].tf.f;
+ AVFrame *ref_frame = h->ref[ref_idx].f;
pp->frame_refs[i].width = ref_frame->width;
pp->frame_refs[i].height = ref_frame->height;
@@ -146,7 +146,7 @@ static int fill_picture_parameters(const AVCodecContext *avctx, AVDXVAContext *c
}
}
for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
- AVFrame *ref_frame = h->ref[i].tf.f;
+ AVFrame *ref_frame = h->ref[i].f;
if (ref_frame->buf[0])
pp->RefFrameMapTextureIndex[i] = ff_dxva2_get_surface_index(avctx, ctx, ref_frame);
}
@@ -436,7 +436,7 @@ static int dxva2_av1_end_frame(AVCodecContext *avctx)
if (ctx_pic->bitstream_size <= 0)
return -1;
- ret = ff_dxva2_common_end_frame(avctx, h->cur_frame.tf.f,
+ ret = ff_dxva2_common_end_frame(avctx, h->cur_frame.f,
&ctx_pic->pp, sizeof(ctx_pic->pp),
NULL, 0,
commit_bitstream_and_slice_buffer);
diff --git a/libavcodec/error_resilience.h b/libavcodec/error_resilience.h
index 664a765659..bb770ff674 100644
--- a/libavcodec/error_resilience.h
+++ b/libavcodec/error_resilience.h
@@ -24,7 +24,7 @@
#include "avcodec.h"
#include "me_cmp.h"
-#include "thread.h"
+#include "threadframe.h"
///< current MB is the first after a resync marker
#define VP_START 1
diff --git a/libavcodec/exr.c b/libavcodec/exr.c
index 0d5b3467d1..8b04fab951 100644
--- a/libavcodec/exr.c
+++ b/libavcodec/exr.c
@@ -2027,7 +2027,6 @@ static int decode_frame(AVCodecContext *avctx, void *data,
{
EXRContext *s = avctx->priv_data;
GetByteContext *gb = &s->gb;
- ThreadFrame frame = { .f = data };
AVFrame *picture = data;
uint8_t *ptr;
@@ -2149,7 +2148,7 @@ static int decode_frame(AVCodecContext *avctx, void *data,
s->scan_lines_per_block;
}
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, picture, 0)) < 0)
return ret;
if (bytestream2_get_bytes_left(gb)/8 < nb_blocks)
diff --git a/libavcodec/ffv1.h b/libavcodec/ffv1.h
index 8ffe5ab433..b1585dbd03 100644
--- a/libavcodec/ffv1.h
+++ b/libavcodec/ffv1.h
@@ -38,7 +38,7 @@
#include "mathops.h"
#include "put_bits.h"
#include "rangecoder.h"
-#include "thread.h"
+#include "threadframe.h"
#ifdef __INTEL_COMPILER
#undef av_flatten
diff --git a/libavcodec/ffv1dec.c b/libavcodec/ffv1dec.c
index 45ecf3152e..201630167d 100644
--- a/libavcodec/ffv1dec.c
+++ b/libavcodec/ffv1dec.c
@@ -37,6 +37,7 @@
#include "golomb.h"
#include "mathops.h"
#include "ffv1.h"
+#include "thread.h"
#include "threadframe.h"
static inline av_flatten int get_symbol_inline(RangeCoder *c, uint8_t *state,
diff --git a/libavcodec/flacdec.c b/libavcodec/flacdec.c
index 09051cc663..f224fa7621 100644
--- a/libavcodec/flacdec.c
+++ b/libavcodec/flacdec.c
@@ -559,7 +559,6 @@ static int flac_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame_ptr, AVPacket *avpkt)
{
AVFrame *frame = data;
- ThreadFrame tframe = { .f = data };
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
FLACContext *s = avctx->priv_data;
@@ -618,7 +617,7 @@ static int flac_decode_frame(AVCodecContext *avctx, void *data,
/* get output buffer */
frame->nb_samples = s->blocksize;
- if ((ret = ff_thread_get_buffer(avctx, &tframe, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
return ret;
s->dsp.decorrelate[s->ch_mode](frame->data, s->decoded,
diff --git a/libavcodec/fraps.c b/libavcodec/fraps.c
index b3262a4b23..18e85eed41 100644
--- a/libavcodec/fraps.c
+++ b/libavcodec/fraps.c
@@ -140,7 +140,6 @@ static int decode_frame(AVCodecContext *avctx,
FrapsContext * const s = avctx->priv_data;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
- ThreadFrame frame = { .f = data };
AVFrame * const f = data;
uint32_t header;
unsigned int version,header_size;
@@ -227,7 +226,7 @@ static int decode_frame(AVCodecContext *avctx,
: AVCOL_RANGE_JPEG;
avctx->colorspace = version & 1 ? AVCOL_SPC_UNSPECIFIED : AVCOL_SPC_BT709;
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, f, 0)) < 0)
return ret;
switch (version) {
diff --git a/libavcodec/h264_picture.c b/libavcodec/h264_picture.c
index c7f5b64b99..2661ff4698 100644
--- a/libavcodec/h264_picture.c
+++ b/libavcodec/h264_picture.c
@@ -30,18 +30,19 @@
#include "avcodec.h"
#include "h264dec.h"
#include "mpegutils.h"
+#include "thread.h"
#include "threadframe.h"
void ff_h264_unref_picture(H264Context *h, H264Picture *pic)
{
- int off = offsetof(H264Picture, tf_grain) + sizeof(pic->tf_grain);
+ int off = offsetof(H264Picture, f_grain) + sizeof(pic->f_grain);
int i;
if (!pic->f || !pic->f->buf[0])
return;
ff_thread_release_ext_buffer(h->avctx, &pic->tf);
- ff_thread_release_buffer(h->avctx, &pic->tf_grain);
+ ff_thread_release_buffer(h->avctx, pic->f_grain);
av_buffer_unref(&pic->hwaccel_priv_buf);
av_buffer_unref(&pic->qscale_table_buf);
@@ -102,9 +103,7 @@ int ff_h264_ref_picture(H264Context *h, H264Picture *dst, H264Picture *src)
goto fail;
if (src->needs_fg) {
- av_assert0(src->tf_grain.f == src->f_grain);
- dst->tf_grain.f = dst->f_grain;
- ret = ff_thread_ref_frame(&dst->tf_grain, &src->tf_grain);
+ ret = av_frame_ref(dst->f_grain, src->f_grain);
if (ret < 0)
goto fail;
}
@@ -161,10 +160,8 @@ int ff_h264_replace_picture(H264Context *h, H264Picture *dst, const H264Picture
goto fail;
if (src->needs_fg) {
- av_assert0(src->tf_grain.f == src->f_grain);
- dst->tf_grain.f = dst->f_grain;
- ff_thread_release_buffer(h->avctx, &dst->tf_grain);
- ret = ff_thread_ref_frame(&dst->tf_grain, &src->tf_grain);
+ ff_thread_release_buffer(h->avctx, dst->f_grain);
+ ret = av_frame_ref(dst->f_grain, src->f_grain);
if (ret < 0)
goto fail;
}
diff --git a/libavcodec/h264_slice.c b/libavcodec/h264_slice.c
index 32d2e090d5..110a41772a 100644
--- a/libavcodec/h264_slice.c
+++ b/libavcodec/h264_slice.c
@@ -45,6 +45,7 @@
#include "mathops.h"
#include "mpegutils.h"
#include "rectangle.h"
+#include "thread.h"
#include "threadframe.h"
static const uint8_t field_scan[16+1] = {
@@ -197,11 +198,10 @@ static int alloc_picture(H264Context *h, H264Picture *pic)
goto fail;
if (pic->needs_fg) {
- pic->tf_grain.f = pic->f_grain;
pic->f_grain->format = pic->f->format;
pic->f_grain->width = pic->f->width;
pic->f_grain->height = pic->f->height;
- ret = ff_thread_get_buffer(h->avctx, &pic->tf_grain, 0);
+ ret = ff_thread_get_buffer(h->avctx, pic->f_grain, 0);
if (ret < 0)
goto fail;
}
diff --git a/libavcodec/h264dec.c b/libavcodec/h264dec.c
index 1d648f04b7..856fbca680 100644
--- a/libavcodec/h264dec.c
+++ b/libavcodec/h264dec.c
@@ -46,6 +46,7 @@
#include "mpegutils.h"
#include "profiles.h"
#include "rectangle.h"
+#include "thread.h"
#include "threadframe.h"
const uint16_t ff_h264_mb_sizes[4] = { 256, 384, 512, 768 };
diff --git a/libavcodec/h264dec.h b/libavcodec/h264dec.h
index 8168c8e97b..f18adda2d1 100644
--- a/libavcodec/h264dec.h
+++ b/libavcodec/h264dec.h
@@ -109,7 +109,6 @@ typedef struct H264Picture {
ThreadFrame tf;
AVFrame *f_grain;
- ThreadFrame tf_grain;
AVBufferRef *qscale_table_buf;
int8_t *qscale_table;
diff --git a/libavcodec/hapdec.c b/libavcodec/hapdec.c
index 45c44ad78d..9f8dadc43d 100644
--- a/libavcodec/hapdec.c
+++ b/libavcodec/hapdec.c
@@ -305,7 +305,7 @@ static int hap_decode(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
HapContext *ctx = avctx->priv_data;
- ThreadFrame tframe;
+ AVFrame *const frame = data;
int ret, i, t;
int section_size;
enum HapSectionType section_type;
@@ -330,8 +330,7 @@ static int hap_decode(AVCodecContext *avctx, void *data,
}
/* Get the output frame ready to receive data */
- tframe.f = data;
- ret = ff_thread_get_buffer(avctx, &tframe, 0);
+ ret = ff_thread_get_buffer(avctx, frame, 0);
if (ret < 0)
return ret;
@@ -383,16 +382,15 @@ static int hap_decode(AVCodecContext *avctx, void *data,
/* Use the decompress function on the texture, one block per thread */
if (t == 0){
- avctx->execute2(avctx, decompress_texture_thread, tframe.f, NULL, ctx->slice_count);
+ avctx->execute2(avctx, decompress_texture_thread, frame, NULL, ctx->slice_count);
} else{
- tframe.f = data;
- avctx->execute2(avctx, decompress_texture2_thread, tframe.f, NULL, ctx->slice_count);
+ avctx->execute2(avctx, decompress_texture2_thread, frame, NULL, ctx->slice_count);
}
}
/* Frame is ready to be output */
- tframe.f->pict_type = AV_PICTURE_TYPE_I;
- tframe.f->key_frame = 1;
+ frame->pict_type = AV_PICTURE_TYPE_I;
+ frame->key_frame = 1;
*got_frame = 1;
return avpkt->size;
diff --git a/libavcodec/hevc_refs.c b/libavcodec/hevc_refs.c
index 0dd456bb92..1eb5daf157 100644
--- a/libavcodec/hevc_refs.c
+++ b/libavcodec/hevc_refs.c
@@ -39,7 +39,7 @@ void ff_hevc_unref_frame(HEVCContext *s, HEVCFrame *frame, int flags)
frame->flags &= ~flags;
if (!frame->flags) {
ff_thread_release_ext_buffer(s->avctx, &frame->tf);
- ff_thread_release_buffer(s->avctx, &frame->tf_grain);
+ ff_thread_release_buffer(s->avctx, frame->frame_grain);
frame->needs_fg = 0;
av_buffer_unref(&frame->tab_mvf_buf);
diff --git a/libavcodec/hevcdec.c b/libavcodec/hevcdec.c
index 6d86a5bfdb..fdff450a5e 100644
--- a/libavcodec/hevcdec.c
+++ b/libavcodec/hevcdec.c
@@ -45,6 +45,7 @@
#include "hevcdec.h"
#include "hwconfig.h"
#include "profiles.h"
+#include "thread.h"
#include "threadframe.h"
const uint8_t ff_hevc_pel_weight[65] = { [2] = 0, [4] = 1, [6] = 2, [8] = 3, [12] = 4, [16] = 5, [24] = 6, [32] = 7, [48] = 8, [64] = 9 };
@@ -3026,7 +3027,7 @@ static int hevc_frame_start(HEVCContext *s)
s->ref->frame_grain->format = s->ref->frame->format;
s->ref->frame_grain->width = s->ref->frame->width;
s->ref->frame_grain->height = s->ref->frame->height;
- if ((ret = ff_thread_get_buffer(s->avctx, &s->ref->tf_grain, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(s->avctx, s->ref->frame_grain, 0)) < 0)
goto fail;
}
@@ -3533,7 +3534,7 @@ static int hevc_ref_frame(HEVCContext *s, HEVCFrame *dst, HEVCFrame *src)
return ret;
if (src->needs_fg) {
- ret = ff_thread_ref_frame(&dst->tf_grain, &src->tf_grain);
+ ret = av_frame_ref(dst->frame_grain, src->frame_grain);
if (ret < 0)
return ret;
dst->needs_fg = 1;
@@ -3652,7 +3653,6 @@ static av_cold int hevc_init_context(AVCodecContext *avctx)
s->DPB[i].frame_grain = av_frame_alloc();
if (!s->DPB[i].frame_grain)
goto fail;
- s->DPB[i].tf_grain.f = s->DPB[i].frame_grain;
}
s->max_ra = INT_MAX;
diff --git a/libavcodec/hevcdec.h b/libavcodec/hevcdec.h
index 157bc6926f..bf390664d4 100644
--- a/libavcodec/hevcdec.h
+++ b/libavcodec/hevcdec.h
@@ -42,7 +42,7 @@
#include "hevcdsp.h"
#include "h274.h"
#include "internal.h"
-#include "thread.h"
+#include "threadframe.h"
#include "videodsp.h"
#define SHIFT_CTB_WPP 2
@@ -396,7 +396,6 @@ typedef struct HEVCFrame {
AVFrame *frame;
AVFrame *frame_grain;
ThreadFrame tf;
- ThreadFrame tf_grain;
int needs_fg; /* 1 if grain needs to be applied by the decoder */
MvField *tab_mvf;
RefPicList *refPicList;
diff --git a/libavcodec/hqx.c b/libavcodec/hqx.c
index 3a4db1b6c3..09310b69e8 100644
--- a/libavcodec/hqx.c
+++ b/libavcodec/hqx.c
@@ -404,7 +404,7 @@ static int hqx_decode_frame(AVCodecContext *avctx, void *data,
int *got_picture_ptr, AVPacket *avpkt)
{
HQXContext *ctx = avctx->priv_data;
- ThreadFrame frame = { .f = data };
+ AVFrame *const frame = data;
uint8_t *src = avpkt->data;
uint32_t info_tag;
int data_start;
@@ -499,7 +499,7 @@ static int hqx_decode_frame(AVCodecContext *avctx, void *data,
return AVERROR_INVALIDDATA;
}
- ret = ff_thread_get_buffer(avctx, &frame, 0);
+ ret = ff_thread_get_buffer(avctx, frame, 0);
if (ret < 0)
return ret;
diff --git a/libavcodec/huffyuvdec.c b/libavcodec/huffyuvdec.c
index 0abf73b21b..a5e0ef5818 100644
--- a/libavcodec/huffyuvdec.c
+++ b/libavcodec/huffyuvdec.c
@@ -1185,7 +1185,6 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
HYuvContext *s = avctx->priv_data;
const int width = s->width;
const int height = s->height;
- ThreadFrame frame = { .f = data };
AVFrame *const p = data;
int slice, table_size = 0, ret, nb_slices;
unsigned slices_info_offset;
@@ -1203,7 +1202,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
s->bdsp.bswap_buf((uint32_t *) s->bitstream_buffer,
(const uint32_t *) buf, buf_size / 4);
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, p, 0)) < 0)
return ret;
if (s->context) {
diff --git a/libavcodec/jpeg2000dec.c b/libavcodec/jpeg2000dec.c
index a7eb809f30..d96ec3a275 100644
--- a/libavcodec/jpeg2000dec.c
+++ b/libavcodec/jpeg2000dec.c
@@ -2476,7 +2476,6 @@ static int jpeg2000_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
Jpeg2000DecoderContext *s = avctx->priv_data;
- ThreadFrame frame = { .f = data };
AVFrame *picture = data;
int ret;
@@ -2517,7 +2516,7 @@ static int jpeg2000_decode_frame(AVCodecContext *avctx, void *data,
goto end;
/* get picture buffer */
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, picture, 0)) < 0)
goto end;
picture->pict_type = AV_PICTURE_TYPE_I;
picture->key_frame = 1;
diff --git a/libavcodec/lagarith.c b/libavcodec/lagarith.c
index 7220648bc4..b8f330cf34 100644
--- a/libavcodec/lagarith.c
+++ b/libavcodec/lagarith.c
@@ -540,7 +540,6 @@ static int lag_decode_frame(AVCodecContext *avctx,
const uint8_t *buf = avpkt->data;
unsigned int buf_size = avpkt->size;
LagarithContext *l = avctx->priv_data;
- ThreadFrame frame = { .f = data };
AVFrame *const p = data;
uint8_t frametype;
uint32_t offset_gu = 0, offset_bv = 0, offset_ry = 9;
@@ -569,7 +568,7 @@ static int lag_decode_frame(AVCodecContext *avctx,
planes = 4;
}
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, p, 0)) < 0)
return ret;
if (frametype == FRAME_SOLID_RGBA) {
@@ -593,7 +592,7 @@ static int lag_decode_frame(AVCodecContext *avctx,
avctx->pix_fmt = AV_PIX_FMT_GBRAP;
}
- if ((ret = ff_thread_get_buffer(avctx, &frame,0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, p,0)) < 0)
return ret;
for (i = 0; i < avctx->height; i++) {
@@ -614,7 +613,7 @@ static int lag_decode_frame(AVCodecContext *avctx,
if (frametype == FRAME_ARITH_RGB24 || frametype == FRAME_U_RGB24)
avctx->pix_fmt = AV_PIX_FMT_GBRP;
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, p, 0)) < 0)
return ret;
offs[0] = offset_bv;
@@ -650,7 +649,7 @@ static int lag_decode_frame(AVCodecContext *avctx,
case FRAME_ARITH_YUY2:
avctx->pix_fmt = AV_PIX_FMT_YUV422P;
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, p, 0)) < 0)
return ret;
if (offset_ry >= buf_size ||
@@ -678,7 +677,7 @@ static int lag_decode_frame(AVCodecContext *avctx,
case FRAME_ARITH_YV12:
avctx->pix_fmt = AV_PIX_FMT_YUV420P;
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, p, 0)) < 0)
return ret;
if (offset_ry >= buf_size ||
diff --git a/libavcodec/lcldec.c b/libavcodec/lcldec.c
index a47e3bd780..f3b7a8ac1b 100644
--- a/libavcodec/lcldec.c
+++ b/libavcodec/lcldec.c
@@ -158,7 +158,6 @@ static int zlib_decomp(AVCodecContext *avctx, const uint8_t *src, int src_len, i
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
{
AVFrame *frame = data;
- ThreadFrame tframe = { .f = data };
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
LclDecContext * const c = avctx->priv_data;
@@ -175,7 +174,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
unsigned int len = buf_size;
int linesize, offset;
- if ((ret = ff_thread_get_buffer(avctx, &tframe, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
return ret;
outptr = frame->data[0]; // Output image pointer
diff --git a/libavcodec/libopenjpegdec.c b/libavcodec/libopenjpegdec.c
index 8982d21be4..c9496d886e 100644
--- a/libavcodec/libopenjpegdec.c
+++ b/libavcodec/libopenjpegdec.c
@@ -324,7 +324,6 @@ static int libopenjpeg_decode_frame(AVCodecContext *avctx,
uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
LibOpenJPEGContext *ctx = avctx->priv_data;
- ThreadFrame frame = { .f = data };
AVFrame *picture = data;
const AVPixFmtDescriptor *desc;
int width, height, ret;
@@ -417,7 +416,7 @@ static int libopenjpeg_decode_frame(AVCodecContext *avctx,
if (image->comps[i].prec > avctx->bits_per_raw_sample)
avctx->bits_per_raw_sample = image->comps[i].prec;
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, picture, 0)) < 0)
goto done;
ret = !opj_decode(dec, stream, image);
diff --git a/libavcodec/magicyuv.c b/libavcodec/magicyuv.c
index 594196063b..46515780fc 100644
--- a/libavcodec/magicyuv.c
+++ b/libavcodec/magicyuv.c
@@ -431,7 +431,6 @@ static int magy_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
MagicYUVContext *s = avctx->priv_data;
- ThreadFrame frame = { .f = data };
AVFrame *p = data;
GetByteContext gb;
uint32_t first_offset, offset, next_offset, header_size, slice_width;
@@ -641,7 +640,7 @@ static int magy_decode_frame(AVCodecContext *avctx, void *data,
p->pict_type = AV_PICTURE_TYPE_I;
p->key_frame = 1;
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, p, 0)) < 0)
return ret;
s->buf = avpkt->data;
diff --git a/libavcodec/mdec.c b/libavcodec/mdec.c
index 007e7fada8..9f61256bc4 100644
--- a/libavcodec/mdec.c
+++ b/libavcodec/mdec.c
@@ -42,7 +42,6 @@ typedef struct MDECContext {
BlockDSPContext bdsp;
BswapDSPContext bbdsp;
IDCTDSPContext idsp;
- ThreadFrame frame;
GetBitContext gb;
ScanTable scantable;
int version;
@@ -174,13 +173,13 @@ static int decode_frame(AVCodecContext *avctx,
MDECContext * const a = avctx->priv_data;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
- ThreadFrame frame = { .f = data };
+ AVFrame *const frame = data;
int ret;
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
return ret;
- frame.f->pict_type = AV_PICTURE_TYPE_I;
- frame.f->key_frame = 1;
+ frame->pict_type = AV_PICTURE_TYPE_I;
+ frame->key_frame = 1;
av_fast_padded_malloc(&a->bitstream_buffer, &a->bitstream_buffer_size, buf_size);
if (!a->bitstream_buffer)
@@ -202,7 +201,7 @@ static int decode_frame(AVCodecContext *avctx,
if ((ret = decode_mb(a, a->block)) < 0)
return ret;
- idct_put(a, frame.f, a->mb_x, a->mb_y);
+ idct_put(a, frame, a->mb_x, a->mb_y);
}
}
diff --git a/libavcodec/mpegpicture.h b/libavcodec/mpegpicture.h
index e1e9f8d7e0..c79693caba 100644
--- a/libavcodec/mpegpicture.h
+++ b/libavcodec/mpegpicture.h
@@ -27,7 +27,7 @@
#include "avcodec.h"
#include "motion_est.h"
-#include "thread.h"
+#include "threadframe.h"
#define MPEGVIDEO_MAX_PLANES 4
#define MAX_PICTURE_COUNT 36
diff --git a/libavcodec/notchlc.c b/libavcodec/notchlc.c
index be28161995..9dd3bf95a0 100644
--- a/libavcodec/notchlc.c
+++ b/libavcodec/notchlc.c
@@ -146,7 +146,7 @@ static int lz4_decompress(AVCodecContext *avctx,
return bytestream2_tell_p(pb);
}
-static int decode_blocks(AVCodecContext *avctx, AVFrame *p, ThreadFrame *frame,
+static int decode_blocks(AVCodecContext *avctx, AVFrame *p,
unsigned uncompressed_size)
{
NotchLCContext *s = avctx->priv_data;
@@ -221,7 +221,7 @@ static int decode_blocks(AVCodecContext *avctx, AVFrame *p, ThreadFrame *frame,
return AVERROR_INVALIDDATA;
s->uv_count_offset = s->y_data_offset - s->a_data_offset;
- if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, p, 0)) < 0)
return ret;
rgb = *gb;
@@ -464,7 +464,6 @@ static int decode_frame(AVCodecContext *avctx,
AVPacket *avpkt)
{
NotchLCContext *s = avctx->priv_data;
- ThreadFrame frame = { .f = data };
GetByteContext *gb = &s->gb;
PutByteContext *pb = &s->pb;
unsigned uncompressed_size;
@@ -513,7 +512,7 @@ static int decode_frame(AVCodecContext *avctx,
bytestream2_init(gb, s->uncompressed_buffer, uncompressed_size);
}
- ret = decode_blocks(avctx, p, &frame, uncompressed_size);
+ ret = decode_blocks(avctx, p, uncompressed_size);
if (ret < 0)
return ret;
diff --git a/libavcodec/nvdec_av1.c b/libavcodec/nvdec_av1.c
index 1ce846a60d..3bbcd76123 100644
--- a/libavcodec/nvdec_av1.c
+++ b/libavcodec/nvdec_av1.c
@@ -49,7 +49,7 @@ static int nvdec_av1_start_frame(AVCodecContext *avctx, const uint8_t *buffer, u
CUVIDAV1PICPARAMS *ppc = &pp->CodecSpecific.av1;
FrameDecodeData *fdd;
NVDECFrame *cf;
- AVFrame *cur_frame = s->cur_frame.tf.f;
+ AVFrame *cur_frame = s->cur_frame.f;
unsigned char remap_lr_type[4] = { AV1_RESTORE_NONE, AV1_RESTORE_SWITCHABLE, AV1_RESTORE_WIENER, AV1_RESTORE_SGRPROJ };
@@ -233,7 +233,7 @@ static int nvdec_av1_start_frame(AVCodecContext *avctx, const uint8_t *buffer, u
ppc->loop_filter_ref_deltas[i] = frame_header->loop_filter_ref_deltas[i];
/* Reference Frames */
- ppc->ref_frame_map[i] = ff_nvdec_get_ref_idx(s->ref[i].tf.f);
+ ppc->ref_frame_map[i] = ff_nvdec_get_ref_idx(s->ref[i].f);
}
if (frame_header->primary_ref_frame == AV1_PRIMARY_REF_NONE) {
@@ -246,7 +246,7 @@ static int nvdec_av1_start_frame(AVCodecContext *avctx, const uint8_t *buffer, u
for (i = 0; i < AV1_REFS_PER_FRAME; ++i) {
/* Ref Frame List */
int8_t ref_idx = frame_header->ref_frame_idx[i];
- AVFrame *ref_frame = s->ref[ref_idx].tf.f;
+ AVFrame *ref_frame = s->ref[ref_idx].f;
ppc->ref_frame[i].index = ppc->ref_frame_map[ref_idx];
ppc->ref_frame[i].width = ref_frame->width;
diff --git a/libavcodec/photocd.c b/libavcodec/photocd.c
index 50b465c1cd..23a8994ca3 100644
--- a/libavcodec/photocd.c
+++ b/libavcodec/photocd.c
@@ -293,7 +293,6 @@ static int photocd_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
PhotoCDContext *s = avctx->priv_data;
- ThreadFrame frame = { .f = data };
const uint8_t *buf = avpkt->data;
GetByteContext *gb = &s->gb;
AVFrame *p = data;
@@ -326,7 +325,7 @@ static int photocd_decode_frame(AVCodecContext *avctx, void *data,
if (ret < 0)
return ret;
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, p, 0)) < 0)
return ret;
p->pict_type = AV_PICTURE_TYPE_I;
diff --git a/libavcodec/pixlet.c b/libavcodec/pixlet.c
index 5361bc42bf..a07690c6d0 100644
--- a/libavcodec/pixlet.c
+++ b/libavcodec/pixlet.c
@@ -606,7 +606,6 @@ static int pixlet_decode_frame(AVCodecContext *avctx, void *data,
PixletContext *ctx = avctx->priv_data;
int i, w, h, width, height, ret, version;
AVFrame *p = data;
- ThreadFrame frame = { .f = data };
uint32_t pktsize, depth;
bytestream2_init(&ctx->gb, avpkt->data, avpkt->size);
@@ -673,20 +672,20 @@ static int pixlet_decode_frame(AVCodecContext *avctx, void *data,
p->key_frame = 1;
p->color_range = AVCOL_RANGE_JPEG;
- ret = ff_thread_get_buffer(avctx, &frame, 0);
+ ret = ff_thread_get_buffer(avctx, p, 0);
if (ret < 0)
return ret;
for (i = 0; i < 3; i++) {
- ret = decode_plane(avctx, i, avpkt, frame.f);
+ ret = decode_plane(avctx, i, avpkt, p);
if (ret < 0)
return ret;
if (avctx->flags & AV_CODEC_FLAG_GRAY)
break;
}
- postprocess_luma(avctx, frame.f, ctx->w, ctx->h, ctx->depth);
- postprocess_chroma(frame.f, ctx->w >> 1, ctx->h >> 1, ctx->depth);
+ postprocess_luma(avctx, p, ctx->w, ctx->h, ctx->depth);
+ postprocess_chroma(p, ctx->w >> 1, ctx->h >> 1, ctx->depth);
*got_frame = 1;
diff --git a/libavcodec/proresdec2.c b/libavcodec/proresdec2.c
index d2d881a3dd..3ab6666f74 100644
--- a/libavcodec/proresdec2.c
+++ b/libavcodec/proresdec2.c
@@ -779,7 +779,6 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
AVPacket *avpkt)
{
ProresContext *ctx = avctx->priv_data;
- ThreadFrame tframe = { .f = data };
AVFrame *frame = data;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
@@ -805,7 +804,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
buf += frame_hdr_size;
buf_size -= frame_hdr_size;
- if ((ret = ff_thread_get_buffer(avctx, &tframe, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
return ret;
ff_thread_finish_setup(avctx);
diff --git a/libavcodec/pthread_frame.c b/libavcodec/pthread_frame.c
index f405622ca1..75b70a17ec 100644
--- a/libavcodec/pthread_frame.c
+++ b/libavcodec/pthread_frame.c
@@ -948,15 +948,13 @@ FF_ENABLE_DEPRECATION_WARNINGS
return 1;
}
-static int thread_get_buffer_internal(AVCodecContext *avctx, ThreadFrame *f, int flags)
+static int thread_get_buffer_internal(AVCodecContext *avctx, AVFrame *f, int flags)
{
PerThreadContext *p;
int err;
- f->owner[0] = f->owner[1] = avctx;
-
if (!(avctx->active_thread_type & FF_THREAD_FRAME))
- return ff_get_buffer(avctx, f->f, flags);
+ return ff_get_buffer(avctx, f, flags);
p = avctx->internal->thread_ctx;
FF_DISABLE_DEPRECATION_WARNINGS
@@ -971,28 +969,16 @@ FF_ENABLE_DEPRECATION_WARNINGS
return -1;
}
- if (avctx->codec->caps_internal & FF_CODEC_CAP_ALLOCATE_PROGRESS) {
- atomic_int *progress;
- f->progress = av_buffer_alloc(2 * sizeof(*progress));
- if (!f->progress) {
- return AVERROR(ENOMEM);
- }
- progress = (atomic_int*)f->progress->data;
-
- atomic_init(&progress[0], -1);
- atomic_init(&progress[1], -1);
- }
-
pthread_mutex_lock(&p->parent->buffer_mutex);
#if !FF_API_THREAD_SAFE_CALLBACKS
err = ff_get_buffer(avctx, f->f, flags);
#else
FF_DISABLE_DEPRECATION_WARNINGS
if (THREAD_SAFE_CALLBACKS(avctx)) {
- err = ff_get_buffer(avctx, f->f, flags);
+ err = ff_get_buffer(avctx, f, flags);
} else {
pthread_mutex_lock(&p->progress_mutex);
- p->requested_frame = f->f;
+ p->requested_frame = f;
p->requested_flags = flags;
atomic_store_explicit(&p->state, STATE_GET_BUFFER, memory_order_release);
pthread_cond_broadcast(&p->progress_cond);
@@ -1009,8 +995,6 @@ FF_DISABLE_DEPRECATION_WARNINGS
ff_thread_finish_setup(avctx);
FF_ENABLE_DEPRECATION_WARNINGS
#endif
- if (err)
- av_buffer_unref(&f->progress);
pthread_mutex_unlock(&p->parent->buffer_mutex);
@@ -1049,7 +1033,7 @@ enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixe
FF_ENABLE_DEPRECATION_WARNINGS
#endif
-int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
+int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags)
{
int ret = thread_get_buffer_internal(avctx, f, flags);
if (ret < 0)
@@ -1059,10 +1043,36 @@ int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
{
- return ff_thread_get_buffer(avctx, f, flags);
+ int ret;
+
+ f->owner[0] = f->owner[1] = avctx;
+ /* Hint: It is possible for this function to be called with codecs
+ * that don't support frame threading at all, namely in case
+ * a frame-threaded decoder shares code with codecs that are not.
+ * This currently affects non-MPEG-4 mpegvideo codecs and VP7.
+ * The following check will always be true for them. */
+ if (!(avctx->active_thread_type & FF_THREAD_FRAME))
+ return ff_get_buffer(avctx, f->f, flags);
+
+ if (avctx->codec->caps_internal & FF_CODEC_CAP_ALLOCATE_PROGRESS) {
+ atomic_int *progress;
+ f->progress = av_buffer_alloc(2 * sizeof(*progress));
+ if (!f->progress) {
+ return AVERROR(ENOMEM);
+ }
+ progress = (atomic_int*)f->progress->data;
+
+ atomic_init(&progress[0], -1);
+ atomic_init(&progress[1], -1);
+ }
+
+ ret = ff_thread_get_buffer(avctx, f->f, flags);
+ if (ret)
+ av_buffer_unref(&f->progress);
+ return ret;
}
-void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
+void ff_thread_release_buffer(AVCodecContext *avctx, AVFrame *f)
{
#if FF_API_THREAD_SAFE_CALLBACKS
FF_DISABLE_DEPRECATION_WARNINGS
@@ -1075,21 +1085,18 @@ FF_DISABLE_DEPRECATION_WARNINGS
FF_ENABLE_DEPRECATION_WARNINGS
#endif
- if (!f->f)
+ if (!f)
return;
if (avctx->debug & FF_DEBUG_BUFFERS)
av_log(avctx, AV_LOG_DEBUG, "thread_release_buffer called on pic %p\n", f);
- av_buffer_unref(&f->progress);
- f->owner[0] = f->owner[1] = NULL;
-
#if !FF_API_THREAD_SAFE_CALLBACKS
av_frame_unref(f->f);
#else
// when the frame buffers are not allocated, just reset it to clean state
- if (can_direct_free || !f->f->buf[0]) {
- av_frame_unref(f->f);
+ if (can_direct_free || !f->buf[0]) {
+ av_frame_unref(f);
return;
}
@@ -1113,7 +1120,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
}
dst = p->released_buffers[p->num_released_buffers];
- av_frame_move_ref(dst, f->f);
+ av_frame_move_ref(dst, f);
p->num_released_buffers++;
@@ -1124,15 +1131,17 @@ fail:
// this leaks, but it is better than crashing
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "Could not queue a frame for freeing, this will leak\n");
- memset(f->f->buf, 0, sizeof(f->f->buf));
- if (f->f->extended_buf)
- memset(f->f->extended_buf, 0, f->f->nb_extended_buf * sizeof(*f->f->extended_buf));
- av_frame_unref(f->f);
+ memset(f->buf, 0, sizeof(f->buf));
+ if (f->extended_buf)
+ memset(f->extended_buf, 0, f->nb_extended_buf * sizeof(*f->extended_buf));
+ av_frame_unref(f);
}
#endif
}
void ff_thread_release_ext_buffer(AVCodecContext *avctx, ThreadFrame *f)
{
- ff_thread_release_buffer(avctx, f);
+ av_buffer_unref(&f->progress);
+ f->owner[0] = f->owner[1] = NULL;
+ ff_thread_release_buffer(avctx, f->f);
}
diff --git a/libavcodec/rv34.c b/libavcodec/rv34.c
index febd41f577..1a0ca6b520 100644
--- a/libavcodec/rv34.c
+++ b/libavcodec/rv34.c
@@ -41,6 +41,7 @@
#include "mpeg_er.h"
#include "qpeldsp.h"
#include "rectangle.h"
+#include "thread.h"
#include "threadframe.h"
#include "rv34vlc.h"
diff --git a/libavcodec/sheervideo.c b/libavcodec/sheervideo.c
index fb5a39303e..8505d124c4 100644
--- a/libavcodec/sheervideo.c
+++ b/libavcodec/sheervideo.c
@@ -1810,7 +1810,6 @@ static int decode_frame(AVCodecContext *avctx,
AVPacket *avpkt)
{
SheerVideoContext *s = avctx->priv_data;
- ThreadFrame frame = { .f = data };
const SheerTable *table;
AVFrame *p = data;
GetBitContext gb;
@@ -1982,7 +1981,7 @@ static int decode_frame(AVCodecContext *avctx,
p->pict_type = AV_PICTURE_TYPE_I;
p->key_frame = 1;
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, p, 0)) < 0)
return ret;
if ((ret = init_get_bits8(&gb, avpkt->data + 20, avpkt->size - 20)) < 0)
diff --git a/libavcodec/takdec.c b/libavcodec/takdec.c
index 926dbf611e..19a3d75d2b 100644
--- a/libavcodec/takdec.c
+++ b/libavcodec/takdec.c
@@ -679,7 +679,6 @@ static int tak_decode_frame(AVCodecContext *avctx, void *data,
{
TAKDecContext *s = avctx->priv_data;
AVFrame *frame = data;
- ThreadFrame tframe = { .f = data };
GetBitContext *gb = &s->gb;
int chan, i, ret, hsize;
@@ -742,7 +741,7 @@ static int tak_decode_frame(AVCodecContext *avctx, void *data,
: s->ti.frame_samples;
frame->nb_samples = s->nb_samples;
- if ((ret = ff_thread_get_buffer(avctx, &tframe, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
return ret;
ff_thread_finish_setup(avctx);
diff --git a/libavcodec/thread.h b/libavcodec/thread.h
index 091dc8a35e..716301b29f 100644
--- a/libavcodec/thread.h
+++ b/libavcodec/thread.h
@@ -31,14 +31,6 @@
#include "avcodec.h"
-typedef struct ThreadFrame {
- AVFrame *f;
- AVCodecContext *owner[2];
- // progress->data is an array of 2 ints holding progress for top/bottom
- // fields
- AVBufferRef *progress;
-} ThreadFrame;
-
/**
* Wait for decoding threads to finish and reset internal state.
* Called by avcodec_flush_buffers().
@@ -92,7 +84,7 @@ enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixe
* @param avctx The current context.
* @param f The frame to write into.
*/
-int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags);
+int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags);
/**
* Wrapper around release_buffer() frame-for multithreaded codecs.
@@ -105,9 +97,7 @@ int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags);
* @param avctx The current context.
* @param f The picture being released.
*/
-void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f);
-
-int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src);
+void ff_thread_release_buffer(AVCodecContext *avctx, AVFrame *f);
int ff_thread_init(AVCodecContext *s);
int ff_slice_thread_execute_with_mainfunc(AVCodecContext *avctx,
diff --git a/libavcodec/threadframe.h b/libavcodec/threadframe.h
index be63e62435..dea4dadc6d 100644
--- a/libavcodec/threadframe.h
+++ b/libavcodec/threadframe.h
@@ -21,8 +21,16 @@
#ifndef AVCODEC_THREADFRAME_H
#define AVCODEC_THREADFRAME_H
+#include "libavutil/frame.h"
#include "avcodec.h"
-#include "thread.h"
+
+typedef struct ThreadFrame {
+ AVFrame *f;
+ AVCodecContext *owner[2];
+ // progress->data is an array of 2 ints holding progress for top/bottom
+ // fields
+ AVBufferRef *progress;
+} ThreadFrame;
/**
* Notify later decoding threads when part of their reference picture is ready.
@@ -74,4 +82,6 @@ int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags);
*/
void ff_thread_release_ext_buffer(AVCodecContext *avctx, ThreadFrame *f);
+int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src);
+
#endif
diff --git a/libavcodec/tiff.c b/libavcodec/tiff.c
index fd85d104dc..923f85d07f 100644
--- a/libavcodec/tiff.c
+++ b/libavcodec/tiff.c
@@ -1016,7 +1016,7 @@ static int dng_decode_tiles(AVCodecContext *avctx, AVFrame *frame,
return avpkt->size;
}
-static int init_image(TiffContext *s, ThreadFrame *frame)
+static int init_image(TiffContext *s, AVFrame *frame)
{
int ret;
int create_gray_palette = 0;
@@ -1177,11 +1177,11 @@ static int init_image(TiffContext *s, ThreadFrame *frame)
return ret;
if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
if (!create_gray_palette)
- memcpy(frame->f->data[1], s->palette, sizeof(s->palette));
+ memcpy(frame->data[1], s->palette, sizeof(s->palette));
else {
/* make default grayscale pal */
int i;
- uint32_t *pal = (uint32_t *)frame->f->data[1];
+ uint32_t *pal = (uint32_t *)frame->data[1];
for (i = 0; i < 1<<s->bpp; i++)
pal[i] = 0xFFU << 24 | i * 255 / ((1<<s->bpp) - 1) * 0x010101;
}
@@ -1743,7 +1743,6 @@ static int decode_frame(AVCodecContext *avctx,
{
TiffContext *const s = avctx->priv_data;
AVFrame *const p = data;
- ThreadFrame frame = { .f = data };
unsigned off, last_off;
int le, ret, plane, planes;
int i, j, entries, stride;
@@ -1894,7 +1893,7 @@ again:
}
/* now we have the data and may start decoding */
- if ((ret = init_image(s, &frame)) < 0)
+ if ((ret = init_image(s, p)) < 0)
return ret;
if (!s->is_tiled || has_strip_bits) {
diff --git a/libavcodec/tta.c b/libavcodec/tta.c
index 17b4ca9032..7ffae3d0ba 100644
--- a/libavcodec/tta.c
+++ b/libavcodec/tta.c
@@ -222,7 +222,6 @@ static int tta_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame_ptr, AVPacket *avpkt)
{
AVFrame *frame = data;
- ThreadFrame tframe = { .f = data };
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
TTAContext *s = avctx->priv_data;
@@ -242,7 +241,7 @@ static int tta_decode_frame(AVCodecContext *avctx, void *data,
/* get output buffer */
frame->nb_samples = framelen;
- if ((ret = ff_thread_get_buffer(avctx, &tframe, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
return ret;
// decode directly to output buffer for 24-bit sample format
diff --git a/libavcodec/utils.c b/libavcodec/utils.c
index bc16424f91..2561854c47 100644
--- a/libavcodec/utils.c
+++ b/libavcodec/utils.c
@@ -893,10 +893,9 @@ enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixe
return ff_get_format(avctx, fmt);
}
-int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
+int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags)
{
- f->owner[0] = f->owner[1] = avctx;
- return ff_get_buffer(avctx, f->f, flags);
+ return ff_get_buffer(avctx, f, flags);
}
int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
@@ -905,10 +904,10 @@ int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
return ff_get_buffer(avctx, f->f, flags);
}
-void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
+void ff_thread_release_buffer(AVCodecContext *avctx, AVFrame *f)
{
- if (f->f)
- av_frame_unref(f->f);
+ if (f)
+ av_frame_unref(f);
}
void ff_thread_release_ext_buffer(AVCodecContext *avctx, ThreadFrame *f)
diff --git a/libavcodec/utvideodec.c b/libavcodec/utvideodec.c
index b39d8a7948..a824112415 100644
--- a/libavcodec/utvideodec.c
+++ b/libavcodec/utvideodec.c
@@ -563,14 +563,14 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
UtvideoContext *c = avctx->priv_data;
+ AVFrame *const frame = data;
int i, j;
const uint8_t *plane_start[5];
int plane_size, max_slice_size = 0, slice_start, slice_end, slice_size;
int ret;
GetByteContext gb;
- ThreadFrame frame = { .f = data };
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
return ret;
/* parse plane structure to get frame flags and validate slice offsets */
@@ -709,80 +709,80 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
case AV_PIX_FMT_GBRP:
case AV_PIX_FMT_GBRAP:
for (i = 0; i < c->planes; i++) {
- ret = decode_plane(c, i, frame.f->data[i],
- frame.f->linesize[i], avctx->width,
+ ret = decode_plane(c, i, frame->data[i],
+ frame->linesize[i], avctx->width,
avctx->height, plane_start[i],
c->frame_pred == PRED_LEFT);
if (ret)
return ret;
if (c->frame_pred == PRED_MEDIAN) {
if (!c->interlaced) {
- restore_median_planar(c, frame.f->data[i],
- frame.f->linesize[i], avctx->width,
+ restore_median_planar(c, frame->data[i],
+ frame->linesize[i], avctx->width,
avctx->height, c->slices, 0);
} else {
- restore_median_planar_il(c, frame.f->data[i],
- frame.f->linesize[i],
+ restore_median_planar_il(c, frame->data[i],
+ frame->linesize[i],
avctx->width, avctx->height, c->slices,
0);
}
} else if (c->frame_pred == PRED_GRADIENT) {
if (!c->interlaced) {
- restore_gradient_planar(c, frame.f->data[i],
- frame.f->linesize[i], avctx->width,
+ restore_gradient_planar(c, frame->data[i],
+ frame->linesize[i], avctx->width,
avctx->height, c->slices, 0);
} else {
- restore_gradient_planar_il(c, frame.f->data[i],
- frame.f->linesize[i],
+ restore_gradient_planar_il(c, frame->data[i],
+ frame->linesize[i],
avctx->width, avctx->height, c->slices,
0);
}
}
}
- c->utdsp.restore_rgb_planes(frame.f->data[2], frame.f->data[0], frame.f->data[1],
- frame.f->linesize[2], frame.f->linesize[0], frame.f->linesize[1],
+ c->utdsp.restore_rgb_planes(frame->data[2], frame->data[0], frame->data[1],
+ frame->linesize[2], frame->linesize[0], frame->linesize[1],
avctx->width, avctx->height);
break;
case AV_PIX_FMT_GBRAP10:
case AV_PIX_FMT_GBRP10:
for (i = 0; i < c->planes; i++) {
- ret = decode_plane10(c, i, (uint16_t *)frame.f->data[i],
- frame.f->linesize[i] / 2, avctx->width,
+ ret = decode_plane10(c, i, (uint16_t *)frame->data[i],
+ frame->linesize[i] / 2, avctx->width,
avctx->height, plane_start[i],
plane_start[i + 1] - 1024,
c->frame_pred == PRED_LEFT);
if (ret)
return ret;
}
- c->utdsp.restore_rgb_planes10((uint16_t *)frame.f->data[2], (uint16_t *)frame.f->data[0], (uint16_t *)frame.f->data[1],
- frame.f->linesize[2] / 2, frame.f->linesize[0] / 2, frame.f->linesize[1] / 2,
+ c->utdsp.restore_rgb_planes10((uint16_t *)frame->data[2], (uint16_t *)frame->data[0], (uint16_t *)frame->data[1],
+ frame->linesize[2] / 2, frame->linesize[0] / 2, frame->linesize[1] / 2,
avctx->width, avctx->height);
break;
case AV_PIX_FMT_YUV420P:
for (i = 0; i < 3; i++) {
- ret = decode_plane(c, i, frame.f->data[i], frame.f->linesize[i],
+ ret = decode_plane(c, i, frame->data[i], frame->linesize[i],
avctx->width >> !!i, avctx->height >> !!i,
plane_start[i], c->frame_pred == PRED_LEFT);
if (ret)
return ret;
if (c->frame_pred == PRED_MEDIAN) {
if (!c->interlaced) {
- restore_median_planar(c, frame.f->data[i], frame.f->linesize[i],
+ restore_median_planar(c, frame->data[i], frame->linesize[i],
avctx->width >> !!i, avctx->height >> !!i,
c->slices, !i);
} else {
- restore_median_planar_il(c, frame.f->data[i], frame.f->linesize[i],
+ restore_median_planar_il(c, frame->data[i], frame->linesize[i],
avctx->width >> !!i,
avctx->height >> !!i,
c->slices, !i);
}
} else if (c->frame_pred == PRED_GRADIENT) {
if (!c->interlaced) {
- restore_gradient_planar(c, frame.f->data[i], frame.f->linesize[i],
+ restore_gradient_planar(c, frame->data[i], frame->linesize[i],
avctx->width >> !!i, avctx->height >> !!i,
c->slices, !i);
} else {
- restore_gradient_planar_il(c, frame.f->data[i], frame.f->linesize[i],
+ restore_gradient_planar_il(c, frame->data[i], frame->linesize[i],
avctx->width >> !!i,
avctx->height >> !!i,
c->slices, !i);
@@ -792,28 +792,28 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
break;
case AV_PIX_FMT_YUV422P:
for (i = 0; i < 3; i++) {
- ret = decode_plane(c, i, frame.f->data[i], frame.f->linesize[i],
+ ret = decode_plane(c, i, frame->data[i], frame->linesize[i],
avctx->width >> !!i, avctx->height,
plane_start[i], c->frame_pred == PRED_LEFT);
if (ret)
return ret;
if (c->frame_pred == PRED_MEDIAN) {
if (!c->interlaced) {
- restore_median_planar(c, frame.f->data[i], frame.f->linesize[i],
+ restore_median_planar(c, frame->data[i], frame->linesize[i],
avctx->width >> !!i, avctx->height,
c->slices, 0);
} else {
- restore_median_planar_il(c, frame.f->data[i], frame.f->linesize[i],
+ restore_median_planar_il(c, frame->data[i], frame->linesize[i],
avctx->width >> !!i, avctx->height,
c->slices, 0);
}
} else if (c->frame_pred == PRED_GRADIENT) {
if (!c->interlaced) {
- restore_gradient_planar(c, frame.f->data[i], frame.f->linesize[i],
+ restore_gradient_planar(c, frame->data[i], frame->linesize[i],
avctx->width >> !!i, avctx->height,
c->slices, 0);
} else {
- restore_gradient_planar_il(c, frame.f->data[i], frame.f->linesize[i],
+ restore_gradient_planar_il(c, frame->data[i], frame->linesize[i],
avctx->width >> !!i, avctx->height,
c->slices, 0);
}
@@ -822,28 +822,28 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
break;
case AV_PIX_FMT_YUV444P:
for (i = 0; i < 3; i++) {
- ret = decode_plane(c, i, frame.f->data[i], frame.f->linesize[i],
+ ret = decode_plane(c, i, frame->data[i], frame->linesize[i],
avctx->width, avctx->height,
plane_start[i], c->frame_pred == PRED_LEFT);
if (ret)
return ret;
if (c->frame_pred == PRED_MEDIAN) {
if (!c->interlaced) {
- restore_median_planar(c, frame.f->data[i], frame.f->linesize[i],
+ restore_median_planar(c, frame->data[i], frame->linesize[i],
avctx->width, avctx->height,
c->slices, 0);
} else {
- restore_median_planar_il(c, frame.f->data[i], frame.f->linesize[i],
+ restore_median_planar_il(c, frame->data[i], frame->linesize[i],
avctx->width, avctx->height,
c->slices, 0);
}
} else if (c->frame_pred == PRED_GRADIENT) {
if (!c->interlaced) {
- restore_gradient_planar(c, frame.f->data[i], frame.f->linesize[i],
+ restore_gradient_planar(c, frame->data[i], frame->linesize[i],
avctx->width, avctx->height,
c->slices, 0);
} else {
- restore_gradient_planar_il(c, frame.f->data[i], frame.f->linesize[i],
+ restore_gradient_planar_il(c, frame->data[i], frame->linesize[i],
avctx->width, avctx->height,
c->slices, 0);
}
@@ -852,7 +852,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
break;
case AV_PIX_FMT_YUV420P10:
for (i = 0; i < 3; i++) {
- ret = decode_plane10(c, i, (uint16_t *)frame.f->data[i], frame.f->linesize[i] / 2,
+ ret = decode_plane10(c, i, (uint16_t *)frame->data[i], frame->linesize[i] / 2,
avctx->width >> !!i, avctx->height >> !!i,
plane_start[i], plane_start[i + 1] - 1024, c->frame_pred == PRED_LEFT);
if (ret)
@@ -861,7 +861,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
break;
case AV_PIX_FMT_YUV422P10:
for (i = 0; i < 3; i++) {
- ret = decode_plane10(c, i, (uint16_t *)frame.f->data[i], frame.f->linesize[i] / 2,
+ ret = decode_plane10(c, i, (uint16_t *)frame->data[i], frame->linesize[i] / 2,
avctx->width >> !!i, avctx->height,
plane_start[i], plane_start[i + 1] - 1024, c->frame_pred == PRED_LEFT);
if (ret)
@@ -870,9 +870,9 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
break;
}
- frame.f->key_frame = 1;
- frame.f->pict_type = AV_PICTURE_TYPE_I;
- frame.f->interlaced_frame = !!c->interlaced;
+ frame->key_frame = 1;
+ frame->pict_type = AV_PICTURE_TYPE_I;
+ frame->interlaced_frame = !!c->interlaced;
*got_frame = 1;
diff --git a/libavcodec/v210dec.c b/libavcodec/v210dec.c
index 0c95728781..19dd4eeeec 100644
--- a/libavcodec/v210dec.c
+++ b/libavcodec/v210dec.c
@@ -142,7 +142,6 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
V210DecContext *s = avctx->priv_data;
ThreadData td;
int ret, stride, aligned_input;
- ThreadFrame frame = { .f = data };
AVFrame *pic = data;
const uint8_t *psrc = avpkt->data;
@@ -177,7 +176,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
ff_v210dec_init(s);
}
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
return ret;
pic->pict_type = AV_PICTURE_TYPE_I;
diff --git a/libavcodec/v410dec.c b/libavcodec/v410dec.c
index c23f84942d..ecf3ae2053 100644
--- a/libavcodec/v410dec.c
+++ b/libavcodec/v410dec.c
@@ -89,7 +89,6 @@ static int v410_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
ThreadData td;
- ThreadFrame frame = { .f = data };
AVFrame *pic = data;
uint8_t *src = avpkt->data;
int ret;
@@ -101,7 +100,7 @@ static int v410_decode_frame(AVCodecContext *avctx, void *data,
return AVERROR(EINVAL);
}
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
return ret;
pic->key_frame = 1;
diff --git a/libavcodec/vaapi_av1.c b/libavcodec/vaapi_av1.c
index 5985493b8d..63374c31c9 100644
--- a/libavcodec/vaapi_av1.c
+++ b/libavcodec/vaapi_av1.c
@@ -18,14 +18,16 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include "libavutil/frame.h"
#include "libavutil/pixdesc.h"
#include "hwconfig.h"
#include "vaapi_decode.h"
#include "internal.h"
#include "av1dec.h"
+#include "thread.h"
typedef struct VAAPIAV1FrameRef {
- ThreadFrame frame;
+ AVFrame *frame;
int valid;
} VAAPIAV1FrameRef;
@@ -40,13 +42,13 @@ typedef struct VAAPIAV1DecContext {
* used to apply film grain and push to downstream.
*/
VAAPIAV1FrameRef ref_tab[AV1_NUM_REF_FRAMES];
- ThreadFrame tmp_frame;
+ AVFrame *tmp_frame;
} VAAPIAV1DecContext;
static VASurfaceID vaapi_av1_surface_id(AV1Frame *vf)
{
if (vf)
- return ff_vaapi_get_surface_id(vf->tf.f);
+ return ff_vaapi_get_surface_id(vf->f);
else
return VA_INVALID_SURFACE;
}
@@ -73,16 +75,16 @@ static int vaapi_av1_decode_init(AVCodecContext *avctx)
{
VAAPIAV1DecContext *ctx = avctx->internal->hwaccel_priv_data;
- ctx->tmp_frame.f = av_frame_alloc();
- if (!ctx->tmp_frame.f) {
+ ctx->tmp_frame = av_frame_alloc();
+ if (!ctx->tmp_frame) {
av_log(avctx, AV_LOG_ERROR,
"Failed to allocate frame.\n");
return AVERROR(ENOMEM);
}
for (int i = 0; i < FF_ARRAY_ELEMS(ctx->ref_tab); i++) {
- ctx->ref_tab[i].frame.f = av_frame_alloc();
- if (!ctx->ref_tab[i].frame.f) {
+ ctx->ref_tab[i].frame = av_frame_alloc();
+ if (!ctx->ref_tab[i].frame) {
av_log(avctx, AV_LOG_ERROR,
"Failed to allocate reference table frame %d.\n", i);
return AVERROR(ENOMEM);
@@ -97,14 +99,14 @@ static int vaapi_av1_decode_uninit(AVCodecContext *avctx)
{
VAAPIAV1DecContext *ctx = avctx->internal->hwaccel_priv_data;
- if (ctx->tmp_frame.f->buf[0])
- ff_thread_release_buffer(avctx, &ctx->tmp_frame);
- av_frame_free(&ctx->tmp_frame.f);
+ if (ctx->tmp_frame->buf[0])
+ ff_thread_release_buffer(avctx, ctx->tmp_frame);
+ av_frame_free(&ctx->tmp_frame);
for (int i = 0; i < FF_ARRAY_ELEMS(ctx->ref_tab); i++) {
- if (ctx->ref_tab[i].frame.f->buf[0])
- ff_thread_release_buffer(avctx, &ctx->ref_tab[i].frame);
- av_frame_free(&ctx->ref_tab[i].frame.f);
+ if (ctx->ref_tab[i].frame->buf[0])
+ ff_thread_release_buffer(avctx, ctx->ref_tab[i].frame);
+ av_frame_free(&ctx->ref_tab[i].frame);
}
return ff_vaapi_decode_uninit(avctx);
@@ -135,12 +137,12 @@ static int vaapi_av1_start_frame(AVCodecContext *avctx,
goto fail;
if (apply_grain) {
- if (ctx->tmp_frame.f->buf[0])
- ff_thread_release_buffer(avctx, &ctx->tmp_frame);
- err = ff_thread_get_buffer(avctx, &ctx->tmp_frame, AV_GET_BUFFER_FLAG_REF);
+ if (ctx->tmp_frame->buf[0])
+ ff_thread_release_buffer(avctx, ctx->tmp_frame);
+ err = ff_thread_get_buffer(avctx, ctx->tmp_frame, AV_GET_BUFFER_FLAG_REF);
if (err < 0)
goto fail;
- pic->output_surface = ff_vaapi_get_surface_id(ctx->tmp_frame.f);
+ pic->output_surface = ff_vaapi_get_surface_id(ctx->tmp_frame);
} else {
pic->output_surface = vaapi_av1_surface_id(&s->cur_frame);
}
@@ -276,7 +278,7 @@ static int vaapi_av1_start_frame(AVCodecContext *avctx,
pic_param.ref_frame_map[i] = VA_INVALID_ID;
else
pic_param.ref_frame_map[i] = ctx->ref_tab[i].valid ?
- ff_vaapi_get_surface_id(ctx->ref_tab[i].frame.f) :
+ ff_vaapi_get_surface_id(ctx->ref_tab[i].frame) :
vaapi_av1_surface_id(&s->ref[i]);
}
for (int i = 0; i < AV1_REFS_PER_FRAME; i++) {
@@ -380,11 +382,11 @@ static int vaapi_av1_end_frame(AVCodecContext *avctx)
for (int i = 0; i < AV1_NUM_REF_FRAMES; i++) {
if (header->refresh_frame_flags & (1 << i)) {
- if (ctx->ref_tab[i].frame.f->buf[0])
- ff_thread_release_buffer(avctx, &ctx->ref_tab[i].frame);
+ if (ctx->ref_tab[i].frame->buf[0])
+ ff_thread_release_buffer(avctx, ctx->ref_tab[i].frame);
if (apply_grain) {
- ret = ff_thread_ref_frame(&ctx->ref_tab[i].frame, &ctx->tmp_frame);
+ ret = av_frame_ref(ctx->ref_tab[i].frame, ctx->tmp_frame);
if (ret < 0)
return ret;
ctx->ref_tab[i].valid = 1;
diff --git a/libavcodec/vble.c b/libavcodec/vble.c
index f1400959e0..bb542bef42 100644
--- a/libavcodec/vble.c
+++ b/libavcodec/vble.c
@@ -125,7 +125,6 @@ static int vble_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
int offset = 0;
int width_uv = avctx->width / 2, height_uv = avctx->height / 2;
int ret;
- ThreadFrame frame = { .f = data };
if (avpkt->size < 4 || avpkt->size - 4 > INT_MAX/8) {
av_log(avctx, AV_LOG_ERROR, "Invalid packet size\n");
@@ -133,7 +132,7 @@ static int vble_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
}
/* Allocate buffer */
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
return ret;
/* Set flags */
diff --git a/libavcodec/vp8.h b/libavcodec/vp8.h
index e6091dfe04..fc2662bf0d 100644
--- a/libavcodec/vp8.h
+++ b/libavcodec/vp8.h
@@ -33,7 +33,7 @@
#include "libavutil/thread.h"
#include "h264pred.h"
-#include "thread.h"
+#include "threadframe.h"
#include "vp56.h"
#include "vp8dsp.h"
diff --git a/libavcodec/vp9shared.h b/libavcodec/vp9shared.h
index 54726df742..ebaa11d2c1 100644
--- a/libavcodec/vp9shared.h
+++ b/libavcodec/vp9shared.h
@@ -28,7 +28,7 @@
#include <stdint.h>
#include "vp9.h"
-#include "thread.h"
+#include "threadframe.h"
#include "vp56.h"
enum BlockPartition {
diff --git a/libavcodec/webp.c b/libavcodec/webp.c
index d5a81fd527..9e642e050a 100644
--- a/libavcodec/webp.c
+++ b/libavcodec/webp.c
@@ -568,8 +568,7 @@ static int decode_entropy_coded_image(WebPContext *s, enum ImageRole role,
img->frame->height = h;
if (role == IMAGE_ROLE_ARGB && !img->is_alpha_primary) {
- ThreadFrame pt = { .f = img->frame };
- ret = ff_thread_get_buffer(s->avctx, &pt, 0);
+ ret = ff_thread_get_buffer(s->avctx, img->frame, 0);
} else
ret = av_frame_get_buffer(img->frame, 1);
if (ret < 0)
diff --git a/libavcodec/ylc.c b/libavcodec/ylc.c
index 1c2b5000fe..cbe33fe174 100644
--- a/libavcodec/ylc.c
+++ b/libavcodec/ylc.c
@@ -285,7 +285,6 @@ static int decode_frame(AVCodecContext *avctx,
int TL[4] = { 128, 128, 128, 128 };
int L[4] = { 128, 128, 128, 128 };
YLCContext *s = avctx->priv_data;
- ThreadFrame frame = { .f = data };
const uint8_t *buf = avpkt->data;
int ret, x, y, toffset, boffset;
AVFrame * const p = data;
@@ -307,7 +306,7 @@ static int decode_frame(AVCodecContext *avctx,
if (toffset >= boffset || boffset >= avpkt->size)
return AVERROR_INVALIDDATA;
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, p, 0)) < 0)
return ret;
av_fast_malloc(&s->buffer, &s->buffer_size,
--
2.32.0
_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
^ permalink raw reply [flat|nested] 6+ messages in thread