Git Inbox Mirror of the ffmpeg-devel mailing list - see https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
From: Nicolas George <george@nsup.org>
To: FFmpeg development discussions and patches <ffmpeg-devel@ffmpeg.org>
Cc: Timo Rothenpieler <timo@rothenpieler.org>
Subject: Re: [FFmpeg-devel] [RFC] avdevice/lavfi: output wrapped AVFrames
Date: Sun, 3 Jul 2022 15:19:06 +0200
Message-ID: <YsGXSobpFvAQrKSO@phare.normalesup.org> (raw)
In-Reply-To: <20220629195250.29341-1-timo@rothenpieler.org>



Timo Rothenpieler (12022-06-29):
> The lavfi avdevice as it is right now can't output "special frames"
> like hardware frames.
> This patch makes the lavfi avdevice output wrapped AVFrames instead
> of copying the actual data, greatly simplifying it in the process.

Thanks for the patch. I am not familiar with the hardware frame
infrastructure, but this patch makes the whole code much simpler, and
presumably more efficient to boot. That alone makes it worthwhile.

If it also makes it possible to use hardware frames, even in a limited
or fragile way, that is a bonus. I do not see how anybody could object.

> I am not at all sure if this has some unexpected consequences.
> It works just fine with ffmpeg.c, but it might be an ABI break for
> potential library consumers?

I think the case can be made for both positions: on the one hand, an
application should not assume anything about the kind of packets a
demuxer will output and should therefore be ready for wrapped AVFrames;
on the other hand, devices are special, and somebody who uses a
specific device on purpose may make assumptions about its behavior.

I think this case is fringe enough that we can accept the break, at
least if the device only outputs normal frames by default, not hardware
frames.
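
For what it is worth, a library consumer does not need to poke at the
packet data by hand: it can push the packets through the normal
decoding path, and the wrapped_avframe decoder in lavc hands the
original AVFrame back, hardware or not. A minimal sketch of what I mean
(the helper name and the per-packet context are mine, only for
illustration; a real application would of course open the decoder once
per stream):

    #include <libavcodec/avcodec.h>
    #include <libavformat/avformat.h>

    /* Decode one wrapped-AVFrame packet back into an AVFrame. */
    static int decode_wrapped(AVStream *st, AVPacket *pkt, AVFrame *out)
    {
        const AVCodec *dec = avcodec_find_decoder(st->codecpar->codec_id);
        AVCodecContext *ctx;
        int ret;

        if (!dec) /* AV_CODEC_ID_WRAPPED_AVFRAME has a decoder in lavc */
            return AVERROR_DECODER_NOT_FOUND;
        ctx = avcodec_alloc_context3(dec);
        if (!ctx)
            return AVERROR(ENOMEM);
        ret = avcodec_parameters_to_context(ctx, st->codecpar);
        if (ret >= 0)
            ret = avcodec_open2(ctx, dec, NULL);
        if (ret >= 0)
            ret = avcodec_send_packet(ctx, pkt);
        if (ret >= 0)
            ret = avcodec_receive_frame(ctx, out);
        avcodec_free_context(&ctx);
        return ret;
    }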

> 
>  libavdevice/lavfi.c | 108 +++++++++++++-------------------------------
>  1 file changed, 31 insertions(+), 77 deletions(-)
> 
> diff --git a/libavdevice/lavfi.c b/libavdevice/lavfi.c
> index db5d0b94de..6bfdb5033e 100644
> --- a/libavdevice/lavfi.c
> +++ b/libavdevice/lavfi.c
> @@ -54,32 +54,10 @@ typedef struct {
>      int *sink_eof;
>      int *stream_sink_map;
>      int *sink_stream_subcc_map;
> -    AVFrame *decoded_frame;
>      int nb_sinks;
>      AVPacket subcc_packet;
>  } LavfiContext;
>  
> -static int *create_all_formats(int n)
> -{
> -    int i, j, *fmts, count = 0;
> -
> -    for (i = 0; i < n; i++) {
> -        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(i);
> -        if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
> -            count++;
> -    }
> -
> -    if (!(fmts = av_malloc_array(count + 1, sizeof(*fmts))))
> -        return NULL;
> -    for (j = 0, i = 0; i < n; i++) {
> -        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(i);
> -        if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
> -            fmts[j++] = i;
> -    }
> -    fmts[j] = AV_PIX_FMT_NONE;
> -    return fmts;
> -}
> -
>  av_cold static int lavfi_read_close(AVFormatContext *avctx)
>  {
>      LavfiContext *lavfi = avctx->priv_data;
> @@ -90,7 +68,6 @@ av_cold static int lavfi_read_close(AVFormatContext *avctx)
>      av_freep(&lavfi->sink_stream_subcc_map);
>      av_freep(&lavfi->sinks);
>      avfilter_graph_free(&lavfi->graph);
> -    av_frame_free(&lavfi->decoded_frame);
>  
>      return 0;
>  }
> @@ -125,15 +102,11 @@ av_cold static int lavfi_read_header(AVFormatContext *avctx)
>      LavfiContext *lavfi = avctx->priv_data;
>      AVFilterInOut *input_links = NULL, *output_links = NULL, *inout;
>      const AVFilter *buffersink, *abuffersink;
> -    int *pix_fmts = create_all_formats(AV_PIX_FMT_NB);
>      enum AVMediaType type;
>      int ret = 0, i, n;
>  
>  #define FAIL(ERR) { ret = ERR; goto end; }
>  
> -    if (!pix_fmts)
> -        FAIL(AVERROR(ENOMEM));
> -
>      buffersink = avfilter_get_by_name("buffersink");
>      abuffersink = avfilter_get_by_name("abuffersink");
>  
> @@ -264,22 +237,12 @@ av_cold static int lavfi_read_header(AVFormatContext *avctx)
>              ret = avfilter_graph_create_filter(&sink, buffersink,
>                                                 inout->name, NULL,
>                                                 NULL, lavfi->graph);
> -            if (ret >= 0)
> -                ret = av_opt_set_int_list(sink, "pix_fmts", pix_fmts,  AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
>              if (ret < 0)
>                  goto end;
>          } else if (type == AVMEDIA_TYPE_AUDIO) {
> -            static const enum AVSampleFormat sample_fmts[] = {
> -                AV_SAMPLE_FMT_U8,  AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32,
> -                AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL,
> -            };
> -
>              ret = avfilter_graph_create_filter(&sink, abuffersink,
>                                                 inout->name, NULL,
>                                                 NULL, lavfi->graph);
> -            if (ret >= 0)
> -                ret = av_opt_set_bin(sink, "sample_fmts", (const uint8_t*)sample_fmts,
> -                                     sizeof(sample_fmts), AV_OPT_SEARCH_CHILDREN);
>              if (ret < 0)
>                  goto end;
>              ret = av_opt_set_int(sink, "all_channel_counts", 1,
> @@ -320,39 +283,25 @@ av_cold static int lavfi_read_header(AVFormatContext *avctx)
>          AVCodecParameters *const par = st->codecpar;
>          avpriv_set_pts_info(st, 64, time_base.num, time_base.den);
>          par->codec_type = av_buffersink_get_type(sink);
> +        par->codec_id   = AV_CODEC_ID_WRAPPED_AVFRAME;
> +        par->format     = av_buffersink_get_format(sink);
>          if (par->codec_type == AVMEDIA_TYPE_VIDEO) {
> -            int64_t probesize;
> -            par->codec_id   = AV_CODEC_ID_RAWVIDEO;
> -            par->format     = av_buffersink_get_format(sink);
>              par->width      = av_buffersink_get_w(sink);
>              par->height     = av_buffersink_get_h(sink);
> -            probesize       = par->width * par->height * 30 *
> -                              av_get_padded_bits_per_pixel(av_pix_fmt_desc_get(par->format));
> -            avctx->probesize = FFMAX(avctx->probesize, probesize);
> -            st       ->sample_aspect_ratio =
> +            st ->sample_aspect_ratio =
>              par->sample_aspect_ratio = av_buffersink_get_sample_aspect_ratio(sink);
>          } else if (par->codec_type == AVMEDIA_TYPE_AUDIO) {
>              par->sample_rate = av_buffersink_get_sample_rate(sink);
>              ret = av_buffersink_get_ch_layout(sink, &par->ch_layout);
>              if (ret < 0)
>                  goto end;
> -            par->format      = av_buffersink_get_format(sink);
> -            par->codec_id    = av_get_pcm_codec(par->format, -1);
> -            if (par->codec_id == AV_CODEC_ID_NONE)
> -                av_log(avctx, AV_LOG_ERROR,
> -                       "Could not find PCM codec for sample format %s.\n",
> -                       av_get_sample_fmt_name(par->format));
>          }
>      }
>  
>      if ((ret = create_subcc_streams(avctx)) < 0)
>          goto end;
>  
> -    if (!(lavfi->decoded_frame = av_frame_alloc()))
> -        FAIL(AVERROR(ENOMEM));
> -
>  end:
> -    av_free(pix_fmts);
>      avfilter_inout_free(&input_links);
>      avfilter_inout_free(&output_links);
>      return ret;
> @@ -378,22 +327,31 @@ static int create_subcc_packet(AVFormatContext *avctx, AVFrame *frame,
>      return 0;
>  }
>  
> +static void lavfi_free_frame(void *opaque, uint8_t *data)
> +{
> +    AVFrame *frame = (AVFrame*)data;
> +    av_frame_free(&frame);
> +}
> +
>  static int lavfi_read_packet(AVFormatContext *avctx, AVPacket *pkt)
>  {
>      LavfiContext *lavfi = avctx->priv_data;
>      double min_pts = DBL_MAX;
>      int stream_idx, min_pts_sink_idx = 0;
> -    AVFrame *frame = lavfi->decoded_frame;
> +    AVFrame *frame;
>      AVDictionary *frame_metadata;
>      int ret, i;
>      int size = 0;
> -    AVStream *st;
>  
>      if (lavfi->subcc_packet.size) {
>          av_packet_move_ref(pkt, &lavfi->subcc_packet);
>          return pkt->size;
>      }
>  
> +    frame = av_frame_alloc();
> +    if (!frame)
> +        return AVERROR(ENOMEM);
> +
>      /* iterate through all the graph sinks. Select the sink with the
>       * minimum PTS */
>      for (i = 0; i < lavfi->nb_sinks; i++) {
> @@ -411,7 +369,7 @@ static int lavfi_read_packet(AVFormatContext *avctx, AVPacket *pkt)
>              lavfi->sink_eof[i] = 1;
>              continue;
>          } else if (ret < 0)
> -            return ret;
> +            goto fail;
>          d = av_rescale_q_rnd(frame->pts, tb, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
>          ff_dlog(avctx, "sink_idx:%d time:%f\n", i, d);
>          av_frame_unref(frame);
> @@ -421,29 +379,15 @@ static int lavfi_read_packet(AVFormatContext *avctx, AVPacket *pkt)
>              min_pts_sink_idx = i;
>          }
>      }
> -    if (min_pts == DBL_MAX)
> -        return AVERROR_EOF;
> +    if (min_pts == DBL_MAX) {
> +        ret = AVERROR_EOF;
> +        goto fail;
> +    }
>  
>      ff_dlog(avctx, "min_pts_sink_idx:%i\n", min_pts_sink_idx);
>  
>      av_buffersink_get_frame_flags(lavfi->sinks[min_pts_sink_idx], frame, 0);
>      stream_idx = lavfi->sink_stream_map[min_pts_sink_idx];
> -    st = avctx->streams[stream_idx];
> -
> -    if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
> -        size = av_image_get_buffer_size(frame->format, frame->width, frame->height, 1);
> -        if ((ret = av_new_packet(pkt, size)) < 0)
> -            goto fail;
> -
> -        av_image_copy_to_buffer(pkt->data, size, (const uint8_t **)frame->data, frame->linesize,
> -                                frame->format, frame->width, frame->height, 1);
> -    } else if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
> -        size = frame->nb_samples * av_get_bytes_per_sample(frame->format) *
> -                                   frame->ch_layout.nb_channels;
> -        if ((ret = av_new_packet(pkt, size)) < 0)
> -            goto fail;
> -        memcpy(pkt->data, frame->data[0], size);
> -    }
>  
>      frame_metadata = frame->metadata;
>      if (frame_metadata) {
> @@ -465,13 +409,23 @@ static int lavfi_read_packet(AVFormatContext *avctx, AVPacket *pkt)
>          goto fail;
>      }
>  

> +    pkt->buf = av_buffer_create((uint8_t*)frame, sizeof(*frame),
> +                                &lavfi_free_frame, NULL, 0);
> +    if (!pkt->buf) {
> +        ret = AVERROR(ENOMEM);
> +        goto fail;
> +    }

This uses sizeof(AVFrame) outside of libavutil, which is not allowed.

I was about to suggest making this a separate utility function, but one
already exists: wrapped_avframe_encode(); please use it instead.

(OTOH, wrapped_avframe_encode() also uses sizeof(AVFrame), so it needs
fixing too.)
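
To make the constraint concrete: whatever shared helper ends up being
used, the point is that sizeof(AVFrame) gets evaluated in exactly one
place that is allowed to know it, and callers such as lavfi.c only see
an opaque buffer. Roughly the shape I have in mind (the function name
is made up for illustration, it is not an existing API):

    #include <libavcodec/avcodec.h>  /* AVPacket, AV_PKT_FLAG_TRUSTED */
    #include <libavutil/buffer.h>    /* av_buffer_create() */
    #include <libavutil/error.h>     /* AVERROR */
    #include <libavutil/frame.h>     /* AVFrame, av_frame_free() */

    /* Free callback: the buffer data really is an AVFrame. */
    static void wrapped_frame_free(void *opaque, uint8_t *data)
    {
        AVFrame *frame = (AVFrame *)data;
        av_frame_free(&frame);
    }

    /* Hypothetical shared helper: wrap an AVFrame into a packet.  It
     * would live where knowing sizeof(AVFrame) is legitimate, so that
     * callers never spell the struct size out themselves. */
    int wrap_avframe_into_packet(AVPacket *pkt, AVFrame *frame)
    {
        pkt->buf = av_buffer_create((uint8_t *)frame, sizeof(*frame),
                                    wrapped_frame_free, NULL, 0);
        if (!pkt->buf)
            return AVERROR(ENOMEM);
        pkt->data   = pkt->buf->data;
        pkt->size   = pkt->buf->size;
        pkt->flags |= AV_PKT_FLAG_TRUSTED;
        return 0;
    }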

> +
> +    pkt->data   = pkt->buf->data;
> +    pkt->size   = pkt->buf->size;
> +    pkt->flags |= AV_PKT_FLAG_TRUSTED;
> +
>      pkt->stream_index = stream_idx;
>      pkt->pts = frame->pts;
>      pkt->pos = frame->pkt_pos;
> -    av_frame_unref(frame);
>      return size;
>  fail:
> -    av_frame_unref(frame);
> +    av_frame_free(&frame);
>      return ret;
>  
>  }

Regards,

-- 
  Nicolas George
