From: Ramiro Polla <ramiro.polla@gmail.com>
To: FFmpeg development discussions and patches <ffmpeg-devel@ffmpeg.org>
Cc: Alexander Strasser <eclipse7@gmx.net>, stephan@ecshi.net
Subject: Re: [FFmpeg-devel] [PATCH] avdevice/v4l2: add limited support for multiplanar API
Date: Mon, 1 Jul 2024 19:38:32 +0200
Message-ID: <CALweWgAn_DDCFQ0nu=MV0TdXVzyP7PPy99Lfj+x=pxsJiDXmQw@mail.gmail.com>
In-Reply-To: <CALweWgDmZ0fawyjqifyfeoBhi+mq_8NTh6+YQxfZ0LGfCxtBFg@mail.gmail.com>

[-- Attachment #1: Type: text/plain, Size: 455 bytes --]

On Fri, Jun 28, 2024 at 2:54 PM Ramiro Polla <ramiro.polla@gmail.com> wrote:
> I'll apply this after the weekend if there are no objections.

I realized this patch would have caused a regression in the handling of
buffers with corrupted data or an unexpected size. New patch attached.

Alexander, Stephan, since you worked on this before, could you check
that this patch indeed doesn't introduce that regression?

I'll apply in a few days if there are no comments.
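
The checks in question live in mmap_read_frame(): a dequeued buffer flagged
V4L2_BUF_FLAG_ERROR, or one whose payload size differs from the expected frame
size, must still be dropped. A minimal sketch of that behaviour with the
multiplanar case folded in follows; the helper name and its parameters are
illustrative assumptions, not code from the patch itself:

/* Sketch: read the payload size from the right place for either API, then
 * apply the same corrupted-data and size-mismatch checks as before. */
#include <linux/videodev2.h>

static unsigned int dequeued_payload_size(const struct v4l2_buffer *buf,
                                          int multiplanar,
                                          unsigned int expected_size)
{
    /* multiplanar buffers report the payload per plane; only the 1-plane
     * case is supported, so plane 0 holds the whole frame */
    unsigned int bytesused = multiplanar ? buf->m.planes[0].bytesused
                                         : buf->bytesused;

#ifdef V4L2_BUF_FLAG_ERROR
    if (buf->flags & V4L2_BUF_FLAG_ERROR)
        return 0;                        /* corrupted data: drop the frame */
#endif
    if (expected_size > 0 && bytesused != expected_size)
        return 0;                        /* unexpected size: drop the frame */

    return bytesused;
}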

[-- Attachment #2: v2-0001-avdevice-v4l2-add-limited-support-for-multiplanar.patch --]
[-- Type: text/x-patch, Size: 10406 bytes --]

From 957ec47689e4f4e37a25bd8739a3ec2548284436 Mon Sep 17 00:00:00 2001
From: Ramiro Polla <ramiro.polla@gmail.com>
Date: Thu, 20 Jun 2024 17:40:39 +0200
Subject: [PATCH v2] avdevice/v4l2: add limited support for multiplanar API

This commit adds support for V4L2's multiplanar API, but only when the
number of planes is 1. Adding full support for the multiplanar API would
require a device that actually uses more than 1 plane, which I have not
found yet.
---
 libavdevice/v4l2.c | 91 ++++++++++++++++++++++++++++++++--------------
 1 file changed, 64 insertions(+), 27 deletions(-)

diff --git a/libavdevice/v4l2.c b/libavdevice/v4l2.c
index 74f43ef6a9..42d4b97c8f 100644
--- a/libavdevice/v4l2.c
+++ b/libavdevice/v4l2.c
@@ -92,6 +92,9 @@ struct video_data {
     TimeFilter *timefilter;
     int64_t last_time_m;
 
+    int multiplanar;
+    enum v4l2_buf_type buf_type;
+
     int buffers;
     atomic_int buffers_queued;
     void **buf_start;
@@ -182,7 +185,13 @@ static int device_open(AVFormatContext *ctx, const char* device_path)
     av_log(ctx, AV_LOG_VERBOSE, "fd:%d capabilities:%x\n",
            fd, cap.capabilities);
 
-    if (!(cap.capabilities & V4L2_CAP_VIDEO_CAPTURE)) {
+    if (cap.capabilities & V4L2_CAP_VIDEO_CAPTURE) {
+        s->multiplanar = 0;
+        s->buf_type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+    } else if (cap.capabilities & V4L2_CAP_VIDEO_CAPTURE_MPLANE) {
+        s->multiplanar = 1;
+        s->buf_type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+    } else {
         av_log(ctx, AV_LOG_ERROR, "Not a video capture device.\n");
         err = AVERROR(ENODEV);
         goto fail;
@@ -206,7 +215,7 @@ static int device_init(AVFormatContext *ctx, int *width,
                        int *height, uint32_t pixelformat)
 {
     struct video_data *s = ctx->priv_data;
-    struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };
+    struct v4l2_format fmt = { .type = s->buf_type };
     int res = 0;
 
     fmt.fmt.pix.width = *width;
@@ -288,7 +297,7 @@ static void list_framesizes(AVFormatContext *ctx, uint32_t pixelformat)
 static void list_formats(AVFormatContext *ctx, int type)
 {
     const struct video_data *s = ctx->priv_data;
-    struct v4l2_fmtdesc vfd = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };
+    struct v4l2_fmtdesc vfd = { .type = s->buf_type };
 
     while(!v4l2_ioctl(s->fd, VIDIOC_ENUM_FMT, &vfd)) {
         enum AVCodecID codec_id = ff_fmt_v4l2codec(vfd.pixelformat);
@@ -352,7 +361,7 @@ static int mmap_init(AVFormatContext *ctx)
     int i, res;
     struct video_data *s = ctx->priv_data;
     struct v4l2_requestbuffers req = {
-        .type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+        .type   = s->buf_type,
        .count  = desired_video_buffers,
        .memory = V4L2_MEMORY_MMAP
     };
@@ -381,10 +390,14 @@ static int mmap_init(AVFormatContext *ctx)
     }
 
     for (i = 0; i < req.count; i++) {
+        unsigned int buf_length, buf_offset;
+        struct v4l2_plane planes[VIDEO_MAX_PLANES];
         struct v4l2_buffer buf = {
-            .type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+            .type   = s->buf_type,
             .index  = i,
-            .memory = V4L2_MEMORY_MMAP
+            .memory = V4L2_MEMORY_MMAP,
+            .m.planes = s->multiplanar ? planes : NULL,
+            .length = s->multiplanar ? VIDEO_MAX_PLANES : 0,
         };
         if (v4l2_ioctl(s->fd, VIDIOC_QUERYBUF, &buf) < 0) {
             res = AVERROR(errno);
@@ -392,16 +405,28 @@ static int mmap_init(AVFormatContext *ctx)
             return res;
         }
 
-        s->buf_len[i] = buf.length;
+        if (s->multiplanar) {
+            if (buf.length != 1) {
+                av_log(ctx, AV_LOG_ERROR, "multiplanar only supported when buf.length == 1\n");
+                return AVERROR_PATCHWELCOME;
+            }
+            buf_length = buf.m.planes[0].length;
+            buf_offset = buf.m.planes[0].m.mem_offset;
+        } else {
+            buf_length = buf.length;
+            buf_offset = buf.m.offset;
+        }
+
+        s->buf_len[i] = buf_length;
         if (s->frame_size > 0 && s->buf_len[i] < s->frame_size) {
             av_log(ctx, AV_LOG_ERROR,
                    "buf_len[%d] = %d < expected frame size %d\n",
                    i, s->buf_len[i], s->frame_size);
             return AVERROR(ENOMEM);
         }
-        s->buf_start[i] = v4l2_mmap(NULL, buf.length,
+        s->buf_start[i] = v4l2_mmap(NULL, buf_length,
                                PROT_READ | PROT_WRITE, MAP_SHARED,
-                               s->fd, buf.m.offset);
+                               s->fd, buf_offset);
 
         if (s->buf_start[i] == MAP_FAILED) {
             res = AVERROR(errno);
@@ -429,13 +454,16 @@ static int enqueue_buffer(struct video_data *s, struct v4l2_buffer *buf)
 
 static void mmap_release_buffer(void *opaque, uint8_t *data)
 {
+    struct v4l2_plane planes[VIDEO_MAX_PLANES];
     struct v4l2_buffer buf = { 0 };
     struct buff_data *buf_descriptor = opaque;
     struct video_data *s = buf_descriptor->s;
 
-    buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+    buf.type = s->buf_type;
     buf.memory = V4L2_MEMORY_MMAP;
     buf.index = buf_descriptor->index;
+    buf.m.planes = s->multiplanar ? planes : NULL;
+    buf.length = s->multiplanar ? VIDEO_MAX_PLANES : 0;
     av_free(buf_descriptor);
 
     enqueue_buffer(s, &buf);
@@ -505,11 +533,15 @@ static int convert_timestamp(AVFormatContext *ctx, int64_t *ts)
 static int mmap_read_frame(AVFormatContext *ctx, AVPacket *pkt)
 {
     struct video_data *s = ctx->priv_data;
+    struct v4l2_plane planes[VIDEO_MAX_PLANES];
     struct v4l2_buffer buf = {
-        .type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
-        .memory = V4L2_MEMORY_MMAP
+        .type   = s->buf_type,
+        .memory = V4L2_MEMORY_MMAP,
+        .m.planes = s->multiplanar ? planes : NULL,
+        .length = s->multiplanar ? VIDEO_MAX_PLANES : 0,
     };
     struct timeval buf_ts;
+    unsigned int bytesused;
     int res;
 
     pkt->size = 0;
@@ -536,38 +568,40 @@ static int mmap_read_frame(AVFormatContext *ctx, AVPacket *pkt)
     // always keep at least one buffer queued
     av_assert0(atomic_load(&s->buffers_queued) >= 1);
 
+    bytesused = s->multiplanar ? buf.m.planes[0].bytesused : buf.bytesused;
+
 #ifdef V4L2_BUF_FLAG_ERROR
     if (buf.flags & V4L2_BUF_FLAG_ERROR) {
         av_log(ctx, AV_LOG_WARNING,
                "Dequeued v4l2 buffer contains corrupted data (%d bytes).\n",
-               buf.bytesused);
-        buf.bytesused = 0;
+               bytesused);
+        bytesused = 0;
     } else
#endif
     {
         /* CPIA is a compressed format and we don't know the exact number of bytes
          * used by a frame, so set it here as the driver announces it. */
         if (ctx->video_codec_id == AV_CODEC_ID_CPIA)
-            s->frame_size = buf.bytesused;
+            s->frame_size = bytesused;
 
-        if (s->frame_size > 0 && buf.bytesused != s->frame_size) {
+        if (s->frame_size > 0 && bytesused != s->frame_size) {
             av_log(ctx, AV_LOG_WARNING,
                    "Dequeued v4l2 buffer contains %d bytes, but %d were expected. Flags: 0x%08X.\n",
-                   buf.bytesused, s->frame_size, buf.flags);
-            buf.bytesused = 0;
+                   bytesused, s->frame_size, buf.flags);
+            bytesused = 0;
         }
     }
 
     /* Image is at s->buff_start[buf.index] */
     if (atomic_load(&s->buffers_queued) == FFMAX(s->buffers / 8, 1)) {
         /* when we start getting low on queued buffers, fall back on copying data */
-        res = av_new_packet(pkt, buf.bytesused);
+        res = av_new_packet(pkt, bytesused);
         if (res < 0) {
             av_log(ctx, AV_LOG_ERROR, "Error allocating a packet.\n");
             enqueue_buffer(s, &buf);
             return res;
         }
-        memcpy(pkt->data, s->buf_start[buf.index], buf.bytesused);
+        memcpy(pkt->data, s->buf_start[buf.index], bytesused);
 
         res = enqueue_buffer(s, &buf);
         if (res) {
@@ -578,7 +612,7 @@ static int mmap_read_frame(AVFormatContext *ctx, AVPacket *pkt)
         struct buff_data *buf_descriptor;
 
         pkt->data = s->buf_start[buf.index];
-        pkt->size = buf.bytesused;
+        pkt->size = bytesused;
 
         buf_descriptor = av_malloc(sizeof(struct buff_data));
         if (!buf_descriptor) {
@@ -615,10 +649,13 @@ static int mmap_start(AVFormatContext *ctx)
     int i, res;
 
     for (i = 0; i < s->buffers; i++) {
+        struct v4l2_plane planes[VIDEO_MAX_PLANES];
         struct v4l2_buffer buf = {
-            .type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+            .type   = s->buf_type,
             .index  = i,
-            .memory = V4L2_MEMORY_MMAP
+            .memory = V4L2_MEMORY_MMAP,
+            .m.planes = s->multiplanar ? planes : NULL,
+            .length = s->multiplanar ? VIDEO_MAX_PLANES : 0,
         };
 
         if (v4l2_ioctl(s->fd, VIDIOC_QBUF, &buf) < 0) {
@@ -630,7 +667,7 @@ static int mmap_start(AVFormatContext *ctx)
     }
     atomic_store(&s->buffers_queued, s->buffers);
 
-    type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+    type = s->buf_type;
     if (v4l2_ioctl(s->fd, VIDIOC_STREAMON, &type) < 0) {
         res = AVERROR(errno);
         av_log(ctx, AV_LOG_ERROR, "ioctl(VIDIOC_STREAMON): %s\n",
@@ -646,7 +683,7 @@ static void mmap_close(struct video_data *s)
     enum v4l2_buf_type type;
     int i;
 
-    type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+    type = s->buf_type;
     /* We do not check for the result, because we could
      * not do anything about it anyway...
      */
@@ -733,7 +770,7 @@ static int v4l2_set_parameters(AVFormatContext *ctx)
         tpf = &streamparm.parm.capture.timeperframe;
     }
 
-    streamparm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+    streamparm.type = s->buf_type;
     if (v4l2_ioctl(s->fd, VIDIOC_G_PARM, &streamparm) < 0) {
         ret = AVERROR(errno);
         av_log(ctx, AV_LOG_WARNING, "ioctl(VIDIOC_G_PARM): %s\n", av_err2str(ret));
@@ -921,7 +958,7 @@ static int v4l2_read_header(AVFormatContext *ctx)
     }
 
     if (!s->width && !s->height) {
-        struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };
+        struct v4l2_format fmt = { .type = s->buf_type };
 
         av_log(ctx, AV_LOG_VERBOSE,
                "Querying the device for the current frame size\n");
-- 
2.30.2
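
As an aside, the heart of the device_open() hunk above is a one-time
capability probe that picks the buffer type and remembers whether the
multiplanar API is in use. A standalone sketch of that pattern follows;
the capture_ctx struct, function name, and error handling are assumptions
for illustration, not FFmpeg code:

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

struct capture_ctx {
    int fd;
    int multiplanar;
    enum v4l2_buf_type buf_type;
};

static int open_capture(struct capture_ctx *c, const char *path)
{
    struct v4l2_capability cap;

    c->fd = open(path, O_RDWR | O_NONBLOCK);
    if (c->fd < 0)
        return -errno;

    if (ioctl(c->fd, VIDIOC_QUERYCAP, &cap) < 0) {
        int err = -errno;
        close(c->fd);
        return err;
    }

    if (cap.capabilities & V4L2_CAP_VIDEO_CAPTURE) {
        c->multiplanar = 0;                  /* classic single-planar API */
        c->buf_type    = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    } else if (cap.capabilities & V4L2_CAP_VIDEO_CAPTURE_MPLANE) {
        c->multiplanar = 1;                  /* one-plane-only support, as in the patch */
        c->buf_type    = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
    } else {
        close(c->fd);
        return -ENODEV;                      /* not a video capture device */
    }
    return 0;
}

Every later ioctl in the patch (VIDIOC_S_FMT/G_FMT, VIDIOC_REQBUFS,
VIDIOC_QUERYBUF, VIDIOC_QBUF/DQBUF, VIDIOC_STREAMON/STREAMOFF, VIDIOC_G_PARM)
is then issued with buf_type, and multiplanar decides whether a v4l2_plane
array must be attached to each v4l2_buffer.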

[-- Attachment #3: Type: text/plain, Size: 251 bytes --]

_______________________________________________
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
https://ffmpeg.org/mailman/listinfo/ffmpeg-devel

To unsubscribe, visit link above, or email
ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".

Thread overview: 9+ messages
2024-06-20 15:40 Ramiro Polla
2024-06-25  9:19 ` Anton Khirnov
2024-06-25 11:56 ` Ramiro Polla
2024-06-27 14:13 ` Ramiro Polla
2024-06-27 16:05 ` Anton Khirnov
2024-06-28 11:47 ` Ramiro Polla
2024-07-01 14:15 ` Anton Khirnov
2024-06-28 12:54 ` Ramiro Polla
2024-07-01 17:38 ` Ramiro Polla [this message]