From: Vignesh Venkatasubramanian <vigneshv-at-google.com@ffmpeg.org> To: ffmpeg-devel@ffmpeg.org Cc: Vignesh Venkatasubramanian <vigneshv@google.com> Subject: [FFmpeg-devel] [PATCH] avformat/movenc: Support alpha channel for AVIF Date: Wed, 18 May 2022 14:56:20 -0700 Message-ID: <20220518215620.1718203-1-vigneshv@google.com> (raw) AVIF specification allows for alpha channel as an auxiliary item (in case of still images) or as an auxiliary track (in case of animated images). Add support for both of these. The AVIF muxer will take exactly two streams (when alpha is present) as input (first one being the YUV planes and the second one being the alpha plane). The input has to come from two different images (one of them being color and the other being alpha), or it can come from a single file source with the alpha channel extracted using the "alphaextract" filter. Example using alphaextract: ffmpeg -i rgba.png -filter_complex "[0:v]alphaextract[a]" -map 0 -map "[a]" -still-picture 1 avif_with_alpha.avif Example using two sources (first source can be in any pixel format and the second source has to be in monochrome grey pixel format): ffmpeg -i color.avif -i grey.avif -map 0 -map 1 -c copy avif_with_alpha.avif The generated files pass the compliance checks in Compliance Warden: https://github.com/gpac/ComplianceWarden libavif (the reference avif library) is able to decode the files generated using this patch. 
They also play back properly (with transparent background) in: 1) Chrome 2) Firefox (only still AVIF, no animation support) Signed-off-by: Vignesh Venkatasubramanian <vigneshv@google.com> --- libavformat/movenc.c | 185 +++++++++++++++++++++++++++++-------------- libavformat/movenc.h | 4 +- 2 files changed, 128 insertions(+), 61 deletions(-) diff --git a/libavformat/movenc.c b/libavformat/movenc.c index de971f94e8..00e42b7abb 100644 --- a/libavformat/movenc.c +++ b/libavformat/movenc.c @@ -2852,7 +2852,7 @@ static int mov_write_hdlr_tag(AVFormatContext *s, AVIOContext *pb, MOVTrack *tra hdlr = (track->mode == MODE_MOV) ? "mhlr" : "\0\0\0\0"; if (track->par->codec_type == AVMEDIA_TYPE_VIDEO) { if (track->mode == MODE_AVIF) { - hdlr_type = "pict"; + hdlr_type = (track == &mov->tracks[0]) ? "pict" : "auxv"; descr = "PictureHandler"; } else { hdlr_type = "vide"; @@ -2940,57 +2940,83 @@ static int mov_write_iloc_tag(AVIOContext *pb, MOVMuxContext *mov, AVFormatConte avio_wb32(pb, 0); /* Version & flags */ avio_w8(pb, (4 << 4) + 4); /* offset_size(4) and length_size(4) */ avio_w8(pb, 0); /* base_offset_size(4) and reserved(4) */ - avio_wb16(pb, 1); /* item_count */ + avio_wb16(pb, s->nb_streams); /* item_count */ - avio_wb16(pb, 1); /* item_id */ - avio_wb16(pb, 0); /* data_reference_index */ - avio_wb16(pb, 1); /* extent_count */ - mov->avif_extent_pos = avio_tell(pb); - avio_wb32(pb, 0); /* extent_offset (written later) */ - // For animated AVIF, we simply write the first packet's size. - avio_wb32(pb, mov->avif_extent_length); /* extent_length */ + for (int i = 0; i < s->nb_streams; i++) { + avio_wb16(pb, i + 1); /* item_id */ + avio_wb16(pb, 0); /* data_reference_index */ + avio_wb16(pb, 1); /* extent_count */ + mov->avif_extent_pos[i] = avio_tell(pb); + avio_wb32(pb, 0); /* extent_offset (written later) */ + // For animated AVIF, we simply write the first packet's size. 
+ avio_wb32(pb, mov->avif_extent_length[i]); /* extent_length */ + } return update_size(pb, pos); } static int mov_write_iinf_tag(AVIOContext *pb, MOVMuxContext *mov, AVFormatContext *s) { - int64_t infe_pos; int64_t iinf_pos = avio_tell(pb); avio_wb32(pb, 0); /* size */ ffio_wfourcc(pb, "iinf"); avio_wb32(pb, 0); /* Version & flags */ - avio_wb16(pb, 1); /* entry_count */ + avio_wb16(pb, s->nb_streams); /* entry_count */ - infe_pos = avio_tell(pb); - avio_wb32(pb, 0); /* size */ - ffio_wfourcc(pb, "infe"); - avio_w8(pb, 0x2); /* Version */ - avio_wb24(pb, 0); /* flags */ - avio_wb16(pb, 1); /* item_id */ - avio_wb16(pb, 0); /* item_protection_index */ - avio_write(pb, "av01", 4); /* item_type */ - avio_write(pb, "Color\0", 6); /* item_name */ - update_size(pb, infe_pos); + for (int i = 0; i < s->nb_streams; i++) { + int64_t infe_pos = avio_tell(pb); + avio_wb32(pb, 0); /* size */ + ffio_wfourcc(pb, "infe"); + avio_w8(pb, 0x2); /* Version */ + avio_wb24(pb, 0); /* flags */ + avio_wb16(pb, i + 1); /* item_id */ + avio_wb16(pb, 0); /* item_protection_index */ + avio_write(pb, "av01", 4); /* item_type */ + avio_write(pb, !i ? 
"Color\0" : "Alpha\0", 6); /* item_name */ + update_size(pb, infe_pos); + } return update_size(pb, iinf_pos); } -static int mov_write_ispe_tag(AVIOContext *pb, MOVMuxContext *mov, AVFormatContext *s) + +static int mov_write_iref_tag(AVIOContext *pb, MOVMuxContext *mov, AVFormatContext *s) +{ + int64_t auxl_pos; + int64_t iref_pos = avio_tell(pb); + avio_wb32(pb, 0); /* size */ + ffio_wfourcc(pb, "iref"); + avio_wb32(pb, 0); /* Version & flags */ + + auxl_pos = avio_tell(pb); + avio_wb32(pb, 0); /* size */ + ffio_wfourcc(pb, "auxl"); + avio_wb16(pb, 2); /* from_item_ID */ + avio_wb16(pb, 1); /* reference_count */ + avio_wb16(pb, 1); /* to_item_ID */ + update_size(pb, auxl_pos); + + return update_size(pb, iref_pos); +} + +static int mov_write_ispe_tag(AVIOContext *pb, MOVMuxContext *mov, AVFormatContext *s, + int stream_index) { int64_t pos = avio_tell(pb); avio_wb32(pb, 0); /* size */ ffio_wfourcc(pb, "ispe"); avio_wb32(pb, 0); /* Version & flags */ - avio_wb32(pb, s->streams[0]->codecpar->width); /* image_width */ - avio_wb32(pb, s->streams[0]->codecpar->height); /* image_height */ + avio_wb32(pb, s->streams[stream_index]->codecpar->width); /* image_width */ + avio_wb32(pb, s->streams[stream_index]->codecpar->height); /* image_height */ return update_size(pb, pos); } -static int mov_write_pixi_tag(AVIOContext *pb, MOVMuxContext *mov, AVFormatContext *s) +static int mov_write_pixi_tag(AVIOContext *pb, MOVMuxContext *mov, AVFormatContext *s, + int stream_index) { int64_t pos = avio_tell(pb); - const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(s->streams[0]->codecpar->format); + const AVPixFmtDescriptor *pixdesc = + av_pix_fmt_desc_get(s->streams[stream_index]->codecpar->format); avio_wb32(pb, 0); /* size */ ffio_wfourcc(pb, "pixi"); avio_wb32(pb, 0); /* Version & flags */ @@ -3001,15 +3027,30 @@ static int mov_write_pixi_tag(AVIOContext *pb, MOVMuxContext *mov, AVFormatConte return update_size(pb, pos); } +static int mov_write_auxC_tag(AVIOContext *pb) +{ + 
int64_t pos = avio_tell(pb); + avio_wb32(pb, 0); /* size */ + ffio_wfourcc(pb, "auxC"); + avio_wb32(pb, 0); /* Version & flags */ + avio_write(pb, "urn:mpeg:mpegB:cicp:systems:auxiliary:alpha\0", 44); + return update_size(pb, pos); +} + static int mov_write_ipco_tag(AVIOContext *pb, MOVMuxContext *mov, AVFormatContext *s) { int64_t pos = avio_tell(pb); avio_wb32(pb, 0); /* size */ ffio_wfourcc(pb, "ipco"); - mov_write_ispe_tag(pb, mov, s); - mov_write_pixi_tag(pb, mov, s); - mov_write_av1c_tag(pb, &mov->tracks[0]); - mov_write_colr_tag(pb, &mov->tracks[0], 0); + for (int i = 0; i < s->nb_streams; i++) { + mov_write_ispe_tag(pb, mov, s, i); + mov_write_pixi_tag(pb, mov, s, i); + mov_write_av1c_tag(pb, &mov->tracks[i]); + if (!i) + mov_write_colr_tag(pb, &mov->tracks[0], 0); + else + mov_write_auxC_tag(pb); + } return update_size(pb, pos); } @@ -3019,18 +3060,21 @@ static int mov_write_ipma_tag(AVIOContext *pb, MOVMuxContext *mov, AVFormatConte avio_wb32(pb, 0); /* size */ ffio_wfourcc(pb, "ipma"); avio_wb32(pb, 0); /* Version & flags */ - avio_wb32(pb, 1); /* entry_count */ - avio_wb16(pb, 1); /* item_ID */ - avio_w8(pb, 4); /* association_count */ - - // ispe association. - avio_w8(pb, 1); /* essential and property_index */ - // pixi association. - avio_w8(pb, 2); /* essential and property_index */ - // av1C association. - avio_w8(pb, 0x80 | 3); /* essential and property_index */ - // colr association. - avio_w8(pb, 4); /* essential and property_index */ + avio_wb32(pb, s->nb_streams); /* entry_count */ + + for (int i = 0, index = 1; i < s->nb_streams; i++) { + avio_wb16(pb, i + 1); /* item_ID */ + avio_w8(pb, 4); /* association_count */ + + // ispe association. + avio_w8(pb, index++); /* essential and property_index */ + // pixi association. + avio_w8(pb, index++); /* essential and property_index */ + // av1C association. + avio_w8(pb, 0x80 | index++); /* essential and property_index */ + // colr/auxC association. 
+ avio_w8(pb, index++); /* essential and property_index */ + } return update_size(pb, pos); } @@ -4112,6 +4156,8 @@ static int mov_write_meta_tag(AVIOContext *pb, MOVMuxContext *mov, mov_write_pitm_tag(pb, 1); mov_write_iloc_tag(pb, mov, s); mov_write_iinf_tag(pb, mov, s); + if (s->nb_streams > 1) + mov_write_iref_tag(pb, mov, s); mov_write_iprp_tag(pb, mov, s); } else { /* iTunes metadata tag */ @@ -6040,8 +6086,8 @@ int ff_mov_write_packet(AVFormatContext *s, AVPacket *pkt) avio_write(pb, reformatted_data, size); } else { size = ff_av1_filter_obus(pb, pkt->data, pkt->size); - if (trk->mode == MODE_AVIF && !mov->avif_extent_length) { - mov->avif_extent_length = size; + if (trk->mode == MODE_AVIF && !mov->avif_extent_length[pkt->stream_index]) { + mov->avif_extent_length[pkt->stream_index] = size; } } @@ -6874,14 +6920,23 @@ static int mov_init(AVFormatContext *s) /* AVIF output must have exactly one video stream */ if (mov->mode == MODE_AVIF) { - if (s->nb_streams > 1) { - av_log(s, AV_LOG_ERROR, "AVIF output requires exactly one stream\n"); + if (s->nb_streams > 2) { + av_log(s, AV_LOG_ERROR, "AVIF output requires exactly one or two streams\n"); return AVERROR(EINVAL); } - if (s->streams[0]->codecpar->codec_type != AVMEDIA_TYPE_VIDEO) { - av_log(s, AV_LOG_ERROR, "AVIF output requires one video stream\n"); + if (s->streams[0]->codecpar->codec_type != AVMEDIA_TYPE_VIDEO && + (s->nb_streams > 1 && s->streams[1]->codecpar->codec_type != AVMEDIA_TYPE_VIDEO)) { + av_log(s, AV_LOG_ERROR, "AVIF output supports only video streams\n"); return AVERROR(EINVAL); } + if (s->nb_streams > 1) { + const AVPixFmtDescriptor *pixdesc = + av_pix_fmt_desc_get(s->streams[1]->codecpar->format); + if (pixdesc->nb_components != 1) { + av_log(s, AV_LOG_ERROR, "Second stream for AVIF (alpha) output must have exactly one plane\n"); + return AVERROR(EINVAL); + } + } s->streams[0]->disposition |= AV_DISPOSITION_DEFAULT; } @@ -7543,18 +7598,25 @@ static int avif_write_trailer(AVFormatContext *s) 
{ AVIOContext *pb = s->pb; MOVMuxContext *mov = s->priv_data; - int64_t pos_backup, mdat_pos; + int64_t pos_backup, extent_offsets[2]; uint8_t *buf; - int buf_size, moov_size; + int buf_size, moov_size, i; if (mov->moov_written) return 0; mov->is_animated_avif = s->streams[0]->nb_frames > 1; + if (mov->is_animated_avif && s->nb_streams > 1) { + // For animated avif with alpha channel, we need to write a the tref + // tag with type "auxl". + mov->tracks[1].tref_tag = MKTAG('a', 'u', 'x', 'l'); + mov->tracks[1].tref_id = 1; + } mov_write_identification(pb, s); mov_write_meta_tag(pb, mov, s); moov_size = get_moov_size(s); - mov->tracks[0].data_offset = avio_tell(pb) + moov_size + 8; + for (i = 0; i < s->nb_streams; i++) + mov->tracks[i].data_offset = avio_tell(pb) + moov_size + 8; if (mov->is_animated_avif) { int ret; @@ -7565,19 +7627,24 @@ static int avif_write_trailer(AVFormatContext *s) buf_size = avio_get_dyn_buf(mov->mdat_buf, &buf); avio_wb32(pb, buf_size + 8); ffio_wfourcc(pb, "mdat"); - mdat_pos = avio_tell(pb); - if (mdat_pos != (uint32_t)mdat_pos) { - av_log(s, AV_LOG_ERROR, "mdat offset does not fit in 32 bits\n"); - return AVERROR_INVALIDDATA; - } + // The offset for the YUV planes is the starting position of mdat. + extent_offsets[0] = avio_tell(pb); + // The offset for alpha plane is YUV offset + YUV size. + extent_offsets[1] = extent_offsets[0] + mov->avif_extent_length[0]; avio_write(pb, buf, buf_size); - // write extent offset. + // write extent offsets. 
pos_backup = avio_tell(pb); - avio_seek(pb, mov->avif_extent_pos, SEEK_SET); - avio_wb32(pb, mdat_pos); /* rewrite offset */ + for (i = 0; i < s->nb_streams; i++) { + if (extent_offsets[i] != (uint32_t)extent_offsets[i]) { + av_log(s, AV_LOG_ERROR, "extent offset does not fit in 32 bits\n"); + return AVERROR_INVALIDDATA; + } + avio_seek(pb, mov->avif_extent_pos[i], SEEK_SET); + avio_wb32(pb, extent_offsets[i]); /* rewrite offset */ + } avio_seek(pb, pos_backup, SEEK_SET); return 0; diff --git a/libavformat/movenc.h b/libavformat/movenc.h index 281576cc66..e4550f7900 100644 --- a/libavformat/movenc.h +++ b/libavformat/movenc.h @@ -246,8 +246,8 @@ typedef struct MOVMuxContext { int empty_hdlr_name; int movie_timescale; - int64_t avif_extent_pos; - int avif_extent_length; + int64_t avif_extent_pos[2]; // index 0 is YUV and 1 is Alpha. + int avif_extent_length[2]; // index 0 is YUV and 1 is Alpha. int is_animated_avif; } MOVMuxContext; -- 2.36.1.124.g0e6072fb45-goog _______________________________________________ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
next reply other threads:[~2022-05-18 21:56 UTC|newest] Thread overview: 18+ messages / expand[flat|nested] mbox.gz Atom feed top 2022-05-18 21:56 Vignesh Venkatasubramanian [this message] 2022-05-19 2:34 ` Bang He 2022-05-19 2:36 ` Bang He 2022-05-19 16:08 ` Vignesh Venkatasubramanian 2022-05-31 19:12 ` Vignesh Venkatasubramanian 2022-06-01 17:52 ` James Zern 2022-06-01 18:05 ` Vignesh Venkatasubramanian 2022-06-01 18:05 ` Vignesh Venkatasubramanian 2022-06-01 18:52 ` xyesbee 2022-06-01 19:47 ` Vignesh Venkatasubramanian 2022-06-06 16:29 ` Vignesh Venkatasubramanian 2022-06-13 16:30 ` Vignesh Venkatasubramanian 2022-06-13 17:17 ` James Zern 2022-06-21 17:12 ` Vignesh Venkatasubramanian 2022-06-27 16:43 ` Vignesh Venkatasubramanian 2022-06-27 16:48 ` James Almer 2022-06-27 17:17 ` Vignesh Venkatasubramanian 2022-06-27 19:46 ` James Zern
Reply instructions: You may reply publicly to this message via plain-text email using any one of the following methods: * Save the following mbox file, import it into your mail client, and reply-to-all from there: mbox Avoid top-posting and favor interleaved quoting: https://en.wikipedia.org/wiki/Posting_style#Interleaved_style * Reply using the --to, --cc, and --in-reply-to switches of git-send-email(1): git send-email \ --in-reply-to=20220518215620.1718203-1-vigneshv@google.com \ --to=vigneshv-at-google.com@ffmpeg.org \ --cc=ffmpeg-devel@ffmpeg.org \ --cc=vigneshv@google.com \ /path/to/YOUR_REPLY https://kernel.org/pub/software/scm/git/docs/git-send-email.html * If your mail client supports setting the In-Reply-To header via mailto: links, try the mailto: link
Git Inbox Mirror of the ffmpeg-devel mailing list - see https://ffmpeg.org/mailman/listinfo/ffmpeg-devel This inbox may be cloned and mirrored by anyone: git clone --mirror https://master.gitmailbox.com/ffmpegdev/0 ffmpegdev/git/0.git # If you have public-inbox 1.1+ installed, you may # initialize and index your mirror using the following commands: public-inbox-init -V2 ffmpegdev ffmpegdev/ https://master.gitmailbox.com/ffmpegdev \ ffmpegdev@gitmailbox.com public-inbox-index ffmpegdev Example config snippet for mirrors. AGPL code for this site: git clone https://public-inbox.org/public-inbox.git