From: Andreas Rheinhardt <andreas.rheinhardt@outlook.com> To: ffmpeg-devel@ffmpeg.org Cc: Andreas Rheinhardt <andreas.rheinhardt@outlook.com> Subject: [FFmpeg-devel] [PATCH v2 34/71] avcodec/rv30, rv34, rv40: Avoid indirection Date: Sat, 11 May 2024 22:50:58 +0200 Message-ID: <GV1P250MB073761EE9C7D00D475AFEC1B8FE02@GV1P250MB0737.EURP250.PROD.OUTLOOK.COM> (raw) In-Reply-To: <AS8P250MB074471DDEA29072B2586F0EF8FE02@AS8P250MB0744.EURP250.PROD.OUTLOOK.COM> Use the cached values from MpegEncContext.(cur|last|next)_pic instead of the corresponding *_pic_ptr. Also do the same in wmv2dec.c and mpegvideo_enc.c. Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com> --- libavcodec/mpegvideo_enc.c | 2 +- libavcodec/rv30.c | 18 +++--- libavcodec/rv34.c | 122 +++++++++++++++++++------------------ libavcodec/rv40.c | 10 +-- libavcodec/wmv2dec.c | 7 +-- 5 files changed, 80 insertions(+), 79 deletions(-) diff --git a/libavcodec/mpegvideo_enc.c b/libavcodec/mpegvideo_enc.c index 2f6aaad1c7..f84a05d674 100644 --- a/libavcodec/mpegvideo_enc.c +++ b/libavcodec/mpegvideo_enc.c @@ -2145,7 +2145,7 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s, update_qscale(s); if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) { - s->qscale = s->cur_pic_ptr->qscale_table[mb_xy]; + s->qscale = s->cur_pic.qscale_table[mb_xy]; s->dquant = s->qscale - last_qp; if (s->out_format == FMT_H263) { diff --git a/libavcodec/rv30.c b/libavcodec/rv30.c index a4e38edf54..9c8bb966e9 100644 --- a/libavcodec/rv30.c +++ b/libavcodec/rv30.c @@ -160,7 +160,7 @@ static void rv30_loop_filter(RV34DecContext *r, int row) mb_pos = row * s->mb_stride; for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){ - int mbtype = s->cur_pic_ptr->mb_type[mb_pos]; + int mbtype = s->cur_pic.mb_type[mb_pos]; if(IS_INTRA(mbtype) || IS_SEPARATE_DC(mbtype)) r->deblock_coefs[mb_pos] = 0xFFFF; if(IS_INTRA(mbtype)) @@ -172,11 +172,11 @@ static void rv30_loop_filter(RV34DecContext *r, int row) */ mb_pos = row * s->mb_stride; for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){ - cur_lim = rv30_loop_filt_lim[s->cur_pic_ptr->qscale_table[mb_pos]]; + cur_lim = rv30_loop_filt_lim[s->cur_pic.qscale_table[mb_pos]]; if(mb_x) - left_lim = rv30_loop_filt_lim[s->cur_pic_ptr->qscale_table[mb_pos - 1]]; + left_lim = rv30_loop_filt_lim[s->cur_pic.qscale_table[mb_pos - 1]]; for(j = 0; j < 16; j += 4){ - Y = s->cur_pic_ptr->f->data[0] + mb_x*16 + (row*16 + j) * s->linesize + 4 * !mb_x; + Y = s->cur_pic.data[0] + mb_x*16 + (row*16 + j) * s->linesize + 4 * !mb_x; for(i = !mb_x; i < 4; i++, Y += 4){ int ij = i + j; loc_lim = 0; @@ -196,7 +196,7 @@ static void rv30_loop_filter(RV34DecContext *r, int row) if(mb_x) left_cbp = (r->cbp_chroma[mb_pos - 1] >> (k*4)) & 0xF; for(j = 0; j < 8; j += 4){ - C = s->cur_pic_ptr->f->data[k + 1] + mb_x*8 + (row*8 + j) * s->uvlinesize + 4 * !mb_x; + C = s->cur_pic.data[k + 1] + mb_x*8 + (row*8 + j) * s->uvlinesize + 4 * !mb_x; for(i = !mb_x; i < 2; i++, C += 4){ int ij = i + (j >> 1); loc_lim = 0; @@ -214,11 +214,11 @@ static void rv30_loop_filter(RV34DecContext *r, int row) } mb_pos = row * s->mb_stride; for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){ - cur_lim = rv30_loop_filt_lim[s->cur_pic_ptr->qscale_table[mb_pos]]; + cur_lim = rv30_loop_filt_lim[s->cur_pic.qscale_table[mb_pos]]; if(row) - top_lim = rv30_loop_filt_lim[s->cur_pic_ptr->qscale_table[mb_pos - s->mb_stride]]; + top_lim = rv30_loop_filt_lim[s->cur_pic.qscale_table[mb_pos - s->mb_stride]]; for(j = 4*!row; j < 16; j += 4){ - Y = s->cur_pic_ptr->f->data[0] + mb_x*16 + 
(row*16 + j) * s->linesize; + Y = s->cur_pic.data[0] + mb_x*16 + (row*16 + j) * s->linesize; for(i = 0; i < 4; i++, Y += 4){ int ij = i + j; loc_lim = 0; @@ -238,7 +238,7 @@ static void rv30_loop_filter(RV34DecContext *r, int row) if(row) top_cbp = (r->cbp_chroma[mb_pos - s->mb_stride] >> (k*4)) & 0xF; for(j = 4*!row; j < 8; j += 4){ - C = s->cur_pic_ptr->f->data[k+1] + mb_x*8 + (row*8 + j) * s->uvlinesize; + C = s->cur_pic.data[k+1] + mb_x*8 + (row*8 + j) * s->uvlinesize; for(i = 0; i < 2; i++, C += 4){ int ij = i + (j >> 1); loc_lim = 0; diff --git a/libavcodec/rv34.c b/libavcodec/rv34.c index 467a6ab5a1..941d983501 100644 --- a/libavcodec/rv34.c +++ b/libavcodec/rv34.c @@ -367,7 +367,7 @@ static int rv34_decode_intra_mb_header(RV34DecContext *r, int8_t *intra_types) r->is16 = get_bits1(gb); if(r->is16){ - s->cur_pic_ptr->mb_type[mb_pos] = MB_TYPE_INTRA16x16; + s->cur_pic.mb_type[mb_pos] = MB_TYPE_INTRA16x16; r->block_type = RV34_MB_TYPE_INTRA16x16; t = get_bits(gb, 2); fill_rectangle(intra_types, 4, 4, r->intra_types_stride, t, sizeof(intra_types[0])); @@ -377,7 +377,7 @@ static int rv34_decode_intra_mb_header(RV34DecContext *r, int8_t *intra_types) if(!get_bits1(gb)) av_log(s->avctx, AV_LOG_ERROR, "Need DQUANT\n"); } - s->cur_pic_ptr->mb_type[mb_pos] = MB_TYPE_INTRA; + s->cur_pic.mb_type[mb_pos] = MB_TYPE_INTRA; r->block_type = RV34_MB_TYPE_INTRA; if(r->decode_intra_types(r, gb, intra_types) < 0) return -1; @@ -403,7 +403,7 @@ static int rv34_decode_inter_mb_header(RV34DecContext *r, int8_t *intra_types) r->block_type = r->decode_mb_info(r); if(r->block_type == -1) return -1; - s->cur_pic_ptr->mb_type[mb_pos] = rv34_mb_type_to_lavc[r->block_type]; + s->cur_pic.mb_type[mb_pos] = rv34_mb_type_to_lavc[r->block_type]; r->mb_type[mb_pos] = r->block_type; if(r->block_type == RV34_MB_SKIP){ if(s->pict_type == AV_PICTURE_TYPE_P) @@ -411,7 +411,7 @@ static int rv34_decode_inter_mb_header(RV34DecContext *r, int8_t *intra_types) if(s->pict_type == AV_PICTURE_TYPE_B) r->mb_type[mb_pos] = RV34_MB_B_DIRECT; } - r->is16 = !!IS_INTRA16x16(s->cur_pic_ptr->mb_type[mb_pos]); + r->is16 = !!IS_INTRA16x16(s->cur_pic.mb_type[mb_pos]); if (rv34_decode_mv(r, r->block_type) < 0) return -1; if(r->block_type == RV34_MB_SKIP){ @@ -421,7 +421,7 @@ static int rv34_decode_inter_mb_header(RV34DecContext *r, int8_t *intra_types) r->chroma_vlc = 1; r->luma_vlc = 0; - if(IS_INTRA(s->cur_pic_ptr->mb_type[mb_pos])){ + if (IS_INTRA(s->cur_pic.mb_type[mb_pos])) { if(r->is16){ t = get_bits(gb, 2); fill_rectangle(intra_types, 4, 4, r->intra_types_stride, t, sizeof(intra_types[0])); @@ -480,33 +480,34 @@ static void rv34_pred_mv(RV34DecContext *r, int block_type, int subblock_no, int int mx, my; int* avail = r->avail_cache + avail_indexes[subblock_no]; int c_off = part_sizes_w[block_type]; + int16_t (*motion_val)[2] = s->cur_pic.motion_val[0]; mv_pos += (subblock_no & 1) + (subblock_no >> 1)*s->b8_stride; if(subblock_no == 3) c_off = -1; if(avail[-1]){ - A[0] = s->cur_pic_ptr->motion_val[0][mv_pos-1][0]; - A[1] = s->cur_pic_ptr->motion_val[0][mv_pos-1][1]; + A[0] = motion_val[mv_pos-1][0]; + A[1] = motion_val[mv_pos-1][1]; } if(avail[-4]){ - B[0] = s->cur_pic_ptr->motion_val[0][mv_pos-s->b8_stride][0]; - B[1] = s->cur_pic_ptr->motion_val[0][mv_pos-s->b8_stride][1]; + B[0] = motion_val[mv_pos-s->b8_stride][0]; + B[1] = motion_val[mv_pos-s->b8_stride][1]; }else{ B[0] = A[0]; B[1] = A[1]; } if(!avail[c_off-4]){ if(avail[-4] && (avail[-1] || r->rv30)){ - C[0] = s->cur_pic_ptr->motion_val[0][mv_pos-s->b8_stride-1][0]; - C[1] = 
s->cur_pic_ptr->motion_val[0][mv_pos-s->b8_stride-1][1]; + C[0] = motion_val[mv_pos-s->b8_stride-1][0]; + C[1] = motion_val[mv_pos-s->b8_stride-1][1]; }else{ C[0] = A[0]; C[1] = A[1]; } }else{ - C[0] = s->cur_pic_ptr->motion_val[0][mv_pos-s->b8_stride+c_off][0]; - C[1] = s->cur_pic_ptr->motion_val[0][mv_pos-s->b8_stride+c_off][1]; + C[0] = motion_val[mv_pos-s->b8_stride+c_off][0]; + C[1] = motion_val[mv_pos-s->b8_stride+c_off][1]; } mx = mid_pred(A[0], B[0], C[0]); my = mid_pred(A[1], B[1], C[1]); @@ -514,8 +515,8 @@ static void rv34_pred_mv(RV34DecContext *r, int block_type, int subblock_no, int my += r->dmv[dmv_no][1]; for(j = 0; j < part_sizes_h[block_type]; j++){ for(i = 0; i < part_sizes_w[block_type]; i++){ - s->cur_pic_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][0] = mx; - s->cur_pic_ptr->motion_val[0][mv_pos + i + j*s->b8_stride][1] = my; + motion_val[mv_pos + i + j*s->b8_stride][0] = mx; + motion_val[mv_pos + i + j*s->b8_stride][1] = my; } } } @@ -564,7 +565,7 @@ static void rv34_pred_mv_b(RV34DecContext *r, int block_type, int dir) int has_A = 0, has_B = 0, has_C = 0; int mx, my; int i, j; - Picture *cur_pic = s->cur_pic_ptr; + Picture *cur_pic = &s->cur_pic; const int mask = dir ? MB_TYPE_L1 : MB_TYPE_L0; int type = cur_pic->mb_type[mb_pos]; @@ -617,27 +618,27 @@ static void rv34_pred_mv_rv3(RV34DecContext *r, int block_type, int dir) int* avail = r->avail_cache + avail_indexes[0]; if(avail[-1]){ - A[0] = s->cur_pic_ptr->motion_val[0][mv_pos - 1][0]; - A[1] = s->cur_pic_ptr->motion_val[0][mv_pos - 1][1]; + A[0] = s->cur_pic.motion_val[0][mv_pos - 1][0]; + A[1] = s->cur_pic.motion_val[0][mv_pos - 1][1]; } if(avail[-4]){ - B[0] = s->cur_pic_ptr->motion_val[0][mv_pos - s->b8_stride][0]; - B[1] = s->cur_pic_ptr->motion_val[0][mv_pos - s->b8_stride][1]; + B[0] = s->cur_pic.motion_val[0][mv_pos - s->b8_stride][0]; + B[1] = s->cur_pic.motion_val[0][mv_pos - s->b8_stride][1]; }else{ B[0] = A[0]; B[1] = A[1]; } if(!avail[-4 + 2]){ if(avail[-4] && (avail[-1])){ - C[0] = s->cur_pic_ptr->motion_val[0][mv_pos - s->b8_stride - 1][0]; - C[1] = s->cur_pic_ptr->motion_val[0][mv_pos - s->b8_stride - 1][1]; + C[0] = s->cur_pic.motion_val[0][mv_pos - s->b8_stride - 1][0]; + C[1] = s->cur_pic.motion_val[0][mv_pos - s->b8_stride - 1][1]; }else{ C[0] = A[0]; C[1] = A[1]; } }else{ - C[0] = s->cur_pic_ptr->motion_val[0][mv_pos - s->b8_stride + 2][0]; - C[1] = s->cur_pic_ptr->motion_val[0][mv_pos - s->b8_stride + 2][1]; + C[0] = s->cur_pic.motion_val[0][mv_pos - s->b8_stride + 2][0]; + C[1] = s->cur_pic.motion_val[0][mv_pos - s->b8_stride + 2][1]; } mx = mid_pred(A[0], B[0], C[0]); my = mid_pred(A[1], B[1], C[1]); @@ -646,8 +647,8 @@ static void rv34_pred_mv_rv3(RV34DecContext *r, int block_type, int dir) for(j = 0; j < 2; j++){ for(i = 0; i < 2; i++){ for(k = 0; k < 2; k++){ - s->cur_pic_ptr->motion_val[k][mv_pos + i + j*s->b8_stride][0] = mx; - s->cur_pic_ptr->motion_val[k][mv_pos + i + j*s->b8_stride][1] = my; + s->cur_pic.motion_val[k][mv_pos + i + j*s->b8_stride][0] = mx; + s->cur_pic.motion_val[k][mv_pos + i + j*s->b8_stride][1] = my; } } } @@ -683,27 +684,28 @@ static inline void rv34_mc(RV34DecContext *r, const int block_type, int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride + mv_off; int is16x16 = 1; int emu = 0; + int16_t *motion_val = s->cur_pic.motion_val[dir][mv_pos]; if(thirdpel){ int chroma_mx, chroma_my; - mx = (s->cur_pic_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) / 3 - (1 << 24); - my = (s->cur_pic_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) / 3 - (1 << 24); - lx = 
(s->cur_pic_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) % 3; - ly = (s->cur_pic_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) % 3; - chroma_mx = s->cur_pic_ptr->motion_val[dir][mv_pos][0] / 2; - chroma_my = s->cur_pic_ptr->motion_val[dir][mv_pos][1] / 2; + mx = (motion_val[0] + (3 << 24)) / 3 - (1 << 24); + my = (motion_val[1] + (3 << 24)) / 3 - (1 << 24); + lx = (motion_val[0] + (3 << 24)) % 3; + ly = (motion_val[1] + (3 << 24)) % 3; + chroma_mx = motion_val[0] / 2; + chroma_my = motion_val[1] / 2; umx = (chroma_mx + (3 << 24)) / 3 - (1 << 24); umy = (chroma_my + (3 << 24)) / 3 - (1 << 24); uvmx = chroma_coeffs[(chroma_mx + (3 << 24)) % 3]; uvmy = chroma_coeffs[(chroma_my + (3 << 24)) % 3]; }else{ int cx, cy; - mx = s->cur_pic_ptr->motion_val[dir][mv_pos][0] >> 2; - my = s->cur_pic_ptr->motion_val[dir][mv_pos][1] >> 2; - lx = s->cur_pic_ptr->motion_val[dir][mv_pos][0] & 3; - ly = s->cur_pic_ptr->motion_val[dir][mv_pos][1] & 3; - cx = s->cur_pic_ptr->motion_val[dir][mv_pos][0] / 2; - cy = s->cur_pic_ptr->motion_val[dir][mv_pos][1] / 2; + mx = motion_val[0] >> 2; + my = motion_val[1] >> 2; + lx = motion_val[0] & 3; + ly = motion_val[1] & 3; + cx = motion_val[0] / 2; + cy = motion_val[1] / 2; umx = cx >> 2; umy = cy >> 2; uvmx = (cx & 3) << 1; @@ -721,9 +723,9 @@ static inline void rv34_mc(RV34DecContext *r, const int block_type, } dxy = ly*4 + lx; - srcY = dir ? s->next_pic_ptr->f->data[0] : s->last_pic_ptr->f->data[0]; - srcU = dir ? s->next_pic_ptr->f->data[1] : s->last_pic_ptr->f->data[1]; - srcV = dir ? s->next_pic_ptr->f->data[2] : s->last_pic_ptr->f->data[2]; + srcY = dir ? s->next_pic.data[0] : s->last_pic.data[0]; + srcU = dir ? s->next_pic.data[1] : s->last_pic.data[1]; + srcV = dir ? s->next_pic.data[2] : s->last_pic.data[2]; src_x = s->mb_x * 16 + xoff + mx; src_y = s->mb_y * 16 + yoff + my; uvsrc_x = s->mb_x * 8 + (xoff >> 1) + umx; @@ -884,11 +886,11 @@ static int rv34_decode_mv(RV34DecContext *r, int block_type) switch(block_type){ case RV34_MB_TYPE_INTRA: case RV34_MB_TYPE_INTRA16x16: - ZERO8x2(s->cur_pic_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride); + ZERO8x2(s->cur_pic.motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride); return 0; case RV34_MB_SKIP: if(s->pict_type == AV_PICTURE_TYPE_P){ - ZERO8x2(s->cur_pic_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride); + ZERO8x2(s->cur_pic.motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride); rv34_mc_1mv (r, block_type, 0, 0, 0, 2, 2, 0); break; } @@ -898,21 +900,21 @@ static int rv34_decode_mv(RV34DecContext *r, int block_type) if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME)) ff_thread_await_progress(&s->next_pic_ptr->tf, FFMAX(0, s->mb_y-1), 0); - next_bt = s->next_pic_ptr->mb_type[s->mb_x + s->mb_y * s->mb_stride]; + next_bt = s->next_pic.mb_type[s->mb_x + s->mb_y * s->mb_stride]; if(IS_INTRA(next_bt) || IS_SKIP(next_bt)){ - ZERO8x2(s->cur_pic_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride); - ZERO8x2(s->cur_pic_ptr->motion_val[1][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride); + ZERO8x2(s->cur_pic.motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride); + ZERO8x2(s->cur_pic.motion_val[1][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride); }else for(j = 0; j < 2; j++) for(i = 0; i < 2; i++) for(k = 0; k < 2; k++) for(l = 0; l < 2; l++) - s->cur_pic_ptr->motion_val[l][mv_pos + i + j*s->b8_stride][k] = calc_add_mv(r, l, s->next_pic_ptr->motion_val[0][mv_pos + i + 
j*s->b8_stride][k]); + s->cur_pic.motion_val[l][mv_pos + i + j*s->b8_stride][k] = calc_add_mv(r, l, s->next_pic.motion_val[0][mv_pos + i + j*s->b8_stride][k]); if(!(IS_16X8(next_bt) || IS_8X16(next_bt) || IS_8X8(next_bt))) //we can use whole macroblock MC rv34_mc_2mv(r, block_type); else rv34_mc_2mv_skip(r); - ZERO8x2(s->cur_pic_ptr->motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride); + ZERO8x2(s->cur_pic.motion_val[0][s->mb_x * 2 + s->mb_y * 2 * s->b8_stride], s->b8_stride); break; case RV34_MB_P_16x16: case RV34_MB_P_MIX16x16: @@ -1180,7 +1182,7 @@ static int rv34_set_deblock_coef(RV34DecContext *r) MpegEncContext *s = &r->s; int hmvmask = 0, vmvmask = 0, i, j; int midx = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride; - int16_t (*motion_val)[2] = &s->cur_pic_ptr->motion_val[0][midx]; + int16_t (*motion_val)[2] = &s->cur_pic.motion_val[0][midx]; for(j = 0; j < 16; j += 8){ for(i = 0; i < 2; i++){ if(is_mv_diff_gt_3(motion_val + i, 1)) @@ -1223,26 +1225,26 @@ static int rv34_decode_inter_macroblock(RV34DecContext *r, int8_t *intra_types) dist = (s->mb_x - s->resync_mb_x) + (s->mb_y - s->resync_mb_y) * s->mb_width; if(s->mb_x && dist) r->avail_cache[5] = - r->avail_cache[9] = s->cur_pic_ptr->mb_type[mb_pos - 1]; + r->avail_cache[9] = s->cur_pic.mb_type[mb_pos - 1]; if(dist >= s->mb_width) r->avail_cache[2] = - r->avail_cache[3] = s->cur_pic_ptr->mb_type[mb_pos - s->mb_stride]; + r->avail_cache[3] = s->cur_pic.mb_type[mb_pos - s->mb_stride]; if(((s->mb_x+1) < s->mb_width) && dist >= s->mb_width - 1) - r->avail_cache[4] = s->cur_pic_ptr->mb_type[mb_pos - s->mb_stride + 1]; + r->avail_cache[4] = s->cur_pic.mb_type[mb_pos - s->mb_stride + 1]; if(s->mb_x && dist > s->mb_width) - r->avail_cache[1] = s->cur_pic_ptr->mb_type[mb_pos - s->mb_stride - 1]; + r->avail_cache[1] = s->cur_pic.mb_type[mb_pos - s->mb_stride - 1]; s->qscale = r->si.quant; cbp = cbp2 = rv34_decode_inter_mb_header(r, intra_types); r->cbp_luma [mb_pos] = cbp; r->cbp_chroma[mb_pos] = cbp >> 16; r->deblock_coefs[mb_pos] = rv34_set_deblock_coef(r) | r->cbp_luma[mb_pos]; - s->cur_pic_ptr->qscale_table[mb_pos] = s->qscale; + s->cur_pic.qscale_table[mb_pos] = s->qscale; if(cbp == -1) return -1; - if (IS_INTRA(s->cur_pic_ptr->mb_type[mb_pos])){ + if (IS_INTRA(s->cur_pic.mb_type[mb_pos])) { if(r->is16) rv34_output_i16x16(r, intra_types, cbp); else rv34_output_intra(r, intra_types, cbp); return 0; @@ -1325,21 +1327,21 @@ static int rv34_decode_intra_macroblock(RV34DecContext *r, int8_t *intra_types) dist = (s->mb_x - s->resync_mb_x) + (s->mb_y - s->resync_mb_y) * s->mb_width; if(s->mb_x && dist) r->avail_cache[5] = - r->avail_cache[9] = s->cur_pic_ptr->mb_type[mb_pos - 1]; + r->avail_cache[9] = s->cur_pic.mb_type[mb_pos - 1]; if(dist >= s->mb_width) r->avail_cache[2] = - r->avail_cache[3] = s->cur_pic_ptr->mb_type[mb_pos - s->mb_stride]; + r->avail_cache[3] = s->cur_pic.mb_type[mb_pos - s->mb_stride]; if(((s->mb_x+1) < s->mb_width) && dist >= s->mb_width - 1) - r->avail_cache[4] = s->cur_pic_ptr->mb_type[mb_pos - s->mb_stride + 1]; + r->avail_cache[4] = s->cur_pic.mb_type[mb_pos - s->mb_stride + 1]; if(s->mb_x && dist > s->mb_width) - r->avail_cache[1] = s->cur_pic_ptr->mb_type[mb_pos - s->mb_stride - 1]; + r->avail_cache[1] = s->cur_pic.mb_type[mb_pos - s->mb_stride - 1]; s->qscale = r->si.quant; cbp = rv34_decode_intra_mb_header(r, intra_types); r->cbp_luma [mb_pos] = cbp; r->cbp_chroma[mb_pos] = cbp >> 16; r->deblock_coefs[mb_pos] = 0xFFFF; - s->cur_pic_ptr->qscale_table[mb_pos] = s->qscale; + 
s->cur_pic.qscale_table[mb_pos] = s->qscale; if(cbp == -1) return -1; diff --git a/libavcodec/rv40.c b/libavcodec/rv40.c index a98e64f5bf..536bbc9623 100644 --- a/libavcodec/rv40.c +++ b/libavcodec/rv40.c @@ -371,7 +371,7 @@ static void rv40_loop_filter(RV34DecContext *r, int row) mb_pos = row * s->mb_stride; for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){ - int mbtype = s->cur_pic_ptr->mb_type[mb_pos]; + int mbtype = s->cur_pic.mb_type[mb_pos]; if(IS_INTRA(mbtype) || IS_SEPARATE_DC(mbtype)) r->cbp_luma [mb_pos] = r->deblock_coefs[mb_pos] = 0xFFFF; if(IS_INTRA(mbtype)) @@ -386,7 +386,7 @@ static void rv40_loop_filter(RV34DecContext *r, int row) unsigned y_to_deblock; int c_to_deblock[2]; - q = s->cur_pic_ptr->qscale_table[mb_pos]; + q = s->cur_pic.qscale_table[mb_pos]; alpha = rv40_alpha_tab[q]; beta = rv40_beta_tab [q]; betaY = betaC = beta * 3; @@ -401,7 +401,7 @@ static void rv40_loop_filter(RV34DecContext *r, int row) if(avail[i]){ int pos = mb_pos + neighbour_offs_x[i] + neighbour_offs_y[i]*s->mb_stride; mvmasks[i] = r->deblock_coefs[pos]; - mbtype [i] = s->cur_pic_ptr->mb_type[pos]; + mbtype [i] = s->cur_pic.mb_type[pos]; cbp [i] = r->cbp_luma[pos]; uvcbp[i][0] = r->cbp_chroma[pos] & 0xF; uvcbp[i][1] = r->cbp_chroma[pos] >> 4; @@ -460,7 +460,7 @@ static void rv40_loop_filter(RV34DecContext *r, int row) } for(j = 0; j < 16; j += 4){ - Y = s->cur_pic_ptr->f->data[0] + mb_x*16 + (row*16 + j) * s->linesize; + Y = s->cur_pic.data[0] + mb_x*16 + (row*16 + j) * s->linesize; for(i = 0; i < 4; i++, Y += 4){ int ij = i + j; int clip_cur = y_to_deblock & (MASK_CUR << ij) ? clip[POS_CUR] : 0; @@ -505,7 +505,7 @@ static void rv40_loop_filter(RV34DecContext *r, int row) } for(k = 0; k < 2; k++){ for(j = 0; j < 2; j++){ - C = s->cur_pic_ptr->f->data[k + 1] + mb_x*8 + (row*8 + j*4) * s->uvlinesize; + C = s->cur_pic.data[k + 1] + mb_x*8 + (row*8 + j*4) * s->uvlinesize; for(i = 0; i < 2; i++, C += 4){ int ij = i + j*2; int clip_cur = c_to_deblock[k] & (MASK_CUR << ij) ? clip[POS_CUR] : 0; diff --git a/libavcodec/wmv2dec.c b/libavcodec/wmv2dec.c index 61e1759449..432d6f7223 100644 --- a/libavcodec/wmv2dec.c +++ b/libavcodec/wmv2dec.c @@ -103,7 +103,7 @@ static int parse_mb_skip(WMV2DecContext *w) int mb_x, mb_y; int coded_mb_count = 0; MpegEncContext *const s = &w->s; - uint32_t *const mb_type = s->cur_pic_ptr->mb_type; + uint32_t *const mb_type = s->cur_pic.mb_type; w->skip_type = get_bits(&s->gb, 2); switch (w->skip_type) { @@ -238,9 +238,8 @@ int ff_wmv2_decode_secondary_picture_header(MpegEncContext *s) if (s->pict_type == AV_PICTURE_TYPE_I) { /* Is filling with zeroes really the right thing to do? */ - memset(s->cur_pic_ptr->mb_type, 0, - sizeof(*s->cur_pic_ptr->mb_type) * - s->mb_height * s->mb_stride); + memset(s->cur_pic.mb_type, 0, + sizeof(*s->cur_pic.mb_type) * s->mb_height * s->mb_stride); if (w->j_type_bit) w->j_type = get_bits1(&s->gb); else -- 2.40.1 _______________________________________________ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org https://ffmpeg.org/mailman/listinfo/ffmpeg-devel To unsubscribe, visit link above, or email ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
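
As a side note for readers not familiar with the mpegvideo internals, the stand-alone sketch below illustrates the access pattern this patch switches to. The struct and field names (Frame, Pic, CachedPic, Ctx) are simplified stand-ins invented for the example, not FFmpeg's actual MpegEncContext/MPVPicture definitions; the point is only that per-frame values such as qscale_table and the plane pointers are copied into the context once per frame and can then be read with one indirection fewer than going through cur_pic_ptr.

/* Illustrative sketch only: hypothetical, simplified structs, not the
 * real FFmpeg layout.  Shows reading per-picture data through fields
 * cached in the context ("cur_pic") instead of dereferencing a pointer
 * to the picture ("cur_pic_ptr") first. */
#include <stdint.h>
#include <stdio.h>

typedef struct Frame {
    uint8_t *data[3];          /* plane pointers */
} Frame;

typedef struct Pic {
    Frame  *f;
    int8_t *qscale_table;      /* per-macroblock quantizer */
} Pic;

typedef struct CachedPic {     /* values copied from Pic/Frame once per frame */
    uint8_t *data[3];
    int8_t  *qscale_table;
} CachedPic;

typedef struct Ctx {
    Pic       *cur_pic_ptr;    /* old path: pointer to the picture */
    CachedPic  cur_pic;        /* new path: cached copies of the hot fields */
} Ctx;

int main(void)
{
    uint8_t luma[16] = {0};
    int8_t  qtab[4]  = {28, 28, 30, 30};
    Frame f = { .data = { luma, NULL, NULL } };
    Pic   p = { .f = &f, .qscale_table = qtab };
    Ctx   s = { .cur_pic_ptr = &p };

    /* done once when the current frame is set up */
    s.cur_pic.data[0]      = p.f->data[0];
    s.cur_pic.qscale_table = p.qscale_table;

    int mb_xy = 2;
    /* before: two dereferences per access */
    int q_old = s.cur_pic_ptr->qscale_table[mb_xy];
    /* after: one dereference, as in s->qscale = s->cur_pic.qscale_table[mb_xy] */
    int q_new = s.cur_pic.qscale_table[mb_xy];

    printf("%d %d\n", q_old, q_new);   /* both print 30 */
    return 0;
}

Build with "cc sketch.c && ./a.out"; both reads return the same value, the difference is purely the number of pointer dereferences on the hot path.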