[FFmpeg-cvslog] lavc/vvc: Remove left shifts of negative values
Frank Plowman
git at videolan.org
Tue Jan 23 16:17:09 EET 2024
ffmpeg | branch: master | Frank Plowman <post at frankplowman.com> | Sat Jan 20 14:51:04 2024 +0000| [8157b5d4051d52d9bcabda9dcd5f012d070cba71] | committer: James Almer
lavc/vvc: Remove left shifts of negative values
VVC specifies << as an arithmetic left shift, i.e. x << y is equivalent
to x * pow2(y). C's <<, on the other hand, has undefined behaviour (UB)
if x is negative. This patch removes all UB resulting from this, mostly
by replacing x << y with x * (1 << y), but there are also a couple of
places where the order of operations was changed instead.
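For illustration, a minimal standalone sketch (not part of the patch) of the
pattern the commit applies; the helper name scale_mv_component is hypothetical:

#include <stdio.h>

/* Hypothetical helper, for illustration only.
 * For negative x, "x << shift" is undefined behaviour in C, while
 * "x * (1 << shift)" is well-defined (barring overflow) and compilers
 * typically emit the same shift instruction for it. */
static int scale_mv_component(int x, int shift)
{
    return x * (1 << shift);
}

int main(void)
{
    /* -3 * pow2(4) == -48, matching the arithmetic shift VVC specifies */
    printf("%d\n", scale_mv_component(-3, 4));
    return 0;
}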
Signed-off-by: Frank Plowman <post at frankplowman.com>
Signed-off-by: James Almer <jamrial at gmail.com>
> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=8157b5d4051d52d9bcabda9dcd5f012d070cba71
---
libavcodec/vvc/vvc_ctu.c | 4 ++--
libavcodec/vvc/vvc_filter.c | 4 ++--
libavcodec/vvc/vvc_inter.c | 24 +++++++++----------
libavcodec/vvc/vvc_inter_template.c | 4 ++--
libavcodec/vvc/vvc_mvs.c | 46 ++++++++++++++++++-------------------
5 files changed, 41 insertions(+), 41 deletions(-)
diff --git a/libavcodec/vvc/vvc_ctu.c b/libavcodec/vvc/vvc_ctu.c
index dd3c8884cb..307bc3490e 100644
--- a/libavcodec/vvc/vvc_ctu.c
+++ b/libavcodec/vvc/vvc_ctu.c
@@ -1554,8 +1554,8 @@ static void mvp_add_difference(MotionInfo *mi, const int num_cp_mv,
if (mi->pred_flag & mask) {
for (int j = 0; j < num_cp_mv; j++) {
const Mv *mvd = &mvds[i][j];
- mi->mv[i][j].x += mvd->x << amvr_shift;
- mi->mv[i][j].y += mvd->y << amvr_shift;
+ mi->mv[i][j].x += mvd->x * (1 << amvr_shift);
+ mi->mv[i][j].y += mvd->y * (1 << amvr_shift);
}
}
}
diff --git a/libavcodec/vvc/vvc_filter.c b/libavcodec/vvc/vvc_filter.c
index e5cd89b8a3..cebc02fd9f 100644
--- a/libavcodec/vvc/vvc_filter.c
+++ b/libavcodec/vvc/vvc_filter.c
@@ -1115,12 +1115,12 @@ static void alf_prepare_buffer(VVCFrameContext *fc, uint8_t *_dst, const uint8_t
copy_ctb(_dst, _src, width << ps, height, dst_stride, src_stride);
//top
- src = fc->tab.alf_pixel_buffer_h[c_idx][1] + (((border_pixels * (y_ctb - 1)) * w + x) << ps);
+ src = fc->tab.alf_pixel_buffer_h[c_idx][1] + (((border_pixels * w) << ps) * (y_ctb - 1) + (x << ps));
dst = _dst - border_pixels * dst_stride;
alf_fill_border_h(dst, dst_stride, src, w << ps, _dst, width, border_pixels, ps, edges[TOP]);
//bottom
- src = fc->tab.alf_pixel_buffer_h[c_idx][0] + ((border_pixels * (y_ctb + 1) * w + x) << ps);
+ src = fc->tab.alf_pixel_buffer_h[c_idx][0] + (((border_pixels * w) << ps) * (y_ctb + 1) + (x << ps));
dst = _dst + height * dst_stride;
alf_fill_border_h(dst, dst_stride, src, w << ps, _dst + (height - 1) * dst_stride, width, border_pixels, ps, edges[BOTTOM]);
diff --git a/libavcodec/vvc/vvc_inter.c b/libavcodec/vvc/vvc_inter.c
index 9b2925f60c..e05f3db93e 100644
--- a/libavcodec/vvc/vvc_inter.c
+++ b/libavcodec/vvc/vvc_inter.c
@@ -190,7 +190,7 @@ static void luma_mc(VVCLocalContext *lc, int16_t *dst, const AVFrame *ref, const
x_off += mv->x >> 4;
y_off += mv->y >> 4;
- src += y_off * src_stride + (x_off << fc->ps.sps->pixel_shift);
+ src += y_off * src_stride + (x_off * (1 << fc->ps.sps->pixel_shift));
EMULATED_EDGE_LUMA(lc->edge_emu_buffer, &src, &src_stride, x_off, y_off);
@@ -213,7 +213,7 @@ static void chroma_mc(VVCLocalContext *lc, int16_t *dst, const AVFrame *ref, con
x_off += mv->x >> (4 + hs);
y_off += mv->y >> (4 + vs);
- src += y_off * src_stride + (x_off << fc->ps.sps->pixel_shift);
+ src += y_off * src_stride + (x_off * (1 << fc->ps.sps->pixel_shift));
EMULATED_EDGE_CHROMA(lc->edge_emu_buffer, &src, &src_stride, x_off, y_off);
fc->vvcdsp.inter.put[CHROMA][idx][!!my][!!mx](dst, src, src_stride, block_h, hf, vf, block_w);
@@ -237,7 +237,7 @@ static void luma_mc_uni(VVCLocalContext *lc, uint8_t *dst, const ptrdiff_t dst_s
x_off += mv->x >> 4;
y_off += mv->y >> 4;
- src += y_off * src_stride + (x_off << fc->ps.sps->pixel_shift);
+ src += y_off * src_stride + (x_off * (1 << fc->ps.sps->pixel_shift));
EMULATED_EDGE_LUMA(lc->edge_emu_buffer, &src, &src_stride, x_off, y_off);
@@ -270,7 +270,7 @@ static void luma_mc_bi(VVCLocalContext *lc, uint8_t *dst, const ptrdiff_t dst_st
const int ox = x_off + (mv->x >> 4);
const int oy = y_off + (mv->y >> 4);
ptrdiff_t src_stride = ref[i]->linesize[0];
- const uint8_t *src = ref[i]->data[0] + oy * src_stride + (ox << fc->ps.sps->pixel_shift);
+ const uint8_t *src = ref[i]->data[0] + oy * src_stride + (ox * (1 << fc->ps.sps->pixel_shift));
const int8_t *hf = ff_vvc_inter_luma_filters[hf_idx][mx];
const int8_t *vf = ff_vvc_inter_luma_filters[vf_idx][my];
@@ -314,7 +314,7 @@ static void chroma_mc_uni(VVCLocalContext *lc, uint8_t *dst, const ptrdiff_t dst
x_off += mv->x >> (4 + hs);
y_off += mv->y >> (4 + vs);
- src += y_off * src_stride + (x_off << fc->ps.sps->pixel_shift);
+ src += y_off * src_stride + (x_off * (1 << fc->ps.sps->pixel_shift));
EMULATED_EDGE_CHROMA(lc->edge_emu_buffer, &src, &src_stride, x_off, y_off);
@@ -348,7 +348,7 @@ static void chroma_mc_bi(VVCLocalContext *lc, uint8_t *dst, const ptrdiff_t dst_
const int ox = x_off + (mv->x >> (4 + hs));
const int oy = y_off + (mv->y >> (4 + vs));
ptrdiff_t src_stride = ref[i]->linesize[c_idx];
- const uint8_t *src = ref[i]->data[c_idx] + oy * src_stride + (ox << fc->ps.sps->pixel_shift);
+ const uint8_t *src = ref[i]->data[c_idx] + oy * src_stride + (ox * (1 << fc->ps.sps->pixel_shift));
const int8_t *hf = ff_vvc_inter_chroma_filters[hf_idx][mx];
const int8_t *vf = ff_vvc_inter_chroma_filters[vf_idx][my];
if (dmvr_flag) {
@@ -386,7 +386,7 @@ static void luma_prof_uni(VVCLocalContext *lc, uint8_t *dst, const ptrdiff_t dst
x_off += mv->x >> 4;
y_off += mv->y >> 4;
- src += y_off * src_stride + (x_off << fc->ps.sps->pixel_shift);
+ src += y_off * src_stride + (x_off * (1 << fc->ps.sps->pixel_shift));
EMULATED_EDGE_LUMA(lc->edge_emu_buffer, &src, &src_stride, x_off, y_off);
if (cb_prof_flag) {
@@ -424,7 +424,7 @@ static void luma_prof_bi(VVCLocalContext *lc, uint8_t *dst, const ptrdiff_t dst_
const int ox = x_off + (mv->x >> 4);
const int oy = y_off + (mv->y >> 4);
ptrdiff_t src_stride = ref[i]->linesize[0];
- const uint8_t *src = ref[i]->data[0] + oy * src_stride + (ox << fc->ps.sps->pixel_shift);
+ const uint8_t *src = ref[i]->data[0] + oy * src_stride + (ox * (1 << fc->ps.sps->pixel_shift));
const int8_t *hf = ff_vvc_inter_luma_filters[2][mx];
const int8_t *vf = ff_vvc_inter_luma_filters[2][my];
@@ -654,7 +654,7 @@ static int parametric_mv_refine(const int *sad, const int stride)
else if (sad_plus == sad_center)
dmvc = 8;
else {
- int num = ( sad_minus - sad_plus ) << 4;
+ int num = ( sad_minus - sad_plus ) * (1 << 4);
int sign_num = 0;
int quotient = 0;
int counter = 3;
@@ -704,7 +704,7 @@ static void dmvr_mv_refine(VVCLocalContext *lc, MvField *mvf, MvField *orig_mv,
const int ox = x_off + (mv->x >> 4) - sr_range;
const int oy = y_off + (mv->y >> 4) - sr_range;
ptrdiff_t src_stride = ref[i]->linesize[LUMA];
- const uint8_t *src = ref[i]->data[LUMA] + oy * src_stride + (ox << fc->ps.sps->pixel_shift);
+ const uint8_t *src = ref[i]->data[LUMA] + oy * src_stride + (ox * (1 << fc->ps.sps->pixel_shift));
EMULATED_EDGE_BILINEAR(lc->edge_emu_buffer, &src, &src_stride, ox, oy);
fc->vvcdsp.inter.dmvr[!!my][!!mx](tmp[i], src, src_stride, pred_h, mx, my, pred_w);
}
@@ -728,8 +728,8 @@ static void dmvr_mv_refine(VVCLocalContext *lc, MvField *mvf, MvField *orig_mv,
}
}
}
- dmv[0] = (min_dx - sr_range) << 4;
- dmv[1] = (min_dy - sr_range) << 4;
+ dmv[0] = (min_dx - sr_range) * (1 << 4);
+ dmv[1] = (min_dy - sr_range) * (1 << 4);
if (min_dx != 0 && min_dx != 4 && min_dy != 0 && min_dy != 4) {
dmv[0] += parametric_mv_refine(&sad[min_dy][min_dx], 1);
dmv[1] += parametric_mv_refine(&sad[min_dy][min_dx], SAD_ARRAY_SIZE);
diff --git a/libavcodec/vvc/vvc_inter_template.c b/libavcodec/vvc/vvc_inter_template.c
index b67b66a2dc..7160907778 100644
--- a/libavcodec/vvc/vvc_inter_template.c
+++ b/libavcodec/vvc/vvc_inter_template.c
@@ -817,8 +817,8 @@ static void FUNC(derive_bdof_vx_vy)(const int16_t *_src0, const int16_t *_src1,
src0 += MAX_PB_SIZE;
src1 += MAX_PB_SIZE;
}
- *vx = sgx2 > 0 ? av_clip((sgxdi << 2) >> av_log2(sgx2) , -thres + 1, thres - 1) : 0;
- *vy = sgy2 > 0 ? av_clip(((sgydi << 2) - ((*vx * sgxgy) >> 1)) >> av_log2(sgy2), -thres + 1, thres - 1) : 0;
+ *vx = sgx2 > 0 ? av_clip((sgxdi * (1 << 2)) >> av_log2(sgx2) , -thres + 1, thres - 1) : 0;
+ *vy = sgy2 > 0 ? av_clip(((sgydi * (1 << 2)) - ((*vx * sgxgy) >> 1)) >> av_log2(sgy2), -thres + 1, thres - 1) : 0;
}
static void FUNC(apply_bdof_min_block)(pixel* dst, const ptrdiff_t dst_stride, const int16_t *src0, const int16_t *src1,
diff --git a/libavcodec/vvc/vvc_mvs.c b/libavcodec/vvc/vvc_mvs.c
index 81e73fe746..a3861b5b1a 100644
--- a/libavcodec/vvc/vvc_mvs.c
+++ b/libavcodec/vvc/vvc_mvs.c
@@ -60,7 +60,7 @@ static av_always_inline void mv_compression(Mv *motion)
for (int i = 0; i < 2; i++) {
const int s = mv[i] >> 17;
const int f = av_log2((mv[i] ^ s) | 31) - 4;
- const int mask = (-1 << f) >> 1;
+ const int mask = (-1 * (1 << f)) >> 1;
const int round = (1 << f) >> 2;
mv[i] = (mv[i] + round) & mask;
}
@@ -342,17 +342,17 @@ static void init_subblock_params(SubblockParams *sp, const MotionInfo* mi,
const int log2_cbh = av_log2(cb_height);
const Mv* cp_mv = mi->mv[lx];
const int num_cp_mv = mi->motion_model_idc + 1;
- sp->d_hor_x = (cp_mv[1].x - cp_mv[0].x) << (MAX_CU_DEPTH - log2_cbw);
- sp->d_ver_x = (cp_mv[1].y - cp_mv[0].y) << (MAX_CU_DEPTH - log2_cbw);
+ sp->d_hor_x = (cp_mv[1].x - cp_mv[0].x) * (1 << (MAX_CU_DEPTH - log2_cbw));
+ sp->d_ver_x = (cp_mv[1].y - cp_mv[0].y) * (1 << (MAX_CU_DEPTH - log2_cbw));
if (num_cp_mv == 3) {
- sp->d_hor_y = (cp_mv[2].x - cp_mv[0].x) << (MAX_CU_DEPTH - log2_cbh);
- sp->d_ver_y = (cp_mv[2].y - cp_mv[0].y) << (MAX_CU_DEPTH - log2_cbh);
+ sp->d_hor_y = (cp_mv[2].x - cp_mv[0].x) * (1 << (MAX_CU_DEPTH - log2_cbh));
+ sp->d_ver_y = (cp_mv[2].y - cp_mv[0].y) * (1 << (MAX_CU_DEPTH - log2_cbh));
} else {
sp->d_hor_y = -sp->d_ver_x;
sp->d_ver_y = sp->d_hor_x;
}
- sp->mv_scale_hor = (cp_mv[0].x) << MAX_CU_DEPTH;
- sp->mv_scale_ver = (cp_mv[0].y) << MAX_CU_DEPTH;
+ sp->mv_scale_hor = (cp_mv[0].x) * (1 << MAX_CU_DEPTH);
+ sp->mv_scale_ver = (cp_mv[0].y) * (1 << MAX_CU_DEPTH);
sp->cb_width = cb_width;
sp->cb_height = cb_height;
sp->is_fallback = is_fallback_mode(sp, mi->pred_flag);
@@ -368,8 +368,8 @@ static void derive_subblock_diff_mvs(const VVCLocalContext *lc, PredictionUnit*
for (int x = 0; x < AFFINE_MIN_BLOCK_SIZE; x++) {
for (int y = 0; y < AFFINE_MIN_BLOCK_SIZE; y++) {
Mv diff;
- diff.x = x * (sp->d_hor_x << 2) + y * (sp->d_hor_y << 2) - pos_offset_x;
- diff.y = x * (sp->d_ver_x << 2) + y * (sp->d_ver_y << 2) - pos_offset_y;
+ diff.x = x * (sp->d_hor_x * (1 << 2)) + y * (sp->d_hor_y * (1 << 2)) - pos_offset_x;
+ diff.y = x * (sp->d_ver_x * (1 << 2)) + y * (sp->d_ver_y * (1 << 2)) - pos_offset_y;
ff_vvc_round_mv(&diff, 0, 8);
pu->diff_mv_x[lx][AFFINE_MIN_BLOCK_SIZE * y + x] = av_clip(diff.x, -dmv_limit + 1, dmv_limit - 1);
pu->diff_mv_y[lx][AFFINE_MIN_BLOCK_SIZE * y + x] = av_clip(diff.y, -dmv_limit + 1, dmv_limit - 1);
@@ -467,8 +467,8 @@ void ff_vvc_store_gpm_mvf(const VVCLocalContext *lc, const PredictionUnit *pu)
for (int y = 0; y < cu->cb_height; y += block_size) {
for (int x = 0; x < cu->cb_width; x += block_size) {
- const int motion_idx = (((x + offset_x) << 1) + 5) * displacement_x +
- (((y + offset_y) << 1) + 5) * displacement_y;
+ const int motion_idx = (((x + offset_x) * (1 << 1)) + 5) * displacement_x +
+ (((y + offset_y) * (1 << 1)) + 5) * displacement_y;
const int s_type = FFABS(motion_idx) < 32 ? 2 : (motion_idx <= 0 ? (1 - is_flip) : is_flip);
const int pred_flag = pu->gpm_mv[0].pred_flag | pu->gpm_mv[1].pred_flag;
const int x0 = cu->x0 + x;
@@ -867,14 +867,14 @@ static void affine_cps_from_nb(const VVCLocalContext *lc,
l = &TAB_CP_MV(lx, x_nb, y_nb);
r = &TAB_CP_MV(lx, x_nb + nbw - 1, y_nb) + 1;
}
- mv_scale_hor = l->x << 7;
- mv_scale_ver = l->y << 7;
- d_hor_x = (r->x - l->x) << (7 - log2_nbw);
- d_ver_x = (r->y - l->y) << (7 - log2_nbw);
+ mv_scale_hor = l->x * (1 << 7);
+ mv_scale_ver = l->y * (1 << 7);
+ d_hor_x = (r->x - l->x) * (1 << (7 - log2_nbw));
+ d_ver_x = (r->y - l->y) * (1 << (7 - log2_nbw));
if (!is_ctb_boundary && motion_model_idc_nb == MOTION_6_PARAMS_AFFINE) {
const Mv* lb = &TAB_CP_MV(lx, x_nb, y_nb + nbh - 1) + 2;
- d_hor_y = (lb->x - l->x) << (7 - log2_nbh);
- d_ver_y = (lb->y - l->y) << (7 - log2_nbh);
+ d_hor_y = (lb->x - l->x) * (1 << (7 - log2_nbh));
+ d_ver_y = (lb->y - l->y) * (1 << (7 - log2_nbh));
} else {
d_hor_y = -d_ver_x;
d_ver_y = d_hor_x;
@@ -1242,8 +1242,8 @@ static int affine_merge_const6(const MvField* c0, const MvField* c2, const int c
mi->pred_flag |= mask;
mi->ref_idx[i] = c0->ref_idx[i];
mi->mv[i][0] = c0->mv[i];
- mi->mv[i][1].x = (c0->mv[i].x << 7) + ((c2->mv[i].y - c0->mv[i].y) << shift);
- mi->mv[i][1].y = (c0->mv[i].y << 7) - ((c2->mv[i].x - c0->mv[i].x) << shift);
+ mi->mv[i][1].x = (c0->mv[i].x * (1 << 7)) + ((c2->mv[i].y - c0->mv[i].y) * (1 << shift));
+ mi->mv[i][1].y = (c0->mv[i].y * (1 << 7)) - ((c2->mv[i].x - c0->mv[i].x) * (1 << shift));
ff_vvc_round_mv(&mi->mv[i][1], 0, 7);
ff_vvc_clip_mv(&mi->mv[i][1]);
}
@@ -1736,11 +1736,11 @@ void ff_vvc_round_mv(Mv *mv, const int lshift, const int rshift)
{
if (rshift) {
const int offset = 1 << (rshift - 1);
- mv->x = ((mv->x + offset - (mv->x >= 0)) >> rshift) << lshift;
- mv->y = ((mv->y + offset - (mv->y >= 0)) >> rshift) << lshift;
+ mv->x = ((mv->x + offset - (mv->x >= 0)) >> rshift) * (1 << lshift);
+ mv->y = ((mv->y + offset - (mv->y >= 0)) >> rshift) * (1 << lshift);
} else {
- mv->x = mv->x << lshift;
- mv->y = mv->y << lshift;
+ mv->x = mv->x * (1 << lshift);
+ mv->y = mv->y * (1 << lshift);
}
}