[FFmpeg-devel] [PATCH 1/4] lavfi: add frame counter into AVFilterLink and use it in filters.
Stefano Sabatini
stefasab at gmail.com
Mon Apr 15 23:10:47 CEST 2013
On date Monday 2013-04-15 18:56:35 +0200, Clément Bœsch encoded:
> ---
>  libavfilter/avfilter.c          |  1 +
>  libavfilter/avfilter.h          |  5 +++++
>  libavfilter/f_select.c          |  2 +-
>  libavfilter/vf_blackdetect.c    |  6 ++----
>  libavfilter/vf_blend.c          |  3 +--
>  libavfilter/vf_crop.c           |  3 +--
>  libavfilter/vf_decimate.c       |  3 +--
>  libavfilter/vf_drawtext.c       | 12 +++++-------
>  libavfilter/vf_fieldmatch.c     |  8 +++-----
>  libavfilter/vf_framestep.c      |  4 ++--
>  libavfilter/vf_geq.c            |  2 +-
>  libavfilter/vf_hue.c            |  3 +--
>  libavfilter/vf_overlay.c        |  2 +-
>  libavfilter/vf_separatefields.c |  6 +++---
>  libavfilter/vf_telecine.c       |  3 +--
>  15 files changed, 29 insertions(+), 34 deletions(-)
>
> diff --git a/libavfilter/avfilter.c b/libavfilter/avfilter.c
> index f392613..43340d1 100644
> --- a/libavfilter/avfilter.c
> +++ b/libavfilter/avfilter.c
> @@ -915,6 +915,7 @@ static int ff_filter_frame_framed(AVFilterLink *link, AVFrame *frame)
>
> pts = out->pts;
> ret = filter_frame(link, out);
> + link->frame_count++;
> link->frame_requested = 0;
> ff_update_link_current_pts(link, pts);
> return ret;
> diff --git a/libavfilter/avfilter.h b/libavfilter/avfilter.h
> index 0b970d0..38bc5ee 100644
> --- a/libavfilter/avfilter.h
> +++ b/libavfilter/avfilter.h
> @@ -718,6 +718,11 @@ struct AVFilterLink {
> * Link processing flags.
> */
> unsigned flags;
> +
> + /**
> + * Number of frames a link has seen
nit: number of frames
Also I don't really like ambiguous expressions like "has seen"; what
about:
    number of frames sent through the link
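That is, something like (just a wording suggestion for the doxy comment,
untested):

    /**
     * Number of frames sent through the link.
     */
    int64_t frame_count;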
> + */
> + int64_t frame_count;
> };
>
> /**
> diff --git a/libavfilter/f_select.c b/libavfilter/f_select.c
> index 9a5666f..37e9a50 100644
> --- a/libavfilter/f_select.c
> +++ b/libavfilter/f_select.c
> @@ -258,6 +258,7 @@ static int select_frame(AVFilterContext *ctx, AVFrame *frame)
> if (isnan(select->var_values[VAR_START_T]))
> select->var_values[VAR_START_T] = TS2D(frame->pts) * av_q2d(inlink->time_base);
>
> + select->var_values[VAR_N ] = inlink->frame_count;
> select->var_values[VAR_PTS] = TS2D(frame->pts);
> select->var_values[VAR_T ] = TS2D(frame->pts) * av_q2d(inlink->time_base);
> select->var_values[VAR_POS] = av_frame_get_pkt_pos(frame) == -1 ? NAN : av_frame_get_pkt_pos(frame);
> @@ -319,7 +320,6 @@ static int select_frame(AVFilterContext *ctx, AVFrame *frame)
> select->var_values[VAR_CONSUMED_SAMPLES_N] += frame->nb_samples;
> }
>
> - select->var_values[VAR_N] += 1.0;
> select->var_values[VAR_PREV_PTS] = select->var_values[VAR_PTS];
> select->var_values[VAR_PREV_T] = select->var_values[VAR_T];
>
> diff --git a/libavfilter/vf_blackdetect.c b/libavfilter/vf_blackdetect.c
> index 9c9e6b4..ddbf082 100644
> --- a/libavfilter/vf_blackdetect.c
> +++ b/libavfilter/vf_blackdetect.c
> @@ -43,7 +43,6 @@ typedef struct {
> double pixel_black_th;
> unsigned int pixel_black_th_i;
>
> - unsigned int frame_count; ///< frame number
> unsigned int nb_black_pixels; ///< number of black pixels counted so far
> } BlackDetectContext;
>
> @@ -149,8 +148,8 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
> picture_black_ratio = (double)blackdetect->nb_black_pixels / (inlink->w * inlink->h);
>
> av_log(ctx, AV_LOG_DEBUG,
> - "frame:%u picture_black_ratio:%f pts:%s t:%s type:%c\n",
> - blackdetect->frame_count, picture_black_ratio,
> + "frame:%"PRId64" picture_black_ratio:%f pts:%s t:%s type:%c\n",
> + inlink->frame_count, picture_black_ratio,
> av_ts2str(picref->pts), av_ts2timestr(picref->pts, &inlink->time_base),
> av_get_picture_type_char(picref->pict_type));
>
> @@ -168,7 +167,6 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
> }
>
> blackdetect->last_picref_pts = picref->pts;
> - blackdetect->frame_count++;
> blackdetect->nb_black_pixels = 0;
> return ff_filter_frame(inlink->dst->outputs[0], picref);
> }
> diff --git a/libavfilter/vf_blend.c b/libavfilter/vf_blend.c
> index b2af5bf..93b68be 100644
> --- a/libavfilter/vf_blend.c
> +++ b/libavfilter/vf_blend.c
> @@ -81,7 +81,6 @@ typedef struct {
> struct FFBufQueue queue_bottom;
> int hsub, vsub; ///< chroma subsampling values
> int frame_requested;
> - int framenum;
> char *all_expr;
> enum BlendMode all_mode;
> double all_opacity;
> @@ -382,7 +381,7 @@ static void blend_frame(AVFilterContext *ctx,
> uint8_t *bottom = bottom_buf->data[plane];
>
> param = &b->params[plane];
> - param->values[VAR_N] = b->framenum++;
> + param->values[VAR_N] = inlink->frame_count;
> param->values[VAR_T] = dst_buf->pts == AV_NOPTS_VALUE ? NAN : dst_buf->pts * av_q2d(inlink->time_base);
> param->values[VAR_W] = outw;
> param->values[VAR_H] = outh;
> diff --git a/libavfilter/vf_crop.c b/libavfilter/vf_crop.c
> index 11ed375..5d7dd2c 100644
> --- a/libavfilter/vf_crop.c
> +++ b/libavfilter/vf_crop.c
> @@ -259,6 +259,7 @@ static int filter_frame(AVFilterLink *link, AVFrame *frame)
> frame->width = crop->w;
> frame->height = crop->h;
>
> + crop->var_values[VAR_N] = link->frame_count;
> crop->var_values[VAR_T] = frame->pts == AV_NOPTS_VALUE ?
> NAN : frame->pts * av_q2d(link->time_base);
> crop->var_values[VAR_POS] = av_frame_get_pkt_pos(frame) == -1 ?
> @@ -299,8 +300,6 @@ static int filter_frame(AVFilterLink *link, AVFrame *frame)
> frame->data[3] += crop->x * crop->max_step[3];
> }
>
> - crop->var_values[VAR_N] += 1.0;
> -
> return ff_filter_frame(link->dst->outputs[0], frame);
> }
>
> diff --git a/libavfilter/vf_decimate.c b/libavfilter/vf_decimate.c
> index 55dd5a8..9548531 100644
> --- a/libavfilter/vf_decimate.c
> +++ b/libavfilter/vf_decimate.c
> @@ -40,7 +40,6 @@ typedef struct {
> int fid; ///< current frame id in the queue
> int filled; ///< 1 if the queue is filled, 0 otherwise
> AVFrame *last; ///< last frame from the previous queue
> - int64_t frame_count; ///< output frame counter
> AVFrame **clean_src; ///< frame queue for the clean source
> int got_frame[2]; ///< frame request flag for each input stream
> double ts_unit; ///< timestamp units for the output frames
> @@ -215,7 +214,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
> av_frame_free(&frame);
> frame = dm->clean_src[i];
> }
> - frame->pts = dm->frame_count++ * dm->ts_unit;
> + frame->pts = outlink->frame_count * dm->ts_unit;
> ret = ff_filter_frame(outlink, frame);
> if (ret < 0)
> break;
> diff --git a/libavfilter/vf_drawtext.c b/libavfilter/vf_drawtext.c
> index 9e04341..05d4202 100644
> --- a/libavfilter/vf_drawtext.c
> +++ b/libavfilter/vf_drawtext.c
> @@ -164,7 +164,6 @@ typedef struct {
> AVRational tc_rate; ///< frame rate for timecode
> AVTimecode tc; ///< timecode context
> int tc24hmax; ///< 1 if timecode is wrapped to 24 hours, 0 otherwise
> - int frame_id;
> int reload; ///< reload text file for each frame
> } DrawTextContext;
>
> @@ -816,8 +815,8 @@ static int draw_glyphs(DrawTextContext *dtext, AVFrame *frame,
> return 0;
> }
>
> -static int draw_text(AVFilterContext *ctx, AVFrame *frame,
> - int width, int height)
> +static int draw_text(AVFilterContext *ctx, AVFilterLink *inlink,
> + AVFrame *frame, int width, int height)
Nit: you can simply do
    inlink = ctx->inputs[0];
inside draw_text(), no need to change the signature.
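I mean something like this untested sketch, keeping the old prototype:

    static int draw_text(AVFilterContext *ctx, AVFrame *frame,
                         int width, int height)
    {
        DrawTextContext *dtext = ctx->priv;
        /* fetch the input link locally instead of passing it in */
        AVFilterLink *inlink = ctx->inputs[0];
        ...
        av_timecode_make_string(&dtext->tc, tcbuf, inlink->frame_count);
    }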
> {
> DrawTextContext *dtext = ctx->priv;
> uint32_t code = 0, prev_code = 0;
> @@ -857,7 +856,7 @@ static int draw_text(AVFilterContext *ctx, AVFrame *frame,
>
> if (dtext->tc_opt_string) {
> char tcbuf[AV_TIMECODE_STR_SIZE];
> - av_timecode_make_string(&dtext->tc, tcbuf, dtext->frame_id++);
> + av_timecode_make_string(&dtext->tc, tcbuf, inlink->frame_count);
> av_bprint_clear(bp);
> av_bprintf(bp, "%s%s", dtext->text, tcbuf);
> }
> @@ -983,18 +982,17 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
> if ((ret = load_textfile(ctx)) < 0)
> return ret;
>
> + dtext->var_values[VAR_N] = inlink->frame_count;
> dtext->var_values[VAR_T] = frame->pts == AV_NOPTS_VALUE ?
> NAN : frame->pts * av_q2d(inlink->time_base);
>
> - draw_text(ctx, frame, frame->width, frame->height);
> + draw_text(ctx, inlink, frame, frame->width, frame->height);
>
> av_log(ctx, AV_LOG_DEBUG, "n:%d t:%f text_w:%d text_h:%d x:%d y:%d\n",
> (int)dtext->var_values[VAR_N], dtext->var_values[VAR_T],
> (int)dtext->var_values[VAR_TEXT_W], (int)dtext->var_values[VAR_TEXT_H],
> dtext->x, dtext->y);
>
> - dtext->var_values[VAR_N] += 1.0;
> -
> return ff_filter_frame(outlink, frame);
> }
>
> diff --git a/libavfilter/vf_fieldmatch.c b/libavfilter/vf_fieldmatch.c
> index ff803f4..3495895 100644
> --- a/libavfilter/vf_fieldmatch.c
> +++ b/libavfilter/vf_fieldmatch.c
> @@ -77,7 +77,6 @@ typedef struct {
>
> AVFrame *prv, *src, *nxt; ///< main sliding window of 3 frames
> AVFrame *prv2, *src2, *nxt2; ///< sliding window of the optional second stream
> - int64_t frame_count; ///< output frame counter
> int got_frame[2]; ///< frame request flag for each input stream
> int hsub, vsub; ///< chroma subsampling values
> uint32_t eof; ///< bitmask for end of stream
> @@ -738,7 +737,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
>
> /* scene change check */
> if (fm->combmatch == COMBMATCH_SC) {
> - if (fm->lastn == fm->frame_count - 1) {
> + if (fm->lastn == outlink->frame_count - 1) {
> if (fm->lastscdiff > fm->scthresh)
> sc = 1;
> } else if (luma_abs_diff(fm->prv, fm->src) > fm->scthresh) {
> @@ -746,7 +745,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
> }
>
> if (!sc) {
> - fm->lastn = fm->frame_count;
> + fm->lastn = outlink->frame_count;
> fm->lastscdiff = luma_abs_diff(fm->src, fm->nxt);
> sc = fm->lastscdiff > fm->scthresh;
> }
> @@ -805,10 +804,9 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
> dst->interlaced_frame = combs[match] >= fm->combpel;
> if (dst->interlaced_frame) {
> av_log(ctx, AV_LOG_WARNING, "Frame #%"PRId64" at %s is still interlaced\n",
> - fm->frame_count, av_ts2timestr(in->pts, &inlink->time_base));
> + outlink->frame_count, av_ts2timestr(in->pts, &inlink->time_base));
> dst->top_field_first = field;
> }
> - fm->frame_count++;
>
> av_log(ctx, AV_LOG_DEBUG, "SC:%d | COMBS: %3d %3d %3d %3d %3d (combpel=%d)"
> " match=%d combed=%s\n", sc, combs[0], combs[1], combs[2], combs[3], combs[4],
> diff --git a/libavfilter/vf_framestep.c b/libavfilter/vf_framestep.c
> index bd079cc..fb20411 100644
> --- a/libavfilter/vf_framestep.c
> +++ b/libavfilter/vf_framestep.c
> @@ -30,7 +30,7 @@
>
> typedef struct {
> const AVClass *class;
> - int frame_step, frame_count;
> + int frame_step;
> } FrameStepContext;
>
> #define OFFSET(x) offsetof(FrameStepContext, x)
> @@ -64,7 +64,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *ref)
> {
> FrameStepContext *framestep = inlink->dst->priv;
>
> - if (!(framestep->frame_count++ % framestep->frame_step)) {
> + if (!(inlink->frame_count % framestep->frame_step)) {
Seems wrong: if you use inlink->frame_count it will already be set to 1
here for the first frame, while the old counter was 0 for the first
frame (and post-incremented).
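To illustrate (untested, and only assuming the link counter really is
already 1 when this callback runs for the first frame): the old code kept
the first frame because !(0 % frame_step) is true, while
!(1 % frame_step) would drop it for any frame_step > 1. Keeping the old
behaviour would then need something like:

    /* hypothetical offset, only needed if the counter starts at 1 here */
    if (!((inlink->frame_count - 1) % framestep->frame_step)) {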
> return ff_filter_frame(inlink->dst->outputs[0], ref);
> } else {
> av_frame_free(&ref);
> diff --git a/libavfilter/vf_geq.c b/libavfilter/vf_geq.c
> index 4c5ed7a..5e2960b 100644
> --- a/libavfilter/vf_geq.c
> +++ b/libavfilter/vf_geq.c
> @@ -163,7 +163,7 @@ static int geq_filter_frame(AVFilterLink *inlink, AVFrame *in)
> AVFilterLink *outlink = inlink->dst->outputs[0];
> AVFrame *out;
> double values[VAR_VARS_NB] = {
> - [VAR_N] = geq->framenum++,
> + [VAR_N] = inlink->frame_count,
Same here.
> [VAR_T] = in->pts == AV_NOPTS_VALUE ? NAN : in->pts * av_q2d(inlink->time_base),
> };
>
> diff --git a/libavfilter/vf_hue.c b/libavfilter/vf_hue.c
> index 9b2ecd4..a1280be 100644
> --- a/libavfilter/vf_hue.c
> +++ b/libavfilter/vf_hue.c
> @@ -252,6 +252,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
> av_frame_copy_props(outpic, inpic);
> }
>
> + hue->var_values[VAR_N] = inlink->frame_count;
and here
> hue->var_values[VAR_T] = TS2T(inpic->pts, inlink->time_base);
> hue->var_values[VAR_PTS] = TS2D(inpic->pts);
>
> @@ -281,8 +282,6 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
>
> compute_sin_and_cos(hue);
>
> - hue->var_values[VAR_N] += 1;
> -
> if (!direct) {
> av_image_copy_plane(outpic->data[0], outpic->linesize[0],
> inpic->data[0], inpic->linesize[0],
> diff --git a/libavfilter/vf_overlay.c b/libavfilter/vf_overlay.c
> index dab6707..c7a204b 100644
> --- a/libavfilter/vf_overlay.c
> +++ b/libavfilter/vf_overlay.c
> @@ -600,6 +600,7 @@ static int try_filter_frame(AVFilterContext *ctx, AVFrame *mainpic)
> if (over->eval_mode == EVAL_MODE_FRAME) {
> int64_t pos = av_frame_get_pkt_pos(mainpic);
>
> + over->var_values[VAR_N] = inlink->frame_count;
and here as well
[...]
--
FFmpeg = Formidable and Foolish Mere Programmable Enlightened Generator