[FFmpeg-devel] [PATCH 1/7] lavfi/buffersink: set AVFrame.time_base
Anton Khirnov
anton at khirnov.net
Mon Sep 23 18:01:39 EEST 2024
So the caller does not need to call av_buffersink_get_time_base()
separately.
---
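For reference, a minimal caller-side sketch of what this enables (not part of
the patch; sink_ctx and the graph setup are assumed to exist): the time base
can be read straight off the returned frame instead of querying the sink.

#include <stdio.h>
#include <libavfilter/avfilter.h>
#include <libavfilter/buffersink.h>
#include <libavutil/frame.h>
#include <libavutil/rational.h>

/* sink_ctx is an already-configured buffersink filter (setup omitted). */
static int drain_sink(AVFilterContext *sink_ctx)
{
    AVFrame *frame = av_frame_alloc();
    int ret;

    if (!frame)
        return AVERROR(ENOMEM);

    /* With this patch the frame already carries its time base, so a
     * separate av_buffersink_get_time_base() call is no longer needed. */
    while ((ret = av_buffersink_get_frame(sink_ctx, frame)) >= 0) {
        if (frame->pts != AV_NOPTS_VALUE)
            printf("frame at %f s\n", frame->pts * av_q2d(frame->time_base));
        av_frame_unref(frame);
    }
    av_frame_free(&frame);

    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}
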
doc/APIchanges | 3 +++
doc/examples/transcode.c | 1 -
fftools/ffmpeg_filter.c | 2 --
fftools/ffplay.c | 10 ++--------
libavdevice/lavfi.c | 4 ++--
libavfilter/buffersink.c | 21 ++++++++++++++++-----
libavfilter/version.h | 2 +-
tools/uncoded_frame.c | 2 +-
8 files changed, 25 insertions(+), 20 deletions(-)
diff --git a/doc/APIchanges b/doc/APIchanges
index 2273c3bce7..b392c756d7 100644
--- a/doc/APIchanges
+++ b/doc/APIchanges
@@ -2,6 +2,9 @@ The last version increases of all libraries were on 2024-03-07
API changes, most recent first:
+2024-09-xx - xxxxxxxxxx - lavfi 10.4.100
+ Buffersink now sets AVFrame.time_base on the frames it outputs.
+
2024-09-23 - xxxxxxxxxx - lavc 61.18.100 - avcodec.h
Add a new flag AV_CODEC_EXPORT_DATA_ENHANCEMENTS for export_side_data.
diff --git a/doc/examples/transcode.c b/doc/examples/transcode.c
index cbe5088ef6..07d9ee9152 100644
--- a/doc/examples/transcode.c
+++ b/doc/examples/transcode.c
@@ -500,7 +500,6 @@ static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index)
break;
}
- filter->filtered_frame->time_base = av_buffersink_get_time_base(filter->buffersink_ctx);;
filter->filtered_frame->pict_type = AV_PICTURE_TYPE_NONE;
ret = encode_write_frame(stream_index, 0);
av_frame_unref(filter->filtered_frame);
diff --git a/fftools/ffmpeg_filter.c b/fftools/ffmpeg_filter.c
index 529e631781..81c4911b03 100644
--- a/fftools/ffmpeg_filter.c
+++ b/fftools/ffmpeg_filter.c
@@ -2487,8 +2487,6 @@ static int fg_output_step(OutputFilterPriv *ofp, FilterGraphThread *fgt,
return 0;
}
- frame->time_base = av_buffersink_get_time_base(filter);
-
if (debug_ts)
av_log(ofp, AV_LOG_INFO, "filter_raw -> pts:%s pts_time:%s time_base:%d/%d\n",
av_ts2str(frame->pts), av_ts2timestr(frame->pts, &frame->time_base),
diff --git a/fftools/ffplay.c b/fftools/ffplay.c
index 60d8874eab..4ea48e11bb 100644
--- a/fftools/ffplay.c
+++ b/fftools/ffplay.c
@@ -2076,7 +2076,6 @@ static int audio_thread(void *arg)
int last_serial = -1;
int reconfigure;
int got_frame = 0;
- AVRational tb;
int ret = 0;
if (!frame)
@@ -2087,8 +2086,6 @@ static int audio_thread(void *arg)
goto the_end;
if (got_frame) {
- tb = (AVRational){1, frame->sample_rate};
-
reconfigure =
cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.ch_layout.nb_channels,
frame->format, frame->ch_layout.nb_channels) ||
@@ -2121,11 +2118,10 @@ static int audio_thread(void *arg)
while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
FrameData *fd = frame->opaque_ref ? (FrameData*)frame->opaque_ref->data : NULL;
- tb = av_buffersink_get_time_base(is->out_audio_filter);
if (!(af = frame_queue_peek_writable(&is->sampq)))
goto the_end;
- af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
+ af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(frame->time_base);
af->pos = fd ? fd->pkt_pos : -1;
af->serial = is->auddec.pkt_serial;
af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
@@ -2164,7 +2160,6 @@ static int video_thread(void *arg)
double pts;
double duration;
int ret;
- AVRational tb = is->video_st->time_base;
AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
AVFilterGraph *graph = NULL;
@@ -2242,9 +2237,8 @@ static int video_thread(void *arg)
is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
is->frame_last_filter_delay = 0;
- tb = av_buffersink_get_time_base(filt_out);
duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
- pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
+ pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(frame->time_base);
ret = queue_picture(is, frame, pts, duration, fd ? fd->pkt_pos : -1, is->viddec.pkt_serial);
av_frame_unref(frame);
if (is->videoq.serial != is->viddec.pkt_serial)
diff --git a/libavdevice/lavfi.c b/libavdevice/lavfi.c
index ce10d61f8a..3b77a7396a 100644
--- a/libavdevice/lavfi.c
+++ b/libavdevice/lavfi.c
@@ -384,7 +384,6 @@ static int lavfi_read_packet(AVFormatContext *avctx, AVPacket *pkt)
/* iterate through all the graph sinks. Select the sink with the
* minimum PTS */
for (i = 0; i < lavfi->nb_sinks; i++) {
- AVRational tb = av_buffersink_get_time_base(lavfi->sinks[i]);
double d;
if (lavfi->sink_eof[i])
@@ -398,7 +397,8 @@ static int lavfi_read_packet(AVFormatContext *avctx, AVPacket *pkt)
continue;
} else if (ret < 0)
goto fail;
- d = av_rescale_q_rnd(frame->pts, tb, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
+ d = av_rescale_q_rnd(frame->pts, frame->time_base, AV_TIME_BASE_Q,
+ AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
ff_dlog(avctx, "sink_idx:%d time:%f\n", i, d);
av_frame_unref(frame);
diff --git a/libavfilter/buffersink.c b/libavfilter/buffersink.c
index 5811720c61..575075ff47 100644
--- a/libavfilter/buffersink.c
+++ b/libavfilter/buffersink.c
@@ -69,18 +69,29 @@ int attribute_align_arg av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *f
return av_buffersink_get_frame_flags(ctx, frame, 0);
}
-static int return_or_keep_frame(BufferSinkContext *buf, AVFrame *out, AVFrame *in, int flags)
+static int return_or_keep_frame(AVFilterContext *ctx, AVFrame *out, AVFrame *in,
+ int flags)
{
+ BufferSinkContext *buf = ctx->priv;
+
if ((flags & AV_BUFFERSINK_FLAG_PEEK)) {
buf->peeked_frame = in;
- return out ? av_frame_ref(out, in) : 0;
+ if (out) {
+ int ret = av_frame_ref(out, in);
+ if (ret < 0)
+ return ret;
+ }
} else {
av_assert1(out);
buf->peeked_frame = NULL;
av_frame_move_ref(out, in);
av_frame_free(&in);
- return 0;
}
+
+ if (out)
+ out->time_base = ctx->inputs[0]->time_base;
+
+ return 0;
}
static int get_frame_internal(AVFilterContext *ctx, AVFrame *frame, int flags, int samples)
@@ -93,7 +104,7 @@ static int get_frame_internal(AVFilterContext *ctx, AVFrame *frame, int flags, i
int64_t pts;
if (buf->peeked_frame)
- return return_or_keep_frame(buf, frame, buf->peeked_frame, flags);
+ return return_or_keep_frame(ctx, frame, buf->peeked_frame, flags);
while (1) {
ret = samples ? ff_inlink_consume_samples(inlink, samples, samples, &cur_frame) :
@@ -102,7 +113,7 @@ static int get_frame_internal(AVFilterContext *ctx, AVFrame *frame, int flags, i
return ret;
} else if (ret) {
/* TODO return the frame instead of copying it */
- return return_or_keep_frame(buf, frame, cur_frame, flags);
+ return return_or_keep_frame(ctx, frame, cur_frame, flags);
} else if (ff_inlink_acknowledge_status(inlink, &status, &pts)) {
return status;
} else if ((flags & AV_BUFFERSINK_FLAG_NO_REQUEST)) {
diff --git a/libavfilter/version.h b/libavfilter/version.h
index 7e0eb9af97..4d8f28e512 100644
--- a/libavfilter/version.h
+++ b/libavfilter/version.h
@@ -31,7 +31,7 @@
#include "version_major.h"
-#define LIBAVFILTER_VERSION_MINOR 3
+#define LIBAVFILTER_VERSION_MINOR 4
#define LIBAVFILTER_VERSION_MICRO 100
diff --git a/tools/uncoded_frame.c b/tools/uncoded_frame.c
index 447bfc8b0d..a17d406417 100644
--- a/tools/uncoded_frame.c
+++ b/tools/uncoded_frame.c
@@ -237,7 +237,7 @@ int main(int argc, char **argv)
}
if (frame->pts != AV_NOPTS_VALUE)
frame->pts = av_rescale_q(frame->pts,
- av_buffersink_get_time_base(st->sink),
+ frame->time_base,
st->stream->time_base);
ret = av_interleaved_write_uncoded_frame(st->mux,
st->stream->index,
--
2.43.0