[FFmpeg-devel] [PATCH 2/5] cmdutils: add insert_timeline_graph()
wm4
nfxjfg at googlemail.com
Tue Jan 6 20:28:48 CET 2015
On Tue, 6 Jan 2015 18:09:58 +0100
Clément Bœsch <u at pkh.me> wrote:
> From: Clément Bœsch <clement at stupeflix.com>
>
> This function will be used in the following commits in ffmpeg and
> ffplay.
> ---
> cmdutils.c | 170 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
> cmdutils.h | 12 +++++
> 2 files changed, 182 insertions(+)
>
> diff --git a/cmdutils.c b/cmdutils.c
> index b35180e..0e22e57 100644
> --- a/cmdutils.c
> +++ b/cmdutils.c
> @@ -31,7 +31,9 @@
>
> #include "config.h"
> #include "compat/va_copy.h"
> +#include "libavcodec/bytestream.h"
> #include "libavformat/avformat.h"
> +#include "libavformat/isom.h"
> #include "libavfilter/avfilter.h"
> #include "libavdevice/avdevice.h"
> #include "libavresample/avresample.h"
> @@ -2252,3 +2254,171 @@ int show_sinks(void *optctx, const char *opt, const char *arg)
> return ret;
> }
> #endif
> +
> +static int parse_elst(MOVElst **ret, const uint8_t *buf, int size)
> +{
> + GetByteContext gb;
> + int i, edit_count, version;
> + MOVElst *elst_data;
> +
> + bytestream2_init(&gb, buf, size);
> +
> + version = bytestream2_get_byte(&gb);
> + bytestream2_skip(&gb, 3); /* flags */
> + edit_count = bytestream2_get_be32(&gb);
> +
> + if (!edit_count)
> + return 0;
> +
> + elst_data = av_malloc_array(edit_count, sizeof(*elst_data));
> + if (!elst_data)
> + return AVERROR(ENOMEM);
> +
> + for (i = 0; i < edit_count && bytestream2_get_bytes_left(&gb) > 0; i++) {
> + MOVElst *e = &elst_data[i];
> +
> + if (version == 1) {
> + e->duration = bytestream2_get_be64(&gb);
> + e->time = bytestream2_get_be64(&gb);
> + } else {
> + e->duration = bytestream2_get_be32(&gb);
> + e->time = (int32_t)bytestream2_get_be32(&gb);
> + }
> + e->rate = bytestream2_get_be32(&gb) / 65536.0;
> + }
> +
> + *ret = elst_data;
> + return i;
> +}
> +
> +static int get_elst_lavfi_graph_str(AVBPrint *bp, const AVStream *st, int64_t start_time)
> +{
> + int i, elst_count, size;
> + AVBPrint select;
> + AVBPrint setpts;
> + MOVElst *elst;
> + AVRational tb;
> +
> + const uint8_t *buf = av_stream_get_side_data(st, AV_PKT_DATA_MOV_TIMELINE, &size);
> +
> + /* initialize the output bprint before any early return so the caller can
> + * safely check and finalize it in all cases */
> + av_bprint_init(bp, 0, AV_BPRINT_SIZE_UNLIMITED);
> +
> + if (!buf || size <= 4)
> + return 0;
> +
> + tb = av_make_q(1, AV_RB32(buf));
> +
> + elst_count = parse_elst(&elst, buf + 4, size - 4);
> + if (elst_count <= 0)
> + return elst_count;
> +
> + av_bprint_init(&select, 0, AV_BPRINT_SIZE_UNLIMITED);
> + av_bprint_init(&setpts, 0, AV_BPRINT_SIZE_UNLIMITED);
> +
> + for (i = 0; i < elst_count; i++) {
> + int64_t gap;
> + const MOVElst *segment = &elst[i];
> + const MOVElst *next = i < elst_count - 1 ? &elst[i + 1] : NULL;
> + const MOVElst *prev = i > 0 ? &elst[i - 1] : NULL;
> + int64_t rescaled_start = av_rescale_q(segment->time, tb, st->time_base);
> + int64_t end = segment->duration ? segment->time + segment->duration : -1;
> +
> + if (!segment->duration && next)
> + end = next->time;
> +
> + if (select.str[0])
> + av_bprintf(&select, "+");
> +
> + if (end == -1) {
> + av_bprintf(&select, "gte(pts,%"PRId64")", rescaled_start);
> + } else {
> + const int64_t rescaled_end = av_rescale_q(end, tb, st->time_base);
> + av_bprintf(&select, "between(pts,%"PRId64",%"PRId64"-1)",
> + rescaled_start, rescaled_end);
> + }
> +
> + if (segment->time == -1)
> + /* XXX: we are supposed to insert initial silence/emptiness here */
> + gap = segment->duration;
> + else if (prev)
> + gap = segment->time - prev->time - prev->duration;
> + else
> + gap = segment->time;
> + gap *= segment->rate;
> +
> + if (gap) {
> + if (!*setpts.str)
> + av_bprintf(&setpts, "PTS");
> + gap = av_rescale_q(gap, tb, st->time_base);
> + av_bprintf(&setpts, "-if(gte(PTS,%"PRId64"),%"PRId64",0)",
> + segment->time, gap);
> + }
> + }
> +
> + av_freep(&elst);
> +
> + if (select.str[0] && av_bprint_is_complete(&select) && av_bprint_is_complete(&setpts)) {
> + const char *tstr = st->codec->codec_type == AVMEDIA_TYPE_AUDIO ? "a" : "";
> + int64_t rescaled_start_time = start_time == AV_NOPTS_VALUE ? 0 : av_rescale_q(start_time, AV_TIME_BASE_Q, st->time_base);
> +
> + av_bprintf(bp, "[tl_in] ");
> +
> + /* make sure the following filters will not take into account the PTS
> + * shift that can occur with ffmpeg (-ss) */
> + if (rescaled_start_time)
> + av_bprintf(bp, "%ssetpts=PTS+%"PRId64", ", tstr, rescaled_start_time);
> +
> + /* select the time ranges
> + * FIXME: aselect should be replaced with a sample accurate filter */
> + av_bprintf(bp, "%sselect='%s'", tstr, select.str);
> +
> + /* insert the time adjustment filter if there are time gaps (often
> + * the case when there is more than one entry) */
> + if (setpts.str[0])
> + av_bprintf(bp, ", %ssetpts='%s'", tstr, setpts.str);
> +
> + /* restore the time shift introduced previously */
> + if (rescaled_start_time)
> + av_bprintf(bp, ", %ssetpts=PTS-%"PRId64, tstr, rescaled_start_time);
> +
> + av_bprintf(bp, " [tl_out]");
> + }
> +
> + av_bprint_finalize(&select, NULL);
> + av_bprint_finalize(&setpts, NULL);
> +
> + return 0;
> +}
> +
> +int insert_timeline_graph(const AVStream *st, AVFilterContext **last_filter,
> + int64_t start_time, int reverse)
> +{
> + AVBPrint bp;
> + AVFilterInOut *inputs, *outputs;
> + AVFilterGraph *graph = (*last_filter)->graph;
> +
> + int ret = get_elst_lavfi_graph_str(&bp, st, start_time);
> + if (ret < 0)
> + goto end;
> +
> + if (!av_bprint_is_complete(&bp) || !bp.str[0])
> + goto end;
> +
> + if ((ret = avfilter_graph_parse2(graph, bp.str, &inputs, &outputs)) < 0) {
> + av_log(NULL, AV_LOG_ERROR, "Unable to parse timeline graph\n");
> + goto end;
> + }
> +
> + if (reverse)
> + ret = avfilter_link(outputs[0].filter_ctx, 0, *last_filter, 0);
> + else
> + ret = avfilter_link(*last_filter, 0, inputs[0].filter_ctx, 0);
> +
> + if (ret < 0) {
> + av_log(NULL, AV_LOG_ERROR, "Unable to link the end of the timeline "
> + "graph to the last inserted filter: %s\n", av_err2str(ret));
> + goto end;
> + }
> +
> + if (reverse)
> + *last_filter = inputs[0].filter_ctx;
> + else
> + *last_filter = outputs[0].filter_ctx;
> +
> +end:
> + av_bprint_finalize(&bp, NULL);
> + return ret;
> +}
> diff --git a/cmdutils.h b/cmdutils.h
> index f6ad44c..7b140fd 100644
> --- a/cmdutils.h
> +++ b/cmdutils.h
> @@ -597,4 +597,16 @@ void *grow_array(void *array, int elem_size, int *size, int new_size);
> char name[128];\
> av_get_channel_layout_string(name, sizeof(name), 0, ch_layout);
>
> +/**
> + * Get the MOV timeline from the stream side data, construct a libavfilter
> + * filtergraph, and insert it after the last filter.
> + *
> + * @param st the stream with the timeline
> + * @param last_filter pointer to the last filter to attach the filtergraph to (will be updated to the new end of the chain)
> + * @param start_time initial timestamp offset in AV_TIME_BASE_Q time base
> + * @param reverse if set, prepend the timeline filtergraph instead of appending it
> + */
> +int insert_timeline_graph(const AVStream *st, AVFilterContext **last_filter,
> + int64_t start_time, int reverse);
> +
> #endif /* CMDUTILS_H */
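For context, the intended call pattern, going by the doc comment and the linking code above, seems to be roughly the following. This is only a sketch: wire_timeline() and the buffersrc/buffersink pair ('src'/'sink') are hypothetical stand-ins for what the follow-up ffmpeg/ffplay patches would presumably set up; only insert_timeline_graph() itself comes from this patch.

    /* Hypothetical caller: 'src' and 'sink' are assumed to be an
     * already-created buffersrc/buffersink pair living in the same
     * AVFilterGraph; start_time is in AV_TIME_BASE_Q as documented. */
    static int wire_timeline(const AVStream *st, AVFilterContext *src,
                             AVFilterContext *sink, int64_t start_time)
    {
        AVFilterContext *last_filter = src;
        int ret;

        /* Append the select/setpts timeline sub-graph after 'src'
         * (reverse=0); 'last_filter' is moved to the sub-graph output,
         * or left as 'src' when the stream has no timeline side data. */
        ret = insert_timeline_graph(st, &last_filter, start_time, 0);
        if (ret < 0)
            return ret;

        /* close the chain and configure the graph as usual */
        ret = avfilter_link(last_filter, 0, sink, 0);
        if (ret < 0)
            return ret;

        return avfilter_graph_config(src->graph, NULL);
    }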
So libavformat exports the raw MOV atom, and all tools ffmpeg.c uses to
parse it are private libavformat/libavcodec API?? That seems very
unfair to the API user.
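For reference, what an API user would have to do by hand looks roughly like the sketch below. The layout is inferred from parse_elst() and get_elst_lavfi_graph_str() in the patch (4 bytes of big-endian movie timescale followed by the raw elst payload); dump_mov_timeline() is purely illustrative and not part of the series, and it uses only the public libavutil byte-reading helpers.

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>
    #include <libavutil/intreadwrite.h>

    /* elst payload after the timescale: 1 byte version, 3 bytes flags,
     * 4 bytes entry count, then per entry a duration, a media time
     * (32 or 64 bit depending on version) and a 16.16 fixed-point rate */
    static void dump_mov_timeline(const uint8_t *buf, int size)
    {
        unsigned timescale = AV_RB32(buf);
        int      version   = buf[4];
        unsigned count     = AV_RB32(buf + 8);   /* after version + flags */
        int      entry_sz  = version == 1 ? 20 : 12;
        const uint8_t *p   = buf + 12;
        unsigned i;

        if (size < 12)
            return;

        for (i = 0; i < count && p + entry_sz <= buf + size; i++, p += entry_sz) {
            int64_t duration = version == 1 ? AV_RB64(p) : AV_RB32(p);
            int64_t time     = version == 1 ? (int64_t)AV_RB64(p + 8)
                                            : (int32_t)AV_RB32(p + 4);
            double  rate     = AV_RB32(p + entry_sz - 4) / 65536.0;

            printf("edit %u: time=%"PRId64" duration=%"PRId64" rate=%g "
                   "(timescale %u)\n", i, time, duration, rate, timescale);
        }
    }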