[FFmpeg-devel] [PATCH v5] Added Turing codec interface for ffmpeg
wm4
nfxjfg at googlemail.com
Wed Feb 8 11:28:00 EET 2017
On Wed, 8 Feb 2017 08:41:56 +0000
Saverio Blasi <saverio.blasi at bbc.co.uk> wrote:
> - This patch contains the changes needed to interface the Turing codec (http://turingcodec.org/) with ffmpeg. The patch was modified to address the review comments as follows:
> - Added a pkg-config file to list all dependencies required by libturing. This should address the issue pointed out by Hendrik Leppkes on Fri 18/11/2016
> - As per wm4's suggestions, two functions (add_option and finalise_options) have been created. The former appends new options while the latter sets up the argv array of char* pointers accordingly. add_option re-allocates the options buffer using av_realloc
> - Additionally, both functions handle the error case in which memory allocation fails
> - malloc|free|realloc have been substituted with their corresponding av_{malloc|free|realloc} versions
> - The check on bit depth has been removed since ffmpeg already sets the right pix_fmt and bit depth
> - pix_fmts is now set in ff_libturing_encoder as in h264dec.c.
> - Added av_freep to release the allocated memory
> - Added braces to single-line statements
> ---
> LICENSE.md | 1 +
> configure | 5 +
> libavcodec/Makefile | 1 +
> libavcodec/allcodecs.c | 1 +
> libavcodec/libturing.c | 320 +++++++++++++++++++++++++++++++++++++++++++++++++
> 5 files changed, 328 insertions(+)
> create mode 100644 libavcodec/libturing.c
The patch seems mostly OK; some minor comments below.
> +static av_cold int add_option(const char* current_option, optionContext* option_ctx)
> +{
> + int option_length = strlen(current_option);
> + char *temp_ptr;
> + if (option_ctx->buffer_filled + option_length + 1 > option_ctx->options_buffer_size) {
> + option_ctx->options_buffer_size += option_length + 1;
You probably shouldn't update options_buffer_size before the
reallocation has actually succeeded. (Probably doesn't matter with the
current code, but for robustness...)
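Something like this would only commit the new size once av_realloc()
has returned a valid pointer (just a sketch against the fields already
in this patch):

    if (option_ctx->buffer_filled + option_length + 1 > option_ctx->options_buffer_size) {
        size_t new_size = option_ctx->options_buffer_size + option_length + 1;
        char *temp_ptr = av_realloc(option_ctx->options, new_size);
        if (!temp_ptr)
            return AVERROR(ENOMEM);
        option_ctx->options = temp_ptr;
        /* update the bookkeeping only after the reallocation succeeded */
        option_ctx->options_buffer_size = new_size;
        option_ctx->s = option_ctx->options + option_ctx->buffer_filled;
    }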
> + temp_ptr = av_realloc(option_ctx->options, option_ctx->options_buffer_size);
> + if (temp_ptr == NULL) {
> + return AVERROR(ENOMEM);
> + }
> + option_ctx->options = temp_ptr;
> + option_ctx->s = option_ctx->options + option_ctx->buffer_filled;
> + }
> + strcpy(option_ctx->s, current_option);
> + option_ctx->s += 1 + option_length;
> + option_ctx->options_added++;
> + option_ctx->buffer_filled += option_length + 1;
> + return 0;
> +}
Still not sure why there's at least one redundant field (s, which is
redundant with buffer_filled).
Using memcpy might be slightly nicer than strcpy, because everyone
hates strcpy. But it looks correct anyway.
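For what it's worth, with memcpy the write position can be derived from
buffer_filled directly, so the s field isn't needed; roughly:

    /* the +1 copies the terminating '\0' as well */
    memcpy(option_ctx->options + option_ctx->buffer_filled,
           current_option, option_length + 1);
    option_ctx->buffer_filled += option_length + 1;
    option_ctx->options_added++;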
> +
> +static av_cold int finalise_options(optionContext* option_ctx)
> +{
> + if (option_ctx->options_added) {
> + char *p;
> + option_ctx->argv = av_malloc(option_ctx->options_added * sizeof(char*));
> + if (option_ctx->argv == NULL) {
> + return AVERROR(ENOMEM);
> + }
> + p = option_ctx->options;
> + for(int option_idx=0; option_idx<option_ctx->options_added; option_idx++) {
> + option_ctx->argv[option_idx] = p;
> + p += strlen(p) + 1;
> + }
> + }
> + return 0;
> +}
Not sure why this isn't just done by add_option.
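One possible (not required) alternative: drop the flat buffer entirely
and let add_option() maintain argv itself, e.g. with av_realloc_array()
and av_strdup(). Just a sketch:

    static av_cold int add_option(const char *current_option, optionContext *option_ctx)
    {
        /* grow the argv array by one entry */
        char **tmp = av_realloc_array(option_ctx->argv,
                                      option_ctx->options_added + 1, sizeof(*tmp));
        if (!tmp)
            return AVERROR(ENOMEM);
        option_ctx->argv = tmp;
        /* each option gets its own copy, so no finalise pass is needed */
        tmp[option_ctx->options_added] = av_strdup(current_option);
        if (!tmp[option_ctx->options_added])
            return AVERROR(ENOMEM);
        option_ctx->options_added++;
        return 0;
    }

(Cleanup would then have to av_freep() each argv entry instead of the
single options buffer.)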
> +
> +static av_cold int libturing_encode_init(AVCodecContext *avctx)
> +{
> + libturingEncodeContext *ctx = avctx->priv_data;
> + const int bit_depth = av_pix_fmt_desc_get(avctx->pix_fmt)->comp[0].depth;
> +
> + optionContext encoder_options;
> + turing_encoder_settings settings;
> + char option_string[MAX_OPTION_LENGTH];
> + double frame_rate;
> +
> + frame_rate = (double)avctx->time_base.den / (avctx->time_base.num * avctx->ticks_per_frame);
> +
> + encoder_options.buffer_filled = 0;
> + encoder_options.options_added = 0;
> + encoder_options.options_buffer_size = strlen("turing") + 1;
> + encoder_options.options = av_malloc(encoder_options.options_buffer_size);
> + if(encoder_options.options == NULL) {
> + av_log(avctx, AV_LOG_ERROR, "Cannot allocate the memory for command line options\n");
> + return AVERROR(ENOMEM);
> + }
Couldn't this extra initial allocation be dropped? It seems rather
redundant.
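av_realloc() accepts a NULL pointer, so the context could simply start
out empty and the first add_option() call would do the allocation:

    encoder_options.options             = NULL;
    encoder_options.options_buffer_size = 0;
    encoder_options.buffer_filled       = 0;
    encoder_options.options_added       = 0;
    encoder_options.argv                = NULL;

(s would then be set by the realloc branch on the first call.)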
> + encoder_options.s = encoder_options.options;
> + encoder_options.argv = NULL;
> +
> + // Add baseline command line options
> + if (add_option("turing", &encoder_options)) {
> + goto optionfail;
> + }
> +
> + if (add_option("--frames=0", &encoder_options)) {
> + goto optionfail;
> + }
> +
> + snprintf(option_string, MAX_OPTION_LENGTH, "--input-res=%dx%d", avctx->width, avctx->height);
> + if (add_option(option_string, &encoder_options)) {
> + goto optionfail;
> + }
Maybe add_option() could be a vararg function (like snprintf) to
minimize this code further. Also, {/} are generally not considered
necessary in this project if the body is just 1 line.
(You don't need to change this.)
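Purely for illustration, a printf-style helper (hypothetical name,
needs <stdarg.h>) could look like:

    static av_cold int add_option_fmt(optionContext *option_ctx, const char *fmt, ...)
    {
        char buf[MAX_OPTION_LENGTH];
        va_list ap;

        /* format into a local buffer, then reuse the existing add_option() */
        va_start(ap, fmt);
        vsnprintf(buf, sizeof(buf), fmt, ap);
        va_end(ap);

        return add_option(buf, option_ctx);
    }

which would collapse each snprintf()+add_option() pair into e.g.

    if (add_option_fmt(&encoder_options, "--input-res=%dx%d", avctx->width, avctx->height))
        goto optionfail;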
> +
> + snprintf(option_string, MAX_OPTION_LENGTH, "--frame-rate=%f", frame_rate);
> + if (add_option(option_string, &encoder_options)) {
> + goto optionfail;
> + }
> +
> + snprintf(option_string, MAX_OPTION_LENGTH, "--bit-depth=%d", bit_depth);
> + if (add_option(option_string, &encoder_options)) {
> + goto optionfail;
> + }
> +
> + if (avctx->sample_aspect_ratio.num > 0 && avctx->sample_aspect_ratio.den > 0) {
> + int sar_num, sar_den;
> +
> + av_reduce(&sar_num, &sar_den,
> + avctx->sample_aspect_ratio.num,
> + avctx->sample_aspect_ratio.den, 65535);
> + snprintf(option_string, MAX_OPTION_LENGTH, "--sar=%d:%d", sar_num, sar_den);
> + if (add_option(option_string, &encoder_options)) {
> + goto optionfail;
> + }
> + }
> +
> + // Parse additional command line options
> + if (ctx->options) {
> + AVDictionary *dict = NULL;
> + AVDictionaryEntry *en = NULL;
> +
> + if (!av_dict_parse_string(&dict, ctx->options, "=", ":", 0)) {
> + while ((en = av_dict_get(dict, "", en, AV_DICT_IGNORE_SUFFIX))) {
> + int const illegal_option =
> + !strcmp("input-res", en->key) ||
> + !strcmp("frame-rate", en->key) ||
> + !strcmp("f", en->key) ||
> + !strcmp("frames", en->key) ||
> + !strcmp("sar", en->key) ||
> + !strcmp("bit-depth", en->key) ||
> + !strcmp("internal-bit-depth", en->key);
> + if (illegal_option) {
> + av_log(avctx, AV_LOG_WARNING, "%s=%s ignored.\n", en->key, en->value);
Why do this extra check, instead of applying the internal values after
the user options? (Assuming redundant options overwrite previous
option values in libturing.)
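I.e. (assuming later options overwrite earlier ones in libturing, and
keeping the turing_check_binary_option() special case as it is) the
loop could add every user option unconditionally and the values derived
from avctx could simply be appended afterwards, along the lines of:

    /* user-supplied options first ... */
    while ((en = av_dict_get(dict, "", en, AV_DICT_IGNORE_SUFFIX))) {
        snprintf(option_string, MAX_OPTION_LENGTH, "--%s=%s", en->key, en->value);
        if (add_option(option_string, &encoder_options))
            goto optionfail;
    }
    /* ... then the values ffmpeg controls, so they take precedence */
    snprintf(option_string, MAX_OPTION_LENGTH, "--input-res=%dx%d", avctx->width, avctx->height);
    if (add_option(option_string, &encoder_options))
        goto optionfail;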
> + } else {
> + if (turing_check_binary_option(en->key)) {
> + snprintf(option_string, MAX_OPTION_LENGTH, "--%s", en->key);
> + } else {
> + snprintf(option_string, MAX_OPTION_LENGTH, "--%s=%s", en->key, en->value);
> + }
> + if (add_option(option_string, &encoder_options)) {
> + goto optionfail;
> + }
> + }
> + }
> + av_dict_free(&dict);
> + }
> + }
> +
> + if (add_option("dummy-input-filename", &encoder_options)) {
> + goto optionfail;
> + }
> +
> + if (finalise_options(&encoder_options)) {
> + goto optionfail;
> + }
> +
> + settings.argv = (char const**)encoder_options.argv;
> + settings.argc = encoder_options.options_added;
> +
> + for (int i=0; i<settings.argc; ++i) {
> + av_log(avctx, AV_LOG_INFO, "arg %d: %s\n", i, settings.argv[i]);
> + }
> +
> + ctx->encoder = turing_create_encoder(settings);
> +
> + if (!ctx->encoder) {
> + av_log(avctx, AV_LOG_ERROR, "Failed to create libturing encoder.\n");
> + av_freep(&encoder_options.argv);
> + av_freep(&encoder_options.options);
> + return AVERROR_INVALIDDATA;
> + }
> +
> + if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
> + turing_bitstream const *bitstream;
> + bitstream = turing_encode_headers(ctx->encoder);
> + if (bitstream->size <= 0) {
> + av_log(avctx, AV_LOG_ERROR, "Failed to encode headers.\n");
> + av_freep(&encoder_options.argv);
> + av_freep(&encoder_options.options);
> + turing_destroy_encoder(ctx->encoder);
> + return AVERROR_INVALIDDATA;
> + }
> +
> + avctx->extradata_size = bitstream->size;
> +
> + avctx->extradata = av_malloc(avctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
I think you should use av_mallocz to ensure the padding is cleared.
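E.g.:

    avctx->extradata = av_mallocz(avctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);

so that the AV_INPUT_BUFFER_PADDING_SIZE bytes at the end are
guaranteed to be zeroed; the rest of the error handling can stay as it
is.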