[FFmpeg-devel] [PATCH 8/9] fftools/ffmpeg: Replace sub2video with subtitle frame filtering

Soft Works softworkz at hotmail.com
Thu Aug 19 10:43:37 EEST 2021


Signed-off-by: softworkz <softworkz at hotmail.com>
---
 fftools/ffmpeg.c        | 324 +++++++++++++++++++++++-----------------
 fftools/ffmpeg.h        |   7 +-
 fftools/ffmpeg_filter.c | 198 +++++++++++++++++-------
 fftools/ffmpeg_hw.c     |   2 +-
 fftools/ffmpeg_opt.c    |   3 +-
 5 files changed, 330 insertions(+), 204 deletions(-)
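
Reviewer note (illustration only, not part of the diff): the core change below is to stop rendering
subtitles into RGB32 video frames and instead carry the decoded AVSubtitle through the filtergraph
inside an AVFrame, referenced via frame->buf[0] with avsubtitle_free() as the buffer's free callback.
A minimal sketch of that wrapping pattern follows; the SUBTITLE_* frame formats, av_frame_get_buffer2()
and the subtitle-aware "sbuffer"/"sbuffersink" filters used by the patch come from earlier patches in
this series and are assumed here. Unlike the patch's avsubtitle_free_ref(), this sketch also frees the
heap-allocated AVSubtitle struct itself in the callback.

    #include <libavcodec/avcodec.h>
    #include <libavutil/buffer.h>
    #include <libavutil/frame.h>
    #include <libavutil/mem.h>

    static void subtitle_buffer_free(void *opaque, uint8_t *data)
    {
        avsubtitle_free((AVSubtitle *)data); /* frees the rects, not the struct */
        av_free(data);                       /* free the AVSubtitle container itself */
    }

    static AVFrame *wrap_subtitle_in_frame(AVSubtitle *sub, int format)
    {
        AVFrame *frame = av_frame_alloc();

        if (!frame)
            return NULL;

        frame->format = format; /* SUBTITLE_BITMAP, SUBTITLE_ASS or SUBTITLE_TEXT */
        frame->buf[0] = av_buffer_create((uint8_t *)sub, sizeof(*sub),
                                         subtitle_buffer_free, NULL,
                                         AV_BUFFER_FLAG_READONLY);
        if (!frame->buf[0]) {
            av_frame_free(&frame);
            return NULL;
        }
        frame->data[0] = frame->buf[0]->data; /* consumers read the AVSubtitle from here */
        frame->pts     = sub->pts;            /* caller rescales to the stream time base */

        return frame;
    }

Downstream filters and the subtitle encoder then retrieve the AVSubtitle from frame->data[0],
which is what do_subtitle_out() does in the patch below.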

diff --git a/fftools/ffmpeg.c b/fftools/ffmpeg.c
index b0ce7c7c32..aec8422111 100644
--- a/fftools/ffmpeg.c
+++ b/fftools/ffmpeg.c
@@ -174,114 +174,99 @@ static void free_input_threads(void);
    This is a temporary solution until libavfilter gets real subtitles support.
  */
 
-static int sub2video_get_blank_frame(InputStream *ist)
+static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame);
+
+
+static int get_subtitle_format_from_codecdesc(const AVCodecDescriptor *codec_descriptor)
 {
-    int ret;
-    AVFrame *frame = ist->sub2video.frame;
+    int format;
 
-    av_frame_unref(frame);
-    ist->sub2video.frame->width  = ist->dec_ctx->width  ? ist->dec_ctx->width  : ist->sub2video.w;
-    ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
-    ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
-    if ((ret = av_frame_get_buffer(frame, 0)) < 0)
-        return ret;
-    memset(frame->data[0], 0, frame->height * frame->linesize[0]);
-    return 0;
+    if (codec_descriptor->props & AV_CODEC_PROP_BITMAP_SUB)
+        format = SUBTITLE_BITMAP;
+    else if (codec_descriptor->props & AV_CODEC_PROP_TEXT_SUB)
+        format = SUBTITLE_ASS;
+    else
+        format = SUBTITLE_TEXT;
+
+    return format;
 }
 
-static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
-                                AVSubtitleRect *r)
+static void avsubtitle_free_ref(void *opaque, uint8_t *data)
 {
-    uint32_t *pal, *dst2;
-    uint8_t *src, *src2;
-    int x, y;
+    avsubtitle_free((AVSubtitle *)data);
+}
 
-    if (r->type != SUBTITLE_BITMAP) {
-        av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
+static void sub2video_resend_current(InputStream *ist, int64_t heartbeat_pts)
+{
+    AVFrame *frame;
+    AVSubtitle *current_sub;
+    int ret;
+    int64_t pts, end_pts;
+    int format = get_subtitle_format_from_codecdesc(ist->dec_ctx->codec_descriptor);
+
+    /* If we are initializing the system, utilize current heartbeat
+       PTS as the start time, and show until the following subpicture
+       is received. Otherwise, utilize the previous subpicture's end time
+       as the fall-back value. */
+    pts       = ist->sub2video.end_pts <= 0 ?
+                heartbeat_pts : ist->sub2video.end_pts;
+    end_pts   = INT64_MAX;
+
+    ////av_log(ist->dec_ctx, AV_LOG_ERROR, "sub2video_resend_current1: heartbeat_pts: %lld ist->sub2video.end_pts: %lld\n", heartbeat_pts, ist->sub2video.end_pts);
+
+    pts     = av_rescale_q(pts * 1000LL,
+                           AV_TIME_BASE_Q, ist->st->time_base);
+
+    frame = av_frame_alloc();
+    if (!frame) {
+        av_log(ist->dec_ctx, AV_LOG_ERROR, "Unable to alloc frame (out of memory).\n");
         return;
     }
-    if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
-        av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
-            r->x, r->y, r->w, r->h, w, h
-        );
+
+    frame->format = format;
+
+    if ((ret = av_frame_get_buffer2(frame, AVMEDIA_TYPE_SUBTITLE, 0)) < 0) {
+        av_log(ist->dec_ctx, AV_LOG_ERROR, "Error (av_frame_get_buffer2): %d.\n", ret);
+        av_frame_free(&frame);
+        return;
     }
 
-    dst += r->y * dst_linesize + r->x * 4;
-    src = r->data[0];
-    pal = (uint32_t *)r->data[1];
-    for (y = 0; y < r->h; y++) {
-        dst2 = (uint32_t *)dst;
-        src2 = src;
-        for (x = 0; x < r->w; x++)
-            *(dst2++) = pal[*(src2++)];
-        dst += dst_linesize;
-        src += r->linesize[0];
+    frame->width = ist->sub2video.w;
+    frame->height = ist->sub2video.h;
+
+    if (ist->sub2video.current_subtitle) {
+        frame->buf[0] = av_buffer_ref(ist->sub2video.current_subtitle);
+        frame->data[0] = ist->sub2video.current_subtitle->data;
+    }
+    else {
+        AVBufferRef *empty_sub_buffer;
+        AVSubtitle *empty_sub = av_mallocz(sizeof(*empty_sub));
+        if (!empty_sub) {
+            av_frame_free(&frame);
+            return;
+        }
+        empty_sub->format = format;
+        empty_sub->num_rects = 0;
+        empty_sub->pts = av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q);
+        empty_sub->end_display_time = 1000;
+        empty_sub_buffer = av_buffer_create((uint8_t*)empty_sub, sizeof(*empty_sub), avsubtitle_free_ref, NULL, AV_BUFFER_FLAG_READONLY);
+        frame->buf[0] = empty_sub_buffer;
+        frame->data[0] = empty_sub_buffer->data;
     }
-}
 
-static void sub2video_push_ref(InputStream *ist, int64_t pts)
-{
-    AVFrame *frame = ist->sub2video.frame;
-    int i;
-    int ret;
+    current_sub = (AVSubtitle *)frame->data[0];
 
-    av_assert1(frame->data[0]);
-    ist->sub2video.last_pts = frame->pts = pts;
-    for (i = 0; i < ist->nb_filters; i++) {
-        ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
-                                           AV_BUFFERSRC_FLAG_KEEP_REF |
-                                           AV_BUFFERSRC_FLAG_PUSH);
-        if (ret != AVERROR_EOF && ret < 0)
-            av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
-                   av_err2str(ret));
-    }
-}
+    frame->pts = pts;
+    ist->sub2video.last_pts = pts;
 
-void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
-{
-    AVFrame *frame = ist->sub2video.frame;
-    int8_t *dst;
-    int     dst_linesize;
-    int num_rects, i;
-    int64_t pts, end_pts;
+    ////av_log(ist->dec_ctx, AV_LOG_ERROR, ": frame->pts: %lld current_sub: %lld\n", frame->pts, current_sub->pts);
+
+    send_frame_to_filters(ist, frame);
+    av_frame_free(&frame);
 
-    if (!frame)
-        return;
-    if (sub) {
-        pts       = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
-                                 AV_TIME_BASE_Q, ist->st->time_base);
-        end_pts   = av_rescale_q(sub->pts + sub->end_display_time   * 1000LL,
-                                 AV_TIME_BASE_Q, ist->st->time_base);
-        num_rects = sub->num_rects;
-    } else {
-        /* If we are initializing the system, utilize current heartbeat
-           PTS as the start time, and show until the following subpicture
-           is received. Otherwise, utilize the previous subpicture's end time
-           as the fall-back value. */
-        pts       = ist->sub2video.initialize ?
-                    heartbeat_pts : ist->sub2video.end_pts;
-        end_pts   = INT64_MAX;
-        num_rects = 0;
-    }
-    if (sub2video_get_blank_frame(ist) < 0) {
-        av_log(ist->dec_ctx, AV_LOG_ERROR,
-               "Impossible to get a blank canvas.\n");
-        return;
-    }
-    dst          = frame->data    [0];
-    dst_linesize = frame->linesize[0];
-    for (i = 0; i < num_rects; i++)
-        sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
-    sub2video_push_ref(ist, pts);
     ist->sub2video.end_pts = end_pts;
-    ist->sub2video.initialize = 0;
 }
 
 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
 {
-    InputFile *infile = input_files[ist->file_index];
-    int i, j, nb_reqs;
+    const InputFile *infile = input_files[ist->file_index];
+    int i;
     int64_t pts2;
 
     /* When a frame is read from a file, examine all sub2video streams in
@@ -290,7 +275,7 @@ static void sub2video_heartbeat(InputStream *ist, int64_t pts)
        (possibly overlay) is desperately waiting for a subtitle frame. */
     for (i = 0; i < infile->nb_streams; i++) {
         InputStream *ist2 = input_streams[infile->ist_index + i];
-        if (!ist2->sub2video.frame)
+        if (!ist2->sub2video.is_active)
             continue;
         /* subtitles seem to be usually muxed ahead of other streams;
            if not, subtracting a larger time here is necessary */
@@ -298,15 +283,18 @@ static void sub2video_heartbeat(InputStream *ist, int64_t pts)
         /* do not send the heartbeat frame if the subtitle is already ahead */
         if (pts2 <= ist2->sub2video.last_pts)
             continue;
-        if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
+        if (pts2 >= ist2->sub2video.end_pts) {
             /* if we have hit the end of the current displayed subpicture,
                or if we need to initialize the system, update the
                overlayed subpicture and its start/end times */
-            sub2video_update(ist2, pts2 + 1, NULL);
-        for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
-            nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
-        if (nb_reqs)
-            sub2video_push_ref(ist2, pts2);
+            av_buffer_unref(&ist2->sub2video.current_subtitle);
+            ist2->sub2video.current_subtitle = NULL;
+            sub2video_resend_current(ist2, pts2 + 1);
+        }
+        //for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
+        //    nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
+        //if (nb_reqs)
+        //    sub2video_resend_current(ist2, pts2);
     }
 }
 
@@ -316,7 +304,7 @@ static void sub2video_flush(InputStream *ist)
     int ret;
 
     if (ist->sub2video.end_pts < INT64_MAX)
-        sub2video_update(ist, INT64_MAX, NULL);
+        sub2video_resend_current(ist, INT64_MAX);
     for (i = 0; i < ist->nb_filters; i++) {
         ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
         if (ret != AVERROR_EOF && ret < 0)
@@ -535,15 +523,6 @@ static void ffmpeg_cleanup(int ret)
                 av_frame_free(&frame);
             }
             av_fifo_freep(&ifilter->frame_queue);
-            if (ist->sub2video.sub_queue) {
-                while (av_fifo_size(ist->sub2video.sub_queue)) {
-                    AVSubtitle sub;
-                    av_fifo_generic_read(ist->sub2video.sub_queue,
-                                         &sub, sizeof(sub), NULL);
-                    avsubtitle_free(&sub);
-                }
-                av_fifo_freep(&ist->sub2video.sub_queue);
-            }
             av_buffer_unref(&ifilter->hw_frames_ctx);
             av_freep(&ifilter->name);
             av_freep(&fg->inputs[j]);
@@ -636,7 +615,6 @@ static void ffmpeg_cleanup(int ret)
         av_packet_free(&ist->pkt);
         av_dict_free(&ist->decoder_opts);
         avsubtitle_free(&ist->prev_sub.subtitle);
-        av_frame_free(&ist->sub2video.frame);
         av_freep(&ist->filters);
         av_freep(&ist->hwaccel_device);
         av_freep(&ist->dts_buffer);
@@ -1061,13 +1039,19 @@ error:
 
 static void do_subtitle_out(OutputFile *of,
                             OutputStream *ost,
-                            AVSubtitle *sub)
+                            AVFrame *frame)
 {
     int subtitle_out_max_size = 1024 * 1024;
     int subtitle_out_size, nb, i;
     AVCodecContext *enc;
     AVPacket *pkt = ost->pkt;
     int64_t pts;
+    AVSubtitle *sub = (AVSubtitle *)frame->data[0];
+
+    if (!sub)
+        return;
+
+    ////av_log(NULL, AV_LOG_VERBOSE, "do_subtitle_out: sub->pts: %lld  frame->pts: %lld\n", sub->pts, frame->pts);
 
     if (sub->pts == AV_NOPTS_VALUE) {
         av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
@@ -1576,8 +1560,11 @@ static int reap_filters(int flush)
                 }
                 do_audio_out(of, ost, filtered_frame);
                 break;
+            case AVMEDIA_TYPE_SUBTITLE:
+                do_subtitle_out(of, ost, filtered_frame);
+                break;
             default:
-                // TODO support subtitle filters
                 av_assert0(0);
             }
 
@@ -2173,7 +2160,8 @@ static int ifilter_has_all_input_formats(FilterGraph *fg)
     int i;
     for (i = 0; i < fg->nb_inputs; i++) {
         if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
-                                          fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
+                                          fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO ||
+                                          fg->inputs[i]->type == AVMEDIA_TYPE_SUBTITLE))
             return 0;
     }
     return 1;
@@ -2270,7 +2258,7 @@ static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
         // the filtergraph was never configured
         if (ifilter->format < 0)
             ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
-        if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
+        if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO || ifilter->type == AVMEDIA_TYPE_SUBTITLE)) {
             av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
             return AVERROR_INVALIDDATA;
         }
@@ -2528,12 +2516,24 @@ fail:
 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
                                int *decode_failed)
 {
-    AVSubtitle subtitle;
-    int free_sub = 1;
-    int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
-                                          &subtitle, got_output, pkt);
+    AVFrame *decoded_frame;
+    AVCodecContext *avctx = ist->dec_ctx;
+    int i = 0, ret = 0, err = 0;
+    int64_t pts, end_pts;
+    AVSubtitle *subtitle = av_mallocz(sizeof(*subtitle));
+    if (!subtitle)
+        return AVERROR(ENOMEM);
+
+    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
+        return AVERROR(ENOMEM);
+    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
+        return AVERROR(ENOMEM);
+    decoded_frame = ist->decoded_frame;
+
+    ret = avcodec_decode_subtitle2(avctx, subtitle, got_output, pkt);
 
-    check_decode_result(NULL, got_output, ret);
+    if (ret != AVERROR_EOF)
+        check_decode_result(NULL, got_output, ret);
 
     if (ret < 0 || !*got_output) {
         *decode_failed = 1;
@@ -2545,10 +2545,10 @@ static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
     if (ist->fix_sub_duration) {
         int end = 1;
         if (ist->prev_sub.got_output) {
-            end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
+            end = av_rescale(subtitle->pts - ist->prev_sub.subtitle.pts,
                              1000, AV_TIME_BASE);
             if (end < ist->prev_sub.subtitle.end_display_time) {
-                av_log(ist->dec_ctx, AV_LOG_DEBUG,
+                av_log(avctx, AV_LOG_DEBUG,
                        "Subtitle duration reduced from %"PRId32" to %d%s\n",
                        ist->prev_sub.subtitle.end_display_time, end,
                        end <= 0 ? ", dropping it" : "");
@@ -2557,35 +2557,79 @@ static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
         }
         FFSWAP(int,        *got_output, ist->prev_sub.got_output);
         FFSWAP(int,        ret,         ist->prev_sub.ret);
-        FFSWAP(AVSubtitle, subtitle,    ist->prev_sub.subtitle);
+        FFSWAP(AVSubtitle, *subtitle,    ist->prev_sub.subtitle);
         if (end <= 0)
-            goto out;
+            return end;
     }
 
     if (!*got_output)
         return ret;
 
-    if (ist->sub2video.frame) {
-        sub2video_update(ist, INT64_MIN, &subtitle);
-    } else if (ist->nb_filters) {
-        if (!ist->sub2video.sub_queue)
-            ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
-        if (!ist->sub2video.sub_queue)
+    ////decoded_frame = av_frame_alloc();
+    ////if (!decoded_frame)
+    ////    return AVERROR(ENOMEM);
+
+    decoded_frame->format = get_subtitle_format_from_codecdesc(avctx->codec_descriptor);
+
+    if ((ret = av_frame_get_buffer2(decoded_frame, AVMEDIA_TYPE_SUBTITLE, 0)) < 0)
+        return ret;
+
+    av_buffer_unref(&ist->sub2video.current_subtitle);
+    ist->sub2video.current_subtitle = av_buffer_create((uint8_t*)subtitle, sizeof(*subtitle), avsubtitle_free_ref, NULL, AV_BUFFER_FLAG_READONLY);
+
+    decoded_frame->buf[0] = av_buffer_ref(ist->sub2video.current_subtitle);
+    decoded_frame->data[0] = ist->sub2video.current_subtitle->data;
+
+    pts     = av_rescale_q(subtitle->pts + subtitle->start_display_time * 1000LL,
+                           AV_TIME_BASE_Q, ist->st->time_base);
+    end_pts = av_rescale_q(subtitle->pts + subtitle->end_display_time   * 1000LL,
+                             AV_TIME_BASE_Q, ist->st->time_base);
+
+    ist->sub2video.last_pts = decoded_frame->pts = pts;
+    ist->sub2video.end_pts = end_pts;
+
+    ////av_log(ist->dec_ctx, AV_LOG_ERROR, "frame->pts: %lld subtitle->pts: %lld\n", decoded_frame->pts, subtitle->pts);
+
+    for (i = 0; i < nb_output_streams; i++) {
+        OutputStream *ost = output_streams[i];
+
+        if (!ost->pkt && !(ost->pkt = av_packet_alloc()))
             exit_program(1);
-        if (!av_fifo_space(ist->sub2video.sub_queue)) {
-            ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
-            if (ret < 0)
-                exit_program(1);
-        }
-        av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
-        free_sub = 0;
+        if (!check_output_constraints(ist, ost) || !ost->encoding_needed
+            || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
+            continue;
+
+        do_subtitle_out(output_files[ost->file_index], ost, decoded_frame);
     }
 
-    if (!subtitle.num_rects)
-        goto out;
+    err = send_frame_to_filters(ist, decoded_frame);
 
-    ist->frames_decoded++;
+    av_frame_unref(ist->filter_frame);
+    av_frame_unref(decoded_frame);
+    return err < 0 ? err : ret;
 
+////    if (ist->sub2video.frame) {
+////        sub2video_update(ist, INT64_MIN, subtitle);
+////        free_sub = 0;
+////    } else if (ist->nb_filters) {
+////        if (!ist->sub2video.sub_queue)
+////            ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
+////        if (!ist->sub2video.sub_queue)
+////            exit_program(1);
+////        if (!av_fifo_space(ist->sub2video.sub_queue)) {
+////            ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
+////            if (ret < 0)
+////                exit_program(1);
+////        }
+////        av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
+////        free_sub = 0;
+////    }
+////
+////    if (!subtitle->num_rects)
+////        goto out;
+////
+////    ist->frames_decoded++;
+////
     for (i = 0; i < nb_output_streams; i++) {
         OutputStream *ost = output_streams[i];
 
@@ -2595,13 +2639,13 @@ static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
             || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
             continue;
 
-        do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
+        do_subtitle_out(output_files[ost->file_index], ost, subtitle);
     }
-
-out:
-    if (free_sub)
-        avsubtitle_free(&subtitle);
-    return ret;
+////
+////out:
+////    if (free_sub)
+////        avsubtitle_free(subtitle);
+////    return ret;
 }
 
 static int send_filter_eof(InputStream *ist)
diff --git a/fftools/ffmpeg.h b/fftools/ffmpeg.h
index d2dd7ca092..59e9dd775c 100644
--- a/fftools/ffmpeg.h
+++ b/fftools/ffmpeg.h
@@ -352,12 +352,11 @@ typedef struct InputStream {
     } prev_sub;
 
     struct sub2video {
+        int is_active;
         int64_t last_pts;
         int64_t end_pts;
-        AVFifoBuffer *sub_queue;    ///< queue of AVSubtitle* before filter init
-        AVFrame *frame;
+        AVBufferRef *current_subtitle;
         int w, h;
-        unsigned int initialize; ///< marks if sub2video_update should force an initialization
     } sub2video;
 
     int dr1;
@@ -664,8 +663,6 @@ int filtergraph_is_simple(FilterGraph *fg);
 int init_simple_filtergraph(InputStream *ist, OutputStream *ost);
 int init_complex_filtergraph(FilterGraph *fg);
 
-void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub);
-
 int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame);
 
 int ffmpeg_parse_options(int argc, char **argv);
diff --git a/fftools/ffmpeg_filter.c b/fftools/ffmpeg_filter.c
index 49076f13ee..139843402c 100644
--- a/fftools/ffmpeg_filter.c
+++ b/fftools/ffmpeg_filter.c
@@ -19,6 +19,7 @@
  */
 
 #include <stdint.h>
+#include <libavfilter/internal.h>
 
 #include "ffmpeg.h"
 
@@ -222,8 +223,8 @@ static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
     int i;
 
     // TODO: support other filter types
-    if (type != AVMEDIA_TYPE_VIDEO && type != AVMEDIA_TYPE_AUDIO) {
-        av_log(NULL, AV_LOG_FATAL, "Only video and audio filters supported "
+    if (type != AVMEDIA_TYPE_VIDEO && type != AVMEDIA_TYPE_AUDIO && type != AVMEDIA_TYPE_SUBTITLE) {
+        av_log(NULL, AV_LOG_FATAL, "Only video, audio and subtitle filters are supported "
                "currently.\n");
         exit_program(1);
     }
@@ -243,10 +244,6 @@ static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
 
         for (i = 0; i < s->nb_streams; i++) {
             enum AVMediaType stream_type = s->streams[i]->codecpar->codec_type;
-            if (stream_type != type &&
-                !(stream_type == AVMEDIA_TYPE_SUBTITLE &&
-                  type == AVMEDIA_TYPE_VIDEO /* sub2video hack */))
-                continue;
             if (check_stream_specifier(s, s->streams[i], *p == ':' ? p + 1 : p) == 1) {
                 st = s->streams[i];
                 break;
@@ -416,6 +413,39 @@ static int insert_filter(AVFilterContext **last_filter, int *pad_idx,
     return 0;
 }
 
+static int configure_output_subtitle_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
+{
+    char *pix_fmts;
+    OutputStream *ost = ofilter->ost;
+    OutputFile    *of = output_files[ost->file_index];
+    AVFilterContext *last_filter = out->filter_ctx;
+    int pad_idx = out->pad_idx;
+    int ret;
+    char name[255];
+
+    snprintf(name, sizeof(name), "out_%d_%d", ost->file_index, ost->index);
+    ret = avfilter_graph_create_filter(&ofilter->filter,
+                                       avfilter_get_by_name("sbuffersink"),
+                                       name, NULL, NULL, fg->graph);
+
+    if (ret < 0)
+        return ret;
+
+    ////snprintf(name, sizeof(name), "trim_out_%d_%d",
+    ////         ost->file_index, ost->index);
+    ////ret = insert_trim(of->start_time, of->recording_time,
+    ////                  &last_filter, &pad_idx, name);
+    ////if (ret < 0)
+    ////    return ret;
+
+    ////ost->st->codecpar->codec_tag = MKTAG('a', 's', 's', 's');
+
+    if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
+        return ret;
+
+    return 0;
+}
+
 static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
 {
     char *pix_fmts;
@@ -594,7 +624,8 @@ static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter,
         int i;
 
         for (i=0; i<of->ctx->nb_streams; i++)
-            if (of->ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
+            if (of->ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ||
+                of->ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE)
                 break;
 
         if (i<of->ctx->nb_streams) {
@@ -628,6 +659,7 @@ static int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter,
     switch (avfilter_pad_get_type(out->filter_ctx->output_pads, out->pad_idx)) {
     case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fg, ofilter, out);
     case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fg, ofilter, out);
+    case AVMEDIA_TYPE_SUBTITLE: return configure_output_subtitle_filter(fg, ofilter, out);
     default: av_assert0(0); return 0;
     }
 }
@@ -652,46 +684,110 @@ static int sub2video_prepare(InputStream *ist, InputFilter *ifilter)
     AVFormatContext *avf = input_files[ist->file_index]->ctx;
     int i, w, h;
 
-    /* Compute the size of the canvas for the subtitles stream.
-       If the subtitles codecpar has set a size, use it. Otherwise use the
-       maximum dimensions of the video streams in the same file. */
+    ist->sub2video.is_active = 1;
+
     w = ifilter->width;
     h = ifilter->height;
     if (!(w && h)) {
-        for (i = 0; i < avf->nb_streams; i++) {
-            if (avf->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
-                w = FFMAX(w, avf->streams[i]->codecpar->width);
-                h = FFMAX(h, avf->streams[i]->codecpar->height);
-            }
-        }
-        if (!(w && h)) {
-            w = FFMAX(w, 720);
-            h = FFMAX(h, 576);
-        }
-        av_log(avf, AV_LOG_INFO, "sub2video: using %dx%d canvas\n", w, h);
+        w = ist->dec_ctx->width;
+        h = ist->dec_ctx->height;
     }
-    ist->sub2video.w = ifilter->width  = w;
-    ist->sub2video.h = ifilter->height = h;
 
-    ifilter->width  = ist->dec_ctx->width  ? ist->dec_ctx->width  : ist->sub2video.w;
-    ifilter->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
+    ist->sub2video.w = w;
+    ist->sub2video.h = h;
+    av_log(avf, AV_LOG_INFO, "sub2video: decoding size %dx%d\n", ist->sub2video.w, ist->sub2video.h);
 
-    /* rectangles are AV_PIX_FMT_PAL8, but we have no guarantee that the
-       palettes for all rectangles are identical or compatible */
-    ifilter->format = AV_PIX_FMT_RGB32;
+    ifilter->width = w;
+    ifilter->height = h;
 
-    ist->sub2video.frame = av_frame_alloc();
-    if (!ist->sub2video.frame)
-        return AVERROR(ENOMEM);
     ist->sub2video.last_pts = INT64_MIN;
-    ist->sub2video.end_pts  = INT64_MIN;
+    ist->sub2video.end_pts  = 0;
+
+    return 0;
+}
+
+static int configure_input_subtitle_filter(FilterGraph *fg, InputFilter *ifilter,
+                                        AVFilterInOut *in)
+{
+    AVFilterContext *last_filter;
+    const AVFilter *buffer_filt = avfilter_get_by_name("sbuffer");
+    InputStream *ist = ifilter->ist;
+    InputFile     *f = input_files[ist->file_index];
+    AVBPrint args;
+    char name[255];
+    int ret, pad_idx = 0;
+    int64_t tsoffset = 0;
+    AVBufferSrcParameters *par = av_buffersrc_parameters_alloc();
+
+    if (!par)
+        return AVERROR(ENOMEM);
+    memset(par, 0, sizeof(*par));
+    par->format = AV_PIX_FMT_NONE;
+
+    if (ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
+        av_log(NULL, AV_LOG_ERROR, "Cannot connect subtitle filter to audio input\n");
+        ret = AVERROR(EINVAL);
+        goto fail;
+    }
+
+    if (ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
+        av_log(NULL, AV_LOG_ERROR, "Cannot connect subtitle filter to video input\n");
+        ret = AVERROR(EINVAL);
+        goto fail;
+    }
+
+    ret = sub2video_prepare(ist, ifilter);
+    if (ret < 0)
+        goto fail;
+
+    snprintf(name, sizeof(name), "graph %d subtitle input from stream %d:%d", fg->index,
+             ist->file_index, ist->st->index);
+
 
-    /* sub2video structure has been (re-)initialized.
-       Mark it as such so that the system will be
-       initialized with the first received heartbeat. */
-    ist->sub2video.initialize = 1;
+    av_bprint_init(&args, 0, AV_BPRINT_SIZE_AUTOMATIC);
+    av_bprintf(&args,
+             "subtitle_type=%d:time_base=%d/%d:",
+             ifilter->format,
+             ist->st->time_base.num, ist->st->time_base.den);
+    if ((ret = avfilter_graph_create_filter(&ifilter->filter, buffer_filt, name,
+                                            args.str, NULL, fg->graph)) < 0)
+        goto fail;
+
+    par->hw_frames_ctx = ifilter->hw_frames_ctx;
+    par->format = ifilter->format;
+
+    ret = av_buffersrc_parameters_set(ifilter->filter, par);
+    if (ret < 0)
+        goto fail;
+    av_freep(&par);
+    last_filter = ifilter->filter;
 
+    if (in->filter_ctx->input_pads[in->pad_idx].type == AVMEDIA_TYPE_VIDEO) {
+        ret = insert_filter(&last_filter, &pad_idx, "sub2video", NULL);
+        if (ret < 0)
+            return ret;
+    }
+
+    ////snprintf(name, sizeof(name), "trim_in_%d_%d",
+    ////         ist->file_index, ist->st->index);
+    ////if (copy_ts) {
+    ////    tsoffset = f->start_time == AV_NOPTS_VALUE ? 0 : f->start_time;
+    ////    if (!start_at_zero && f->ctx->start_time != AV_NOPTS_VALUE)
+    ////        tsoffset += f->ctx->start_time;
+    ////}
+    ////ret = insert_trim(((f->start_time == AV_NOPTS_VALUE) || !f->accurate_seek) ?
+    ////                  AV_NOPTS_VALUE : tsoffset, f->recording_time,
+    ////                  &last_filter, &pad_idx, name);
+    ////if (ret < 0)
+    ////    return ret;
+
+    if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
+        return ret;
     return 0;
+fail:
+    av_freep(&par);
+
+    return ret;
 }
 
 static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
@@ -709,8 +805,13 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
     char name[255];
     int ret, pad_idx = 0;
     int64_t tsoffset = 0;
-    AVBufferSrcParameters *par = av_buffersrc_parameters_alloc();
+    AVBufferSrcParameters *par;
 
+    if (ist->dec_ctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
+        return configure_input_subtitle_filter(fg, ifilter, in);
+    }
+
+    par = av_buffersrc_parameters_alloc();
     if (!par)
         return AVERROR(ENOMEM);
     memset(par, 0, sizeof(*par));
@@ -725,12 +826,6 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
     if (!fr.num)
         fr = av_guess_frame_rate(input_files[ist->file_index]->ctx, ist->st, NULL);
 
-    if (ist->dec_ctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
-        ret = sub2video_prepare(ist, ifilter);
-        if (ret < 0)
-            goto fail;
-    }
-
     sar = ifilter->sample_aspect_ratio;
     if(!sar.den)
         sar = (AVRational){0,1};
@@ -742,7 +837,7 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
              tb.num, tb.den, sar.num, sar.den);
     if (fr.num && fr.den)
         av_bprintf(&args, ":frame_rate=%d/%d", fr.num, fr.den);
-    snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
+    snprintf(name, sizeof(name), "graph %d video input from stream %d:%d", fg->index,
              ist->file_index, ist->st->index);
 
 
@@ -938,6 +1033,7 @@ static int configure_input_filter(FilterGraph *fg, InputFilter *ifilter,
     switch (avfilter_pad_get_type(in->filter_ctx->input_pads, in->pad_idx)) {
     case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, ifilter, in);
     case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, ifilter, in);
+    case AVMEDIA_TYPE_SUBTITLE: return configure_input_subtitle_filter(fg, ifilter, in);
     default: av_assert0(0); return 0;
     }
 }
@@ -1110,19 +1206,6 @@ int configure_filtergraph(FilterGraph *fg)
         }
     }
 
-    /* process queued up subtitle packets */
-    for (i = 0; i < fg->nb_inputs; i++) {
-        InputStream *ist = fg->inputs[i]->ist;
-        if (ist->sub2video.sub_queue && ist->sub2video.frame) {
-            while (av_fifo_size(ist->sub2video.sub_queue)) {
-                AVSubtitle tmp;
-                av_fifo_generic_read(ist->sub2video.sub_queue, &tmp, sizeof(tmp), NULL);
-                sub2video_update(ist, INT64_MIN, &tmp);
-                avsubtitle_free(&tmp);
-            }
-        }
-    }
-
     return 0;
 
 fail:
@@ -1143,6 +1226,7 @@ int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
     ifilter->sample_rate         = frame->sample_rate;
     ifilter->channels            = frame->channels;
     ifilter->channel_layout      = frame->channel_layout;
+    ifilter->type                = frame->type;
 
     if (frame->hw_frames_ctx) {
         ifilter->hw_frames_ctx = av_buffer_ref(frame->hw_frames_ctx);
diff --git a/fftools/ffmpeg_hw.c b/fftools/ffmpeg_hw.c
index 41aaf776d7..f8b49fd5ac 100644
--- a/fftools/ffmpeg_hw.c
+++ b/fftools/ffmpeg_hw.c
@@ -449,7 +449,7 @@ int hw_device_setup_for_encode(OutputStream *ost)
     AVBufferRef *frames_ref = NULL;
     int i;
 
-    if (ost->filter) {
+    if (ost->filter && ost->filter->filter) {
         frames_ref = av_buffersink_get_hw_frames_ctx(ost->filter->filter);
         if (frames_ref &&
             ((AVHWFramesContext*)frames_ref->data)->format ==
diff --git a/fftools/ffmpeg_opt.c b/fftools/ffmpeg_opt.c
index 428934a3d8..9776455cca 100644
--- a/fftools/ffmpeg_opt.c
+++ b/fftools/ffmpeg_opt.c
@@ -2144,8 +2144,9 @@ static void init_output_filter(OutputFilter *ofilter, OptionsContext *o,
     switch (ofilter->type) {
     case AVMEDIA_TYPE_VIDEO: ost = new_video_stream(o, oc, -1); break;
     case AVMEDIA_TYPE_AUDIO: ost = new_audio_stream(o, oc, -1); break;
+    case AVMEDIA_TYPE_SUBTITLE: ost = new_subtitle_stream(o, oc, -1); break;
     default:
-        av_log(NULL, AV_LOG_FATAL, "Only video and audio filters are supported "
+        av_log(NULL, AV_LOG_FATAL, "Only video, audio and subtitle filters are supported "
                "currently.\n");
         exit_program(1);
     }
-- 
2.28.0.windows.1


