[FFmpeg-devel] [PATCH V3] Patch to add interlaced HEVC decoding to HEVCDEC
Jose Santiago
jsantiago at haivision.com
Wed Oct 30 20:31:35 EET 2024
From d55b49766d5e0256685c4874568db002d3dd7889 Mon Sep 17 00:00:00 2001
From: Jose Santiago <jsantiago at haivision.com>
Date: Wed, 30 Oct 2024 12:28:35 -0500
Subject: [PATCH] [PATCH V3] Patch to add interlaced HEVC decoding to HEVCDEC
---
libavcodec/hevc/hevcdec.c | 24 ++-
libavcodec/hevc/hevcdec.h | 13 ++
libavcodec/hevc/refs.c | 412 +++++++++++++++++++++++++++++++++++++-
libavcodec/hevc/sei.c | 16 +-
libavcodec/hevc/sei.h | 129 +++++++++++-
5 files changed, 569 insertions(+), 25 deletions(-)
diff --git a/libavcodec/hevc/hevcdec.c b/libavcodec/hevc/hevcdec.c
index 1ea8df0fa0..d7179bdcf7 100644
--- a/libavcodec/hevc/hevcdec.c
+++ b/libavcodec/hevc/hevcdec.c
@@ -359,7 +359,18 @@ static void export_stream_params(HEVCContext *s, const HEVCSPS *sps)
     avctx->profile = sps->ptl.general_ptl.profile_idc;
     avctx->level   = sps->ptl.general_ptl.level_idc;
 
-    ff_set_sar(avctx, sps->vui.common.sar);
+    // There are some streams in the wild that were encoded as field pictures
+    // and set a double-height sample aspect ratio so that players that do not
+    // support interlaced HEVC display the field pictures with double height.
+    // Since we are now combining the field pictures into a single interlaced
+    // frame, fix the sample aspect ratio to restore the correct shape of the
+    // reconstructed interlaced frames.
+    if (ff_hevc_sei_pict_struct_is_field_picture(s->sei.picture_timing.picture_struct) &&
+        sps->vui.common.sar.num == 1 && sps->vui.common.sar.den == 2) {
+        ff_set_sar(avctx, (AVRational){1, 1});
+    } else {
+        ff_set_sar(avctx, sps->vui.common.sar);
+    }
 
     if (sps->vui.common.video_signal_type_present_flag)
         avctx->color_range = sps->vui.common.video_full_range_flag ? AVCOL_RANGE_JPEG
@@ -3821,6 +3832,7 @@ static int hevc_ref_frame(HEVCFrame *dst, const HEVCFrame *src)
 
     dst->rpl = ff_refstruct_ref(src->rpl);
     dst->nb_rpl_elems = src->nb_rpl_elems;
+    dst->sei_pic_struct = src->sei_pic_struct;
     dst->poc = src->poc;
     dst->ctb_count = src->ctb_count;
     dst->flags = src->flags;
@@ -3851,6 +3863,8 @@ static av_cold int hevc_decode_free(AVCodecContext *avctx)
     av_freep(&s->md5_ctx);
     av_freep(&s->h274db);
 
+    ff_hevc_output_frame_construction_ctx_unref(s);
+
     ff_container_fifo_free(&s->output_fifo);
 
     for (int layer = 0; layer < FF_ARRAY_ELEMS(s->layers); layer++) {
@@ -3895,6 +3909,11 @@ static av_cold int hevc_init_context(AVCodecContext *avctx)
     s->local_ctx[0].logctx = avctx;
     s->local_ctx[0].common_cabac_state = &s->cabac;
 
+    if (ff_hevc_output_frame_construction_ctx_alloc(s) != 0 ||
+        !s->output_frame_construction_ctx) {
+        return AVERROR(ENOMEM);
+    }
+
     s->output_fifo = ff_container_fifo_alloc_avframe(0);
     if (!s->output_fifo)
         return AVERROR(ENOMEM);
@@ -3949,6 +3968,8 @@ static int hevc_update_thread_context(AVCodecContext *dst,
         }
     }
 
+    ff_hevc_output_frame_construction_ctx_replace(s, s0);
+
     for (int i = 0; i < FF_ARRAY_ELEMS(s->ps.vps_list); i++)
         ff_refstruct_replace(&s->ps.vps_list[i], s0->ps.vps_list[i]);
@@ -4012,6 +4033,7 @@ static int hevc_update_thread_context(AVCodecContext *dst,
     s->sei.common.content_light = s0->sei.common.content_light;
     s->sei.common.aom_film_grain = s0->sei.common.aom_film_grain;
     s->sei.tdrdi = s0->sei.tdrdi;
+    s->sei.picture_timing = s0->sei.picture_timing;
 
     return 0;
 }
diff --git a/libavcodec/hevc/hevcdec.h b/libavcodec/hevc/hevcdec.h
index 73b792c880..4ce764f287 100644
--- a/libavcodec/hevc/hevcdec.h
+++ b/libavcodec/hevc/hevcdec.h
@@ -369,6 +369,10 @@ typedef struct HEVCFrame {
int ctb_count;
int poc;
+ // SEI Picture Timing Picture Structure Type.
+ // HEVC_SEI_PicStructType.
+ int sei_pic_struct;
+
const HEVCPPS *pps; ///< RefStruct reference
RefPicListTab *rpl; ///< RefStruct reference
int nb_rpl_elems;
@@ -484,6 +488,8 @@ typedef struct HEVCLayerContext {
struct FFRefStructPool *rpl_tab_pool;
} HEVCLayerContext;
+struct HEVCOutputFrameConstructionContext;
+
typedef struct HEVCContext {
const AVClass *c; // needed by private avoptions
AVCodecContext *avctx;
@@ -502,6 +508,9 @@ typedef struct HEVCContext {
     /** 1 if the independent slice segment header was successfully parsed */
     uint8_t slice_initialized;
 
+    // Interlaced Frame Construction Context.
+    struct HEVCOutputFrameConstructionContext *output_frame_construction_ctx; ///< RefStruct reference
+
struct ContainerFifo *output_fifo;
HEVCParamSets ps;
@@ -664,6 +673,10 @@ static av_always_inline int ff_hevc_nal_is_nonref(enum HEVCNALUnitType type)
     return 0;
 }
 
+int ff_hevc_output_frame_construction_ctx_alloc(HEVCContext *s);
+void ff_hevc_output_frame_construction_ctx_replace(HEVCContext *dst, HEVCContext *src);
+void ff_hevc_output_frame_construction_ctx_unref(HEVCContext *s);
+
 /**
  * Find frames in the DPB that are ready for output and either write them to the
  * output FIFO or drop their output flag, depending on the value of discard.
diff --git a/libavcodec/hevc/refs.c b/libavcodec/hevc/refs.c
index 6ba667e9f5..703fa763f9 100644
--- a/libavcodec/hevc/refs.c
+++ b/libavcodec/hevc/refs.c
@@ -20,9 +20,13 @@
* License along with FFmpeg; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-
+
+#include "libavutil/avassert.h"
#include "libavutil/mem.h"
+#include "libavutil/pixdesc.h"
#include "libavutil/stereo3d.h"
+#include "libavutil/thread.h"
+#include "libavutil/timestamp.h"
#include "container_fifo.h"
#include "decode.h"
@@ -31,6 +35,99 @@
#include "progressframe.h"
#include "refstruct.h"
+typedef struct HEVCOutputFrameConstructionContext {
+ // Thread Data Access/Synchronization.
+ AVMutex mutex;
+
+ // Decoder Output Tracking.
+ uint64_t dpb_counter;
+ int dpb_poc;
+ uint64_t dpb_poc_ooorder_counter;
+
+ // Collect the First Field.
+ int have_first_field;
+ int first_field_poc;
+ int first_field_sei_pic_struct;
+ AVFrame *first_field;
+
+ uint64_t orphaned_field_pictures;
+
+ // Reconstructed Interlaced Frames From Field Pictures for Output.
+ AVFrame *constructed_frame;
+
+ // Output Frame Counter.
+ uint64_t output_counter;
+ int output_poc;
+ uint64_t output_poc_ooorder_counter;
+} HEVCOutputFrameConstructionContext;
+
+static void hevc_output_frame_construction_ctx_free(FFRefStructOpaque opaque, void *obj)
+{
+    HEVCOutputFrameConstructionContext *ctx = (HEVCOutputFrameConstructionContext *)obj;
+
+ if (!ctx)
+ return;
+
+ av_frame_free(&ctx->first_field);
+ av_frame_free(&ctx->constructed_frame);
+ av_assert0(ff_mutex_destroy(&ctx->mutex) == 0);
+}
+
+int ff_hevc_output_frame_construction_ctx_alloc(HEVCContext *s)
+{
+ if (s->output_frame_construction_ctx) {
+ av_log(s->avctx, AV_LOG_ERROR,
+ "s->output_frame_construction_ctx is already set.\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+    s->output_frame_construction_ctx =
+        ff_refstruct_alloc_ext(sizeof(*(s->output_frame_construction_ctx)),
+                               0, NULL, hevc_output_frame_construction_ctx_free);
+ if (!s->output_frame_construction_ctx)
+ return AVERROR(ENOMEM);
+
+    av_assert0(ff_mutex_init(&s->output_frame_construction_ctx->mutex, NULL) == 0);
+
+ return 0;
+}
+
+void ff_hevc_output_frame_construction_ctx_replace(HEVCContext *dst, HEVCContext *src)
+{
+    ff_refstruct_replace(&dst->output_frame_construction_ctx,
+                         src->output_frame_construction_ctx);
+}
+
+void ff_hevc_output_frame_construction_ctx_unref(HEVCContext *s)
+{
+ if (s->output_frame_construction_ctx &&
+ ff_refstruct_exclusive(s->output_frame_construction_ctx)) {
+
+        HEVCOutputFrameConstructionContext *ctx = s->output_frame_construction_ctx;
+
+        ff_mutex_lock(&ctx->mutex);
+
+        if (ctx->dpb_counter) {
+            av_log(s->avctx, AV_LOG_ERROR,
+                   "[HEVCOutputFrameConstructionContext @ 0x%p]:\n"
+                   "    DPB:    Counter=%" PRIu64 " POCOutOfOrder=%" PRIu64 " Orphaned=%" PRIu64 "\n"
+                   "    Output: Counter=%" PRIu64 " POCOutOfOrder=%" PRIu64 "\n"
+                   "%s",
+                   ctx,
+                   ctx->dpb_counter,
+                   ctx->dpb_poc_ooorder_counter,
+                   ctx->orphaned_field_pictures,
+                   ctx->output_counter,
+                   ctx->output_poc_ooorder_counter,
+                   "");
+ }
+
+ ff_mutex_unlock(&ctx->mutex);
+ }
+
+ ff_refstruct_unref(&s->output_frame_construction_ctx);
+}
+
void ff_hevc_unref_frame(HEVCFrame *frame, int flags)
{
frame->flags &= ~flags;
@@ -151,11 +248,15 @@ static HEVCFrame *alloc_frame(HEVCContext *s, HEVCLayerContext *l)
         for (j = 0; j < frame->ctb_count; j++)
             frame->rpl_tab[j] = frame->rpl;
 
-        if (s->sei.picture_timing.picture_struct == AV_PICTURE_STRUCTURE_TOP_FIELD)
-            frame->f->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST;
-        if ((s->sei.picture_timing.picture_struct == AV_PICTURE_STRUCTURE_TOP_FIELD) ||
-            (s->sei.picture_timing.picture_struct == AV_PICTURE_STRUCTURE_BOTTOM_FIELD))
+        frame->sei_pic_struct = s->sei.picture_timing.picture_struct;
+        if (ff_hevc_sei_pic_struct_is_interlaced(frame->sei_pic_struct)) {
             frame->f->flags |= AV_FRAME_FLAG_INTERLACED;
+            if (ff_hevc_sei_pic_struct_is_tff(frame->sei_pic_struct))
+                frame->f->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST;
+            if (frame->sei_pic_struct == HEVC_SEI_PIC_STRUCT_FRAME_TFBFTF ||
+                frame->sei_pic_struct == HEVC_SEI_PIC_STRUCT_FRAME_BFTFBF)
+                frame->f->repeat_pict = 1;
+        }
 
         ret = ff_hwaccel_frame_priv_alloc(s->avctx, &frame->hwaccel_picture_private);
         if (ret < 0)
@@ -223,6 +324,81 @@ static void unref_missing_refs(HEVCLayerContext *l)
}
}
+static void copy_field2(AVFrame *_dst, const AVFrame *_src)
+{
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(_src->format);
+ int i, j, planes_nb = 0;
+ for (i = 0; i < desc->nb_components; i++)
+ planes_nb = FFMAX(planes_nb, desc->comp[i].plane + 1);
+ for (i = 0; i < planes_nb; i++) {
+ int h = _src->height;
+ uint8_t *dst = _dst->data[i] + (_dst->linesize[i] / 2);
+ uint8_t *src = _src->data[i];
+ if (i == 1 || i == 2) {
+ h = FF_CEIL_RSHIFT(_src->height, desc->log2_chroma_h);
+ }
+ for (j = 0; j < h; j++) {
+ memcpy(dst, src, _src->linesize[i]);
+ dst += _dst->linesize[i];
+ src += _src->linesize[i];
+ }
+ }
+}
+
+static int interlaced_frame_from_fields(AVFrame *dst,
+ const AVFrame *field1,
+ const AVFrame *field2)
+{
+ int i, ret = 0;
+
+ av_frame_unref(dst);
+
+ dst->format = field1->format;
+ dst->width = field1->width;
+ dst->height = field1->height * 2;
+ dst->nb_samples = field1->nb_samples;
+ ret = av_channel_layout_copy(&dst->ch_layout, &field1->ch_layout);
+ if (ret < 0)
+ return ret;
+
+ ret = av_frame_copy_props(dst, field1);
+ if (ret < 0)
+ return ret;
+    if (field1->duration > 0 && field1->duration != AV_NOPTS_VALUE)
+        dst->duration = field1->duration * 2;
+    else if (field2->duration > 0 && field2->duration != AV_NOPTS_VALUE)
+        dst->duration = field2->duration * 2;
+
+ for (i = 0; i < field2->nb_side_data; i++) {
+ const AVFrameSideData *sd_src = field2->side_data[i];
+ AVFrameSideData *sd_dst;
+ AVBufferRef *ref = av_buffer_ref(sd_src->buf);
+ sd_dst = av_frame_new_side_data_from_buf(dst, sd_src->type, ref);
+ if (!sd_dst) {
+ av_buffer_unref(&ref);
+ return AVERROR(ENOMEM);
+ }
+ }
+
+ for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
+ dst->linesize[i] = field1->linesize[i]*2;
+
+ ret = av_frame_get_buffer(dst, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = av_frame_copy(dst, field1);
+ if (ret < 0)
+ av_frame_unref(dst);
+
+ copy_field2(dst, field2);
+
+ for (i = 0; i < AV_NUM_DATA_POINTERS; i++)
+ dst->linesize[i] = field1->linesize[i];
+
+ return ret;
+}
+
 int ff_hevc_output_frames(HEVCContext *s,
                           unsigned layers_active_decode, unsigned layers_active_output,
                           unsigned max_output, unsigned max_dpb, int discard)
@@ -265,10 +441,232 @@ int ff_hevc_output_frames(HEVCContext *s,
         AVFrame *f = frame->needs_fg ? frame->frame_grain : frame->f;
         int output = !discard && (layers_active_output & (1 << min_layer));
+        if (frame->poc != s->poc) {
+            if (s->avctx->active_thread_type == FF_THREAD_FRAME) {
+                // Wait for the other thread to finish decoding this frame/field
+                // picture; otherwise image corruption has been seen for some streams.
+                av_log(s->avctx, AV_LOG_DEBUG,
+                       "Waiting on Frame POC: %d.\n",
+                       frame->poc);
+                ff_progress_frame_await(&frame->tf, INT_MAX);
+            }
+        } else {
+            // This is the context currently decoding. Skip it to ensure that
+            // this frame is completely decoded and finalized, which allows the
+            // next context to process it; otherwise image corruption has been
+            // seen for some streams.
+            av_log(s->avctx, AV_LOG_DEBUG,
+                   "Schedule Frame for Next Pass POC: %d.\n",
+                   frame->poc);
+            return 0;
+        }
+
+ av_assert0(s->output_frame_construction_ctx);
+ av_assert0(ff_mutex_lock(&s->output_frame_construction_ctx->mutex) == 0);
+
if (output) {
- f->pkt_dts = s->pkt_dts;
- ret = ff_container_fifo_write(s->output_fifo, f);
+ const int dpb_poc = frame->poc;
+ const int dpb_sei_pic_struct = frame->sei_pic_struct;
+ AVFrame *output_frame = f;
+ int output_poc = dpb_poc;
+ int output_sei_pic_struct = dpb_sei_pic_struct;
+
+ s->output_frame_construction_ctx->dpb_counter++;
+            if (s->output_frame_construction_ctx->dpb_counter > 1 &&
+                dpb_poc < s->output_frame_construction_ctx->dpb_poc &&
+                dpb_poc > 0) {
+                s->output_frame_construction_ctx->dpb_poc_ooorder_counter++;
+                av_log(s->avctx, AV_LOG_ERROR,
+                       "DPB POC Out of Order POC %d < PrevPOC %d "
+                       ": Counter=%" PRIu64 " OORCounter=%" PRIu64 ".\n",
+                       dpb_poc,
+                       s->output_frame_construction_ctx->dpb_poc,
+                       s->output_frame_construction_ctx->dpb_counter,
+                       s->output_frame_construction_ctx->dpb_poc_ooorder_counter);
+ }
+ s->output_frame_construction_ctx->dpb_poc = dpb_poc;
+
+            if (ff_hevc_sei_pict_struct_is_field_picture(dpb_sei_pic_struct)) {
+                const int have_first_field = s->output_frame_construction_ctx->have_first_field;
+                const int is_first_field =
+                    (ff_hevc_sei_pic_struct_is_tff(dpb_sei_pic_struct) &&
+                     ff_hevc_sei_pic_struct_is_tf(dpb_sei_pic_struct)) ||
+                    (ff_hevc_sei_pic_struct_is_bff(dpb_sei_pic_struct) &&
+                     ff_hevc_sei_pic_struct_is_bf(dpb_sei_pic_struct)) ||
+                    (!s->output_frame_construction_ctx->have_first_field &&
+                     (dpb_poc % 2) == 0) ||
+                    (s->output_frame_construction_ctx->have_first_field &&
+                     s->output_frame_construction_ctx->first_field_sei_pic_struct == dpb_sei_pic_struct &&
+                     (dpb_poc % 2) == 0 &&
+                     dpb_poc > s->output_frame_construction_ctx->first_field_poc);
+
+ output_frame = NULL;
+
+                if (!s->output_frame_construction_ctx->first_field) {
+                    s->output_frame_construction_ctx->first_field = av_frame_alloc();
+                    if (!s->output_frame_construction_ctx->first_field) {
+                        av_log(s->avctx, AV_LOG_ERROR, "AVERROR(ENOMEM)\n");
+                        ret = AVERROR(ENOMEM);
+                        goto unref_frame_and_check_ret;
+                    }
+                }
+                if (!s->output_frame_construction_ctx->constructed_frame) {
+                    s->output_frame_construction_ctx->constructed_frame = av_frame_alloc();
+                    if (!s->output_frame_construction_ctx->constructed_frame) {
+                        av_log(s->avctx, AV_LOG_ERROR, "AVERROR(ENOMEM)\n");
+                        ret = AVERROR(ENOMEM);
+                        goto unref_frame_and_check_ret;
+                    }
+                }
+
+                if (is_first_field) {
+                    // This is a first field picture.
+                    av_log(s->avctx, AV_LOG_DEBUG,
+                           "Found first field picture POC %d.\n",
+                           dpb_poc);
+                    if (s->output_frame_construction_ctx->have_first_field) {
+                        // We were waiting for a second field, but got another
+                        // first field instead.
+                        av_log(s->avctx, AV_LOG_ERROR,
+                               "Discarded Orphaned First Field with POC %d.\n",
+                               s->output_frame_construction_ctx->first_field_poc);
+                    }
+                    s->output_frame_construction_ctx->have_first_field = 1;
+                    s->output_frame_construction_ctx->first_field_sei_pic_struct = dpb_sei_pic_struct;
+                    s->output_frame_construction_ctx->first_field_poc = dpb_poc;
+                    ret = av_frame_ref(s->output_frame_construction_ctx->first_field, f);
+                    if (ret < 0) {
+                        av_log(s->avctx, AV_LOG_ERROR,
+                               "Failure updating first field picture POC %d.\n",
+                               dpb_poc);
+                        s->output_frame_construction_ctx->have_first_field = 0;
+                        s->output_frame_construction_ctx->orphaned_field_pictures++;
+                        goto unref_frame_and_check_ret;
+                    }
+                } else if (have_first_field) {
+                    // We found the next field.
+                    if (f->width == s->output_frame_construction_ctx->first_field->width &&
+                        f->height == s->output_frame_construction_ctx->first_field->height) {
+                        // Combine the top and bottom fields into one frame for output.
+                        AVFrame *constructed_frame = s->output_frame_construction_ctx->constructed_frame;
+                        AVFrame *top_field;
+                        AVFrame *bottom_field;
+                        int tfPoc, bfPoc;
+                        if (ff_hevc_sei_pic_struct_is_tf(dpb_sei_pic_struct)) {
+                            top_field    = f;
+                            tfPoc        = dpb_poc;
+                            bottom_field = s->output_frame_construction_ctx->first_field;
+                            bfPoc        = s->output_frame_construction_ctx->first_field_poc;
+                        } else {
+                            top_field    = s->output_frame_construction_ctx->first_field;
+                            tfPoc        = s->output_frame_construction_ctx->first_field_poc;
+                            bottom_field = f;
+                            bfPoc        = dpb_poc;
+                        }
+                        ret = interlaced_frame_from_fields(constructed_frame, top_field, bottom_field);
+                        if (ret >= 0) {
+                            output_frame = constructed_frame;
+                            output_poc = s->output_frame_construction_ctx->first_field_poc;
+                            output_sei_pic_struct = s->output_frame_construction_ctx->first_field_sei_pic_struct;
+                            output_frame->flags |= AV_FRAME_FLAG_INTERLACED;
+                            if (!ff_hevc_sei_pic_struct_is_bf(output_sei_pic_struct)) {
+                                output_frame->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST;
+                            } else {
+                                output_frame->flags &= ~AV_FRAME_FLAG_TOP_FIELD_FIRST;
+                            }
+                        } else {
+                            av_log(s->avctx, AV_LOG_ERROR,
+                                   "Interlaced Frame Construction Failure POCs: %d %d.\n",
+                                   tfPoc, bfPoc);
+                            s->output_frame_construction_ctx->orphaned_field_pictures += 2;
+                        }
+                    } else if ((dpb_poc % 2) == 0) {
+                        av_log(s->avctx, AV_LOG_ERROR,
+                               "Discarded orphaned first field picture POC: %d.\n",
+                               s->output_frame_construction_ctx->first_field_poc);
+                        s->output_frame_construction_ctx->orphaned_field_pictures++;
+                        // This may be the next first field.
+                        s->output_frame_construction_ctx->have_first_field = 0;
+                        av_assert0(ff_mutex_unlock(&s->output_frame_construction_ctx->mutex) == 0);
+                        continue;
+                    } else {
+                        av_log(s->avctx, AV_LOG_ERROR,
+                               "Discarded mismatched field pictures POCs: %d %d.\n",
+                               s->output_frame_construction_ctx->first_field_poc,
+                               dpb_poc);
+                        s->output_frame_construction_ctx->orphaned_field_pictures++;
+                    }
+                    // Find the next first field.
+                    s->output_frame_construction_ctx->have_first_field = 0;
+                } else {
+                    // We have a second field without a first field.
+                    av_log(s->avctx, AV_LOG_ERROR,
+                           "Discarded orphaned second field picture with POC %d.\n",
+                           dpb_poc);
+                    s->output_frame_construction_ctx->orphaned_field_pictures++;
+                }
+            } else if (s->output_frame_construction_ctx->have_first_field) {
+                av_log(s->avctx, AV_LOG_ERROR,
+                       "Discarded orphaned first field picture POC: %d.\n",
+                       s->output_frame_construction_ctx->first_field_poc);
+                s->output_frame_construction_ctx->orphaned_field_pictures++;
+                // Find the next first field.
+                s->output_frame_construction_ctx->have_first_field = 0;
+            }
+
+            if (output_frame) {
+                output_frame->pkt_dts = s->pkt_dts;
+
+                av_log(s->avctx, AV_LOG_DEBUG,
+                       "s=0x%" PRIx64 " s->avctx=0x%" PRIx64 "\n"
+                       " ====Output: FrameType:%s\n"
+                       " === POC=%d PKTDTS=%s PTS=%s Duration=%s\n"
+                       " === SEIPic=%d Interlaced=%s TFF=%s PictType='%c' Key=%s\n"
+                       " === WxH=%dx%d SAR=%dx%d\n"
+                       "%s",
+                       (uint64_t)s, (uint64_t)s->avctx,
+                       (output_frame->flags & AV_FRAME_FLAG_INTERLACED) ? "Interlaced" : "Progressive",
+                       output_poc,
+                       av_ts2str(output_frame->pkt_dts),
+                       av_ts2str(output_frame->pts),
+                       av_ts2str(output_frame->duration),
+                       output_sei_pic_struct,
+                       (output_frame->flags & AV_FRAME_FLAG_INTERLACED) ? "Yes" : "No",
+                       (output_frame->flags & AV_FRAME_FLAG_TOP_FIELD_FIRST) ? "Yes" : "No",
+                       av_get_picture_type_char(output_frame->pict_type),
+                       (output_frame->flags & AV_FRAME_FLAG_KEY) ? "Yes" : "No",
+                       output_frame->width, output_frame->height,
+                       (int)output_frame->sample_aspect_ratio.num,
+                       (int)output_frame->sample_aspect_ratio.den,
+                       "");
+
+                s->output_frame_construction_ctx->output_counter++;
+                if (output_poc != dpb_poc &&
+                    s->output_frame_construction_ctx->output_counter > 1 &&
+                    output_poc < s->output_frame_construction_ctx->output_poc &&
+                    output_poc > 0) {
+                    s->output_frame_construction_ctx->output_poc_ooorder_counter++;
+                    av_log(s->avctx, AV_LOG_ERROR,
+                           "Output POC Out of Order POC %d < PrevPOC %d "
+                           ": Counter=%" PRIu64 " OORCounter=%" PRIu64 ".\n",
+                           output_poc,
+                           s->output_frame_construction_ctx->output_poc,
+                           s->output_frame_construction_ctx->output_counter,
+                           s->output_frame_construction_ctx->output_poc_ooorder_counter);
+                }
+                s->output_frame_construction_ctx->output_poc = output_poc;
+
+                ret = ff_container_fifo_write(s->output_fifo, output_frame);
+            }
}
+
+unref_frame_and_check_ret:
+
+        av_assert0(ff_mutex_unlock(&s->output_frame_construction_ctx->mutex) == 0);
+
ff_hevc_unref_frame(frame, HEVC_FRAME_FLAG_OUTPUT);
if (ret < 0)
return ret;
diff --git a/libavcodec/hevc/sei.c b/libavcodec/hevc/sei.c
index e11a33773c..50b669c34b 100644
--- a/libavcodec/hevc/sei.c
+++ b/libavcodec/hevc/sei.c
@@ -59,21 +59,7 @@ static int decode_nal_sei_pic_timing(HEVCSEI *s, GetBitContext *gb,
         return AVERROR_INVALIDDATA;
 
     if (sps->vui.frame_field_info_present_flag) {
-        int pic_struct = get_bits(gb, 4);
-        h->picture_struct = AV_PICTURE_STRUCTURE_UNKNOWN;
-        if (pic_struct == 2 || pic_struct == 10 || pic_struct == 12) {
-            av_log(logctx, AV_LOG_DEBUG, "BOTTOM Field\n");
-            h->picture_struct = AV_PICTURE_STRUCTURE_BOTTOM_FIELD;
-        } else if (pic_struct == 1 || pic_struct == 9 || pic_struct == 11) {
-            av_log(logctx, AV_LOG_DEBUG, "TOP Field\n");
-            h->picture_struct = AV_PICTURE_STRUCTURE_TOP_FIELD;
-        } else if (pic_struct == 7) {
-            av_log(logctx, AV_LOG_DEBUG, "Frame/Field Doubling\n");
-            h->picture_struct = HEVC_SEI_PIC_STRUCT_FRAME_DOUBLING;
-        } else if (pic_struct == 8) {
-            av_log(logctx, AV_LOG_DEBUG, "Frame/Field Tripling\n");
-            h->picture_struct = HEVC_SEI_PIC_STRUCT_FRAME_TRIPLING;
-        }
+        h->picture_struct = get_bits(gb, 4);
     }
 
     return 0;
diff --git a/libavcodec/hevc/sei.h b/libavcodec/hevc/sei.h
index 806540fac6..aff1f5fc1d 100644
--- a/libavcodec/hevc/sei.h
+++ b/libavcodec/hevc/sei.h
@@ -33,10 +33,135 @@
typedef enum {
-    HEVC_SEI_PIC_STRUCT_FRAME_DOUBLING = 7,
-    HEVC_SEI_PIC_STRUCT_FRAME_TRIPLING = 8
+    // SEI Picture Timing Picture Structure.
+    // From the ITU-T H.265 Standards Document v3 (04/2015),
+    // Table D.2: Interpretation of pic_struct.
+    // When present, pic_struct is constrained so that either:
+    // - all pictures in the CVS are one of: 0, 7 or 8;
+    // - all pictures in the CVS are one of: 1, 2, 9, 10, 11 or 12;
+    // - all pictures in the CVS are one of: 3, 4, 5 or 6.
+
+ // progressive frame.
+ HEVC_SEI_PIC_STRUCT_FRAME_PROGRESSIVE = 0,
+
+ // top field.
+ HEVC_SEI_PIC_STRUCT_FIELD_TOP = 1,
+ // bottom field.
+ HEVC_SEI_PIC_STRUCT_FIELD_BOTTOM = 2,
+
+ // top field, bottom field, in that order. Top Field First.
+ HEVC_SEI_PIC_STRUCT_FRAME_TFBF = 3,
+ // bottom Field, top field, in that order. Bottom Field First.
+ HEVC_SEI_PIC_STRUCT_FRAME_BFTF = 4,
+
+ // top field, bottom field, top field repeated, Top Field First.
+ HEVC_SEI_PIC_STRUCT_FRAME_TFBFTF = 5,
+ // bottom field, top field, bottom field repeated, Bottom Field First.
+ HEVC_SEI_PIC_STRUCT_FRAME_BFTFBF = 6,
+
+ // frame doubling.
+ HEVC_SEI_PIC_STRUCT_FRAME_DOUBLING = 7,
+    // frame tripling.
+ HEVC_SEI_PIC_STRUCT_FRAME_TRIPLING = 8,
+
+ // top field paired with previous bottom field. Bottom Field First.
+ HEVC_SEI_PIC_STRUCT_FIELD_TFPBF = 9,
+ // bottom field paired with previous top field. Top Field First.
+ HEVC_SEI_PIC_STRUCT_FIELD_BFPTF = 10,
+
+ // top field paired with next bottom field. Top Field First.
+ HEVC_SEI_PIC_STRUCT_FIELD_TFNBF = 11,
+ // bottom field paired with next top field. Bottom Field First.
+ HEVC_SEI_PIC_STRUCT_FIELD_BFNTF = 12,
} HEVC_SEI_PicStructType;
+// Returns 1 - when type is interlaced, 0 - otherwise.
+static inline int ff_hevc_sei_pic_struct_is_interlaced(HEVC_SEI_PicStructType type)
+{
+ switch (type) {
+ case HEVC_SEI_PIC_STRUCT_FIELD_TOP:
+ case HEVC_SEI_PIC_STRUCT_FIELD_BOTTOM:
+ case HEVC_SEI_PIC_STRUCT_FRAME_TFBF:
+ case HEVC_SEI_PIC_STRUCT_FRAME_BFTF:
+ case HEVC_SEI_PIC_STRUCT_FRAME_TFBFTF:
+ case HEVC_SEI_PIC_STRUCT_FRAME_BFTFBF:
+ case HEVC_SEI_PIC_STRUCT_FIELD_TFPBF:
+ case HEVC_SEI_PIC_STRUCT_FIELD_BFPTF:
+ case HEVC_SEI_PIC_STRUCT_FIELD_TFNBF:
+ case HEVC_SEI_PIC_STRUCT_FIELD_BFNTF:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+// Returns 1 - when type is top field first, 0 - otherwise.
+static inline int ff_hevc_sei_pic_struct_is_tff(HEVC_SEI_PicStructType type)
+{
+ switch (type) {
+ case HEVC_SEI_PIC_STRUCT_FRAME_TFBF:
+ case HEVC_SEI_PIC_STRUCT_FRAME_TFBFTF:
+ case HEVC_SEI_PIC_STRUCT_FIELD_BFPTF:
+ case HEVC_SEI_PIC_STRUCT_FIELD_TFNBF:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+// Returns 1 - when type is bottom field first, 0 - otherwise.
+static inline int ff_hevc_sei_pic_struct_is_bff(HEVC_SEI_PicStructType type)
+{
+ switch (type) {
+ case HEVC_SEI_PIC_STRUCT_FRAME_BFTF:
+ case HEVC_SEI_PIC_STRUCT_FRAME_BFTFBF:
+ case HEVC_SEI_PIC_STRUCT_FIELD_TFPBF:
+ case HEVC_SEI_PIC_STRUCT_FIELD_BFNTF:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+// Returns 1 - when type is top field, 0 - otherwise.
+static inline int ff_hevc_sei_pic_struct_is_tf(HEVC_SEI_PicStructType type)
+{
+ switch (type) {
+ case HEVC_SEI_PIC_STRUCT_FIELD_TOP:
+ case HEVC_SEI_PIC_STRUCT_FIELD_TFPBF:
+ case HEVC_SEI_PIC_STRUCT_FIELD_TFNBF:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+// Returns 1 - when type is bottom field, 0 - otherwise.
+static inline int ff_hevc_sei_pic_struct_is_bf(HEVC_SEI_PicStructType type)
+{
+ switch (type) {
+ case HEVC_SEI_PIC_STRUCT_FIELD_BOTTOM:
+ case HEVC_SEI_PIC_STRUCT_FIELD_BFPTF:
+ case HEVC_SEI_PIC_STRUCT_FIELD_BFNTF:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+// Returns 1 - when type is a field picture, 0 - otherwise.
+static inline int ff_hevc_sei_pict_struct_is_field_picture(HEVC_SEI_PicStructType type)
+{
+    return (ff_hevc_sei_pic_struct_is_tf(type) || ff_hevc_sei_pic_struct_is_bf(type)) ? 1 : 0;
+}
+
+// Returns 1 - when type is a frame picture, 0 - otherwise.
+static inline int ff_hevc_sei_pict_struct_is_frame_picture(HEVC_SEI_PicStructType type)
+{
+ return ff_hevc_sei_pict_struct_is_field_picture(type) ? 0 : 1;
+}
+
+
typedef struct HEVCSEIPictureHash {
uint8_t md5[3][16];
uint8_t is_md5;
--
2.46.1
Jose Santiago
Senior Architect
847-362-6800 ext 7411