[FFmpeg-cvslog] Merge commit 'c8dcff0cdb17d0aa03ac729eba12d1a20f1f59c8'
Clément Bœsch
git at videolan.org
Sun Jun 12 19:03:48 CEST 2016
ffmpeg | branch: master | Clément Bœsch <u at pkh.me> | Sun Jun 12 16:06:58 2016 +0200| [bd3fd467febe92300e0ebf8ff13c193f9236479a] | committer: Clément Bœsch
Merge commit 'c8dcff0cdb17d0aa03ac729eba12d1a20f1f59c8'
* commit 'c8dcff0cdb17d0aa03ac729eba12d1a20f1f59c8':
h264: factor out calculating the POC count into a separate file
Merged-by: Clément Bœsch <u at pkh.me>
> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=bd3fd467febe92300e0ebf8ff13c193f9236479a
---
libavcodec/dxva2_h264.c | 2 +-
libavcodec/h264.c | 103 ++++++---------------------------------------
libavcodec/h264.h | 13 +-----
libavcodec/h264_parse.c | 80 +++++++++++++++++++++++++++++++++++
libavcodec/h264_parse.h | 17 ++++++++
libavcodec/h264_parser.c | 34 ++++++++-------
libavcodec/h264_picture.c | 8 ++--
libavcodec/h264_refs.c | 2 +-
libavcodec/h264_slice.c | 66 ++++++++++++++---------------
libavcodec/vaapi_h264.c | 2 +-
libavcodec/vdpau.c | 2 +-
libavcodec/vdpau_h264.c | 2 +-
12 files changed, 170 insertions(+), 161 deletions(-)
diff --git a/libavcodec/dxva2_h264.c b/libavcodec/dxva2_h264.c
index bd1fa1e..a6aa0f6 100644
--- a/libavcodec/dxva2_h264.c
+++ b/libavcodec/dxva2_h264.c
@@ -145,7 +145,7 @@ static void fill_picture_parameters(const AVCodecContext *avctx, AVDXVAContext *
pp->num_ref_idx_l0_active_minus1 = pps->ref_count[0] - 1;
pp->num_ref_idx_l1_active_minus1 = pps->ref_count[1] - 1;
pp->Reserved8BitsA = 0;
- pp->frame_num = h->frame_num;
+ pp->frame_num = h->poc.frame_num;
pp->log2_max_frame_num_minus4 = sps->log2_max_frame_num - 4;
pp->pic_order_cnt_type = sps->poc_type;
if (sps->poc_type == 0)
diff --git a/libavcodec/h264.c b/libavcodec/h264.c
index 0de6d91..367f6bf 100644
--- a/libavcodec/h264.c
+++ b/libavcodec/h264.c
@@ -429,11 +429,11 @@ static int h264_init_context(AVCodecContext *avctx, H264Context *h)
h->slice_context_count = 1;
h->workaround_bugs = avctx->workaround_bugs;
h->flags = avctx->flags;
- h->prev_poc_msb = 1 << 16;
+ h->poc.prev_poc_msb = 1 << 16;
h->x264_build = -1;
h->recovery_frame = -1;
h->frame_recovered = 0;
- h->prev_frame_num = -1;
+ h->poc.prev_frame_num = -1;
h->sei_fpa.frame_packing_arrangement_cancel_flag = -1;
h->next_outputed_poc = INT_MIN;
@@ -831,10 +831,10 @@ static void idr(H264Context *h)
{
int i;
ff_h264_remove_all_refs(h);
- h->prev_frame_num =
- h->prev_frame_num_offset = 0;
- h->prev_poc_msb = 1<<16;
- h->prev_poc_lsb = 0;
+ h->poc.prev_frame_num =
+ h->poc.prev_frame_num_offset = 0;
+ h->poc.prev_poc_msb = 1<<16;
+ h->poc.prev_poc_lsb = 0;
for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
h->last_pocs[i] = INT_MIN;
}
@@ -848,7 +848,7 @@ void ff_h264_flush_change(H264Context *h)
h->prev_interlaced_frame = 1;
idr(h);
- h->prev_frame_num = -1;
+ h->poc.prev_frame_num = -1;
if (h->cur_pic_ptr) {
h->cur_pic_ptr->reference = 0;
for (j=i=0; h->delayed_pic[i]; i++)
@@ -889,85 +889,6 @@ static void flush_dpb(AVCodecContext *avctx)
h->context_initialized = 0;
}
-int ff_init_poc(H264Context *h, int pic_field_poc[2], int *pic_poc)
-{
- const SPS *sps = h->ps.sps;
- const int max_frame_num = 1 << sps->log2_max_frame_num;
- int field_poc[2];
-
- h->frame_num_offset = h->prev_frame_num_offset;
- if (h->frame_num < h->prev_frame_num)
- h->frame_num_offset += max_frame_num;
-
- if (sps->poc_type == 0) {
- const int max_poc_lsb = 1 << sps->log2_max_poc_lsb;
-
- if (h->poc_lsb < h->prev_poc_lsb &&
- h->prev_poc_lsb - h->poc_lsb >= max_poc_lsb / 2)
- h->poc_msb = h->prev_poc_msb + max_poc_lsb;
- else if (h->poc_lsb > h->prev_poc_lsb &&
- h->prev_poc_lsb - h->poc_lsb < -max_poc_lsb / 2)
- h->poc_msb = h->prev_poc_msb - max_poc_lsb;
- else
- h->poc_msb = h->prev_poc_msb;
- field_poc[0] =
- field_poc[1] = h->poc_msb + h->poc_lsb;
- if (h->picture_structure == PICT_FRAME)
- field_poc[1] += h->delta_poc_bottom;
- } else if (sps->poc_type == 1) {
- int abs_frame_num, expected_delta_per_poc_cycle, expectedpoc;
- int i;
-
- if (sps->poc_cycle_length != 0)
- abs_frame_num = h->frame_num_offset + h->frame_num;
- else
- abs_frame_num = 0;
-
- if (h->nal_ref_idc == 0 && abs_frame_num > 0)
- abs_frame_num--;
-
- expected_delta_per_poc_cycle = 0;
- for (i = 0; i < sps->poc_cycle_length; i++)
- // FIXME integrate during sps parse
- expected_delta_per_poc_cycle += sps->offset_for_ref_frame[i];
-
- if (abs_frame_num > 0) {
- int poc_cycle_cnt = (abs_frame_num - 1) / sps->poc_cycle_length;
- int frame_num_in_poc_cycle = (abs_frame_num - 1) % sps->poc_cycle_length;
-
- expectedpoc = poc_cycle_cnt * expected_delta_per_poc_cycle;
- for (i = 0; i <= frame_num_in_poc_cycle; i++)
- expectedpoc = expectedpoc + sps->offset_for_ref_frame[i];
- } else
- expectedpoc = 0;
-
- if (h->nal_ref_idc == 0)
- expectedpoc = expectedpoc + sps->offset_for_non_ref_pic;
-
- field_poc[0] = expectedpoc + h->delta_poc[0];
- field_poc[1] = field_poc[0] + sps->offset_for_top_to_bottom_field;
-
- if (h->picture_structure == PICT_FRAME)
- field_poc[1] += h->delta_poc[1];
- } else {
- int poc = 2 * (h->frame_num_offset + h->frame_num);
-
- if (!h->nal_ref_idc)
- poc--;
-
- field_poc[0] = poc;
- field_poc[1] = poc;
- }
-
- if (h->picture_structure != PICT_BOTTOM_FIELD)
- pic_field_poc[0] = field_poc[0];
- if (h->picture_structure != PICT_TOP_FIELD)
- pic_field_poc[1] = field_poc[1];
- *pic_poc = FFMIN(pic_field_poc[0], pic_field_poc[1]);
-
- return 0;
-}
-
/**
* Compute profile from profile_idc and constraint_set?_flags.
*
@@ -1148,22 +1069,22 @@ again:
break;
if (h->sei_recovery_frame_cnt >= 0) {
- if (h->frame_num != h->sei_recovery_frame_cnt || sl->slice_type_nos != AV_PICTURE_TYPE_I)
+ if (h->poc.frame_num != h->sei_recovery_frame_cnt || sl->slice_type_nos != AV_PICTURE_TYPE_I)
h->valid_recovery_point = 1;
if ( h->recovery_frame < 0
- || av_mod_uintp2(h->recovery_frame - h->frame_num, h->ps.sps->log2_max_frame_num) > h->sei_recovery_frame_cnt) {
- h->recovery_frame = av_mod_uintp2(h->frame_num + h->sei_recovery_frame_cnt, h->ps.sps->log2_max_frame_num);
+ || av_mod_uintp2(h->recovery_frame - h->poc.frame_num, h->ps.sps->log2_max_frame_num) > h->sei_recovery_frame_cnt) {
+ h->recovery_frame = av_mod_uintp2(h->poc.frame_num + h->sei_recovery_frame_cnt, h->ps.sps->log2_max_frame_num);
if (!h->valid_recovery_point)
- h->recovery_frame = h->frame_num;
+ h->recovery_frame = h->poc.frame_num;
}
}
h->cur_pic_ptr->f->key_frame |= (nal->type == NAL_IDR_SLICE);
if (nal->type == NAL_IDR_SLICE ||
- (h->recovery_frame == h->frame_num && nal->ref_idc)) {
+ (h->recovery_frame == h->poc.frame_num && nal->ref_idc)) {
h->recovery_frame = -1;
h->cur_pic_ptr->recovered = 1;
}
diff --git a/libavcodec/h264.h b/libavcodec/h264.h
index 99262ae..da8a92f 100644
--- a/libavcodec/h264.h
+++ b/libavcodec/h264.h
@@ -648,17 +648,7 @@ typedef struct H264Context {
uint16_t *slice_table_base;
- // POC stuff
- int poc_lsb;
- int poc_msb;
- int delta_poc_bottom;
- int delta_poc[2];
- int frame_num;
- int prev_poc_msb; ///< poc_msb of the last reference pic for POC type 0
- int prev_poc_lsb; ///< poc_lsb of the last reference pic for POC type 0
- int frame_num_offset; ///< for POC type 2
- int prev_frame_num_offset; ///< for POC type 2
- int prev_frame_num; ///< frame_num of the last pic for POC type 1/2
+ H264POCContext poc;
/**
* frame_num for frames or 2 * frame_num + 1 for field pics.
@@ -1188,7 +1178,6 @@ void ff_h264_unref_picture(H264Context *h, H264Picture *pic);
int ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl);
void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl, int y, int height);
-int ff_init_poc(H264Context *h, int pic_field_poc[2], int *pic_poc);
int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl);
#define SLICE_SINGLETHREAD 1
diff --git a/libavcodec/h264_parse.c b/libavcodec/h264_parse.c
index 4f125b5..d99d4ae 100644
--- a/libavcodec/h264_parse.c
+++ b/libavcodec/h264_parse.c
@@ -239,3 +239,83 @@ fail:
ref_count[1] = 0;
return AVERROR_INVALIDDATA;
}
+
+int ff_h264_init_poc(int pic_field_poc[2], int *pic_poc,
+ const SPS *sps, H264POCContext *pc,
+ int picture_structure, int nal_ref_idc)
+{
+ const int max_frame_num = 1 << sps->log2_max_frame_num;
+ int field_poc[2];
+
+ pc->frame_num_offset = pc->prev_frame_num_offset;
+ if (pc->frame_num < pc->prev_frame_num)
+ pc->frame_num_offset += max_frame_num;
+
+ if (sps->poc_type == 0) {
+ const int max_poc_lsb = 1 << sps->log2_max_poc_lsb;
+
+ if (pc->poc_lsb < pc->prev_poc_lsb &&
+ pc->prev_poc_lsb - pc->poc_lsb >= max_poc_lsb / 2)
+ pc->poc_msb = pc->prev_poc_msb + max_poc_lsb;
+ else if (pc->poc_lsb > pc->prev_poc_lsb &&
+ pc->prev_poc_lsb - pc->poc_lsb < -max_poc_lsb / 2)
+ pc->poc_msb = pc->prev_poc_msb - max_poc_lsb;
+ else
+ pc->poc_msb = pc->prev_poc_msb;
+ field_poc[0] =
+ field_poc[1] = pc->poc_msb + pc->poc_lsb;
+ if (picture_structure == PICT_FRAME)
+ field_poc[1] += pc->delta_poc_bottom;
+ } else if (sps->poc_type == 1) {
+ int abs_frame_num, expected_delta_per_poc_cycle, expectedpoc;
+ int i;
+
+ if (sps->poc_cycle_length != 0)
+ abs_frame_num = pc->frame_num_offset + pc->frame_num;
+ else
+ abs_frame_num = 0;
+
+ if (nal_ref_idc == 0 && abs_frame_num > 0)
+ abs_frame_num--;
+
+ expected_delta_per_poc_cycle = 0;
+ for (i = 0; i < sps->poc_cycle_length; i++)
+ // FIXME integrate during sps parse
+ expected_delta_per_poc_cycle += sps->offset_for_ref_frame[i];
+
+ if (abs_frame_num > 0) {
+ int poc_cycle_cnt = (abs_frame_num - 1) / sps->poc_cycle_length;
+ int frame_num_in_poc_cycle = (abs_frame_num - 1) % sps->poc_cycle_length;
+
+ expectedpoc = poc_cycle_cnt * expected_delta_per_poc_cycle;
+ for (i = 0; i <= frame_num_in_poc_cycle; i++)
+ expectedpoc = expectedpoc + sps->offset_for_ref_frame[i];
+ } else
+ expectedpoc = 0;
+
+ if (nal_ref_idc == 0)
+ expectedpoc = expectedpoc + sps->offset_for_non_ref_pic;
+
+ field_poc[0] = expectedpoc + pc->delta_poc[0];
+ field_poc[1] = field_poc[0] + sps->offset_for_top_to_bottom_field;
+
+ if (picture_structure == PICT_FRAME)
+ field_poc[1] += pc->delta_poc[1];
+ } else {
+ int poc = 2 * (pc->frame_num_offset + pc->frame_num);
+
+ if (!nal_ref_idc)
+ poc--;
+
+ field_poc[0] = poc;
+ field_poc[1] = poc;
+ }
+
+ if (picture_structure != PICT_BOTTOM_FIELD)
+ pic_field_poc[0] = field_poc[0];
+ if (picture_structure != PICT_TOP_FIELD)
+ pic_field_poc[1] = field_poc[1];
+ *pic_poc = FFMIN(pic_field_poc[0], pic_field_poc[1]);
+
+ return 0;
+}
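
The subtle part of the newly factored-out ff_h264_init_poc() is the poc_type == 0 branch, which reconstructs the full picture order count from the truncated pic_order_cnt_lsb carried in the slice header. Below is a minimal stand-alone sketch of that wrap-around derivation; the function and values are illustrative only and are not part of the FFmpeg API.

#include <stdio.h>

/* Simplified stand-in for the poc_type == 0 branch of ff_h264_init_poc():
 * derive poc_msb from the decoded poc_lsb and the previous reference
 * picture's MSB/LSB, handling wrap-around of pic_order_cnt_lsb.
 * Names and numbers here are illustrative, not FFmpeg internals. */
static int derive_poc(int poc_lsb, int prev_poc_lsb, int prev_poc_msb,
                      int log2_max_poc_lsb)
{
    const int max_poc_lsb = 1 << log2_max_poc_lsb;
    int poc_msb;

    if (poc_lsb < prev_poc_lsb &&
        prev_poc_lsb - poc_lsb >= max_poc_lsb / 2)
        poc_msb = prev_poc_msb + max_poc_lsb;   /* lsb wrapped forward  */
    else if (poc_lsb > prev_poc_lsb &&
             prev_poc_lsb - poc_lsb < -max_poc_lsb / 2)
        poc_msb = prev_poc_msb - max_poc_lsb;   /* lsb wrapped backward */
    else
        poc_msb = prev_poc_msb;                 /* no wrap              */

    return poc_msb + poc_lsb;                   /* top-field POC        */
}

int main(void)
{
    /* log2_max_pic_order_cnt_lsb = 4  ->  max_poc_lsb = 16 */
    printf("%d\n", derive_poc( 4,  2,  0, 4));  /* same cycle       ->  4 */
    printf("%d\n", derive_poc( 2, 14,  0, 4));  /* wrapped forward  -> 18 */
    printf("%d\n", derive_poc(14,  2, 16, 4));  /* wrapped backward -> 14 */
    return 0;
}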
diff --git a/libavcodec/h264_parse.h b/libavcodec/h264_parse.h
index 0fac629..413f04d 100644
--- a/libavcodec/h264_parse.h
+++ b/libavcodec/h264_parse.h
@@ -39,6 +39,19 @@ typedef struct H264PredWeightTable {
int implicit_weight[48][48][2];
} H264PredWeightTable;
+typedef struct H264POCContext {
+ int poc_lsb;
+ int poc_msb;
+ int delta_poc_bottom;
+ int delta_poc[2];
+ int frame_num;
+ int prev_poc_msb; ///< poc_msb of the last reference pic for POC type 0
+ int prev_poc_lsb; ///< poc_lsb of the last reference pic for POC type 0
+ int frame_num_offset; ///< for POC type 2
+ int prev_frame_num_offset; ///< for POC type 2
+ int prev_frame_num; ///< frame_num of the last pic for POC type 1/2
+} H264POCContext;
+
struct SPS;
struct PPS;
@@ -65,4 +78,8 @@ int ff_h264_parse_ref_count(int *plist_count, int ref_count[2],
GetBitContext *gb, const struct PPS *pps,
int slice_type_nos, int picture_structure, void *logctx);
+int ff_h264_init_poc(int pic_field_poc[2], int *pic_poc,
+ const struct SPS *sps, H264POCContext *poc,
+ int picture_structure, int nal_ref_idc);
+
#endif /* AVCODEC_H264_PARSE_H */
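
The other noteworthy arm of the helper declared above is poc_type == 2, where POC simply follows decoding order: it is derived from frame_num plus an offset that grows each time frame_num wraps, and non-reference pictures (nal_ref_idc == 0) are decremented by one. The following is a small self-contained sketch of that behaviour under those assumptions; it folds the prev_frame_num_offset bookkeeping into one function for brevity and is not FFmpeg code.

#include <stdio.h>

/* Illustrative sketch of the poc_type == 2 branch of ff_h264_init_poc().
 * The offset bookkeeping (frame_num_offset / prev_frame_num) is carried
 * inline here; the real decoder updates the prev_ fields per picture. */
static int poc_type2(int frame_num, int *prev_frame_num, int *frame_num_offset,
                     int log2_max_frame_num, int nal_ref_idc)
{
    const int max_frame_num = 1 << log2_max_frame_num;
    int poc;

    if (frame_num < *prev_frame_num)       /* frame_num wrapped around */
        *frame_num_offset += max_frame_num;

    poc = 2 * (*frame_num_offset + frame_num);
    if (!nal_ref_idc)                      /* non-reference picture    */
        poc--;

    *prev_frame_num = frame_num;           /* carry state to next pic  */
    return poc;
}

int main(void)
{
    int prev = 0, offset = 0;
    /* log2_max_frame_num = 2 -> frame_num counts 0..3, then wraps */
    int frame_nums[] = { 0, 1, 2, 3, 0, 1 };
    for (int i = 0; i < 6; i++)
        printf("frame_num %d -> poc %d\n", frame_nums[i],
               poc_type2(frame_nums[i], &prev, &offset, 2, 1));
    return 0;
}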
diff --git a/libavcodec/h264_parser.c b/libavcodec/h264_parser.c
index 51ca9f2..e0977ea 100644
--- a/libavcodec/h264_parser.c
+++ b/libavcodec/h264_parser.c
@@ -49,6 +49,7 @@ typedef struct H264ParseContext {
ParseContext pc;
H264ParamSets ps;
H264DSPContext h264dsp;
+ H264POCContext poc;
int got_first;
} H264ParseContext;
@@ -327,10 +328,10 @@ static inline int parse_nal_units(AVCodecParserContext *s,
case NAL_IDR_SLICE:
s->key_frame = 1;
- h->prev_frame_num = 0;
- h->prev_frame_num_offset = 0;
- h->prev_poc_msb =
- h->prev_poc_lsb = 0;
+ p->poc.prev_frame_num = 0;
+ p->poc.prev_frame_num_offset = 0;
+ p->poc.prev_poc_msb =
+ p->poc.prev_poc_lsb = 0;
/* fall through */
case NAL_SLICE:
get_ue_golomb_long(&nal.gb); // skip first_mb_in_slice
@@ -367,7 +368,7 @@ static inline int parse_nal_units(AVCodecParserContext *s,
if (h->ps.sps->ref_frame_count <= 1 && h->ps.pps->ref_count[0] <= 1 && s->pict_type == AV_PICTURE_TYPE_I)
s->key_frame = 1;
- h->frame_num = get_bits(&nal.gb, sps->log2_max_frame_num);
+ p->poc.frame_num = get_bits(&nal.gb, sps->log2_max_frame_num);
s->coded_width = 16 * sps->mb_width;
s->coded_height = 16 * sps->mb_height;
@@ -414,26 +415,27 @@ static inline int parse_nal_units(AVCodecParserContext *s,
if (h->nal_unit_type == NAL_IDR_SLICE)
get_ue_golomb_long(&nal.gb); /* idr_pic_id */
if (sps->poc_type == 0) {
- h->poc_lsb = get_bits(&nal.gb, sps->log2_max_poc_lsb);
+ p->poc.poc_lsb = get_bits(&nal.gb, sps->log2_max_poc_lsb);
if (p->ps.pps->pic_order_present == 1 &&
h->picture_structure == PICT_FRAME)
- h->delta_poc_bottom = get_se_golomb(&nal.gb);
+ p->poc.delta_poc_bottom = get_se_golomb(&nal.gb);
}
if (sps->poc_type == 1 &&
!sps->delta_pic_order_always_zero_flag) {
- h->delta_poc[0] = get_se_golomb(&nal.gb);
+ p->poc.delta_poc[0] = get_se_golomb(&nal.gb);
if (p->ps.pps->pic_order_present == 1 &&
h->picture_structure == PICT_FRAME)
- h->delta_poc[1] = get_se_golomb(&nal.gb);
+ p->poc.delta_poc[1] = get_se_golomb(&nal.gb);
}
/* Decode POC of this picture.
* The prev_ values needed for decoding POC of the next picture are not set here. */
field_poc[0] = field_poc[1] = INT_MAX;
- ff_init_poc(h, field_poc, &s->output_picture_number);
+ ff_h264_init_poc(field_poc, &s->output_picture_number, sps,
+ &p->poc, h->picture_structure, nal.ref_idc);
/* Continue parsing to check if MMCO_RESET is present.
* FIXME: MMCO_RESET could appear in non-first slice.
@@ -446,15 +448,15 @@ static inline int parse_nal_units(AVCodecParserContext *s,
}
/* Set up the prev_ values for decoding POC of the next picture. */
- h->prev_frame_num = got_reset ? 0 : h->frame_num;
- h->prev_frame_num_offset = got_reset ? 0 : h->frame_num_offset;
+ p->poc.prev_frame_num = got_reset ? 0 : p->poc.frame_num;
+ p->poc.prev_frame_num_offset = got_reset ? 0 : p->poc.frame_num_offset;
if (h->nal_ref_idc != 0) {
if (!got_reset) {
- h->prev_poc_msb = h->poc_msb;
- h->prev_poc_lsb = h->poc_lsb;
+ p->poc.prev_poc_msb = p->poc.poc_msb;
+ p->poc.prev_poc_lsb = p->poc.poc_lsb;
} else {
- h->prev_poc_msb = 0;
- h->prev_poc_lsb =
+ p->poc.prev_poc_msb = 0;
+ p->poc.prev_poc_lsb =
h->picture_structure == PICT_BOTTOM_FIELD ? 0 : field_poc[0];
}
}
diff --git a/libavcodec/h264_picture.c b/libavcodec/h264_picture.c
index c4b17c0..e2e324b 100644
--- a/libavcodec/h264_picture.c
+++ b/libavcodec/h264_picture.c
@@ -166,11 +166,11 @@ int ff_h264_field_end(H264Context *h, H264SliceContext *sl, int in_setup)
if (in_setup || !(avctx->active_thread_type & FF_THREAD_FRAME)) {
if (!h->droppable) {
err = ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index);
- h->prev_poc_msb = h->poc_msb;
- h->prev_poc_lsb = h->poc_lsb;
+ h->poc.prev_poc_msb = h->poc.poc_msb;
+ h->poc.prev_poc_lsb = h->poc.poc_lsb;
}
- h->prev_frame_num_offset = h->frame_num_offset;
- h->prev_frame_num = h->frame_num;
+ h->poc.prev_frame_num_offset = h->poc.frame_num_offset;
+ h->poc.prev_frame_num = h->poc.frame_num;
}
if (avctx->hwaccel) {
diff --git a/libavcodec/h264_refs.c b/libavcodec/h264_refs.c
index 02c7867..2b90168 100644
--- a/libavcodec/h264_refs.c
+++ b/libavcodec/h264_refs.c
@@ -725,7 +725,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count)
for (j = 0; j < 16; j++) {
remove_long(h, j, 0);
}
- h->frame_num = h->cur_pic_ptr->frame_num = 0;
+ h->poc.frame_num = h->cur_pic_ptr->frame_num = 0;
h->mmco_reset = 1;
h->cur_pic_ptr->mmco_reset = 1;
for (j = 0; j < MAX_DELAYED_PIC_COUNT; j++)
diff --git a/libavcodec/h264_slice.c b/libavcodec/h264_slice.c
index 394a0c4..6c09707 100644
--- a/libavcodec/h264_slice.c
+++ b/libavcodec/h264_slice.c
@@ -421,7 +421,7 @@ int ff_h264_update_thread_context(AVCodecContext *dst,
h->x264_build = h1->x264_build;
// POC timing
- copy_fields(h, h1, poc_lsb, current_slice);
+ copy_fields(h, h1, poc, current_slice);
copy_picture_range(h->short_ref, h1->short_ref, 32, h, h1);
copy_picture_range(h->long_ref, h1->long_ref, 32, h, h1);
@@ -435,11 +435,11 @@ int ff_h264_update_thread_context(AVCodecContext *dst,
if (!h->droppable) {
err = ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index);
- h->prev_poc_msb = h->poc_msb;
- h->prev_poc_lsb = h->poc_lsb;
+ h->poc.prev_poc_msb = h->poc.poc_msb;
+ h->poc.prev_poc_lsb = h->poc.poc_lsb;
}
- h->prev_frame_num_offset = h->frame_num_offset;
- h->prev_frame_num = h->frame_num;
+ h->poc.prev_frame_num_offset = h->poc.frame_num_offset;
+ h->poc.prev_frame_num = h->poc.frame_num;
h->recovery_frame = h1->recovery_frame;
@@ -476,8 +476,7 @@ static int h264_frame_start(H264Context *h)
pic->reference = h->droppable ? 0 : h->picture_structure;
pic->f->coded_picture_number = h->coded_picture_number++;
pic->field_picture = h->picture_structure != PICT_FRAME;
- pic->frame_num = h->frame_num;
-
+ pic->frame_num = h->poc.frame_num;
/*
* Zero key_frame here; IDR markings per slice in frame or fields are ORed
* in later.
@@ -1324,15 +1323,15 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
frame_num = get_bits(&sl->gb, sps->log2_max_frame_num);
if (!first_slice) {
- if (h->frame_num != frame_num) {
+ if (h->poc.frame_num != frame_num) {
av_log(h->avctx, AV_LOG_ERROR, "Frame num change from %d to %d\n",
- h->frame_num, frame_num);
+ h->poc.frame_num, frame_num);
return AVERROR_INVALIDDATA;
}
}
if (!h->setup_finished)
- h->frame_num = frame_num;
+ h->poc.frame_num = frame_num;
sl->mb_mbaff = 0;
mb_aff_frame = 0;
@@ -1385,19 +1384,19 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
if (h->current_slice == 0) {
/* Shorten frame num gaps so we don't have to allocate reference
* frames just to throw them away */
- if (h->frame_num != h->prev_frame_num) {
- int unwrap_prev_frame_num = h->prev_frame_num;
+ if (h->poc.frame_num != h->poc.prev_frame_num) {
+ int unwrap_prev_frame_num = h->poc.prev_frame_num;
int max_frame_num = 1 << sps->log2_max_frame_num;
- if (unwrap_prev_frame_num > h->frame_num)
+ if (unwrap_prev_frame_num > h->poc.frame_num)
unwrap_prev_frame_num -= max_frame_num;
- if ((h->frame_num - unwrap_prev_frame_num) > sps->ref_frame_count) {
- unwrap_prev_frame_num = (h->frame_num - sps->ref_frame_count) - 1;
+ if ((h->poc.frame_num - unwrap_prev_frame_num) > sps->ref_frame_count) {
+ unwrap_prev_frame_num = (h->poc.frame_num - sps->ref_frame_count) - 1;
if (unwrap_prev_frame_num < 0)
unwrap_prev_frame_num += max_frame_num;
- h->prev_frame_num = unwrap_prev_frame_num;
+ h->poc.prev_frame_num = unwrap_prev_frame_num;
}
}
@@ -1426,7 +1425,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
last_pic_structure == PICT_TOP_FIELD);
}
} else {
- if (h->cur_pic_ptr->frame_num != h->frame_num) {
+ if (h->cur_pic_ptr->frame_num != h->poc.frame_num) {
/* This and previous field were reference, but had
* different frame_nums. Consider this field first in
* pair. Throw away previous field except for reference
@@ -1458,11 +1457,11 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
}
}
- while (h->frame_num != h->prev_frame_num && !h->first_field &&
- h->frame_num != (h->prev_frame_num + 1) % (1 << sps->log2_max_frame_num)) {
+ while (h->poc.frame_num != h->poc.prev_frame_num && !h->first_field &&
+ h->poc.frame_num != (h->poc.prev_frame_num + 1) % (1 << sps->log2_max_frame_num)) {
H264Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL;
av_log(h->avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n",
- h->frame_num, h->prev_frame_num);
+ h->poc.frame_num, h->poc.prev_frame_num);
if (!sps->gaps_in_frame_num_allowed_flag)
for(i=0; i<FF_ARRAY_ELEMS(h->last_pocs); i++)
h->last_pocs[i] = INT_MIN;
@@ -1472,9 +1471,9 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
return ret;
}
- h->prev_frame_num++;
- h->prev_frame_num %= 1 << sps->log2_max_frame_num;
- h->cur_pic_ptr->frame_num = h->prev_frame_num;
+ h->poc.prev_frame_num++;
+ h->poc.prev_frame_num %= 1 << sps->log2_max_frame_num;
+ h->cur_pic_ptr->frame_num = h->poc.prev_frame_num;
h->cur_pic_ptr->invalid_gap = !sps->gaps_in_frame_num_allowed_flag;
ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0);
ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1);
@@ -1505,7 +1504,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
prev->f->height);
h->short_ref[0]->poc = prev->poc + 2;
}
- h->short_ref[0]->frame_num = h->prev_frame_num;
+ h->short_ref[0]->frame_num = h->poc.prev_frame_num;
}
}
@@ -1526,7 +1525,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
h->first_field = FIELD_PICTURE(h);
} else {
h->missing_fields = 0;
- if (h->cur_pic_ptr->frame_num != h->frame_num) {
+ if (h->cur_pic_ptr->frame_num != h->poc.frame_num) {
ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
h->picture_structure==PICT_BOTTOM_FIELD);
/* This and the previous field had different frame_nums.
@@ -1577,10 +1576,10 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
av_assert1(sl->mb_y < h->mb_height);
if (h->picture_structure == PICT_FRAME) {
- h->curr_pic_num = h->frame_num;
+ h->curr_pic_num = h->poc.frame_num;
h->max_pic_num = 1 << sps->log2_max_frame_num;
} else {
- h->curr_pic_num = 2 * h->frame_num + 1;
+ h->curr_pic_num = 2 * h->poc.frame_num + 1;
h->max_pic_num = 1 << (sps->log2_max_frame_num + 1);
}
@@ -1591,12 +1590,12 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
int poc_lsb = get_bits(&sl->gb, sps->log2_max_poc_lsb);
if (!h->setup_finished)
- h->poc_lsb = poc_lsb;
+ h->poc.poc_lsb = poc_lsb;
if (pps->pic_order_present == 1 && h->picture_structure == PICT_FRAME) {
int delta_poc_bottom = get_se_golomb(&sl->gb);
if (!h->setup_finished)
- h->delta_poc_bottom = delta_poc_bottom;
+ h->poc.delta_poc_bottom = delta_poc_bottom;
}
}
@@ -1604,18 +1603,19 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
int delta_poc = get_se_golomb(&sl->gb);
if (!h->setup_finished)
- h->delta_poc[0] = delta_poc;
+ h->poc.delta_poc[0] = delta_poc;
if (pps->pic_order_present == 1 && h->picture_structure == PICT_FRAME) {
delta_poc = get_se_golomb(&sl->gb);
if (!h->setup_finished)
- h->delta_poc[1] = delta_poc;
+ h->poc.delta_poc[1] = delta_poc;
}
}
if (!h->setup_finished)
- ff_init_poc(h, h->cur_pic_ptr->field_poc, &h->cur_pic_ptr->poc);
+ ff_h264_init_poc(h->cur_pic_ptr->field_poc, &h->cur_pic_ptr->poc,
+ sps, &h->poc, h->picture_structure, h->nal_ref_idc);
if (pps->redundant_pic_cnt_present)
sl->redundant_pic_count = get_ue_golomb(&sl->gb);
@@ -1829,7 +1829,7 @@ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl)
av_get_picture_type_char(sl->slice_type),
sl->slice_type_fixed ? " fix" : "",
h->nal_unit_type == NAL_IDR_SLICE ? " IDR" : "",
- pps_id, h->frame_num,
+ pps_id, h->poc.frame_num,
h->cur_pic_ptr->field_poc[0],
h->cur_pic_ptr->field_poc[1],
sl->ref_count[0], sl->ref_count[1],
diff --git a/libavcodec/vaapi_h264.c b/libavcodec/vaapi_h264.c
index 9b13fa9..91ee2d6 100644
--- a/libavcodec/vaapi_h264.c
+++ b/libavcodec/vaapi_h264.c
@@ -279,7 +279,7 @@ static int vaapi_h264_start_frame(AVCodecContext *avctx,
pic_param->pic_fields.bits.deblocking_filter_control_present_flag = pps->deblocking_filter_parameters_present;
pic_param->pic_fields.bits.redundant_pic_cnt_present_flag = pps->redundant_pic_cnt_present;
pic_param->pic_fields.bits.reference_pic_flag = h->nal_ref_idc != 0;
- pic_param->frame_num = h->frame_num;
+ pic_param->frame_num = h->poc.frame_num;
/* Fill in VAIQMatrixBufferH264. */
iq_matrix = ff_vaapi_alloc_iq_matrix(vactx, sizeof(VAIQMatrixBufferH264));
diff --git a/libavcodec/vdpau.c b/libavcodec/vdpau.c
index d791d15..057f907 100644
--- a/libavcodec/vdpau.c
+++ b/libavcodec/vdpau.c
@@ -459,7 +459,7 @@ void ff_vdpau_h264_picture_start(H264Context *h)
render->info.h264.field_order_cnt[i] = foc;
}
- render->info.h264.frame_num = h->frame_num;
+ render->info.h264.frame_num = h->poc.frame_num;
}
void ff_vdpau_h264_picture_complete(H264Context *h)
diff --git a/libavcodec/vdpau_h264.c b/libavcodec/vdpau_h264.c
index 124fc98..a915461 100644
--- a/libavcodec/vdpau_h264.c
+++ b/libavcodec/vdpau_h264.c
@@ -134,7 +134,7 @@ static int vdpau_h264_start_frame(AVCodecContext *avctx,
info->field_order_cnt[0] = h264_foc(pic->field_poc[0]);
info->field_order_cnt[1] = h264_foc(pic->field_poc[1]);
info->is_reference = h->nal_ref_idc != 0;
- info->frame_num = h->frame_num;
+ info->frame_num = h->poc.frame_num;
info->field_pic_flag = h->picture_structure != PICT_FRAME;
info->bottom_field_flag = h->picture_structure == PICT_BOTTOM_FIELD;
info->num_ref_frames = sps->ref_frame_count;
======================================================================
diff --cc libavcodec/h264.c
index 0de6d91,e415103..367f6bf
--- a/libavcodec/h264.c
+++ b/libavcodec/h264.c
@@@ -433,8 -403,6 +433,8 @@@ static int h264_init_context(AVCodecCon
h->x264_build = -1;
h->recovery_frame = -1;
h->frame_recovered = 0;
- h->prev_frame_num = -1;
++ h->poc.prev_frame_num = -1;
+ h->sei_fpa.frame_packing_arrangement_cancel_flag = -1;
h->next_outputed_poc = INT_MIN;
for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
@@@ -829,14 -812,11 +829,14 @@@ static void decode_postinit(H264Contex
*/
static void idr(H264Context *h)
{
+ int i;
ff_h264_remove_all_refs(h);
- h->prev_frame_num =
- h->prev_frame_num_offset = 0;
- h->prev_poc_msb = 1<<16;
- h->prev_poc_lsb = 0;
+ h->poc.prev_frame_num =
- h->poc.prev_frame_num_offset =
- h->poc.prev_poc_msb =
++ h->poc.prev_frame_num_offset = 0;
++ h->poc.prev_poc_msb = 1<<16;
+ h->poc.prev_poc_lsb = 0;
+ for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
+ h->last_pocs[i] = INT_MIN;
}
/* forget old pics after a seek */
@@@ -847,17 -828,8 +847,17 @@@ void ff_h264_flush_change(H264Context *
h->next_outputed_poc = INT_MIN;
h->prev_interlaced_frame = 1;
idr(h);
- if (h->cur_pic_ptr)
+
- h->prev_frame_num = -1;
++ h->poc.prev_frame_num = -1;
+ if (h->cur_pic_ptr) {
h->cur_pic_ptr->reference = 0;
+ for (j=i=0; h->delayed_pic[i]; i++)
+ if (h->delayed_pic[i] != h->cur_pic_ptr)
+ h->delayed_pic[j++] = h->delayed_pic[i];
+ h->delayed_pic[j] = NULL;
+ }
+ ff_h264_unref_picture(h, &h->last_pic_for_ec);
+
h->first_field = 0;
ff_h264_reset_sei(h);
h->recovery_frame = -1;
@@@ -1147,23 -984,15 +1068,23 @@@ again
if ((err = ff_h264_decode_slice_header(h, sl)))
break;
- if (h->sei_recovery_frame_cnt >= 0 && h->recovery_frame < 0) {
- h->recovery_frame = (h->poc.frame_num + h->sei_recovery_frame_cnt) &
- ((1 << h->ps.sps->log2_max_frame_num) - 1);
+ if (h->sei_recovery_frame_cnt >= 0) {
- if (h->frame_num != h->sei_recovery_frame_cnt || sl->slice_type_nos != AV_PICTURE_TYPE_I)
++ if (h->poc.frame_num != h->sei_recovery_frame_cnt || sl->slice_type_nos != AV_PICTURE_TYPE_I)
+ h->valid_recovery_point = 1;
+
+ if ( h->recovery_frame < 0
- || av_mod_uintp2(h->recovery_frame - h->frame_num, h->ps.sps->log2_max_frame_num) > h->sei_recovery_frame_cnt) {
- h->recovery_frame = av_mod_uintp2(h->frame_num + h->sei_recovery_frame_cnt, h->ps.sps->log2_max_frame_num);
++ || av_mod_uintp2(h->recovery_frame - h->poc.frame_num, h->ps.sps->log2_max_frame_num) > h->sei_recovery_frame_cnt) {
++ h->recovery_frame = av_mod_uintp2(h->poc.frame_num + h->sei_recovery_frame_cnt, h->ps.sps->log2_max_frame_num);
+
+ if (!h->valid_recovery_point)
- h->recovery_frame = h->frame_num;
++ h->recovery_frame = h->poc.frame_num;
+ }
}
- h->cur_pic_ptr->f->key_frame |=
- (nal->type == NAL_IDR_SLICE) || (h->sei_recovery_frame_cnt >= 0);
+ h->cur_pic_ptr->f->key_frame |= (nal->type == NAL_IDR_SLICE);
- if (nal->type == NAL_IDR_SLICE || h->recovery_frame == h->poc.frame_num) {
+ if (nal->type == NAL_IDR_SLICE ||
- (h->recovery_frame == h->frame_num && nal->ref_idc)) {
++ (h->recovery_frame == h->poc.frame_num && nal->ref_idc)) {
h->recovery_frame = -1;
h->cur_pic_ptr->recovered = 1;
}
diff --cc libavcodec/h264.h
index 99262ae,007ce58..da8a92f
--- a/libavcodec/h264.h
+++ b/libavcodec/h264.h
@@@ -1188,12 -1049,8 +1178,11 @@@ void ff_h264_unref_picture(H264Context
int ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl);
void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl, int y, int height);
- int ff_init_poc(H264Context *h, int pic_field_poc[2], int *pic_poc);
int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl);
+#define SLICE_SINGLETHREAD 1
+#define SLICE_SKIPED 2
+
int ff_h264_execute_decode_slices(H264Context *h, unsigned context_count);
int ff_h264_update_thread_context(AVCodecContext *dst,
const AVCodecContext *src);
diff --cc libavcodec/h264_parse.h
index 0fac629,c47b420..413f04d
--- a/libavcodec/h264_parse.h
+++ b/libavcodec/h264_parse.h
@@@ -63,6 -76,10 +76,10 @@@ int ff_h264_check_intra_pred_mode(void
int ff_h264_parse_ref_count(int *plist_count, int ref_count[2],
GetBitContext *gb, const struct PPS *pps,
- int slice_type_nos, int picture_structure);
+ int slice_type_nos, int picture_structure, void *logctx);
+ int ff_h264_init_poc(int pic_field_poc[2], int *pic_poc,
+ const struct SPS *sps, H264POCContext *poc,
+ int picture_structure, int nal_ref_idc);
+
#endif /* AVCODEC_H264_PARSE_H */
diff --cc libavcodec/h264_parser.c
index 51ca9f2,991a841..e0977ea
--- a/libavcodec/h264_parser.c
+++ b/libavcodec/h264_parser.c
@@@ -327,13 -275,13 +328,13 @@@ static inline int parse_nal_units(AVCod
case NAL_IDR_SLICE:
s->key_frame = 1;
- h->prev_frame_num = 0;
- h->prev_frame_num_offset = 0;
- h->prev_poc_msb =
- h->prev_poc_lsb = 0;
+ p->poc.prev_frame_num = 0;
+ p->poc.prev_frame_num_offset = 0;
+ p->poc.prev_poc_msb =
+ p->poc.prev_poc_lsb = 0;
/* fall through */
case NAL_SLICE:
- get_ue_golomb(&nal.gb); // skip first_mb_in_slice
+ get_ue_golomb_long(&nal.gb); // skip first_mb_in_slice
slice_type = get_ue_golomb_31(&nal.gb);
s->pict_type = ff_h264_golomb_to_pict_type[slice_type % 5];
if (h->sei_recovery_frame_cnt >= 0) {
@@@ -363,11 -311,7 +364,11 @@@
h->ps.pps = p->ps.pps;
sps = p->ps.sps;
+ // heuristic to detect non marked keyframes
+ if (h->ps.sps->ref_frame_count <= 1 && h->ps.pps->ref_count[0] <= 1 && s->pict_type == AV_PICTURE_TYPE_I)
+ s->key_frame = 1;
+
- h->frame_num = get_bits(&nal.gb, sps->log2_max_frame_num);
+ p->poc.frame_num = get_bits(&nal.gb, sps->log2_max_frame_num);
s->coded_width = 16 * sps->mb_width;
s->coded_height = 16 * sps->mb_height;
@@@ -412,9 -356,9 +413,9 @@@
}
if (h->nal_unit_type == NAL_IDR_SLICE)
- get_ue_golomb(&nal.gb); /* idr_pic_id */
+ get_ue_golomb_long(&nal.gb); /* idr_pic_id */
if (sps->poc_type == 0) {
- h->poc_lsb = get_bits(&nal.gb, sps->log2_max_poc_lsb);
+ p->poc.poc_lsb = get_bits(&nal.gb, sps->log2_max_poc_lsb);
if (p->ps.pps->pic_order_present == 1 &&
h->picture_structure == PICT_FRAME)
diff --cc libavcodec/h264_refs.c
index 02c7867,08c3bff..2b90168
--- a/libavcodec/h264_refs.c
+++ b/libavcodec/h264_refs.c
@@@ -725,11 -656,9 +725,11 @@@ int ff_h264_execute_ref_pic_marking(H26
for (j = 0; j < 16; j++) {
remove_long(h, j, 0);
}
- h->frame_num = h->cur_pic_ptr->frame_num = 0;
+ h->poc.frame_num = h->cur_pic_ptr->frame_num = 0;
h->mmco_reset = 1;
h->cur_pic_ptr->mmco_reset = 1;
+ for (j = 0; j < MAX_DELAYED_PIC_COUNT; j++)
+ h->last_pocs[j] = INT_MIN;
break;
default: assert(0);
}
diff --cc libavcodec/h264_slice.c
index 394a0c4,82a5a82..6c09707
--- a/libavcodec/h264_slice.c
+++ b/libavcodec/h264_slice.c
@@@ -418,10 -407,9 +418,10 @@@ int ff_h264_update_thread_context(AVCod
// extradata/NAL handling
h->is_avc = h1->is_avc;
h->nal_length_size = h1->nal_length_size;
+ h->x264_build = h1->x264_build;
// POC timing
- copy_fields(h, h1, poc_lsb, current_slice);
+ copy_fields(h, h1, poc, current_slice);
copy_picture_range(h->short_ref, h1->short_ref, 32, h, h1);
copy_picture_range(h->long_ref, h1->long_ref, 32, h, h1);
@@@ -435,13 -421,14 +435,13 @@@
if (!h->droppable) {
err = ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index);
- h->prev_poc_msb = h->poc_msb;
- h->prev_poc_lsb = h->poc_lsb;
+ h->poc.prev_poc_msb = h->poc.poc_msb;
+ h->poc.prev_poc_lsb = h->poc.poc_lsb;
}
- h->prev_frame_num_offset = h->frame_num_offset;
- h->prev_frame_num = h->frame_num;
+ h->poc.prev_frame_num_offset = h->poc.frame_num_offset;
+ h->poc.prev_frame_num = h->poc.frame_num;
h->recovery_frame = h1->recovery_frame;
- h->frame_recovered = h1->frame_recovered;
return err;
}
@@@ -1323,20 -1105,11 +1322,20 @@@ int ff_h264_decode_slice_header(H264Con
}
frame_num = get_bits(&sl->gb, sps->log2_max_frame_num);
+ if (!first_slice) {
- if (h->frame_num != frame_num) {
++ if (h->poc.frame_num != frame_num) {
+ av_log(h->avctx, AV_LOG_ERROR, "Frame num change from %d to %d\n",
- h->frame_num, frame_num);
++ h->poc.frame_num, frame_num);
+ return AVERROR_INVALIDDATA;
+ }
+ }
+
if (!h->setup_finished)
- h->frame_num = frame_num;
+ h->poc.frame_num = frame_num;
sl->mb_mbaff = 0;
-
+ mb_aff_frame = 0;
+ last_mb_aff_frame = h->mb_aff_frame;
last_pic_structure = h->picture_structure;
last_pic_droppable = h->droppable;
@@@ -1373,27 -1146,18 +1372,27 @@@
h->current_slice + 1);
return AVERROR_INVALIDDATA;
}
- } else {
+ }
+
+ if (!h->setup_finished) {
+ h->droppable = droppable;
+ h->picture_structure = picture_structure;
+ h->mb_aff_frame = mb_aff_frame;
+ }
+ sl->mb_field_decoding_flag = picture_structure != PICT_FRAME;
+
+ if (h->current_slice == 0) {
/* Shorten frame num gaps so we don't have to allocate reference
* frames just to throw them away */
- if (h->frame_num != h->prev_frame_num) {
- int unwrap_prev_frame_num = h->prev_frame_num;
+ if (h->poc.frame_num != h->poc.prev_frame_num) {
+ int unwrap_prev_frame_num = h->poc.prev_frame_num;
int max_frame_num = 1 << sps->log2_max_frame_num;
- if (unwrap_prev_frame_num > h->frame_num)
+ if (unwrap_prev_frame_num > h->poc.frame_num)
unwrap_prev_frame_num -= max_frame_num;
- if ((h->frame_num - unwrap_prev_frame_num) > sps->ref_frame_count) {
- unwrap_prev_frame_num = (h->frame_num - sps->ref_frame_count) - 1;
+ if ((h->poc.frame_num - unwrap_prev_frame_num) > sps->ref_frame_count) {
+ unwrap_prev_frame_num = (h->poc.frame_num - sps->ref_frame_count) - 1;
if (unwrap_prev_frame_num < 0)
unwrap_prev_frame_num += max_frame_num;
@@@ -1458,24 -1216,20 +1457,24 @@@
}
}
- while (h->frame_num != h->prev_frame_num && !h->first_field &&
- h->frame_num != (h->prev_frame_num + 1) % (1 << sps->log2_max_frame_num)) {
- while (h->poc.frame_num != h->poc.prev_frame_num &&
++ while (h->poc.frame_num != h->poc.prev_frame_num && !h->first_field &&
+ h->poc.frame_num != (h->poc.prev_frame_num + 1) % (1 << sps->log2_max_frame_num)) {
H264Picture *prev = h->short_ref_count ? h->short_ref[0] : NULL;
av_log(h->avctx, AV_LOG_DEBUG, "Frame num gap %d %d\n",
- h->frame_num, h->prev_frame_num);
+ h->poc.frame_num, h->poc.prev_frame_num);
- ret = initialize_cur_frame(h);
+ if (!sps->gaps_in_frame_num_allowed_flag)
+ for(i=0; i<FF_ARRAY_ELEMS(h->last_pocs); i++)
+ h->last_pocs[i] = INT_MIN;
+ ret = h264_frame_start(h);
if (ret < 0) {
h->first_field = 0;
return ret;
}
- h->prev_frame_num++;
- h->prev_frame_num %= 1 << sps->log2_max_frame_num;
- h->cur_pic_ptr->frame_num = h->prev_frame_num;
+ h->poc.prev_frame_num++;
+ h->poc.prev_frame_num %= 1 << sps->log2_max_frame_num;
+ h->cur_pic_ptr->frame_num = h->poc.prev_frame_num;
+ h->cur_pic_ptr->invalid_gap = !sps->gaps_in_frame_num_allowed_flag;
ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 0);
ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX, 1);
ret = ff_generate_sliding_window_mmcos(h, 1);
@@@ -1501,11 -1255,11 +1500,11 @@@
(const uint8_t **)prev->f->data,
prev->f->linesize,
prev->f->format,
- h->mb_width * 16,
- h->mb_height * 16);
+ prev->f->width,
+ prev->f->height);
h->short_ref[0]->poc = prev->poc + 2;
}
- h->short_ref[0]->frame_num = h->prev_frame_num;
+ h->short_ref[0]->frame_num = h->poc.prev_frame_num;
}
}
@@@ -1525,10 -1278,7 +1524,10 @@@
h->cur_pic_ptr = NULL;
h->first_field = FIELD_PICTURE(h);
} else {
+ h->missing_fields = 0;
- if (h->cur_pic_ptr->frame_num != h->frame_num) {
+ if (h->cur_pic_ptr->frame_num != h->poc.frame_num) {
+ ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
+ h->picture_structure==PICT_BOTTOM_FIELD);
/* This and the previous field had different frame_nums.
* Consider this field first in pair. Throw away previous
* one except for reference purposes. */
@@@ -1574,13 -1315,13 +1573,13 @@@
FIELD_OR_MBAFF_PICTURE(h);
if (h->picture_structure == PICT_BOTTOM_FIELD)
sl->resync_mb_y = sl->mb_y = sl->mb_y + 1;
- assert(sl->mb_y < h->mb_height);
+ av_assert1(sl->mb_y < h->mb_height);
if (h->picture_structure == PICT_FRAME) {
- h->curr_pic_num = h->frame_num;
+ h->curr_pic_num = h->poc.frame_num;
h->max_pic_num = 1 << sps->log2_max_frame_num;
} else {
- h->curr_pic_num = 2 * h->frame_num + 1;
+ h->curr_pic_num = 2 * h->poc.frame_num + 1;
h->max_pic_num = 1 << (sps->log2_max_frame_num + 1);
}
diff --cc libavcodec/vdpau.c
index d791d15,bf5f8d9..057f907
--- a/libavcodec/vdpau.c
+++ b/libavcodec/vdpau.c
@@@ -355,345 -317,6 +355,345 @@@ int ff_vdpau_add_buffer(struct vdpau_pi
return 0;
}
+/* Obsolete non-hwaccel VDPAU support below... */
+
+#if FF_API_VDPAU
+void ff_vdpau_add_data_chunk(uint8_t *data, const uint8_t *buf, int buf_size)
+{
+ struct vdpau_render_state *render = (struct vdpau_render_state*)data;
+ assert(render);
+
+ render->bitstream_buffers= av_fast_realloc(
+ render->bitstream_buffers,
+ &render->bitstream_buffers_allocated,
+ sizeof(*render->bitstream_buffers)*(render->bitstream_buffers_used + 1)
+ );
+
+ render->bitstream_buffers[render->bitstream_buffers_used].struct_version = VDP_BITSTREAM_BUFFER_VERSION;
+ render->bitstream_buffers[render->bitstream_buffers_used].bitstream = buf;
+ render->bitstream_buffers[render->bitstream_buffers_used].bitstream_bytes = buf_size;
+ render->bitstream_buffers_used++;
+}
+
+#if CONFIG_H264_VDPAU_DECODER
+void ff_vdpau_h264_set_reference_frames(H264Context *h)
+{
+ struct vdpau_render_state *render, *render_ref;
+ VdpReferenceFrameH264 *rf, *rf2;
+ H264Picture *pic;
+ int i, list, pic_frame_idx;
+
+ render = (struct vdpau_render_state *)h->cur_pic_ptr->f->data[0];
+ assert(render);
+
+ rf = &render->info.h264.referenceFrames[0];
+#define H264_RF_COUNT FF_ARRAY_ELEMS(render->info.h264.referenceFrames)
+
+ for (list = 0; list < 2; ++list) {
+ H264Picture **lp = list ? h->long_ref : h->short_ref;
+ int ls = list ? 16 : h->short_ref_count;
+
+ for (i = 0; i < ls; ++i) {
+ pic = lp[i];
+ if (!pic || !pic->reference)
+ continue;
+ pic_frame_idx = pic->long_ref ? pic->pic_id : pic->frame_num;
+
+ render_ref = (struct vdpau_render_state *)pic->f->data[0];
+ assert(render_ref);
+
+ rf2 = &render->info.h264.referenceFrames[0];
+ while (rf2 != rf) {
+ if (
+ (rf2->surface == render_ref->surface)
+ && (rf2->is_long_term == pic->long_ref)
+ && (rf2->frame_idx == pic_frame_idx)
+ )
+ break;
+ ++rf2;
+ }
+ if (rf2 != rf) {
+ rf2->top_is_reference |= (pic->reference & PICT_TOP_FIELD) ? VDP_TRUE : VDP_FALSE;
+ rf2->bottom_is_reference |= (pic->reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE;
+ continue;
+ }
+
+ if (rf >= &render->info.h264.referenceFrames[H264_RF_COUNT])
+ continue;
+
+ rf->surface = render_ref->surface;
+ rf->is_long_term = pic->long_ref;
+ rf->top_is_reference = (pic->reference & PICT_TOP_FIELD) ? VDP_TRUE : VDP_FALSE;
+ rf->bottom_is_reference = (pic->reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE;
+ rf->field_order_cnt[0] = pic->field_poc[0];
+ rf->field_order_cnt[1] = pic->field_poc[1];
+ rf->frame_idx = pic_frame_idx;
+
+ ++rf;
+ }
+ }
+
+ for (; rf < &render->info.h264.referenceFrames[H264_RF_COUNT]; ++rf) {
+ rf->surface = VDP_INVALID_HANDLE;
+ rf->is_long_term = 0;
+ rf->top_is_reference = 0;
+ rf->bottom_is_reference = 0;
+ rf->field_order_cnt[0] = 0;
+ rf->field_order_cnt[1] = 0;
+ rf->frame_idx = 0;
+ }
+}
+
+void ff_vdpau_h264_picture_start(H264Context *h)
+{
+ struct vdpau_render_state *render;
+ int i;
+
+ render = (struct vdpau_render_state *)h->cur_pic_ptr->f->data[0];
+ assert(render);
+
+ for (i = 0; i < 2; ++i) {
+ int foc = h->cur_pic_ptr->field_poc[i];
+ if (foc == INT_MAX)
+ foc = 0;
+ render->info.h264.field_order_cnt[i] = foc;
+ }
+
- render->info.h264.frame_num = h->frame_num;
++ render->info.h264.frame_num = h->poc.frame_num;
+}
+
+void ff_vdpau_h264_picture_complete(H264Context *h)
+{
+ struct vdpau_render_state *render;
+
+ render = (struct vdpau_render_state *)h->cur_pic_ptr->f->data[0];
+ assert(render);
+
+ render->info.h264.slice_count = h->current_slice;
+ if (render->info.h264.slice_count < 1)
+ return;
+
+ render->info.h264.is_reference = (h->cur_pic_ptr->reference & 3) ? VDP_TRUE : VDP_FALSE;
+ render->info.h264.field_pic_flag = h->picture_structure != PICT_FRAME;
+ render->info.h264.bottom_field_flag = h->picture_structure == PICT_BOTTOM_FIELD;
+ render->info.h264.num_ref_frames = h->ps.sps->ref_frame_count;
+ render->info.h264.mb_adaptive_frame_field_flag = h->ps.sps->mb_aff && !render->info.h264.field_pic_flag;
+ render->info.h264.constrained_intra_pred_flag = h->ps.pps->constrained_intra_pred;
+ render->info.h264.weighted_pred_flag = h->ps.pps->weighted_pred;
+ render->info.h264.weighted_bipred_idc = h->ps.pps->weighted_bipred_idc;
+ render->info.h264.frame_mbs_only_flag = h->ps.sps->frame_mbs_only_flag;
+ render->info.h264.transform_8x8_mode_flag = h->ps.pps->transform_8x8_mode;
+ render->info.h264.chroma_qp_index_offset = h->ps.pps->chroma_qp_index_offset[0];
+ render->info.h264.second_chroma_qp_index_offset = h->ps.pps->chroma_qp_index_offset[1];
+ render->info.h264.pic_init_qp_minus26 = h->ps.pps->init_qp - 26;
+ render->info.h264.num_ref_idx_l0_active_minus1 = h->ps.pps->ref_count[0] - 1;
+ render->info.h264.num_ref_idx_l1_active_minus1 = h->ps.pps->ref_count[1] - 1;
+ render->info.h264.log2_max_frame_num_minus4 = h->ps.sps->log2_max_frame_num - 4;
+ render->info.h264.pic_order_cnt_type = h->ps.sps->poc_type;
+ render->info.h264.log2_max_pic_order_cnt_lsb_minus4 = h->ps.sps->poc_type ? 0 : h->ps.sps->log2_max_poc_lsb - 4;
+ render->info.h264.delta_pic_order_always_zero_flag = h->ps.sps->delta_pic_order_always_zero_flag;
+ render->info.h264.direct_8x8_inference_flag = h->ps.sps->direct_8x8_inference_flag;
+ render->info.h264.entropy_coding_mode_flag = h->ps.pps->cabac;
+ render->info.h264.pic_order_present_flag = h->ps.pps->pic_order_present;
+ render->info.h264.deblocking_filter_control_present_flag = h->ps.pps->deblocking_filter_parameters_present;
+ render->info.h264.redundant_pic_cnt_present_flag = h->ps.pps->redundant_pic_cnt_present;
+ memcpy(render->info.h264.scaling_lists_4x4, h->ps.pps->scaling_matrix4, sizeof(render->info.h264.scaling_lists_4x4));
+ memcpy(render->info.h264.scaling_lists_8x8[0], h->ps.pps->scaling_matrix8[0], sizeof(render->info.h264.scaling_lists_8x8[0]));
+ memcpy(render->info.h264.scaling_lists_8x8[1], h->ps.pps->scaling_matrix8[3], sizeof(render->info.h264.scaling_lists_8x8[0]));
+
+ ff_h264_draw_horiz_band(h, &h->slice_ctx[0], 0, h->avctx->height);
+ render->bitstream_buffers_used = 0;
+}
+#endif /* CONFIG_H264_VDPAU_DECODER */
+
+#if CONFIG_MPEG_VDPAU_DECODER || CONFIG_MPEG1_VDPAU_DECODER
+void ff_vdpau_mpeg_picture_complete(MpegEncContext *s, const uint8_t *buf,
+ int buf_size, int slice_count)
+{
+ struct vdpau_render_state *render, *last, *next;
+ int i;
+
+ if (!s->current_picture_ptr) return;
+
+ render = (struct vdpau_render_state *)s->current_picture_ptr->f->data[0];
+ assert(render);
+
+ /* fill VdpPictureInfoMPEG1Or2 struct */
+ render->info.mpeg.picture_structure = s->picture_structure;
+ render->info.mpeg.picture_coding_type = s->pict_type;
+ render->info.mpeg.intra_dc_precision = s->intra_dc_precision;
+ render->info.mpeg.frame_pred_frame_dct = s->frame_pred_frame_dct;
+ render->info.mpeg.concealment_motion_vectors = s->concealment_motion_vectors;
+ render->info.mpeg.intra_vlc_format = s->intra_vlc_format;
+ render->info.mpeg.alternate_scan = s->alternate_scan;
+ render->info.mpeg.q_scale_type = s->q_scale_type;
+ render->info.mpeg.top_field_first = s->top_field_first;
+ render->info.mpeg.full_pel_forward_vector = s->full_pel[0]; // MPEG-1 only. Set 0 for MPEG-2
+ render->info.mpeg.full_pel_backward_vector = s->full_pel[1]; // MPEG-1 only. Set 0 for MPEG-2
+ render->info.mpeg.f_code[0][0] = s->mpeg_f_code[0][0]; // For MPEG-1 fill both horiz. & vert.
+ render->info.mpeg.f_code[0][1] = s->mpeg_f_code[0][1];
+ render->info.mpeg.f_code[1][0] = s->mpeg_f_code[1][0];
+ render->info.mpeg.f_code[1][1] = s->mpeg_f_code[1][1];
+ for (i = 0; i < 64; ++i) {
+ render->info.mpeg.intra_quantizer_matrix[i] = s->intra_matrix[i];
+ render->info.mpeg.non_intra_quantizer_matrix[i] = s->inter_matrix[i];
+ }
+
+ render->info.mpeg.forward_reference = VDP_INVALID_HANDLE;
+ render->info.mpeg.backward_reference = VDP_INVALID_HANDLE;
+
+ switch(s->pict_type){
+ case AV_PICTURE_TYPE_B:
+ next = (struct vdpau_render_state *)s->next_picture.f->data[0];
+ assert(next);
+ render->info.mpeg.backward_reference = next->surface;
+ // no return here, going to set forward prediction
+ case AV_PICTURE_TYPE_P:
+ last = (struct vdpau_render_state *)s->last_picture.f->data[0];
+ if (!last) // FIXME: Does this test make sense?
+ last = render; // predict second field from the first
+ render->info.mpeg.forward_reference = last->surface;
+ }
+
+ ff_vdpau_add_data_chunk(s->current_picture_ptr->f->data[0], buf, buf_size);
+
+ render->info.mpeg.slice_count = slice_count;
+
+ if (slice_count)
+ ff_mpeg_draw_horiz_band(s, 0, s->avctx->height);
+ render->bitstream_buffers_used = 0;
+}
+#endif /* CONFIG_MPEG_VDPAU_DECODER || CONFIG_MPEG1_VDPAU_DECODER */
+
+#if CONFIG_VC1_VDPAU_DECODER
+void ff_vdpau_vc1_decode_picture(MpegEncContext *s, const uint8_t *buf,
+ int buf_size)
+{
+ VC1Context *v = s->avctx->priv_data;
+ struct vdpau_render_state *render, *last, *next;
+
+ render = (struct vdpau_render_state *)s->current_picture.f->data[0];
+ assert(render);
+
+ /* fill LvPictureInfoVC1 struct */
+ render->info.vc1.frame_coding_mode = v->fcm ? v->fcm + 1 : 0;
+ render->info.vc1.postprocflag = v->postprocflag;
+ render->info.vc1.pulldown = v->broadcast;
+ render->info.vc1.interlace = v->interlace;
+ render->info.vc1.tfcntrflag = v->tfcntrflag;
+ render->info.vc1.finterpflag = v->finterpflag;
+ render->info.vc1.psf = v->psf;
+ render->info.vc1.dquant = v->dquant;
+ render->info.vc1.panscan_flag = v->panscanflag;
+ render->info.vc1.refdist_flag = v->refdist_flag;
+ render->info.vc1.quantizer = v->quantizer_mode;
+ render->info.vc1.extended_mv = v->extended_mv;
+ render->info.vc1.extended_dmv = v->extended_dmv;
+ render->info.vc1.overlap = v->overlap;
+ render->info.vc1.vstransform = v->vstransform;
+ render->info.vc1.loopfilter = v->s.loop_filter;
+ render->info.vc1.fastuvmc = v->fastuvmc;
+ render->info.vc1.range_mapy_flag = v->range_mapy_flag;
+ render->info.vc1.range_mapy = v->range_mapy;
+ render->info.vc1.range_mapuv_flag = v->range_mapuv_flag;
+ render->info.vc1.range_mapuv = v->range_mapuv;
+ /* Specific to simple/main profile only */
+ render->info.vc1.multires = v->multires;
+ render->info.vc1.syncmarker = v->resync_marker;
+ render->info.vc1.rangered = v->rangered | (v->rangeredfrm << 1);
+ render->info.vc1.maxbframes = v->s.max_b_frames;
+
+ render->info.vc1.deblockEnable = v->postprocflag & 1;
+ render->info.vc1.pquant = v->pq;
+
+ render->info.vc1.forward_reference = VDP_INVALID_HANDLE;
+ render->info.vc1.backward_reference = VDP_INVALID_HANDLE;
+
+ if (v->bi_type)
+ render->info.vc1.picture_type = 4;
+ else
+ render->info.vc1.picture_type = s->pict_type - 1 + s->pict_type / 3;
+
+ switch(s->pict_type){
+ case AV_PICTURE_TYPE_B:
+ next = (struct vdpau_render_state *)s->next_picture.f->data[0];
+ assert(next);
+ render->info.vc1.backward_reference = next->surface;
+ // no break here, going to set forward prediction
+ case AV_PICTURE_TYPE_P:
+ last = (struct vdpau_render_state *)s->last_picture.f->data[0];
+ if (!last) // FIXME: Does this test make sense?
+ last = render; // predict second field from the first
+ render->info.vc1.forward_reference = last->surface;
+ }
+
+ ff_vdpau_add_data_chunk(s->current_picture_ptr->f->data[0], buf, buf_size);
+
+ render->info.vc1.slice_count = 1;
+
+ ff_mpeg_draw_horiz_band(s, 0, s->avctx->height);
+ render->bitstream_buffers_used = 0;
+}
+#endif /* (CONFIG_VC1_VDPAU_DECODER */
+
+#if CONFIG_MPEG4_VDPAU_DECODER
+void ff_vdpau_mpeg4_decode_picture(Mpeg4DecContext *ctx, const uint8_t *buf,
+ int buf_size)
+{
+ MpegEncContext *s = &ctx->m;
+ struct vdpau_render_state *render, *last, *next;
+ int i;
+
+ if (!s->current_picture_ptr) return;
+
+ render = (struct vdpau_render_state *)s->current_picture_ptr->f->data[0];
+ assert(render);
+
+ /* fill VdpPictureInfoMPEG4Part2 struct */
+ render->info.mpeg4.trd[0] = s->pp_time;
+ render->info.mpeg4.trb[0] = s->pb_time;
+ render->info.mpeg4.trd[1] = s->pp_field_time >> 1;
+ render->info.mpeg4.trb[1] = s->pb_field_time >> 1;
+ render->info.mpeg4.vop_time_increment_resolution = s->avctx->time_base.den;
+ render->info.mpeg4.vop_coding_type = 0;
+ render->info.mpeg4.vop_fcode_forward = s->f_code;
+ render->info.mpeg4.vop_fcode_backward = s->b_code;
+ render->info.mpeg4.resync_marker_disable = !ctx->resync_marker;
+ render->info.mpeg4.interlaced = !s->progressive_sequence;
+ render->info.mpeg4.quant_type = s->mpeg_quant;
+ render->info.mpeg4.quarter_sample = s->quarter_sample;
+ render->info.mpeg4.short_video_header = s->avctx->codec->id == AV_CODEC_ID_H263;
+ render->info.mpeg4.rounding_control = s->no_rounding;
+ render->info.mpeg4.alternate_vertical_scan_flag = s->alternate_scan;
+ render->info.mpeg4.top_field_first = s->top_field_first;
+ for (i = 0; i < 64; ++i) {
+ render->info.mpeg4.intra_quantizer_matrix[i] = s->intra_matrix[i];
+ render->info.mpeg4.non_intra_quantizer_matrix[i] = s->inter_matrix[i];
+ }
+ render->info.mpeg4.forward_reference = VDP_INVALID_HANDLE;
+ render->info.mpeg4.backward_reference = VDP_INVALID_HANDLE;
+
+ switch (s->pict_type) {
+ case AV_PICTURE_TYPE_B:
+ next = (struct vdpau_render_state *)s->next_picture.f->data[0];
+ assert(next);
+ render->info.mpeg4.backward_reference = next->surface;
+ render->info.mpeg4.vop_coding_type = 2;
+ // no break here, going to set forward prediction
+ case AV_PICTURE_TYPE_P:
+ last = (struct vdpau_render_state *)s->last_picture.f->data[0];
+ assert(last);
+ render->info.mpeg4.forward_reference = last->surface;
+ }
+
+ ff_vdpau_add_data_chunk(s->current_picture_ptr->f->data[0], buf, buf_size);
+
+ ff_mpeg_draw_horiz_band(s, 0, s->avctx->height);
+ render->bitstream_buffers_used = 0;
+}
+#endif /* CONFIG_MPEG4_VDPAU_DECODER */
+#endif /* FF_API_VDPAU */
+
#if FF_API_VDPAU_PROFILE
int av_vdpau_get_profile(AVCodecContext *avctx, VdpDecoderProfile *profile)
{