[FFmpeg-devel] [PATCH] ffv1dec: add support for hardware acceleration
Lynne
dev at lynne.ee
Thu Dec 19 19:44:43 EET 2024
---
libavcodec/ffv1.h | 2 +
libavcodec/ffv1dec.c | 451 +++++++++++++++++++++++++------------------
2 files changed, 268 insertions(+), 185 deletions(-)
diff --git a/libavcodec/ffv1.h b/libavcodec/ffv1.h
index ca03fd2b10..93174bd45e 100644
--- a/libavcodec/ffv1.h
+++ b/libavcodec/ffv1.h
@@ -121,7 +121,9 @@ typedef struct FFV1Context {
int64_t picture_number;
int key_frame;
ProgressFrame picture, last_picture;
+ void *hwaccel_picture_private, *hwaccel_last_picture_private;
uint32_t crcref;
+ enum AVPixelFormat pix_fmt;
const AVFrame *cur_enc_frame;
int plane_count;
diff --git a/libavcodec/ffv1dec.c b/libavcodec/ffv1dec.c
index 7845815873..7dc1aaedc6 100644
--- a/libavcodec/ffv1dec.c
+++ b/libavcodec/ffv1dec.c
@@ -40,6 +40,9 @@
#include "progressframe.h"
#include "libavutil/refstruct.h"
#include "thread.h"
+#include "decode.h"
+#include "hwconfig.h"
+#include "hwaccel_internal.h"
static inline av_flatten int get_symbol_inline(RangeCoder *c, uint8_t *state,
int is_signed)
@@ -268,7 +271,7 @@ static int decode_slice(AVCodecContext *c, void *arg)
FFV1Context *f = c->priv_data;
FFV1SliceContext *sc = arg;
int width, height, x, y, ret;
- const int ps = av_pix_fmt_desc_get(c->pix_fmt)->comp[0].step;
+ const int ps = av_pix_fmt_desc_get(f->pix_fmt)->comp[0].step;
AVFrame * const p = f->picture.f;
const int si = sc - f->slices;
GetBitContext gb;
@@ -537,178 +540,114 @@ static int read_extra_header(FFV1Context *f)
return 0;
}
-static int read_header(FFV1Context *f)
+static int setup_format(FFV1Context *f)
{
- uint8_t state[CONTEXT_SIZE];
- int context_count = -1; //-1 to avoid warning
- RangeCoder *const c = &f->slices[0].c;
-
- memset(state, 128, sizeof(state));
-
- if (f->version < 2) {
- int chroma_planes, chroma_h_shift, chroma_v_shift, transparency, colorspace, bits_per_raw_sample;
- unsigned v= get_symbol(c, state, 0);
- if (v >= 2) {
- av_log(f->avctx, AV_LOG_ERROR, "invalid version %d in ver01 header\n", v);
- return AVERROR_INVALIDDATA;
- }
- f->version = v;
- f->ac = get_symbol(c, state, 0);
-
- if (f->ac == AC_RANGE_CUSTOM_TAB) {
- for (int i = 1; i < 256; i++) {
- int st = get_symbol(c, state, 1) + c->one_state[i];
- if (st < 1 || st > 255) {
- av_log(f->avctx, AV_LOG_ERROR, "invalid state transition %d\n", st);
- return AVERROR_INVALIDDATA;
- }
- f->state_transition[i] = st;
- }
- }
-
- colorspace = get_symbol(c, state, 0); //YUV cs type
- bits_per_raw_sample = f->version > 0 ? get_symbol(c, state, 0) : f->avctx->bits_per_raw_sample;
- chroma_planes = get_rac(c, state);
- chroma_h_shift = get_symbol(c, state, 0);
- chroma_v_shift = get_symbol(c, state, 0);
- transparency = get_rac(c, state);
- if (colorspace == 0 && f->avctx->skip_alpha)
- transparency = 0;
-
- if (f->plane_count) {
- if (colorspace != f->colorspace ||
- bits_per_raw_sample != f->avctx->bits_per_raw_sample ||
- chroma_planes != f->chroma_planes ||
- chroma_h_shift != f->chroma_h_shift ||
- chroma_v_shift != f->chroma_v_shift ||
- transparency != f->transparency) {
- av_log(f->avctx, AV_LOG_ERROR, "Invalid change of global parameters\n");
- return AVERROR_INVALIDDATA;
- }
- }
-
- if (chroma_h_shift > 4U || chroma_v_shift > 4U) {
- av_log(f->avctx, AV_LOG_ERROR, "chroma shift parameters %d %d are invalid\n",
- chroma_h_shift, chroma_v_shift);
- return AVERROR_INVALIDDATA;
- }
-
- f->colorspace = colorspace;
- f->avctx->bits_per_raw_sample = bits_per_raw_sample;
- f->chroma_planes = chroma_planes;
- f->chroma_h_shift = chroma_h_shift;
- f->chroma_v_shift = chroma_v_shift;
- f->transparency = transparency;
-
- f->plane_count = 2 + f->transparency;
- }
-
if (f->colorspace == 0) {
if (!f->transparency && !f->chroma_planes) {
if (f->avctx->bits_per_raw_sample <= 8)
- f->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
+ f->pix_fmt = AV_PIX_FMT_GRAY8;
else if (f->avctx->bits_per_raw_sample == 9) {
f->packed_at_lsb = 1;
- f->avctx->pix_fmt = AV_PIX_FMT_GRAY9;
+ f->pix_fmt = AV_PIX_FMT_GRAY9;
} else if (f->avctx->bits_per_raw_sample == 10) {
f->packed_at_lsb = 1;
- f->avctx->pix_fmt = AV_PIX_FMT_GRAY10;
+ f->pix_fmt = AV_PIX_FMT_GRAY10;
} else if (f->avctx->bits_per_raw_sample == 12) {
f->packed_at_lsb = 1;
- f->avctx->pix_fmt = AV_PIX_FMT_GRAY12;
+ f->pix_fmt = AV_PIX_FMT_GRAY12;
} else if (f->avctx->bits_per_raw_sample == 14) {
f->packed_at_lsb = 1;
- f->avctx->pix_fmt = AV_PIX_FMT_GRAY14;
+ f->pix_fmt = AV_PIX_FMT_GRAY14;
} else if (f->avctx->bits_per_raw_sample == 16) {
f->packed_at_lsb = 1;
- f->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
+ f->pix_fmt = AV_PIX_FMT_GRAY16;
} else if (f->avctx->bits_per_raw_sample < 16) {
- f->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
+ f->pix_fmt = AV_PIX_FMT_GRAY16;
} else
return AVERROR(ENOSYS);
} else if (f->transparency && !f->chroma_planes) {
if (f->avctx->bits_per_raw_sample <= 8)
- f->avctx->pix_fmt = AV_PIX_FMT_YA8;
+ f->pix_fmt = AV_PIX_FMT_YA8;
else
return AVERROR(ENOSYS);
} else if (f->avctx->bits_per_raw_sample<=8 && !f->transparency) {
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
- case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P; break;
- case 0x01: f->avctx->pix_fmt = AV_PIX_FMT_YUV440P; break;
- case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P; break;
- case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P; break;
- case 0x20: f->avctx->pix_fmt = AV_PIX_FMT_YUV411P; break;
- case 0x22: f->avctx->pix_fmt = AV_PIX_FMT_YUV410P; break;
+ case 0x00: f->pix_fmt = AV_PIX_FMT_YUV444P; break;
+ case 0x01: f->pix_fmt = AV_PIX_FMT_YUV440P; break;
+ case 0x10: f->pix_fmt = AV_PIX_FMT_YUV422P; break;
+ case 0x11: f->pix_fmt = AV_PIX_FMT_YUV420P; break;
+ case 0x20: f->pix_fmt = AV_PIX_FMT_YUV411P; break;
+ case 0x22: f->pix_fmt = AV_PIX_FMT_YUV410P; break;
}
} else if (f->avctx->bits_per_raw_sample <= 8 && f->transparency) {
switch(16*f->chroma_h_shift + f->chroma_v_shift) {
- case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUVA444P; break;
- case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUVA422P; break;
- case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUVA420P; break;
+ case 0x00: f->pix_fmt = AV_PIX_FMT_YUVA444P; break;
+ case 0x10: f->pix_fmt = AV_PIX_FMT_YUVA422P; break;
+ case 0x11: f->pix_fmt = AV_PIX_FMT_YUVA420P; break;
}
} else if (f->avctx->bits_per_raw_sample == 9 && !f->transparency) {
f->packed_at_lsb = 1;
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
- case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P9; break;
- case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P9; break;
- case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P9; break;
+ case 0x00: f->pix_fmt = AV_PIX_FMT_YUV444P9; break;
+ case 0x10: f->pix_fmt = AV_PIX_FMT_YUV422P9; break;
+ case 0x11: f->pix_fmt = AV_PIX_FMT_YUV420P9; break;
}
} else if (f->avctx->bits_per_raw_sample == 9 && f->transparency) {
f->packed_at_lsb = 1;
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
- case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUVA444P9; break;
- case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUVA422P9; break;
- case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUVA420P9; break;
+ case 0x00: f->pix_fmt = AV_PIX_FMT_YUVA444P9; break;
+ case 0x10: f->pix_fmt = AV_PIX_FMT_YUVA422P9; break;
+ case 0x11: f->pix_fmt = AV_PIX_FMT_YUVA420P9; break;
}
} else if (f->avctx->bits_per_raw_sample == 10 && !f->transparency) {
f->packed_at_lsb = 1;
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
- case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P10; break;
- case 0x01: f->avctx->pix_fmt = AV_PIX_FMT_YUV440P10; break;
- case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P10; break;
- case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P10; break;
+ case 0x00: f->pix_fmt = AV_PIX_FMT_YUV444P10; break;
+ case 0x01: f->pix_fmt = AV_PIX_FMT_YUV440P10; break;
+ case 0x10: f->pix_fmt = AV_PIX_FMT_YUV422P10; break;
+ case 0x11: f->pix_fmt = AV_PIX_FMT_YUV420P10; break;
}
} else if (f->avctx->bits_per_raw_sample == 10 && f->transparency) {
f->packed_at_lsb = 1;
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
- case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUVA444P10; break;
- case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUVA422P10; break;
- case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUVA420P10; break;
+ case 0x00: f->pix_fmt = AV_PIX_FMT_YUVA444P10; break;
+ case 0x10: f->pix_fmt = AV_PIX_FMT_YUVA422P10; break;
+ case 0x11: f->pix_fmt = AV_PIX_FMT_YUVA420P10; break;
}
} else if (f->avctx->bits_per_raw_sample == 12 && !f->transparency) {
f->packed_at_lsb = 1;
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
- case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P12; break;
- case 0x01: f->avctx->pix_fmt = AV_PIX_FMT_YUV440P12; break;
- case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P12; break;
- case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P12; break;
+ case 0x00: f->pix_fmt = AV_PIX_FMT_YUV444P12; break;
+ case 0x01: f->pix_fmt = AV_PIX_FMT_YUV440P12; break;
+ case 0x10: f->pix_fmt = AV_PIX_FMT_YUV422P12; break;
+ case 0x11: f->pix_fmt = AV_PIX_FMT_YUV420P12; break;
}
} else if (f->avctx->bits_per_raw_sample == 12 && f->transparency) {
f->packed_at_lsb = 1;
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
- case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUVA444P12; break;
- case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUVA422P12; break;
+ case 0x00: f->pix_fmt = AV_PIX_FMT_YUVA444P12; break;
+ case 0x10: f->pix_fmt = AV_PIX_FMT_YUVA422P12; break;
}
} else if (f->avctx->bits_per_raw_sample == 14 && !f->transparency) {
f->packed_at_lsb = 1;
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
- case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P14; break;
- case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P14; break;
- case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P14; break;
+ case 0x00: f->pix_fmt = AV_PIX_FMT_YUV444P14; break;
+ case 0x10: f->pix_fmt = AV_PIX_FMT_YUV422P14; break;
+ case 0x11: f->pix_fmt = AV_PIX_FMT_YUV420P14; break;
}
} else if (f->avctx->bits_per_raw_sample == 16 && !f->transparency){
f->packed_at_lsb = 1;
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
- case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P16; break;
- case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P16; break;
- case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P16; break;
+ case 0x00: f->pix_fmt = AV_PIX_FMT_YUV444P16; break;
+ case 0x10: f->pix_fmt = AV_PIX_FMT_YUV422P16; break;
+ case 0x11: f->pix_fmt = AV_PIX_FMT_YUV420P16; break;
}
} else if (f->avctx->bits_per_raw_sample == 16 && f->transparency){
f->packed_at_lsb = 1;
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
- case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUVA444P16; break;
- case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUVA422P16; break;
- case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUVA420P16; break;
+ case 0x00: f->pix_fmt = AV_PIX_FMT_YUVA444P16; break;
+ case 0x10: f->pix_fmt = AV_PIX_FMT_YUVA422P16; break;
+ case 0x11: f->pix_fmt = AV_PIX_FMT_YUVA420P16; break;
}
}
} else if (f->colorspace == 1) {
@@ -718,42 +657,128 @@ static int read_header(FFV1Context *f)
return AVERROR(ENOSYS);
}
if ( f->avctx->bits_per_raw_sample <= 8 && !f->transparency)
- f->avctx->pix_fmt = AV_PIX_FMT_0RGB32;
+ f->pix_fmt = AV_PIX_FMT_0RGB32;
else if (f->avctx->bits_per_raw_sample <= 8 && f->transparency)
- f->avctx->pix_fmt = AV_PIX_FMT_RGB32;
+ f->pix_fmt = AV_PIX_FMT_RGB32;
else if (f->avctx->bits_per_raw_sample == 9 && !f->transparency)
- f->avctx->pix_fmt = AV_PIX_FMT_GBRP9;
+ f->pix_fmt = AV_PIX_FMT_GBRP9;
else if (f->avctx->bits_per_raw_sample == 10 && !f->transparency)
- f->avctx->pix_fmt = AV_PIX_FMT_GBRP10;
+ f->pix_fmt = AV_PIX_FMT_GBRP10;
else if (f->avctx->bits_per_raw_sample == 10 && f->transparency)
- f->avctx->pix_fmt = AV_PIX_FMT_GBRAP10;
+ f->pix_fmt = AV_PIX_FMT_GBRAP10;
else if (f->avctx->bits_per_raw_sample == 12 && !f->transparency)
- f->avctx->pix_fmt = AV_PIX_FMT_GBRP12;
+ f->pix_fmt = AV_PIX_FMT_GBRP12;
else if (f->avctx->bits_per_raw_sample == 12 && f->transparency)
- f->avctx->pix_fmt = AV_PIX_FMT_GBRAP12;
+ f->pix_fmt = AV_PIX_FMT_GBRAP12;
else if (f->avctx->bits_per_raw_sample == 14 && !f->transparency)
- f->avctx->pix_fmt = AV_PIX_FMT_GBRP14;
+ f->pix_fmt = AV_PIX_FMT_GBRP14;
else if (f->avctx->bits_per_raw_sample == 14 && f->transparency)
- f->avctx->pix_fmt = AV_PIX_FMT_GBRAP14;
- else if (f->avctx->bits_per_raw_sample == 16 && !f->transparency) {
- f->avctx->pix_fmt = AV_PIX_FMT_GBRP16;
- f->use32bit = 1;
- }
- else if (f->avctx->bits_per_raw_sample == 16 && f->transparency) {
- f->avctx->pix_fmt = AV_PIX_FMT_GBRAP16;
- f->use32bit = 1;
- }
+ f->pix_fmt = AV_PIX_FMT_GBRAP14;
+ else if (f->avctx->bits_per_raw_sample == 16 && !f->transparency)
+ f->pix_fmt = AV_PIX_FMT_GBRP16;
+ else if (f->avctx->bits_per_raw_sample == 16 && f->transparency)
+ f->pix_fmt = AV_PIX_FMT_GBRAP16;
} else {
av_log(f->avctx, AV_LOG_ERROR, "colorspace not supported\n");
return AVERROR(ENOSYS);
}
- if (f->avctx->pix_fmt == AV_PIX_FMT_NONE) {
+ if (f->pix_fmt == AV_PIX_FMT_NONE) {
av_log(f->avctx, AV_LOG_ERROR, "format not supported\n");
return AVERROR(ENOSYS);
}
+ return 0;
+}
+
+static enum AVPixelFormat get_pixel_format(FFV1Context *f)
+{
+ enum AVPixelFormat pix_fmts[] = {
+ f->pix_fmt,
+ AV_PIX_FMT_NONE,
+ };
+
+ return ff_get_format(f->avctx, pix_fmts);
+}
+
+static int read_header(FFV1Context *f, RangeCoder *c)
+{
+ int err;
+ uint8_t state[CONTEXT_SIZE];
+ int context_count = -1; //-1 to avoid warning
+
+ memset(state, 128, sizeof(state));
+
+ if (f->version < 2) {
+ int chroma_planes, chroma_h_shift, chroma_v_shift, transparency, colorspace, bits_per_raw_sample;
+ unsigned v= get_symbol(c, state, 0);
+ if (v >= 2) {
+ av_log(f->avctx, AV_LOG_ERROR, "invalid version %d in ver01 header\n", v);
+ return AVERROR_INVALIDDATA;
+ }
+ f->version = v;
+ f->ac = get_symbol(c, state, 0);
+
+ if (f->ac == AC_RANGE_CUSTOM_TAB) {
+ for (int i = 1; i < 256; i++) {
+ int st = get_symbol(c, state, 1) + c->one_state[i];
+ if (st < 1 || st > 255) {
+ av_log(f->avctx, AV_LOG_ERROR, "invalid state transition %d\n", st);
+ return AVERROR_INVALIDDATA;
+ }
+ f->state_transition[i] = st;
+ }
+ }
+
+ colorspace = get_symbol(c, state, 0); //YUV cs type
+ bits_per_raw_sample = f->version > 0 ? get_symbol(c, state, 0) : f->avctx->bits_per_raw_sample;
+ chroma_planes = get_rac(c, state);
+ chroma_h_shift = get_symbol(c, state, 0);
+ chroma_v_shift = get_symbol(c, state, 0);
+ transparency = get_rac(c, state);
+ if (colorspace == 0 && f->avctx->skip_alpha)
+ transparency = 0;
+
+ if (f->plane_count) {
+ if (colorspace != f->colorspace ||
+ bits_per_raw_sample != f->avctx->bits_per_raw_sample ||
+ chroma_planes != f->chroma_planes ||
+ chroma_h_shift != f->chroma_h_shift ||
+ chroma_v_shift != f->chroma_v_shift ||
+ transparency != f->transparency) {
+ av_log(f->avctx, AV_LOG_ERROR, "Invalid change of global parameters\n");
+ return AVERROR_INVALIDDATA;
+ }
+ }
+
+ if (chroma_h_shift > 4U || chroma_v_shift > 4U) {
+ av_log(f->avctx, AV_LOG_ERROR, "chroma shift parameters %d %d are invalid\n",
+ chroma_h_shift, chroma_v_shift);
+ return AVERROR_INVALIDDATA;
+ }
+
+ f->colorspace = colorspace;
+ f->avctx->bits_per_raw_sample = bits_per_raw_sample;
+ f->chroma_planes = chroma_planes;
+ f->chroma_h_shift = chroma_h_shift;
+ f->chroma_v_shift = chroma_v_shift;
+ f->transparency = transparency;
+
+ f->plane_count = 2 + f->transparency;
+ }
+
+ if (f->colorspace && f->avctx->bits_per_raw_sample == 16)
+ f->use32bit = 1;
+
+ err = setup_format(f);
+ if (err < 0)
+ return err;
+
+ err = get_pixel_format(f);
+ if (err < 0)
+ return err;
+ f->avctx->pix_fmt = err;
+
ff_dlog(f->avctx, "%d %d %d\n",
- f->chroma_h_shift, f->chroma_v_shift, f->avctx->pix_fmt);
+ f->chroma_h_shift, f->chroma_v_shift, f->pix_fmt);
if (f->version < 2) {
context_count = read_quant_tables(c, f->quant_tables[0]);
if (context_count < 0) {
@@ -784,7 +809,6 @@ static int read_header(FFV1Context *f)
f->slice_damaged = av_refstruct_allocz(f->slice_count * sizeof(*f->slice_damaged));
if (!f->slice_damaged)
return AVERROR(ENOMEM);
-
for (int j = 0; j < f->slice_count; j++) {
FFV1SliceContext *sc = &f->slices[j];
@@ -855,31 +879,21 @@ static av_cold int decode_init(AVCodecContext *avctx)
return 0;
}
-static int decode_frame(AVCodecContext *avctx, AVFrame *rframe,
- int *got_frame, AVPacket *avpkt)
+static int decode_header(AVCodecContext *avctx,
+ uint8_t *buf, size_t buf_size)
{
- uint8_t *buf = avpkt->data;
- int buf_size = avpkt->size;
- FFV1Context *f = avctx->priv_data;
- RangeCoder *const c = &f->slices[0].c;
- int ret, key_frame;
+ int ret;
+ FFV1Context *f = avctx->priv_data;
+ RangeCoder c;
uint8_t keystate = 128;
- uint8_t *buf_p;
- AVFrame *p;
-
- ff_progress_frame_unref(&f->last_picture);
- FFSWAP(ProgressFrame, f->picture, f->last_picture);
-
- f->avctx = avctx;
- f->frame_damaged = 0;
- ff_init_range_decoder(c, buf, buf_size);
- ff_build_rac_states(c, 0.05 * (1LL << 32), 256 - 8);
+ ff_init_range_decoder(&c, buf, buf_size);
+ ff_build_rac_states(&c, 0.05 * (1LL << 32), 256 - 8);
- if (get_rac(c, &keystate)) {
- key_frame = AV_FRAME_FLAG_KEY;
+ if (get_rac(&c, &keystate)) {
+ f->key_frame = AV_FRAME_FLAG_KEY;
f->key_frame_ok = 0;
- if ((ret = read_header(f)) < 0)
+ if ((ret = read_header(f, &c)) < 0)
return ret;
f->key_frame_ok = 1;
} else {
@@ -888,7 +902,7 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *rframe,
"Cannot decode non-keyframe without valid keyframe\n");
return AVERROR_INVALIDDATA;
}
- key_frame = 0;
+ f->key_frame = 0;
}
if (f->ac != AC_GOLOMB_RICE) {
@@ -907,30 +921,22 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *rframe,
return AVERROR_INVALIDDATA;
}
- ret = ff_progress_frame_get_buffer(avctx, &f->picture,
- AV_GET_BUFFER_FLAG_REF);
- if (ret < 0)
- return ret;
-
- p = f->picture.f;
-
- p->pict_type = AV_PICTURE_TYPE_I; //FIXME I vs. P
- p->flags = (p->flags & ~AV_FRAME_FLAG_KEY) | key_frame;
-
- if (f->version < 3 && avctx->field_order > AV_FIELD_PROGRESSIVE) {
- /* we have interlaced material flagged in container */
- p->flags |= AV_FRAME_FLAG_INTERLACED;
- if (avctx->field_order == AV_FIELD_TT || avctx->field_order == AV_FIELD_TB)
- p->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST;
- }
+ return 0;
+}
- if (avctx->debug & FF_DEBUG_PICT_INFO)
- av_log(avctx, AV_LOG_DEBUG, "ver:%d keyframe:%d coder:%d ec:%d slices:%d bps:%d\n",
- f->version, !!(p->flags & AV_FRAME_FLAG_KEY), f->ac, f->ec, f->slice_count, f->avctx->bits_per_raw_sample);
+static int decode_slices(AVCodecContext *avctx,
+ uint8_t *buf, size_t buf_size)
+{
+ FFV1Context *f = avctx->priv_data;
+ RangeCoder *const c = &f->slices[0].c;
+ AVFrame *p = f->picture.f;
+ uint8_t *buf_p = buf + buf_size;
+ uint8_t keystate = 128;
- ff_thread_finish_setup(avctx);
+ ff_init_range_decoder(c, buf, buf_size);
+ ff_build_rac_states(c, 0.05 * (1LL << 32), 256 - 8);
+ get_rac(c, &keystate);
- buf_p = buf + buf_size;
for (int i = f->slice_count - 1; i >= 0; i--) {
FFV1SliceContext *sc = &f->slices[i];
int trailer = 3 + 5*!!f->ec;
@@ -952,15 +958,7 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *rframe,
if (f->ec) {
unsigned crc = av_crc(av_crc_get_table(AV_CRC_32_IEEE), f->crcref, buf_p, v);
if (crc != f->crcref) {
- int64_t ts = avpkt->pts != AV_NOPTS_VALUE ? avpkt->pts : avpkt->dts;
- av_log(f->avctx, AV_LOG_ERROR, "slice CRC mismatch %X!", crc);
- if (ts != AV_NOPTS_VALUE && avctx->pkt_timebase.num) {
- av_log(f->avctx, AV_LOG_ERROR, "at %f seconds\n", ts*av_q2d(avctx->pkt_timebase));
- } else if (ts != AV_NOPTS_VALUE) {
- av_log(f->avctx, AV_LOG_ERROR, "at %"PRId64"\n", ts);
- } else {
- av_log(f->avctx, AV_LOG_ERROR, "\n");
- }
+ av_log(f->avctx, AV_LOG_ERROR, "slice CRC mismatch %X!\n", crc);
slice_set_damaged(f, sc);
}
if (avctx->debug & FF_DEBUG_PICT_INFO) {
@@ -986,7 +984,7 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *rframe,
for (int i = f->slice_count - 1; i >= 0; i--) {
FFV1SliceContext *sc = &f->slices[i];
if (sc->slice_damaged && f->last_picture.f) {
- const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
+ const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(f->pix_fmt);
const uint8_t *src[4];
uint8_t *dst[4];
ff_progress_frame_await(&f->last_picture, INT_MAX);
@@ -1003,22 +1001,99 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *rframe,
av_image_copy(dst, p->linesize, src,
f->last_picture.f->linesize,
- avctx->pix_fmt,
+ f->pix_fmt,
sc->slice_width,
sc->slice_height);
f->slice_damaged[i] = 1;
}
}
+
+ return 0;
+}
+
+static int decode_frame(AVCodecContext *avctx, AVFrame *rframe,
+ int *got_frame, AVPacket *avpkt)
+{
+ FFV1Context *f = avctx->priv_data;
+ int ret;
+ AVFrame *p;
+ const FFHWAccel *hwaccel = NULL;
+
+ ff_progress_frame_unref(&f->last_picture);
+ av_refstruct_unref(&f->hwaccel_last_picture_private);
+ FFSWAP(ProgressFrame, f->picture, f->last_picture);
+ FFSWAP(void *, f->hwaccel_picture_private, f->hwaccel_last_picture_private);
+
+ f->avctx = avctx;
+ f->frame_damaged = 0;
+
+ ret = decode_header(avctx, avpkt->data, avpkt->size);
+ if (ret < 0)
+ return ret;
+
+ if (avctx->hwaccel)
+ hwaccel = ffhwaccel(avctx->hwaccel);
+
+ ret = ff_progress_frame_get_buffer(avctx, &f->picture,
+ AV_GET_BUFFER_FLAG_REF);
+ if (ret < 0)
+ return ret;
+
+ ret = ff_hwaccel_frame_priv_alloc(avctx, &f->hwaccel_picture_private);
+ if (ret < 0)
+ return ret;
+
+ p = f->picture.f;
+
+ p->pict_type = AV_PICTURE_TYPE_I; //FIXME I vs. P
+ p->flags = (p->flags & ~AV_FRAME_FLAG_KEY) | f->key_frame;
+
+ if (f->version < 3 && avctx->field_order > AV_FIELD_PROGRESSIVE) {
+ /* we have interlaced material flagged in container */
+ p->flags |= AV_FRAME_FLAG_INTERLACED;
+ if (avctx->field_order == AV_FIELD_TT || avctx->field_order == AV_FIELD_TB)
+ p->flags |= AV_FRAME_FLAG_TOP_FIELD_FIRST;
+ }
+
+ if (avctx->debug & FF_DEBUG_PICT_INFO)
+ av_log(avctx, AV_LOG_DEBUG, "ver:%d keyframe:%d coder:%d ec:%d slices:%d bps:%d\n",
+ f->version, !!(p->flags & AV_FRAME_FLAG_KEY), f->ac, f->ec, f->slice_count, f->avctx->bits_per_raw_sample);
+
+ /* Decode header */
+ if (hwaccel) {
+ ret = hwaccel->start_frame(avctx, avpkt->data, avpkt->size);
+ if (ret < 0)
+ return ret;
+ }
+
+ ff_thread_finish_setup(avctx);
+
+ /* Decode slices */
+ if (hwaccel)
+ ret = hwaccel->decode_slice(avctx, avpkt->data, avpkt->size);
+ else
+ ret = decode_slices(avctx, avpkt->data, avpkt->size);
+ if (ret < 0)
+ return ret;
+
+ /* Trailer */
+ if (hwaccel) {
+ ret = hwaccel->end_frame(avctx);
+ if (ret < 0)
+ return ret;
+ }
+
ff_progress_frame_report(&f->picture, INT_MAX);
ff_progress_frame_unref(&f->last_picture);
+ av_refstruct_unref(&f->hwaccel_last_picture_private);
if ((ret = av_frame_ref(rframe, f->picture.f)) < 0)
return ret;
*got_frame = 1;
- return buf_size;
+ return avpkt->size;
}
#if HAVE_THREADS
@@ -1084,7 +1159,10 @@ static av_cold int ffv1_decode_close(AVCodecContext *avctx)
FFV1Context *const s = avctx->priv_data;
ff_progress_frame_unref(&s->picture);
+ av_refstruct_unref(&s->hwaccel_picture_private);
+
ff_progress_frame_unref(&s->last_picture);
+ av_refstruct_unref(&s->hwaccel_last_picture_private);
return ff_ffv1_close(avctx);
}
@@ -1103,4 +1181,7 @@ const FFCodec ff_ffv1_decoder = {
AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_SLICE_THREADS,
.caps_internal = FF_CODEC_CAP_INIT_CLEANUP |
FF_CODEC_CAP_USES_PROGRESSFRAMES,
+ .hw_configs = (const AVCodecHWConfigInternal *const []) {
+ NULL
+ },
};
--
2.45.2
More information about the ffmpeg-devel
mailing list