[FFmpeg-devel] [PATCH] Make decoding alpha optional for some codecs.
Reimar Döffinger
Reimar.Doeffinger at gmx.de
Wed Sep 18 00:41:26 CEST 2013
For codecs where decoding of a whole plane can simply
be skipped, we should offer applications the option of
not decoding alpha, for better performance (ca. 30% less
CPU usage and 40% less memory bandwidth).
It also means applications do not need to implement support
for YUVA formats (even though that is rather simple) in order
to be able to play these files.
Tested by manually hacking avcodec_default_get_format;
suggestions for how to test this in FATE are welcome.
Signed-off-by: Reimar Döffinger <Reimar.Doeffinger at gmx.de>
---
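For illustration, here is a rough sketch of what an application-side
get_format callback could look like if it wants to skip alpha whenever
the decoder offers a non-alpha alternative. The function name is made
up and error handling is omitted; treat it as an example, not as part
of the patch:

#include <libavcodec/avcodec.h>
#include <libavutil/pixdesc.h>

static enum AVPixelFormat get_format_skip_alpha(AVCodecContext *avctx,
                                                const enum AVPixelFormat *fmts)
{
    const enum AVPixelFormat *p;

    /* Pick the first offered format without an alpha component; with this
     * patch the affected decoders offer the alpha format first and the
     * corresponding non-alpha format second. */
    for (p = fmts; *p != AV_PIX_FMT_NONE; p++) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
        if (desc && desc->nb_components < 4)
            return *p;
    }
    /* No alpha-free format offered, keep the decoder's first choice. */
    return fmts[0];
}

and then, before avcodec_open2():

    avctx->get_format = get_format_skip_alpha;
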
Changelog | 2 ++
libavcodec/ffv1dec.c | 50 ++++++++++++++-------------------------------
libavcodec/internal.h | 11 ++++++++++
libavcodec/proresdec2.c | 6 +-----
libavcodec/proresdec_lgpl.c | 8 +++-----
libavcodec/utils.c | 36 ++++++++++++++++++++++++++++++++
libavcodec/vp56.c | 9 ++++----
7 files changed, 73 insertions(+), 49 deletions(-)
diff --git a/Changelog b/Changelog
index 3e4653b..a0543c8 100644
--- a/Changelog
+++ b/Changelog
@@ -25,6 +25,8 @@ version <next>
more consistent with other muxers.
- adelay filter
- pullup filter ported from libmpcodecs
+- make decoding alpha optional for prores, ffv1 and vp6 by choosing the
+ non-alpha format in the get_format callback.
version 2.0:
diff --git a/libavcodec/ffv1dec.c b/libavcodec/ffv1dec.c
index 87cc2ca..d60c081 100644
--- a/libavcodec/ffv1dec.c
+++ b/libavcodec/ffv1dec.c
@@ -657,49 +657,29 @@ static int read_header(FFV1Context *f)
}
} else if (f->avctx->bits_per_raw_sample <= 8 && f->transparency) {
switch(16*f->chroma_h_shift + f->chroma_v_shift) {
- case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUVA444P; break;
- case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUVA422P; break;
- case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUVA420P; break;
+ case 0x00: ff_get_format_alpha(f->avctx, AV_PIX_FMT_YUVA444P, &f->transparency); break;
+ case 0x10: ff_get_format_alpha(f->avctx, AV_PIX_FMT_YUVA422P, &f->transparency); break;
+ case 0x11: ff_get_format_alpha(f->avctx, AV_PIX_FMT_YUVA420P, &f->transparency); break;
}
- } else if (f->avctx->bits_per_raw_sample == 9 && !f->transparency) {
+ } else if (f->avctx->bits_per_raw_sample == 9) {
f->packed_at_lsb = 1;
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
- case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P9; break;
- case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P9; break;
- case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P9; break;
+ case 0x00: ff_get_format_alpha(f->avctx, AV_PIX_FMT_YUVA444P9, &f->transparency); break;
+ case 0x10: ff_get_format_alpha(f->avctx, AV_PIX_FMT_YUVA422P9, &f->transparency); break;
+ case 0x11: ff_get_format_alpha(f->avctx, AV_PIX_FMT_YUVA420P9, &f->transparency); break;
}
- } else if (f->avctx->bits_per_raw_sample == 9 && f->transparency) {
+ } else if (f->avctx->bits_per_raw_sample == 10) {
f->packed_at_lsb = 1;
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
- case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUVA444P9; break;
- case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUVA422P9; break;
- case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUVA420P9; break;
+ case 0x00: ff_get_format_alpha(f->avctx, AV_PIX_FMT_YUVA444P10, &f->transparency); break;
+ case 0x10: ff_get_format_alpha(f->avctx, AV_PIX_FMT_YUVA422P10, &f->transparency); break;
+ case 0x11: ff_get_format_alpha(f->avctx, AV_PIX_FMT_YUVA420P10, &f->transparency); break;
}
- } else if (f->avctx->bits_per_raw_sample == 10 && !f->transparency) {
- f->packed_at_lsb = 1;
- switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
- case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P10; break;
- case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P10; break;
- case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P10; break;
- }
- } else if (f->avctx->bits_per_raw_sample == 10 && f->transparency) {
- f->packed_at_lsb = 1;
- switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
- case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUVA444P10; break;
- case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUVA422P10; break;
- case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUVA420P10; break;
- }
- } else if (f->avctx->bits_per_raw_sample == 16 && !f->transparency){
- switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
- case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUV444P16; break;
- case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUV422P16; break;
- case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUV420P16; break;
- }
- } else if (f->avctx->bits_per_raw_sample == 16 && f->transparency){
+ } else if (f->avctx->bits_per_raw_sample == 16){
switch(16 * f->chroma_h_shift + f->chroma_v_shift) {
- case 0x00: f->avctx->pix_fmt = AV_PIX_FMT_YUVA444P16; break;
- case 0x10: f->avctx->pix_fmt = AV_PIX_FMT_YUVA422P16; break;
- case 0x11: f->avctx->pix_fmt = AV_PIX_FMT_YUVA420P16; break;
+ case 0x00: ff_get_format_alpha(f->avctx, AV_PIX_FMT_YUVA444P16, &f->transparency); break;
+ case 0x10: ff_get_format_alpha(f->avctx, AV_PIX_FMT_YUVA422P16, &f->transparency); break;
+ case 0x11: ff_get_format_alpha(f->avctx, AV_PIX_FMT_YUVA420P16, &f->transparency); break;
}
}
} else if (f->colorspace == 1) {
diff --git a/libavcodec/internal.h b/libavcodec/internal.h
index 96976e1..2b5a691 100644
--- a/libavcodec/internal.h
+++ b/libavcodec/internal.h
@@ -225,4 +225,15 @@ const uint8_t *avpriv_find_start_code(const uint8_t *p,
const uint8_t *end,
uint32_t *state);
+/**
+ * Helper function to decide whether to decode alpha or not.
+ * Sets avctx->pix_fmt to the chosen format.
+ *
+ * @param alpha Must be initialized to non-0 if the format can support alpha, 0 otherwise.
+ * If set to 0, the non-alpha format corresponding to fmt will be used.
+ * Will be reset to 0 if the user selected the non-alpha format.
+ * @param fmt a format with an alpha component.
+ */
+void ff_get_format_alpha(AVCodecContext *avctx, enum AVPixelFormat fmt, int *alpha);
+
#endif /* AVCODEC_INTERNAL_H */
diff --git a/libavcodec/proresdec2.c b/libavcodec/proresdec2.c
index 9a8861c..ce0e3c1 100644
--- a/libavcodec/proresdec2.c
+++ b/libavcodec/proresdec2.c
@@ -108,11 +108,7 @@ static int decode_frame_header(ProresContext *ctx, const uint8_t *buf,
ctx->frame->top_field_first = ctx->frame_type == 1;
}
- if (ctx->alpha_info) {
- avctx->pix_fmt = (buf[12] & 0xC0) == 0xC0 ? AV_PIX_FMT_YUVA444P10 : AV_PIX_FMT_YUVA422P10;
- } else {
- avctx->pix_fmt = (buf[12] & 0xC0) == 0xC0 ? AV_PIX_FMT_YUV444P10 : AV_PIX_FMT_YUV422P10;
- }
+ ff_get_format_alpha(avctx, (buf[12] & 0xC0) == 0xC0 ? AV_PIX_FMT_YUVA444P10 : AV_PIX_FMT_YUVA422P10, &ctx->alpha_info);
ptr = buf + 20;
flags = buf[19];
diff --git a/libavcodec/proresdec_lgpl.c b/libavcodec/proresdec_lgpl.c
index 2fef2c6..96c649b 100644
--- a/libavcodec/proresdec_lgpl.c
+++ b/libavcodec/proresdec_lgpl.c
@@ -143,12 +143,10 @@ static int decode_frame_header(ProresContext *ctx, const uint8_t *buf,
switch (ctx->chroma_factor) {
case 2:
- avctx->pix_fmt = ctx->alpha_info ? AV_PIX_FMT_YUVA422P10
- : AV_PIX_FMT_YUV422P10;
+ ff_get_format_alpha(avctx, AV_PIX_FMT_YUVA422P10, &ctx->alpha_info);
break;
case 3:
- avctx->pix_fmt = ctx->alpha_info ? AV_PIX_FMT_YUVA444P10
- : AV_PIX_FMT_YUV444P10;
+ ff_get_format_alpha(avctx, AV_PIX_FMT_YUVA444P10, &ctx->alpha_info);
break;
default:
av_log(avctx, AV_LOG_ERROR,
@@ -609,7 +607,7 @@ static int decode_slice(AVCodecContext *avctx, void *tdata)
coff[2] = coff[1] + u_data_size;
v_data_size = hdr_size > 7 ? AV_RB16(buf + 6) : slice_data_size - coff[2];
coff[3] = coff[2] + v_data_size;
- a_data_size = slice_data_size - coff[3];
+ a_data_size = ctx->alpha_info ? slice_data_size - coff[3] : 0;
/* if V or alpha component size is negative that means that previous
component sizes are too large */
diff --git a/libavcodec/utils.c b/libavcodec/utils.c
index 92b6443..4b1ebb9 100644
--- a/libavcodec/utils.c
+++ b/libavcodec/utils.c
@@ -3334,3 +3334,39 @@ const uint8_t *avpriv_find_start_code(const uint8_t *av_restrict p,
return p + 4;
}
+
+void ff_get_format_alpha(AVCodecContext *avctx, enum AVPixelFormat fmt, int *alpha)
+{
+ enum AVPixelFormat base_fmt = AV_PIX_FMT_NONE;
+ switch (fmt) {
+ case AV_PIX_FMT_YUVA420P: base_fmt = AV_PIX_FMT_YUV420P; break;
+ case AV_PIX_FMT_YUVA420P9BE: base_fmt = AV_PIX_FMT_YUV420P9BE; break;
+ case AV_PIX_FMT_YUVA420P9LE: base_fmt = AV_PIX_FMT_YUV420P9LE; break;
+ case AV_PIX_FMT_YUVA422P9BE: base_fmt = AV_PIX_FMT_YUV422P9BE; break;
+ case AV_PIX_FMT_YUVA422P9LE: base_fmt = AV_PIX_FMT_YUV422P9LE; break;
+ case AV_PIX_FMT_YUVA444P9BE: base_fmt = AV_PIX_FMT_YUV444P9BE; break;
+ case AV_PIX_FMT_YUVA444P9LE: base_fmt = AV_PIX_FMT_YUV444P9LE; break;
+ case AV_PIX_FMT_YUVA420P10BE: base_fmt = AV_PIX_FMT_YUV420P10BE; break;
+ case AV_PIX_FMT_YUVA420P10LE: base_fmt = AV_PIX_FMT_YUV420P10LE; break;
+ case AV_PIX_FMT_YUVA422P10BE: base_fmt = AV_PIX_FMT_YUV422P10BE; break;
+ case AV_PIX_FMT_YUVA422P10LE: base_fmt = AV_PIX_FMT_YUV422P10LE; break;
+ case AV_PIX_FMT_YUVA444P10BE: base_fmt = AV_PIX_FMT_YUV444P10BE; break;
+ case AV_PIX_FMT_YUVA444P10LE: base_fmt = AV_PIX_FMT_YUV444P10LE; break;
+ case AV_PIX_FMT_YUVA420P16BE: base_fmt = AV_PIX_FMT_YUV420P16BE; break;
+ case AV_PIX_FMT_YUVA420P16LE: base_fmt = AV_PIX_FMT_YUV420P16LE; break;
+ case AV_PIX_FMT_YUVA422P16BE: base_fmt = AV_PIX_FMT_YUV422P16BE; break;
+ case AV_PIX_FMT_YUVA422P16LE: base_fmt = AV_PIX_FMT_YUV422P16LE; break;
+ case AV_PIX_FMT_YUVA444P16BE: base_fmt = AV_PIX_FMT_YUV444P16BE; break;
+ case AV_PIX_FMT_YUVA444P16LE: base_fmt = AV_PIX_FMT_YUV444P16LE; break;
+ case AV_PIX_FMT_YUVA444P: base_fmt = AV_PIX_FMT_YUV444P; break;
+ case AV_PIX_FMT_YUVA422P: base_fmt = AV_PIX_FMT_YUV422P; break;
+ }
+ av_assert0(base_fmt != AV_PIX_FMT_NONE);
+ if (*alpha) {
+ enum AVPixelFormat fmts[3] = {fmt, base_fmt, AV_PIX_FMT_NONE};
+ avctx->pix_fmt = avctx->get_format(avctx, fmts);
+ if (avctx->pix_fmt == base_fmt)
+ *alpha = 0;
+ } else
+ avctx->pix_fmt = base_fmt;
+}
diff --git a/libavcodec/vp56.c b/libavcodec/vp56.c
index 25801ea..a469356 100644
--- a/libavcodec/vp56.c
+++ b/libavcodec/vp56.c
@@ -530,7 +530,7 @@ int ff_vp56_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
if (ff_get_buffer(avctx, p, AV_GET_BUFFER_FLAG_REF) < 0)
return -1;
- if (s->has_alpha) {
+ if (avctx->pix_fmt == AV_PIX_FMT_YUVA420P) {
av_frame_unref(s->alpha_context->frames[VP56_FRAME_CURRENT]);
if ((ret = av_frame_ref(s->alpha_context->frames[VP56_FRAME_CURRENT], p)) < 0) {
av_frame_unref(p);
@@ -545,7 +545,7 @@ int ff_vp56_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
}
}
- if (s->has_alpha) {
+ if (avctx->pix_fmt == AV_PIX_FMT_YUVA420P) {
int bak_w = avctx->width;
int bak_h = avctx->height;
int bak_cw = avctx->coded_width;
@@ -567,7 +567,7 @@ int ff_vp56_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
}
}
- avctx->execute2(avctx, ff_vp56_decode_mbs, 0, 0, s->has_alpha + 1);
+ avctx->execute2(avctx, ff_vp56_decode_mbs, 0, 0, avctx->pix_fmt == AV_PIX_FMT_YUVA420P ? 2 : 1);
if ((res = av_frame_ref(data, p)) < 0)
return res;
@@ -687,9 +687,10 @@ av_cold int ff_vp56_init_context(AVCodecContext *avctx, VP56Context *s,
int flip, int has_alpha)
{
int i;
+ int has_alpha_tmp = has_alpha;
s->avctx = avctx;
- avctx->pix_fmt = has_alpha ? AV_PIX_FMT_YUVA420P : AV_PIX_FMT_YUV420P;
+ ff_get_format_alpha(avctx, AV_PIX_FMT_YUVA420P, &has_alpha_tmp);
ff_h264chroma_init(&s->h264chroma, 8);
ff_hpeldsp_init(&s->hdsp, avctx->flags);
--
1.8.4.rc3