[FFmpeg-devel] [PATCH 1/4] libavcodec/vc2enc: Split out common functions between software and hardware encoders
IndecisiveTurtle
geoster3d at gmail.com
Sat Mar 8 15:52:16 EET 2025
Hello Andreas,
Thank you for your feedback. I will resubmit the patch with the proper
implementation you described.
Could you elaborate on the array duplication comment? I don't see any other
place where it is defined.
On Sat, 8 Mar 2025 at 3:14 PM, Andreas Rheinhardt <
andreas.rheinhardt at outlook.com> wrote:
> IndecisiveTurtle:
> > From: IndecisiveTurtle <geoster3d at gmail.com>
> >
> > ---
> > libavcodec/Makefile | 2 +-
> > libavcodec/vc2enc.c | 515 +------------------------------------
> > libavcodec/vc2enc_common.c | 321 +++++++++++++++++++++++
> > libavcodec/vc2enc_common.h | 323 +++++++++++++++++++++++
> > 4 files changed, 653 insertions(+), 508 deletions(-)
> > create mode 100644 libavcodec/vc2enc_common.c
> > create mode 100644 libavcodec/vc2enc_common.h
> >
> > diff --git a/libavcodec/Makefile b/libavcodec/Makefile
> > index 499f826635..a96c700745 100644
> > --- a/libavcodec/Makefile
> > +++ b/libavcodec/Makefile
> > @@ -768,7 +768,7 @@ OBJS-$(CONFIG_VC1_CUVID_DECODER) += cuviddec.o
> > OBJS-$(CONFIG_VC1_MMAL_DECODER) += mmaldec.o
> > OBJS-$(CONFIG_VC1_QSV_DECODER) += qsvdec.o
> > OBJS-$(CONFIG_VC1_V4L2M2M_DECODER) += v4l2_m2m_dec.o
> > -OBJS-$(CONFIG_VC2_ENCODER) += vc2enc.o vc2enc_dwt.o diractab.o
> > +OBJS-$(CONFIG_VC2_ENCODER) += vc2enc.o vc2enc_dwt.o vc2enc_common.o diractab.o
> > OBJS-$(CONFIG_VCR1_DECODER) += vcr1.o
> > OBJS-$(CONFIG_VMDAUDIO_DECODER) += vmdaudio.o
> > OBJS-$(CONFIG_VMDVIDEO_DECODER) += vmdvideo.o
> > diff --git a/libavcodec/vc2enc.c b/libavcodec/vc2enc.c
> > index b82370a753..712d0cf68a 100644
> > --- a/libavcodec/vc2enc.c
> > +++ b/libavcodec/vc2enc.c
> > @@ -29,506 +29,7 @@
> > #include "put_bits.h"
> > #include "version.h"
> >
> > -#include "vc2enc_dwt.h"
> > -#include "diractab.h"
> > -
> > -/* The limited size resolution of each slice forces us to do this */
> > -#define SSIZE_ROUND(b) (FFALIGN((b), s->size_scaler) + 4 +
> s->prefix_bytes)
> > -
> > -/* Decides the cutoff point in # of slices to distribute the leftover
> bytes */
> > -#define SLICE_REDIST_TOTAL 150
> > -
> > -typedef struct VC2BaseVideoFormat {
> > - enum AVPixelFormat pix_fmt;
> > - AVRational time_base;
> > - int width, height;
> > - uint8_t interlaced, level;
> > - char name[13];
> > -} VC2BaseVideoFormat;
> > -
> > -static const VC2BaseVideoFormat base_video_fmts[] = {
> > - { 0 }, /* Custom format, here just to make indexing equal to
> base_vf */
> > - { AV_PIX_FMT_YUV420P, { 1001, 15000 }, 176, 120, 0, 1,
> "QSIF525" },
> > - { AV_PIX_FMT_YUV420P, { 2, 25 }, 176, 144, 0, 1,
> "QCIF" },
> > - { AV_PIX_FMT_YUV420P, { 1001, 15000 }, 352, 240, 0, 1,
> "SIF525" },
> > - { AV_PIX_FMT_YUV420P, { 2, 25 }, 352, 288, 0, 1,
> "CIF" },
> > - { AV_PIX_FMT_YUV420P, { 1001, 15000 }, 704, 480, 0, 1,
> "4SIF525" },
> > - { AV_PIX_FMT_YUV420P, { 2, 25 }, 704, 576, 0, 1,
> "4CIF" },
> > -
> > - { AV_PIX_FMT_YUV422P10, { 1001, 30000 }, 720, 480, 1, 2,
> "SD480I-60" },
> > - { AV_PIX_FMT_YUV422P10, { 1, 25 }, 720, 576, 1, 2,
> "SD576I-50" },
> > -
> > - { AV_PIX_FMT_YUV422P10, { 1001, 60000 }, 1280, 720, 0, 3,
> "HD720P-60" },
> > - { AV_PIX_FMT_YUV422P10, { 1, 50 }, 1280, 720, 0, 3,
> "HD720P-50" },
> > - { AV_PIX_FMT_YUV422P10, { 1001, 30000 }, 1920, 1080, 1, 3,
> "HD1080I-60" },
> > - { AV_PIX_FMT_YUV422P10, { 1, 25 }, 1920, 1080, 1, 3,
> "HD1080I-50" },
> > - { AV_PIX_FMT_YUV422P10, { 1001, 60000 }, 1920, 1080, 0, 3,
> "HD1080P-60" },
> > - { AV_PIX_FMT_YUV422P10, { 1, 50 }, 1920, 1080, 0, 3,
> "HD1080P-50" },
> > -
> > - { AV_PIX_FMT_YUV444P12, { 1, 24 }, 2048, 1080, 0, 4,
> "DC2K" },
> > - { AV_PIX_FMT_YUV444P12, { 1, 24 }, 4096, 2160, 0, 5,
> "DC4K" },
> > -
> > - { AV_PIX_FMT_YUV422P10, { 1001, 60000 }, 3840, 2160, 0, 6, "UHDTV
> 4K-60" },
> > - { AV_PIX_FMT_YUV422P10, { 1, 50 }, 3840, 2160, 0, 6, "UHDTV
> 4K-50" },
> > -
> > - { AV_PIX_FMT_YUV422P10, { 1001, 60000 }, 7680, 4320, 0, 7, "UHDTV
> 8K-60" },
> > - { AV_PIX_FMT_YUV422P10, { 1, 50 }, 7680, 4320, 0, 7, "UHDTV
> 8K-50" },
> > -
> > - { AV_PIX_FMT_YUV422P10, { 1001, 24000 }, 1920, 1080, 0, 3,
> "HD1080P-24" },
> > - { AV_PIX_FMT_YUV422P10, { 1001, 30000 }, 720, 486, 1, 2, "SD
> Pro486" },
> > -};
> > -static const int base_video_fmts_len = FF_ARRAY_ELEMS(base_video_fmts);
> > -
> > -enum VC2_QM {
> > - VC2_QM_DEF = 0,
> > - VC2_QM_COL,
> > - VC2_QM_FLAT,
> > -
> > - VC2_QM_NB
> > -};
> > -
> > -typedef struct SubBand {
> > - dwtcoef *buf;
> > - ptrdiff_t stride;
> > - int width;
> > - int height;
> > -} SubBand;
> > -
> > -typedef struct Plane {
> > - SubBand band[MAX_DWT_LEVELS][4];
> > - dwtcoef *coef_buf;
> > - int width;
> > - int height;
> > - int dwt_width;
> > - int dwt_height;
> > - ptrdiff_t coef_stride;
> > -} Plane;
> > -
> > -typedef struct SliceArgs {
> > - const struct VC2EncContext *ctx;
> > - union {
> > - int cache[DIRAC_MAX_QUANT_INDEX];
> > - uint8_t *buf;
> > - };
> > - int x;
> > - int y;
> > - int quant_idx;
> > - int bits_ceil;
> > - int bits_floor;
> > - int bytes;
> > -} SliceArgs;
> > -
> > -typedef struct TransformArgs {
> > - const struct VC2EncContext *ctx;
> > - Plane *plane;
> > - const void *idata;
> > - ptrdiff_t istride;
> > - int field;
> > - VC2TransformContext t;
> > -} TransformArgs;
> > -
> > -typedef struct VC2EncContext {
> > - AVClass *av_class;
> > - PutBitContext pb;
> > - Plane plane[3];
> > - AVCodecContext *avctx;
> > - DiracVersionInfo ver;
> > -
> > - SliceArgs *slice_args;
> > - TransformArgs transform_args[3];
> > -
> > - /* For conversion from unsigned pixel values to signed */
> > - int diff_offset;
> > - int bpp;
> > - int bpp_idx;
> > -
> > - /* Picture number */
> > - uint32_t picture_number;
> > -
> > - /* Base video format */
> > - int base_vf;
> > - int level;
> > - int profile;
> > -
> > - /* Quantization matrix */
> > - uint8_t quant[MAX_DWT_LEVELS][4];
> > - int custom_quant_matrix;
> > -
> > - /* Division LUT */
> > - uint32_t qmagic_lut[116][2];
> > -
> > - int num_x; /* #slices horizontally */
> > - int num_y; /* #slices vertically */
> > - int prefix_bytes;
> > - int size_scaler;
> > - int chroma_x_shift;
> > - int chroma_y_shift;
> > -
> > - /* Rate control stuff */
> > - int frame_max_bytes;
> > - int slice_max_bytes;
> > - int slice_min_bytes;
> > - int q_ceil;
> > - int q_avg;
> > -
> > - /* Options */
> > - double tolerance;
> > - int wavelet_idx;
> > - int wavelet_depth;
> > - int strict_compliance;
> > - int slice_height;
> > - int slice_width;
> > - int interlaced;
> > - enum VC2_QM quant_matrix;
> > -
> > - /* Parse code state */
> > - uint32_t next_parse_offset;
> > - enum DiracParseCodes last_parse_code;
> > -} VC2EncContext;
> > -
> > -static av_always_inline void put_vc2_ue_uint(PutBitContext *pb,
> uint32_t val)
> > -{
> > - int i;
> > - int bits = 0;
> > - unsigned topbit = 1, maxval = 1;
> > - uint64_t pbits = 0;
> > -
> > - if (!val++) {
> > - put_bits(pb, 1, 1);
> > - return;
> > - }
> > -
> > - while (val > maxval) {
> > - topbit <<= 1;
> > - maxval <<= 1;
> > - maxval |= 1;
> > - }
> > -
> > - bits = ff_log2(topbit);
> > -
> > - for (i = 0; i < bits; i++) {
> > - topbit >>= 1;
> > - av_assert2(pbits <= UINT64_MAX>>3);
> > - pbits <<= 2;
> > - if (val & topbit)
> > - pbits |= 0x1;
> > - }
> > -
> > - put_bits64(pb, bits*2 + 1, (pbits << 1) | 1);
> > -}
> > -
> > -static av_always_inline int count_vc2_ue_uint(uint32_t val)
> > -{
> > - int topbit = 1, maxval = 1;
> > -
> > - if (!val++)
> > - return 1;
> > -
> > - while (val > maxval) {
> > - topbit <<= 1;
> > - maxval <<= 1;
> > - maxval |= 1;
> > - }
> > -
> > - return ff_log2(topbit)*2 + 1;
> > -}
> > -
> > -/* VC-2 10.4 - parse_info() */
> > -static void encode_parse_info(VC2EncContext *s, enum DiracParseCodes
> pcode)
> > -{
> > - uint32_t cur_pos, dist;
> > -
> > - align_put_bits(&s->pb);
> > -
> > - cur_pos = put_bytes_count(&s->pb, 0);
> > -
> > - /* Magic string */
> > - ff_put_string(&s->pb, "BBCD", 0);
> > -
> > - /* Parse code */
> > - put_bits(&s->pb, 8, pcode);
> > -
> > - /* Next parse offset */
> > - dist = cur_pos - s->next_parse_offset;
> > - AV_WB32(s->pb.buf + s->next_parse_offset + 5, dist);
> > - s->next_parse_offset = cur_pos;
> > - put_bits32(&s->pb, pcode == DIRAC_PCODE_END_SEQ ? 13 : 0);
> > -
> > - /* Last parse offset */
> > - put_bits32(&s->pb, s->last_parse_code == DIRAC_PCODE_END_SEQ ? 13 :
> dist);
> > -
> > - s->last_parse_code = pcode;
> > -}
> > -
> > -/* VC-2 11.1 - parse_parameters()
> > - * The level dictates what the decoder should expect in terms of
> resolution
> > - * and allows it to quickly reject whatever it can't support. Remember,
> > - * this codec kinda targets cheapo FPGAs without much memory.
> Unfortunately
> > - * it also limits us greatly in our choice of formats, hence the flag
> to disable
> > - * strict_compliance */
> > -static void encode_parse_params(VC2EncContext *s)
> > -{
> > - put_vc2_ue_uint(&s->pb, s->ver.major); /* VC-2 demands this to be 2
> */
> > - put_vc2_ue_uint(&s->pb, s->ver.minor); /* ^^ and this to be 0
> */
> > - put_vc2_ue_uint(&s->pb, s->profile); /* 3 to signal HQ profile
> */
> > - put_vc2_ue_uint(&s->pb, s->level); /* 3 - 1080/720, 6 - 4K
> */
> > -}
> > -
> > -/* VC-2 11.3 - frame_size() */
> > -static void encode_frame_size(VC2EncContext *s)
> > -{
> > - put_bits(&s->pb, 1, !s->strict_compliance);
> > - if (!s->strict_compliance) {
> > - AVCodecContext *avctx = s->avctx;
> > - put_vc2_ue_uint(&s->pb, avctx->width);
> > - put_vc2_ue_uint(&s->pb, avctx->height);
> > - }
> > -}
> > -
> > -/* VC-2 11.3.3 - color_diff_sampling_format() */
> > -static void encode_sample_fmt(VC2EncContext *s)
> > -{
> > - put_bits(&s->pb, 1, !s->strict_compliance);
> > - if (!s->strict_compliance) {
> > - int idx;
> > - if (s->chroma_x_shift == 1 && s->chroma_y_shift == 0)
> > - idx = 1; /* 422 */
> > - else if (s->chroma_x_shift == 1 && s->chroma_y_shift == 1)
> > - idx = 2; /* 420 */
> > - else
> > - idx = 0; /* 444 */
> > - put_vc2_ue_uint(&s->pb, idx);
> > - }
> > -}
> > -
> > -/* VC-2 11.3.4 - scan_format() */
> > -static void encode_scan_format(VC2EncContext *s)
> > -{
> > - put_bits(&s->pb, 1, !s->strict_compliance);
> > - if (!s->strict_compliance)
> > - put_vc2_ue_uint(&s->pb, s->interlaced);
> > -}
> > -
> > -/* VC-2 11.3.5 - frame_rate() */
> > -static void encode_frame_rate(VC2EncContext *s)
> > -{
> > - put_bits(&s->pb, 1, !s->strict_compliance);
> > - if (!s->strict_compliance) {
> > - AVCodecContext *avctx = s->avctx;
> > - put_vc2_ue_uint(&s->pb, 0);
> > - put_vc2_ue_uint(&s->pb, avctx->time_base.den);
> > - put_vc2_ue_uint(&s->pb, avctx->time_base.num);
> > - }
> > -}
> > -
> > -/* VC-2 11.3.6 - aspect_ratio() */
> > -static void encode_aspect_ratio(VC2EncContext *s)
> > -{
> > - put_bits(&s->pb, 1, !s->strict_compliance);
> > - if (!s->strict_compliance) {
> > - AVCodecContext *avctx = s->avctx;
> > - put_vc2_ue_uint(&s->pb, 0);
> > - put_vc2_ue_uint(&s->pb, avctx->sample_aspect_ratio.num);
> > - put_vc2_ue_uint(&s->pb, avctx->sample_aspect_ratio.den);
> > - }
> > -}
> > -
> > -/* VC-2 11.3.7 - clean_area() */
> > -static void encode_clean_area(VC2EncContext *s)
> > -{
> > - put_bits(&s->pb, 1, 0);
> > -}
> > -
> > -/* VC-2 11.3.8 - signal_range() */
> > -static void encode_signal_range(VC2EncContext *s)
> > -{
> > - put_bits(&s->pb, 1, !s->strict_compliance);
> > - if (!s->strict_compliance)
> > - put_vc2_ue_uint(&s->pb, s->bpp_idx);
> > -}
> > -
> > -/* VC-2 11.3.9 - color_spec() */
> > -static void encode_color_spec(VC2EncContext *s)
> > -{
> > - AVCodecContext *avctx = s->avctx;
> > - put_bits(&s->pb, 1, !s->strict_compliance);
> > - if (!s->strict_compliance) {
> > - int val;
> > - put_vc2_ue_uint(&s->pb, 0);
> > -
> > - /* primaries */
> > - put_bits(&s->pb, 1, 1);
> > - if (avctx->color_primaries == AVCOL_PRI_BT470BG)
> > - val = 2;
> > - else if (avctx->color_primaries == AVCOL_PRI_SMPTE170M)
> > - val = 1;
> > - else if (avctx->color_primaries == AVCOL_PRI_SMPTE240M)
> > - val = 1;
> > - else
> > - val = 0;
> > - put_vc2_ue_uint(&s->pb, val);
> > -
> > - /* color matrix */
> > - put_bits(&s->pb, 1, 1);
> > - if (avctx->colorspace == AVCOL_SPC_RGB)
> > - val = 3;
> > - else if (avctx->colorspace == AVCOL_SPC_YCOCG)
> > - val = 2;
> > - else if (avctx->colorspace == AVCOL_SPC_BT470BG)
> > - val = 1;
> > - else
> > - val = 0;
> > - put_vc2_ue_uint(&s->pb, val);
> > -
> > - /* transfer function */
> > - put_bits(&s->pb, 1, 1);
> > - if (avctx->color_trc == AVCOL_TRC_LINEAR)
> > - val = 2;
> > - else if (avctx->color_trc == AVCOL_TRC_BT1361_ECG)
> > - val = 1;
> > - else
> > - val = 0;
> > - put_vc2_ue_uint(&s->pb, val);
> > - }
> > -}
> > -
> > -/* VC-2 11.3 - source_parameters() */
> > -static void encode_source_params(VC2EncContext *s)
> > -{
> > - encode_frame_size(s);
> > - encode_sample_fmt(s);
> > - encode_scan_format(s);
> > - encode_frame_rate(s);
> > - encode_aspect_ratio(s);
> > - encode_clean_area(s);
> > - encode_signal_range(s);
> > - encode_color_spec(s);
> > -}
> > -
> > -/* VC-2 11 - sequence_header() */
> > -static void encode_seq_header(VC2EncContext *s)
> > -{
> > - align_put_bits(&s->pb);
> > - encode_parse_params(s);
> > - put_vc2_ue_uint(&s->pb, s->base_vf);
> > - encode_source_params(s);
> > - put_vc2_ue_uint(&s->pb, s->interlaced); /* Frames or fields coding
> */
> > -}
> > -
> > -/* VC-2 12.1 - picture_header() */
> > -static void encode_picture_header(VC2EncContext *s)
> > -{
> > - align_put_bits(&s->pb);
> > - put_bits32(&s->pb, s->picture_number++);
> > -}
> > -
> > -/* VC-2 12.3.4.1 - slice_parameters() */
> > -static void encode_slice_params(VC2EncContext *s)
> > -{
> > - put_vc2_ue_uint(&s->pb, s->num_x);
> > - put_vc2_ue_uint(&s->pb, s->num_y);
> > - put_vc2_ue_uint(&s->pb, s->prefix_bytes);
> > - put_vc2_ue_uint(&s->pb, s->size_scaler);
> > -}
> > -
> > -/* 1st idx = LL, second - vertical, third - horizontal, fourth - total
> */
> > -static const uint8_t vc2_qm_col_tab[][4] = {
> > - {20, 9, 15, 4},
> > - { 0, 6, 6, 4},
> > - { 0, 3, 3, 5},
> > - { 0, 3, 5, 1},
> > - { 0, 11, 10, 11}
> > -};
> > -
> > -static const uint8_t vc2_qm_flat_tab[][4] = {
> > - { 0, 0, 0, 0},
> > - { 0, 0, 0, 0},
> > - { 0, 0, 0, 0},
> > - { 0, 0, 0, 0},
> > - { 0, 0, 0, 0}
> > -};
> > -
> > -static void init_quant_matrix(VC2EncContext *s)
> > -{
> > - int level, orientation;
> > -
> > - if (s->wavelet_depth <= 4 && s->quant_matrix == VC2_QM_DEF) {
> > - s->custom_quant_matrix = 0;
> > - for (level = 0; level < s->wavelet_depth; level++) {
> > - s->quant[level][0] =
> ff_dirac_default_qmat[s->wavelet_idx][level][0];
> > - s->quant[level][1] =
> ff_dirac_default_qmat[s->wavelet_idx][level][1];
> > - s->quant[level][2] =
> ff_dirac_default_qmat[s->wavelet_idx][level][2];
> > - s->quant[level][3] =
> ff_dirac_default_qmat[s->wavelet_idx][level][3];
> > - }
> > - return;
> > - }
> > -
> > - s->custom_quant_matrix = 1;
> > -
> > - if (s->quant_matrix == VC2_QM_DEF) {
> > - for (level = 0; level < s->wavelet_depth; level++) {
> > - for (orientation = 0; orientation < 4; orientation++) {
> > - if (level <= 3)
> > - s->quant[level][orientation] =
> ff_dirac_default_qmat[s->wavelet_idx][level][orientation];
> > - else
> > - s->quant[level][orientation] =
> vc2_qm_col_tab[level][orientation];
> > - }
> > - }
> > - } else if (s->quant_matrix == VC2_QM_COL) {
> > - for (level = 0; level < s->wavelet_depth; level++) {
> > - for (orientation = 0; orientation < 4; orientation++) {
> > - s->quant[level][orientation] =
> vc2_qm_col_tab[level][orientation];
> > - }
> > - }
> > - } else {
> > - for (level = 0; level < s->wavelet_depth; level++) {
> > - for (orientation = 0; orientation < 4; orientation++) {
> > - s->quant[level][orientation] =
> vc2_qm_flat_tab[level][orientation];
> > - }
> > - }
> > - }
> > -}
> > -
> > -/* VC-2 12.3.4.2 - quant_matrix() */
> > -static void encode_quant_matrix(VC2EncContext *s)
> > -{
> > - int level;
> > - put_bits(&s->pb, 1, s->custom_quant_matrix);
> > - if (s->custom_quant_matrix) {
> > - put_vc2_ue_uint(&s->pb, s->quant[0][0]);
> > - for (level = 0; level < s->wavelet_depth; level++) {
> > - put_vc2_ue_uint(&s->pb, s->quant[level][1]);
> > - put_vc2_ue_uint(&s->pb, s->quant[level][2]);
> > - put_vc2_ue_uint(&s->pb, s->quant[level][3]);
> > - }
> > - }
> > -}
> > -
> > -/* VC-2 12.3 - transform_parameters() */
> > -static void encode_transform_params(VC2EncContext *s)
> > -{
> > - put_vc2_ue_uint(&s->pb, s->wavelet_idx);
> > - put_vc2_ue_uint(&s->pb, s->wavelet_depth);
> > -
> > - encode_slice_params(s);
> > - encode_quant_matrix(s);
> > -}
> > -
> > -/* VC-2 12.2 - wavelet_transform() */
> > -static void encode_wavelet_transform(VC2EncContext *s)
> > -{
> > - encode_transform_params(s);
> > - align_put_bits(&s->pb);
> > -}
> > -
> > -/* VC-2 12 - picture_parse() */
> > -static void encode_picture_start(VC2EncContext *s)
> > -{
> > - align_put_bits(&s->pb);
> > - encode_picture_header(s);
> > - align_put_bits(&s->pb);
> > - encode_wavelet_transform(s);
> > -}
> > +#include "vc2enc_common.h"
> >
> > #define QUANT(c, mul, add, shift) (((mul) * (c) + (add)) >> (shift))
> >
> > @@ -658,7 +159,7 @@ static int calc_slice_sizes(VC2EncContext *s)
> > SliceArgs *enc_args = s->slice_args;
> > SliceArgs *top_loc[SLICE_REDIST_TOTAL] = {NULL};
> >
> > - init_quant_matrix(s);
> > + ff_vc2_init_quant_matrix(s);
> >
> > for (slice_y = 0; slice_y < s->num_y; slice_y++) {
> > for (slice_x = 0; slice_x < s->num_x; slice_x++) {
> > @@ -931,24 +432,24 @@ static int encode_frame(VC2EncContext *s, AVPacket
> *avpkt, const AVFrame *frame,
> > }
> >
> > /* Sequence header */
> > - encode_parse_info(s, DIRAC_PCODE_SEQ_HEADER);
> > - encode_seq_header(s);
> > + ff_vc2_encode_parse_info(s, DIRAC_PCODE_SEQ_HEADER);
> > + ff_vc2_encode_seq_header(s);
> >
> > /* Encoder version */
> > if (aux_data) {
> > - encode_parse_info(s, DIRAC_PCODE_AUX);
> > + ff_vc2_encode_parse_info(s, DIRAC_PCODE_AUX);
> > ff_put_string(&s->pb, aux_data, 1);
> > }
> >
> > /* Picture header */
> > - encode_parse_info(s, DIRAC_PCODE_PICTURE_HQ);
> > - encode_picture_start(s);
> > + ff_vc2_encode_parse_info(s, DIRAC_PCODE_PICTURE_HQ);
> > + ff_vc2_encode_picture_start(s);
> >
> > /* Encode slices */
> > encode_slices(s);
> >
> > /* End sequence */
> > - encode_parse_info(s, DIRAC_PCODE_END_SEQ);
> > + ff_vc2_encode_parse_info(s, DIRAC_PCODE_END_SEQ);
> >
> > return 0;
> > }
> > diff --git a/libavcodec/vc2enc_common.c b/libavcodec/vc2enc_common.c
> > new file mode 100644
> > index 0000000000..2f40814f5a
> > --- /dev/null
> > +++ b/libavcodec/vc2enc_common.c
> > @@ -0,0 +1,321 @@
> > +/*
> > + * Copyright (C) 2016 Open Broadcast Systems Ltd.
> > + * Author 2016 Rostislav Pehlivanov <atomnuker at gmail.com>
> > + *
> > + * This file is part of FFmpeg.
> > + *
> > + * FFmpeg is free software; you can redistribute it and/or
> > + * modify it under the terms of the GNU Lesser General Public
> > + * License as published by the Free Software Foundation; either
> > + * version 2.1 of the License, or (at your option) any later version.
> > + *
> > + * FFmpeg is distributed in the hope that it will be useful,
> > + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> > + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
> > + * Lesser General Public License for more details.
> > + *
> > + * You should have received a copy of the GNU Lesser General Public
> > + * License along with FFmpeg; if not, write to the Free Software
> > + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
> 02110-1301 USA
> > + */
> > +
> > +#include "vc2enc_common.h"
> > +
> > +/* VC-2 10.4 - parse_info() */
> > +void ff_vc2_encode_parse_info(VC2EncContext *s, enum DiracParseCodes
> pcode)
> > +{
> > + uint32_t cur_pos, dist;
> > +
> > + align_put_bits(&s->pb);
> > +
> > + cur_pos = put_bytes_count(&s->pb, 0);
> > +
> > + /* Magic string */
> > + ff_put_string(&s->pb, "BBCD", 0);
> > +
> > + /* Parse code */
> > + put_bits(&s->pb, 8, pcode);
> > +
> > + /* Next parse offset */
> > + dist = cur_pos - s->next_parse_offset;
> > + AV_WB32(s->pb.buf + s->next_parse_offset + 5, dist);
> > + s->next_parse_offset = cur_pos;
> > + put_bits32(&s->pb, pcode == DIRAC_PCODE_END_SEQ ? 13 : 0);
> > +
> > + cur_pos = put_bytes_count(&s->pb, 0);
> > +
> > + /* Last parse offset */
> > + put_bits32(&s->pb, s->last_parse_code == DIRAC_PCODE_END_SEQ ? 13 :
> dist);
> > +
> > + s->last_parse_code = pcode;
> > +}
> > +
> > +/* VC-2 11.1 - parse_parameters()
> > + * The level dictates what the decoder should expect in terms of
> resolution
> > + * and allows it to quickly reject whatever it can't support. Remember,
> > + * this codec kinda targets cheapo FPGAs without much memory.
> Unfortunately
> > + * it also limits us greatly in our choice of formats, hence the flag
> to disable
> > + * strict_compliance */
> > +static void encode_parse_params(VC2EncContext *s)
> > +{
> > + put_vc2_ue_uint(&s->pb, s->ver.major); /* VC-2 demands this to be 2
> */
> > + put_vc2_ue_uint(&s->pb, s->ver.minor); /* ^^ and this to be 0
> */
> > + put_vc2_ue_uint(&s->pb, s->profile); /* 3 to signal HQ profile
> */
> > + put_vc2_ue_uint(&s->pb, s->level); /* 3 - 1080/720, 6 - 4K
> */
> > +}
> > +
> > +/* VC-2 11.3 - frame_size() */
> > +static void encode_frame_size(VC2EncContext *s)
> > +{
> > + put_bits(&s->pb, 1, !s->strict_compliance);
> > + if (!s->strict_compliance) {
> > + AVCodecContext *avctx = s->avctx;
> > + put_vc2_ue_uint(&s->pb, avctx->width);
> > + put_vc2_ue_uint(&s->pb, avctx->height);
> > + }
> > +}
> > +
> > +/* VC-2 11.3.3 - color_diff_sampling_format() */
> > +static void encode_sample_fmt(VC2EncContext *s)
> > +{
> > + put_bits(&s->pb, 1, !s->strict_compliance);
> > + if (!s->strict_compliance) {
> > + int idx;
> > + if (s->chroma_x_shift == 1 && s->chroma_y_shift == 0)
> > + idx = 1; /* 422 */
> > + else if (s->chroma_x_shift == 1 && s->chroma_y_shift == 1)
> > + idx = 2; /* 420 */
> > + else
> > + idx = 0; /* 444 */
> > + put_vc2_ue_uint(&s->pb, idx);
> > + }
> > +}
> > +
> > +/* VC-2 11.3.4 - scan_format() */
> > +static void encode_scan_format(VC2EncContext *s)
> > +{
> > + put_bits(&s->pb, 1, !s->strict_compliance);
> > + if (!s->strict_compliance)
> > + put_vc2_ue_uint(&s->pb, s->interlaced);
> > +}
> > +
> > +/* VC-2 11.3.5 - frame_rate() */
> > +static void encode_frame_rate(VC2EncContext *s)
> > +{
> > + put_bits(&s->pb, 1, !s->strict_compliance);
> > + if (!s->strict_compliance) {
> > + AVCodecContext *avctx = s->avctx;
> > + put_vc2_ue_uint(&s->pb, 0);
> > + put_vc2_ue_uint(&s->pb, avctx->time_base.den);
> > + put_vc2_ue_uint(&s->pb, avctx->time_base.num);
> > + }
> > +}
> > +
> > +/* VC-2 11.3.6 - aspect_ratio() */
> > +static void encode_aspect_ratio(VC2EncContext *s)
> > +{
> > + put_bits(&s->pb, 1, !s->strict_compliance);
> > + if (!s->strict_compliance) {
> > + AVCodecContext *avctx = s->avctx;
> > + put_vc2_ue_uint(&s->pb, 0);
> > + put_vc2_ue_uint(&s->pb, avctx->sample_aspect_ratio.num);
> > + put_vc2_ue_uint(&s->pb, avctx->sample_aspect_ratio.den);
> > + }
> > +}
> > +
> > +/* VC-2 11.3.7 - clean_area() */
> > +static void encode_clean_area(VC2EncContext *s)
> > +{
> > + put_bits(&s->pb, 1, 0);
> > +}
> > +
> > +/* VC-2 11.3.8 - signal_range() */
> > +static void encode_signal_range(VC2EncContext *s)
> > +{
> > + put_bits(&s->pb, 1, !s->strict_compliance);
> > + if (!s->strict_compliance)
> > + put_vc2_ue_uint(&s->pb, s->bpp_idx);
> > +}
> > +
> > +/* VC-2 11.3.9 - color_spec() */
> > +static void encode_color_spec(VC2EncContext *s)
> > +{
> > + AVCodecContext *avctx = s->avctx;
> > + put_bits(&s->pb, 1, !s->strict_compliance);
> > + if (!s->strict_compliance) {
> > + int val;
> > + put_vc2_ue_uint(&s->pb, 0);
> > +
> > + /* primaries */
> > + put_bits(&s->pb, 1, 1);
> > + if (avctx->color_primaries == AVCOL_PRI_BT470BG)
> > + val = 2;
> > + else if (avctx->color_primaries == AVCOL_PRI_SMPTE170M)
> > + val = 1;
> > + else if (avctx->color_primaries == AVCOL_PRI_SMPTE240M)
> > + val = 1;
> > + else
> > + val = 0;
> > + put_vc2_ue_uint(&s->pb, val);
> > +
> > + /* color matrix */
> > + put_bits(&s->pb, 1, 1);
> > + if (avctx->colorspace == AVCOL_SPC_RGB)
> > + val = 3;
> > + else if (avctx->colorspace == AVCOL_SPC_YCOCG)
> > + val = 2;
> > + else if (avctx->colorspace == AVCOL_SPC_BT470BG)
> > + val = 1;
> > + else
> > + val = 0;
> > + put_vc2_ue_uint(&s->pb, val);
> > +
> > + /* transfer function */
> > + put_bits(&s->pb, 1, 1);
> > + if (avctx->color_trc == AVCOL_TRC_LINEAR)
> > + val = 2;
> > + else if (avctx->color_trc == AVCOL_TRC_BT1361_ECG)
> > + val = 1;
> > + else
> > + val = 0;
> > + put_vc2_ue_uint(&s->pb, val);
> > + }
> > +}
> > +
> > +/* VC-2 11.3 - source_parameters() */
> > +static void encode_source_params(VC2EncContext *s)
> > +{
> > + encode_frame_size(s);
> > + encode_sample_fmt(s);
> > + encode_scan_format(s);
> > + encode_frame_rate(s);
> > + encode_aspect_ratio(s);
> > + encode_clean_area(s);
> > + encode_signal_range(s);
> > + encode_color_spec(s);
> > +}
> > +
> > +/* VC-2 11 - sequence_header() */
> > +void ff_vc2_encode_seq_header(VC2EncContext *s)
> > +{
> > + align_put_bits(&s->pb);
> > + encode_parse_params(s);
> > + put_vc2_ue_uint(&s->pb, s->base_vf);
> > + encode_source_params(s);
> > + put_vc2_ue_uint(&s->pb, s->interlaced); /* Frames or fields coding
> */
> > +}
> > +
> > +/* VC-2 12.1 - picture_header() */
> > +static void encode_picture_header(VC2EncContext *s)
> > +{
> > + align_put_bits(&s->pb);
> > + put_bits32(&s->pb, s->picture_number++);
> > +}
> > +
> > +/* VC-2 12.3.4.1 - slice_parameters() */
> > +static void encode_slice_params(VC2EncContext *s)
> > +{
> > + put_vc2_ue_uint(&s->pb, s->num_x);
> > + put_vc2_ue_uint(&s->pb, s->num_y);
> > + put_vc2_ue_uint(&s->pb, s->prefix_bytes);
> > + put_vc2_ue_uint(&s->pb, s->size_scaler);
> > +}
> > +
> > +/* 1st idx = LL, second - vertical, third - horizontal, fourth - total
> */
> > +static const uint8_t vc2_qm_col_tab[][4] = {
> > + {20, 9, 15, 4},
> > + { 0, 6, 6, 4},
> > + { 0, 3, 3, 5},
> > + { 0, 3, 5, 1},
> > + { 0, 11, 10, 11}
> > +};
> > +
> > +static const uint8_t vc2_qm_flat_tab[][4] = {
> > + { 0, 0, 0, 0},
> > + { 0, 0, 0, 0},
> > + { 0, 0, 0, 0},
> > + { 0, 0, 0, 0},
> > + { 0, 0, 0, 0}
> > +};
> > +
> > +void ff_vc2_init_quant_matrix(VC2EncContext *s)
> > +{
> > + int level, orientation;
> > +
> > + if (s->wavelet_depth <= 4 && s->quant_matrix == VC2_QM_DEF) {
> > + s->custom_quant_matrix = 0;
> > + for (level = 0; level < s->wavelet_depth; level++) {
> > + s->quant[level][0] =
> ff_dirac_default_qmat[s->wavelet_idx][level][0];
> > + s->quant[level][1] =
> ff_dirac_default_qmat[s->wavelet_idx][level][1];
> > + s->quant[level][2] =
> ff_dirac_default_qmat[s->wavelet_idx][level][2];
> > + s->quant[level][3] =
> ff_dirac_default_qmat[s->wavelet_idx][level][3];
> > + }
> > + return;
> > + }
> > +
> > + s->custom_quant_matrix = 1;
> > +
> > + if (s->quant_matrix == VC2_QM_DEF) {
> > + for (level = 0; level < s->wavelet_depth; level++) {
> > + for (orientation = 0; orientation < 4; orientation++) {
> > + if (level <= 3)
> > + s->quant[level][orientation] =
> ff_dirac_default_qmat[s->wavelet_idx][level][orientation];
> > + else
> > + s->quant[level][orientation] =
> vc2_qm_col_tab[level][orientation];
> > + }
> > + }
> > + } else if (s->quant_matrix == VC2_QM_COL) {
> > + for (level = 0; level < s->wavelet_depth; level++) {
> > + for (orientation = 0; orientation < 4; orientation++) {
> > + s->quant[level][orientation] =
> vc2_qm_col_tab[level][orientation];
> > + }
> > + }
> > + } else {
> > + for (level = 0; level < s->wavelet_depth; level++) {
> > + for (orientation = 0; orientation < 4; orientation++) {
> > + s->quant[level][orientation] =
> vc2_qm_flat_tab[level][orientation];
> > + }
> > + }
> > + }
> > +}
> > +
> > +/* VC-2 12.3.4.2 - quant_matrix() */
> > +static void encode_quant_matrix(VC2EncContext *s)
> > +{
> > + int level;
> > + put_bits(&s->pb, 1, s->custom_quant_matrix);
> > + if (s->custom_quant_matrix) {
> > + put_vc2_ue_uint(&s->pb, s->quant[0][0]);
> > + for (level = 0; level < s->wavelet_depth; level++) {
> > + put_vc2_ue_uint(&s->pb, s->quant[level][1]);
> > + put_vc2_ue_uint(&s->pb, s->quant[level][2]);
> > + put_vc2_ue_uint(&s->pb, s->quant[level][3]);
> > + }
> > + }
> > +}
> > +
> > +/* VC-2 12.3 - transform_parameters() */
> > +static void encode_transform_params(VC2EncContext *s)
> > +{
> > + put_vc2_ue_uint(&s->pb, s->wavelet_idx);
> > + put_vc2_ue_uint(&s->pb, s->wavelet_depth);
> > +
> > + encode_slice_params(s);
> > + encode_quant_matrix(s);
> > +}
> > +
> > +/* VC-2 12.2 - wavelet_transform() */
> > +static void encode_wavelet_transform(VC2EncContext *s)
> > +{
> > + encode_transform_params(s);
> > + align_put_bits(&s->pb);
> > +}
> > +
> > +/* VC-2 12 - picture_parse() */
> > +void ff_vc2_encode_picture_start(VC2EncContext *s)
> > +{
> > + align_put_bits(&s->pb);
> > + encode_picture_header(s);
> > + align_put_bits(&s->pb);
> > + encode_wavelet_transform(s);
> > +}
> > diff --git a/libavcodec/vc2enc_common.h b/libavcodec/vc2enc_common.h
> > new file mode 100644
> > index 0000000000..72e944c8e6
> > --- /dev/null
> > +++ b/libavcodec/vc2enc_common.h
> > @@ -0,0 +1,323 @@
> > +/*
> > + * Copyright (C) 2016 Open Broadcast Systems Ltd.
> > + * Author 2016 Rostislav Pehlivanov <atomnuker at gmail.com>
> > + *
> > + * This file is part of FFmpeg.
> > + *
> > + * FFmpeg is free software; you can redistribute it and/or
> > + * modify it under the terms of the GNU Lesser General Public
> > + * License as published by the Free Software Foundation; either
> > + * version 2.1 of the License, or (at your option) any later version.
> > + *
> > + * FFmpeg is distributed in the hope that it will be useful,
> > + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> > + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
> > + * Lesser General Public License for more details.
> > + *
> > + * You should have received a copy of the GNU Lesser General Public
> > + * License along with FFmpeg; if not, write to the Free Software
> > + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
> 02110-1301 USA
> > + */
> > +
> > +#ifndef AVCODEC_VC2ENC_COMMON_H
> > +#define AVCODEC_VC2ENC_COMMON_H
> > +
> > +#include "avcodec.h"
> > +#include "dirac.h"
> > +#include "put_bits.h"
> > +
> > +#include "vc2enc_dwt.h"
> > +#include "diractab.h"
> > +#include "libavutil/vulkan.h"
>
> This header relies on vulkan headers being present and must therefore
> not be included by any non-vulkan code.
>
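(A minimal sketch of how the includes could be arranged so that only the
Vulkan encoder ever pulls in libavutil/vulkan.h; the file name
vc2enc_vulkan.c below is only a placeholder for the hardware encoder
presumably added later in this series.)

    /* vc2enc_common.h -- shared header, no Vulkan dependency */
    #include "avcodec.h"
    #include "put_bits.h"
    #include "vc2enc_dwt.h"
    #include "diractab.h"
    /* shared structs and ff_vc2_* prototypes only */

    /* vc2enc_vulkan.c (placeholder name) -- the only code that sees Vulkan */
    #include "libavutil/vulkan.h"
    #include "vc2enc_common.h"
    /* Vulkan-specific context, shaders and buffer pools live here */
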
> > +
> > +/* The limited size resolution of each slice forces us to do this */
> > +#define SSIZE_ROUND(b) (FFALIGN((b), s->size_scaler) + 4 +
> s->prefix_bytes)
> > +
> > +/* Decides the cutoff point in # of slices to distribute the leftover
> bytes */
> > +#define SLICE_REDIST_TOTAL 150
> > +
> > +typedef struct VC2BaseVideoFormat {
> > + enum AVPixelFormat pix_fmt;
> > + AVRational time_base;
> > + int width, height;
> > + uint8_t interlaced, level;
> > + char name[13];
> > +} VC2BaseVideoFormat;
> > +
> > +static const VC2BaseVideoFormat base_video_fmts[] = {
> > + { 0 }, /* Custom format, here just to make indexing equal to
> base_vf */
> > + { AV_PIX_FMT_YUV420P, { 1001, 15000 }, 176, 120, 0, 1,
> "QSIF525" },
> > + { AV_PIX_FMT_YUV420P, { 2, 25 }, 176, 144, 0, 1,
> "QCIF" },
> > + { AV_PIX_FMT_YUV420P, { 1001, 15000 }, 352, 240, 0, 1,
> "SIF525" },
> > + { AV_PIX_FMT_YUV420P, { 2, 25 }, 352, 288, 0, 1,
> "CIF" },
> > + { AV_PIX_FMT_YUV420P, { 1001, 15000 }, 704, 480, 0, 1,
> "4SIF525" },
> > + { AV_PIX_FMT_YUV420P, { 2, 25 }, 704, 576, 0, 1,
> "4CIF" },
> > +
> > + { AV_PIX_FMT_YUV422P10, { 1001, 30000 }, 720, 480, 1, 2,
> "SD480I-60" },
> > + { AV_PIX_FMT_YUV422P10, { 1, 25 }, 720, 576, 1, 2,
> "SD576I-50" },
> > +
> > + { AV_PIX_FMT_YUV422P10, { 1001, 60000 }, 1280, 720, 0, 3,
> "HD720P-60" },
> > + { AV_PIX_FMT_YUV422P10, { 1, 50 }, 1280, 720, 0, 3,
> "HD720P-50" },
> > + { AV_PIX_FMT_YUV422P10, { 1001, 30000 }, 1920, 1080, 1, 3,
> "HD1080I-60" },
> > + { AV_PIX_FMT_YUV422P10, { 1, 25 }, 1920, 1080, 1, 3,
> "HD1080I-50" },
> > + { AV_PIX_FMT_YUV422P10, { 1001, 60000 }, 1920, 1080, 0, 3,
> "HD1080P-60" },
> > + { AV_PIX_FMT_YUV422P10, { 1, 50 }, 1920, 1080, 0, 3,
> "HD1080P-50" },
> > +
> > + { AV_PIX_FMT_YUV444P12, { 1, 24 }, 2048, 1080, 0, 4,
> "DC2K" },
> > + { AV_PIX_FMT_YUV444P12, { 1, 24 }, 4096, 2160, 0, 5,
> "DC4K" },
> > +
> > + { AV_PIX_FMT_YUV422P10, { 1001, 60000 }, 3840, 2160, 0, 6, "UHDTV
> 4K-60" },
> > + { AV_PIX_FMT_YUV422P10, { 1, 50 }, 3840, 2160, 0, 6, "UHDTV
> 4K-50" },
> > +
> > + { AV_PIX_FMT_YUV422P10, { 1001, 60000 }, 7680, 4320, 0, 7, "UHDTV
> 8K-60" },
> > + { AV_PIX_FMT_YUV422P10, { 1, 50 }, 7680, 4320, 0, 7, "UHDTV
> 8K-50" },
> > +
> > + { AV_PIX_FMT_YUV422P10, { 1001, 24000 }, 1920, 1080, 0, 3,
> "HD1080P-24" },
> > + { AV_PIX_FMT_YUV422P10, { 1001, 30000 }, 720, 486, 1, 2, "SD
> Pro486" },
> > +};
> > +static const int base_video_fmts_len = FF_ARRAY_ELEMS(base_video_fmts);
>
> Don't duplicate this array.
>
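(Guessing at the intent of the duplication comment: a static const array
defined in a header gets a separate copy in every translation unit that
includes it. The usual fix is to declare the array in the header and define
it exactly once in the .c file; the ff_ names below are only placeholders.)

    /* vc2enc_common.h */
    extern const VC2BaseVideoFormat ff_vc2_base_video_fmts[];
    extern const int ff_vc2_base_video_fmts_len;

    /* vc2enc_common.c */
    const VC2BaseVideoFormat ff_vc2_base_video_fmts[] = {
        { 0 }, /* custom format, keeps indexing equal to base_vf */
        { AV_PIX_FMT_YUV420P, { 1001, 15000 }, 176, 120, 0, 1, "QSIF525" },
        /* ... remaining entries exactly as before ... */
    };
    const int ff_vc2_base_video_fmts_len = FF_ARRAY_ELEMS(ff_vc2_base_video_fmts);
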
> > +
> > +enum VC2_QM {
> > + VC2_QM_DEF = 0,
> > + VC2_QM_COL,
> > + VC2_QM_FLAT,
> > +
> > + VC2_QM_NB
> > +};
> > +
> > +typedef struct SubBand {
> > + dwtcoef *buf;
> > + ptrdiff_t stride;
> > + int width;
> > + int height;
> > + int shift;
> > +} SubBand;
> > +
> > +typedef struct Plane {
> > + SubBand band[MAX_DWT_LEVELS][4];
> > + dwtcoef *coef_buf;
> > + int width;
> > + int height;
> > + int dwt_width;
> > + int dwt_height;
> > + ptrdiff_t coef_stride;
> > +} Plane;
> > +
> > +typedef struct SliceArgs {
> > + const struct VC2EncContext *ctx;
> > + union {
> > + int cache[DIRAC_MAX_QUANT_INDEX];
> > + uint8_t *buf;
> > + };
> > + int x;
> > + int y;
> > + int quant_idx;
> > + int bits_ceil;
> > + int bits_floor;
> > + int bytes;
> > +} SliceArgs;
> > +
> > +typedef struct TransformArgs {
> > + struct VC2EncContext *ctx;
> > + Plane *plane;
> > + const void *idata;
> > + ptrdiff_t istride;
> > + int field;
> > + VC2TransformContext t;
> > +} TransformArgs;
> > +
> > +typedef struct VC2DwtPlane {
> > + int width;
> > + int height;
> > + int dwt_width;
> > + int dwt_height;
> > +} VC2DwtPlane;
> > +
> > +typedef struct VC2DwtPushData {
> > + int s;
> > + union {
> > + int diff_offset;
> > + int plane_idx;
> > + };
> > + int level;
> > + VC2DwtPlane planes[3];
> > +} VC2DwtPushData;
> > +
> > +typedef struct VC2EncAuxData {
> > + uint32_t quant[MAX_DWT_LEVELS][4];
> > + int ff_dirac_qscale_tab[116];
> > +} VC2EncAuxData;
> > +
> > +typedef struct VC2EncPushData {
> > + VkDeviceAddress pb;
> > + VkDeviceAddress luts;
> > + VkDeviceAddress slice;
> > + int num_x;
> > + int num_y;
> > + VC2DwtPlane planes[3];
> > + int wavelet_depth;
> > + int size_scaler;
> > + int prefix_bytes;
> > +} VC2EncPushData;
> > +
> > +typedef struct VC2EncSliceArgs {
> > + int quant_idx;
> > + int bytes;
> > + int pb_start;
> > + int pad;
> > +} VC2EncSliceArgs;
> > +
> > +typedef struct VC2EncSliceCalcPushData {
> > + VkDeviceAddress luts;
> > + VkDeviceAddress slice;
> > + int num_x;
> > + int num_y;
> > + VC2DwtPlane planes[3];
> > + int wavelet_depth;
> > + int size_scaler;
> > + int prefix_bytes;
> > + int bits_ceil;
> > + int bits_floor;
> > +} VC2EncSliceCalcPushData;
> > +
> > +typedef struct VC2EncContext {
> > + AVClass *av_class;
> > + PutBitContext pb;
> > + Plane plane[3];
> > + AVCodecContext *avctx;
> > + DiracVersionInfo ver;
> > +
> > + SliceArgs *slice_args;
> > + VC2EncSliceArgs* vk_slice_args;
> > + TransformArgs transform_args[3];
> > +
> > + /* For conversion from unsigned pixel values to signed */
> > + int diff_offset;
> > + int bpp;
> > + int bpp_idx;
> > +
> > + /* Picture number */
> > + uint32_t picture_number;
> > +
> > + /* Base video format */
> > + int base_vf;
> > + int level;
> > + int profile;
> > +
> > + /* Quantization matrix */
> > + uint8_t quant[MAX_DWT_LEVELS][4];
> > + int custom_quant_matrix;
> > +
> > + /* Division LUT */
> > + uint32_t qmagic_lut[116][2];
> > +
> > + int num_x; /* #slices horizontally */
> > + int num_y; /* #slices vertically */
> > + int group_x;
> > + int group_y;
> > + int prefix_bytes;
> > + int size_scaler;
> > + int chroma_x_shift;
> > + int chroma_y_shift;
> > +
> > + /* Rate control stuff */
> > + int frame_max_bytes;
> > + int slice_max_bytes;
> > + int slice_min_bytes;
> > + int q_ceil;
> > + int q_avg;
> > +
> > + /* Options */
> > + double tolerance;
> > + int wavelet_idx;
> > + int wavelet_depth;
> > + int strict_compliance;
> > + int slice_height;
> > + int slice_width;
> > + int interlaced;
> > + enum VC2_QM quant_matrix;
> > +
> > + /* Parse code state */
> > + uint32_t next_parse_offset;
> > + enum DiracParseCodes last_parse_code;
> > +
> > + /* Vulkan state */
> > + FFVulkanContext vkctx;
> > + AVVulkanDeviceQueueFamily *qf;
> > + FFVkExecPool e;
> > +
> > + FFVulkanShader dwt_haar_shd;
> > + FFVulkanShader dwt_upload_shd;
> > + FFVulkanShader dwt_hor_shd, dwt_ver_shd;
> > + FFVulkanShader slice_shd;
> > + FFVulkanShader enc_shd;
>
> You are adding your vulkan stuff to the common header. This means that
> the software encoder's private context will be bloated with stuff it
> does not use at all, which is not how it should be done even if it could
> be done. But it can't be done anyway, given that the vulkan header is not
> always available.
>
> It also means that your commit message is absolutely wrong: You are not
> only splitting out, you are adding new stuff.
>
> Instead you should use a common base structure containing the stuff that
> the common functions need to access and use extended structures (one for
> each encoder) as private contexts for the encoders.
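(A rough sketch of that layout, with placeholder names: the shared
ff_vc2_* helpers keep taking a VC2EncContext *, and each encoder's FFCodec
sets priv_data_size to the size of its own extended struct.)

    /* vc2enc_common.h: only the fields the shared ff_vc2_* helpers touch */
    typedef struct VC2EncContext {
        AVClass *av_class;
        PutBitContext pb;
        AVCodecContext *avctx;
        /* quant matrices, parse-code state, options, ... */
    } VC2EncContext;

    /* vc2enc.c: software encoder extends the base context */
    typedef struct VC2SWEncContext {
        VC2EncContext base;   /* must be the first member */
        Plane plane[3];
        SliceArgs *slice_args;
        TransformArgs transform_args[3];
    } VC2SWEncContext;

    /* vc2enc_vulkan.c (placeholder name): hardware encoder keeps its own state */
    typedef struct VC2VulkanEncContext {
        VC2EncContext base;
        FFVulkanContext vkctx;
        FFVkExecPool e;
        /* shaders, buffer pools, push constants, ... */
    } VC2VulkanEncContext;

With the base struct as the first member, avctx->priv_data can still be
treated as a VC2EncContext * inside the common code, while Vulkan-only state
stays out of the software encoder's context entirely.
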
>
> > + AVBufferPool* dwt_buf_pool;
> > + int haar_subgroup;
> > +
> > + VkBuffer plane_buf, slice_buf;
> > + VC2EncPushData enc_consts;
> > + VC2DwtPushData dwt_consts;
> > + VC2EncSliceCalcPushData calc_consts;
> > +
> > + /* Intermediate frame pool */
> > + AVBufferRef *intermediate_frames_ref[3];
> > + AVFrame *intermediate_frame[AV_NUM_DATA_POINTERS];
> > + VkImageView intermediate_views[AV_NUM_DATA_POINTERS];
> > +} VC2EncContext;
> > +
> > +static inline void put_vc2_ue_uint(PutBitContext *pb, uint32_t val)
> > +{
> > + int i;
> > + int bits = 0;
> > + unsigned topbit = 1, maxval = 1;
> > + uint64_t pbits = 0;
> > +
> > + if (!val++) {
> > + put_bits(pb, 1, 1);
> > + return;
> > + }
> > +
> > + while (val > maxval) {
> > + topbit <<= 1;
> > + maxval <<= 1;
> > + maxval |= 1;
> > + }
> > +
> > + bits = ff_log2(topbit);
> > +
> > + for (i = 0; i < bits; i++) {
> > + topbit >>= 1;
> > + av_assert2(pbits <= UINT64_MAX>>3);
> > + pbits <<= 2;
> > + if (val & topbit)
> > + pbits |= 0x1;
> > + }
> > +
> > + put_bits64(pb, bits*2 + 1, (pbits << 1) | 1);
> > +}
> > +
> > +static inline int count_vc2_ue_uint(uint32_t val)
> > +{
> > + int topbit = 1, maxval = 1;
> > +
> > + if (!val++)
> > + return 1;
> > +
> > + while (val > maxval) {
> > + topbit <<= 1;
> > + maxval <<= 1;
> > + maxval |= 1;
> > + }
> > +
> > + return ff_log2(topbit)*2 + 1;
> > +}
> > +
> > +void ff_vc2_init_quant_matrix(VC2EncContext *s);
> > +
> > +void ff_vc2_encode_parse_info(VC2EncContext *s, enum DiracParseCodes
> pcode);
> > +
> > +void ff_vc2_encode_seq_header(VC2EncContext *s);
> > +
> > +void ff_vc2_encode_picture_start(VC2EncContext *s);
> > +
> > +#endif
>