[FFmpeg-devel] [PATCH 3/5] libavcodec: add VAAPI H.264 encoder
Mark Thompson
sw at jkqxz.net
Sun Jan 17 18:36:25 CET 2016
From 3a3c668ad55746e7313e6cf2b121a984ac5ca942 Mon Sep 17 00:00:00 2001
From: Mark Thompson <mrt at jkqxz.net>
Date: Sun, 17 Jan 2016 15:57:55 +0000
Subject: [PATCH 3/5] libavcodec: add VAAPI H.264 encoder
---
configure | 1 +
libavcodec/Makefile | 1 +
libavcodec/allcodecs.c | 1 +
libavcodec/vaapi_enc_h264.c | 944
++++++++++++++++++++++++++++++++++++++++++++
4 files changed, 947 insertions(+)
create mode 100644 libavcodec/vaapi_enc_h264.c
diff --git a/configure b/configure
index 1c77015..a31d65e 100755
--- a/configure
+++ b/configure
@@ -2499,6 +2499,7 @@ h264_mmal_encoder_deps="mmal"
h264_qsv_hwaccel_deps="libmfx"
h264_vaapi_hwaccel_deps="vaapi"
h264_vaapi_hwaccel_select="h264_decoder"
+h264_vaapi_encoder_deps="vaapi"
h264_vda_decoder_deps="vda"
h264_vda_decoder_select="h264_decoder"
h264_vda_hwaccel_deps="vda"
diff --git a/libavcodec/Makefile b/libavcodec/Makefile
index b9ffdb9..06b3c48 100644
--- a/libavcodec/Makefile
+++ b/libavcodec/Makefile
@@ -303,6 +303,7 @@ OBJS-$(CONFIG_H264_MMAL_DECODER) += mmaldec.o
OBJS-$(CONFIG_H264_VDA_DECODER) += vda_h264_dec.o
OBJS-$(CONFIG_H264_QSV_DECODER) += qsvdec_h2645.o
OBJS-$(CONFIG_H264_QSV_ENCODER) += qsvenc_h264.o
+OBJS-$(CONFIG_H264_VAAPI_ENCODER) += vaapi_enc_h264.o
OBJS-$(CONFIG_HAP_DECODER) += hapdec.o hap.o
OBJS-$(CONFIG_HAP_ENCODER) += hapenc.o hap.o
OBJS-$(CONFIG_HEVC_DECODER) += hevc.o hevc_mvs.o hevc_ps.o
hevc_sei.o \
diff --git a/libavcodec/allcodecs.c b/libavcodec/allcodecs.c
index 2128546..0d07087 100644
--- a/libavcodec/allcodecs.c
+++ b/libavcodec/allcodecs.c
@@ -199,6 +199,7 @@ void avcodec_register_all(void)
#if FF_API_VDPAU
REGISTER_DECODER(H264_VDPAU, h264_vdpau);
#endif
+ REGISTER_ENCODER(H264_VAAPI, h264_vaapi);
REGISTER_ENCDEC (HAP, hap);
REGISTER_DECODER(HEVC, hevc);
REGISTER_DECODER(HEVC_QSV, hevc_qsv);
diff --git a/libavcodec/vaapi_enc_h264.c b/libavcodec/vaapi_enc_h264.c
new file mode 100644
index 0000000..39c7236
--- /dev/null
+++ b/libavcodec/vaapi_enc_h264.c
@@ -0,0 +1,944 @@
+/*
+ * VAAPI H.264 encoder.
+ *
+ * Copyright (C) 2016 Mark Thompson <mrt at jkqxz.net>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
+ */
+
+#include "avcodec.h"
+#include "golomb.h"
+#include "put_bits.h"
+
+#include "h264.h"
+
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/vaapi.h"
+
+#define DPB_FRAMES 16
+#define INPUT_FRAMES 2
+
/**
 * Per-frame state held in the encoder's DPB array.
 */
typedef struct VAAPIH264EncodeFrame {
    AVFrame avframe;        // Holds the reconstructed-frame buffer references.
    VASurfaceID surface_id; // VAAPI surface associated with this frame.

    int frame_num;          // H.264 frame_num written into the slice header.
    enum {
        FRAME_TYPE_I,
        FRAME_TYPE_P,
        FRAME_TYPE_B,
    } type;

    VAPictureH264 pic;                    // Picture descriptor used when this frame is referenced.
    VAEncSliceParameterBufferH264 params; // Slice parameters for encoding this frame.
    VABufferID params_id;                 // VA buffer holding 'params'.

    VABufferID coded_data_id;             // VA buffer that receives the coded bitstream.

    // refp: first (list-0) reference; refb: second reference intended for
    // B-frames.  B-frames are not currently generated (see the av_assert0
    // in vaapi_h264_encode_picture()).
    struct VAAPIH264EncodeFrame *refp, *refb;
} VAAPIH264EncodeFrame;
+
/**
 * Private context for the VAAPI H.264 encoder.
 */
typedef struct VAAPIH264EncodeContext {
    const AVClass *class;

    AVVAAPIInstance va_instance;    // VAAPI display/instance handle.
    AVVAAPIPipelineConfig va_config;
    AVVAAPIPipelineContext va_codec;

    AVVAAPISurfaceConfig input_config;   // Surfaces for uploaded input frames.
    AVVAAPISurfaceConfig output_config;  // Surfaces for reconstructed frames.

    VAProfile va_profile;  // Selected from the "profile" option at init.
    int level;             // Parsed from the "level" option (level_idc value).
    int rc_mode;           // Rate-control mode; only VA_RC_CQP is used.
    int width;
    int height;

    VAEncSequenceParameterBufferH264 seq_params;
    VABufferID seq_params_id;

    // Rate-control parameter buffers; declared but not rendered anywhere in
    // this file.
    VAEncMiscParameterRateControl rc_params;
    VAEncMiscParameterBuffer rc_params_buffer;
    VABufferID rc_params_id;

    int frame_num;  // Current H.264 frame_num; -1 before the first frame.

    VAAPIH264EncodeFrame dpb[DPB_FRAMES];
    int current_frame;   // Index into dpb of the frame being encoded.
    int previous_frame;  // Index into dpb of the previous frame.

    // Values populated from AVOptions (see vaapi_h264_options[]).
    struct {
        const char *profile;
        const char *level;
        int qp;
        int idr_interval;
    } options;

} VAAPIH264EncodeContext;
+
+
+static int vaapi_h264_render_packed_header(VAAPIH264EncodeContext *ctx,
int type,
+ char *data, size_t bit_len)
+{
+ VAStatus vas;
+ VABufferID id_list[2];
+ VAEncPackedHeaderParameterBuffer buffer = {
+ .type = type,
+ .bit_length = bit_len,
+ .has_emulation_bytes = 0,
+ };
+
+ vas = vaCreateBuffer(ctx->va_instance.display,
ctx->va_codec.context_id,
+ VAEncPackedHeaderParameterBufferType,
+ sizeof(&buffer), 1, &buffer, &id_list[0]);
+ if(vas != VA_STATUS_SUCCESS) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to create parameter buffer
for packed "
+ "header (type %d): %d (%s).\n", type, vas, vaErrorStr(vas));
+ return -1;
+ }
+
+ vas = vaCreateBuffer(ctx->va_instance.display,
ctx->va_codec.context_id,
+ VAEncPackedHeaderDataBufferType,
+ (bit_len + 7) / 8, 1, data, &id_list[1]);
+ if(vas != VA_STATUS_SUCCESS) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to create data buffer for
packed "
+ "header (type %d): %d (%s).\n", type, vas, vaErrorStr(vas));
+ return -1;
+ }
+
+ vas = vaRenderPicture(ctx->va_instance.display,
ctx->va_codec.context_id,
+ id_list, 2);
+ if(vas != VA_STATUS_SUCCESS) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to render packed "
+ "header (type %d): %d (%s).\n", type, vas, vaErrorStr(vas));
+ return -1;
+ }
+
+ return 0;
+}
+
+static void vaapi_h264_write_nal_header(PutBitContext *b, int ref, int
type)
+{
+ // zero_byte
+ put_bits(b, 8, 0);
+ // start_code_prefix_one_3bytes
+ put_bits(b, 24, 1);
+ // forbidden_zero_bit
+ put_bits(b, 1, 0);
+ // nal_ref_idc
+ put_bits(b, 2, ref);
+ // nal_unit_type
+ put_bits(b, 5, type);
+}
+
+static void vaapi_h264_write_trailing_rbsp(PutBitContext *b)
+{
+ // rbsp_stop_one_bit
+ put_bits(b, 1, 1);
+ while(put_bits_count(b) & 7) {
+ // rbsp_alignment_zero_bit
+ put_bits(b, 1, 0);
+ }
+}
+
+static int vaapi_h264_render_packed_sps(VAAPIH264EncodeContext *ctx)
+{
+ PutBitContext b;
+ char tmp[256];
+ size_t len;
+
+ init_put_bits(&b, tmp, sizeof(tmp));
+
+ vaapi_h264_write_nal_header(&b, 3, NAL_SPS);
+
+ // profile_idc
+ put_bits(&b, 8, 66);
+ // constraint_set0_flag
+ put_bits(&b, 1, 0);
+ // constraint_set1_flag
+ put_bits(&b, 1, ctx->va_profile == VAProfileH264ConstrainedBaseline);
+ // constraint_set2_flag
+ put_bits(&b, 1, 0);
+ // constraint_set3_flag
+ put_bits(&b, 1, 0);
+ // constraint_set4_flag
+ put_bits(&b, 1, 0);
+ // constraint_set5_flag
+ put_bits(&b, 1, 0);
+ // reserved_zero_2bits
+ put_bits(&b, 2, 0);
+ // level_idc
+ put_bits(&b, 8, 52);
+ // seq_parameter_set_id
+ set_ue_golomb(&b, 0);
+
+ if(0) {
+ // chroma_format_idc
+ set_ue_golomb(&b, 1);
+ // bit_depth_luma_minus8
+ set_ue_golomb(&b, 0);
+ // bit_depth_chroma_minus8
+ set_ue_golomb(&b, 0);
+ // qpprime_y_zero_transform_bypass_flag
+ put_bits(&b, 1, 0);
+ // seq_scaling_matrix_present_flag
+ put_bits(&b, 1, 0);
+ }
+
+ // log2_max_frame_num_minus4
+ set_ue_golomb(&b, 4);
+ // pic_order_cnt_type
+ set_ue_golomb(&b, 2);
+
+ // max_num_ref_frames
+ set_ue_golomb(&b, 1);
+ // gaps_in_frame_num_value_allowed_flag
+ put_bits(&b, 1, 0);
+ // pic_width_in_mbs_minus1
+ set_ue_golomb(&b, (ctx->width + 15) / 16 - 1);
+ // pic_height_in_map_units_minus1
+ set_ue_golomb(&b, (ctx->height + 15) / 16 - 1);
+ // frame_mbs_oly_flag
+ put_bits(&b, 1, 1);
+
+ // direct_8x8_inference_flag
+ put_bits(&b, 1, 1);
+ // frame_cropping_flag
+ put_bits(&b, 1, 0);
+
+ // vui_parameters_present_flag
+ put_bits(&b, 1, 0);
+
+ vaapi_h264_write_trailing_rbsp(&b);
+
+ len = put_bits_count(&b);
+ flush_put_bits(&b);
+
+ return vaapi_h264_render_packed_header(ctx, VAEncPackedHeaderSequence,
+ tmp, len);
+}
+
/**
 * Write a packed PPS NAL and send it to the driver.
 *
 * Must stay consistent with vaapi_h264_render_picture() (CABAC enabled,
 * deblocking syntax present in slice headers, pic_init_qp from the qp
 * option).
 *
 * @return 0 on success, -1 on failure.
 */
static int vaapi_h264_render_packed_pps(VAAPIH264EncodeContext *ctx)
{
    PutBitContext b;
    char tmp[256];
    size_t len;

    init_put_bits(&b, tmp, sizeof(tmp));

    vaapi_h264_write_nal_header(&b, 3, NAL_PPS);

    // seq_parameter_set_id
    set_ue_golomb(&b, 0);
    // pic_parameter_set_id
    set_ue_golomb(&b, 0);
    // entropy_coding_mode_flag
    // NOTE(review): 1 selects CABAC, but the packed SPS advertises
    // profile_idc 66 (baseline), which does not include CABAC -- confirm
    // against the target driver/decoder.
    put_bits(&b, 1, 1);
    // bottom_field_pic_order_in_frame_present_flag
    put_bits(&b, 1, 0);
    // num_slice_groups_minus1
    set_ue_golomb(&b, 0);

    // num_ref_idx_l0_default_active_minus1
    set_ue_golomb(&b, 0);
    // num_ref_idx_l1_default_active_minus1
    set_ue_golomb(&b, 0);
    // weighted_pred_flag
    put_bits(&b, 1, 0);
    // weighted_bipred_idc
    put_bits(&b, 2, 0);
    // pic_init_qp_minus26
    set_se_golomb(&b, ctx->options.qp - 26);
    // pic_init_qs_minus26
    set_se_golomb(&b, 0);
    // chroma_qp_index_offset
    set_se_golomb(&b, 0);
    // deblocking_filter_control_present_flag
    put_bits(&b, 1, 1);
    // constrained_intra_pred_flag
    put_bits(&b, 1, 0);
    // redundant_pic_cnt_present_flag
    put_bits(&b, 1, 0);

    // transform_8x8_mode_flag
    put_bits(&b, 1, 0);
    // pic_scaling_matrix_present_flag
    put_bits(&b, 1, 0);
    // second_chroma_qp_index_offset
    set_se_golomb(&b, 0);

    vaapi_h264_write_trailing_rbsp(&b);

    // Bit count is taken before flushing pads to a byte boundary.
    len = put_bits_count(&b);
    flush_put_bits(&b);

    return vaapi_h264_render_packed_header(ctx, VAEncPackedHeaderPicture,
                                           tmp, len);
}
+
/**
 * Write a packed slice header NAL for the current frame and send it to the
 * driver.  One slice covers the whole picture (first_mb_in_slice == 0).
 *
 * The if(0)/if(1) wrappers mark syntax branches that depend on SPS/PPS
 * settings fixed elsewhere in this file; they are kept to mirror the spec
 * structure.
 *
 * @return 0 on success, -1 on failure.
 */
static int vaapi_h264_render_packed_slice(VAAPIH264EncodeContext *ctx,
                                          VAAPIH264EncodeFrame *current)
{
    PutBitContext b;
    char tmp[256];
    size_t len;

    init_put_bits(&b, tmp, sizeof(tmp));

    // I-frames are always emitted as IDR here.
    if (current->type == FRAME_TYPE_I)
        vaapi_h264_write_nal_header(&b, 3, NAL_IDR_SLICE);
    else
        vaapi_h264_write_nal_header(&b, 3, NAL_SLICE);

    // first_mb_in_slice
    set_ue_golomb(&b, 0);
    // slice_type: 2 = I, 0 = P, 1 = B.
    set_ue_golomb(&b, (current->type == FRAME_TYPE_I ? 2 :
                       current->type == FRAME_TYPE_P ? 0 : 1));
    // pic_parameter_set_id
    set_ue_golomb(&b, 0);

    // frame_num: fixed 8 bits, matching log2_max_frame_num_minus4 == 4 in
    // the packed SPS.
    put_bits(&b, 8, current->frame_num);

    if (current->type == FRAME_TYPE_I) {
        // idr_pic_id
        // NOTE(review): always 0 -- consecutive IDR pictures normally
        // alternate idr_pic_id; confirm decoders accept this.
        set_ue_golomb(&b, 0);
    }

    // pic_order_cnt stuff
    // (Nothing to write: pic_order_cnt_type 2 has no POC slice syntax.)

    if (current->type == FRAME_TYPE_B) {
        // direct_spatial_mv_pred_flag
        put_bits(&b, 1, 1);
    }

    if (current->type == FRAME_TYPE_P || current->type == FRAME_TYPE_B) {
        // num_ref_idx_active_override_flag
        put_bits(&b, 1, 0);
        if (0) {
            // num_ref_idx_l0_active_minus1
            if (current->type == FRAME_TYPE_B) {
                // num_ref_idx_l1_active_minus1
            }
        }

        // ref_pic_list_modification_flag_l0
        put_bits(&b, 1, 0);

        if (current->type == FRAME_TYPE_B) {
            // ref_pic_list_modification_flag_l1
            put_bits(&b, 1, 0);
        }
    }

    // nal_ref_idc is always nonzero above, so dec_ref_pic_marking is
    // always present.
    if (1) {
        // dec_ref_pic_marking
        if (current->type == FRAME_TYPE_I) {
            // no_output_of_prior_pics_flag
            put_bits(&b, 1, 0);
            // long_term_reference_flag
            put_bits(&b, 1, 0);
        } else {
            // adaptive_pic_ref_marking_mode_flag
            put_bits(&b, 1, 0);
        }
    }

    // cabac_init_idc: present because the PPS enables CABAC.
    if (current->type != FRAME_TYPE_I) {
        // cabac_init_idc
        set_ue_golomb(&b, 0);
    }

    // slice_qp_delta
    set_se_golomb(&b, 0);

    // deblocking_filter_control_present_flag is set in the PPS, so these
    // fields are always written.
    if (1) {
        // disable_deblocking_filter_idc
        set_ue_golomb(&b, 0);
        // slice_alpha_c0_offset_div2
        set_se_golomb(&b, 0);
        // slice_beta_offset_div2
        set_se_golomb(&b, 0);
    }

    // NOTE(review): unlike SPS/PPS, the slice header is not byte-aligned
    // with a trailing RBSP here; the driver is expected to continue the
    // slice data from the exact bit position -- confirm this matches the
    // driver's packed-slice expectations.
    len = put_bits_count(&b);
    flush_put_bits(&b);

    return vaapi_h264_render_packed_header(ctx, VAEncPackedHeaderSlice,
                                           tmp, len);
}
+
+static int vaapi_h264_render_sequence(VAAPIH264EncodeContext *ctx)
+{
+ VAStatus vas;
+ VAEncSequenceParameterBufferH264 *seq = &ctx->seq_params;
+
+ {
+ memset(seq, 0, sizeof(*seq));
+
+ seq->level_idc = 52;
+ seq->picture_width_in_mbs = (ctx->width + 15) / 16;
+ seq->picture_height_in_mbs = (ctx->height + 15) / 16;
+
+ seq->intra_period = 0;
+ seq->intra_idr_period = 0;
+ seq->ip_period = 1;
+
+ seq->max_num_ref_frames = 2;
+ seq->time_scale = 900;
+ seq->num_units_in_tick = 15;
+ seq->seq_fields.bits.log2_max_pic_order_cnt_lsb_minus4 = 4;
+ seq->seq_fields.bits.log2_max_frame_num_minus4 = 4;
+ seq->seq_fields.bits.frame_mbs_only_flag = 1;
+ seq->seq_fields.bits.chroma_format_idc = 1;
+ seq->seq_fields.bits.direct_8x8_inference_flag = 1;
+ seq->seq_fields.bits.pic_order_cnt_type = 2;
+
+ seq->frame_cropping_flag = 1;
+ seq->frame_crop_left_offset = 0;
+ seq->frame_crop_right_offset = 0;
+ seq->frame_crop_top_offset = 0;
+ seq->frame_crop_bottom_offset = 8;
+ }
+
+ vas = vaCreateBuffer(ctx->va_instance.display,
ctx->va_codec.context_id,
+ VAEncSequenceParameterBufferType,
+ sizeof(*seq), 1, seq, &ctx->seq_params_id);
+ if(vas != VA_STATUS_SUCCESS) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to create buffer for sequence "
+ "parameters: %d (%s).\n", vas, vaErrorStr(vas));
+ return -1;
+ }
+ av_log(ctx, AV_LOG_DEBUG, "Sequence parameter buffer is %#x.\n",
+ ctx->seq_params_id);
+
+ vas = vaRenderPicture(ctx->va_instance.display,
ctx->va_codec.context_id,
+ &ctx->seq_params_id, 1);
+ if(vas != VA_STATUS_SUCCESS) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to send sequence parameters: "
+ "%d (%s).\n", vas, vaErrorStr(vas));
+ return -1;
+ }
+
+ return 0;
+}
+
+static int vaapi_h264_render_picture(VAAPIH264EncodeContext *ctx,
+ VAAPIH264EncodeFrame *current)
+{
+ VAStatus vas;
+ VAEncPictureParameterBufferH264 *pic = &ctx->pic_params;
+ int i;
+
+ memset(pic, 0, sizeof(*pic));
+ memcpy(&pic->CurrPic, ¤t->pic, sizeof(VAPictureH264));
+ for(i = 0; i < FF_ARRAY_ELEMS(pic->ReferenceFrames); i++) {
+ pic->ReferenceFrames[i].picture_id = VA_INVALID_ID;
+ pic->ReferenceFrames[i].flags = VA_PICTURE_H264_INVALID;
+ }
+ if(current->type == FRAME_TYPE_P || current->type == FRAME_TYPE_B)
+ memcpy(&pic->ReferenceFrames[0], ¤t->refp->pic,
+ sizeof(VAPictureH264));
+ if(current->type == FRAME_TYPE_B)
+ memcpy(&pic->ReferenceFrames[1], ¤t->refb->pic,
+ sizeof(VAPictureH264));
+
+ pic->pic_fields.bits.idr_pic_flag = (current->type == FRAME_TYPE_I);
+ pic->pic_fields.bits.reference_pic_flag = 1;
+ pic->pic_fields.bits.entropy_coding_mode_flag = 1;
+ pic->pic_fields.bits.deblocking_filter_control_present_flag = 1;
+
+ pic->frame_num = current->frame_num;
+ pic->last_picture = 0;
+ pic->pic_init_qp = ctx->options.qp;
+
+ pic->coded_buf = current->coded_data_id;
+
+ vas = vaCreateBuffer(ctx->va_instance.display,
ctx->va_codec.context_id,
+ VAEncPictureParameterBufferType,
+ sizeof(*pic), 1, pic, &ctx->pic_params_id);
+ if(vas != VA_STATUS_SUCCESS) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to create buffer for picture "
+ "parameters: %d (%s).\n", vas, vaErrorStr(vas));
+ return -1;
+ }
+ av_log(ctx, AV_LOG_DEBUG, "Picture parameter buffer is %#x.\n",
+ ctx->pic_params_id);
+
+ vas = vaRenderPicture(ctx->va_instance.display,
ctx->va_codec.context_id,
+ &ctx->pic_params_id, 1);
+ if(vas != VA_STATUS_SUCCESS) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to send picture parameters: "
+ "%d (%s).\n", vas, vaErrorStr(vas));
+ return -1;
+ }
+
+ return 0;
+}
+
+static int vaapi_h264_render_slice(VAAPIH264EncodeContext *ctx,
+ VAAPIH264EncodeFrame *current)
+{
+ VAStatus vas;
+ VAEncSliceParameterBufferH264 *slice = ¤t->params;
+ int i;
+
+ {
+ memset(slice, 0, sizeof(*slice));
+
+ slice->slice_type = (current->type == FRAME_TYPE_I ? 2 :
+ current->type == FRAME_TYPE_P ? 0 : 1);
+ slice->idr_pic_id = 0;
+
+ slice->macroblock_address = 0;
+ slice->num_macroblocks = (ctx->seq_params.picture_width_in_mbs *
+ ctx->seq_params.picture_height_in_mbs);
+ slice->macroblock_info = VA_INVALID_ID;
+
+ for(i = 0; i < FF_ARRAY_ELEMS(slice->RefPicList0); i++) {
+ slice->RefPicList0[i].picture_id = VA_INVALID_SURFACE;
+ slice->RefPicList0[i].flags = VA_PICTURE_H264_INVALID;
+ }
+ for(i = 0; i < FF_ARRAY_ELEMS(slice->RefPicList1); i++) {
+ slice->RefPicList1[i].picture_id = VA_INVALID_SURFACE;
+ slice->RefPicList1[i].flags = VA_PICTURE_H264_INVALID;
+ }
+
+ if(current->refp) {
+ av_log(ctx, AV_LOG_DEBUG, "Using %#x as first reference
frame.\n",
+ current->refp->pic.picture_id);
+ slice->RefPicList0[0].picture_id =
current->refp->pic.picture_id;
+ slice->RefPicList0[0].flags =
VA_PICTURE_H264_SHORT_TERM_REFERENCE;
+ }
+ if(current->refb) {
+ av_log(ctx, AV_LOG_DEBUG, "Using %#x as second reference
frame.\n",
+ current->refb->pic.picture_id);
+ slice->RefPicList0[1].picture_id =
current->refb->pic.picture_id;
+ slice->RefPicList0[1].flags =
VA_PICTURE_H264_SHORT_TERM_REFERENCE;
+ }
+
+ slice->slice_qp_delta = 0;
+ slice->slice_alpha_c0_offset_div2 = 0;
+ slice->slice_beta_offset_div2 = 0;
+ slice->direct_spatial_mv_pred_flag = 1;
+ }
+
+ vas = vaCreateBuffer(ctx->va_instance.display,
ctx->va_codec.context_id,
+ VAEncSliceParameterBufferType,
+ sizeof(*slice), 1, slice, ¤t->params_id);
+ if(vas != VA_STATUS_SUCCESS) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to create buffer for slice "
+ "parameters: %d (%s).\n", vas, vaErrorStr(vas));
+ return -1;
+ }
+ av_log(ctx, AV_LOG_DEBUG, "Slice buffer is %#x.\n",
current->params_id);
+
+ vas = vaRenderPicture(ctx->va_instance.display,
ctx->va_codec.context_id,
+ ¤t->params_id, 1);
+ if(vas != VA_STATUS_SUCCESS) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to send slice parameters: "
+ "%d (%s).\n", vas, vaErrorStr(vas));
+ return -1;
+ }
+
+ return 0;
+}
+
+static int vaapi_h264_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
+ const AVFrame *pic, int *got_packet)
+{
+ VAAPIH264EncodeContext *ctx = avctx->priv_data;
+ AVVAAPISurface *input, *recon;
+ VAAPIH264EncodeFrame *current;
+ AVFrame *input_image, *recon_image;
+ VACodedBufferSegment *buf_list, *buf;
+ VAStatus vas;
+ int err;
+
+ av_log(ctx, AV_LOG_DEBUG, "New frame: format %s, size %ux%u.\n",
+ av_get_pix_fmt_name(pic->format), pic->width, pic->height);
+
+ if(pic->format == AV_PIX_FMT_VAAPI) {
+ input_image = 0;
+ input = (AVVAAPISurface*)pic->buf[0]->data;
+
+ } else {
+ input_image = av_frame_alloc();
+
+ err = av_vaapi_get_input_surface(&ctx->va_codec, input_image);
+ if(err) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to allocate surface to "
+ "copy input frame: %d (%s).\n", err, av_err2str(err));
+ return -1;
+ }
+
+ input = (AVVAAPISurface*)input_image->buf[0]->data;
+
+ err = av_vaapi_map_surface(input, 0);
+ if(err) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to map input surface: "
+ "%d (%s).\n", err, av_err2str(err));
+ return -1;
+ }
+
+ err = av_vaapi_copy_to_surface(pic, input);
+ if(err) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to copy to input surface: "
+ "%d (%s).\n", err, av_err2str(err));
+ return -1;
+ }
+
+ err = av_vaapi_unmap_surface(input, 1);
+ if(err) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to unmap input surface: "
+ "%d (%s).\n", err, av_err2str(err));
+ return -1;
+ }
+ }
+ av_log(ctx, AV_LOG_DEBUG, "Using surface %#x for input image.\n",
+ input->id);
+
+ recon_image = av_frame_alloc();
+
+ err = av_vaapi_get_output_surface(&ctx->va_codec, recon_image);
+ if(err) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to allocate surface for "
+ "reconstructed frame: %d (%s).\n", err, av_err2str(err));
+ return -1;
+ }
+ recon = (AVVAAPISurface*)recon_image->buf[0]->data;
+ av_log(ctx, AV_LOG_DEBUG, "Using surface %#x for reconstructed
image.\n",
+ recon->id);
+
+ if(ctx->previous_frame != ctx->current_frame) {
+ av_frame_unref(&ctx->dpb[ctx->previous_frame].avframe);
+ }
+
+ ctx->previous_frame = ctx->current_frame;
+ ctx->current_frame = (ctx->current_frame + 1) % DPB_FRAMES;
+ {
+ current = &ctx->dpb[ctx->current_frame];
+
+ if(ctx->frame_num < 0 ||
+ ctx->frame_num == ctx->options.idr_interval)
+ current->type = FRAME_TYPE_I;
+ else
+ current->type = FRAME_TYPE_P;
+
+ if(current->type == FRAME_TYPE_I)
+ ctx->frame_num = 0;
+ else
+ ++ctx->frame_num;
+ current->frame_num = ctx->frame_num;
+
+ if(current->type == FRAME_TYPE_I) {
+ current->refp = 0;
+ current->refb = 0;
+ } else if(current->type == FRAME_TYPE_P) {
+ current->refp = &ctx->dpb[ctx->previous_frame];
+ current->refb = 0;
+ } else {
+ av_assert0(0);
+ }
+
+ memset(¤t->pic, 0, sizeof(VAPictureH264));
+ current->pic.picture_id = recon->id;
+ current->pic.frame_idx = ctx->frame_num;
+
+ memcpy(¤t->avframe, recon_image, sizeof(AVFrame));
+ }
+ av_log(ctx, AV_LOG_DEBUG, "Encoding as frame as %s (%d).\n",
+ current->type == FRAME_TYPE_I ? "I" :
+ current->type == FRAME_TYPE_P ? "P" : "B", ctx->frame_num);
+
+ vas = vaBeginPicture(ctx->va_instance.display,
ctx->va_codec.context_id,
+ input->id);
+ if(vas != VA_STATUS_SUCCESS) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to attach new picture: "
+ "%d (%s).\n", vas, vaErrorStr(vas));
+ return -1;
+ }
+
+ if(current->type == FRAME_TYPE_I) {
+ err = vaapi_h264_render_sequence(ctx);
+ if(err) return err;
+ }
+
+ err = vaapi_h264_render_picture(ctx, current);
+ if(err) return err;
+
+ if(current->type == FRAME_TYPE_I) {
+ err = vaapi_h264_render_packed_sps(ctx);
+ if(err) return err;
+
+ err = vaapi_h264_render_packed_pps(ctx);
+ if(err) return err;
+ }
+
+ err = vaapi_h264_render_packed_slice(ctx, current);
+ if(err) return err;
+
+ err = vaapi_h264_render_slice(ctx, current);
+ if(err) return err;
+
+ vas = vaEndPicture(ctx->va_instance.display, ctx->va_codec.context_id);
+ if(vas != VA_STATUS_SUCCESS) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to start picture processing: "
+ "%d (%s).\n", vas, vaErrorStr(vas));
+ return -1;
+ }
+
+ vas = vaSyncSurface(ctx->va_instance.display, input->id);
+ if(vas != VA_STATUS_SUCCESS) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to sync to picture completion: "
+ "%d (%s).\n", vas, vaErrorStr(vas));
+ return -1;
+ }
+
+ buf_list = 0;
+ vas = vaMapBuffer(ctx->va_instance.display, current->coded_data_id,
+ (void**)&buf_list);
+ if(vas != VA_STATUS_SUCCESS) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to map output buffers: "
+ "%d (%s).\n", vas, vaErrorStr(vas));
+ return -1;
+ }
+
+ for(buf = buf_list; buf; buf = buf->next) {
+ av_log(ctx, AV_LOG_DEBUG, "Output buffer: %u bytes.\n", buf->size);
+ err = av_new_packet(pkt, buf->size);
+ if(err) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to make output buffer "
+ "(%u bytes).\n", buf->size);
+ return err;
+ }
+
+ memcpy(pkt->data, buf->buf, buf->size);
+
+ if(current->type == FRAME_TYPE_I)
+ pkt->flags |= AV_PKT_FLAG_KEY;
+
+ *got_packet = 1;
+ }
+
+ vas = vaUnmapBuffer(ctx->va_instance.display, current->coded_data_id);
+ if(vas != VA_STATUS_SUCCESS) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to unmap output buffers: "
+ "%d (%s).\n", vas, vaErrorStr(vas));
+ return -1;
+ }
+
+ if(pic->format != AV_PIX_FMT_VAAPI)
+ av_frame_free(&input_image);
+
+ return 0;
+}
+
// Attributes requested when creating the encode pipeline: NV12/YUV420
// surfaces and constant-QP rate control.
// NOTE(review): VAConfigAttribEncPackedHeaders is set to 0 even though
// packed SPS/PPS/slice headers are rendered in encode_picture() -- confirm
// the intended attribute value against the libva documentation.
static VAConfigAttrib config_attributes[] = {
    { .type = VAConfigAttribRTFormat,
      .value = VA_RT_FORMAT_YUV420 },
    { .type = VAConfigAttribRateControl,
      .value = VA_RC_CQP },
    { .type = VAConfigAttribEncPackedHeaders,
      .value = 0 },
};
+
+static av_cold int vaapi_h264_encode_init(AVCodecContext *avctx)
+{
+ VAAPIH264EncodeContext *ctx = avctx->priv_data;
+ VAStatus vas;
+ int i, err;
+
+ if(strcmp(ctx->options.profile, "constrained_baseline"))
+ ctx->va_profile = VAProfileH264ConstrainedBaseline;
+ else if(strcmp(ctx->options.profile, "baseline"))
+ ctx->va_profile = VAProfileH264Baseline;
+ else if(strcmp(ctx->options.profile, "main"))
+ ctx->va_profile = VAProfileH264Main;
+ else if(strcmp(ctx->options.profile, "high"))
+ ctx->va_profile = VAProfileH264High;
+ else {
+ av_log(ctx, AV_LOG_ERROR, "Invalid profile '%s'.\n",
+ ctx->options.profile);
+ return AVERROR(EINVAL);
+ }
+
+ ctx->level = -1;
+ if(sscanf(ctx->options.level, "%d", &ctx->level) <= 0 ||
+ ctx->level < 0 || ctx->level > 52) {
+ av_log(ctx, AV_LOG_ERROR, "Invaid level '%s'.\n",
ctx->options.level);
+ return AVERROR(EINVAL);
+ }
+
+ if(ctx->options.qp >= 0) {
+ ctx->rc_mode = VA_RC_CQP;
+ } else {
+ // Default to CQP 26.
+ ctx->rc_mode = VA_RC_CQP;
+ ctx->options.qp = 26;
+ }
+ av_log(ctx, AV_LOG_INFO, "Using constant-QP mode at %d.\n",
+ ctx->options.qp);
+
+ err = av_vaapi_instance_init(&ctx->va_instance, 0);
+ if(err) {
+ av_log(ctx, AV_LOG_ERROR, "No VAAPI instance.\n");
+ return err;
+ }
+
+ ctx->width = avctx->width;
+ ctx->height = avctx->height;
+
+ ctx->frame_num = -1;
+
+ {
+ AVVAAPIPipelineConfig *config = &ctx->va_config;
+
+ config->profile = ctx->va_profile;
+ config->entrypoint = VAEntrypointEncSlice;
+
+ config->attribute_count = FF_ARRAY_ELEMS(config_attributes);
+ config->attributes = config_attributes;
+ }
+
+ {
+ AVVAAPISurfaceConfig *config = &ctx->output_config;
+
+ config->rt_format = VA_RT_FORMAT_YUV420;
+ config->av_format = AV_PIX_FMT_VAAPI;
+
+ config->image_format.fourcc = VA_FOURCC_NV12;
+ config->image_format.bits_per_pixel = 12;
+
+ config->count = DPB_FRAMES;
+ config->width = ctx->width;
+ config->height = ctx->height;
+
+ config->attribute_count = 0;
+ }
+
+ {
+ AVVAAPISurfaceConfig *config = &ctx->input_config;
+
+ config->rt_format = VA_RT_FORMAT_YUV420;
+ config->rt_format = VA_RT_FORMAT_YUV420;
+ config->av_format = AV_PIX_FMT_VAAPI;
+
+ config->image_format.fourcc = VA_FOURCC_NV12;
+ config->image_format.bits_per_pixel = 12;
+
+ config->count = INPUT_FRAMES;
+ config->width = ctx->width;
+ config->height = ctx->height;
+
+ config->attribute_count = 0;
+ }
+
+ err = av_vaapi_pipeline_init(&ctx->va_codec, &ctx->va_instance,
+ &ctx->va_config,
+ &ctx->input_config, &ctx->output_config);
+ if(err) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to create codec: %d (%s).\n",
+ err, av_err2str(err));
+ return err;
+ }
+
+ for(i = 0; i < DPB_FRAMES; i++) {
+ vas = vaCreateBuffer(ctx->va_instance.display,
+ ctx->va_codec.context_id,
+ VAEncCodedBufferType,
+ 1048576, 1, 0, &ctx->dpb[i].coded_data_id);
+ if(vas != VA_STATUS_SUCCESS) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to create buffer for "
+ "coded data: %d (%s).\n", vas, vaErrorStr(vas));
+ break;
+ }
+ av_log(ctx, AV_LOG_TRACE, "Coded data buffer %d is %#x.\n",
+ i, ctx->dpb[i].coded_data_id);
+ }
+
+ av_log(ctx, AV_LOG_INFO, "Started VAAPI H.264 encoder.\n");
+ return 0;
+}
+
/**
 * Tear down the encoder: destroy the VAAPI pipeline, then the VAAPI
 * instance.  Failures are only logged; the function always returns 0.
 *
 * NOTE(review): the per-slot coded-data buffers created in init with
 * vaCreateBuffer() are not destroyed here -- confirm whether
 * av_vaapi_pipeline_uninit() reclaims them, otherwise add
 * vaDestroyBuffer() calls.
 */
static av_cold int vaapi_h264_encode_close(AVCodecContext *avctx)
{
    VAAPIH264EncodeContext *ctx = avctx->priv_data;
    int err;

    err = av_vaapi_pipeline_uninit(&ctx->va_codec);
    if (err) {
        av_log(ctx, AV_LOG_ERROR, "Failed to destroy codec: %d (%s).\n",
               err, av_err2str(err));
    }

    err = av_vaapi_instance_uninit(&ctx->va_instance);
    if (err) {
        av_log(ctx, AV_LOG_ERROR, "Failed to uninitialised VAAPI "
               "instance: %d (%s).\n",
               err, av_err2str(err));
    }

    return 0;
}
+
#define OFFSET(member) offsetof(VAAPIH264EncodeContext, options.member)
#define FLAGS (AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM)
// Encoder options; values land in VAAPIH264EncodeContext.options and are
// interpreted in vaapi_h264_encode_init().
static const AVOption vaapi_h264_options[] = {
    // One of: "constrained_baseline", "baseline", "main", "high".
    { "profile", "Set H.264 profile",
      OFFSET(profile), AV_OPT_TYPE_STRING,
      { .str = "baseline" }, 0, 0, FLAGS },
    // level_idc as a decimal string, e.g. "51" for level 5.1.
    { "level", "Set H.264 level",
      OFFSET(level), AV_OPT_TYPE_STRING,
      { .str = "52" }, 0, 0, FLAGS },
    // -1 selects the default (CQP 26 -- see init).
    // NOTE(review): H.264 QP range is 0-51; the max of 52 here looks off
    // by one -- confirm.
    { "qp", "Use constant quantisation parameter",
      OFFSET(qp), AV_OPT_TYPE_INT,
      { .i64 = -1 }, -1, 52, FLAGS },
    { "idr_interval", "Number of frames between IDR frames (0 = all intra)",
      OFFSET(idr_interval), AV_OPT_TYPE_INT,
      { .i64 = -1 }, -1, INT_MAX, FLAGS },
    { 0 }
};
+
// AVClass exposing vaapi_h264_options through the AVOption API.
static const AVClass vaapi_h264_class = {
    .class_name = "VAAPI/H.264",
    .item_name  = av_default_item_name,
    .option     = vaapi_h264_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
+
// Encoder registration: accepts VAAPI hardware frames directly or NV12
// software frames (uploaded in encode_picture()).
AVCodec ff_h264_vaapi_encoder = {
    .name           = "vaapi_h264",
    .long_name      = NULL_IF_CONFIG_SMALL("H.264 (VAAPI)"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_H264,
    .priv_data_size = sizeof(VAAPIH264EncodeContext),
    .init           = &vaapi_h264_encode_init,
    .encode2        = &vaapi_h264_encode_picture,
    .close          = &vaapi_h264_encode_close,
    .priv_class     = &vaapi_h264_class,
    .pix_fmts       = (const enum AVPixelFormat[]) {
        AV_PIX_FMT_VAAPI,
        AV_PIX_FMT_NV12,
        AV_PIX_FMT_NONE,
    },
};
--
2.6.4
More information about the ffmpeg-devel
mailing list