[FFmpeg-devel] [PATCH v8 06/15] avcodec/vaapi_encode: extract the init function to base layer
tong1.wu at intel.com
Thu Apr 18 11:59:00 EEST 2024
From: Tong Wu <tong1.wu at intel.com>
The related device and input-frames parameters are also moved to the base layer.
Signed-off-by: Tong Wu <tong1.wu at intel.com>
---
libavcodec/hw_base_encode.c | 33 ++++++++++++++++
libavcodec/hw_base_encode.h | 11 ++++++
libavcodec/vaapi_encode.c | 68 ++++++++++-----------------------
libavcodec/vaapi_encode.h | 6 ---
libavcodec/vaapi_encode_av1.c | 2 +-
libavcodec/vaapi_encode_h264.c | 2 +-
libavcodec/vaapi_encode_h265.c | 2 +-
libavcodec/vaapi_encode_mjpeg.c | 6 ++-
8 files changed, 72 insertions(+), 58 deletions(-)
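
For reviewers, a minimal sketch (not part of the patch; the function name
example_encode_init is hypothetical) of what a derived encoder's init path
looks like once the common allocation and validation work is extracted. It
mirrors the ff_vaapi_encode_init() hunk below:

    #include "hw_base_encode.h"
    #include "vaapi_encode.h"

    static int example_encode_init(AVCodecContext *avctx)
    {
        HWBaseEncodeContext *base_ctx = avctx->priv_data;
        VAAPIEncodeContext  *ctx      = avctx->priv_data;
        int err;

        /* Frame/packet allocation, the hw_frames_ctx check, and the
         * device_ref/input_frames_ref setup now happen in the base layer. */
        err = ff_hw_base_encode_init(avctx);
        if (err < 0)
            return err;

        /* Only the VAAPI-specific backend handle is taken from the
         * now-shared device context. */
        ctx->hwctx = base_ctx->device->hwctx;

        return 0;
    }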
diff --git a/libavcodec/hw_base_encode.c b/libavcodec/hw_base_encode.c
index 1d9a255f69..14f3ecfc94 100644
--- a/libavcodec/hw_base_encode.c
+++ b/libavcodec/hw_base_encode.c
@@ -598,3 +598,36 @@ end:
return 0;
}
+
+int ff_hw_base_encode_init(AVCodecContext *avctx)
+{
+ HWBaseEncodeContext *ctx = avctx->priv_data;
+
+ ctx->frame = av_frame_alloc();
+ if (!ctx->frame)
+ return AVERROR(ENOMEM);
+
+ if (!avctx->hw_frames_ctx) {
+ av_log(avctx, AV_LOG_ERROR, "A hardware frames reference is "
+ "required to associate the encoding device.\n");
+ return AVERROR(EINVAL);
+ }
+
+ ctx->input_frames_ref = av_buffer_ref(avctx->hw_frames_ctx);
+ if (!ctx->input_frames_ref)
+ return AVERROR(ENOMEM);
+
+ ctx->input_frames = (AVHWFramesContext *)ctx->input_frames_ref->data;
+
+ ctx->device_ref = av_buffer_ref(ctx->input_frames->device_ref);
+ if (!ctx->device_ref)
+ return AVERROR(ENOMEM);
+
+ ctx->device = (AVHWDeviceContext *)ctx->device_ref->data;
+
+ ctx->tail_pkt = av_packet_alloc();
+ if (!ctx->tail_pkt)
+ return AVERROR(ENOMEM);
+
+ return 0;
+}
diff --git a/libavcodec/hw_base_encode.h b/libavcodec/hw_base_encode.h
index b5b676b9a8..f7e385e840 100644
--- a/libavcodec/hw_base_encode.h
+++ b/libavcodec/hw_base_encode.h
@@ -19,6 +19,7 @@
#ifndef AVCODEC_HW_BASE_ENCODE_H
#define AVCODEC_HW_BASE_ENCODE_H
+#include "libavutil/hwcontext.h"
#include "libavutil/fifo.h"
#define MAX_DPB_SIZE 16
@@ -117,6 +118,14 @@ typedef struct HWBaseEncodeContext {
// Hardware-specific hooks.
const struct HWEncodePictureOperation *op;
+ // The hardware device context.
+ AVBufferRef *device_ref;
+ AVHWDeviceContext *device;
+
+ // The hardware frame context containing the input frames.
+ AVBufferRef *input_frames_ref;
+ AVHWFramesContext *input_frames;
+
// Current encoding window, in display (input) order.
HWBaseEncodePicture *pic_start, *pic_end;
// The next picture to use as the previous reference picture in
@@ -183,6 +192,8 @@ typedef struct HWBaseEncodeContext {
int ff_hw_base_encode_receive_packet(AVCodecContext *avctx, AVPacket *pkt);
+int ff_hw_base_encode_init(AVCodecContext *avctx);
+
#define HW_BASE_ENCODE_COMMON_OPTIONS \
{ "async_depth", "Maximum processing parallelism. " \
"Increase this to improve single channel performance.", \
diff --git a/libavcodec/vaapi_encode.c b/libavcodec/vaapi_encode.c
index 18966596e1..c7488ad150 100644
--- a/libavcodec/vaapi_encode.c
+++ b/libavcodec/vaapi_encode.c
@@ -996,9 +996,10 @@ static const VAEntrypoint vaapi_encode_entrypoints_low_power[] = {
static av_cold int vaapi_encode_profile_entrypoint(AVCodecContext *avctx)
{
- VAAPIEncodeContext *ctx = avctx->priv_data;
- VAProfile *va_profiles = NULL;
- VAEntrypoint *va_entrypoints = NULL;
+ HWBaseEncodeContext *base_ctx = avctx->priv_data;
+ VAAPIEncodeContext *ctx = avctx->priv_data;
+ VAProfile *va_profiles = NULL;
+ VAEntrypoint *va_entrypoints = NULL;
VAStatus vas;
const VAEntrypoint *usable_entrypoints;
const VAAPIEncodeProfile *profile;
@@ -1021,10 +1022,10 @@ static av_cold int vaapi_encode_profile_entrypoint(AVCodecContext *avctx)
usable_entrypoints = vaapi_encode_entrypoints_normal;
}
- desc = av_pix_fmt_desc_get(ctx->input_frames->sw_format);
+ desc = av_pix_fmt_desc_get(base_ctx->input_frames->sw_format);
if (!desc) {
av_log(avctx, AV_LOG_ERROR, "Invalid input pixfmt (%d).\n",
- ctx->input_frames->sw_format);
+ base_ctx->input_frames->sw_format);
return AVERROR(EINVAL);
}
depth = desc->comp[0].depth;
@@ -2131,20 +2132,21 @@ static int vaapi_encode_alloc_output_buffer(FFRefStructOpaque opaque, void *obj)
static av_cold int vaapi_encode_create_recon_frames(AVCodecContext *avctx)
{
- VAAPIEncodeContext *ctx = avctx->priv_data;
+ HWBaseEncodeContext *base_ctx = avctx->priv_data;
+ VAAPIEncodeContext *ctx = avctx->priv_data;
AVVAAPIHWConfig *hwconfig = NULL;
AVHWFramesConstraints *constraints = NULL;
enum AVPixelFormat recon_format;
int err, i;
- hwconfig = av_hwdevice_hwconfig_alloc(ctx->device_ref);
+ hwconfig = av_hwdevice_hwconfig_alloc(base_ctx->device_ref);
if (!hwconfig) {
err = AVERROR(ENOMEM);
goto fail;
}
hwconfig->config_id = ctx->va_config;
- constraints = av_hwdevice_get_hwframe_constraints(ctx->device_ref,
+ constraints = av_hwdevice_get_hwframe_constraints(base_ctx->device_ref,
hwconfig);
if (!constraints) {
err = AVERROR(ENOMEM);
@@ -2157,9 +2159,9 @@ static av_cold int vaapi_encode_create_recon_frames(AVCodecContext *avctx)
recon_format = AV_PIX_FMT_NONE;
if (constraints->valid_sw_formats) {
for (i = 0; constraints->valid_sw_formats[i] != AV_PIX_FMT_NONE; i++) {
- if (ctx->input_frames->sw_format ==
+ if (base_ctx->input_frames->sw_format ==
constraints->valid_sw_formats[i]) {
- recon_format = ctx->input_frames->sw_format;
+ recon_format = base_ctx->input_frames->sw_format;
break;
}
}
@@ -2170,7 +2172,7 @@ static av_cold int vaapi_encode_create_recon_frames(AVCodecContext *avctx)
}
} else {
// No idea what to use; copy input format.
- recon_format = ctx->input_frames->sw_format;
+ recon_format = base_ctx->input_frames->sw_format;
}
av_log(avctx, AV_LOG_DEBUG, "Using %s as format of "
"reconstructed frames.\n", av_get_pix_fmt_name(recon_format));
@@ -2191,7 +2193,7 @@ static av_cold int vaapi_encode_create_recon_frames(AVCodecContext *avctx)
av_freep(&hwconfig);
av_hwframe_constraints_free(&constraints);
- ctx->recon_frames_ref = av_hwframe_ctx_alloc(ctx->device_ref);
+ ctx->recon_frames_ref = av_hwframe_ctx_alloc(base_ctx->device_ref);
if (!ctx->recon_frames_ref) {
err = AVERROR(ENOMEM);
goto fail;
@@ -2235,44 +2237,16 @@ av_cold int ff_vaapi_encode_init(AVCodecContext *avctx)
VAStatus vas;
int err;
+ err = ff_hw_base_encode_init(avctx);
+ if (err < 0)
+ goto fail;
+
ctx->va_config = VA_INVALID_ID;
ctx->va_context = VA_INVALID_ID;
base_ctx->op = &vaapi_op;
- /* If you add something that can fail above this av_frame_alloc(),
- * modify ff_vaapi_encode_close() accordingly. */
- base_ctx->frame = av_frame_alloc();
- if (!base_ctx->frame) {
- return AVERROR(ENOMEM);
- }
-
- if (!avctx->hw_frames_ctx) {
- av_log(avctx, AV_LOG_ERROR, "A hardware frames reference is "
- "required to associate the encoding device.\n");
- return AVERROR(EINVAL);
- }
-
- ctx->input_frames_ref = av_buffer_ref(avctx->hw_frames_ctx);
- if (!ctx->input_frames_ref) {
- err = AVERROR(ENOMEM);
- goto fail;
- }
- ctx->input_frames = (AVHWFramesContext*)ctx->input_frames_ref->data;
-
- ctx->device_ref = av_buffer_ref(ctx->input_frames->device_ref);
- if (!ctx->device_ref) {
- err = AVERROR(ENOMEM);
- goto fail;
- }
- ctx->device = (AVHWDeviceContext*)ctx->device_ref->data;
- ctx->hwctx = ctx->device->hwctx;
-
- base_ctx->tail_pkt = av_packet_alloc();
- if (!base_ctx->tail_pkt) {
- err = AVERROR(ENOMEM);
- goto fail;
- }
+ ctx->hwctx = base_ctx->device->hwctx;
err = vaapi_encode_profile_entrypoint(avctx);
if (err < 0)
@@ -2475,8 +2449,8 @@ av_cold int ff_vaapi_encode_close(AVCodecContext *avctx)
av_fifo_freep2(&base_ctx->encode_fifo);
av_buffer_unref(&ctx->recon_frames_ref);
- av_buffer_unref(&ctx->input_frames_ref);
- av_buffer_unref(&ctx->device_ref);
+ av_buffer_unref(&base_ctx->input_frames_ref);
+ av_buffer_unref(&base_ctx->device_ref);
return 0;
}
diff --git a/libavcodec/vaapi_encode.h b/libavcodec/vaapi_encode.h
index 13ccad8e47..8e466e2074 100644
--- a/libavcodec/vaapi_encode.h
+++ b/libavcodec/vaapi_encode.h
@@ -219,14 +219,8 @@ typedef struct VAAPIEncodeContext {
VAConfigID va_config;
VAContextID va_context;
- AVBufferRef *device_ref;
- AVHWDeviceContext *device;
AVVAAPIDeviceContext *hwctx;
- // The hardware frame context containing the input frames.
- AVBufferRef *input_frames_ref;
- AVHWFramesContext *input_frames;
-
// The hardware frame context containing the reconstructed frames.
AVBufferRef *recon_frames_ref;
AVHWFramesContext *recon_frames;
diff --git a/libavcodec/vaapi_encode_av1.c b/libavcodec/vaapi_encode_av1.c
index 393b479f99..c37ff7591a 100644
--- a/libavcodec/vaapi_encode_av1.c
+++ b/libavcodec/vaapi_encode_av1.c
@@ -370,7 +370,7 @@ static int vaapi_encode_av1_init_sequence_params(AVCodecContext *avctx)
memset(sh_obu, 0, sizeof(*sh_obu));
sh_obu->header.obu_type = AV1_OBU_SEQUENCE_HEADER;
- desc = av_pix_fmt_desc_get(priv->common.input_frames->sw_format);
+ desc = av_pix_fmt_desc_get(base_ctx->input_frames->sw_format);
av_assert0(desc);
sh->seq_profile = avctx->profile;
diff --git a/libavcodec/vaapi_encode_h264.c b/libavcodec/vaapi_encode_h264.c
index 412e392ac4..106df30563 100644
--- a/libavcodec/vaapi_encode_h264.c
+++ b/libavcodec/vaapi_encode_h264.c
@@ -308,7 +308,7 @@ static int vaapi_encode_h264_init_sequence_params(AVCodecContext *avctx)
memset(sps, 0, sizeof(*sps));
memset(pps, 0, sizeof(*pps));
- desc = av_pix_fmt_desc_get(priv->common.input_frames->sw_format);
+ desc = av_pix_fmt_desc_get(base_ctx->input_frames->sw_format);
av_assert0(desc);
if (desc->nb_components == 1 || desc->log2_chroma_w != 1 || desc->log2_chroma_h != 1) {
av_log(avctx, AV_LOG_ERROR, "Chroma format of input pixel format "
diff --git a/libavcodec/vaapi_encode_h265.c b/libavcodec/vaapi_encode_h265.c
index bea35d7ca8..199d61c3a2 100644
--- a/libavcodec/vaapi_encode_h265.c
+++ b/libavcodec/vaapi_encode_h265.c
@@ -278,7 +278,7 @@ static int vaapi_encode_h265_init_sequence_params(AVCodecContext *avctx)
memset(pps, 0, sizeof(*pps));
- desc = av_pix_fmt_desc_get(priv->common.input_frames->sw_format);
+ desc = av_pix_fmt_desc_get(base_ctx->input_frames->sw_format);
av_assert0(desc);
if (desc->nb_components == 1) {
chroma_format = 0;
diff --git a/libavcodec/vaapi_encode_mjpeg.c b/libavcodec/vaapi_encode_mjpeg.c
index 8ca1be192a..17fd8ba8e9 100644
--- a/libavcodec/vaapi_encode_mjpeg.c
+++ b/libavcodec/vaapi_encode_mjpeg.c
@@ -222,6 +222,7 @@ static int vaapi_encode_mjpeg_write_extra_buffer(AVCodecContext *avctx,
static int vaapi_encode_mjpeg_init_picture_params(AVCodecContext *avctx,
VAAPIEncodePicture *pic)
{
+ HWBaseEncodeContext *base_ctx = avctx->priv_data;
VAAPIEncodeMJPEGContext *priv = avctx->priv_data;
HWBaseEncodePicture *base_pic = (HWBaseEncodePicture *)pic;
JPEGRawFrameHeader *fh = &priv->frame_header;
@@ -235,7 +236,7 @@ static int vaapi_encode_mjpeg_init_picture_params(AVCodecContext *avctx,
av_assert0(base_pic->type == PICTURE_TYPE_IDR);
- desc = av_pix_fmt_desc_get(priv->common.input_frames->sw_format);
+ desc = av_pix_fmt_desc_get(base_ctx->input_frames->sw_format);
av_assert0(desc);
if (desc->flags & AV_PIX_FMT_FLAG_RGB)
components = components_rgb;
@@ -437,10 +438,11 @@ static int vaapi_encode_mjpeg_init_slice_params(AVCodecContext *avctx,
static av_cold int vaapi_encode_mjpeg_get_encoder_caps(AVCodecContext *avctx)
{
+ HWBaseEncodeContext *base_ctx = avctx->priv_data;
VAAPIEncodeContext *ctx = avctx->priv_data;
const AVPixFmtDescriptor *desc;
- desc = av_pix_fmt_desc_get(ctx->input_frames->sw_format);
+ desc = av_pix_fmt_desc_get(base_ctx->input_frames->sw_format);
av_assert0(desc);
ctx->surface_width = FFALIGN(avctx->width, 8 << desc->log2_chroma_w);
--
2.41.0.windows.1