[FFmpeg-devel] [PATCHv7] libavcodec: v4l2: add support for v4l2 mem2mem codecs
Jorge Ramirez-Ortiz
jorge.ramirez-ortiz at linaro.org
Fri Sep 1 17:21:31 EEST 2017
This patchset enhances Alexis Ballier's original patch and validates
it using Qualcomm's Venus hardware (the driver recently landed
upstream [1]).
It has been tested on Qualcomm's DragonBoard 410c and 820c.
The configure/make scripts have been validated on Ubuntu 10.04 and
16.04.
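V4L2 M2M support is autodetected at configure time (from the codecs
exposed by linux/videodev2.h); it can also be excluded explicitly with
the new switch, e.g. (illustrative invocation only):
    ./configure --disable-v4l2_m2m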
Tested decoders:
- h264
- h263
- mpeg4
- vp8
- vp9
- hevc
Tested encoders:
- h264
- h263
- mpeg4
Tested transcoding (concurrent encoding and decoding); example invocations are shown below.
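For illustration only (file names and bitrate are placeholders, not part
of the patch), the new codecs are selected by name once the patch is applied:
    ffmpeg -i input.mp4 -c:v h264_v4l2m2m -b:v 2M output.mp4       # hardware encode
    ffmpeg -c:v h264_v4l2m2m -i input.mp4 -f rawvideo output.yuv   # hardware decode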
Some of the changes introduced:
- v4l2: code cleanup and abstractions added
- v4l2: follow the new encode/decode API
- v4l2: fix display size for the NV12 output pool
- v4l2: handle EOS
- v4l2: vp8 and mpeg4 decoding and encoding
- v4l2: hevc and vp9 support
- v4l2: generate EOF on dequeue errors
- v4l2: h264_mp4toannexb filtering
- v4l2: fix make install and fate issues
- v4l2: enable/disable codecs depending on the pixfmts defined
- v4l2: pass timebase/framerate to the context
- v4l2: runtime decoder reconfiguration
- v4l2: add more frame information
- v4l2: free hardware resources when the last reference is released
- v4l2 encoding: disable B-frames for upstreaming (patch required)
[1] https://lwn.net/Articles/697956/
Reviewed-by: Jorge Ramirez <jorge.ramirez-ortiz at linaro.org>
Reviewed-by: Alexis Ballier <aballier at gentoo.org>
Tested-by: Jorge Ramirez <jorge.ramirez-ortiz at linaro.org>
---
Changelog | 1 +
configure | 30 +-
libavcodec/Makefile | 16 +
libavcodec/allcodecs.c | 9 +
libavcodec/v4l2_buffers.c | 916 ++++++++++++++++++++++++++++++++++++++++++
libavcodec/v4l2_buffers.h | 236 +++++++++++
libavcodec/v4l2_fmt.c | 147 +++++++
libavcodec/v4l2_fmt.h | 34 ++
libavcodec/v4l2_m2m.c | 452 +++++++++++++++++++++
libavcodec/v4l2_m2m.h | 70 ++++
libavcodec/v4l2_m2m_avcodec.h | 32 ++
libavcodec/v4l2_m2m_dec.c | 213 ++++++++++
libavcodec/v4l2_m2m_enc.c | 332 +++++++++++++++
13 files changed, 2487 insertions(+), 1 deletion(-)
create mode 100644 libavcodec/v4l2_buffers.c
create mode 100644 libavcodec/v4l2_buffers.h
create mode 100644 libavcodec/v4l2_fmt.c
create mode 100644 libavcodec/v4l2_fmt.h
create mode 100644 libavcodec/v4l2_m2m.c
create mode 100644 libavcodec/v4l2_m2m.h
create mode 100644 libavcodec/v4l2_m2m_avcodec.h
create mode 100644 libavcodec/v4l2_m2m_dec.c
create mode 100644 libavcodec/v4l2_m2m_enc.c
diff --git a/Changelog b/Changelog
index 8309417..c6fcda3 100644
--- a/Changelog
+++ b/Changelog
@@ -40,6 +40,7 @@ version <next>:
They must always be used by name.
- FITS demuxer and decoder
- FITS muxer and encoder
+- V4L2 mem2mem HW accelerated codecs support
version 3.3:
- CrystalHD decoder moved to new decode API
diff --git a/configure b/configure
index 4f1c172..d9244e7 100755
--- a/configure
+++ b/configure
@@ -149,6 +149,7 @@ Component options:
--disable-pixelutils disable pixel utils in libavutil
Individual component options:
+ --disable-v4l2_m2m disable V4L2 mem2mem code [autodetect]
--disable-everything disable all components listed below
--disable-encoder=NAME disable encoder NAME
--enable-encoder=NAME enable encoder NAME
@@ -1433,6 +1434,7 @@ AVCODEC_COMPONENTS="
AVDEVICE_COMPONENTS="
indevs
+ v4l2_m2m
outdevs
"
AVFILTER_COMPONENTS="
@@ -2271,6 +2273,7 @@ map 'eval ${v}_inline_deps=inline_asm' $ARCH_EXT_LIST_ARM
loongson2_deps="mips"
loongson3_deps="mips"
+v4l2_deps_any="linux_videodev2_h"
mipsfpu_deps="mips"
mipsdsp_deps="mips"
mipsdspr2_deps="mips"
@@ -2743,6 +2746,8 @@ nvenc_deps="cuda"
nvenc_deps_any="dlopen LoadLibrary"
nvenc_encoder_deps="nvenc"
+h263_v4l2m2m_decoder_deps="v4l2_m2m h263_v4l2_m2m"
+h263_v4l2m2m_encoder_deps="v4l2_m2m h263_v4l2_m2m"
h264_crystalhd_decoder_select="crystalhd h264_mp4toannexb_bsf h264_parser"
h264_cuvid_decoder_deps="cuda cuvid"
h264_cuvid_decoder_select="h264_mp4toannexb_bsf"
@@ -2761,6 +2766,8 @@ h264_vda_decoder_deps="vda"
h264_vda_decoder_select="h264_decoder"
h264_vdpau_decoder_deps="vdpau"
h264_vdpau_decoder_select="h264_decoder"
+h264_v4l2m2m_decoder_deps="v4l2_m2m h264_v4l2_m2m"
+h264_v4l2m2m_encoder_deps="v4l2_m2m h264_v4l2_m2m"
hevc_cuvid_decoder_deps="cuda cuvid"
hevc_cuvid_decoder_select="hevc_mp4toannexb_bsf"
hevc_mediacodec_decoder_deps="mediacodec"
@@ -2772,12 +2779,15 @@ hevc_qsv_encoder_deps="libmfx"
hevc_qsv_encoder_select="hevcparse qsvenc"
hevc_vaapi_encoder_deps="VAEncPictureParameterBufferHEVC"
hevc_vaapi_encoder_select="vaapi_encode golomb"
+hevc_v4l2m2m_decoder_deps="v4l2_m2m hevc_v4l2_m2m"
+hevc_v4l2m2m_encoder_deps="v4l2_m2m hevc_v4l2_m2m"
mjpeg_cuvid_decoder_deps="cuda cuvid"
mjpeg_vaapi_encoder_deps="VAEncPictureParameterBufferJPEG"
mjpeg_vaapi_encoder_select="vaapi_encode jpegtables"
mpeg1_cuvid_decoder_deps="cuda cuvid"
mpeg1_vdpau_decoder_deps="vdpau"
mpeg1_vdpau_decoder_select="mpeg1video_decoder"
+mpeg1_v4l2m2m_decoder_deps="v4l2_m2m mpeg1_v4l2_m2m"
mpeg2_crystalhd_decoder_select="crystalhd"
mpeg2_cuvid_decoder_deps="cuda cuvid"
mpeg2_mmal_decoder_deps="mmal"
@@ -2788,6 +2798,7 @@ mpeg2_qsv_encoder_deps="libmfx"
mpeg2_qsv_encoder_select="qsvenc"
mpeg2_vaapi_encoder_deps="VAEncPictureParameterBufferMPEG2"
mpeg2_vaapi_encoder_select="vaapi_encode"
+mpeg2_v4l2m2m_decoder_deps="v4l2_m2m mpeg2_v4l2_m2m"
mpeg4_crystalhd_decoder_select="crystalhd"
mpeg4_cuvid_decoder_deps="cuda cuvid"
mpeg4_mediacodec_decoder_deps="mediacodec"
@@ -2795,6 +2806,8 @@ mpeg4_mmal_decoder_deps="mmal"
mpeg4_omx_encoder_deps="omx"
mpeg4_vdpau_decoder_deps="vdpau"
mpeg4_vdpau_decoder_select="mpeg4_decoder"
+mpeg4_v4l2m2m_decoder_deps="v4l2_m2m mpeg4_v4l2_m2m"
+mpeg4_v4l2m2m_encoder_deps="v4l2_m2m mpeg4_v4l2_m2m"
mpeg_vdpau_decoder_deps="vdpau"
mpeg_vdpau_decoder_select="mpeg2video_decoder"
msmpeg4_crystalhd_decoder_select="crystalhd"
@@ -2805,16 +2818,20 @@ vc1_cuvid_decoder_deps="cuda cuvid"
vc1_mmal_decoder_deps="mmal"
vc1_vdpau_decoder_deps="vdpau"
vc1_vdpau_decoder_select="vc1_decoder"
+vc1_v4l2m2m_decoder_deps="v4l2_m2m vc1_v4l2_m2m"
vp8_cuvid_decoder_deps="cuda cuvid"
vp8_mediacodec_decoder_deps="mediacodec"
vp8_qsv_decoder_deps="libmfx"
vp8_qsv_decoder_select="qsvdec vp8_qsv_hwaccel vp8_parser"
vp8_vaapi_encoder_deps="VAEncPictureParameterBufferVP8"
vp8_vaapi_encoder_select="vaapi_encode"
+vp8_v4l2m2m_decoder_deps="v4l2_m2m vp8_v4l2_m2m"
+vp8_v4l2m2m_encoder_deps="v4l2_m2m vp8_v4l2_m2m"
vp9_cuvid_decoder_deps="cuda cuvid"
vp9_mediacodec_decoder_deps="mediacodec"
vp9_vaapi_encoder_deps="VAEncPictureParameterBufferVP9"
vp9_vaapi_encoder_select="vaapi_encode"
+vp9_v4l2m2m_decoder_deps="v4l2_m2m vp9_v4l2_m2m"
wmv3_crystalhd_decoder_select="crystalhd"
wmv3_vdpau_decoder_select="vc1_vdpau_decoder"
@@ -3597,7 +3614,7 @@ done
enable_weak audiotoolbox
# Enable hwaccels by default.
-enable_weak d3d11va dxva2 vaapi vda vdpau videotoolbox_hwaccel xvmc
+enable_weak d3d11va dxva2 vaapi v4l2_m2m vda vdpau videotoolbox_hwaccel xvmc
enable_weak xlib
enable_weak cuda cuvid nvenc vda_framework videotoolbox videotoolbox_encoder
@@ -6066,10 +6083,21 @@ perl -v > /dev/null 2>&1 && enable perl || disable perl
pod2man --help > /dev/null 2>&1 && enable pod2man || disable pod2man
rsync --help 2> /dev/null | grep -q 'contimeout' && enable rsync_contimeout || disable rsync_contimeout
+# check V4L2 codecs available in the API
check_header linux/fb.h
check_header linux/videodev.h
check_header linux/videodev2.h
check_code cc linux/videodev2.h "struct v4l2_frmsizeenum vfse; vfse.discrete.width = 0;" && enable_safe struct_v4l2_frmivalenum_discrete
+check_code cc linux/videodev2.h "int i = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_VIDEO_M2M | V4L2_BUF_FLAG_LAST;" || disable v4l2_m2m
+check_code cc linux/videodev2.h "int i = V4L2_PIX_FMT_VC1_ANNEX_G;" && enable vc1_v4l2_m2m
+check_code cc linux/videodev2.h "int i = V4L2_PIX_FMT_MPEG1;" && enable mpeg1_v4l2_m2m
+check_code cc linux/videodev2.h "int i = V4L2_PIX_FMT_MPEG2;" && enable mpeg2_v4l2_m2m
+check_code cc linux/videodev2.h "int i = V4L2_PIX_FMT_MPEG4;" && enable mpeg4_v4l2_m2m
+check_code cc linux/videodev2.h "int i = V4L2_PIX_FMT_HEVC;" && enable hevc_v4l2_m2m
+check_code cc linux/videodev2.h "int i = V4L2_PIX_FMT_H263;" && enable h263_v4l2_m2m
+check_code cc linux/videodev2.h "int i = V4L2_PIX_FMT_H264;" && enable h264_v4l2_m2m
+check_code cc linux/videodev2.h "int i = V4L2_PIX_FMT_VP8;" && enable vp8_v4l2_m2m
+check_code cc linux/videodev2.h "int i = V4L2_PIX_FMT_VP9;" && enable vp9_v4l2_m2m
check_header sys/videoio.h
check_code cc sys/videoio.h "struct v4l2_frmsizeenum vfse; vfse.discrete.width = 0;" && enable_safe struct_v4l2_frmivalenum_discrete
diff --git a/libavcodec/Makefile b/libavcodec/Makefile
index 999632c..6e1de37 100644
--- a/libavcodec/Makefile
+++ b/libavcodec/Makefile
@@ -137,6 +137,7 @@ OBJS-$(CONFIG_VIDEODSP) += videodsp.o
OBJS-$(CONFIG_VP3DSP) += vp3dsp.o
OBJS-$(CONFIG_VP56DSP) += vp56dsp.o
OBJS-$(CONFIG_VP8DSP) += vp8dsp.o
+OBJS-$(CONFIG_V4L2_M2M) += v4l2_m2m.o v4l2_buffers.o v4l2_fmt.o
OBJS-$(CONFIG_WMA_FREQS) += wma_freqs.o
OBJS-$(CONFIG_WMV2DSP) += wmv2dsp.o
@@ -323,6 +324,8 @@ OBJS-$(CONFIG_H263_DECODER) += h263dec.o h263.o ituh263dec.o \
intelh263dec.o h263data.o
OBJS-$(CONFIG_H263_ENCODER) += mpeg4videoenc.o mpeg4video.o \
h263.o ituh263enc.o flvenc.o h263data.o
+OBJS-$(CONFIG_H263_V4L2M2M_DECODER) += v4l2_m2m_dec.o
+OBJS-$(CONFIG_H263_V4L2M2M_ENCODER) += v4l2_m2m_enc.o
OBJS-$(CONFIG_H264_DECODER) += h264dec.o h264_cabac.o h264_cavlc.o \
h264_direct.o h264_loopfilter.o \
h264_mb.o h264_picture.o \
@@ -340,6 +343,8 @@ OBJS-$(CONFIG_H264_QSV_DECODER) += qsvdec_h2645.o
OBJS-$(CONFIG_H264_QSV_ENCODER) += qsvenc_h264.o
OBJS-$(CONFIG_H264_VAAPI_ENCODER) += vaapi_encode_h264.o vaapi_encode_h26x.o
OBJS-$(CONFIG_H264_VIDEOTOOLBOX_ENCODER) += videotoolboxenc.o
+OBJS-$(CONFIG_H264_V4L2M2M_DECODER) += v4l2_m2m_dec.o
+OBJS-$(CONFIG_H264_V4L2M2M_ENCODER) += v4l2_m2m_enc.o
OBJS-$(CONFIG_HAP_DECODER) += hapdec.o hap.o
OBJS-$(CONFIG_HAP_ENCODER) += hapenc.o hap.o
OBJS-$(CONFIG_HEVC_DECODER) += hevcdec.o hevc_mvs.o \
@@ -353,6 +358,8 @@ OBJS-$(CONFIG_HEVC_QSV_DECODER) += qsvdec_h2645.o
OBJS-$(CONFIG_HEVC_QSV_ENCODER) += qsvenc_hevc.o hevc_ps_enc.o \
hevc_data.o
OBJS-$(CONFIG_HEVC_VAAPI_ENCODER) += vaapi_encode_h265.o vaapi_encode_h26x.o
+OBJS-$(CONFIG_HEVC_V4L2M2M_DECODER) += v4l2_m2m_dec.o
+OBJS-$(CONFIG_HEVC_V4L2M2M_ENCODER) += v4l2_m2m_enc.o
OBJS-$(CONFIG_HNM4_VIDEO_DECODER) += hnm4video.o
OBJS-$(CONFIG_HQ_HQA_DECODER) += hq_hqa.o hq_hqadata.o hq_hqadsp.o \
canopus.o
@@ -422,6 +429,7 @@ OBJS-$(CONFIG_MPC8_DECODER) += mpc8.o mpc.o
OBJS-$(CONFIG_MPEGVIDEO_DECODER) += mpeg12dec.o mpeg12.o mpeg12data.o
OBJS-$(CONFIG_MPEG1VIDEO_DECODER) += mpeg12dec.o mpeg12.o mpeg12data.o
OBJS-$(CONFIG_MPEG1VIDEO_ENCODER) += mpeg12enc.o mpeg12.o
+OBJS-$(CONFIG_MPEG1_V4L2M2M_DECODER) += v4l2_m2m_dec.o
OBJS-$(CONFIG_MPEG2_MMAL_DECODER) += mmaldec.o
OBJS-$(CONFIG_MPEG2_QSV_DECODER) += qsvdec_other.o
OBJS-$(CONFIG_MPEG2_QSV_ENCODER) += qsvenc_mpeg2.o
@@ -429,9 +437,12 @@ OBJS-$(CONFIG_MPEG2VIDEO_DECODER) += mpeg12dec.o mpeg12.o mpeg12data.o
OBJS-$(CONFIG_MPEG2VIDEO_ENCODER) += mpeg12enc.o mpeg12.o
OBJS-$(CONFIG_MPEG2_MEDIACODEC_DECODER) += mediacodecdec.o
OBJS-$(CONFIG_MPEG2_VAAPI_ENCODER) += vaapi_encode_mpeg2.o
+OBJS-$(CONFIG_MPEG2_V4L2M2M_DECODER) += v4l2_m2m_dec.o
OBJS-$(CONFIG_MPEG4_DECODER) += xvididct.o
OBJS-$(CONFIG_MPEG4_MEDIACODEC_DECODER) += mediacodecdec.o
OBJS-$(CONFIG_MPEG4_OMX_ENCODER) += omx.o
+OBJS-$(CONFIG_MPEG4_V4L2M2M_DECODER) += v4l2_m2m_dec.o
+OBJS-$(CONFIG_MPEG4_V4L2M2M_ENCODER) += v4l2_m2m_enc.o
OBJS-$(CONFIG_MPL2_DECODER) += mpl2dec.o ass.o
OBJS-$(CONFIG_MSA1_DECODER) += mss3.o
OBJS-$(CONFIG_MSCC_DECODER) += mscc.o
@@ -605,6 +616,7 @@ OBJS-$(CONFIG_VC1_DECODER) += vc1dec.o vc1_block.o vc1_loopfilter.o
OBJS-$(CONFIG_VC1_CUVID_DECODER) += cuvid.o
OBJS-$(CONFIG_VC1_MMAL_DECODER) += mmaldec.o
OBJS-$(CONFIG_VC1_QSV_DECODER) += qsvdec_other.o
+OBJS-$(CONFIG_VC1_V4L2M2M_DECODER) += v4l2_m2m_dec.o
OBJS-$(CONFIG_VC2_ENCODER) += vc2enc.o vc2enc_dwt.o diractab.o
OBJS-$(CONFIG_VCR1_DECODER) += vcr1.o
OBJS-$(CONFIG_VMDAUDIO_DECODER) += vmdaudio.o
@@ -614,6 +626,7 @@ OBJS-$(CONFIG_VORBIS_DECODER) += vorbisdec.o vorbisdsp.o vorbis.o \
vorbis_data.o
OBJS-$(CONFIG_VORBIS_ENCODER) += vorbisenc.o vorbis.o \
vorbis_data.o
+OBJS-$(CONFIG_VPLAYER_DECODER) += textdec.o ass.o
OBJS-$(CONFIG_VP3_DECODER) += vp3.o
OBJS-$(CONFIG_VP5_DECODER) += vp5.o vp56.o vp56data.o vp56rac.o
OBJS-$(CONFIG_VP6_DECODER) += vp6.o vp56.o vp56data.o \
@@ -624,6 +637,8 @@ OBJS-$(CONFIG_VP8_CUVID_DECODER) += cuvid.o
OBJS-$(CONFIG_VP8_MEDIACODEC_DECODER) += mediacodecdec.o
OBJS-$(CONFIG_VP8_QSV_DECODER) += qsvdec_other.o
OBJS-$(CONFIG_VP8_VAAPI_ENCODER) += vaapi_encode_vp8.o
+OBJS-$(CONFIG_VP8_V4L2M2M_DECODER) += v4l2_m2m_dec.o
+OBJS-$(CONFIG_VP8_V4L2M2M_ENCODER) += v4l2_m2m_enc.o
OBJS-$(CONFIG_VP9_DECODER) += vp9.o vp9data.o vp9dsp.o vp9lpf.o vp9recon.o \
vp9block.o vp9prob.o vp9mvs.o vp56rac.o \
vp9dsp_8bpp.o vp9dsp_10bpp.o vp9dsp_12bpp.o
@@ -631,6 +646,7 @@ OBJS-$(CONFIG_VP9_CUVID_DECODER) += cuvid.o
OBJS-$(CONFIG_VP9_MEDIACODEC_DECODER) += mediacodecdec.o
OBJS-$(CONFIG_VP9_VAAPI_ENCODER) += vaapi_encode_vp9.o
OBJS-$(CONFIG_VPLAYER_DECODER) += textdec.o ass.o
+OBJS-$(CONFIG_VP9_V4L2M2M_DECODER) += v4l2_m2m_dec.o
OBJS-$(CONFIG_VQA_DECODER) += vqavideo.o
OBJS-$(CONFIG_WAVPACK_DECODER) += wavpack.o
OBJS-$(CONFIG_WAVPACK_ENCODER) += wavpackenc.o
diff --git a/libavcodec/allcodecs.c b/libavcodec/allcodecs.c
index ce0bc7e..eea1ae1 100644
--- a/libavcodec/allcodecs.c
+++ b/libavcodec/allcodecs.c
@@ -208,8 +208,10 @@ static void register_all(void)
REGISTER_ENCDEC (H263, h263);
REGISTER_DECODER(H263I, h263i);
REGISTER_ENCDEC (H263P, h263p);
+ REGISTER_ENCDEC (H263_V4L2M2M, h263_v4l2m2m);
REGISTER_DECODER(H264, h264);
REGISTER_DECODER(H264_CRYSTALHD, h264_crystalhd);
+ REGISTER_ENCDEC (H264_V4L2M2M, h264_v4l2m2m);
REGISTER_DECODER(H264_MEDIACODEC, h264_mediacodec);
REGISTER_DECODER(H264_MMAL, h264_mmal);
REGISTER_DECODER(H264_QSV, h264_qsv);
@@ -220,6 +222,7 @@ static void register_all(void)
REGISTER_ENCDEC (HAP, hap);
REGISTER_DECODER(HEVC, hevc);
REGISTER_DECODER(HEVC_QSV, hevc_qsv);
+ REGISTER_ENCDEC(HEVC_V4L2M2M, hevc_v4l2m2m);
REGISTER_DECODER(HNM4_VIDEO, hnm4_video);
REGISTER_DECODER(HQ_HQA, hq_hqa);
REGISTER_DECODER(HQX, hqx);
@@ -254,6 +257,7 @@ static void register_all(void)
REGISTER_ENCDEC (MPEG2VIDEO, mpeg2video);
REGISTER_ENCDEC (MPEG4, mpeg4);
REGISTER_DECODER(MPEG4_CRYSTALHD, mpeg4_crystalhd);
+ REGISTER_ENCDEC (MPEG4_V4L2M2M, mpeg4_v4l2m2m);
REGISTER_DECODER(MPEG4_MMAL, mpeg4_mmal);
#if FF_API_VDPAU
REGISTER_DECODER(MPEG4_VDPAU, mpeg4_vdpau);
@@ -263,8 +267,10 @@ static void register_all(void)
REGISTER_DECODER(MPEG_VDPAU, mpeg_vdpau);
REGISTER_DECODER(MPEG1_VDPAU, mpeg1_vdpau);
#endif
+ REGISTER_DECODER(MPEG1_V4L2M2M, mpeg1_v4l2m2m);
REGISTER_DECODER(MPEG2_MMAL, mpeg2_mmal);
REGISTER_DECODER(MPEG2_CRYSTALHD, mpeg2_crystalhd);
+ REGISTER_DECODER(MPEG2_V4L2M2M, mpeg2_v4l2m2m);
REGISTER_DECODER(MPEG2_QSV, mpeg2_qsv);
REGISTER_DECODER(MPEG2_MEDIACODEC, mpeg2_mediacodec);
REGISTER_DECODER(MSA1, msa1);
@@ -362,6 +368,7 @@ static void register_all(void)
REGISTER_DECODER(VC1IMAGE, vc1image);
REGISTER_DECODER(VC1_MMAL, vc1_mmal);
REGISTER_DECODER(VC1_QSV, vc1_qsv);
+ REGISTER_DECODER(VC1_V4L2M2M, vc1_v4l2m2m);
REGISTER_ENCODER(VC2, vc2);
REGISTER_DECODER(VCR1, vcr1);
REGISTER_DECODER(VMDVIDEO, vmdvideo);
@@ -373,7 +380,9 @@ static void register_all(void)
REGISTER_DECODER(VP6F, vp6f);
REGISTER_DECODER(VP7, vp7);
REGISTER_DECODER(VP8, vp8);
+ REGISTER_ENCDEC (VP8_V4L2M2M, vp8_v4l2m2m);
REGISTER_DECODER(VP9, vp9);
+ REGISTER_DECODER(VP9_V4L2M2M, vp9_v4l2m2m);
REGISTER_DECODER(VQA, vqa);
REGISTER_DECODER(BITPACKED, bitpacked);
REGISTER_DECODER(WEBP, webp);
diff --git a/libavcodec/v4l2_buffers.c b/libavcodec/v4l2_buffers.c
new file mode 100644
index 0000000..131f004
--- /dev/null
+++ b/libavcodec/v4l2_buffers.c
@@ -0,0 +1,916 @@
+/*
+ * V4L2 buffer helper functions.
+ *
+ * Copyright (C) 2017 Alexis Ballier <aballier at gentoo.org>
+ * Copyright (C) 2017 Jorge Ramirez <jorge.ramirez-ortiz at linaro.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/videodev2.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <poll.h>
+#include "libavcodec/avcodec.h"
+#include "libavcodec/internal.h"
+#include "v4l2_buffers.h"
+#include "v4l2_m2m.h"
+
+#define USEC_PER_SEC 1000000
+
+enum V4L2Buffer_status {
+ V4L2BUF_AVAILABLE,
+ V4L2BUF_IN_DRIVER,
+ V4L2BUF_RET_USER,
+};
+
+/* buffer transform */
+typedef int (*pkt_to_buf_f)(const AVPacket *, V4L2Buffer *);
+typedef int (*frm_to_buf_f)(const AVFrame *, V4L2Buffer *);
+typedef int (*buf_to_pkt_f)(AVPacket *, V4L2Buffer *);
+typedef int (*buf_to_frm_f)(AVFrame *, V4L2Buffer *);
+
+typedef int (*buf_to_bufref_f)(V4L2Buffer *in, int plane, AVBufferRef **buf);
+typedef int (*bufref_to_buf_f)(V4L2Buffer *out, int plane, const uint8_t* data, int size, AVBufferRef* bref);
+
+struct V4L2Buffer_ops {
+ pkt_to_buf_f pkt_to_buf;
+ frm_to_buf_f frm_to_buf;
+ buf_to_pkt_f buf_to_pkt;
+ buf_to_frm_f buf_to_frm;
+
+ bufref_to_buf_f bufref_to_buf;
+ buf_to_bufref_f buf_to_bufref;
+};
+
+struct V4L2Buffer {
+ /* each buffer needs to have a reference to its context */
+ struct V4L2Context *context;
+
+ struct V4L2Plane_info {
+ void * mm_addr;
+ size_t lengths;
+ } plane_info[VIDEO_MAX_PLANES];
+
+ /* some common buffer operations */
+ struct V4L2Buffer_ops ops;
+
+ /* memcpy to the v4l2_buffer planes array when needed */
+ struct v4l2_plane planes[VIDEO_MAX_PLANES];
+ struct v4l2_buffer buf;
+
+ int bytesperline[4];
+ int num_planes;
+
+ int flags;
+ enum V4L2Buffer_status status;
+
+};
+
+static inline int buffer_type_supported(V4L2Context *ctx)
+{
+ return ctx->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ||
+ ctx->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ||
+ ctx->type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+ ctx->type == V4L2_BUF_TYPE_VIDEO_OUTPUT;
+}
+
+static inline unsigned int get_width(V4L2Context *ctx, struct v4l2_format *fmt)
+{
+ return V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ? fmt->fmt.pix_mp.width : fmt->fmt.pix.width;
+}
+
+static inline unsigned int get_height(V4L2Context *ctx, struct v4l2_format *fmt)
+{
+ return V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ? fmt->fmt.pix_mp.height : fmt->fmt.pix.height;
+}
+
+static inline enum AVColorSpace get_colorspace(V4L2Buffer *buf)
+{
+ enum v4l2_ycbcr_encoding ycbcr;
+ enum v4l2_colorspace cs;
+
+ cs = V4L2_TYPE_IS_MULTIPLANAR(buf->context->type) ?
+ buf->context->format.fmt.pix_mp.colorspace :
+ buf->context->format.fmt.pix.colorspace;
+
+ ycbcr = V4L2_TYPE_IS_MULTIPLANAR(buf->context->type) ?
+ buf->context->format.fmt.pix_mp.ycbcr_enc:
+ buf->context->format.fmt.pix.ycbcr_enc;
+
+ switch(cs) {
+ case V4L2_COLORSPACE_SRGB: return AVCOL_SPC_RGB;
+ case V4L2_COLORSPACE_REC709: return AVCOL_SPC_BT709;
+ case V4L2_COLORSPACE_470_SYSTEM_M: return AVCOL_SPC_FCC;
+ case V4L2_COLORSPACE_470_SYSTEM_BG: return AVCOL_SPC_BT470BG;
+ case V4L2_COLORSPACE_SMPTE170M: return AVCOL_SPC_SMPTE170M;
+ case V4L2_COLORSPACE_SMPTE240M: return AVCOL_SPC_SMPTE240M;
+ case V4L2_COLORSPACE_BT2020:
+ if (ycbcr == V4L2_YCBCR_ENC_BT2020_CONST_LUM)
+ return AVCOL_SPC_BT2020_CL;
+ else
+ return AVCOL_SPC_BT2020_NCL;
+ default:
+ break;
+ }
+
+ return AVCOL_SPC_UNSPECIFIED;
+}
+
+static inline enum AVColorPrimaries get_colorprimaries(V4L2Buffer *buf)
+{
+ enum v4l2_ycbcr_encoding ycbcr;
+ enum v4l2_colorspace cs;
+
+ cs = V4L2_TYPE_IS_MULTIPLANAR(buf->context->type) ?
+ buf->context->format.fmt.pix_mp.colorspace :
+ buf->context->format.fmt.pix.colorspace;
+
+ ycbcr = V4L2_TYPE_IS_MULTIPLANAR(buf->context->type) ?
+ buf->context->format.fmt.pix_mp.ycbcr_enc:
+ buf->context->format.fmt.pix.ycbcr_enc;
+
+ switch(ycbcr) {
+ case V4L2_YCBCR_ENC_XV709:
+ case V4L2_YCBCR_ENC_709: return AVCOL_PRI_BT709;
+ case V4L2_YCBCR_ENC_XV601:
+ case V4L2_YCBCR_ENC_601:return AVCOL_PRI_BT470M;
+ default:
+ break;
+ }
+
+ switch(cs) {
+ case V4L2_COLORSPACE_470_SYSTEM_BG: return AVCOL_PRI_BT470BG;
+ case V4L2_COLORSPACE_SMPTE170M: return AVCOL_PRI_SMPTE170M;
+ case V4L2_COLORSPACE_SMPTE240M: return AVCOL_PRI_SMPTE240M;
+ case V4L2_COLORSPACE_BT2020: return AVCOL_PRI_BT2020;
+ default:
+ break;
+ }
+
+ return AVCOL_PRI_UNSPECIFIED;
+}
+
+static inline enum AVColorTransferCharacteristic get_colortrc(V4L2Buffer *buf)
+{
+ enum v4l2_ycbcr_encoding ycbcr;
+ enum v4l2_xfer_func xfer;
+ enum v4l2_colorspace cs;
+
+ cs = V4L2_TYPE_IS_MULTIPLANAR(buf->context->type) ?
+ buf->context->format.fmt.pix_mp.colorspace :
+ buf->context->format.fmt.pix.colorspace;
+
+ ycbcr = V4L2_TYPE_IS_MULTIPLANAR(buf->context->type) ?
+ buf->context->format.fmt.pix_mp.ycbcr_enc:
+ buf->context->format.fmt.pix.ycbcr_enc;
+
+ xfer = V4L2_TYPE_IS_MULTIPLANAR(buf->context->type) ?
+ buf->context->format.fmt.pix_mp.xfer_func:
+ buf->context->format.fmt.pix.xfer_func;
+
+ switch (xfer) {
+ case V4L2_XFER_FUNC_709: return AVCOL_TRC_BT709;
+ case V4L2_XFER_FUNC_SRGB: return AVCOL_TRC_IEC61966_2_1;
+ default:
+ break;
+ }
+
+ switch (cs) {
+ case V4L2_COLORSPACE_470_SYSTEM_M: return AVCOL_TRC_GAMMA22;
+ case V4L2_COLORSPACE_470_SYSTEM_BG: return AVCOL_TRC_GAMMA28;
+ case V4L2_COLORSPACE_SMPTE170M: return AVCOL_TRC_SMPTE170M;
+ case V4L2_COLORSPACE_SMPTE240M: return AVCOL_TRC_SMPTE240M;
+ default:
+ break;
+ }
+
+ switch (ycbcr) {
+ case V4L2_YCBCR_ENC_XV709:
+ case V4L2_YCBCR_ENC_XV601: return AVCOL_TRC_BT1361_ECG;
+ default:
+ break;
+ }
+
+ return AVCOL_TRC_UNSPECIFIED;
+}
+
+static inline enum AVColorRange get_colorrange(V4L2Buffer *buf)
+{
+ enum v4l2_quantization qt;
+
+ qt = V4L2_TYPE_IS_MULTIPLANAR(buf->context->type) ?
+ buf->context->format.fmt.pix_mp.quantization :
+ buf->context->format.fmt.pix.quantization;
+
+ switch (qt) {
+ case V4L2_QUANTIZATION_LIM_RANGE: return AVCOL_RANGE_MPEG;
+ case V4L2_QUANTIZATION_FULL_RANGE: return AVCOL_RANGE_JPEG;
+ default:
+ break;
+ }
+
+ return AVCOL_RANGE_UNSPECIFIED;
+}
+
+static inline void set_pts(V4L2Buffer *out, int64_t pts)
+{
+ V4L2m2mContext *s = container_of(out->context, V4L2m2mContext, output);
+ AVRational v4l2_timebase = { 1, USEC_PER_SEC };
+ int64_t v4l2_pts;
+
+ if (pts == AV_NOPTS_VALUE)
+ pts = 0;
+
+ /* convert pts to v4l2 timebase */
+ v4l2_pts = av_rescale_q(pts, s->avctx->time_base, v4l2_timebase);
+ out->buf.timestamp.tv_usec = v4l2_pts % USEC_PER_SEC;
+ out->buf.timestamp.tv_sec = v4l2_pts / USEC_PER_SEC;
+}
+
+static inline uint64_t get_pts(V4L2Buffer *avbuf)
+{
+ V4L2m2mContext *s = container_of(avbuf->context, V4L2m2mContext, capture);
+ AVRational v4l2_timebase = { 1, USEC_PER_SEC };
+ int64_t v4l2_pts;
+
+ /* convert pts back to encoder timebase */
+ v4l2_pts = avbuf->buf.timestamp.tv_sec * USEC_PER_SEC + avbuf->buf.timestamp.tv_usec;
+
+ return av_rescale_q(v4l2_pts, v4l2_timebase, s->avctx->time_base);
+}
+
+static void free_v4l2buf_cb(void *opaque, uint8_t *unused)
+{
+ V4L2Buffer* avbuf = opaque;
+ V4L2m2mContext *s = container_of(avbuf->context, V4L2m2mContext, capture);
+
+ atomic_fetch_sub_explicit(&s->refcount, 1, memory_order_acq_rel);
+
+ if (s->reinit) {
+ if (!atomic_load(&s->refcount))
+ sem_post(&s->refsync);
+ return;
+ }
+
+ if (avbuf->context->streamon) {
+ avbuf->context->ops.enqueue(avbuf);
+ return;
+ }
+
+ if (!atomic_load(&s->refcount))
+ v4l2_m2m_codec_end(s);
+}
+
+/***
+ Buffer Operations
+ */
+static int buffer_ops_bufref_to_v4l2buf(V4L2Buffer *out, int plane, const uint8_t* data, int size, AVBufferRef* bref)
+{
+ if (plane >= out->num_planes)
+ return AVERROR(EINVAL);
+
+ memcpy(out->plane_info[plane].mm_addr, data, FFMIN(size, out->plane_info[plane].lengths));
+
+ out->planes[plane].bytesused = FFMIN(size, out->plane_info[plane].lengths);
+ out->planes[plane].length = out->plane_info[plane].lengths;
+
+ return 0;
+}
+
+static inline int buffer_ops_v4l2buf_to_bufref(V4L2Buffer *in, int plane, AVBufferRef **buf)
+{
+ V4L2m2mContext *s = container_of(in->context, V4L2m2mContext, capture);
+
+ if (plane >= in->num_planes)
+ return AVERROR(EINVAL);
+
+ /* even though most encoders return 0 in data_offset, vp8 encoding does require this value */
+ *buf = av_buffer_create((char *)in->plane_info[plane].mm_addr + in->planes[plane].data_offset,
+ in->plane_info[plane].lengths, free_v4l2buf_cb, in, 0);
+ if (!*buf)
+ return AVERROR(ENOMEM);
+
+ in->status = V4L2BUF_RET_USER;
+ atomic_fetch_add_explicit(&s->refcount, 1, memory_order_relaxed);
+
+ return 0;
+}
+
+static int buffer_ops_v4l2buf_to_avframe(AVFrame *frame, V4L2Buffer *avbuf)
+{
+ int i, ret;
+
+ av_frame_unref(frame);
+
+ /* 1. get references to the actual data */
+ for (i = 0; i < avbuf->num_planes; i++) {
+ ret = avbuf->ops.buf_to_bufref(avbuf, i, &frame->buf[i]);
+ if (ret)
+ return ret;
+
+ frame->linesize[i] = avbuf->bytesperline[i];
+ frame->data[i] = frame->buf[i]->data;
+ }
+
+ /* 1.1 fixup special cases */
+ switch (avbuf->context->av_pix_fmt) {
+ case AV_PIX_FMT_NV12:
+ if (avbuf->num_planes > 1)
+ break;
+ frame->linesize[1] = avbuf->bytesperline[0];
+ frame->data[1] = frame->buf[0]->data + avbuf->bytesperline[0] * avbuf->context->format.fmt.pix_mp.height;
+ break;
+ default:
+ break;
+ }
+
+ /* 2. get frame information */
+ frame->key_frame = !!(avbuf->buf.flags & V4L2_BUF_FLAG_KEYFRAME);
+ frame->format = avbuf->context->av_pix_fmt;
+ frame->color_primaries = get_colorprimaries(avbuf);
+ frame->colorspace = get_colorspace(avbuf);
+ frame->color_range = get_colorrange(avbuf);
+ frame->color_trc = get_colortrc(avbuf);
+ frame->pts = get_pts(avbuf);
+ /* these two values are also updated during re-init in process_video_event */
+ frame->height = avbuf->context->height;
+ frame->width = avbuf->context->width;
+
+ /* 3. report errors upstream */
+ if (avbuf->buf.flags & V4L2_BUF_FLAG_ERROR) {
+ av_log(avbuf->context->log_ctx, AV_LOG_ERROR, "%s: driver decode error\n", avbuf->context->name);
+ frame->decode_error_flags |= FF_DECODE_ERROR_INVALID_BITSTREAM;
+ }
+
+ return 0;
+}
+
+static int buffer_ops_avpkt_to_v4l2buf(const AVPacket *pkt, V4L2Buffer *out)
+{
+ int ret;
+
+ ret = out->ops.bufref_to_buf(out, 0, pkt->data, pkt->size, pkt->buf);
+ if (ret)
+ return ret;
+
+ set_pts(out, pkt->pts);
+
+ if (pkt->flags & AV_PKT_FLAG_KEY)
+ out->flags = V4L2_BUF_FLAG_KEYFRAME;
+
+ return 0;
+}
+
+static int buffer_ops_avframe_to_v4l2buf(const AVFrame *frame, V4L2Buffer* out)
+{
+ int i, ret;
+
+ for(i = 0; i < out->num_planes; i++) {
+ ret = out->ops.bufref_to_buf(out, i, frame->buf[i]->data, frame->buf[i]->size, frame->buf[i]);
+ if (ret)
+ return ret;
+ }
+
+ set_pts(out, frame->pts);
+
+ return 0;
+}
+
+static int buffer_ops_v4l2buf_to_avpkt(AVPacket *pkt, V4L2Buffer *avbuf)
+{
+ int ret;
+
+ av_packet_unref(pkt);
+ ret = avbuf->ops.buf_to_bufref(avbuf, 0, &pkt->buf);
+ if (ret)
+ return ret;
+
+ pkt->size = V4L2_TYPE_IS_MULTIPLANAR(avbuf->context->type) ? avbuf->buf.m.planes[0].bytesused : avbuf->buf.bytesused;
+ pkt->data = pkt->buf->data;
+
+ if (avbuf->buf.flags & V4L2_BUF_FLAG_KEYFRAME)
+ pkt->flags |= AV_PKT_FLAG_KEY;
+
+ if (avbuf->buf.flags & V4L2_BUF_FLAG_ERROR) {
+ av_log(avbuf->context->log_ctx, AV_LOG_ERROR, "%s driver encode error\n", avbuf->context->name);
+ pkt->flags |= AV_PKT_FLAG_CORRUPT;
+ }
+
+ pkt->dts = pkt->pts = get_pts(avbuf);
+
+ return 0;
+}
+
+/***
+ Context Operations
+ */
+static int context_ops_stop_decode(V4L2Context *ctx)
+{
+ struct v4l2_decoder_cmd cmd = {
+ .cmd = V4L2_DEC_CMD_STOP,
+ };
+ int ret;
+
+ ret = ioctl(ctx->fd, VIDIOC_DECODER_CMD, &cmd);
+ if (ret) {
+ /* DECODER_CMD is optional */
+ if (errno == ENOTTY)
+ return v4l2_context_set_status(ctx, VIDIOC_STREAMOFF);
+ }
+
+ return 0;
+}
+
+static int context_ops_stop_encode(V4L2Context *ctx)
+{
+ struct v4l2_encoder_cmd cmd = {
+ .cmd = V4L2_ENC_CMD_STOP,
+ };
+ int ret;
+
+ ret = ioctl(ctx->fd, VIDIOC_ENCODER_CMD, &cmd);
+ if (ret) {
+ /* ENCODER_CMD is optional */
+ if (errno == ENOTTY)
+ return v4l2_context_set_status(ctx, VIDIOC_STREAMOFF);
+ }
+
+ return 0;
+}
+
+static int context_ops_enqueue_v4l2buf(V4L2Buffer* avbuf)
+{
+ int ret;
+
+ avbuf->buf.flags = avbuf->flags;
+
+ ret = ioctl(avbuf->context->fd, VIDIOC_QBUF, &avbuf->buf);
+ if (ret < 0)
+ return AVERROR(errno);
+
+ avbuf->status = V4L2BUF_IN_DRIVER;
+
+ return 0;
+}
+
+static int process_video_event(V4L2Context *ctx)
+{
+ V4L2m2mContext *s = container_of(ctx, V4L2m2mContext, capture);
+ struct v4l2_format cap_fmt = s->capture.format;
+ struct v4l2_format out_fmt = s->output.format;
+ struct v4l2_event evt;
+ int ret;
+
+ ret = ioctl(ctx->fd, VIDIOC_DQEVENT, &evt);
+ if (ret < 0) {
+ av_log(ctx->log_ctx, AV_LOG_ERROR, "%s VIDIOC_DQEVENT\n", ctx->name);
+ return 0;
+ }
+
+ ret = ioctl(s->fd, VIDIOC_G_FMT, &cap_fmt);
+ if (ret) {
+ av_log(s->capture.log_ctx, AV_LOG_ERROR, "%s VIDIOC_G_FMT\n", s->capture.name);
+ return 0;
+ }
+
+ ret = ioctl(s->fd, VIDIOC_G_FMT, &out_fmt);
+ if (ret) {
+ av_log(s->output.log_ctx, AV_LOG_ERROR, "%s VIDIOC_G_FMT\n", s->output.name);
+ return 0;
+ }
+
+ if (evt.type != V4L2_EVENT_SOURCE_CHANGE)
+ return 0;
+
+ if (get_height(&s->output, &s->output.format) != get_height(&s->output, &out_fmt) ||
+ get_width(&s->output, &s->output.format) != get_width(&s->output, &out_fmt)) {
+
+ av_log(s->output.log_ctx, AV_LOG_DEBUG, "%s changed (%dx%d) -> (%dx%d)\n",
+ s->output.name,
+ get_width(&s->output, &s->output.format), get_height(&s->output, &s->output.format),
+ get_width(&s->output, &out_fmt), get_height(&s->output, &out_fmt));
+
+ /* 0. update the output context */
+ s->output.height = get_height(ctx, &out_fmt);
+ s->output.width = get_width(ctx, &out_fmt);
+
+ /* 1. store the new dimensions in the capture context so the resulting frame
+ can be cropped */
+ s->capture.height = get_height(ctx, &out_fmt);
+ s->capture.width = get_width(ctx, &out_fmt);
+ }
+
+ if (get_height(&s->capture, &s->capture.format) != get_height(&s->capture, &cap_fmt) ||
+ get_width(&s->capture, &s->capture.format) != get_width(&s->capture, &cap_fmt)) {
+
+ av_log(s->capture.log_ctx, AV_LOG_DEBUG, "%s changed (%dx%d) -> (%dx%d)\n",
+ s->capture.name,
+ get_width(&s->capture, &s->capture.format), get_height(&s->capture, &s->capture.format),
+ get_width(&s->capture, &cap_fmt), get_height(&s->capture, &cap_fmt));
+
+ /* streamoff capture and unmap and remap new buffers */
+ ret = v4l2_m2m_codec_reinit(s);
+ if (ret)
+ av_log(ctx->log_ctx, AV_LOG_ERROR, "avpriv_v4l2m2m_reinit\n");
+
+ /* let the caller function know that reinit was executed */
+ return 1;
+ }
+
+ return 0;
+}
+
+static V4L2Buffer* context_ops_dequeue_v4l2buf(V4L2Context *ctx, int timeout)
+{
+ struct v4l2_plane planes[VIDEO_MAX_PLANES];
+ struct v4l2_buffer buf = { 0 };
+ V4L2Buffer* avbuf = NULL;
+ struct pollfd pfd = {
+ .events = POLLIN | POLLRDNORM | POLLPRI, /* default capture context */
+ .fd = ctx->fd,
+ };
+ int ret;
+
+ if (V4L2_TYPE_IS_OUTPUT(ctx->type))
+ pfd.events = POLLOUT | POLLWRNORM;
+
+ for (;;) {
+ /* timeout:
+ * - capture: not enough buffers are queued to generate output
+ * - output: no more free buffers to reuse, keep going
+ */
+ ret = poll(&pfd, 1, timeout);
+ if (ret > 0)
+ break;
+ if (errno == EINTR)
+ continue;
+ return NULL;
+ }
+
+ /* 0. handle errors */
+ if (pfd.revents & POLLERR) {
+ av_log(ctx->log_ctx, AV_LOG_WARNING, "%s POLLERR\n", ctx->name);
+ return NULL;
+ }
+
+ /* 1. dequeue the buffer */
+ if (pfd.revents & (POLLIN | POLLRDNORM) || pfd.revents & (POLLOUT | POLLWRNORM)) {
+ memset(&buf, 0, sizeof(buf));
+ buf.memory = V4L2_MEMORY_MMAP;
+ buf.type = ctx->type;
+ if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
+ memset(planes, 0, sizeof(planes));
+ buf.length = VIDEO_MAX_PLANES;
+ buf.m.planes = planes;
+ }
+
+ ret = ioctl(ctx->fd, VIDIOC_DQBUF, &buf);
+ if (ret) {
+ if (errno != EAGAIN) {
+ ctx->done = errno;
+ if (errno != EPIPE)
+ av_log(ctx->log_ctx, AV_LOG_DEBUG, "%s VIDIOC_DQBUF, errno (%s)\n",
+ ctx->name, av_err2str(AVERROR(errno)));
+ }
+ } else {
+ avbuf = &ctx->buffers[buf.index];
+ avbuf->status = V4L2BUF_AVAILABLE;
+ avbuf->buf = buf;
+ if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
+ memcpy(avbuf->planes, planes, sizeof(planes));
+ avbuf->buf.m.planes = avbuf->planes;
+ }
+ }
+ }
+
+ /* 2. handle resolution changes */
+ if (pfd.revents & POLLPRI) {
+ ret = process_video_event(ctx);
+ if (ret) {
+ /* drop the buffer (if there was one) since we had to reconfigure capture (unmap all buffers) */
+ return NULL;
+ }
+ }
+
+ return avbuf;
+}
+
+static V4L2Buffer* context_ops_getfree_v4l2buf(V4L2Context *ctx)
+{
+ int timeout = 0; /* return when no more buffers to dequeue */
+ int i;
+
+ /* get back as many output buffers as possible */
+ if (V4L2_TYPE_IS_OUTPUT(ctx->type)) {
+ do {
+ } while (ctx->ops.dequeue(ctx, timeout));
+ }
+
+ for (i = 0; i < ctx->num_buffers; i++) {
+ if (ctx->buffers[i].status == V4L2BUF_AVAILABLE)
+ return &ctx->buffers[i];
+ }
+
+ return NULL;
+}
+
+static int context_ops_release_v4l2_buffers(V4L2Context* ctx)
+{
+ struct v4l2_requestbuffers req = {
+ .memory = V4L2_MEMORY_MMAP,
+ .type = ctx->type,
+ .count = 0, /* 0 unmaps buffers from the driver */
+ };
+ int i, j;
+
+ for (i = 0; i < ctx->num_buffers; i++) {
+ V4L2Buffer *buffer = &ctx->buffers[i];
+
+ for (j = 0; j < buffer->num_planes; j++) {
+ struct V4L2Plane_info *p = &buffer->plane_info[j];
+ if (p->mm_addr && p->lengths)
+ munmap(p->mm_addr, p->lengths);
+ }
+ }
+
+ return ioctl(ctx->fd, VIDIOC_REQBUFS, &req);
+}
+
+static int context_ops_initialize_v4l2buf(V4L2Context *ctx, V4L2Buffer* avbuf, int index)
+{
+ int ret, i;
+
+ /* keep a reference to the context */
+ avbuf->context = ctx;
+
+ /* initialize the buffer operations */
+ avbuf->ops.buf_to_frm = buffer_ops_v4l2buf_to_avframe;
+ avbuf->ops.frm_to_buf = buffer_ops_avframe_to_v4l2buf;
+ avbuf->ops.buf_to_pkt = buffer_ops_v4l2buf_to_avpkt;
+ avbuf->ops.pkt_to_buf = buffer_ops_avpkt_to_v4l2buf;
+
+ avbuf->ops.bufref_to_buf = buffer_ops_bufref_to_v4l2buf;
+ avbuf->ops.buf_to_bufref = buffer_ops_v4l2buf_to_bufref;
+
+ avbuf->buf.memory = V4L2_MEMORY_MMAP;
+ avbuf->buf.type = ctx->type;
+ avbuf->buf.index = index;
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
+ avbuf->buf.length = VIDEO_MAX_PLANES;
+ avbuf->buf.m.planes = avbuf->planes;
+ }
+
+ ret = ioctl(ctx->fd, VIDIOC_QUERYBUF, &avbuf->buf);
+ if (ret < 0)
+ return AVERROR(errno);
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
+ avbuf->num_planes = 0;
+ /* in MP, the V4L2 API states that buf.length means num_planes */
+ for (i = 0; i < avbuf->buf.length; i++) {
+ /* stop at the first zero-length plane to avoid looping forever */
+ if (!avbuf->buf.m.planes[i].length)
+ break;
+ avbuf->num_planes++;
+ }
+ } else
+ avbuf->num_planes = 1;
+
+ for (i = 0; i < avbuf->num_planes; i++) {
+
+ avbuf->bytesperline[i] = V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ?
+ ctx->format.fmt.pix_mp.plane_fmt[i].bytesperline :
+ ctx->format.fmt.pix.bytesperline;
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
+ avbuf->plane_info[i].lengths = avbuf->buf.m.planes[i].length;
+ avbuf->plane_info[i].mm_addr = mmap(NULL, avbuf->buf.m.planes[i].length,
+ PROT_READ | PROT_WRITE, MAP_SHARED,
+ ctx->fd, avbuf->buf.m.planes[i].m.mem_offset);
+ } else {
+ avbuf->plane_info[i].lengths = avbuf->buf.length;
+ avbuf->plane_info[i].mm_addr = mmap(NULL, avbuf->buf.length,
+ PROT_READ | PROT_WRITE, MAP_SHARED,
+ ctx->fd, avbuf->buf.m.offset);
+ }
+
+ if (avbuf->plane_info[i].mm_addr == MAP_FAILED)
+ return AVERROR(ENOMEM);
+ }
+
+ avbuf->status = V4L2BUF_AVAILABLE;
+
+ if (V4L2_TYPE_IS_OUTPUT(ctx->type))
+ return 0;
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
+ avbuf->buf.m.planes = avbuf->planes;
+ avbuf->buf.length = avbuf->num_planes;
+
+ } else {
+ avbuf->buf.bytesused = avbuf->planes[index].bytesused;
+ avbuf->buf.length = avbuf->planes[index].length;
+ }
+
+ return ctx->ops.enqueue(avbuf);
+}
+
+int v4l2_enqueue_frame(V4L2Context* ctx, const AVFrame* frame)
+{
+ V4L2m2mContext *s = container_of(ctx, V4L2m2mContext, output);
+ V4L2Buffer* avbuf;
+ int ret;
+
+ if (!frame) {
+ ret = ctx->ops.stop_encode(ctx);
+ if (ret)
+ av_log(ctx->log_ctx, AV_LOG_ERROR, "%s stop_encode\n", ctx->name);
+ s->draining = 1;
+ return 0;
+ }
+
+ avbuf = ctx->ops.get_buffer(ctx);
+ if (!avbuf)
+ return AVERROR(ENOMEM);
+
+ ret = avbuf->ops.frm_to_buf(frame, avbuf);
+ if (ret)
+ return ret;
+
+ ret = ctx->ops.enqueue(avbuf);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int v4l2_enqueue_packet(V4L2Context* ctx, const AVPacket* pkt)
+{
+ V4L2m2mContext *s = container_of(ctx, V4L2m2mContext, output);
+ V4L2Buffer* avbuf;
+ int ret;
+
+ if (!pkt->size) {
+ ret = ctx->ops.stop_decode(ctx);
+ if (ret)
+ av_log(ctx->log_ctx, AV_LOG_ERROR, "%s stop_decode\n", ctx->name);
+ s->draining = 1;
+ return 0;
+ }
+
+ avbuf = ctx->ops.get_buffer(ctx);
+ if (!avbuf)
+ return AVERROR(ENOMEM);
+
+ ret = avbuf->ops.pkt_to_buf(pkt, avbuf);
+ if (ret)
+ return ret;
+
+ ret = ctx->ops.enqueue(avbuf);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int v4l2_dequeue_frame(V4L2Context* ctx, AVFrame* frame, int timeout)
+{
+ V4L2Buffer* avbuf = NULL;
+
+ avbuf = ctx->ops.dequeue(ctx, timeout);
+ if (!avbuf) {
+ if (ctx->done)
+ return AVERROR_EOF;
+
+ return AVERROR(EAGAIN);
+ }
+
+ return avbuf->ops.buf_to_frm(frame, avbuf);
+}
+
+int v4l2_dequeue_packet(V4L2Context* ctx, AVPacket* pkt, int timeout)
+{
+ V4L2Buffer* avbuf = NULL;
+
+ avbuf = ctx->ops.dequeue(ctx, timeout);
+ if (!avbuf) {
+ if (ctx->done)
+ return AVERROR_EOF;
+
+ return AVERROR(EAGAIN);
+ }
+
+ return avbuf->ops.buf_to_pkt(pkt, avbuf);
+}
+
+int v4l2_context_set_status(V4L2Context* ctx, int cmd)
+{
+ int type = ctx->type;
+ int ret;
+
+ ret = ioctl(ctx->fd, cmd, &type);
+ if (ret < 0)
+ return AVERROR(errno);
+
+ ctx->streamon = (cmd == VIDIOC_STREAMON);
+
+ return 0;
+}
+
+void v4l2_context_release(V4L2Context* ctx)
+{
+ int ret;
+
+ if (!ctx->buffers)
+ return;
+
+ ret = ctx->ops.release_buffers(ctx);
+ if (ret)
+ av_log(ctx->log_ctx, AV_LOG_WARNING, "V4L2 failed to unmap the %s buffers\n", ctx->name);
+ else
+ av_log(ctx->log_ctx, AV_LOG_DEBUG, "%s all buffers unmapped\n", ctx->name);
+
+ av_free(ctx->buffers);
+ ctx->buffers = NULL;
+}
+
+int v4l2_context_init(V4L2Context* ctx, int lazy_init)
+{
+ struct v4l2_requestbuffers req;
+ struct v4l2_control ctrl;
+ int min_buffers = 6;
+ int ret, i;
+
+ if (!buffer_type_supported(ctx)) {
+ av_log(ctx->log_ctx, AV_LOG_ERROR, "type %i not supported\n", ctx->type);
+ return AVERROR_PATCHWELCOME;
+ }
+
+ ctx->ops.release_buffers = context_ops_release_v4l2_buffers;
+ ctx->ops.init_buffer = context_ops_initialize_v4l2buf;
+ ctx->ops.get_buffer = context_ops_getfree_v4l2buf;
+ ctx->ops.dequeue = context_ops_dequeue_v4l2buf;
+ ctx->ops.enqueue = context_ops_enqueue_v4l2buf;
+ ctx->ops.stop_decode = context_ops_stop_decode;
+ ctx->ops.stop_encode = context_ops_stop_encode;
+
+ if (lazy_init)
+ return 0;
+
+ /* get the minimum number of buffers required by the hardware */
+ memset(&ctrl, 0, sizeof(ctrl));
+ ctrl.id = V4L2_TYPE_IS_OUTPUT(ctx->type) ? V4L2_CID_MIN_BUFFERS_FOR_OUTPUT : V4L2_CID_MIN_BUFFERS_FOR_CAPTURE;
+ ret = ioctl(ctx->fd, VIDIOC_G_CTRL, &ctrl);
+ if (!ret)
+ min_buffers = ctrl.value;
+ else {
+ av_log(ctx->log_ctx, AV_LOG_WARNING, "%s min_buffers to default %d\n", ctx->name, min_buffers);
+ }
+
+ memset(&req, 0, sizeof(req));
+ req.count = ctx->num_buffers + min_buffers;
+ req.memory = V4L2_MEMORY_MMAP;
+ req.type = ctx->type;
+ ret = ioctl(ctx->fd, VIDIOC_REQBUFS, &req);
+ if (ret < 0)
+ return AVERROR(errno);
+
+ ctx->num_buffers = req.count;
+ ctx->buffers = av_mallocz(ctx->num_buffers * sizeof(V4L2Buffer));
+ if (!ctx->buffers) {
+ av_log(ctx->log_ctx, AV_LOG_ERROR, "%s buffer initialization ENOMEM\n", ctx->name);
+ return AVERROR(ENOMEM);
+ }
+
+ av_log(ctx->log_ctx, AV_LOG_DEBUG, "%s queuing %d buffers\n", ctx->name, req.count);
+ for (i = 0; i < req.count; i++) {
+ ret = ctx->ops.init_buffer(ctx, &ctx->buffers[i], i);
+ if (ret < 0) {
+ av_log(ctx->log_ctx, AV_LOG_ERROR, "%s buffer initialization (%s)\n", ctx->name, av_err2str(ret));
+ av_free(ctx->buffers);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+
diff --git a/libavcodec/v4l2_buffers.h b/libavcodec/v4l2_buffers.h
new file mode 100644
index 0000000..22ee078
--- /dev/null
+++ b/libavcodec/v4l2_buffers.h
@@ -0,0 +1,236 @@
+/*
+ * V4L2 buffer{,context} helper functions.
+ *
+ * Copyright (C) 2017 Alexis Ballier <aballier at gentoo.org>
+ * Copyright (C) 2017 Jorge Ramirez <jorge.ramirez-ortiz at linaro.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_V4L2_BUFFERS_H
+#define AVCODEC_V4L2_BUFFERS_H
+
+#include <stdatomic.h>
+#include "libavcodec/avcodec.h"
+#include "libavutil/pixfmt.h"
+#include "libavutil/frame.h"
+#include "libavutil/buffer.h"
+
+struct V4L2Buffer;
+typedef struct V4L2Buffer V4L2Buffer;
+
+struct V4L2Context;
+typedef struct V4L2Context V4L2Context;
+
+/**
+ * V4L2Context_operations:
+ */
+typedef V4L2Buffer* (*v4l2_dequeue_buffer_f)(V4L2Context *ctx, int timeout);
+typedef V4L2Buffer* (*v4l2_getfree_buffer_f)(V4L2Context *ctx);
+
+typedef int (*v4l2_initialize_buffer_f)(V4L2Context *ctx, V4L2Buffer* avbuf, int index);
+typedef int (*v4l2_enqueue_buffer_f)(V4L2Buffer *buf);
+typedef int (*v4l2_release_buffers_f)(V4L2Context *ctx);
+typedef int (*v4l2_stop_decode_f)(V4L2Context *ctx);
+typedef int (*v4l2_stop_encode_f)(V4L2Context *ctx);
+
+typedef struct V4L2Context_ops {
+ v4l2_stop_decode_f stop_decode;
+ v4l2_stop_encode_f stop_encode;
+
+ /* operations on all buffers at once */
+ v4l2_release_buffers_f release_buffers;
+
+ /* operations on single buffers */
+ v4l2_initialize_buffer_f init_buffer;
+ v4l2_getfree_buffer_f get_buffer;
+ v4l2_dequeue_buffer_f dequeue;
+ v4l2_enqueue_buffer_f enqueue;
+
+} V4L2Context_ops;
+
+typedef struct V4L2Context {
+ /**
+ * Buffer context operations
+ * queue a V4L2Buffer into the context
+ * dequeue a V4L2Buffer into the context
+ * get a free V4L2Buffer from the context
+ * release all V4L2Buffers allocated to the context
+ */
+ V4L2Context_ops ops;
+
+ /**
+ * Log context (for av_log()). Can be NULL.
+ */
+ void *log_ctx;
+
+ /**
+ * Lazy Initialization: set to one if the context can not initialize its
+ * buffers until it first queries the driver for formats and sizes.
+ */
+ int lazy_init;
+
+ /**
+ * context name: must be set before calling v4l2_context_init().
+ */
+ const char* name;
+
+ /**
+ * File descriptor obtained from opening the associated device.
+ * Must be set before calling v4l2_context_init().
+ * Readonly after init.
+ */
+ int fd;
+
+ /**
+ * Type of this buffer context.
+ * See V4L2_BUF_TYPE_VIDEO_* in videodev2.h
+ * Must be set before calling v4l2_context_init().
+ * Readonly after init.
+ */
+ enum v4l2_buf_type type;
+
+ /**
+ * AVPixelFormat corresponding to this buffer context.
+ * AV_PIX_FMT_NONE means this is an encoded stream.
+ */
+ enum AVPixelFormat av_pix_fmt;
+
+ /**
+ * AVCodecID corresponding to this buffer context.
+ * AV_CODEC_ID_RAWVIDEO means this is a raw stream and av_pix_fmt must be set to a valid value.
+ */
+ enum AVCodecID av_codec_id;
+
+ /**
+ * Format returned by the driver after initializing the buffer context.
+ * Must be set before calling v4l2_context_init().
+ * v4l2_m2m_codec_format_context() can set it.
+ * Readonly after init.
+ */
+ struct v4l2_format format;
+
+ /**
+ * Width and height of the frames it produces (in case of a capture context, e.g. when decoding)
+ * or accepts (in case of an output context, e.g. when encoding).
+ *
+ * For an output context, this must be set before calling v4l2_context_init().
+ * For a capture context during decoding, it is set once the information has been
+ * received from the driver, at which point the buffers can be initialized.
+ */
+ int width, height;
+
+ /**
+ * Whether the stream has been started (VIDIOC_STREAMON has been sent).
+ */
+ int streamon;
+
+ /**
+ *
+ * Before calling v4l2_context_init() this is the number of buffers we would like to have available.
+ * v4l2_context_init() asks for (min_buffers + num_buffers) and sets this value to the actual number
+ * of buffers the driver gave us.
+ *
+ * Readonly after init.
+ */
+ int num_buffers;
+
+ /**
+ * Indexed array of V4L2Buffers
+ */
+ V4L2Buffer *buffers;
+
+ /**
+ * Either no more buffers are available or an unrecoverable error was notified
+ * by the V4L2 kernel driver; either way, this context can no longer be used.
+ */
+ int done;
+
+} V4L2Context;
+
+/**
+ * Initializes a V4L2Context.
+ *
+ * @param[in] ctx A pointer to a V4L2Context. See V4L2Context description for required variables.
+ * @return 0 in case of success, a negative value representing the error otherwise.
+ */
+int v4l2_context_init(V4L2Context* ctx, int lazy_init);
+
+/**
+ * Releases a V4L2Context.
+ *
+ * @param[in] ctx A pointer to a V4L2Context.
+ * The caller is responsible for freeing it.
+ * It must not be used after calling this function.
+ */
+void v4l2_context_release(V4L2Context* ctx);
+
+/**
+ * Sets the status of a V4L2Context.
+ *
+ * @param[in] ctx A pointer to a V4L2Context.
+ * @param[in] cmd The status to set (VIDIOC_STREAMON or VIDIOC_STREAMOFF).
+ * Warning: If VIDIOC_STREAMOFF is sent to a buffer context that still has some frames buffered,
+ * those frames will be dropped.
+ * @return 0 in case of success, a negative value representing the error otherwise.
+ */
+int v4l2_context_set_status(V4L2Context* ctx, int cmd);
+
+/**
+ * Dequeues a buffer from a V4L2Context to an AVPacket.
+ *
+ * The pkt must be non-NULL.
+ * @param[in] ctx The V4L2Context to dequeue from.
+ * @param[inout] pkt The AVPacket to dequeue to.
+ * @param[in] timeout The number of milliseconds to wait for the dequeue.
+ * @return 0 in case of success, AVERROR(EAGAIN) if no buffer was ready, another negative error in case of error.
+ */
+int v4l2_dequeue_packet(V4L2Context* ctx, AVPacket* pkt, int timeout);
+
+/**
+ * Dequeues a buffer from a V4L2Context to an AVFrame.
+ *
+ * The frame must be non-NULL.
+ * @param[in] ctx The V4L2Context to dequeue from.
+ * @param[inout] f The AVFrame to dequeue to.
+ * @param[in] timeout The number of milliseconds to wait for the dequeue.
+ * @return 0 in case of success, AVERROR(EAGAIN) if no buffer was ready, another negative error in case of error.
+ */
+int v4l2_dequeue_frame(V4L2Context* ctx, AVFrame* f, int timeout);
+
+/**
+ * Enqueues a buffer to a V4L2Context from an AVPacket
+ * The packet must be non-NULL.
+ * When the packet size is zero, the buffer is not queued; instead, a V4L2_DEC_CMD_STOP command is sent to the driver.
+ *
+ * @param[in] ctx The V4L2Context to enqueue to.
+ * @param[in] pkt A pointer to an AVPacket.
+ * @return 0 in case of success, a negative error otherwise.
+ */
+int v4l2_enqueue_packet(V4L2Context* ctx, const AVPacket* pkt);
+
+/**
+ * Enqueues a buffer to a V4L2Context from an AVFrame
+ * The frame must be non-NULL.
+ *
+ * @param[in] ctx The V4L2Context to enqueue to.
+ * @param[in] f A pointer to an AVFrame to enqueue.
+ * @return 0 in case of success, a negative error otherwise.
+ */
+int v4l2_enqueue_frame(V4L2Context* ctx, const AVFrame* f);
+
+#endif // AVCODEC_V4L2_BUFFERS_H
diff --git a/libavcodec/v4l2_fmt.c b/libavcodec/v4l2_fmt.c
new file mode 100644
index 0000000..a32540e
--- /dev/null
+++ b/libavcodec/v4l2_fmt.c
@@ -0,0 +1,147 @@
+/*
+ * V4L2 format helper functions
+ *
+ * Copyright (C) 2017 Alexis Ballier <aballier at gentoo.org>
+ * Copyright (C) 2017 Jorge Ramirez <jorge.ramirez-ortiz at linaro.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/videodev2.h>
+#include "v4l2_fmt.h"
+
+#define V4L2_FMT(x) V4L2_PIX_FMT_##x
+#define AV_CODEC(x) AV_CODEC_ID_##x
+#define AV_FMT(x) AV_PIX_FMT_##x
+
+static const struct fmt_conversion_map {
+ enum AVPixelFormat avfmt;
+ enum AVCodecID avcodec;
+ uint32_t v4l2_fmt;
+} fmt_map[] = {
+ { AV_FMT(RGB555LE), AV_CODEC(RAWVIDEO), V4L2_FMT(RGB555) },
+ { AV_FMT(RGB555BE), AV_CODEC(RAWVIDEO), V4L2_FMT(RGB555X) },
+ { AV_FMT(RGB565LE), AV_CODEC(RAWVIDEO), V4L2_FMT(RGB565) },
+ { AV_FMT(RGB565BE), AV_CODEC(RAWVIDEO), V4L2_FMT(RGB565X) },
+ { AV_FMT(BGR24), AV_CODEC(RAWVIDEO), V4L2_FMT(BGR24) },
+ { AV_FMT(RGB24), AV_CODEC(RAWVIDEO), V4L2_FMT(RGB24) },
+ { AV_FMT(BGR0), AV_CODEC(RAWVIDEO), V4L2_FMT(BGR32) },
+ { AV_FMT(0RGB), AV_CODEC(RAWVIDEO), V4L2_FMT(RGB32) },
+ { AV_FMT(GRAY8), AV_CODEC(RAWVIDEO), V4L2_FMT(GREY) },
+ { AV_FMT(YUV420P), AV_CODEC(RAWVIDEO), V4L2_FMT(YUV420) },
+ { AV_FMT(YUYV422), AV_CODEC(RAWVIDEO), V4L2_FMT(YUYV) },
+ { AV_FMT(UYVY422), AV_CODEC(RAWVIDEO), V4L2_FMT(UYVY) },
+ { AV_FMT(YUV422P), AV_CODEC(RAWVIDEO), V4L2_FMT(YUV422P) },
+ { AV_FMT(YUV411P), AV_CODEC(RAWVIDEO), V4L2_FMT(YUV411P) },
+ { AV_FMT(YUV410P), AV_CODEC(RAWVIDEO), V4L2_FMT(YUV410) },
+ { AV_FMT(YUV410P), AV_CODEC(RAWVIDEO), V4L2_FMT(YVU410) },
+ { AV_FMT(NV12), AV_CODEC(RAWVIDEO), V4L2_FMT(NV12) },
+ { AV_FMT(NONE), AV_CODEC(MJPEG), V4L2_FMT(MJPEG) },
+ { AV_FMT(NONE), AV_CODEC(MJPEG), V4L2_FMT(JPEG) },
+#ifdef V4L2_PIX_FMT_SRGGB8
+ { AV_FMT(BAYER_BGGR8), AV_CODEC(RAWVIDEO), V4L2_FMT(SBGGR8) },
+ { AV_FMT(BAYER_GBRG8), AV_CODEC(RAWVIDEO), V4L2_FMT(SGBRG8) },
+ { AV_FMT(BAYER_GRBG8), AV_CODEC(RAWVIDEO), V4L2_FMT(SGRBG8) },
+ { AV_FMT(BAYER_RGGB8), AV_CODEC(RAWVIDEO), V4L2_FMT(SRGGB8) },
+#endif
+#ifdef V4L2_PIX_FMT_Y16
+ { AV_FMT(GRAY16LE), AV_CODEC(RAWVIDEO), V4L2_FMT(Y16) },
+#endif
+#ifdef V4L2_PIX_FMT_NV12M
+ { AV_FMT(NV12), AV_CODEC(RAWVIDEO), V4L2_FMT(NV12M) },
+#endif
+#ifdef V4L2_PIX_FMT_NV21M
+ { AV_FMT(NV21), AV_CODEC(RAWVIDEO), V4L2_FMT(NV21M) },
+#endif
+#ifdef V4L2_PIX_FMT_YUV420M
+ { AV_FMT(YUV420P), AV_CODEC(RAWVIDEO), V4L2_FMT(YUV420M) },
+#endif
+#ifdef V4L2_PIX_FMT_NV16M
+ { AV_FMT(NV16), AV_CODEC(RAWVIDEO), V4L2_FMT(NV16M) },
+#endif
+#ifdef V4L2_PIX_FMT_H263
+ { AV_FMT(NONE), AV_CODEC(H263), V4L2_FMT(H263) },
+#endif
+#ifdef V4L2_PIX_FMT_H264
+ { AV_FMT(NONE), AV_CODEC(H264), V4L2_FMT(H264) },
+#endif
+#ifdef V4L2_PIX_FMT_MPEG4
+ { AV_FMT(NONE), AV_CODEC(MPEG4), V4L2_FMT(MPEG4) },
+#endif
+#ifdef V4L2_PIX_FMT_CPIA1
+ { AV_FMT(NONE), AV_CODEC(CPIA), V4L2_FMT(CPIA1) },
+#endif
+#ifdef V4L2_PIX_FMT_DV
+ { AV_FMT(NONE), AV_CODEC(DVVIDEO), V4L2_FMT(DV) },
+#endif
+#ifdef V4L2_PIX_FMT_MPEG1
+ { AV_FMT(NONE), AV_CODEC(MPEG1VIDEO), V4L2_FMT(MPEG1) },
+#endif
+#ifdef V4L2_PIX_FMT_MPEG2
+ { AV_FMT(NONE), AV_CODEC(MPEG2VIDEO), V4L2_FMT(MPEG2) },
+#endif
+#ifdef V4L2_PIX_FMT_VP8
+ { AV_FMT(NONE), AV_CODEC(VP8), V4L2_FMT(VP8) },
+#endif
+#ifdef V4L2_PIX_FMT_VP9
+ { AV_FMT(NONE), AV_CODEC(VP9), V4L2_FMT(VP9) },
+#endif
+#ifdef V4L2_PIX_FMT_HEVC
+ { AV_FMT(NONE), AV_CODEC(HEVC), V4L2_FMT(HEVC) },
+#endif
+#ifdef V4L2_PIX_FMT_VC1_ANNEX_G
+ { AV_FMT(NONE), AV_CODEC(VC1), V4L2_FMT(VC1_ANNEX_G) },
+#endif
+};
+
+uint32_t v4l2_avcodec_to_v4l2fmt(enum AVCodecID avcodec)
+{
+ int i;
+
+ for (i = 0; i < sizeof(fmt_map) / sizeof(fmt_map[0]); i++) {
+ if (fmt_map[i].avcodec == avcodec)
+ return fmt_map[i].v4l2_fmt;
+ }
+
+ return 0;
+}
+
+uint32_t v4l2_avfmt_to_v4l2fmt(enum AVPixelFormat avfmt)
+{
+ int i;
+
+ for (i = 0; i < sizeof(fmt_map) / sizeof(fmt_map[0]); i++) {
+ if (fmt_map[i].avfmt == avfmt)
+ return fmt_map[i].v4l2_fmt;
+ }
+
+ return 0;
+}
+
+enum AVPixelFormat v4l2_v4l2fmt_to_avfmt(uint32_t v4l2_fmt, enum AVCodecID avcodec)
+{
+ int i;
+
+ for (i = 0; i < sizeof(fmt_map) / sizeof(fmt_map[0]); i++) {
+ if (fmt_map[i].v4l2_fmt == v4l2_fmt && fmt_map[i].avcodec == avcodec)
+ return fmt_map[i].avfmt;
+ }
+
+ return AV_PIX_FMT_NONE;
+}
+
+
diff --git a/libavcodec/v4l2_fmt.h b/libavcodec/v4l2_fmt.h
new file mode 100644
index 0000000..0f2702b1
--- /dev/null
+++ b/libavcodec/v4l2_fmt.h
@@ -0,0 +1,34 @@
+/*
+ * V4L2 format helper functions
+ *
+ * Copyright (C) 2017 Alexis Ballier <aballier at gentoo.org>
+ * Copyright (C) 2017 Jorge Ramirez <jorge.ramirez-ortiz at linaro.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_V4L2_FMT_H
+#define AVCODEC_V4L2_FMT_H
+
+#include "libavcodec/avcodec.h"
+#include "libavutil/pixfmt.h"
+
+enum AVPixelFormat v4l2_v4l2fmt_to_avfmt(uint32_t v4l2_fmt, enum AVCodecID avcodec);
+uint32_t v4l2_avcodec_to_v4l2fmt(enum AVCodecID avcodec);
+uint32_t v4l2_avfmt_to_v4l2fmt(enum AVPixelFormat avfmt);
+
+#endif /* AVCODEC_V4L2_FMT_H */
diff --git a/libavcodec/v4l2_m2m.c b/libavcodec/v4l2_m2m.c
new file mode 100644
index 0000000..179e958
--- /dev/null
+++ b/libavcodec/v4l2_m2m.c
@@ -0,0 +1,452 @@
+/*
+ * V4L mem2mem
+ *
+ * Copyright (C) 2017 Alexis Ballier <aballier at gentoo.org>
+ * Copyright (C) 2017 Jorge Ramirez <jorge.ramirez-ortiz at linaro.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/videodev2.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#include <dirent.h>
+#include <fcntl.h>
+#include "libavcodec/avcodec.h"
+#include "libavcodec/internal.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/pixfmt.h"
+#include "v4l2_m2m_avcodec.h"
+#include "v4l2_buffers.h"
+#include "v4l2_fmt.h"
+#include "v4l2_m2m.h"
+
+static inline int try_raw_format(V4L2Context* ctx, enum AVPixelFormat pixfmt)
+{
+ struct v4l2_format *fmt = &ctx->format;
+ uint32_t v4l2_fmt;
+ int ret;
+
+ v4l2_fmt = v4l2_avfmt_to_v4l2fmt(pixfmt);
+ if (!v4l2_fmt)
+ return AVERROR(EINVAL);
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type))
+ fmt->fmt.pix_mp.pixelformat = v4l2_fmt;
+ else
+ fmt->fmt.pix.pixelformat = v4l2_fmt;
+
+ fmt->type = ctx->type;
+
+ ret = ioctl(ctx->fd, VIDIOC_TRY_FMT, fmt);
+ if (ret)
+ return AVERROR(EINVAL);
+
+ return 0;
+}
+
+static int query_raw_format(V4L2Context* ctx, int set)
+{
+ enum AVPixelFormat pixfmt = ctx->av_pix_fmt;
+ struct v4l2_fmtdesc fdesc;
+ int ret;
+
+ memset(&fdesc, 0, sizeof(fdesc));
+ fdesc.type = ctx->type;
+
+ if (pixfmt != AV_PIX_FMT_NONE) {
+ ret = try_raw_format(ctx, pixfmt);
+ if (ret)
+ pixfmt = AV_PIX_FMT_NONE;
+ else
+ return 0;
+ }
+
+ for (;;) {
+ ret = ioctl(ctx->fd, VIDIOC_ENUM_FMT, &fdesc);
+ if (ret)
+ return AVERROR(EINVAL);
+
+ pixfmt = v4l2_v4l2fmt_to_avfmt(fdesc.pixelformat, AV_CODEC_ID_RAWVIDEO);
+ ret = try_raw_format(ctx, pixfmt);
+ if (ret) {
+ fdesc.index++;
+ continue;
+ }
+
+ if (set)
+ ctx->av_pix_fmt = pixfmt;
+
+ return 0;
+ }
+
+ return AVERROR(EINVAL);
+}
+
+static int query_coded_format(V4L2Context* ctx, uint32_t *p)
+{
+ struct v4l2_fmtdesc fdesc;
+ uint32_t v4l2_fmt;
+ int ret;
+
+ v4l2_fmt = v4l2_avcodec_to_v4l2fmt(ctx->av_codec_id);
+ if (!v4l2_fmt)
+ return AVERROR(EINVAL);
+
+ memset(&fdesc, 0, sizeof(fdesc));
+ fdesc.type = ctx->type;
+
+ for (;;) {
+ ret = ioctl(ctx->fd, VIDIOC_ENUM_FMT, &fdesc);
+ if (ret)
+ return AVERROR(EINVAL);
+
+ if (fdesc.pixelformat == v4l2_fmt) {
+ break;
+ }
+
+ fdesc.index++;
+ }
+
+ *p = v4l2_fmt;
+
+ return 0;
+}
+
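+/*
+ * Helpers to classify the device from VIDIOC_QUERYCAP: single-planar or
+ * multi-planar API, exposed either as separate capture/output capabilities
+ * with streaming I/O or as the dedicated mem2mem capability.
+ */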
+static inline int splane_video(struct v4l2_capability *cap)
+{
+ if (cap->capabilities & (V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT) && cap->capabilities & V4L2_CAP_STREAMING)
+ return 1;
+
+ if (cap->capabilities & V4L2_CAP_VIDEO_M2M)
+ return 1;
+
+ return 0;
+}
+
+static inline int mplane_video(struct v4l2_capability *cap)
+{
+ if (cap->capabilities & (V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_VIDEO_OUTPUT_MPLANE) && cap->capabilities & V4L2_CAP_STREAMING)
+ return 1;
+
+ if (cap->capabilities & V4L2_CAP_VIDEO_M2M_MPLANE)
+ return 1;
+
+ return 0;
+}
+
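+/*
+ * Query the driver capabilities and set up the capture and output contexts
+ * with the matching buffer types (single-planar or multi-planar).
+ */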
+static int prepare_contexts(V4L2m2mContext* s, void *log_ctx)
+{
+ struct v4l2_capability cap;
+ int ret;
+
+ s->capture.log_ctx = s->output.log_ctx = log_ctx;
+ s->capture.done = s->output.done = 0;
+ s->capture.fd = s->output.fd = s->fd;
+ s->capture.name = "v4l2_cap";
+ s->output.name = "v4l2_out";
+ atomic_init(&s->refcount, 0);
+ sem_init(&s->refsync, 0, 0);
+
+ memset(&cap, 0, sizeof(cap));
+ ret = ioctl(s->fd, VIDIOC_QUERYCAP, &cap);
+ if (ret < 0)
+ return ret;
+
+ av_log(log_ctx, AV_LOG_INFO, "driver '%s' on card '%s'\n", cap.driver, cap.card);
+
+ if (mplane_video(&cap)) {
+ s->capture.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ s->output.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ return 0;
+ }
+
+ if (splane_video(&cap)) {
+ s->capture.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ s->output.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ return 0;
+ }
+
+ return AVERROR(EINVAL);
+}
+
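+/*
+ * Open the device node and check, without committing any format, that it can
+ * handle both the coded and the raw format required by the codec; the node is
+ * closed again once probing is done.
+ */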
+static int probe_v4l2_driver(V4L2m2mContext* s, void *log_ctx)
+{
+ int ret;
+
+ s->fd = open(s->devname, O_RDWR | O_NONBLOCK, 0);
+ if (s->fd < 0)
+ return AVERROR(errno);
+
+ ret = prepare_contexts(s, log_ctx);
+ if (ret < 0)
+ goto done;
+
+ ret = v4l2_m2m_codec_format_context(&s->output, 0);
+ if (ret) {
+ av_log(log_ctx, AV_LOG_DEBUG, "can't set input format\n");
+ goto done;
+ }
+
+ ret = v4l2_m2m_codec_format_context(&s->capture, 0);
+ if (ret) {
+ av_log(log_ctx, AV_LOG_DEBUG, "can't to set output format\n");
+ goto done;
+ }
+
+done:
+ close(s->fd);
+ s->fd = 0;
+
+ return ret;
+}
+
+static int configure_contexts(V4L2m2mContext* s, void *log_ctx)
+{
+ int ret;
+
+ s->fd = open(s->devname, O_RDWR | O_NONBLOCK, 0);
+ if (s->fd < 0)
+ return AVERROR(errno);
+
+ ret = prepare_contexts(s, log_ctx);
+ if (ret < 0)
+ goto error;
+
+ ret = v4l2_m2m_codec_format_context(&s->output, 1);
+ if (ret) {
+ av_log(log_ctx, AV_LOG_ERROR, "can't set input format\n");
+ goto error;
+ }
+
+ ret = v4l2_m2m_codec_format_context(&s->capture, 1);
+ if (ret) {
+ av_log(log_ctx, AV_LOG_ERROR, "can't to set output format\n");
+ goto error;
+ }
+
+ ret = v4l2_context_init(&s->output, s->output.lazy_init);
+ if (ret) {
+ av_log(log_ctx, AV_LOG_ERROR, "no output context's buffers\n");
+ goto error;
+ }
+
+ ret = v4l2_context_init(&s->capture, s->capture.lazy_init);
+ if (ret) {
+ av_log(log_ctx, AV_LOG_ERROR, "no capture context's buffers\n");
+ goto error;
+ }
+
+error:
+ if (ret) {
+ close(s->fd);
+ s->fd = 0;
+ }
+
+ return ret;
+}
+
+static void save_to_context(V4L2Context* ctx, uint32_t v4l2_fmt)
+{
+ ctx->format.type = ctx->type;
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
+ /* this is to handle the reconfiguration of the capture stream at runtime */
+ ctx->format.fmt.pix_mp.height = ctx->height;
+ ctx->format.fmt.pix_mp.width = ctx->width;
+ if (v4l2_fmt)
+ ctx->format.fmt.pix_mp.pixelformat = v4l2_fmt;
+ } else {
+ ctx->format.fmt.pix.height = ctx->height;
+ ctx->format.fmt.pix.width = ctx->width;
+ if (v4l2_fmt)
+ ctx->format.fmt.pix.pixelformat = v4l2_fmt;
+ }
+}
+
+int v4l2_m2m_codec_format_context(V4L2Context* ctx, int set)
+{
+ uint32_t v4l2_fmt;
+ int ret;
+
+ if (ctx->av_codec_id == AV_CODEC_ID_RAWVIDEO) {
+ ret = query_raw_format(ctx, set);
+ if (ret)
+ return ret;
+
+ save_to_context(ctx, 0);
+ if (set)
+ return ioctl(ctx->fd, VIDIOC_S_FMT, &ctx->format);
+
+ return ret;
+ }
+
+ ret = query_coded_format(ctx, &v4l2_fmt);
+ if (ret)
+ return ret;
+
+ save_to_context(ctx, v4l2_fmt);
+ if (set)
+ return ioctl(ctx->fd, VIDIOC_S_FMT, &ctx->format);
+
+ return ioctl(ctx->fd, VIDIOC_TRY_FMT, &ctx->format);
+}
+
+int v4l2_m2m_codec_end(V4L2m2mContext* s)
+{
+ int ret;
+
+ ret = v4l2_context_set_status(&s->output, VIDIOC_STREAMOFF);
+ if (ret)
+ av_log(s->output.log_ctx, AV_LOG_ERROR, "VIDIOC_STREAMOFF %s\n", s->output.name);
+
+ ret = v4l2_context_set_status(&s->capture, VIDIOC_STREAMOFF);
+ if (ret)
+ av_log(s->capture.log_ctx, AV_LOG_ERROR, "VIDIOC_STREAMOFF %s\n", s->capture.name);
+
+ v4l2_context_release(&s->output);
+
+ if (atomic_load(&s->refcount)) {
+ av_log(s->capture.log_ctx, AV_LOG_DEBUG, "avpriv_v4l2m2m_end leaving pending buffers \n");
+
+ return 0;
+ }
+
+ v4l2_context_release(&s->capture);
+ sem_destroy(&s->refsync);
+
+ /* release the hardware */
+ close(s->fd);
+
+ return 0;
+}
+
+int v4l2_m2m_codec_init(V4L2m2mContext* s, void* log_ctx)
+{
+ char *devname_save = s->devname;
+ int ret = AVERROR(EINVAL);
+ struct dirent *entry;
+ char node[PATH_MAX];
+ DIR *dirp;
+
+ if (s->devname && *s->devname)
+ return configure_contexts(s, log_ctx);
+
+ dirp = opendir("/dev");
+ if (!dirp)
+ return AVERROR(errno);
+
+ for (entry = readdir(dirp); entry; entry = readdir(dirp)) {
+
+ if (strncmp(entry->d_name, "video", 5))
+ continue;
+
+ snprintf(node, sizeof(node), "/dev/%s", entry->d_name);
+
+ av_log(log_ctx, AV_LOG_DEBUG, "probing device %s\n", node);
+
+ s->devname = node;
+ ret = probe_v4l2_driver(s, log_ctx);
+ if (!ret)
+ break;
+ }
+
+ closedir(dirp);
+
+ if (!ret) {
+ av_log(log_ctx, AV_LOG_INFO, "Using device %s\n", node);
+ ret = configure_contexts(s, log_ctx);
+ } else {
+ av_log(log_ctx, AV_LOG_ERROR, "Could not find a valid device\n");
+ }
+ s->devname = devname_save;
+
+ return ret;
+}
+
+int v4l2_m2m_codec_reinit(V4L2m2mContext* s)
+{
+ int ret;
+
+ /* 1. reinit in progress */
+ s->reinit = 1;
+
+ av_log(s->avctx, AV_LOG_DEBUG, "reinit context\n");
+
+ /* 2. streamoff */
+ ret = v4l2_context_set_status(&s->capture, VIDIOC_STREAMOFF);
+ if (ret)
+ av_log(s->avctx, AV_LOG_ERROR, "capture VIDIOC_STREAMOFF\n");
+
+ /* 3. unmap the capture buffers (v4l2 and ffmpeg):
+ * we must wait for all references to be released before being allowed
+ * to queue new buffers.
+ */
+ av_log(s->avctx, AV_LOG_DEBUG, "capture wait for user to release AVBufferRefs \n");
+ if (atomic_load(&s->refcount)) {
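+ /* refsync is expected to be posted once the last user-held AVBufferRef
+ * is released; restart sem_wait() if it is interrupted by a signal */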
+ while (sem_wait(&s->refsync) == -1 && errno == EINTR);
+ }
+
+ v4l2_context_release(&s->capture);
+
+ /* 4. query the new format */
+ ret = v4l2_m2m_codec_format_context(&s->capture, 1);
+ if (ret) {
+ av_log(s->avctx, AV_LOG_ERROR, "setting capture format\n");
+ return ret;
+ }
+
+ /* 5. do lazy initialization */
+ ret = v4l2_context_init(&s->capture, s->capture.lazy_init);
+ if (ret) {
+ av_log(s->avctx, AV_LOG_ERROR, "capture buffers lazy init\n");
+ return ret;
+ }
+
+ /* 6. update AVCodecContext */
+ ret = ff_set_dimensions(s->avctx, s->capture.width, s->capture.height);
+ if (ret < 0)
+ av_log(s->avctx, AV_LOG_WARNING, "update avcodec height and width\n");
+
+ /* 7. complete reinit */
+ sem_destroy(&s->refsync);
+ sem_init(&s->refsync, 0, 0);
+ s->draining = 0;
+ s->reinit = 0;
+
+ return 0;
+}
+
+int ff_v4l2m2m_codec_end(AVCodecContext *avctx)
+{
+ V4L2m2mContext *s = avctx->priv_data;
+
+ av_log(avctx, AV_LOG_DEBUG, "Closing context\n");
+
+ return v4l2_m2m_codec_end(s);
+}
+
+int ff_v4l2m2m_codec_init(AVCodecContext *avctx)
+{
+ V4L2m2mContext *s = avctx->priv_data;
+ s->avctx = avctx;
+
+ return v4l2_m2m_codec_init(s, avctx);
+}
+
diff --git a/libavcodec/v4l2_m2m.h b/libavcodec/v4l2_m2m.h
new file mode 100644
index 0000000..5d6bfe0
--- /dev/null
+++ b/libavcodec/v4l2_m2m.h
@@ -0,0 +1,70 @@
+/*
+ * V4L2 mem2mem helper functions
+ *
+ * Copyright (C) 2017 Alexis Ballier <aballier at gentoo.org>
+ * Copyright (C) 2017 Jorge Ramirez <jorge.ramirez-ortiz at linaro.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_V4L2_M2M_H
+#define AVCODEC_V4L2_M2M_H
+
+#include <semaphore.h>
+#include "v4l2_buffers.h"
+
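+/* Linux-kernel-style container_of(): recover a pointer to the enclosing
+ * structure from a pointer to one of its members. */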
+#define container_of(ptr, type, member) ({ \
+ const __typeof__(((type *)0)->member ) *__mptr = (ptr); \
+ (type *)((char *)__mptr - offsetof(type,member) );})
+
+#define V4L_M2M_DEFAULT_OPTS \
+ { "device",\
+ "Path to the device to use",\
+ OFFSET(devname), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, FLAGS },\
+ { "num_output_buffers",\
+ "Number of buffers in the output context",\
+ OFFSET(output.num_buffers), AV_OPT_TYPE_INT, { .i64 = 16 }, 6, INT_MAX, FLAGS }
+
+typedef struct V4L2m2mContext
+{
+ AVClass *class;
+ int fd;
+ char *devname;
+
+ /* the codec context queues */
+ V4L2Context capture;
+ V4L2Context output;
+
+ /* refcount of buffers held by the user */
+ atomic_uint refcount;
+
+ /* dynamic stream reconfig */
+ AVCodecContext *avctx;
+ sem_t refsync;
+ int reinit;
+
+ /* null frame or packet received */
+ int draining;
+} V4L2m2mContext;
+
+int v4l2_m2m_codec_init(V4L2m2mContext *ctx, void* log_ctx);
+int v4l2_m2m_codec_reinit(V4L2m2mContext *ctx);
+int v4l2_m2m_codec_end(V4L2m2mContext *ctx);
+int v4l2_m2m_codec_format_context(V4L2Context *ctx, int set);
+
+
+#endif /* AVCODEC_V4L2_M2M_H */
diff --git a/libavcodec/v4l2_m2m_avcodec.h b/libavcodec/v4l2_m2m_avcodec.h
new file mode 100644
index 0000000..c6ad5d4
--- /dev/null
+++ b/libavcodec/v4l2_m2m_avcodec.h
@@ -0,0 +1,32 @@
+/*
+ * V4L2 mem2mem avcodec helper functions
+ *
+ * Copyright (C) 2017 Alexis Ballier <aballier at gentoo.org>
+ * Copyright (C) 2017 Jorge Ramirez <jorge.ramirez-ortiz at linaro.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_V4L2_M2M_AVCODEC_H
+#define AVCODEC_V4L2_M2M_AVCODEC_H
+
+#include "libavcodec/avcodec.h"
+
+int ff_v4l2m2m_codec_init(AVCodecContext *avctx);
+int ff_v4l2m2m_codec_end(AVCodecContext *avctx);
+
+#endif
diff --git a/libavcodec/v4l2_m2m_dec.c b/libavcodec/v4l2_m2m_dec.c
new file mode 100644
index 0000000..7a22238
--- /dev/null
+++ b/libavcodec/v4l2_m2m_dec.c
@@ -0,0 +1,213 @@
+/*
+ * V4L2 mem2mem decoders
+ *
+ * Copyright (C) 2017 Alexis Ballier <aballier at gentoo.org>
+ * Copyright (C) 2017 Jorge Ramirez <jorge.ramirez-ortiz at linaro.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/videodev2.h>
+#include <sys/ioctl.h>
+#include "libavutil/pixfmt.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/opt.h"
+#include "libavcodec/avcodec.h"
+#include "libavcodec/decode.h"
+#include "v4l2_m2m_avcodec.h"
+#include "v4l2_buffers.h"
+#include "v4l2_fmt.h"
+#include "v4l2_m2m.h"
+
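+/*
+ * Once the first packet has been queued, bring up the pipeline: stream on the
+ * output side, read back the capture format chosen by the driver, set the crop
+ * rectangle, allocate the capture buffers and stream on the capture side.
+ */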
+static int try_start(AVCodecContext *avctx)
+{
+ V4L2m2mContext *s = avctx->priv_data;
+ V4L2Context *const capture = &s->capture;
+ V4L2Context *const output = &s->output;
+ struct v4l2_event_subscription sub;
+ struct v4l2_selection selection;
+ int ret;
+
+ if (output->streamon && capture->streamon)
+ return 0;
+
+ /* 0. subscribe to source change event */
+ memset(&sub, 0, sizeof(sub));
+ sub.type = V4L2_EVENT_SOURCE_CHANGE;
+ ret = ioctl(s->fd, VIDIOC_SUBSCRIBE_EVENT, &sub);
+ if (ret < 0)
+ av_log(avctx, AV_LOG_WARNING, "decoding does not support resolution change\n");
+
+ /* 1. start the output process */
+ if (!output->streamon) {
+ ret = v4l2_context_set_status(output, VIDIOC_STREAMON);
+ if (ret < 0) {
+ av_log(avctx, AV_LOG_DEBUG, "VIDIOC_STREAMON on output context\n");
+ return ret;
+ }
+ }
+
+ /* 2. get the capture format */
+ capture->format.type = capture->type;
+ ret = ioctl(capture->fd, VIDIOC_G_FMT, &capture->format);
+ if (ret) {
+ av_log(avctx, AV_LOG_ERROR, "VIDIOC_G_FMT ioctl\n");
+ return ret;
+ }
+
+ /* 2.1 update the AVCodecContext */
+ avctx->pix_fmt = v4l2_v4l2fmt_to_avfmt(capture->format.fmt.pix_mp.pixelformat, AV_CODEC_ID_RAWVIDEO);
+ capture->av_pix_fmt = avctx->pix_fmt;
+
+ /* 3. set the crop parameters */
+ memset(&selection, 0, sizeof(selection));
+ selection.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ selection.r.height = avctx->coded_height;
+ selection.r.width = avctx->coded_width;
+ ret = ioctl(s->fd, VIDIOC_S_SELECTION, &selection);
+ if (!ret) {
+ ret = ioctl(s->fd, VIDIOC_G_SELECTION, &selection);
+ if (ret) {
+ av_log(avctx, AV_LOG_ERROR, "VIDIOC_G_SELECTION ioctl\n");
+ } else {
+ av_log(avctx, AV_LOG_DEBUG, "crop output %dx%d\n", selection.r.width, selection.r.height);
+ /* update the size of the resulting frame */
+ capture->height = selection.r.height;
+ capture->width = selection.r.width;
+ }
+ }
+
+ /* 4. init the capture context now that we have the capture format */
+ if (!capture->buffers) {
+ av_log(capture->log_ctx, AV_LOG_DEBUG, "%s requested (%dx%d)\n",
+ capture->name, capture->format.fmt.pix_mp.width, capture->format.fmt.pix_mp.height);
+
+ ret = v4l2_context_init(capture, 0);
+ if (ret) {
+ av_log(avctx, AV_LOG_DEBUG, "can't request output buffers\n");
+ return ret;
+ }
+ }
+
+ /* 5. start the capture process */
+ ret = v4l2_context_set_status(capture, VIDIOC_STREAMON);
+ if (ret) {
+ av_log(avctx, AV_LOG_DEBUG, "VIDIOC_STREAMON, on capture context\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static av_cold int v4l2m2m_decode_init(AVCodecContext *avctx)
+{
+ V4L2m2mContext *s = avctx->priv_data;
+ V4L2Context *capture = &s->capture;
+ V4L2Context *output = &s->output;
+
+ output->height = capture->height = avctx->coded_height;
+ output->width = capture->width = avctx->coded_width;
+
+ output->av_codec_id = avctx->codec_id;
+ output->av_pix_fmt = AV_PIX_FMT_NONE;
+
+ /*
+ * the buffers associated with this context cannot be initialized without
+ * additional information available in the kernel driver, so postpone
+ * requesting them until more is known about the incoming frames
+ */
+ capture->lazy_init = 1;
+ capture->av_codec_id = AV_CODEC_ID_RAWVIDEO;
+ capture->av_pix_fmt = avctx->pix_fmt;
+
+ return ff_v4l2m2m_codec_init(avctx);
+}
+
+/* In ffmpeg a single thread may be queueing and dequeueing buffers, so a
+ * timeout is required when retrieving a frame in case the driver has not
+ * received enough input to start generating output.
+ *
+ * Once decoding starts, the timeout should not be hit.
+ */
+static int v4l2m2m_receive_frame(AVCodecContext *avctx, AVFrame *frame)
+{
+ V4L2m2mContext *s = avctx->priv_data;
+ V4L2Context *const capture = &s->capture;
+ V4L2Context *const output = &s->output;
+ AVPacket avpkt = {0};
+ int timeout = 50;
+ int ret;
+
+ ret = ff_decode_get_packet(avctx, &avpkt);
+ if (ret < 0 && ret != AVERROR_EOF)
+ return ret;
+
+ if (s->draining)
+ goto dequeue;
+
+ ret = v4l2_enqueue_packet(output, &avpkt);
+ if (ret < 0)
+ return ret;
+
+ if (avpkt.size) {
+ ret = try_start(avctx);
+ if (ret)
+ return 0;
+ }
+
+dequeue:
+ return v4l2_dequeue_frame(capture, frame, timeout);
+}
+
+#define OFFSET(x) offsetof(V4L2m2mContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
+
+static const AVOption options[] = {
+ V4L_M2M_DEFAULT_OPTS,{ "num_capture_extra_buffers","Number of extra buffers in the capture context",
+ OFFSET(capture.num_buffers), AV_OPT_TYPE_INT,{.i64 = 6}, 6, INT_MAX, FLAGS},
+ { NULL},
+};
+
+#define M2MDEC(NAME, LONGNAME, CODEC, bsf_name) \
+static const AVClass v4l2_m2m_ ## NAME ## _dec_class = {\
+ .class_name = #NAME "_v4l2_m2m_decoder",\
+ .item_name = av_default_item_name,\
+ .option = options,\
+ .version = LIBAVUTIL_VERSION_INT,\
+};\
+\
+AVCodec ff_ ## NAME ## _v4l2m2m_decoder = { \
+ .name = #NAME "_v4l2m2m" ,\
+ .long_name = NULL_IF_CONFIG_SMALL("V4L2 mem2mem " LONGNAME " decoder wrapper"),\
+ .type = AVMEDIA_TYPE_VIDEO,\
+ .id = CODEC ,\
+ .priv_data_size = sizeof(V4L2m2mContext),\
+ .priv_class = &v4l2_m2m_ ## NAME ## _dec_class,\
+ .init = v4l2m2m_decode_init,\
+ .receive_frame = v4l2m2m_receive_frame,\
+ .close = ff_v4l2m2m_codec_end,\
+ .bsfs = bsf_name, \
+};
+
+M2MDEC(h264, "H.264", AV_CODEC_ID_H264, "h264_mp4toannexb");
+M2MDEC(hevc, "HEVC", AV_CODEC_ID_HEVC, "hevc_mp4toannexb");
+M2MDEC(mpeg1, "MPEG1", AV_CODEC_ID_MPEG1VIDEO, NULL);
+M2MDEC(mpeg2, "MPEG2", AV_CODEC_ID_MPEG2VIDEO, NULL);
+M2MDEC(mpeg4, "MPEG4", AV_CODEC_ID_MPEG4, NULL);
+M2MDEC(h263, "H.263", AV_CODEC_ID_H263, NULL);
+M2MDEC(vc1, "VC1", AV_CODEC_ID_VC1, NULL);
+M2MDEC(vp8, "VP8", AV_CODEC_ID_VP8, NULL);
+M2MDEC(vp9, "VP9", AV_CODEC_ID_VP9, NULL);
+
diff --git a/libavcodec/v4l2_m2m_enc.c b/libavcodec/v4l2_m2m_enc.c
new file mode 100644
index 0000000..1ab8b47
--- /dev/null
+++ b/libavcodec/v4l2_m2m_enc.c
@@ -0,0 +1,332 @@
+/*
+ * V4L2 mem2mem encoders
+ *
+ * Copyright (C) 2017 Alexis Ballier <aballier at gentoo.org>
+ * Copyright (C) 2017 Jorge Ramirez <jorge.ramirez-ortiz at linaro.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <linux/videodev2.h>
+#include <sys/ioctl.h>
+#include "libavcodec/avcodec.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/pixfmt.h"
+#include "libavutil/opt.h"
+#include "v4l2_m2m_avcodec.h"
+#include "v4l2_buffers.h"
+#include "v4l2_m2m.h"
+
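+/* shorthand for the V4L2 MPEG control and value identifiers used below */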
+#define MPEG_CID(x) V4L2_CID_MPEG_VIDEO_##x
+#define MPEG_VIDEO(x) V4L2_MPEG_VIDEO_##x
+
+static inline void v4l2_set_timeperframe(V4L2m2mContext *s, unsigned int num, unsigned int den)
+{
+ struct v4l2_streamparm parm = { 0 };
+
+ parm.parm.output.timeperframe.denominator = den;
+ parm.parm.output.timeperframe.numerator = num;
+ parm.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+
+ if (ioctl(s->fd, VIDIOC_S_PARM, &parm) < 0)
+ av_log(s->avctx, AV_LOG_WARNING, "Failed to set timeperframe");
+}
+
+static inline void v4l2_set_ext_ctrl(V4L2m2mContext *s, unsigned int id, signed int value, const char *name)
+{
+ struct v4l2_ext_controls ctrls = { 0 };
+ struct v4l2_ext_control ctrl = { 0 };
+
+ /* set ctrls */
+ ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
+ ctrls.controls = &ctrl;
+ ctrls.count = 1;
+
+ /* set ctrl */
+ ctrl.value = value;
+ ctrl.id = id;
+
+ if (ioctl(s->fd, VIDIOC_S_EXT_CTRLS, &ctrls) < 0)
+ av_log(s->avctx, AV_LOG_WARNING, "Failed to set %s\n", name);
+ else
+ av_log(s->avctx, AV_LOG_DEBUG, "Encoder: %s = %d\n", name, value);
+}
+
+static inline int v4l2_get_ext_ctrl(V4L2m2mContext *s, unsigned int id, signed int *value, const char *name)
+{
+ struct v4l2_ext_controls ctrls = { 0 };
+ struct v4l2_ext_control ctrl = { 0 };
+ int ret;
+
+ /* set ctrls */
+ ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
+ ctrls.controls = &ctrl;
+ ctrls.count = 1;
+
+ /* set ctrl */
+ ctrl.id = id;
+
+ ret = ioctl(s->fd, VIDIOC_G_EXT_CTRLS, &ctrls);
+ if (ret < 0) {
+ av_log(s->avctx, AV_LOG_WARNING, "Failed to set %s\n", name);
+ return ret;
+ }
+
+ *value = ctrl.value;
+
+ return 0;
+}
+
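+/*
+ * Map FFmpeg profile constants to the corresponding V4L2 control values;
+ * FF_PROFILE_UNKNOWN is returned when there is no match.
+ */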
+static inline int v4l2_h264_profile_from_ff(int p)
+{
+ struct h264_profile {
+ unsigned int ffmpeg_val;
+ unsigned int v4l2_val;
+ } profile[] = {
+ { FF_PROFILE_H264_CONSTRAINED_BASELINE, MPEG_VIDEO(H264_PROFILE_CONSTRAINED_BASELINE) },
+ { FF_PROFILE_H264_HIGH_444_PREDICTIVE, MPEG_VIDEO(H264_PROFILE_HIGH_444_PREDICTIVE) },
+ { FF_PROFILE_H264_HIGH_422_INTRA, MPEG_VIDEO(H264_PROFILE_HIGH_422_INTRA) },
+ { FF_PROFILE_H264_HIGH_444_INTRA, MPEG_VIDEO(H264_PROFILE_HIGH_444_INTRA) },
+ { FF_PROFILE_H264_HIGH_10_INTRA, MPEG_VIDEO(H264_PROFILE_HIGH_10_INTRA) },
+ { FF_PROFILE_H264_HIGH_422, MPEG_VIDEO(H264_PROFILE_HIGH_422) },
+ { FF_PROFILE_H264_BASELINE, MPEG_VIDEO(H264_PROFILE_BASELINE) },
+ { FF_PROFILE_H264_EXTENDED, MPEG_VIDEO(H264_PROFILE_EXTENDED) },
+ { FF_PROFILE_H264_HIGH_10, MPEG_VIDEO(H264_PROFILE_HIGH_10) },
+ { FF_PROFILE_H264_MAIN, MPEG_VIDEO(H264_PROFILE_MAIN) },
+ { FF_PROFILE_H264_HIGH, MPEG_VIDEO(H264_PROFILE_HIGH) },
+ };
+ int i;
+
+ for (i = 0; i < sizeof(profile)/sizeof(profile[0]); i++) {
+ if (profile[i].ffmpeg_val == p)
+ return profile[i].v4l2_val;
+ }
+
+ return FF_PROFILE_UNKNOWN;
+}
+
+static inline int v4l2_mpeg4_profile_from_ff(int p)
+{
+ struct mpeg4_profile {
+ unsigned int ffmpeg_val;
+ unsigned int v4l2_val;
+ } profile[] = {
+ { FF_PROFILE_MPEG4_ADVANCED_CODING, MPEG_VIDEO(MPEG4_PROFILE_ADVANCED_CODING_EFFICIENCY) },
+ { FF_PROFILE_MPEG4_ADVANCED_SIMPLE, MPEG_VIDEO(MPEG4_PROFILE_ADVANCED_SIMPLE) },
+ { FF_PROFILE_MPEG4_SIMPLE_SCALABLE, MPEG_VIDEO(MPEG4_PROFILE_SIMPLE_SCALABLE) },
+ { FF_PROFILE_MPEG4_SIMPLE, MPEG_VIDEO(MPEG4_PROFILE_SIMPLE) },
+ { FF_PROFILE_MPEG4_CORE, MPEG_VIDEO(MPEG4_PROFILE_CORE) },
+ };
+ int i;
+
+ for (i = 0; i < sizeof(profile)/sizeof(profile[0]); i++) {
+ if (profile[i].ffmpeg_val == p)
+ return profile[i].v4l2_val;
+ }
+
+ return FF_PROFILE_UNKNOWN;
+}
+
+static int check_b_frame_support(V4L2m2mContext *s)
+{
+ if (s->avctx->max_b_frames)
+ av_log(s->avctx, AV_LOG_WARNING, "Encoder does not support b-frames yet\n");
+
+ v4l2_set_ext_ctrl(s, MPEG_CID(B_FRAMES), 0, "number of B-frames");
+
+ v4l2_get_ext_ctrl(s, MPEG_CID(B_FRAMES), &s->avctx->max_b_frames, "number of B-frames");
+ if (s->avctx->max_b_frames == 0)
+ return 0;
+
+ avpriv_report_missing_feature(s->avctx, "DTS/PTS calculation for V4L2 encoding");
+ return AVERROR_PATCHWELCOME;
+}
+
+static av_cold int v4l2m2m_encode_init(AVCodecContext *avctx)
+{
+ V4L2m2mContext *s = avctx->priv_data;
+ V4L2Context *capture = &s->capture;
+ V4L2Context *output = &s->output;
+ int qmin_cid, qmax_cid, ret, val;
+ int qmin, qmax;
+
+ /* common settings output/capture */
+ output->height = capture->height = avctx->height;
+ output->width = capture->width = avctx->width;
+
+ /* output context */
+ output->av_codec_id = AV_CODEC_ID_RAWVIDEO;
+ output->av_pix_fmt = avctx->pix_fmt;
+
+ /* capture context */
+ capture->av_codec_id = avctx->codec_id;
+ capture->av_pix_fmt = AV_PIX_FMT_NONE;
+
+ ret = ff_v4l2m2m_codec_init(avctx);
+ if (ret)
+ return ret;
+
+ ret = check_b_frame_support(s);
+ if (ret)
+ return ret;
+
+ /* set params */
+ v4l2_set_timeperframe(s, avctx->framerate.num, avctx->framerate.den);
+
+ /* set ext ctrls */
+ v4l2_set_ext_ctrl(s, MPEG_CID(HEADER_MODE), MPEG_VIDEO(HEADER_MODE_SEPARATE), "header mode");
+ v4l2_set_ext_ctrl(s, MPEG_CID(BITRATE) , avctx->bit_rate, "bit rate");
+ v4l2_set_ext_ctrl(s, MPEG_CID(GOP_SIZE), avctx->gop_size,"gop size");
+
+ av_log(avctx, AV_LOG_DEBUG, "Encoder Context: id (%d), profile (%d), frame rate(%d/%d), number b-frames (%d), "
+ "gop size (%d), bit rate (%ld), qmin (%d), qmax (%d)\n",
+ avctx->codec_id, avctx->profile, avctx->framerate.num, avctx->framerate.den,
+ avctx->max_b_frames, avctx->gop_size, avctx->bit_rate, avctx->qmin, avctx->qmax);
+
+ switch (avctx->codec_id) {
+ case AV_CODEC_ID_H264:
+ val = v4l2_h264_profile_from_ff(avctx->profile);
+ if (val != FF_PROFILE_UNKNOWN)
+ v4l2_set_ext_ctrl(s, MPEG_CID(H264_PROFILE), val, "h264 profile");
+ else
+ av_log(avctx, AV_LOG_WARNING, "h264 profile unknown)\n");
+ qmin_cid = MPEG_CID(H264_MIN_QP);
+ qmax_cid = MPEG_CID(H264_MAX_QP);
+
+ qmin = 0;
+ qmax = 51;
+ break;
+ case AV_CODEC_ID_MPEG4:
+ val = v4l2_mpeg4_profile_from_ff(avctx->profile);
+ if (val != FF_PROFILE_UNKNOWN)
+ v4l2_set_ext_ctrl(s, MPEG_CID(MPEG4_PROFILE), val, "mpeg4 profile");
+ else
+ av_log(avctx, AV_LOG_WARNING, "mpeg4 profile unknown)\n");
+ qmin_cid = MPEG_CID(MPEG4_MIN_QP);
+ qmax_cid = MPEG_CID(MPEG4_MAX_QP);
+ if (avctx->flags & AV_CODEC_FLAG_QPEL)
+ v4l2_set_ext_ctrl(s, MPEG_CID(MPEG4_QPEL), 1, "qpel");
+ qmax = 51;
+ qmin = 0;
+ break;
+ case AV_CODEC_ID_H263:
+ qmin_cid = MPEG_CID(H263_MIN_QP);
+ qmax_cid = MPEG_CID(H263_MAX_QP);
+ qmin = 1;
+ qmax = 31;
+ break;
+ case AV_CODEC_ID_VP8:
+ qmin_cid = MPEG_CID(VPX_MIN_QP);
+ qmax_cid = MPEG_CID(VPX_MAX_QP);
+ qmin = 0;
+ qmax = 127;
+ break;
+ case AV_CODEC_ID_VP9:
+ qmin_cid = MPEG_CID(VPX_MIN_QP);
+ qmax_cid = MPEG_CID(VPX_MAX_QP);
+ qmin = 0;
+ qmax = 255;
+ break;
+ default:
+ return 0;
+ }
+
+ if (qmin != avctx->qmin || qmax != avctx->qmax)
+ av_log(avctx, AV_LOG_WARNING, "Encoder adjusted: qmin (%d), qmax (%d)\n", qmin, qmax);
+
+ v4l2_set_ext_ctrl(s, qmin_cid, qmin, "minimum video quantizer scale");
+ v4l2_set_ext_ctrl(s, qmax_cid, qmax, "maximum video quantizer scale");
+
+ return 0;
+}
+
+static int v4l2m2m_send_frame(AVCodecContext *avctx, const AVFrame *frame)
+{
+ V4L2m2mContext *s = avctx->priv_data;
+ V4L2Context *const output = &s->output;
+
+ return v4l2_enqueue_frame(output, frame);
+}
+
+/* Send and receive frame happen on the same thread, hence the need for a polling timeout */
+static int v4l2m2m_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
+{
+ V4L2m2mContext *s = avctx->priv_data;
+ V4L2Context *const capture = &s->capture;
+ V4L2Context *const output = &s->output;
+ unsigned int timeout = 50;
+ int ret;
+
+ if (s->draining)
+ goto dequeue;
+
+ if (!output->streamon) {
+ ret = v4l2_context_set_status(output, VIDIOC_STREAMON);
+ if (ret) {
+ av_log(avctx, AV_LOG_ERROR, "VIDIOC_STREAMOFF failed on output context\n");
+ return ret;
+ }
+ }
+
+ if (!capture->streamon) {
+ ret = v4l2_context_set_status(capture, VIDIOC_STREAMON);
+ if (ret) {
+ av_log(avctx, AV_LOG_ERROR, "VIDIOC_STREAMON failed on capture context\n");
+ return ret;
+ }
+ }
+
+dequeue:
+ return v4l2_dequeue_packet(capture, avpkt, timeout);
+}
+
+#define OFFSET(x) offsetof(V4L2m2mContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
+
+static const AVOption options[] = {
+ V4L_M2M_DEFAULT_OPTS,
+ { "num_capture_buffers", "Number of buffers in the capture context",
+ OFFSET(capture.num_buffers), AV_OPT_TYPE_INT, {.i64 = 4 }, 4, INT_MAX, FLAGS },
+ { NULL },
+};
+
+#define M2MENC(NAME, LONGNAME, CODEC) \
+static const AVClass v4l2_m2m_ ## NAME ## _enc_class = {\
+ .class_name = #NAME "_v4l2_m2m_encoder",\
+ .item_name = av_default_item_name,\
+ .option = options,\
+ .version = LIBAVUTIL_VERSION_INT,\
+};\
+\
+AVCodec ff_ ## NAME ## _v4l2m2m_encoder = { \
+ .name = #NAME "_v4l2m2m" ,\
+ .long_name = NULL_IF_CONFIG_SMALL("V4L2 mem2mem " LONGNAME " encoder wrapper"),\
+ .type = AVMEDIA_TYPE_VIDEO,\
+ .id = CODEC ,\
+ .priv_data_size = sizeof(V4L2m2mContext),\
+ .priv_class = &v4l2_m2m_ ## NAME ## _enc_class,\
+ .init = v4l2m2m_encode_init,\
+ .send_frame = v4l2m2m_send_frame,\
+ .receive_packet = v4l2m2m_receive_packet,\
+ .close = ff_v4l2m2m_codec_end,\
+};
+
+M2MENC(mpeg4,"MPEG4", AV_CODEC_ID_MPEG4);
+M2MENC(h263, "H.263", AV_CODEC_ID_H263);
+M2MENC(h264, "H.264", AV_CODEC_ID_H264);
+M2MENC(hevc, "HEVC", AV_CODEC_ID_HEVC);
+M2MENC(vp8, "VP8", AV_CODEC_ID_VP8);
--
2.7.4