[FFmpeg-devel] [PATCH v2 8/8] avfilter: add vsrc_ddagrab
Gyan Doshi
ffmpeg at gyani.pro
Sat Jul 9 08:01:23 EEST 2022
On 2022-07-09 04:24 am, Timo Rothenpieler wrote:
> ---
> Changelog | 1 +
> configure | 7 +
> doc/filters.texi | 68 ++
> libavfilter/Makefile | 1 +
> libavfilter/allfilters.c | 1 +
> libavfilter/version.h | 2 +-
> libavfilter/vsrc_ddagrab.c | 980 +++++++++++++++++++++++++++++
> libavfilter/vsrc_ddagrab_shaders.h | 120 ++++
> 8 files changed, 1179 insertions(+), 1 deletion(-)
> create mode 100644 libavfilter/vsrc_ddagrab.c
> create mode 100644 libavfilter/vsrc_ddagrab_shaders.h
>
> diff --git a/Changelog b/Changelog
> index 1a7c84b7f8..9467e92dd1 100644
> --- a/Changelog
> +++ b/Changelog
> @@ -24,6 +24,7 @@ version 5.1:
> - VDPAU AV1 hwaccel
> - PHM image format support
> - remap_opencl filter
> +- ddagrab (Desktop Duplication) video source filter
The source filters are all generative filters. This should be called a
capture filter.
Why is it a filter instead of in lavd?
Regards,
Gyan
>
>
> version 5.0:
> diff --git a/configure b/configure
> index 7d5c4900bf..5a1794ebdc 100755
> --- a/configure
> +++ b/configure
> @@ -2309,6 +2309,7 @@ SYSTEM_FUNCS="
> SetDllDirectory
> setmode
> setrlimit
> + SetThreadDpiAwarenessContext
> Sleep
> strerror_r
> sysconf
> @@ -2352,6 +2353,7 @@ TOOLCHAIN_FEATURES="
> "
>
> TYPES_LIST="
> + IDXGIOutput5
> kCMVideoCodecType_HEVC
> kCMVideoCodecType_HEVCWithAlpha
> kCMVideoCodecType_VP9
> @@ -3154,6 +3156,8 @@ overlay_cuda_filter_deps="ffnvcodec"
> overlay_cuda_filter_deps_any="cuda_nvcc cuda_llvm"
> sharpen_npp_filter_deps="ffnvcodec libnpp"
>
> +ddagrab_filter_deps="d3d11va IDXGIOutput1"
> +
> amf_deps_any="libdl LoadLibrary"
> nvenc_deps="ffnvcodec"
> nvenc_deps_any="libdl LoadLibrary"
> @@ -6389,10 +6393,13 @@ check_struct "sys/time.h sys/resource.h" "struct rusage" ru_maxrss
> check_type "windows.h dxva.h" "DXVA_PicParams_AV1" -DWINAPI_FAMILY=WINAPI_FAMILY_DESKTOP_APP -D_CRT_BUILD_DESKTOP_APP=0
> check_type "windows.h dxva.h" "DXVA_PicParams_HEVC" -DWINAPI_FAMILY=WINAPI_FAMILY_DESKTOP_APP -D_CRT_BUILD_DESKTOP_APP=0
> check_type "windows.h dxva.h" "DXVA_PicParams_VP9" -DWINAPI_FAMILY=WINAPI_FAMILY_DESKTOP_APP -D_CRT_BUILD_DESKTOP_APP=0
> +check_type "windows.h dxgi1_2.h" "IDXGIOutput1"
> +check_type "windows.h dxgi1_5.h" "IDXGIOutput5"
> check_type "windows.h d3d11.h" "ID3D11VideoDecoder"
> check_type "windows.h d3d11.h" "ID3D11VideoContext"
> check_type "d3d9.h dxva2api.h" DXVA2_ConfigPictureDecode -D_WIN32_WINNT=0x0602
> check_func_headers mfapi.h MFCreateAlignedMemoryBuffer -lmfplat
> +check_func_headers windows.h SetThreadDpiAwarenessContext -D_WIN32_WINNT=0x0A00
>
> check_type "vdpau/vdpau.h" "VdpPictureInfoHEVC"
> check_type "vdpau/vdpau.h" "VdpPictureInfoVP9"
> diff --git a/doc/filters.texi b/doc/filters.texi
> index 296b0693ae..bb651112f6 100644
> --- a/doc/filters.texi
> +++ b/doc/filters.texi
> @@ -26390,6 +26390,74 @@ need for a nullsrc video source.
> @end itemize
>
>
> +@section ddagrab
> +
> +Captures the Windows Desktop via the Desktop Duplication API.
> +
> +The filter exclusively returns D3D11 hardware frames, intended for on-GPU
> +encoding or processing, so an explicit @ref{hwdownload} is needed for any
> +kind of software processing.
> +
> +It accepts the following options:
> +
> +@table @option
> +@item output_idx
> +DXGI Output Index to capture.
> +
> +Usually corresponds to the index Windows assigns to the screen, minus one,
> +so it starts at 0.
> +
> +Defaults to output 0.
> +
> +@item draw_mouse
> +Whether to draw the mouse cursor.
> +
> +Defaults to true.
> +
> +Only affects hardware cursors. If a game or application renders its own cursor,
> +it will always be captured, regardless of this option.
> +
> +@item framerate
> +Framerate at which the desktop will be captured.
> +
> +Defaults to 30 FPS.
> +
> +@item video_size
> +Specify the size of the captured video.
> +
> +Defaults to the full size of the screen.
> +
> +Cropped from the bottom/right if smaller than screen size.
> +
> +@item offset_x
> +Horizontal offset of the captured video.
> +
> +@item offset_y
> +Vertical offset of the captured video.
> +
> +@end table
> +
> +@subsection Examples
> +
> +Capture primary screen and encode using nvenc:
> +@example
> +ffmpeg -f lavfi -i ddagrab -c:v h264_nvenc -cq 18 output.mp4
> +@end example
> +
> +You can also skip the lavfi device and use the filter directly in a
> +filtergraph. This example also demonstrates downloading the frames and
> +encoding them with libx264; an explicit output format specification is
> +required in this case:
> +@example
> +ffmpeg -filter_complex ddagrab=output_idx=1:framerate=60,hwdownload,format=bgra -c:v libx264 -crf 18 output.mp4
> +@end example
> +
> +If you want to capture only a subsection of the desktop, this can be achieved
> +by specifying a smaller size and its offsets into the screen:
> +@example
> +ddagrab=video_size=800x600:offset_x=100:offset_y=100
> +@end example
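> +
> +Mouse cursor drawing can be disabled if only the desktop content is wanted,
> +for example combined with the software download shown above:
> +@example
> +ddagrab=draw_mouse=0,hwdownload,format=bgra
> +@end example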
> +
> +
> @section gradients
> Generate several gradients.
>
> diff --git a/libavfilter/Makefile b/libavfilter/Makefile
> index 139f7cb751..e161196d87 100644
> --- a/libavfilter/Makefile
> +++ b/libavfilter/Makefile
> @@ -555,6 +555,7 @@ OBJS-$(CONFIG_COLOR_FILTER) += vsrc_testsrc.o
> OBJS-$(CONFIG_COLORCHART_FILTER) += vsrc_testsrc.o
> OBJS-$(CONFIG_COLORSPECTRUM_FILTER) += vsrc_testsrc.o
> OBJS-$(CONFIG_COREIMAGESRC_FILTER) += vf_coreimage.o
> +OBJS-$(CONFIG_DDAGRAB_FILTER) += vsrc_ddagrab.o
> OBJS-$(CONFIG_FREI0R_SRC_FILTER) += vf_frei0r.o
> OBJS-$(CONFIG_GRADIENTS_FILTER) += vsrc_gradients.o
> OBJS-$(CONFIG_HALDCLUTSRC_FILTER) += vsrc_testsrc.o
> diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c
> index 3018850b4b..bd3d2c2d47 100644
> --- a/libavfilter/allfilters.c
> +++ b/libavfilter/allfilters.c
> @@ -523,6 +523,7 @@ extern const AVFilter ff_vsrc_color;
> extern const AVFilter ff_vsrc_colorchart;
> extern const AVFilter ff_vsrc_colorspectrum;
> extern const AVFilter ff_vsrc_coreimagesrc;
> +extern const AVFilter ff_vsrc_ddagrab;
> extern const AVFilter ff_vsrc_frei0r_src;
> extern const AVFilter ff_vsrc_gradients;
> extern const AVFilter ff_vsrc_haldclutsrc;
> diff --git a/libavfilter/version.h b/libavfilter/version.h
> index 37f03b44a6..2f4f4c6c21 100644
> --- a/libavfilter/version.h
> +++ b/libavfilter/version.h
> @@ -31,7 +31,7 @@
>
> #include "version_major.h"
>
> -#define LIBAVFILTER_VERSION_MINOR 42
> +#define LIBAVFILTER_VERSION_MINOR 43
> #define LIBAVFILTER_VERSION_MICRO 100
>
>
> diff --git a/libavfilter/vsrc_ddagrab.c b/libavfilter/vsrc_ddagrab.c
> new file mode 100644
> index 0000000000..fdcb7515ec
> --- /dev/null
> +++ b/libavfilter/vsrc_ddagrab.c
> @@ -0,0 +1,980 @@
> +/*
> + * This file is part of FFmpeg.
> + *
> + * FFmpeg is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU Lesser General Public
> + * License as published by the Free Software Foundation; either
> + * version 2.1 of the License, or (at your option) any later version.
> + *
> + * FFmpeg is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
> + * Lesser General Public License for more details.
> + *
> + * You should have received a copy of the GNU Lesser General Public
> + * License along with FFmpeg; if not, write to the Free Software
> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
> + */
> +
> +#include "config.h"
> +
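> +// SetThreadDpiAwarenessContext() and the DPI_AWARENESS_CONTEXT handles need a
> +// Windows 10 (0x0A00) API level, so raise _WIN32_WINNT if necessary.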
> +#if !defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0A00
> +#undef _WIN32_WINNT
> +#define _WIN32_WINNT 0x0A00
> +#endif
> +#define WIN32_LEAN_AND_MEAN
> +
> +#include <windows.h>
> +
> +#define COBJMACROS
> +
> +#include <initguid.h>
> +#include <d3d11.h>
> +#include <dxgi1_2.h>
> +#if HAVE_IDXGIOUTPUT5
> +#include <dxgi1_5.h>
> +#endif
> +
> +#include "libavutil/opt.h"
> +#include "libavutil/time.h"
> +#include "libavutil/avstring.h"
> +#include "libavutil/avassert.h"
> +#include "libavutil/hwcontext.h"
> +#include "libavutil/hwcontext_d3d11va.h"
> +#include "avfilter.h"
> +#include "internal.h"
> +#include "formats.h"
> +#include "video.h"
> +
> +#include "vsrc_ddagrab_shaders.h"
> +
> +// avutil/time.h takes and returns time in microseconds
> +#define TIMER_RES 1000000
> +#define TIMER_RES64 INT64_C(1000000)
> +
> +typedef struct DdagrabContext {
> + const AVClass *class;
> +
> + AVBufferRef *device_ref;
> + AVHWDeviceContext *device_ctx;
> + AVD3D11VADeviceContext *device_hwctx;
> +
> + AVBufferRef *frames_ref;
> + AVHWFramesContext *frames_ctx;
> + AVD3D11VAFramesContext *frames_hwctx;
> +
> + DXGI_OUTPUT_DESC output_desc;
> + IDXGIOutputDuplication *dxgi_outdupl;
> + AVFrame *last_frame;
> +
> + int mouse_x, mouse_y;
> + ID3D11Texture2D *mouse_texture;
> +    ID3D11ShaderResourceView *mouse_resource_view;
> +
> + AVRational time_base;
> + int64_t time_frame;
> + int64_t time_timeout;
> + int64_t first_pts;
> +
> + DXGI_FORMAT raw_format;
> + int raw_width;
> + int raw_height;
> +
> + ID3D11Texture2D *probed_texture;
> +
> + ID3D11VertexShader *vertex_shader;
> + ID3D11InputLayout *input_layout;
> + ID3D11PixelShader *pixel_shader;
> + ID3D11Buffer *const_buffer;
> + ID3D11SamplerState *sampler_state;
> + ID3D11BlendState *blend_state;
> +
> + int output_idx;
> + int draw_mouse;
> + AVRational framerate;
> + int width;
> + int height;
> + int offset_x;
> + int offset_y;
> +} DdagrabContext;
> +
> +#define OFFSET(x) offsetof(DdagrabContext, x)
> +#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
> +static const AVOption ddagrab_options[] = {
> + { "output_idx", "dda output index to capture", OFFSET(output_idx), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, FLAGS },
> + { "draw_mouse", "draw the mouse pointer", OFFSET(draw_mouse), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, FLAGS },
> + { "framerate", "set video frame rate", OFFSET(framerate), AV_OPT_TYPE_VIDEO_RATE, { .str = "30" }, 0, INT_MAX, FLAGS },
> + { "video_size", "set video frame size", OFFSET(width), AV_OPT_TYPE_IMAGE_SIZE, { .str = NULL }, 0, 0, FLAGS },
> + { "offset_x", "capture area x offset", OFFSET(offset_x), AV_OPT_TYPE_INT, { .i64 = 0 }, INT_MIN, INT_MAX, FLAGS },
> + { "offset_y", "capture area y offset", OFFSET(offset_y), AV_OPT_TYPE_INT, { .i64 = 0 }, INT_MIN, INT_MAX, FLAGS },
> + { NULL }
> +};
> +
> +AVFILTER_DEFINE_CLASS(ddagrab);
> +
> +static inline void release_resource(void *resource)
> +{
> + IUnknown **resp = (IUnknown**)resource;
> + if (*resp) {
> + IUnknown_Release(*resp);
> + *resp = NULL;
> + }
> +}
> +
> +static av_cold void ddagrab_uninit(AVFilterContext *avctx)
> +{
> + DdagrabContext *dda = avctx->priv;
> +
> + release_resource(&dda->blend_state);
> + release_resource(&dda->sampler_state);
> + release_resource(&dda->pixel_shader);
> + release_resource(&dda->input_layout);
> + release_resource(&dda->vertex_shader);
> + release_resource(&dda->const_buffer);
> +
> + release_resource(&dda->probed_texture);
> +
> + release_resource(&dda->dxgi_outdupl);
> + release_resource(&dda->mouse_resource_view);
> + release_resource(&dda->mouse_texture);
> +
> + av_frame_free(&dda->last_frame);
> + av_buffer_unref(&dda->frames_ref);
> + av_buffer_unref(&dda->device_ref);
> +}
> +
> +static av_cold int init_dxgi_dda(AVFilterContext *avctx)
> +{
> + DdagrabContext *dda = avctx->priv;
> + IDXGIDevice *dxgi_device = NULL;
> + IDXGIAdapter *dxgi_adapter = NULL;
> + IDXGIOutput *dxgi_output = NULL;
> + IDXGIOutput1 *dxgi_output1 = NULL;
> +#if HAVE_IDXGIOUTPUT5 && HAVE_SETTHREADDPIAWARENESSCONTEXT
> + IDXGIOutput5 *dxgi_output5 = NULL;
> +#endif
> + HRESULT hr;
> +
> + hr = ID3D11Device_QueryInterface(dda->device_hwctx->device, &IID_IDXGIDevice, (void**)&dxgi_device);
> + if (FAILED(hr)) {
> + av_log(avctx, AV_LOG_ERROR, "Failed querying IDXGIDevice\n");
> + return AVERROR_EXTERNAL;
> + }
> +
> + hr = IDXGIDevice_GetParent(dxgi_device, &IID_IDXGIAdapter, (void**)&dxgi_adapter);
> + IDXGIDevice_Release(dxgi_device);
> + dxgi_device = NULL;
> + if (FAILED(hr)) {
> + av_log(avctx, AV_LOG_ERROR, "Failed getting parent IDXGIAdapter\n");
> + return AVERROR_EXTERNAL;
> + }
> +
> + hr = IDXGIAdapter_EnumOutputs(dxgi_adapter, dda->output_idx, &dxgi_output);
> + IDXGIAdapter_Release(dxgi_adapter);
> + dxgi_adapter = NULL;
> + if (FAILED(hr)) {
> + av_log(avctx, AV_LOG_ERROR, "Failed to enumerate DXGI output %d\n", dda->output_idx);
> + return AVERROR_EXTERNAL;
> + }
> +
> + hr = IDXGIOutput_GetDesc(dxgi_output, &dda->output_desc);
> + if (FAILED(hr)) {
> + IDXGIOutput_Release(dxgi_output);
> + av_log(avctx, AV_LOG_ERROR, "Failed getting output description\n");
> + return AVERROR_EXTERNAL;
> + }
> +
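> +    // Prefer IDXGIOutput5 and DuplicateOutput1 when available: it allows
> +    // requesting specific texture formats (incl. 10 bit) and, together with
> +    // per-monitor DPI awareness, avoids DPI scaling of the captured image.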
> +#if HAVE_IDXGIOUTPUT5 && HAVE_SETTHREADDPIAWARENESSCONTEXT
> + hr = IDXGIOutput_QueryInterface(dxgi_output, &IID_IDXGIOutput5, (void**)&dxgi_output5);
> + if (SUCCEEDED(hr)) {
> + DPI_AWARENESS_CONTEXT prev_dpi_ctx;
> + DXGI_FORMAT formats[] = {
> + DXGI_FORMAT_R10G10B10A2_UNORM,
> + DXGI_FORMAT_B8G8R8A8_UNORM
> + };
> +
> + IDXGIOutput_Release(dxgi_output);
> + dxgi_output = NULL;
> +
> + prev_dpi_ctx = SetThreadDpiAwarenessContext(DPI_AWARENESS_CONTEXT_PER_MONITOR_AWARE_V2);
> + if (!prev_dpi_ctx)
> + av_log(avctx, AV_LOG_WARNING, "Failed enabling DPI awareness for DDA\n");
> +
> + hr = IDXGIOutput5_DuplicateOutput1(dxgi_output5,
> + (IUnknown*)dda->device_hwctx->device,
> + 0,
> + FF_ARRAY_ELEMS(formats),
> + formats,
> + &dda->dxgi_outdupl);
> + IDXGIOutput5_Release(dxgi_output5);
> + dxgi_output5 = NULL;
> +
> + if (prev_dpi_ctx)
> + SetThreadDpiAwarenessContext(prev_dpi_ctx);
> + } else {
> + av_log(avctx, AV_LOG_DEBUG, "Falling back to IDXGIOutput1\n");
> +#else
> + {
> +#endif
> + hr = IDXGIOutput_QueryInterface(dxgi_output, &IID_IDXGIOutput1, (void**)&dxgi_output1);
> + IDXGIOutput_Release(dxgi_output);
> + dxgi_output = NULL;
> + if (FAILED(hr)) {
> + av_log(avctx, AV_LOG_ERROR, "Failed querying IDXGIOutput1\n");
> + return AVERROR_EXTERNAL;
> + }
> +
> + hr = IDXGIOutput1_DuplicateOutput(dxgi_output1,
> + (IUnknown*)dda->device_hwctx->device,
> + &dda->dxgi_outdupl);
> + IDXGIOutput1_Release(dxgi_output1);
> + dxgi_output1 = NULL;
> + }
> +
> + if (hr == DXGI_ERROR_NOT_CURRENTLY_AVAILABLE) {
> + av_log(avctx, AV_LOG_ERROR, "Too many open duplication sessions\n");
> + return AVERROR(EBUSY);
> + } else if (hr == DXGI_ERROR_UNSUPPORTED) {
> + av_log(avctx, AV_LOG_ERROR, "Selected output not supported\n");
> + return AVERROR_EXTERNAL;
> + } else if (hr == E_INVALIDARG) {
> + av_log(avctx, AV_LOG_ERROR, "Invalid output duplication argument\n");
> + return AVERROR(EINVAL);
> + } else if (hr == E_ACCESSDENIED) {
> + av_log(avctx, AV_LOG_ERROR, "Desktop duplication access denied\n");
> + return AVERROR(EPERM);
> + } else if (FAILED(hr)) {
> + av_log(avctx, AV_LOG_ERROR, "Failed duplicating output\n");
> + return AVERROR_EXTERNAL;
> + }
> +
> + dda->raw_width = dda->output_desc.DesktopCoordinates.right - dda->output_desc.DesktopCoordinates.left;
> + dda->raw_height = dda->output_desc.DesktopCoordinates.bottom - dda->output_desc.DesktopCoordinates.top;
> + av_log(avctx, AV_LOG_VERBOSE, "Opened dxgi output %d with dimensions %dx%d\n",
> + dda->output_idx, dda->raw_width, dda->raw_height);
> +
> + return 0;
> +}
> +
> +typedef struct ConstBufferData
> +{
> + float width;
> + float height;
> +
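> +    // pads the struct to 16 bytes; D3D11 constant buffers must have a size
> +    // that is a multiple of 16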
> + uint64_t padding;
> +} ConstBufferData;
> +
> +static const D3D11_INPUT_ELEMENT_DESC vertex_shader_input_layout[] =
> +{
> + { "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0 },
> + { "TEXCOORD", 0, DXGI_FORMAT_R32G32_FLOAT, 0, 12, D3D11_INPUT_PER_VERTEX_DATA, 0 }
> +};
> +
> +static av_cold int init_render_resources(AVFilterContext *avctx)
> +{
> + DdagrabContext *dda = avctx->priv;
> + ID3D11Device *dev = dda->device_hwctx->device;
> + D3D11_SAMPLER_DESC sampler_desc = { 0 };
> + D3D11_BLEND_DESC blend_desc = { 0 };
> + D3D11_BUFFER_DESC buffer_desc = { 0 };
> + D3D11_SUBRESOURCE_DATA buffer_data = { 0 };
> + ConstBufferData const_data = { 0 };
> + HRESULT hr;
> +
> + hr = ID3D11Device_CreateVertexShader(dev,
> + vertex_shader_bytes,
> + FF_ARRAY_ELEMS(vertex_shader_bytes),
> + NULL,
> + &dda->vertex_shader);
> + if (FAILED(hr)) {
> + av_log(avctx, AV_LOG_ERROR, "CreateVertexShader failed: %lx\n", hr);
> + return AVERROR_EXTERNAL;
> + }
> +
> + hr = ID3D11Device_CreateInputLayout(dev,
> + vertex_shader_input_layout,
> + FF_ARRAY_ELEMS(vertex_shader_input_layout),
> + vertex_shader_bytes,
> + FF_ARRAY_ELEMS(vertex_shader_bytes),
> + &dda->input_layout);
> + if (FAILED(hr)) {
> + av_log(avctx, AV_LOG_ERROR, "CreateInputLayout failed: %lx\n", hr);
> + return AVERROR_EXTERNAL;
> + }
> +
> + hr = ID3D11Device_CreatePixelShader(dev,
> + pixel_shader_bytes,
> + FF_ARRAY_ELEMS(pixel_shader_bytes),
> + NULL,
> + &dda->pixel_shader);
> + if (FAILED(hr)) {
> + av_log(avctx, AV_LOG_ERROR, "CreatePixelShader failed: %lx\n", hr);
> + return AVERROR_EXTERNAL;
> + }
> +
> + const_data = (ConstBufferData){ dda->width, dda->height };
> +
> + buffer_data.pSysMem = &const_data;
> + buffer_desc.ByteWidth = sizeof(const_data);
> + buffer_desc.Usage = D3D11_USAGE_IMMUTABLE;
> + buffer_desc.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
> + hr = ID3D11Device_CreateBuffer(dev,
> + &buffer_desc,
> + &buffer_data,
> + &dda->const_buffer);
> + if (FAILED(hr)) {
> + av_log(avctx, AV_LOG_ERROR, "CreateBuffer const buffer failed: %lx\n", hr);
> + return AVERROR_EXTERNAL;
> + }
> +
> + sampler_desc.Filter = D3D11_FILTER_MIN_MAG_MIP_LINEAR;
> + sampler_desc.AddressU = D3D11_TEXTURE_ADDRESS_CLAMP;
> + sampler_desc.AddressV = D3D11_TEXTURE_ADDRESS_CLAMP;
> + sampler_desc.AddressW = D3D11_TEXTURE_ADDRESS_CLAMP;
> + sampler_desc.ComparisonFunc = D3D11_COMPARISON_NEVER;
> + hr = ID3D11Device_CreateSamplerState(dev,
> + &sampler_desc,
> + &dda->sampler_state);
> + if (FAILED(hr)) {
> + av_log(avctx, AV_LOG_ERROR, "CreateSamplerState failed: %lx\n", hr);
> + return AVERROR_EXTERNAL;
> + }
> +
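> +    // standard source-over alpha blending, so the alpha channel of the cursor
> +    // texture controls how it is composited onto the captured frame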
> + blend_desc.AlphaToCoverageEnable = FALSE;
> + blend_desc.IndependentBlendEnable = FALSE;
> + blend_desc.RenderTarget[0].BlendEnable = TRUE;
> + blend_desc.RenderTarget[0].SrcBlend = D3D11_BLEND_SRC_ALPHA;
> + blend_desc.RenderTarget[0].DestBlend = D3D11_BLEND_INV_SRC_ALPHA;
> + blend_desc.RenderTarget[0].BlendOp = D3D11_BLEND_OP_ADD;
> + blend_desc.RenderTarget[0].SrcBlendAlpha = D3D11_BLEND_ONE;
> + blend_desc.RenderTarget[0].DestBlendAlpha = D3D11_BLEND_ZERO;
> + blend_desc.RenderTarget[0].BlendOpAlpha = D3D11_BLEND_OP_ADD;
> + blend_desc.RenderTarget[0].RenderTargetWriteMask = D3D11_COLOR_WRITE_ENABLE_ALL;
> + hr = ID3D11Device_CreateBlendState(dev,
> + &blend_desc,
> + &dda->blend_state);
> + if (FAILED(hr)) {
> + av_log(avctx, AV_LOG_ERROR, "CreateBlendState failed: %lx\n", hr);
> + return AVERROR_EXTERNAL;
> + }
> +
> + return 0;
> +}
> +
> +static av_cold int ddagrab_init(AVFilterContext *avctx)
> +{
> + DdagrabContext *dda = avctx->priv;
> + int ret = 0;
> +
> + if (avctx->hw_device_ctx) {
> + dda->device_ctx = (AVHWDeviceContext*)avctx->hw_device_ctx->data;
> +
> + if (dda->device_ctx->type != AV_HWDEVICE_TYPE_D3D11VA) {
> + av_log(avctx, AV_LOG_ERROR, "Non-D3D11VA input hw_device_ctx\n");
> + return AVERROR(EINVAL);
> + }
> +
> + dda->device_ref = av_buffer_ref(avctx->hw_device_ctx);
> + if (!dda->device_ref)
> + return AVERROR(ENOMEM);
> +
> + av_log(avctx, AV_LOG_VERBOSE, "Using provided hw_device_ctx\n");
> + } else {
> + ret = av_hwdevice_ctx_create(&dda->device_ref, AV_HWDEVICE_TYPE_D3D11VA, NULL, NULL, 0);
> + if (ret < 0) {
> + av_log(avctx, AV_LOG_ERROR, "Failed to create D3D11VA device.\n");
> + return ret;
> + }
> +
> + dda->device_ctx = (AVHWDeviceContext*)dda->device_ref->data;
> +
> + av_log(avctx, AV_LOG_VERBOSE, "Created internal hw_device_ctx\n");
> + }
> +
> + dda->device_hwctx = (AVD3D11VADeviceContext*)dda->device_ctx->hwctx;
> +
> + ret = init_dxgi_dda(avctx);
> + if (ret < 0)
> + goto fail;
> +
> + if (dda->width <= 0)
> + dda->width = dda->raw_width;
> + if (dda->height <= 0)
> + dda->height = dda->raw_height;
> +
> + dda->width -= FFMAX(dda->width - dda->raw_width + dda->offset_x, 0);
> + dda->height -= FFMAX(dda->height - dda->raw_height + dda->offset_y, 0);
> +
> + dda->time_base = av_inv_q(dda->framerate);
> + dda->time_frame = av_gettime_relative() / av_q2d(dda->time_base);
> + dda->time_timeout = av_rescale_q(1, dda->time_base, (AVRational) { 1, 1000 }) / 2;
> +
> + dda->last_frame = av_frame_alloc();
> + if (!dda->last_frame) {
> + ret = AVERROR(ENOMEM);
> + goto fail;
> + }
> +
> + dda->mouse_x = -1;
> + dda->mouse_y = -1;
> +
> + if (dda->draw_mouse) {
> + ret = init_render_resources(avctx);
> + if (ret < 0)
> + goto fail;
> + }
> +
> + return 0;
> +fail:
> + ddagrab_uninit(avctx);
> + return ret;
> +}
> +
> +static int create_d3d11_pointer_tex(AVFilterContext *avctx,
> + uint8_t *buf,
> + DXGI_OUTDUPL_POINTER_SHAPE_INFO *shape_info,
> + ID3D11Texture2D **out_tex,
> + ID3D11ShaderResourceView **res_view)
> +{
> + DdagrabContext *dda = avctx->priv;
> + D3D11_TEXTURE2D_DESC desc = { 0 };
> + D3D11_SUBRESOURCE_DATA init_data = { 0 };
> + D3D11_SHADER_RESOURCE_VIEW_DESC resource_desc = { 0 };
> + HRESULT hr;
> +
> + desc.MipLevels = 1;
> + desc.ArraySize = 1;
> + desc.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
> + desc.SampleDesc.Count = 1;
> + desc.SampleDesc.Quality = 0;
> + desc.Usage = D3D11_USAGE_IMMUTABLE;
> + desc.BindFlags = D3D11_BIND_SHADER_RESOURCE;
> +
> + desc.Width = shape_info->Width;
> + desc.Height = shape_info->Height;
> +
> + init_data.pSysMem = buf;
> + init_data.SysMemPitch = shape_info->Pitch;
> +
> + resource_desc.Format = desc.Format;
> + resource_desc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D;
> + resource_desc.Texture2D.MostDetailedMip = 0;
> + resource_desc.Texture2D.MipLevels = 1;
> +
> + hr = ID3D11Device_CreateTexture2D(dda->device_hwctx->device,
> + &desc,
> + &init_data,
> + out_tex);
> + if (FAILED(hr)) {
> + av_log(avctx, AV_LOG_ERROR, "Failed creating pointer texture\n");
> + return AVERROR_EXTERNAL;
> + }
> +
> + hr = ID3D11Device_CreateShaderResourceView(dda->device_hwctx->device,
> + (ID3D11Resource*)dda->mouse_texture,
> + &resource_desc,
> + res_view);
> + if (FAILED(hr)) {
> + release_resource(out_tex);
> + av_log(avctx, AV_LOG_ERROR, "CreateShaderResourceView for mouse failed: %lx\n", hr);
> + return AVERROR_EXTERNAL;
> + }
> +
> + return 0;
> +}
> +
> +static uint8_t *convert_mono_buffer(uint8_t *input, int *_width, int *_height, int *_pitch)
> +{
> + int width = *_width, height = *_height, pitch = *_pitch;
> + int real_height = height / 2;
> + uint8_t *output = av_malloc(real_height * width * 4);
> + int y, x;
> +
> + if (!output)
> + return NULL;
> +
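> +    // Monochrome pointer shapes contain two 1 bpp masks stacked vertically:
> +    // the AND mask in the top half and the XOR mask in the bottom half.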
> + // This simulates drawing the cursor on a full black surface
> + // i.e. ignore the AND mask, turn XOR mask into all 4 color channels
> + for (y = 0; y < real_height; y++) {
> + for (x = 0; x < width; x++) {
> + int v = input[(real_height + y) * pitch + (x / 8)];
> + v = (v >> (7 - (x % 8))) & 1;
> + memset(&output[4 * ((y*width) + x)], v ? 0xFF : 0, 4);
> + }
> + }
> +
> + *_pitch = width * 4;
> + *_height = real_height;
> +
> + return output;
> +}
> +
> +static void fixup_color_mask(uint8_t *buf, int width, int height, int pitch)
> +{
> + int x, y;
> +    // There is no good way to replicate XOR'ing parts of the texture with the screen;
> +    // the best effort is to render the non-masked parts and make the rest transparent
> + for (y = 0; y < height; y++) {
> + for (x = 0; x < width; x++) {
> + int pos = (y*pitch) + (4*x) + 3;
> + buf[pos] = buf[pos] ? 0 : 0xFF;
> + }
> + }
> +}
> +
> +static int update_mouse_pointer(AVFilterContext *avctx, DXGI_OUTDUPL_FRAME_INFO *frame_info)
> +{
> + DdagrabContext *dda = avctx->priv;
> + HRESULT hr;
> + int ret;
> +
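> +    // a LastMouseUpdateTime of zero indicates that the pointer was not updated
> +    // since the last acquired frame, so the cached position/shape stays valid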
> + if (frame_info->LastMouseUpdateTime.QuadPart == 0)
> + return 0;
> +
> + if (frame_info->PointerPosition.Visible) {
> + dda->mouse_x = frame_info->PointerPosition.Position.x;
> + dda->mouse_y = frame_info->PointerPosition.Position.y;
> + } else {
> + dda->mouse_x = dda->mouse_y = -1;
> + }
> +
> + if (frame_info->PointerShapeBufferSize) {
> + UINT size = frame_info->PointerShapeBufferSize;
> + DXGI_OUTDUPL_POINTER_SHAPE_INFO shape_info;
> + uint8_t *buf = av_malloc(size);
> + if (!buf)
> + return AVERROR(ENOMEM);
> +
> + hr = IDXGIOutputDuplication_GetFramePointerShape(dda->dxgi_outdupl,
> + size,
> + buf,
> + &size,
> + &shape_info);
> + if (FAILED(hr)) {
> + av_free(buf);
> + av_log(avctx, AV_LOG_ERROR, "Failed getting pointer shape: %lx\n", hr);
> + return AVERROR_EXTERNAL;
> + }
> +
> + if (shape_info.Type == DXGI_OUTDUPL_POINTER_SHAPE_TYPE_MONOCHROME) {
> + uint8_t *new_buf = convert_mono_buffer(buf, &shape_info.Width, &shape_info.Height, &shape_info.Pitch);
> + av_free(buf);
> + if (!new_buf)
> + return AVERROR(ENOMEM);
> + buf = new_buf;
> + } else if (shape_info.Type == DXGI_OUTDUPL_POINTER_SHAPE_TYPE_MASKED_COLOR) {
> + fixup_color_mask(buf, shape_info.Width, shape_info.Height, shape_info.Pitch);
> + } else if (shape_info.Type != DXGI_OUTDUPL_POINTER_SHAPE_TYPE_COLOR) {
> + av_log(avctx, AV_LOG_WARNING, "Unsupported pointer shape type: %d\n", (int)shape_info.Type);
> + av_free(buf);
> + return 0;
> + }
> +
> + release_resource(&dda->mouse_resource_view);
> + release_resource(&dda->mouse_texture);
> +
> + ret = create_d3d11_pointer_tex(avctx, buf, &shape_info, &dda->mouse_texture, &dda->mouse_resource_view);
> + av_freep(&buf);
> + if (ret < 0)
> + return ret;
> +
> + av_log(avctx, AV_LOG_VERBOSE, "Updated pointer shape texture\n");
> + }
> +
> + return 0;
> +}
> +
> +static int next_frame_internal(AVFilterContext *avctx, ID3D11Texture2D **desktop_texture)
> +{
> + DXGI_OUTDUPL_FRAME_INFO frame_info;
> + DdagrabContext *dda = avctx->priv;
> + IDXGIResource *desktop_resource = NULL;
> + HRESULT hr;
> + int ret;
> +
> + hr = IDXGIOutputDuplication_AcquireNextFrame(
> + dda->dxgi_outdupl,
> + dda->time_timeout,
> + &frame_info,
> + &desktop_resource);
> + if (hr == DXGI_ERROR_WAIT_TIMEOUT) {
> + return AVERROR(EAGAIN);
> + } else if (FAILED(hr)) {
> + av_log(avctx, AV_LOG_ERROR, "AcquireNextFrame failed: %lx\n", hr);
> + return AVERROR_EXTERNAL;
> + }
> +
> + if (dda->draw_mouse) {
> + ret = update_mouse_pointer(avctx, &frame_info);
> + if (ret < 0)
> + return ret;
> + }
> +
> + hr = IDXGIResource_QueryInterface(desktop_resource, &IID_ID3D11Texture2D, (void**)desktop_texture);
> + IDXGIResource_Release(desktop_resource);
> + desktop_resource = NULL;
> + if (FAILED(hr)) {
> + av_log(avctx, AV_LOG_ERROR, "DXGIResource QueryInterface failed\n");
> + return AVERROR_EXTERNAL;
> + }
> +
> + return 0;
> +}
> +
> +static int probe_output_format(AVFilterContext *avctx)
> +{
> + DdagrabContext *dda = avctx->priv;
> + D3D11_TEXTURE2D_DESC desc;
> + int ret;
> +
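> +    // the texture format DDA delivers (8 or 10 bit) is only known once a frame
> +    // has been acquired, so grab one up front and keep it for the first output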
> + av_assert1(!dda->probed_texture);
> +
> + do {
> + ret = next_frame_internal(avctx, &dda->probed_texture);
> + } while(ret == AVERROR(EAGAIN));
> + if (ret < 0)
> + return ret;
> +
> + ID3D11Texture2D_GetDesc(dda->probed_texture, &desc);
> +
> + dda->raw_format = desc.Format;
> +
> + return 0;
> +}
> +
> +static av_cold int init_hwframes_ctx(AVFilterContext *avctx)
> +{
> + DdagrabContext *dda = avctx->priv;
> + int ret = 0;
> +
> + dda->frames_ref = av_hwframe_ctx_alloc(dda->device_ref);
> + if (!dda->frames_ref)
> + return AVERROR(ENOMEM);
> + dda->frames_ctx = (AVHWFramesContext*)dda->frames_ref->data;
> + dda->frames_hwctx = (AVD3D11VAFramesContext*)dda->frames_ctx->hwctx;
> +
> + dda->frames_ctx->format = AV_PIX_FMT_D3D11;
> + dda->frames_ctx->width = dda->width;
> + dda->frames_ctx->height = dda->height;
> +
> + switch (dda->raw_format) {
> + case DXGI_FORMAT_B8G8R8A8_UNORM:
> + av_log(avctx, AV_LOG_VERBOSE, "Probed 8 bit RGB frame format\n");
> + dda->frames_ctx->sw_format = AV_PIX_FMT_BGRA;
> + break;
> + case DXGI_FORMAT_R10G10B10A2_UNORM:
> + av_log(avctx, AV_LOG_VERBOSE, "Probed 10 bit RGB frame format\n");
> + dda->frames_ctx->sw_format = AV_PIX_FMT_X2BGR10;
> + break;
> + default:
> + av_log(avctx, AV_LOG_ERROR, "Unexpected texture output format!\n");
> + return AVERROR_BUG;
> + }
> +
> + if (dda->draw_mouse)
> + dda->frames_hwctx->BindFlags |= D3D11_BIND_RENDER_TARGET;
> +
> + ret = av_hwframe_ctx_init(dda->frames_ref);
> + if (ret < 0) {
> + av_log(avctx, AV_LOG_ERROR, "Failed to initialise hardware frames context: %d.\n", ret);
> + goto fail;
> + }
> +
> + return 0;
> +fail:
> + av_buffer_unref(&dda->frames_ref);
> + return ret;
> +}
> +
> +static int ddagrab_config_props(AVFilterLink *outlink)
> +{
> + AVFilterContext *avctx = outlink->src;
> + DdagrabContext *dda = avctx->priv;
> + int ret;
> +
> + outlink->w = dda->width;
> + outlink->h = dda->height;
> + outlink->time_base = (AVRational){1, TIMER_RES};
> + outlink->frame_rate = dda->framerate;
> +
> + ret = probe_output_format(avctx);
> + if (ret < 0)
> + return ret;
> +
> + ret = init_hwframes_ctx(avctx);
> + if (ret < 0)
> + return ret;
> +
> + outlink->hw_frames_ctx = av_buffer_ref(dda->frames_ref);
> + if (!outlink->hw_frames_ctx)
> + return AVERROR(ENOMEM);
> +
> + return 0;
> +}
> +
> +static int draw_mouse_pointer(AVFilterContext *avctx, AVFrame *frame)
> +{
> + DdagrabContext *dda = avctx->priv;
> + ID3D11DeviceContext *devctx = dda->device_hwctx->device_context;
> + ID3D11Texture2D *frame_tex = (ID3D11Texture2D*)frame->data[0];
> + D3D11_RENDER_TARGET_VIEW_DESC target_desc = { 0 };
> + ID3D11RenderTargetView* target_view = NULL;
> + ID3D11Buffer *mouse_vertex_buffer = NULL;
> + D3D11_TEXTURE2D_DESC tex_desc;
> + int num_vertices = 0;
> + int x, y;
> + HRESULT hr;
> + int ret = 0;
> +
> + if (!dda->mouse_texture || dda->mouse_x < 0 || dda->mouse_y < 0)
> + return 0;
> +
> + ID3D11Texture2D_GetDesc(dda->mouse_texture, &tex_desc);
> +
> + x = dda->mouse_x - dda->offset_x;
> + y = dda->mouse_y - dda->offset_y;
> +
> + if (x >= dda->width || y >= dda->height ||
> + -x >= (int)tex_desc.Width || -y >= (int)tex_desc.Height)
> + return 0;
> +
> + target_desc.Format = dda->raw_format;
> + target_desc.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2D;
> + target_desc.Texture2D.MipSlice = 0;
> +
> + hr = ID3D11Device_CreateRenderTargetView(dda->device_hwctx->device,
> + (ID3D11Resource*)frame_tex,
> + &target_desc,
> + &target_view);
> + if (FAILED(hr)) {
> + av_log(avctx, AV_LOG_ERROR, "CreateRenderTargetView failed: %lx\n", hr);
> + ret = AVERROR_EXTERNAL;
> + goto end;
> + }
> +
> + ID3D11DeviceContext_ClearState(devctx);
> +
> + {
> + D3D11_VIEWPORT viewport = { 0 };
> + viewport.Width = dda->width;
> + viewport.Height = dda->height;
> + viewport.MinDepth = 0.0f;
> + viewport.MaxDepth = 1.0f;
> +
> + ID3D11DeviceContext_RSSetViewports(devctx, 1, &viewport);
> + }
> +
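> +    // the cursor is drawn as a 4-vertex triangle strip covering its rectangle
> +    // in output pixel coordinates; the vertex shader maps those to clip space
> +    // using the frame dimensions from the constant buffer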
> + {
> + FLOAT vertices[] = {
> + // x, y, z, u, v
> + x , y + tex_desc.Height, 0.0f, 0.0f, 1.0f,
> + x , y , 0.0f, 0.0f, 0.0f,
> + x + tex_desc.Width, y + tex_desc.Height, 0.0f, 1.0f, 1.0f,
> + x + tex_desc.Width, y , 0.0f, 1.0f, 0.0f,
> + };
> + UINT stride = sizeof(FLOAT) * 5;
> + UINT offset = 0;
> +
> + D3D11_SUBRESOURCE_DATA init_data = { 0 };
> + D3D11_BUFFER_DESC buf_desc = { 0 };
> +
> + num_vertices = sizeof(vertices) / (sizeof(FLOAT) * 5);
> +
> + buf_desc.Usage = D3D11_USAGE_DEFAULT;
> + buf_desc.BindFlags = D3D11_BIND_VERTEX_BUFFER;
> + buf_desc.ByteWidth = sizeof(vertices);
> + init_data.pSysMem = vertices;
> +
> + hr = ID3D11Device_CreateBuffer(dda->device_hwctx->device,
> + &buf_desc,
> + &init_data,
> + &mouse_vertex_buffer);
> + if (FAILED(hr)) {
> + av_log(avctx, AV_LOG_ERROR, "CreateBuffer failed: %lx\n", hr);
> + ret = AVERROR_EXTERNAL;
> + goto end;
> + }
> +
> + ID3D11DeviceContext_IASetVertexBuffers(devctx, 0, 1, &mouse_vertex_buffer, &stride, &offset);
> + ID3D11DeviceContext_IASetInputLayout(devctx, dda->input_layout);
> + ID3D11DeviceContext_IASetPrimitiveTopology(devctx, D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP);
> + }
> +
> + ID3D11DeviceContext_VSSetShader(devctx, dda->vertex_shader, NULL, 0);
> + ID3D11DeviceContext_VSSetConstantBuffers(devctx, 0, 1, &dda->const_buffer);
> + ID3D11DeviceContext_PSSetSamplers(devctx, 0, 1, &dda->sampler_state);
> + ID3D11DeviceContext_PSSetShaderResources(devctx, 0, 1, &dda->mouse_resource_view);
> + ID3D11DeviceContext_PSSetShader(devctx, dda->pixel_shader, NULL, 0);
> +
> + ID3D11DeviceContext_OMSetBlendState(devctx, dda->blend_state, NULL, 0xFFFFFFFF);
> + ID3D11DeviceContext_OMSetRenderTargets(devctx, 1, &target_view, NULL);
> +
> + ID3D11DeviceContext_Draw(devctx, num_vertices, 0);
> +
> +end:
> + release_resource(&mouse_vertex_buffer);
> + release_resource(&target_view);
> +
> + return ret;
> +}
> +
> +static int ddagrab_request_frame(AVFilterLink *outlink)
> +{
> + AVFilterContext *avctx = outlink->src;
> + DdagrabContext *dda = avctx->priv;
> +
> + ID3D11Texture2D *cur_texture = NULL;
> + D3D11_TEXTURE2D_DESC desc = { 0 };
> + D3D11_BOX box = { 0 };
> +
> + int64_t time_frame = dda->time_frame;
> + int64_t now, delay;
> + AVFrame *frame = NULL;
> + HRESULT hr;
> + int ret;
> +
> + /* time_frame is in units of microseconds divided by the time_base.
> + * This means that adding a clean 1M to it is the equivalent of adding
> + * 1M*time_base microseconds to it, except it avoids all rounding error.
> + * The only time rounding error occurs is when multiplying to calculate
> + * the delay. So any rounding error there corrects itself over time.
> + */
> + time_frame += TIMER_RES64;
> + for (;;) {
> + now = av_gettime_relative();
> + delay = time_frame * av_q2d(dda->time_base) - now;
> + if (delay <= 0) {
> + if (delay < -TIMER_RES64 * av_q2d(dda->time_base)) {
> + time_frame += TIMER_RES64;
> + }
> + break;
> + }
> + av_usleep(delay);
> + }
> +
> + if (!dda->first_pts)
> + dda->first_pts = now;
> + now -= dda->first_pts;
> +
> + if (!dda->probed_texture) {
> + ret = next_frame_internal(avctx, &cur_texture);
> + } else {
> + cur_texture = dda->probed_texture;
> + dda->probed_texture = NULL;
> + ret = 0;
> + }
> +
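> +    // if no new desktop image was available within the timeout, repeat the
> +    // previously captured frame to keep up a constant output framerate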
> + if (ret == AVERROR(EAGAIN) && dda->last_frame->buf[0]) {
> + frame = av_frame_alloc();
> + if (!frame)
> + return AVERROR(ENOMEM);
> +
> + ret = av_frame_ref(frame, dda->last_frame);
> + if (ret < 0) {
> + av_frame_free(&frame);
> + return ret;
> + }
> +
> + av_log(avctx, AV_LOG_DEBUG, "Duplicated output frame\n");
> +
> + goto frame_done;
> + } else if (ret == AVERROR(EAGAIN)) {
> + av_log(avctx, AV_LOG_VERBOSE, "Initial DDA AcquireNextFrame timeout!\n");
> + return AVERROR(EAGAIN);
> + } else if (ret < 0) {
> + return ret;
> + }
> +
> + // AcquireNextFrame sometimes has bursts of delay.
> + // This increases accuracy of the timestamp, but might upset consumers due to more jittery framerate?
> + now = av_gettime_relative() - dda->first_pts;
> +
> + ID3D11Texture2D_GetDesc(cur_texture, &desc);
> + if (desc.Format != dda->raw_format ||
> + (int)desc.Width != dda->raw_width ||
> + (int)desc.Height != dda->raw_height) {
> + av_log(avctx, AV_LOG_ERROR, "Output parameters changed!");
> + ret = AVERROR_OUTPUT_CHANGED;
> + goto fail;
> + }
> +
> + frame = ff_get_video_buffer(outlink, dda->width, dda->height);
> + if (!frame) {
> + ret = AVERROR(ENOMEM);
> + goto fail;
> + }
> +
> + box.left = dda->offset_x;
> + box.top = dda->offset_y;
> + box.right = box.left + dda->width;
> + box.bottom = box.top + dda->height;
> + box.front = 0;
> + box.back = 1;
> +
> + ID3D11DeviceContext_CopySubresourceRegion(
> + dda->device_hwctx->device_context,
> + (ID3D11Resource*)frame->data[0], (UINT)(intptr_t)frame->data[1],
> + 0, 0, 0,
> + (ID3D11Resource*)cur_texture, 0,
> + &box);
> +
> + release_resource(&cur_texture);
> +
> + hr = IDXGIOutputDuplication_ReleaseFrame(dda->dxgi_outdupl);
> + if (FAILED(hr)) {
> + av_log(avctx, AV_LOG_ERROR, "DDA ReleaseFrame failed!\n");
> + ret = AVERROR_EXTERNAL;
> + goto fail;
> + }
> +
> + if (dda->draw_mouse) {
> + ret = draw_mouse_pointer(avctx, frame);
> + if (ret < 0)
> + goto fail;
> + }
> +
> + frame->sample_aspect_ratio = (AVRational){1, 1};
> +
> + av_frame_unref(dda->last_frame);
> + ret = av_frame_ref(dda->last_frame, frame);
> + if (ret < 0)
> + return ret;
> +
> +frame_done:
> + frame->pts = now;
> + dda->time_frame = time_frame;
> +
> + return ff_filter_frame(outlink, frame);
> +
> +fail:
> + if (frame)
> + av_frame_free(&frame);
> +
> + if (cur_texture)
> + IDXGIOutputDuplication_ReleaseFrame(dda->dxgi_outdupl);
> +
> + release_resource(&cur_texture);
> + return ret;
> +}
> +
> +static const AVFilterPad ddagrab_outputs[] = {
> + {
> + .name = "default",
> + .type = AVMEDIA_TYPE_VIDEO,
> + .request_frame = ddagrab_request_frame,
> + .config_props = ddagrab_config_props,
> + },
> +};
> +
> +const AVFilter ff_vsrc_ddagrab = {
> + .name = "ddagrab",
> + .description = NULL_IF_CONFIG_SMALL("Grab Windows Desktop images using Desktop Duplication API"),
> + .priv_size = sizeof(DdagrabContext),
> + .priv_class = &ddagrab_class,
> + .init = ddagrab_init,
> + .uninit = ddagrab_uninit,
> + .inputs = NULL,
> + FILTER_OUTPUTS(ddagrab_outputs),
> + FILTER_SINGLE_PIXFMT(AV_PIX_FMT_D3D11),
> + .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
> +};
> diff --git a/libavfilter/vsrc_ddagrab_shaders.h b/libavfilter/vsrc_ddagrab_shaders.h
> new file mode 100644
> index 0000000000..0305f8de1c
> --- /dev/null
> +++ b/libavfilter/vsrc_ddagrab_shaders.h
> @@ -0,0 +1,120 @@
> +/*
> + * This file is part of FFmpeg.
> + *
> + * FFmpeg is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU Lesser General Public
> + * License as published by the Free Software Foundation; either
> + * version 2.1 of the License, or (at your option) any later version.
> + *
> + * FFmpeg is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
> + * Lesser General Public License for more details.
> + *
> + * You should have received a copy of the GNU Lesser General Public
> + * License along with FFmpeg; if not, write to the Free Software
> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
> + */
> +
> +#ifndef AVFILTER_VSRC_DDAGRAB_SHADERS_H
> +#define AVFILTER_VSRC_DDAGRAB_SHADERS_H
> +
> +#if 0
> +
> +cbuffer PARAMS : register ( b0 )
> +{
> + float2 Dimensions;
> +};
> +
> +struct VS_INPUT
> +{
> + float3 Pos : POSITION;
> + float2 Tex : TEXCOORD;
> +};
> +
> +struct VS_OUTPUT
> +{
> + float4 Pos : SV_POSITION;
> + float2 Tex : TEXCOORD;
> +};
> +
> +VS_OUTPUT VS(VS_INPUT input)
> +{
> + VS_OUTPUT output;
> + float2 center = Dimensions / 2;
> + output.Pos = float4((input.Pos.xy - center) / center, input.Pos.z, 1.0f);
> + output.Pos.y *= -1;
> + output.Tex = input.Tex;
> + return output;
> +}
> +
> +Texture2D tx : register( t0 );
> +SamplerState samLinear : register( s0 );
> +
> +float4 PS(VS_OUTPUT input) : SV_Target
> +{
> + return tx.Sample(samLinear, input.Tex);
> +}
> +
> +#endif
> +
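> +/* Pre-compiled D3D bytecode of the HLSL vertex and pixel shaders above. */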
> +static const uint8_t vertex_shader_bytes[] =
> +{
> + 68, 88, 66, 67, 207, 194, 142, 193, 255, 85, 32, 72, 116, 77, 242, 140, 26, 229, 67, 69, 1, 0, 0, 0,
> + 40, 3, 0, 0, 4, 0, 0, 0, 48, 0, 0, 0, 56, 1, 0, 0, 124, 2, 0, 0, 208, 2, 0, 0,
> + 65, 111, 110, 57, 0, 1, 0, 0, 0, 1, 0, 0, 0, 2, 254, 255, 204, 0, 0, 0, 52, 0, 0, 0,
> + 1, 0, 36, 0, 0, 0, 48, 0, 0, 0, 48, 0, 0, 0, 36, 0, 1, 0, 48, 0, 0, 0, 0, 0,
> + 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 254, 255, 81, 0, 0, 5, 2, 0, 15, 160,
> + 0, 0, 0, 63, 0, 0, 128, 63, 0, 0, 0, 0, 0, 0, 0, 0, 31, 0, 0, 2, 5, 0, 0, 128,
> + 0, 0, 15, 144, 31, 0, 0, 2, 5, 0, 1, 128, 1, 0, 15, 144, 1, 0, 0, 2, 0, 0, 1, 128,
> + 2, 0, 0, 160, 4, 0, 0, 4, 0, 0, 6, 128, 1, 0, 208, 160, 0, 0, 0, 129, 0, 0, 208, 144,
> + 5, 0, 0, 3, 0, 0, 9, 128, 0, 0, 0, 128, 1, 0, 100, 160, 6, 0, 0, 2, 1, 0, 1, 128,
> + 0, 0, 0, 128, 6, 0, 0, 2, 1, 0, 2, 128, 0, 0, 255, 128, 5, 0, 0, 3, 0, 0, 3, 128,
> + 0, 0, 233, 128, 1, 0, 228, 128, 1, 0, 0, 2, 0, 0, 4, 128, 0, 0, 85, 129, 2, 0, 0, 3,
> + 0, 0, 3, 192, 0, 0, 232, 128, 0, 0, 228, 160, 4, 0, 0, 4, 0, 0, 12, 192, 0, 0, 170, 144,
> + 2, 0, 148, 160, 2, 0, 100, 160, 1, 0, 0, 2, 0, 0, 3, 224, 1, 0, 228, 144, 255, 255, 0, 0,
> + 83, 72, 68, 82, 60, 1, 0, 0, 64, 0, 1, 0, 79, 0, 0, 0, 89, 0, 0, 4, 70, 142, 32, 0,
> + 0, 0, 0, 0, 1, 0, 0, 0, 95, 0, 0, 3, 114, 16, 16, 0, 0, 0, 0, 0, 95, 0, 0, 3,
> + 50, 16, 16, 0, 1, 0, 0, 0, 103, 0, 0, 4, 242, 32, 16, 0, 0, 0, 0, 0, 1, 0, 0, 0,
> + 101, 0, 0, 3, 50, 32, 16, 0, 1, 0, 0, 0, 104, 0, 0, 2, 1, 0, 0, 0, 56, 0, 0, 11,
> + 50, 0, 16, 0, 0, 0, 0, 0, 70, 128, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 64, 0, 0,
> + 0, 0, 0, 63, 0, 0, 0, 63, 0, 0, 0, 0, 0, 0, 0, 0, 50, 0, 0, 14, 194, 0, 16, 0,
> + 0, 0, 0, 0, 6, 132, 32, 128, 65, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 64, 0, 0,
> + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 63, 0, 0, 0, 63, 6, 20, 16, 0, 0, 0, 0, 0,
> + 14, 0, 0, 7, 50, 0, 16, 0, 0, 0, 0, 0, 230, 10, 16, 0, 0, 0, 0, 0, 70, 0, 16, 0,
> + 0, 0, 0, 0, 56, 0, 0, 10, 50, 32, 16, 0, 0, 0, 0, 0, 70, 0, 16, 0, 0, 0, 0, 0,
> + 2, 64, 0, 0, 0, 0, 128, 63, 0, 0, 128, 191, 0, 0, 0, 0, 0, 0, 0, 0, 54, 0, 0, 5,
> + 66, 32, 16, 0, 0, 0, 0, 0, 42, 16, 16, 0, 0, 0, 0, 0, 54, 0, 0, 5, 130, 32, 16, 0,
> + 0, 0, 0, 0, 1, 64, 0, 0, 0, 0, 128, 63, 54, 0, 0, 5, 50, 32, 16, 0, 1, 0, 0, 0,
> + 70, 16, 16, 0, 1, 0, 0, 0, 62, 0, 0, 1, 73, 83, 71, 78, 76, 0, 0, 0, 2, 0, 0, 0,
> + 8, 0, 0, 0, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0,
> + 7, 7, 0, 0, 65, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 1, 0, 0, 0,
> + 3, 3, 0, 0, 80, 79, 83, 73, 84, 73, 79, 78, 0, 84, 69, 88, 67, 79, 79, 82, 68, 0, 171, 171,
> + 79, 83, 71, 78, 80, 0, 0, 0, 2, 0, 0, 0, 8, 0, 0, 0, 56, 0, 0, 0, 0, 0, 0, 0,
> + 1, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 15, 0, 0, 0, 68, 0, 0, 0, 0, 0, 0, 0,
> + 0, 0, 0, 0, 3, 0, 0, 0, 1, 0, 0, 0, 3, 12, 0, 0, 83, 86, 95, 80, 79, 83, 73, 84,
> + 73, 79, 78, 0, 84, 69, 88, 67, 79, 79, 82, 68, 0, 171, 171, 171
> +};
> +
> +static const uint8_t pixel_shader_bytes[] =
> +{
> + 68, 88, 66, 67, 0, 95, 83, 169, 90, 60, 208, 75, 219, 179, 108, 203, 8, 232, 255, 27, 1, 0, 0, 0,
> + 148, 1, 0, 0, 4, 0, 0, 0, 48, 0, 0, 0, 156, 0, 0, 0, 8, 1, 0, 0, 96, 1, 0, 0,
> + 65, 111, 110, 57, 100, 0, 0, 0, 100, 0, 0, 0, 0, 2, 255, 255, 60, 0, 0, 0, 40, 0, 0, 0,
> + 0, 0, 40, 0, 0, 0, 40, 0, 0, 0, 40, 0, 1, 0, 36, 0, 0, 0, 40, 0, 0, 0, 0, 0,
> + 0, 2, 255, 255, 31, 0, 0, 2, 0, 0, 0, 128, 0, 0, 3, 176, 31, 0, 0, 2, 0, 0, 0, 144,
> + 0, 8, 15, 160, 66, 0, 0, 3, 0, 0, 15, 128, 0, 0, 228, 176, 0, 8, 228, 160, 1, 0, 0, 2,
> + 0, 8, 15, 128, 0, 0, 228, 128, 255, 255, 0, 0, 83, 72, 68, 82, 100, 0, 0, 0, 64, 0, 0, 0,
> + 25, 0, 0, 0, 90, 0, 0, 3, 0, 96, 16, 0, 0, 0, 0, 0, 88, 24, 0, 4, 0, 112, 16, 0,
> + 0, 0, 0, 0, 85, 85, 0, 0, 98, 16, 0, 3, 50, 16, 16, 0, 1, 0, 0, 0, 101, 0, 0, 3,
> + 242, 32, 16, 0, 0, 0, 0, 0, 69, 0, 0, 9, 242, 32, 16, 0, 0, 0, 0, 0, 70, 16, 16, 0,
> + 1, 0, 0, 0, 70, 126, 16, 0, 0, 0, 0, 0, 0, 96, 16, 0, 0, 0, 0, 0, 62, 0, 0, 1,
> + 73, 83, 71, 78, 80, 0, 0, 0, 2, 0, 0, 0, 8, 0, 0, 0, 56, 0, 0, 0, 0, 0, 0, 0,
> + 1, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 15, 0, 0, 0, 68, 0, 0, 0, 0, 0, 0, 0,
> + 0, 0, 0, 0, 3, 0, 0, 0, 1, 0, 0, 0, 3, 3, 0, 0, 83, 86, 95, 80, 79, 83, 73, 84,
> + 73, 79, 78, 0, 84, 69, 88, 67, 79, 79, 82, 68, 0, 171, 171, 171, 79, 83, 71, 78, 44, 0, 0, 0,
> + 1, 0, 0, 0, 8, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0,
> + 0, 0, 0, 0, 15, 0, 0, 0, 83, 86, 95, 84, 97, 114, 103, 101, 116, 0, 171, 171
> +};
> +
> +#endif