[FFmpeg-devel] [PATCH] Moves yuv2yuvX_sse3 to yasm, unrolls main loop and other small optimizations for ~20% speedup.
Alan Kelly
alankelly at google.com
Tue Dec 1 10:33:32 EET 2020
Ping
On Thu, Nov 19, 2020 at 9:42 AM Alan Kelly <alankelly at google.com> wrote:
> ---
> All of Henrik's suggestions have been implemented. Additionally,
> m3 and m6 are permuted in the AVX2 version before storing, so that
> its output is bit-for-bit identical to the SSE3 version.
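>
> For reference, the permute is equivalent to the following intrinsics
> sketch (illustrative only, not part of the patch; the helper name is
> made up). vpackuswb packs within each 128-bit lane, leaving the two
> halves of the result interleaved at qword granularity; vpermq with
> immediate 216 (0xD8, qword order 0,2,1,3) restores linear order:
>
> #include <immintrin.h>
>
> /* Pack two vectors of 16-bit pixels to unsigned bytes and undo the
>  * per-lane interleaving of vpackuswb, so the byte order matches the
>  * SSE3 path. */
> static inline __m256i pack_fix_lanes(__m256i lo, __m256i hi)
> {
>     __m256i p = _mm256_packus_epi16(lo, hi);  /* per-lane pack */
>     return _mm256_permute4x64_epi64(p, 0xD8); /* qwords 0,2,1,3 */
> }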
> libswscale/x86/Makefile | 1 +
> libswscale/x86/swscale.c | 75 +++--------------------
> libswscale/x86/yuv2yuvX.asm | 118 ++++++++++++++++++++++++++++++++++++
> 3 files changed, 129 insertions(+), 65 deletions(-)
> create mode 100644 libswscale/x86/yuv2yuvX.asm
>
> diff --git a/libswscale/x86/Makefile b/libswscale/x86/Makefile
> index 831d5359aa..bfe383364e 100644
> --- a/libswscale/x86/Makefile
> +++ b/libswscale/x86/Makefile
> @@ -13,3 +13,4 @@ X86ASM-OBJS += x86/input.o \
> x86/scale.o \
> x86/rgb_2_rgb.o \
> x86/yuv_2_rgb.o \
> + x86/yuv2yuvX.o \
> diff --git a/libswscale/x86/swscale.c b/libswscale/x86/swscale.c
> index 3160fedf04..758c8e540f 100644
> --- a/libswscale/x86/swscale.c
> +++ b/libswscale/x86/swscale.c
> @@ -197,80 +197,25 @@ void ff_updateMMXDitherTables(SwsContext *c, int dstY)
> }
>
> #if HAVE_MMXEXT
> +void ff_yuv2yuvX_sse3(const int16_t *filter, long filterSize,
> + uint8_t *dest, int dstW,
> + const uint8_t *dither, int offset);
> +
> static void yuv2yuvX_sse3(const int16_t *filter, int filterSize,
> const int16_t **src, uint8_t *dest, int dstW,
> const uint8_t *dither, int offset)
> {
> + int remainder = (dstW % 32);
> + int pixelsProcessed = dstW - remainder;
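> + /* The assembly below writes 32 output pixels per iteration with
> + * aligned non-temporal stores, so the unaligned case and the tail
> + * are routed through the existing MMXEXT version. */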
> if(((uintptr_t)dest) & 15){
> yuv2yuvX_mmxext(filter, filterSize, src, dest, dstW, dither, offset);
> return;
> }
> - filterSize--;
> -#define MAIN_FUNCTION \
> - "pxor %%xmm0, %%xmm0 \n\t" \
> - "punpcklbw %%xmm0, %%xmm3 \n\t" \
> - "movd %4, %%xmm1 \n\t" \
> - "punpcklwd %%xmm1, %%xmm1 \n\t" \
> - "punpckldq %%xmm1, %%xmm1 \n\t" \
> - "punpcklqdq %%xmm1, %%xmm1 \n\t" \
> - "psllw $3, %%xmm1 \n\t" \
> - "paddw %%xmm1, %%xmm3 \n\t" \
> - "psraw $4, %%xmm3 \n\t" \
> - "movdqa %%xmm3, %%xmm4 \n\t" \
> - "movdqa %%xmm3, %%xmm7 \n\t" \
> - "movl %3, %%ecx \n\t" \
> - "mov %0, %%"FF_REG_d" \n\t"\
> - "mov (%%"FF_REG_d"), %%"FF_REG_S" \n\t"\
> - ".p2align 4 \n\t" /* FIXME Unroll? */\
> - "1: \n\t"\
> - "movddup 8(%%"FF_REG_d"), %%xmm0 \n\t" /* filterCoeff */\
> - "movdqa (%%"FF_REG_S", %%"FF_REG_c", 2), %%xmm2 \n\t" /* srcData */\
> - "movdqa 16(%%"FF_REG_S", %%"FF_REG_c", 2), %%xmm5 \n\t" /* srcData */\
> - "add $16, %%"FF_REG_d" \n\t"\
> - "mov (%%"FF_REG_d"), %%"FF_REG_S" \n\t"\
> - "test %%"FF_REG_S", %%"FF_REG_S" \n\t"\
> - "pmulhw %%xmm0, %%xmm2 \n\t"\
> - "pmulhw %%xmm0, %%xmm5 \n\t"\
> - "paddw %%xmm2, %%xmm3 \n\t"\
> - "paddw %%xmm5, %%xmm4 \n\t"\
> - "jnz 1b \n\t"\
> - "psraw $3, %%xmm3 \n\t"\
> - "psraw $3, %%xmm4 \n\t"\
> - "packuswb %%xmm4, %%xmm3 \n\t"\
> - "movntdq %%xmm3, (%1, %%"FF_REG_c") \n\t"\
> - "add $16, %%"FF_REG_c" \n\t"\
> - "cmp %2, %%"FF_REG_c" \n\t"\
> - "movdqa %%xmm7, %%xmm3 \n\t" \
> - "movdqa %%xmm7, %%xmm4 \n\t" \
> - "mov %0, %%"FF_REG_d" \n\t"\
> - "mov (%%"FF_REG_d"), %%"FF_REG_S" \n\t"\
> - "jb 1b \n\t"
> -
> - if (offset) {
> - __asm__ volatile(
> - "movq %5, %%xmm3 \n\t"
> - "movdqa %%xmm3, %%xmm4 \n\t"
> - "psrlq $24, %%xmm3 \n\t"
> - "psllq $40, %%xmm4 \n\t"
> - "por %%xmm4, %%xmm3 \n\t"
> - MAIN_FUNCTION
> - :: "g" (filter),
> - "r" (dest-offset), "g" ((x86_reg)(dstW+offset)), "m" (offset),
> - "m"(filterSize), "m"(((uint64_t *) dither)[0])
> - : XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm7",)
> - "%"FF_REG_d, "%"FF_REG_S, "%"FF_REG_c
> - );
> - } else {
> - __asm__ volatile(
> - "movq %5, %%xmm3 \n\t"
> - MAIN_FUNCTION
> - :: "g" (filter),
> - "r" (dest-offset), "g" ((x86_reg)(dstW+offset)), "m" (offset),
> - "m"(filterSize), "m"(((uint64_t *) dither)[0])
> - : XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3", "%xmm4", "%xmm5", "%xmm7",)
> - "%"FF_REG_d, "%"FF_REG_S, "%"FF_REG_c
> - );
> + ff_yuv2yuvX_sse3(filter, filterSize - 1, dest - offset, pixelsProcessed + offset, dither, offset);
> + if (remainder > 0) {
> + yuv2yuvX_mmxext(filter, filterSize, src, dest + pixelsProcessed, remainder, dither, offset + pixelsProcessed);
> }
> + return;
> }
> #endif
>
> diff --git a/libswscale/x86/yuv2yuvX.asm b/libswscale/x86/yuv2yuvX.asm
> new file mode 100644
> index 0000000000..ca5095cf8c
> --- /dev/null
> +++ b/libswscale/x86/yuv2yuvX.asm
> @@ -0,0 +1,118 @@
> +;******************************************************************************
> +;* x86-optimized yuv2yuvX
> +;* Copyright 2020 Google LLC
> +;* Copyright (C) 2001-2011 Michael Niedermayer <michaelni at gmx.at>
> +;*
> +;* This file is part of FFmpeg.
> +;*
> +;* FFmpeg is free software; you can redistribute it and/or
> +;* modify it under the terms of the GNU Lesser General Public
> +;* License as published by the Free Software Foundation; either
> +;* version 2.1 of the License, or (at your option) any later version.
> +;*
> +;* FFmpeg is distributed in the hope that it will be useful,
> +;* but WITHOUT ANY WARRANTY; without even the implied warranty of
> +;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
> +;* Lesser General Public License for more details.
> +;*
> +;* You should have received a copy of the GNU Lesser General Public
> +;* License along with FFmpeg; if not, write to the Free Software
> +;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
> +;******************************************************************************
> +
> +%include "libavutil/x86/x86util.asm"
> +
> +SECTION .text
> +
> +;-----------------------------------------------------------------------------
> +; yuv2yuvX
> +;
> +; void ff_yuv2yuvX_<opt>(const int16_t *filter, long filterSize,
> +; uint8_t *dest, int dstW,
> +; const uint8_t *dither, int offset);
> +;
> +;-----------------------------------------------------------------------------
> +
> +%macro YUV2YUVX_FUNC 0
> +cglobal yuv2yuvX, 6, 7, 8, filter, filterSize, dest, dstW, dither, offset, src
> +%if ARCH_X86_64
> + movsxd dstWq, dstWd
> + movsxd offsetq, offsetd
> +%endif ; x86-64
> + movddup m0, [filterq + gprsize]
> +%if cpuflag(avx2)
> + vpbroadcastq m3, [ditherq]
> +%else
> + movq xmm3, [ditherq]
> +%endif ; avx2
> + cmp offsetd, 0
> + jz .offset
> +
> + ; offset != 0 path.
> + psrlq m5, m3, $18
> + psllq m3, m3, $28
> + por m3, m3, m5
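> + ; ($18 = 24 bits, $28 = 40 bits: a right-rotation of the 64-bit
> + ; dither pattern by three bytes, as the old inline asm did)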
> +
> +.offset:
> + movd xmm1, filterSized
> +%if cpuflag(avx2)
> + vpbroadcastw m1, xmm1
> +%else
> + pshuflw m1, m1, q0000
> + punpcklqdq m1, m1
> +%endif ; avx2
> + pxor m0, m0, m0
> + mov filterSizeq, filterq
> + mov srcq, [filterSizeq]
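> + ; filterq points to { source pointer, 64-bit coefficient } pairs
> + ; terminated by a NULL source pointer; filterSizeq is reused as
> + ; the cursor into that list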
> + punpcklbw m3, m0
> + psllw m1, m1, 3
> + paddw m3, m3, m1
> + psraw m7, m3, 4
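> + ; m7 = (dither + 8 * filterSize) >> 4, the rounding base that
> + ; reseeds the accumulators at each outer iteration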
> +.outerloop:
> + mova m4, m7
> + mova m3, m7
> + mova m6, m7
> + mova m1, m7
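> + ; unrolled 2x relative to the old inline asm: four accumulators
> + ; cover 4 * mmsize bytes of 16-bit intermediates per filter tap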
> +.loop:
> +%if cpuflag(avx2)
> + vpbroadcastq m0, [filterSizeq + gprsize]
> +%else
> + movddup m0, [filterSizeq + gprsize]
> +%endif
> + pmulhw m2, m0, [srcq + offsetq * 2]
> + pmulhw m5, m0, [srcq + offsetq * 2 + mmsize]
> + paddw m3, m3, m2
> + paddw m4, m4, m5
> + pmulhw m2, m0, [srcq + offsetq * 2 + 2 * mmsize]
> + pmulhw m5, m0, [srcq + offsetq * 2 + 3 * mmsize]
> + paddw m6, m6, m2
> + paddw m1, m1, m5
> + add filterSizeq, 2 * gprsize
> + mov srcq, [filterSizeq]
> + test srcq, srcq
> + jnz .loop
> + psraw m3, m3, 3
> + psraw m4, m4, 3
> + psraw m6, m6, 3
> + psraw m1, m1, 3
> + packuswb m3, m3, m4
> + packuswb m6, m6, m1
> + mov srcq, [filterq]
> +%if cpuflag(avx2)
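> + ; restore linear qword order after the per-lane packuswb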
> + vpermq m3, m3, 216
> + vpermq m6, m6, 216
> +%endif
> + movntdq [destq + offsetq], m3
> + movntdq [destq + offsetq + mmsize], m6
> + add offsetq, mmsize * 2
> + mov filterSizeq, filterq
> + cmp offsetq, dstWq
> + jb .outerloop
> + sfence
> + RET
> +%endmacro
> +
> +INIT_XMM sse3
> +YUV2YUVX_FUNC
> +INIT_YMM avx2
> +YUV2YUVX_FUNC
> --
> 2.29.2.299.gdc1121823c-goog
>
>