[FFmpeg-devel] [PATCH] Move yuv2yuvX_sse3 to yasm, unroll the main loop, and apply other small optimizations for a ~20% speedup. An AVX2 version is ready and tested; however, although local tests show a significant speed-up in this function with AVX2, swscale as a whole slows down, probably due to CPU frequency scaling.
Alan Kelly
alankelly at google.com
Fri Oct 23 16:17:16 EEST 2020
Fixed. The wrong step size was used, causing a write past the end of
the buffer. yuv2yuvX_mmxext is now called if there are any remaining pixels.
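For concreteness, here is a compact C sketch of the resulting dispatch,
mirroring the wrapper in the diff below (the guard for rows shorter than
32 pixels is an addition here, since the asm outer loop always runs at
least once):

    /* The SSE3 kernel emits 32 output pixels (2 * mmsize bytes) per
     * outer-loop iteration, so split the row into a multiple-of-32
     * main part and a tail. */
    int remainder       = dstW % 32;        /* e.g. dstW = 100 -> 4  */
    int pixelsProcessed = dstW - remainder; /*      dstW = 100 -> 96 */
    if (pixelsProcessed > 0)                /* skip kernel on tiny rows */
        ff_yuv2yuvX_sse3(filter, filterSize - 1, dest - offset,
                         pixelsProcessed + offset, dither, offset);
    if (remainder > 0)                      /* 4 tail pixels in example */
        yuv2yuvX_mmxext(filter, filterSize, src, dest + pixelsProcessed,
                        remainder, dither, offset + pixelsProcessed);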
---
libswscale/x86/Makefile | 1 +
libswscale/x86/swscale.c | 75 ++++----------------------
libswscale/x86/yuv2yuvX.asm | 105 ++++++++++++++++++++++++++++++++++++
3 files changed, 116 insertions(+), 65 deletions(-)
create mode 100644 libswscale/x86/yuv2yuvX.asm
diff --git a/libswscale/x86/Makefile b/libswscale/x86/Makefile
index 831d5359aa..bfe383364e 100644
--- a/libswscale/x86/Makefile
+++ b/libswscale/x86/Makefile
@@ -13,3 +13,4 @@ X86ASM-OBJS += x86/input.o          \
                x86/scale.o                 \
                x86/rgb_2_rgb.o             \
                x86/yuv_2_rgb.o             \
+               x86/yuv2yuvX.o              \
diff --git a/libswscale/x86/swscale.c b/libswscale/x86/swscale.c
index 3160fedf04..fec9fa22e0 100644
--- a/libswscale/x86/swscale.c
+++ b/libswscale/x86/swscale.c
@@ -197,80 +197,25 @@ void ff_updateMMXDitherTables(SwsContext *c, int dstY)
}
#if HAVE_MMXEXT
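+/* Defined in libswscale/x86/yuv2yuvX.asm. Note the calling convention in
+ * the wrapper below: the asm receives filterSize - 1, dest - offset and
+ * dstW + offset, so it can index everything through its offset counter. */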
+void ff_yuv2yuvX_sse3(const int16_t *filter, int filterSize,
+ uint8_t *dest, int dstW,
+ const uint8_t *dither, int offset);
+
static void yuv2yuvX_sse3(const int16_t *filter, int filterSize,
const int16_t **src, uint8_t *dest, int dstW,
const uint8_t *dither, int offset)
{
+    int remainder = dstW % 32;
+    int pixelsProcessed = dstW - remainder;
+    /* The asm kernel uses non-temporal stores (movntdq) and needs a
+     * 16-byte-aligned destination; unaligned rows take the mmxext path. */
if(((uintptr_t)dest) & 15){
yuv2yuvX_mmxext(filter, filterSize, src, dest, dstW, dither, offset);
return;
}
- filterSize--;
-#define MAIN_FUNCTION \
- "pxor %%xmm0, %%xmm0 \n\t" \
- "punpcklbw %%xmm0, %%xmm3 \n\t" \
- "movd %4, %%xmm1 \n\t" \
- "punpcklwd %%xmm1, %%xmm1 \n\t" \
- "punpckldq %%xmm1, %%xmm1 \n\t" \
- "punpcklqdq %%xmm1, %%xmm1 \n\t" \
- "psllw $3, %%xmm1 \n\t" \
- "paddw %%xmm1, %%xmm3 \n\t" \
- "psraw $4, %%xmm3 \n\t" \
- "movdqa %%xmm3, %%xmm4 \n\t" \
- "movdqa %%xmm3, %%xmm7 \n\t" \
- "movl %3, %%ecx \n\t" \
- "mov %0, %%"FF_REG_d" \n\t"\
- "mov (%%"FF_REG_d"), %%"FF_REG_S" \n\t"\
- ".p2align 4 \n\t" /* FIXME Unroll? */\
- "1: \n\t"\
- "movddup 8(%%"FF_REG_d"), %%xmm0 \n\t" /* filterCoeff */\
- "movdqa (%%"FF_REG_S", %%"FF_REG_c", 2), %%xmm2 \n\t" /* srcData */\
- "movdqa 16(%%"FF_REG_S", %%"FF_REG_c", 2), %%xmm5 \n\t" /* srcData */\
- "add $16, %%"FF_REG_d" \n\t"\
- "mov (%%"FF_REG_d"), %%"FF_REG_S" \n\t"\
- "test %%"FF_REG_S", %%"FF_REG_S" \n\t"\
- "pmulhw %%xmm0, %%xmm2 \n\t"\
- "pmulhw %%xmm0, %%xmm5 \n\t"\
- "paddw %%xmm2, %%xmm3 \n\t"\
- "paddw %%xmm5, %%xmm4 \n\t"\
- " jnz 1b \n\t"\
- "psraw $3, %%xmm3 \n\t"\
- "psraw $3, %%xmm4 \n\t"\
- "packuswb %%xmm4, %%xmm3 \n\t"\
- "movntdq %%xmm3, (%1, %%"FF_REG_c") \n\t"\
- "add $16, %%"FF_REG_c" \n\t"\
- "cmp %2, %%"FF_REG_c" \n\t"\
- "movdqa %%xmm7, %%xmm3 \n\t" \
- "movdqa %%xmm7, %%xmm4 \n\t" \
- "mov %0, %%"FF_REG_d" \n\t"\
- "mov (%%"FF_REG_d"), %%"FF_REG_S" \n\t"\
- "jb 1b \n\t"
-
- if (offset) {
- __asm__ volatile(
- "movq %5, %%xmm3 \n\t"
- "movdqa %%xmm3, %%xmm4 \n\t"
- "psrlq $24, %%xmm3 \n\t"
- "psllq $40, %%xmm4 \n\t"
- "por %%xmm4, %%xmm3 \n\t"
- MAIN_FUNCTION
- :: "g" (filter),
- "r" (dest-offset), "g" ((x86_reg)(dstW+offset)), "m" (offset),
- "m"(filterSize), "m"(((uint64_t *) dither)[0])
- : XMM_CLOBBERS("%xmm0" , "%xmm1" , "%xmm2" , "%xmm3" , "%xmm4" , "%xmm5" , "%xmm7" ,)
- "%"FF_REG_d, "%"FF_REG_S, "%"FF_REG_c
- );
- } else {
- __asm__ volatile(
- "movq %5, %%xmm3 \n\t"
- MAIN_FUNCTION
- :: "g" (filter),
- "r" (dest-offset), "g" ((x86_reg)(dstW+offset)), "m" (offset),
- "m"(filterSize), "m"(((uint64_t *) dither)[0])
- : XMM_CLOBBERS("%xmm0" , "%xmm1" , "%xmm2" , "%xmm3" , "%xmm4" , "%xmm5" , "%xmm7" ,)
- "%"FF_REG_d, "%"FF_REG_S, "%"FF_REG_c
- );
+    if (pixelsProcessed > 0)
+        ff_yuv2yuvX_sse3(filter, filterSize - 1, dest - offset,
+                         pixelsProcessed + offset, dither, offset);
+    if (remainder > 0)
+        yuv2yuvX_mmxext(filter, filterSize, src, dest + pixelsProcessed,
+                        remainder, dither, offset + pixelsProcessed);
}
#endif
diff --git a/libswscale/x86/yuv2yuvX.asm b/libswscale/x86/yuv2yuvX.asm
new file mode 100644
index 0000000000..84727de599
--- /dev/null
+++ b/libswscale/x86/yuv2yuvX.asm
@@ -0,0 +1,105 @@
+;******************************************************************************
+;* x86-optimized yuv2yuvX
+;* Copyright 2020 Google LLC
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+SECTION .text
+
+;-----------------------------------------------------------------------------
+; yuv2yuvX
+;
+; void ff_yuv2yuvX_<opt>(const int16_t *filter, int filterSize,
+; uint8_t *dest, int dstW,
+; const uint8_t *dither, int offset);
+;
+;-----------------------------------------------------------------------------
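+;
+; Note: the C wrapper (swscale.c above) passes filterSize - 1, dest - offset
+; and dstW + offset, so this kernel indexes src, dest and the pixel count
+; purely through its offset counter. filter points to a NULL-terminated
+; list of 16-byte entries, each holding a source-plane pointer at +0 and
+; the coefficient qword at +8.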
+
+%macro YUV2YUVX_FUNC 0
+cglobal yuv2yuvX, 6, 7, 8, filter, filterSize, dest, dstW, dither, offset, src
+%if ARCH_X86_64
+ movsxd dstWq, dstWd
+ movsxd offsetq, offsetd
+%endif ; x86-64
+    movq          xm3, [ditherq]
+    test          offsetd, offsetd
+    jz            .dither_done
+
+    ; offset != 0: rotate the 64-bit dither pattern by 3 bytes, as the
+    ; old inline-asm version did (psrlq $24 / psllq $40 / por).
+    psrlq         m5, m3, 24
+    psllq         m3, m3, 40
+    por           m3, m3, m5
+
+.dither_done:
+%if cpuflag(avx2)
+    vperm2i128    m3, m3, m3, 0   ; copy the dither into both 128-bit lanes
+%endif ; avx2
+%if ARCH_X86_64
+    movq          xm1, filterSizeq
+%else
+    movd          xm1, filterSized
+%endif
+%if cpuflag(avx2)
+    vpbroadcastw  m1, xm1
+%else
+    ; vpbroadcastw is AVX2-only; splat the low word with SSE2 shuffles.
+    pshuflw       m1, m1, q0000
+    punpcklqdq    m1, m1
+%endif
+    pxor          m0, m0, m0
+    ; filterSizeq is dead once broadcast; reuse it as the filter cursor.
+    mov           filterSizeq, filterq
+    mov           srcq, [filterSizeq]
+    punpcklbw     m3, m0
+    psllw         m1, m1, 3
+    paddw         m3, m3, m1
+    ; m7 = (dither + 8 * (filterSize - 1)) >> 4: the value every
+    ; accumulator starts from.
+    psraw         m7, m3, 4
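+
+    ; Each outer-loop iteration accumulates into four word registers and
+    ; stores 2 * mmsize output pixels (32 for SSE3, 64 for AVX2).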
+.outerloop:
+ mova m4, m7
+ mova m3, m7
+ mova m6, m7
+ mova m1, m7
+.loop:
+    ; Broadcast the 64-bit coefficient of the current filter entry.
+%if cpuflag(avx2)
+    vpbroadcastq  m0, [filterSizeq + 8]
+%else
+    movddup       m0, [filterSizeq + 8]   ; vpbroadcastq is AVX2-only
+%endif
+    pmulhw        m2, m0, [srcq + offsetq * 2]
+    pmulhw        m5, m0, [srcq + offsetq * 2 + mmsize]
+    paddw         m3, m3, m2
+    paddw         m4, m4, m5
+    pmulhw        m2, m0, [srcq + offsetq * 2 + 2 * mmsize]
+    pmulhw        m5, m0, [srcq + offsetq * 2 + 3 * mmsize]
+    paddw         m6, m6, m2
+    paddw         m1, m1, m5
+    add           filterSizeq, 16         ; next 16-byte filter entry
+    mov           srcq, [filterSizeq]
+    test          srcq, srcq              ; NULL src pointer ends the list
+    jnz           .loop
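+    ; Drop the remaining fraction bits and pack to bytes with unsigned
+    ; saturation.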
+ psraw m3, m3, 3
+ psraw m4, m4, 3
+ psraw m6, m6, 3
+ psraw m1, m1, 3
+ packuswb m3, m3, m4
+ packuswb m6, m6, m1
+    mov           srcq, [filterq]   ; reload the first source plane pointer
+    movntdq       [destq + offsetq], m3          ; non-temporal: dest is
+    movntdq       [destq + offsetq + mmsize], m6 ; write-only here
+    add           offsetq, mmsize * 2
+    mov           filterSizeq, filterq           ; rewind the filter cursor
+ cmp offsetq, dstWq
+ jb .outerloop
+ REP_RET
+%endmacro
+
+INIT_XMM sse3
+YUV2YUVX_FUNC
+INIT_YMM avx2
+YUV2YUVX_FUNC
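+
+; Only the SSE3 variant is wired up in swscale.c for now; the AVX2 build
+; is assembled and tested but left unused there (see the commit message:
+; overall swscale slows down, likely from CPU frequency scaling).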
--
2.29.0.rc1.297.gfa9743e501-goog