[FFmpeg-devel] [PATCH v2 5/6] lavc/apv: AVX2 transquant for x86-64

James Almer jamrial at gmail.com
Tue Apr 22 23:00:40 EEST 2025


On 4/21/2025 4:50 PM, Mark Thompson wrote:
> On 21/04/2025 17:53, James Almer wrote:
>> On 4/21/2025 12:24 PM, Mark Thompson wrote:
>>> Typical checkasm result on Alder Lake:
>>>
>>> decode_transquant_8_c:                                 461.1 ( 1.00x)
>>> decode_transquant_8_avx2:                               97.5 ( 4.73x)
>>> decode_transquant_10_c:                                483.9 ( 1.00x)
>>> decode_transquant_10_avx2:                              91.7 ( 5.28x)
>>> ---
>>>    libavcodec/apv_dsp.c          |   4 +
>>>    libavcodec/apv_dsp.h          |   2 +
>>>    libavcodec/x86/Makefile       |   2 +
>>>    libavcodec/x86/apv_dsp.asm    | 279 ++++++++++++++++++++++++++++++++++
>>>    libavcodec/x86/apv_dsp_init.c |  40 +++++
>>>    tests/checkasm/Makefile       |   1 +
>>>    tests/checkasm/apv_dsp.c      | 109 +++++++++++++
>>>    tests/checkasm/checkasm.c     |   3 +
>>>    tests/checkasm/checkasm.h     |   1 +
>>>    tests/fate/checkasm.mak       |   1 +
>>>    10 files changed, 442 insertions(+)
>>>    create mode 100644 libavcodec/x86/apv_dsp.asm
>>>    create mode 100644 libavcodec/x86/apv_dsp_init.c
>>>    create mode 100644 tests/checkasm/apv_dsp.c
>>>
>>> ...
>>> diff --git a/libavcodec/x86/apv_dsp.asm b/libavcodec/x86/apv_dsp.asm
>>> new file mode 100644
>>> index 0000000000..6b045e989a
>>> --- /dev/null
>>> +++ b/libavcodec/x86/apv_dsp.asm
>>> @@ -0,0 +1,279 @@
>>> +;************************************************************************
>>> +;* This file is part of FFmpeg.
>>> +;*
>>> +;* FFmpeg is free software; you can redistribute it and/or
>>> +;* modify it under the terms of the GNU Lesser General Public
>>> +;* License as published by the Free Software Foundation; either
>>> +;* version 2.1 of the License, or (at your option) any later version.
>>> +;*
>>> +;* FFmpeg is distributed in the hope that it will be useful,
>>> +;* but WITHOUT ANY WARRANTY; without even the implied warranty of
>>> +;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
>>> +;* Lesser General Public License for more details.
>>> +;*
>>> +;* You should have received a copy of the GNU Lesser General Public
>>> +;* License along with FFmpeg; if not, write to the Free Software
>>> +;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
>>> +;******************************************************************************
>>> +
>>> +%include "libavutil/x86/x86util.asm"
>>> +
>>> +SECTION_RODATA 32
>>> +
>>> +; Full matrix for row transform.
>>> +const tmatrix_row
>>> +    dw  64,  89,  84,  75,  64,  50,  35,  18
>>> +    dw  64, -18, -84,  50,  64, -75, -35,  89
>>> +    dw  64,  75,  35, -18, -64, -89, -84, -50
>>> +    dw  64, -50, -35,  89, -64, -18,  84, -75
>>> +    dw  64,  50, -35, -89, -64,  18,  84,  75
>>> +    dw  64, -75,  35,  18, -64,  89, -84,  50
>>> +    dw  64,  18, -84, -50,  64,  75, -35, -89
>>> +    dw  64, -89,  84, -75,  64, -50,  35, -18
>>> +
>>> +; Constant pairs for broadcast in column transform.
>>> +const tmatrix_col_even
>>> +    dw  64,  64,  64, -64
>>> +    dw  84,  35,  35, -84
>>> +const tmatrix_col_odd
>>> +    dw  89,  75,  50,  18
>>> +    dw  75, -18, -89, -50
>>> +    dw  50, -89,  18,  75
>>> +    dw  18, -50,  75, -89
>>> +
>>> +; Memory targets for vpbroadcastd (register version requires AVX512).
>>> +cextern pd_1
>>> +const sixtyfour
>>> +    dd  64
>>> +
>>> +SECTION .text
>>> +
>>> +; void ff_apv_decode_transquant_avx2(void *output,
>>> +;                                    ptrdiff_t pitch,
>>> +;                                    const int16_t *input,
>>> +;                                    const int16_t *qmatrix,
>>> +;                                    int bit_depth,
>>> +;                                    int qp_shift);
>>> +
>>> +INIT_YMM avx2
>>> +
>>> +cglobal apv_decode_transquant, 6, 7, 16, output, pitch, input, qmatrix, bit_depth, qp_shift, tmp
>>> +
>>> +    ; Load input and dequantise
>>> +
>>> +    vpbroadcastd  m10, [pd_1]
>>> +    lea       tmpq, [bit_depthq - 2]
>>
>> lea       tmpd, [bit_depthd - 2]
>>
>> The upper 32 bits of the register may have garbage.
> 
> Ah, I was assuming that lea had to be pointer-sized, but apparently it doesn't.  Changed.
> 
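(For context: the 32-bit form is safe because a write to a 32-bit gpr
zero-extends into the upper half, so tmpq ends up with a well-defined
value no matter what the high bits of bit_depthq contained:

lea       tmpd, [bit_depthd - 2] ; upper 32 bits of tmpq are cleared

The low 32 bits of the sum are the same either way; the 32-bit form
just guarantees the rest of the register is clean.)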
>>> +    movd      xm8, qp_shiftd
>>
>> If you declare the function as 5, 7, 16, then qp_shift will not be loaded into a gpr on ABIs where it's on the stack (Win64, and x86_32 if it were supported), and then you can do
>>
>>      movd      xm8, qp_shiftm
>>
>> Which will load it directly to the simd register from memory, saving one instruction in the prologue.
> 
> This seems like highly dubious magic since it is lying about the number of arguments.

You're not lying. That value tells x86inc how many arguments to load
into gprs in the prologue if they are on the stack. If they are not (as
is the case with the first six arguments on Unix64, and the first four
on Win64), qp_shiftm will be equivalent to qp_shiftd; if they are, it
will be the stack memory address.

So with my suggestion, on Win64 you get

movd xmm8, [rsp] ; qp_shiftm points to memory

instead of

mov r11, [rsp] ; prologue loads argument into what will be qp_shiftd
movd xmm8, r11d ; qp_shiftd is an alias of r11d

It's ricing, yes, but it's free.
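
In other words, the declaration would become something like this
(sketch, matching the suggestion above):

cglobal apv_decode_transquant, 5, 7, 16, output, pitch, input, qmatrix, bit_depth, qp_shift, tmp
    ...
    movd      xm8, qp_shiftm ; [rsp + offset] on Win64, alias of qp_shiftd on Unix64

qp_shift is still named, it just isn't counted among the arguments the
prologue loads into gprs.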

> 
> I've changed it, but I want to check a Windows machine as well.
> 
>>> +    movd      xm9, tmpd
>>> +    vpslld    m10, m10, xm9
>>> +    vpsrld    m10, m10, 1
>>> +
>>> +    ; m8  = scalar qp_shift
>>> +    ; m9  = scalar bd_shift
>>> +    ; m10 = vector 1 << (bd_shift - 1)
>>> +    ; m11 = qmatrix load
>>> +
>>> +%macro LOAD_AND_DEQUANT 2 ; (xmm input, constant offset)
>>> +    vpmovsxwd m%1, [inputq   + %2]
>>> +    vpmovsxwd m11, [qmatrixq + %2]
>>> +    vpmaddwd  m%1, m%1, m11
>>> +    vpslld    m%1, m%1, xm8
>>> +    vpaddd    m%1, m%1, m10
>>> +    vpsrad    m%1, m%1, xm9
>>> +    vpackssdw m%1, m%1, m%1
>>> +%endmacro
>>> +
>>> +    LOAD_AND_DEQUANT 0, 0x00
>>> +    LOAD_AND_DEQUANT 1, 0x10
>>> +    LOAD_AND_DEQUANT 2, 0x20
>>> +    LOAD_AND_DEQUANT 3, 0x30
>>> +    LOAD_AND_DEQUANT 4, 0x40
>>> +    LOAD_AND_DEQUANT 5, 0x50
>>> +    LOAD_AND_DEQUANT 6, 0x60
>>> +    LOAD_AND_DEQUANT 7, 0x70
>>> +
>>> +    ; mN = row N words 0 1 2 3 0 1 2 3 4 5 6 7 4 5 6 7
>>> +
>>> +    ; Transform columns
>>> +    ; This applies a 1-D DCT butterfly
>>> +
>>> +    vpunpcklwd  m12, m0,  m4
>>> +    vpunpcklwd  m13, m2,  m6
>>> +    vpunpcklwd  m14, m1,  m3
>>> +    vpunpcklwd  m15, m5,  m7
>>> +
>>> +    ; m12 = rows 0 and 4 interleaved
>>> +    ; m13 = rows 2 and 6 interleaved
>>> +    ; m14 = rows 1 and 3 interleaved
>>> +    ; m15 = rows 5 and 7 interleaved
>>> +
>>> +    vpbroadcastd   m0, [tmatrix_col_even + 0x00]
>>> +    vpbroadcastd   m1, [tmatrix_col_even + 0x04]
>>> +    vpbroadcastd   m2, [tmatrix_col_even + 0x08]
>>> +    vpbroadcastd   m3, [tmatrix_col_even + 0x0c]
>>
>> Maybe do
>>
>> lea tmpq, [tmatrix_col_even]
>> vpbroadcastd   m0, [tmpq + 0x00]
>> vpbroadcastd   m1, [tmpq + 0x04]
>> ...
>>
>> To emit smaller instructions. Same for tmatrix_col_odd and tmatrix_row below.
> 
>   150:   48 8d 05 00 00 00 00    lea    0x0(%rip),%rax        # 157 <ff_apv_decode_transquant_avx2+0x157>
>   157:   c4 e2 7d 58 00          vpbroadcastd (%rax),%ymm0
>   15c:   c4 e2 7d 58 48 04       vpbroadcastd 0x4(%rax),%ymm1
>   162:   c4 e2 7d 58 50 08       vpbroadcastd 0x8(%rax),%ymm2
>   168:   c4 e2 7d 58 58 0c       vpbroadcastd 0xc(%rax),%ymm3
> 
>   18e:   c4 e2 7d 58 05 00 00    vpbroadcastd 0x0(%rip),%ymm0        # 197 <ff_apv_decode_transquant_avx2+0x197>
>   195:   00 00
>   197:   c4 e2 7d 58 0d 00 00    vpbroadcastd 0x0(%rip),%ymm1        # 1a0 <ff_apv_decode_transquant_avx2+0x1a0>
>   19e:   00 00
>   1a0:   c4 e2 7d 58 15 00 00    vpbroadcastd 0x0(%rip),%ymm2        # 1a9 <ff_apv_decode_transquant_avx2+0x1a9>
>   1a7:   00 00
>   1a9:   c4 e2 7d 58 1d 00 00    vpbroadcastd 0x0(%rip),%ymm3        # 1b2 <ff_apv_decode_transquant_avx2+0x1b2>
>   1b0:   00 00
> 
> Saves 6 bytes, but there is now a dependency which wasn't there before.  Is it really better?

You could do the lea several instructions earlier, so the dependency
wouldn't matter, but unless you can measure a difference in speed,
maybe don't bother.
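
Something like this (sketch, register allocation as in the patch; tmpq
is free again by this point):

lea       tmpq, [tmatrix_col_even] ; issued early, latency hidden
vpunpcklwd  m12, m0,  m4           ; independent work in between
vpunpcklwd  m13, m2,  m6
vpunpcklwd  m14, m1,  m3
vpunpcklwd  m15, m5,  m7
vpbroadcastd   m0, [tmpq + 0x00]
vpbroadcastd   m1, [tmpq + 0x04]
...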

> 
>>> +
>>> +    vpmaddwd  m4,  m12, m0
>>> +    vpmaddwd  m5,  m12, m1
>>> +    vpmaddwd  m6,  m13, m2
>>> +    vpmaddwd  m7,  m13, m3
>>> +    vpaddd    m8,  m4,  m6
>>> +    vpaddd    m9,  m5,  m7
>>> +    vpsubd    m10, m5,  m7
>>> +    vpsubd    m11, m4,  m6
>>> +
>>> +    vpbroadcastd   m0, [tmatrix_col_odd + 0x00]
>>> +    vpbroadcastd   m1, [tmatrix_col_odd + 0x04]
>>> +    vpbroadcastd   m2, [tmatrix_col_odd + 0x08]
>>> +    vpbroadcastd   m3, [tmatrix_col_odd + 0x0c]
>>> +
>>> +    vpmaddwd  m4,  m14, m0
>>> +    vpmaddwd  m5,  m15, m1
>>> +    vpmaddwd  m6,  m14, m2
>>> +    vpmaddwd  m7,  m15, m3
>>> +    vpaddd    m12, m4,  m5
>>> +    vpaddd    m13, m6,  m7
>>> +
>>> +    vpbroadcastd   m0, [tmatrix_col_odd + 0x10]
>>> +    vpbroadcastd   m1, [tmatrix_col_odd + 0x14]
>>> +    vpbroadcastd   m2, [tmatrix_col_odd + 0x18]
>>> +    vpbroadcastd   m3, [tmatrix_col_odd + 0x1c]
>>> +
>>> +    vpmaddwd  m4,  m14, m0
>>> +    vpmaddwd  m5,  m15, m1
>>> +    vpmaddwd  m6,  m14, m2
>>> +    vpmaddwd  m7,  m15, m3
>>> +    vpaddd    m14, m4,  m5
>>> +    vpaddd    m15, m6,  m7
>>> +
>>> +    vpaddd    m0,  m8,  m12
>>> +    vpaddd    m1,  m9,  m13
>>> +    vpaddd    m2,  m10, m14
>>> +    vpaddd    m3,  m11, m15
>>> +    vpsubd    m4,  m11, m15
>>> +    vpsubd    m5,  m10, m14
>>> +    vpsubd    m6,  m9,  m13
>>> +    vpsubd    m7,  m8,  m12
>>> +
>>> +    ; Mid-transform normalisation
>>> +    ; Note that outputs here are fitted to 16 bits
>>> +
>>> +    vpbroadcastd  m8, [sixtyfour]
>>> +
>>> +%macro NORMALISE 1
>>> +    vpaddd    m%1, m%1, m8
>>> +    vpsrad    m%1, m%1, 7
>>> +    vpackssdw m%1, m%1, m%1
>>> +    vpermq    m%1, m%1, q3120
>>> +%endmacro
>>> +
>>> +    NORMALISE 0
>>> +    NORMALISE 1
>>> +    NORMALISE 2
>>> +    NORMALISE 3
>>> +    NORMALISE 4
>>> +    NORMALISE 5
>>> +    NORMALISE 6
>>> +    NORMALISE 7
>>> +
>>> +    ; mN = row N words 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7
>>> +
>>> +    ; Transform rows
>>> +    ; This multiplies the rows directly by the transform matrix,
>>> +    ; avoiding the need to transpose anything
>>> +
>>> +    mova      m12, [tmatrix_row + 0x00]
>>> +    mova      m13, [tmatrix_row + 0x20]
>>> +    mova      m14, [tmatrix_row + 0x40]
>>> +    mova      m15, [tmatrix_row + 0x60]
>>> +
>>> +%macro TRANS_ROW_STEP 1
>>> +    vpmaddwd  m8,  m%1, m12
>>> +    vpmaddwd  m9,  m%1, m13
>>> +    vpmaddwd  m10, m%1, m14
>>> +    vpmaddwd  m11, m%1, m15
>>> +    vphaddd   m8,  m8,  m9
>>> +    vphaddd   m10, m10, m11
>>> +    vphaddd   m%1, m8,  m10
>>> +%endmacro
>>> +
>>> +    TRANS_ROW_STEP 0
>>> +    TRANS_ROW_STEP 1
>>> +    TRANS_ROW_STEP 2
>>> +    TRANS_ROW_STEP 3
>>> +    TRANS_ROW_STEP 4
>>> +    TRANS_ROW_STEP 5
>>> +    TRANS_ROW_STEP 6
>>> +    TRANS_ROW_STEP 7
>>> +
>>> +    ; Renormalise, clip and store output
>>> +
>>> +    vpbroadcastd  m14, [pd_1]
>>> +    mov       tmpd, 20
>>> +    sub       tmpd, bit_depthd
>>> +    movd      xm9, tmpd
>>> +    dec       tmpd
>>> +    movd      xm13, tmpd
>>> +    movd      xm15, bit_depthd
>>> +    vpslld    m8,  m14, xm13
>>> +    vpslld    m12, m14, xm15
>>> +    vpsrld    m10, m12, 1
>>> +    vpsubd    m12, m12, m14
>>> +    vpxor     m11, m11, m11
>>> +
>>> +    ; m8  = vector 1 << (bd_shift - 1)
>>> +    ; m9  = scalar bd_shift
>>> +    ; m10 = vector 1 << (bit_depth - 1)
>>> +    ; m11 = zero
>>> +    ; m12 = vector (1 << bit_depth) - 1
>>> +
>>> +    cmp       bit_depthd, 8
>>> +    jne       store_10
>>> +
>>> +%macro NORMALISE_AND_STORE_8 1
>>> +    vpaddd    m%1, m%1, m8
>>> +    vpsrad    m%1, m%1, xm9
>>> +    vpaddd    m%1, m%1, m10
>>> +    vextracti128  xm13, m%1, 0
>>> +    vextracti128  xm14, m%1, 1
>>> +    vpackusdw xm%1, xm13, xm14
>>> +    vpackuswb xm%1, xm%1, xm%1
>>
>>      vpaddd    m%1, m%1, m10
>>      vextracti128  xm14, m%1, 1
>>      vpackusdw xm%1, xm%1, xm14
>>      vpackuswb xm%1, xm%1, xm%1
>>
>> vextracti128 with 0 as third argument is the same as a mova for the lower 128 bits, so it's not needed.
> 
> Thinking about this a bit more makes me want to combine rows to not waste elements.  It's not obvious that this is better, but how about:

It may be better just for having six cross-lane instructions instead
of eight.
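
By my count (worth double-checking), the combined version below has
three cross-lane operations per four rows,

vpermq        m%1, m%1, q3120 ; cross-lane
vpermq        m%2, m%3, q3120 ; cross-lane
vextracti128  xm%2, m%1, 1    ; cross-lane

so six for all eight rows, versus one vextracti128 per row (eight
total) in the per-row version once the redundant lane-0 extract is
dropped.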

> 
> %macro NORMALISE_AND_STORE_8 4
>      vpaddd    m%1, m%1, m8
>      vpaddd    m%2, m%2, m8
>      vpaddd    m%3, m%3, m8
>      vpaddd    m%4, m%4, m8
>      vpsrad    m%1, m%1, xm9
>      vpsrad    m%2, m%2, xm9
>      vpsrad    m%3, m%3, xm9
>      vpsrad    m%4, m%4, xm9
>      vpaddd    m%1, m%1, m10
>      vpaddd    m%2, m%2, m10
>      vpaddd    m%3, m%3, m10
>      vpaddd    m%4, m%4, m10
>      ; m%1 = 32x8   A0-3 A4-7
>      ; m%2 = 32x8   B0-3 B4-7
>      ; m%3 = 32x8   C0-3 C4-7
>      ; m%4 = 32x8   D0-3 D4-7
>      vpackusdw m%1, m%1, m%2
>      vpackusdw m%3, m%3, m%4
>      ; m%1 = 16x16  A0-3 B0-3 A4-7 B4-7
>      ; m%3 = 16x16  C0-3 D0-3 C4-7 D4-7
>      vpermq    m%1, m%1, q3120
>      vpermq    m%2, m%3, q3120
>      ; m%1 = 16x16  A0-3 A4-7 B0-3 B4-7
>      ; m%2 = 16x16  C0-3 C4-7 D0-3 D4-7
>      vpackuswb m%1, m%1, m%2
>      ; m%1 = 8x32   A0-3 A4-7 C0-3 C4-7 B0-3 B4-7 D0-3 D4-7
>      vextracti128  xm%2, m%1, 1
>      vpsrldq   xm%3, xm%1, 8
>      vpsrldq   xm%4, xm%2, 8
>      vmovq     [outputq],          xm%1
>      vmovq     [outputq + pitchq], xm%2
>      lea       outputq, [outputq + 2*pitchq]

Maybe instead load pitch*3 into tmpq outside of the macro

lea       tmpq, [pitchq+pitchq*2]

Then, inside it, you can do:

vmovq     [outputq],          xm%1
vmovq     [outputq+pitchq],   xm%2
vmovq     [outputq+pitchq*2], xm%3
vmovq     [outputq+tmpq],     xm%4
lea       outputq, [outputq+pitchq*4]

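So the tail of the macro would read something like this (sketch,
assuming tmpq = pitch*3 is set up once before the first invocation):

vextracti128  xm%2, m%1, 1
vpsrldq   xm%3, xm%1, 8
vpsrldq   xm%4, xm%2, 8
vmovq     [outputq],            xm%1
vmovq     [outputq + pitchq],   xm%2
vmovq     [outputq + pitchq*2], xm%3
vmovq     [outputq + tmpq],     xm%4
lea       outputq, [outputq + pitchq*4]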

>      vmovq     [outputq],          xm%3
>      vmovq     [outputq + pitchq], xm%4
>      lea       outputq, [outputq + 2*pitchq]
> %endmacro
> 
>      NORMALISE_AND_STORE_8 0, 1, 2, 3
>      NORMALISE_AND_STORE_8 4, 5, 6, 7
> 
>>> +    movq      [outputq], xm%1
>>> +    add       outputq, pitchq
>>> +%endmacro
>>> +
>>> +    NORMALISE_AND_STORE_8 0
>>> +    NORMALISE_AND_STORE_8 1
>>> +    NORMALISE_AND_STORE_8 2
>>> +    NORMALISE_AND_STORE_8 3
>>> +    NORMALISE_AND_STORE_8 4
>>> +    NORMALISE_AND_STORE_8 5
>>> +    NORMALISE_AND_STORE_8 6
>>> +    NORMALISE_AND_STORE_8 7
>>> +
>>> +    RET
>>> +
>>> +store_10:
>>> +
>>> +%macro NORMALISE_AND_STORE_10 1
>>> +    vpaddd    m%1, m%1, m8
>>> +    vpsrad    m%1, m%1, xm9
>>> +    vpaddd    m%1, m%1, m10
>>> +    vpmaxsd   m%1, m%1, m11
>>> +    vpminsd   m%1, m%1, m12
>>> +    vextracti128  xm13, m%1, 0
>>> +    vextracti128  xm14, m%1, 1
>>> +    vpackusdw xm%1, xm13, xm14
>>
>> Same.
> 
> A similar method for pairs applies here as well.
> 
>>> +    mova      [outputq], xm%1
>>> +    add       outputq, pitchq
>>> +%endmacro
>>> +
>>> +    NORMALISE_AND_STORE_10 0
>>> +    NORMALISE_AND_STORE_10 1
>>> +    NORMALISE_AND_STORE_10 2
>>> +    NORMALISE_AND_STORE_10 3
>>> +    NORMALISE_AND_STORE_10 4
>>> +    NORMALISE_AND_STORE_10 5
>>> +    NORMALISE_AND_STORE_10 6
>>> +    NORMALISE_AND_STORE_10 7
>>> +
>>> +    RET
>>> ...
> 
> Thanks,
> 
> - Mark
> 
