[FFmpeg-cvslog] avfilter/x86/vf_spp: Remove permutation-specific code

Andreas Rheinhardt git at videolan.org
Sat May 31 02:53:02 EEST 2025


ffmpeg | branch: master | Andreas Rheinhardt <andreas.rheinhardt at outlook.com> | Tue May 27 19:34:29 2025 +0200| [0435cd5a6208f2569869f34672c6729095f27ef9] | committer: Andreas Rheinhardt

avfilter/x86/vf_spp: Remove permutation-specific code

The MMX requantize functions have the MMX permutation
(i.e. FF_IDCT_PERM_SIMPLE) hardcoded and therefore
check for the used permutation (namely via a CRC).
Yet this is very ugly and could even lead to misdetection;
furthermore, the permutation used here has been de-facto
impossible on x64 since d7246ea9f229db64ed909d7446196128d6f53de0
and definitely impossible since
bfb28b5ce89f3e950214b67ea95b45e3355c2caf, making this code dead on x64.
So remove it.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt at outlook.com>

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=0435cd5a6208f2569869f34672c6729095f27ef9
---

 libavfilter/x86/vf_spp.c | 163 -----------------------------------------------
 1 file changed, 163 deletions(-)

diff --git a/libavfilter/x86/vf_spp.c b/libavfilter/x86/vf_spp.c
index 498660d7d0..f8e5727bfc 100644
--- a/libavfilter/x86/vf_spp.c
+++ b/libavfilter/x86/vf_spp.c
@@ -21,159 +21,9 @@
 
 #include "libavutil/attributes.h"
 #include "libavutil/cpu.h"
-#include "libavutil/crc.h"
-#include "libavutil/x86/asm.h"
 #include "libavfilter/vf_spp.h"
 
 #if HAVE_MMX_INLINE
-static void hardthresh_mmx(int16_t dst[64], const int16_t src[64],
-                           int qp, const uint8_t *permutation)
-{
-    int bias = 0; //FIXME
-    unsigned int threshold1;
-
-    threshold1 = qp * ((1<<4) - bias) - 1;
-
-#define REQUANT_CORE(dst0, dst1, dst2, dst3, src0, src1, src2, src3)    \
-    "movq " #src0 ", %%mm0      \n"                                     \
-    "movq " #src1 ", %%mm1      \n"                                     \
-    "movq " #src2 ", %%mm2      \n"                                     \
-    "movq " #src3 ", %%mm3      \n"                                     \
-    "psubw %%mm4, %%mm0         \n"                                     \
-    "psubw %%mm4, %%mm1         \n"                                     \
-    "psubw %%mm4, %%mm2         \n"                                     \
-    "psubw %%mm4, %%mm3         \n"                                     \
-    "paddusw %%mm5, %%mm0       \n"                                     \
-    "paddusw %%mm5, %%mm1       \n"                                     \
-    "paddusw %%mm5, %%mm2       \n"                                     \
-    "paddusw %%mm5, %%mm3       \n"                                     \
-    "paddw %%mm6, %%mm0         \n"                                     \
-    "paddw %%mm6, %%mm1         \n"                                     \
-    "paddw %%mm6, %%mm2         \n"                                     \
-    "paddw %%mm6, %%mm3         \n"                                     \
-    "psubusw %%mm6, %%mm0       \n"                                     \
-    "psubusw %%mm6, %%mm1       \n"                                     \
-    "psubusw %%mm6, %%mm2       \n"                                     \
-    "psubusw %%mm6, %%mm3       \n"                                     \
-    "psraw $3, %%mm0            \n"                                     \
-    "psraw $3, %%mm1            \n"                                     \
-    "psraw $3, %%mm2            \n"                                     \
-    "psraw $3, %%mm3            \n"                                     \
-                                                                        \
-    "movq %%mm0, %%mm7          \n"                                     \
-    "punpcklwd %%mm2, %%mm0     \n" /*A*/                               \
-    "punpckhwd %%mm2, %%mm7     \n" /*C*/                               \
-    "movq %%mm1, %%mm2          \n"                                     \
-    "punpcklwd %%mm3, %%mm1     \n" /*B*/                               \
-    "punpckhwd %%mm3, %%mm2     \n" /*D*/                               \
-    "movq %%mm0, %%mm3          \n"                                     \
-    "punpcklwd %%mm1, %%mm0     \n" /*A*/                               \
-    "punpckhwd %%mm7, %%mm3     \n" /*C*/                               \
-    "punpcklwd %%mm2, %%mm7     \n" /*B*/                               \
-    "punpckhwd %%mm2, %%mm1     \n" /*D*/                               \
-                                                                        \
-    "movq %%mm0, " #dst0 "      \n"                                     \
-    "movq %%mm7, " #dst1 "      \n"                                     \
-    "movq %%mm3, " #dst2 "      \n"                                     \
-    "movq %%mm1, " #dst3 "      \n"
-
-    __asm__ volatile(
-        "movd %2, %%mm4             \n"
-        "movd %3, %%mm5             \n"
-        "movd %4, %%mm6             \n"
-        "packssdw %%mm4, %%mm4      \n"
-        "packssdw %%mm5, %%mm5      \n"
-        "packssdw %%mm6, %%mm6      \n"
-        "packssdw %%mm4, %%mm4      \n"
-        "packssdw %%mm5, %%mm5      \n"
-        "packssdw %%mm6, %%mm6      \n"
-        REQUANT_CORE(  (%1),  8(%1), 16(%1), 24(%1),  (%0), 8(%0), 64(%0), 72(%0))
-        REQUANT_CORE(32(%1), 40(%1), 48(%1), 56(%1),16(%0),24(%0), 48(%0), 56(%0))
-        REQUANT_CORE(64(%1), 72(%1), 80(%1), 88(%1),32(%0),40(%0), 96(%0),104(%0))
-        REQUANT_CORE(96(%1),104(%1),112(%1),120(%1),80(%0),88(%0),112(%0),120(%0))
-        : : "r" (src), "r" (dst), "g" (threshold1+1), "g" (threshold1+5), "g" (threshold1-4) //FIXME maybe more accurate then needed?
-    );
-    dst[0] = (src[0] + 4) >> 3;
-}
-
-static void softthresh_mmx(int16_t dst[64], const int16_t src[64],
-                           int qp, const uint8_t *permutation)
-{
-    int bias = 0; //FIXME
-    unsigned int threshold1;
-
-    threshold1 = qp*((1<<4) - bias) - 1;
-
-#undef REQUANT_CORE
-#define REQUANT_CORE(dst0, dst1, dst2, dst3, src0, src1, src2, src3)    \
-    "movq " #src0 ", %%mm0      \n"                                     \
-    "movq " #src1 ", %%mm1      \n"                                     \
-    "pxor %%mm6, %%mm6          \n"                                     \
-    "pxor %%mm7, %%mm7          \n"                                     \
-    "pcmpgtw %%mm0, %%mm6       \n"                                     \
-    "pcmpgtw %%mm1, %%mm7       \n"                                     \
-    "pxor %%mm6, %%mm0          \n"                                     \
-    "pxor %%mm7, %%mm1          \n"                                     \
-    "psubusw %%mm4, %%mm0       \n"                                     \
-    "psubusw %%mm4, %%mm1       \n"                                     \
-    "pxor %%mm6, %%mm0          \n"                                     \
-    "pxor %%mm7, %%mm1          \n"                                     \
-    "movq " #src2 ", %%mm2      \n"                                     \
-    "movq " #src3 ", %%mm3      \n"                                     \
-    "pxor %%mm6, %%mm6          \n"                                     \
-    "pxor %%mm7, %%mm7          \n"                                     \
-    "pcmpgtw %%mm2, %%mm6       \n"                                     \
-    "pcmpgtw %%mm3, %%mm7       \n"                                     \
-    "pxor %%mm6, %%mm2          \n"                                     \
-    "pxor %%mm7, %%mm3          \n"                                     \
-    "psubusw %%mm4, %%mm2       \n"                                     \
-    "psubusw %%mm4, %%mm3       \n"                                     \
-    "pxor %%mm6, %%mm2          \n"                                     \
-    "pxor %%mm7, %%mm3          \n"                                     \
-                                                                        \
-    "paddsw %%mm5, %%mm0        \n"                                     \
-    "paddsw %%mm5, %%mm1        \n"                                     \
-    "paddsw %%mm5, %%mm2        \n"                                     \
-    "paddsw %%mm5, %%mm3        \n"                                     \
-    "psraw $3, %%mm0            \n"                                     \
-    "psraw $3, %%mm1            \n"                                     \
-    "psraw $3, %%mm2            \n"                                     \
-    "psraw $3, %%mm3            \n"                                     \
-                                                                        \
-    "movq %%mm0, %%mm7          \n"                                     \
-    "punpcklwd %%mm2, %%mm0     \n" /*A*/                               \
-    "punpckhwd %%mm2, %%mm7     \n" /*C*/                               \
-    "movq %%mm1, %%mm2          \n"                                     \
-    "punpcklwd %%mm3, %%mm1     \n" /*B*/                               \
-    "punpckhwd %%mm3, %%mm2     \n" /*D*/                               \
-    "movq %%mm0, %%mm3          \n"                                     \
-    "punpcklwd %%mm1, %%mm0     \n" /*A*/                               \
-    "punpckhwd %%mm7, %%mm3     \n" /*C*/                               \
-    "punpcklwd %%mm2, %%mm7     \n" /*B*/                               \
-    "punpckhwd %%mm2, %%mm1     \n" /*D*/                               \
-                                                                        \
-    "movq %%mm0, " #dst0 "      \n"                                     \
-    "movq %%mm7, " #dst1 "      \n"                                     \
-    "movq %%mm3, " #dst2 "      \n"                                     \
-    "movq %%mm1, " #dst3 "      \n"
-
-    __asm__ volatile(
-        "movd %2, %%mm4             \n"
-        "movd %3, %%mm5             \n"
-        "packssdw %%mm4, %%mm4      \n"
-        "packssdw %%mm5, %%mm5      \n"
-        "packssdw %%mm4, %%mm4      \n"
-        "packssdw %%mm5, %%mm5      \n"
-        REQUANT_CORE(  (%1),  8(%1), 16(%1), 24(%1),  (%0), 8(%0), 64(%0), 72(%0))
-        REQUANT_CORE(32(%1), 40(%1), 48(%1), 56(%1),16(%0),24(%0), 48(%0), 56(%0))
-        REQUANT_CORE(64(%1), 72(%1), 80(%1), 88(%1),32(%0),40(%0), 96(%0),104(%0))
-        REQUANT_CORE(96(%1),104(%1),112(%1),120(%1),80(%0),88(%0),112(%0),120(%0))
-        : : "r" (src), "r" (dst), "g" (threshold1), "rm" (4) //FIXME maybe more accurate then needed?
-    );
-
-    dst[0] = (src[0] + 4) >> 3;
-}
-
 static void store_slice_mmx(uint8_t *dst, const int16_t *src,
                             int dst_stride, int src_stride,
                             int width, int height, int log2_scale,
@@ -223,20 +73,7 @@ av_cold void ff_spp_init_x86(SPPContext *s)
     int cpu_flags = av_get_cpu_flags();
 
     if (cpu_flags & AV_CPU_FLAG_MMX) {
-        static const uint32_t mmx_idct_perm_crc = 0xe5e8adc4;
-        uint32_t idct_perm_crc =
-            av_crc(av_crc_get_table(AV_CRC_32_IEEE), 0,
-                   s->dct->idct_permutation,
-                   sizeof(s->dct->idct_permutation));
-        int64_t bps;
         s->store_slice = store_slice_mmx;
-        av_opt_get_int(s->dct, "bits_per_sample", 0, &bps);
-        if (bps <= 8 && idct_perm_crc == mmx_idct_perm_crc) {
-            switch (s->mode) {
-            case 0: s->requantize = hardthresh_mmx; break;
-            case 1: s->requantize = softthresh_mmx; break;
-            }
-        }
     }
 #endif
 }



More information about the ffmpeg-cvslog mailing list