[FFmpeg-devel] [PATCH 20/41] avcodec/x86/me_cmp: Disable overridden functions on x64

Andreas Rheinhardt andreas.rheinhardt at outlook.com
Fri Jun 10 02:55:02 EEST 2022


x64 always has MMX, MMXEXT, SSE and SSE2, which means that some
MMX, MMXEXT, SSE and 3dnow functions are always overridden by
other functions (unless one explicitly disables SSE2, for
example). This commit therefore disables such me_cmp functions
at compile-time for x64.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt at outlook.com>
---
 libavcodec/x86/me_cmp.asm    |  6 ++++
 libavcodec/x86/me_cmp_init.c | 61 +++++++++++++++++++++---------------
 2 files changed, 42 insertions(+), 25 deletions(-)

diff --git a/libavcodec/x86/me_cmp.asm b/libavcodec/x86/me_cmp.asm
index ad06d485ab..05e521cb08 100644
--- a/libavcodec/x86/me_cmp.asm
+++ b/libavcodec/x86/me_cmp.asm
@@ -261,11 +261,15 @@ hadamard8_16_wrapper 0, 14
 %endif
 %endmacro
 
+%if ARCH_X86_32
 INIT_MMX mmx
 HADAMARD8_DIFF
+%endif
 
+%if ARCH_X86_32 || HAVE_ALIGNED_STACK == 0
 INIT_MMX mmxext
 HADAMARD8_DIFF
+%endif
 
 INIT_XMM sse2
 %if ARCH_X86_64
@@ -385,10 +389,12 @@ cglobal sum_abs_dctelem, 1, 1, %1, block
     RET
 %endmacro
 
+%if ARCH_X86_32
 INIT_MMX mmx
 SUM_ABS_DCTELEM 0, 4
 INIT_MMX mmxext
 SUM_ABS_DCTELEM 0, 4
+%endif
 INIT_XMM sse2
 SUM_ABS_DCTELEM 7, 2
 INIT_XMM ssse3
diff --git a/libavcodec/x86/me_cmp_init.c b/libavcodec/x86/me_cmp_init.c
index 9af911bb88..6144bb9496 100644
--- a/libavcodec/x86/me_cmp_init.c
+++ b/libavcodec/x86/me_cmp_init.c
@@ -126,6 +126,7 @@ static int nsse8_mmx(MpegEncContext *c, uint8_t *pix1, uint8_t *pix2,
 
 #if HAVE_INLINE_ASM
 
+#if ARCH_X86_32
 static int vsad_intra16_mmx(MpegEncContext *v, uint8_t *pix, uint8_t *dummy,
                             ptrdiff_t stride, int h)
 {
@@ -270,6 +271,7 @@ static int vsad16_mmx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
     return tmp & 0x7FFF;
 }
 #undef SUM
+#endif
 
 DECLARE_ASM_CONST(8, uint64_t, round_tab)[3] = {
     0x0000000000000000ULL,
@@ -478,20 +480,6 @@ static int sad8_y2_ ## suf(MpegEncContext *v, uint8_t *blk2,            \
     return sum_ ## suf();                                               \
 }                                                                       \
                                                                         \
-static int sad8_xy2_ ## suf(MpegEncContext *v, uint8_t *blk2,           \
-                            uint8_t *blk1, ptrdiff_t stride, int h)     \
-{                                                                       \
-    av_assert2(h == 8);                                                     \
-    __asm__ volatile (                                                  \
-        "pxor %%mm7, %%mm7     \n\t"                                    \
-        "pxor %%mm6, %%mm6     \n\t"                                    \
-        ::);                                                            \
-                                                                        \
-    sad8_4_ ## suf(blk1, blk2, stride, 8);                              \
-                                                                        \
-    return sum_ ## suf();                                               \
-}                                                                       \
-                                                                        \
 static int sad16_ ## suf(MpegEncContext *v, uint8_t *blk2,              \
                          uint8_t *blk1, ptrdiff_t stride, int h)        \
 {                                                                       \
@@ -535,7 +523,8 @@ static int sad16_y2_ ## suf(MpegEncContext *v, uint8_t *blk2,           \
                                                                         \
     return sum_ ## suf();                                               \
 }                                                                       \
-                                                                        \
+
+#define PIX_SADXY(suf)                                                  \
 static int sad16_xy2_ ## suf(MpegEncContext *v, uint8_t *blk2,          \
                              uint8_t *blk1, ptrdiff_t stride, int h)    \
 {                                                                       \
@@ -549,8 +538,25 @@ static int sad16_xy2_ ## suf(MpegEncContext *v, uint8_t *blk2,          \
                                                                         \
     return sum_ ## suf();                                               \
 }                                                                       \
+                                                                        \
+static int sad8_xy2_ ## suf(MpegEncContext *v, uint8_t *blk2,           \
+                            uint8_t *blk1, ptrdiff_t stride, int h)     \
+{                                                                       \
+    av_assert2(h == 8);                                                 \
+    __asm__ volatile (                                                  \
+        "pxor %%mm7, %%mm7     \n\t"                                    \
+        "pxor %%mm6, %%mm6     \n\t"                                    \
+        ::);                                                            \
+                                                                        \
+    sad8_4_ ## suf(blk1, blk2, stride, 8);                              \
+                                                                        \
+    return sum_ ## suf();                                               \
+}                                                                       \
 
+#if ARCH_X86_32
 PIX_SAD(mmx)
+#endif
+PIX_SADXY(mmx)
 
 #endif /* HAVE_INLINE_ASM */
 
@@ -560,32 +566,35 @@ av_cold void ff_me_cmp_init_x86(MECmpContext *c, AVCodecContext *avctx)
 
 #if HAVE_INLINE_ASM
     if (INLINE_MMX(cpu_flags)) {
+#if ARCH_X86_32
+        c->sad[0] = sad16_mmx;
+        c->sad[1] = sad8_mmx;
+
+        if (!(avctx->flags & AV_CODEC_FLAG_BITEXACT)) {
+            c->vsad[0] = vsad16_mmx;
+        }
+        c->vsad[4] = vsad_intra16_mmx;
+
         c->pix_abs[0][0] = sad16_mmx;
         c->pix_abs[0][1] = sad16_x2_mmx;
         c->pix_abs[0][2] = sad16_y2_mmx;
-        c->pix_abs[0][3] = sad16_xy2_mmx;
         c->pix_abs[1][0] = sad8_mmx;
         c->pix_abs[1][1] = sad8_x2_mmx;
         c->pix_abs[1][2] = sad8_y2_mmx;
+#endif
+        c->pix_abs[0][3] = sad16_xy2_mmx;
         c->pix_abs[1][3] = sad8_xy2_mmx;
-
-        c->sad[0] = sad16_mmx;
-        c->sad[1] = sad8_mmx;
-
-        c->vsad[4] = vsad_intra16_mmx;
-
-        if (!(avctx->flags & AV_CODEC_FLAG_BITEXACT)) {
-            c->vsad[0] = vsad16_mmx;
-        }
     }
 
 #endif /* HAVE_INLINE_ASM */
 
     if (EXTERNAL_MMX(cpu_flags)) {
+#if ARCH_X86_32
         c->hadamard8_diff[0] = ff_hadamard8_diff16_mmx;
         c->hadamard8_diff[1] = ff_hadamard8_diff_mmx;
         c->sum_abs_dctelem   = ff_sum_abs_dctelem_mmx;
         c->sse[0]            = ff_sse16_mmx;
+#endif
         c->sse[1]            = ff_sse8_mmx;
 #if HAVE_X86ASM
         c->nsse[0]           = nsse16_mmx;
@@ -594,9 +603,11 @@ av_cold void ff_me_cmp_init_x86(MECmpContext *c, AVCodecContext *avctx)
     }
 
     if (EXTERNAL_MMXEXT(cpu_flags)) {
+#if ARCH_X86_32 || !HAVE_ALIGNED_STACK
         c->hadamard8_diff[0] = ff_hadamard8_diff16_mmxext;
         c->hadamard8_diff[1] = ff_hadamard8_diff_mmxext;
         c->sum_abs_dctelem   = ff_sum_abs_dctelem_mmxext;
+#endif
 
         c->sad[0] = ff_sad16_mmxext;
         c->sad[1] = ff_sad8_mmxext;
-- 
2.34.1



More information about the ffmpeg-devel mailing list