[FFmpeg-devel] [PATCH] [WIP] swr: rewrite resample_common/linear_float_sse/avx in yasm.
Ronald S. Bultje
rsbultje at gmail.com
Fri Jun 20 02:37:27 CEST 2014
DO NOT MERGE. Speed not yet measured; the AVX path is not yet tested.
---
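Reviewer notes: below is a rough scalar C sketch of what the new
resample_common_float kernel computes, mirroring the register-level logic
in resample.asm further down. The struct mirrors only the fields named in
the asm struc (the real filter_bank type differs), and the function name
and body here are illustrative, not the exact code from
resample_template.c.

typedef struct ResampleContext {
    void  *av_class;
    float *filter_bank;        /* real type differs; float* keeps the sketch simple */
    int    filter_length;
    int    filter_alloc;
    int    ideal_dst_incr;
    int    dst_incr;
    int    dst_incr_div;
    int    dst_incr_mod;
    int    index;
    int    frac;
    int    src_incr;
    int    compensation_distance;
    int    phase_shift;
    int    phase_mask;
} ResampleContext;

static int resample_common_float_ref(ResampleContext *c, float *dst,
                                     const float *src, int n, int update_ctx)
{
    const float *src0 = src;
    int index = c->index, frac = c->frac;
    int dst_index, i;

    for (dst_index = 0; dst_index < n; dst_index++) {
        const float *filter = c->filter_bank + c->filter_alloc * index;
        float val = 0;

        for (i = 0; i < c->filter_length; i++)  /* the SIMD inner loop */
            val += src[i] * filter[i];
        dst[dst_index] = val;

        frac  += c->dst_incr_mod;
        index += c->dst_incr_div;
        if (frac >= c->src_incr) {
            frac -= c->src_incr;
            index++;
        }
        src   += index >> c->phase_shift;  /* whole input samples consumed */
        index &= c->phase_mask;            /* filter phase for the next output */
    }
    if (update_ctx == 1) {
        c->frac  = frac;
        c->index = index;
    }
    return (int)(src - src0);  /* like the asm's (srcq - orig_src) >> 2 */
}

resample_linear_float differs only in that the inner loop also accumulates
a second dot product v2 over the adjacent filter phase (filter +
c->filter_alloc) and then interpolates, val += (v2 - val) * (float)frac /
c->src_incr, before the store; that is what the second accumulator (m2)
and the divss/shufps broadcast implement. On 64-bit, index/frac stay in
registers across the loop; on x86-32 they are spilled to the stack, which
is why the prologues differ per ABI.
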
configure | 3 +-
libswresample/resample_template.c | 12 +-
libswresample/x86/Makefile | 1 +
libswresample/x86/resample.asm | 327 +++++++++++++++++++++++++++++++++++
libswresample/x86/resample_mmx.h | 118 -------------
libswresample/x86/resample_x86_dsp.c | 34 ++--
6 files changed, 346 insertions(+), 149 deletions(-)
create mode 100644 libswresample/x86/resample.asm
diff --git a/configure b/configure
index 3a11b2c..d9bf497 100755
--- a/configure
+++ b/configure
@@ -4434,8 +4434,7 @@ EOF
check_inline_asm inline_asm_direct_symbol_refs '"movl '$extern_prefix'test, %eax"' ||
check_inline_asm inline_asm_direct_symbol_refs '"movl '$extern_prefix'test(%rip), %eax"'
- # check whether binutils is new enough to compile AVX/SSSE3/MMXEXT
- enabled avx && check_inline_asm avx_inline '"vextractf128 $1, %ymm0, %xmm1"'
+ # check whether binutils is new enough to compile SSSE3/MMXEXT
enabled ssse3 && check_inline_asm ssse3_inline '"pabsw %xmm0, %xmm0"'
enabled mmxext && check_inline_asm mmxext_inline '"pmaxub %mm0, %mm1"'
diff --git a/libswresample/resample_template.c b/libswresample/resample_template.c
index 335bf33..560c767 100644
--- a/libswresample/resample_template.c
+++ b/libswresample/resample_template.c
@@ -43,9 +43,7 @@
# define RENAME(N) N ## _double_sse2
# endif
-#elif defined(TEMPLATE_RESAMPLE_FLT) \
- || defined(TEMPLATE_RESAMPLE_FLT_SSE) \
- || defined(TEMPLATE_RESAMPLE_FLT_AVX)
+#elif defined(TEMPLATE_RESAMPLE_FLT)
# define FILTER_SHIFT 0
# define DELEM float
@@ -56,14 +54,6 @@
# if defined(TEMPLATE_RESAMPLE_FLT)
# define RENAME(N) N ## _float
-# elif defined(TEMPLATE_RESAMPLE_FLT_SSE)
-# define COMMON_CORE COMMON_CORE_FLT_SSE
-# define LINEAR_CORE LINEAR_CORE_FLT_SSE
-# define RENAME(N) N ## _float_sse
-# elif defined(TEMPLATE_RESAMPLE_FLT_AVX)
-# define COMMON_CORE COMMON_CORE_FLT_AVX
-# define LINEAR_CORE LINEAR_CORE_FLT_AVX
-# define RENAME(N) N ## _float_avx
# endif
#elif defined(TEMPLATE_RESAMPLE_S32)
diff --git a/libswresample/x86/Makefile b/libswresample/x86/Makefile
index cc3e65f..cb6371a 100644
--- a/libswresample/x86/Makefile
+++ b/libswresample/x86/Makefile
@@ -1,6 +1,7 @@
YASM-OBJS += x86/swresample_x86.o\
x86/audio_convert.o\
x86/rematrix.o\
+ x86/resample.o\
OBJS += x86/resample_x86_dsp.o\
diff --git a/libswresample/x86/resample.asm b/libswresample/x86/resample.asm
new file mode 100644
index 0000000..5b2b62a
--- /dev/null
+++ b/libswresample/x86/resample.asm
@@ -0,0 +1,327 @@
+;******************************************************************************
+;* Copyright (c) 2012 Michael Niedermayer
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+%if ARCH_X86_64
+%define pointer resq
+%else
+%define pointer resd
+%endif
+
+struc ResampleContext
+ .av_class: pointer 1
+ .filter_bank: pointer 1
+ .filter_length: resd 1
+ .filter_alloc: resd 1
+ .ideal_dst_incr: resd 1
+ .dst_incr: resd 1
+ .dst_incr_div: resd 1
+ .dst_incr_mod: resd 1
+ .index: resd 1
+ .frac: resd 1
+ .src_incr: resd 1
+ .compensation_distance: resd 1
+ .phase_shift: resd 1
+ .phase_mask: resd 1
+
+ ; there are a few more fields here, but we only care about the ones above
+endstruc
+
+SECTION .text
+
+%macro RESAMPLE_FLOAT_FNS 0
+; int resample_common_float(ResampleContext *ctx, float *dst,
+; const float *src, int size, int update_ctx)
+%if UNIX64
+cglobal resample_common_float, 6, 9, 2, ctx, dst, src, phase_shift, index, srcptr, frac, filter, filter_len
+ cmp dword r3m, 0
+ je .nodata
+
+%define stack_size_pushed 24
+%define orig_src_stack qword [rsp+16]
+%define size_stackd dword [rsp+8]
+%define update_ctx_stackd dword [rsp+0]
+ PUSH r2
+ PUSH r3
+ PUSH r4
+ mov phase_shiftd, dword [ctxq+ResampleContext.phase_shift]
+ mov fracd, dword [ctxq+ResampleContext.frac]
+%elif WIN64
+cglobal resample_common_float, 4, 9, 2, phase_shift, dst, src, ctx, index, srcptr, frac, filter, filter_len
+ cmp dword r3m, 0
+ je .nodata
+
+%define stack_size_pushed 16
+%define orig_src_stack qword [rsp+8]
+%define size_stackd dword [rsp]
+%define update_ctx_stackd dword r4m
+ mov ctxq, r0mp
+ PUSH r2
+ PUSH r3
+ mov phase_shiftd, dword [ctxq+ResampleContext.phase_shift]
+ mov fracd, dword [ctxq+ResampleContext.frac]
+%else ; x86-32
+cglobal resample_common_float, 0, 7, 2, filter, filter_len, src, srcptr, dst, ctx, index
+ cmp dword r3m, 0
+ je .nodata
+
+%define stack_size_pushed 4
+%define frac_stackd [rsp]
+%define orig_src_stack r2mp
+%define size_stackd dword r3m
+%define update_ctx_stackd dword r4m
+
+ mov ctxq, r0mp
+ PUSH dword [ctxq+ResampleContext.frac]
+ mov dstq, r1mp
+ mov srcq, r2mp
+%endif
+
+ mov indexd, dword [ctxq+ResampleContext.index]
+.loop:
+ mov filterd, dword [ctxq+ResampleContext.filter_alloc]
+ imul filterd, indexd
+ shl filterq, 2
+ add filterq, [ctxq+ResampleContext.filter_bank]
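+ ; filterq now points at the float coefficients for filter phase 'index'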
+
+ mov filter_lend, dword [ctxq+ResampleContext.filter_length]
+ lea srcptrq, [srcq +filter_lenq*4]
+ lea filterq, [filterq+filter_lenq*4]
+ neg filter_lenq
+
+ ; accumulator for val (m0)
+ xorps m0, m0
+.inner_loop:
+ movu m1, [srcptrq+filter_lenq*4]
+ mulps m1, [filterq+filter_lenq*4]
+ addps m0, m1
+ add filter_lenq, mmsize/4
+ js .inner_loop
+
+%if cpuflag(avx)
+ vextractf128 xm1, m0, 0x1
+ addps xm0, xm1
+%endif
+
+ ; horizontal sum
+ movhlps xm1, xm0
+ addps xm0, xm1
+ movss xm1, xm0
+ shufps xm0, xm0, q0001
+ addps xm0, xm1
+ movss [dstq], xm0
+
+%if ARCH_X86_32
+ DEFINE_ARGS frac, phase_shift, src, sample_index, dst, ctx, index
+ mov fracd, frac_stackd
+%elif UNIX64
+ DEFINE_ARGS ctx, dst, src, phase_shift, index, srcptr, frac, sample_index, filter_len
+%else ; win64
+ DEFINE_ARGS phase_shift, dst, src, ctx, index, srcptr, frac, sample_index, filter_len
+%endif
+ add fracd, [ctxq+ResampleContext.dst_incr_mod]
+ add indexd, [ctxq+ResampleContext.dst_incr_div]
+ cmp fracd, [ctxq+ResampleContext.src_incr]
+ jl .skip
+ sub fracd, [ctxq+ResampleContext.src_incr]
+ inc indexd
+.skip:
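+ ; high bits of index = whole input samples to advance, low bits = next filter phase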
+ mov sample_indexd, indexd
+ and indexd, dword [ctxq+ResampleContext.phase_mask]
+%if ARCH_X86_32
+ mov frac_stackd, fracd
+ mov phase_shiftd, dword [ctxq+ResampleContext.phase_shift]
+%endif
+ shr sample_indexd, phase_shiftb
+ lea srcq, [srcq+sample_indexq*4]
+ add dstq, 4
+ dec size_stackd
+ jg .loop
+
+ cmp update_ctx_stackd, 1
+ jne .end
+ mov [ctxq+ResampleContext.frac ], fracd
+ mov [ctxq+ResampleContext.index], indexd
+.end:
+ mov rax, srcq
+ sub rax, orig_src_stack
+ shr rax, 2
+ ADD rsp, stack_size_pushed
+ RET
+
+.nodata:
+ xor eax, eax
+ RET
+
+; int resample_linear_float(ResampleContext *ctx, float *dst,
+; const float *src, int size, int update_ctx)
+%if UNIX64
+cglobal resample_linear_float, 6, 10, 4, ctx, dst, src, phase_shift, index, srcptr, frac, filter1, filter2, filter_len
+ cmp dword r3m, 0
+ je .nodata
+
+%define stack_size_pushed 24
+%define frac_stackd fracd
+%define index_stackd indexd
+%define orig_src_stack qword [rsp+16]
+%define size_stackd dword [rsp+8]
+%define update_ctx_stackd dword [rsp+0]
+ PUSH r2
+ PUSH r3
+ PUSH r4
+ mov phase_shiftd, dword [ctxq+ResampleContext.phase_shift]
+ mov fracd, dword [ctxq+ResampleContext.frac]
+ mov indexd, dword [ctxq+ResampleContext.index]
+%elif WIN64
+cglobal resample_linear_float, 4, 10, 4, phase_shift, dst, src, ctx, index, srcptr, frac, filter1, filter2, filter_len
+ cmp dword r3m, 0
+ je .nodata
+
+%define stack_size_pushed 16
+%define frac_stackd fracd
+%define index_stackd indexd
+%define orig_src_stack qword [rsp+8]
+%define size_stackd dword [rsp+0]
+%define update_ctx_stackd dword r4m
+ mov ctxq, r0mp
+ PUSH r2
+ PUSH r3
+ mov phase_shiftd, dword [ctxq+ResampleContext.phase_shift]
+ mov fracd, dword [ctxq+ResampleContext.frac]
+ mov indexd, dword [ctxq+ResampleContext.index]
+%else ; x86-32
+cglobal resample_linear_float, 0, 7, 2, filter1, filter_len, src, srcptr, dst, ctx, filter2
+ cmp dword r3m, 0
+ je .nodata
+
+%define stack_size_pushed 8
+%define frac_stackd [rsp+4]
+%define index_stackd [rsp+0]
+%define orig_src_stack r2mp
+%define size_stackd dword r3m
+%define update_ctx_stackd dword r4m
+
+ mov ctxq, r0mp
+ PUSH dword [ctxq+ResampleContext.frac]
+ PUSH dword [ctxq+ResampleContext.index]
+ mov dstq, r1mp
+ mov srcq, r2mp
+%endif
+
+.loop:
+ mov filter1d, dword [ctxq+ResampleContext.filter_alloc]
+ mov filter2d, filter1d
+ imul filter1d, index_stackd
+ shl filter1q, 2
+ add filter1q, [ctxq+ResampleContext.filter_bank]
+
+ mov filter_lend, dword [ctxq+ResampleContext.filter_length]
+ lea srcptrq, [srcq +filter_lenq*4]
+ lea filter1q, [filter1q+filter_lenq*4]
+ lea filter2q, [filter1q+filter2q*4]
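+ ; filter2 = filter1 + filter_alloc floats: the adjacent phase used for interpolation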
+ neg filter_lenq
+
+ ; accumulator for val (m0) and v2 (m2)
+ xorps m0, m0
+ xorps m2, m2
+.inner_loop:
+ movu m1, [srcptrq+filter_lenq*4]
+ mulps m3, m1, [filter2q+filter_lenq*4]
+ mulps m1, [filter1q+filter_lenq*4]
+ addps m0, m1
+ addps m2, m3
+ add filter_lenq, mmsize/4
+ js .inner_loop
+
+%if cpuflag(avx)
+ vextractf128 xm3, m2, 0x1
+ vextractf128 xm1, m0, 0x1
+ addps xm2, xm3
+ addps xm0, xm1
+%endif
+
+ ; val += (v2 - val) * (FELEML) frac / c->src_incr;
+ subps xm2, xm0
+ cvtsi2ss xm1, frac_stackd
+ cvtsi2ss xm3, dword [ctxq+ResampleContext.src_incr]
+ divss xm1, xm3
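+ ; xm1 = frac / src_incr, broadcast below to all four lanes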
+ shufps xm1, xm1, q0000
+ mulps xm2, xm1
+ addps xm0, xm2
+
+ ; horizontal sum
+ movhlps xm1, xm0
+ addps xm0, xm1
+ movss xm1, xm0
+ shufps xm0, xm0, q0001
+ addps xm0, xm1
+ movss [dstq], xm0
+
+%if ARCH_X86_32
+ DEFINE_ARGS frac, phase_shift, src, sample_index, dst, ctx, index
+ mov fracd, frac_stackd
+ mov indexd, index_stackd
+%elif UNIX64
+ DEFINE_ARGS ctx, dst, src, phase_shift, index, srcptr, frac, sample_index, filter2, filter_len
+%else ; win64
+ DEFINE_ARGS phase_shift, dst, src, ctx, index, srcptr, frac, sample_index, filter2, filter_len
+%endif
+ add fracd, [ctxq+ResampleContext.dst_incr_mod]
+ add indexd, [ctxq+ResampleContext.dst_incr_div]
+ cmp fracd, [ctxq+ResampleContext.src_incr]
+ jl .skip
+ sub fracd, [ctxq+ResampleContext.src_incr]
+ inc indexd
+.skip:
+ mov sample_indexd, indexd
+ and indexd, dword [ctxq+ResampleContext.phase_mask]
+%if ARCH_X86_32
+ mov frac_stackd, fracd
+ mov index_stackd, indexd
+ mov phase_shiftd, dword [ctxq+ResampleContext.phase_shift]
+%endif
+ shr sample_indexd, phase_shiftb
+ lea srcq, [srcq+sample_indexq*4]
+ add dstq, 4
+ dec size_stackd
+ jg .loop
+
+ cmp update_ctx_stackd, 1
+ jne .end
+ mov [ctxq+ResampleContext.frac ], fracd
+ mov [ctxq+ResampleContext.index], indexd
+.end:
+ mov rax, srcq
+ sub rax, orig_src_stack
+ shr rax, 2
+ ADD rsp, stack_size_pushed
+ RET
+
+.nodata:
+ xor eax, eax
+ RET
+%endmacro
+
+INIT_XMM sse
+RESAMPLE_FLOAT_FNS
+
+INIT_YMM avx
+RESAMPLE_FLOAT_FNS
diff --git a/libswresample/x86/resample_mmx.h b/libswresample/x86/resample_mmx.h
index a4da1e9..94237b0 100644
--- a/libswresample/x86/resample_mmx.h
+++ b/libswresample/x86/resample_mmx.h
@@ -132,124 +132,6 @@ __asm__ volatile(\
XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
);
-#define COMMON_CORE_FLT_SSE \
- x86_reg len= -4*c->filter_length;\
-__asm__ volatile(\
- "xorps %%xmm0, %%xmm0 \n\t"\
- "1: \n\t"\
- "movups (%1, %0), %%xmm1 \n\t"\
- "mulps (%2, %0), %%xmm1 \n\t"\
- "addps %%xmm1, %%xmm0 \n\t"\
- "add $16, %0 \n\t"\
- " js 1b \n\t"\
- "movhlps %%xmm0, %%xmm1 \n\t"\
- "addps %%xmm1, %%xmm0 \n\t"\
- "movss %%xmm0, %%xmm1 \n\t"\
- "shufps $1, %%xmm0, %%xmm0 \n\t"\
- "addps %%xmm1, %%xmm0 \n\t"\
- "movss %%xmm0, (%3) \n\t"\
- : "+r" (len)\
- : "r" (((uint8_t*)(src+sample_index))-len),\
- "r" (((uint8_t*)filter)-len),\
- "r" (dst+dst_index)\
- XMM_CLOBBERS_ONLY("%xmm0", "%xmm1")\
-);
-
-#define LINEAR_CORE_FLT_SSE \
- x86_reg len= -4*c->filter_length;\
-__asm__ volatile(\
- "xorps %%xmm0, %%xmm0 \n\t"\
- "xorps %%xmm2, %%xmm2 \n\t"\
- "1: \n\t"\
- "movups (%3, %0), %%xmm1 \n\t"\
- "movaps %%xmm1, %%xmm3 \n\t"\
- "mulps (%4, %0), %%xmm1 \n\t"\
- "mulps (%5, %0), %%xmm3 \n\t"\
- "addps %%xmm1, %%xmm0 \n\t"\
- "addps %%xmm3, %%xmm2 \n\t"\
- "add $16, %0 \n\t"\
- " js 1b \n\t"\
- "movhlps %%xmm0, %%xmm1 \n\t"\
- "movhlps %%xmm2, %%xmm3 \n\t"\
- "addps %%xmm1, %%xmm0 \n\t"\
- "addps %%xmm3, %%xmm2 \n\t"\
- "movss %%xmm0, %%xmm1 \n\t"\
- "movss %%xmm2, %%xmm3 \n\t"\
- "shufps $1, %%xmm0, %%xmm0 \n\t"\
- "shufps $1, %%xmm2, %%xmm2 \n\t"\
- "addps %%xmm1, %%xmm0 \n\t"\
- "addps %%xmm3, %%xmm2 \n\t"\
- "movss %%xmm0, %1 \n\t"\
- "movss %%xmm2, %2 \n\t"\
- : "+r" (len),\
- "=m" (val),\
- "=m" (v2)\
- : "r" (((uint8_t*)(src+sample_index))-len),\
- "r" (((uint8_t*)filter)-len),\
- "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
- XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
-);
-
-#define COMMON_CORE_FLT_AVX \
- x86_reg len= -4*c->filter_length;\
-__asm__ volatile(\
- "vxorps %%ymm0, %%ymm0, %%ymm0 \n\t"\
- "1: \n\t"\
- "vmovups (%1, %0), %%ymm1 \n\t"\
- "vmulps (%2, %0), %%ymm1, %%ymm1 \n\t"\
- "vaddps %%ymm1, %%ymm0, %%ymm0 \n\t"\
- "add $32, %0 \n\t"\
- " js 1b \n\t"\
- "vextractf128 $1, %%ymm0, %%xmm1 \n\t"\
- "vaddps %%xmm1, %%xmm0, %%xmm0 \n\t"\
- "vmovhlps %%xmm0, %%xmm1, %%xmm1 \n\t"\
- "vaddps %%xmm1, %%xmm0, %%xmm0 \n\t"\
- "vshufps $1, %%xmm0, %%xmm0, %%xmm1 \n\t"\
- "vaddss %%xmm1, %%xmm0, %%xmm0 \n\t"\
- "vmovss %%xmm0, (%3) \n\t"\
- : "+r" (len)\
- : "r" (((uint8_t*)(src+sample_index))-len),\
- "r" (((uint8_t*)filter)-len),\
- "r" (dst+dst_index)\
- XMM_CLOBBERS_ONLY("%xmm0", "%xmm1")\
-);
-
-#define LINEAR_CORE_FLT_AVX \
- x86_reg len= -4*c->filter_length;\
-__asm__ volatile(\
- "vxorps %%ymm0, %%ymm0, %%ymm0 \n\t"\
- "vxorps %%ymm2, %%ymm2, %%ymm2 \n\t"\
- "1: \n\t"\
- "vmovups (%3, %0), %%ymm1 \n\t"\
- "vmulps (%5, %0), %%ymm1, %%ymm3 \n\t"\
- "vmulps (%4, %0), %%ymm1, %%ymm1 \n\t"\
- "vaddps %%ymm1, %%ymm0, %%ymm0 \n\t"\
- "vaddps %%ymm3, %%ymm2, %%ymm2 \n\t"\
- "add $32, %0 \n\t"\
- " js 1b \n\t"\
- "vextractf128 $1, %%ymm0, %%xmm1 \n\t"\
- "vextractf128 $1, %%ymm2, %%xmm3 \n\t"\
- "vaddps %%xmm1, %%xmm0, %%xmm0 \n\t"\
- "vaddps %%xmm3, %%xmm2, %%xmm2 \n\t"\
- "vmovhlps %%xmm0, %%xmm1, %%xmm1 \n\t"\
- "vmovhlps %%xmm2, %%xmm3, %%xmm3 \n\t"\
- "vaddps %%xmm1, %%xmm0, %%xmm0 \n\t"\
- "vaddps %%xmm3, %%xmm2, %%xmm2 \n\t"\
- "vshufps $1, %%xmm0, %%xmm0, %%xmm1 \n\t"\
- "vshufps $1, %%xmm2, %%xmm2, %%xmm3 \n\t"\
- "vaddss %%xmm1, %%xmm0, %%xmm0 \n\t"\
- "vaddss %%xmm3, %%xmm2, %%xmm2 \n\t"\
- "vmovss %%xmm0, %1 \n\t"\
- "vmovss %%xmm2, %2 \n\t"\
- : "+r" (len),\
- "=m" (val),\
- "=m" (v2)\
- : "r" (((uint8_t*)(src+sample_index))-len),\
- "r" (((uint8_t*)filter)-len),\
- "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
- XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
-);
-
#define COMMON_CORE_DBL_SSE2 \
x86_reg len= -8*c->filter_length;\
__asm__ volatile(\
diff --git a/libswresample/x86/resample_x86_dsp.c b/libswresample/x86/resample_x86_dsp.c
index 02a7db5..4628272 100644
--- a/libswresample/x86/resample_x86_dsp.c
+++ b/libswresample/x86/resample_x86_dsp.c
@@ -37,12 +37,6 @@
#include "libswresample/resample_template.c"
#undef TEMPLATE_RESAMPLE_S16_MMX2
-#if HAVE_SSE_INLINE
-#define TEMPLATE_RESAMPLE_FLT_SSE
-#include "libswresample/resample_template.c"
-#undef TEMPLATE_RESAMPLE_FLT_SSE
-#endif
-
#if HAVE_SSE2_INLINE
#define TEMPLATE_RESAMPLE_S16_SSE2
#include "libswresample/resample_template.c"
@@ -53,16 +47,20 @@
#undef TEMPLATE_RESAMPLE_DBL_SSE2
#endif
-#if HAVE_AVX_INLINE
-#define TEMPLATE_RESAMPLE_FLT_AVX
-#include "libswresample/resample_template.c"
-#undef TEMPLATE_RESAMPLE_FLT_AVX
-#endif
-
#undef DO_RESAMPLE_ONE
#endif // HAVE_MMXEXT_INLINE
+int ff_resample_common_float_sse(ResampleContext *c, uint8_t *dst,
+ const uint8_t *src, int sz, int upd);
+int ff_resample_linear_float_sse(ResampleContext *c, uint8_t *dst,
+ const uint8_t *src, int sz, int upd);
+
+int ff_resample_common_float_avx(ResampleContext *c, uint8_t *dst,
+ const uint8_t *src, int sz, int upd);
+int ff_resample_linear_float_avx(ResampleContext *c, uint8_t *dst,
+ const uint8_t *src, int sz, int upd);
+
void swresample_dsp_x86_init(ResampleContext *c)
{
int av_unused mm_flags = av_get_cpu_flags();
@@ -72,9 +70,9 @@ void swresample_dsp_x86_init(ResampleContext *c)
c->dsp.resample_common[FNIDX(S16P)] = (resample_fn) resample_common_int16_mmx2;
c->dsp.resample_linear[FNIDX(S16P)] = (resample_fn) resample_linear_int16_mmx2;
}
- if (HAVE_SSE_INLINE && mm_flags & AV_CPU_FLAG_SSE) {
- c->dsp.resample_common[FNIDX(FLTP)] = (resample_fn) resample_common_float_sse;
- c->dsp.resample_linear[FNIDX(FLTP)] = (resample_fn) resample_linear_float_sse;
+ if (HAVE_SSE_EXTERNAL && mm_flags & AV_CPU_FLAG_SSE) {
+ c->dsp.resample_common[FNIDX(FLTP)] = ff_resample_common_float_sse;
+ c->dsp.resample_linear[FNIDX(FLTP)] = ff_resample_linear_float_sse;
}
if (HAVE_SSE2_INLINE && mm_flags & AV_CPU_FLAG_SSE2) {
c->dsp.resample_common[FNIDX(S16P)] = (resample_fn) resample_common_int16_sse2;
@@ -82,8 +80,8 @@ void swresample_dsp_x86_init(ResampleContext *c)
c->dsp.resample_common[FNIDX(DBLP)] = (resample_fn) resample_common_double_sse2;
c->dsp.resample_linear[FNIDX(DBLP)] = (resample_fn) resample_linear_double_sse2;
}
- if (HAVE_AVX_INLINE && mm_flags & AV_CPU_FLAG_AVX) {
- c->dsp.resample_common[FNIDX(FLTP)] = (resample_fn) resample_common_float_avx;
- c->dsp.resample_linear[FNIDX(FLTP)] = (resample_fn) resample_linear_float_avx;
+ if (HAVE_AVX_EXTERNAL && mm_flags & AV_CPU_FLAG_AVX) {
+ c->dsp.resample_common[FNIDX(FLTP)] = ff_resample_common_float_avx;
+ c->dsp.resample_linear[FNIDX(FLTP)] = ff_resample_linear_float_avx;
}
}
--
1.8.5.5