[FFmpeg-devel] [PATCH] [WIP] swr: rewrite resample_common/linear_float_sse in yasm.
Ronald S. Bultje
rsbultje at gmail.com
Fri Jun 20 01:19:03 CEST 2014
DO NOT MERGE: speed is untested and the AVX versions have not yet been ported.
---
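For reviewers: the new yasm file implements the same inner loops as the
COMMON_CORE_FLT_SSE / LINEAR_CORE_FLT_SSE inline-asm macros it removes, but
also folds the per-output-sample bookkeeping (index/frac stepping, source
advance) into assembly. As a reading aid, here is a rough scalar sketch of
what resample_common_float computes; the helper name is made up for
illustration, and this is not the actual resample_template.c code:

/* Illustrative scalar equivalent of the new resample_common_float
 * (sketch only; names follow the asm, not resample_template.c). */
static int resample_common_float_ref(ResampleContext *c, float *dst,
                                     const float *src, int n, int update_ctx)
{
    int index = c->index, frac = c->frac;
    const float *orig_src = src;

    while (n-- > 0) {
        const float *filter = (const float *)c->filter_bank +
                              c->filter_alloc * index;
        float val = 0;
        for (int i = 0; i < c->filter_length; i++) /* SIMD inner loop */
            val += src[i] * filter[i];
        *dst++ = val;

        /* fixed-point step of the fractional read position */
        frac  += c->dst_incr_mod;
        index += c->dst_incr_div;
        if (frac >= c->src_incr) {
            frac  -= c->src_incr;
            index++;
        }
        src   += index >> c->phase_shift; /* whole samples consumed */
        index &= c->phase_mask;           /* keep sub-sample phase */
    }
    if (update_ctx) {
        c->frac  = frac;
        c->index = index;
    }
    return (int)(src - orig_src); /* source samples consumed */
}

The linear variant additionally evaluates the next phase's filter
(filter_alloc floats further into the filter bank) into a second
accumulator and interpolates val += (v2 - val) * frac / src_incr before
storing.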
libswresample/x86/Makefile | 1 +
libswresample/x86/resample.asm | 306 +++++++++++++++++++++++++++++++++++
libswresample/x86/resample_mmx.h | 58 -------
libswresample/x86/resample_x86_dsp.c | 17 +-
4 files changed, 315 insertions(+), 67 deletions(-)
create mode 100644 libswresample/x86/resample.asm
diff --git a/libswresample/x86/Makefile b/libswresample/x86/Makefile
index cc3e65f..cb6371a 100644
--- a/libswresample/x86/Makefile
+++ b/libswresample/x86/Makefile
@@ -1,6 +1,7 @@
 YASM-OBJS += x86/swresample_x86.o\
              x86/audio_convert.o\
              x86/rematrix.o\
+             x86/resample.o\
 
 OBJS += x86/resample_x86_dsp.o\
diff --git a/libswresample/x86/resample.asm b/libswresample/x86/resample.asm
new file mode 100644
index 0000000..10f5a6f
--- /dev/null
+++ b/libswresample/x86/resample.asm
@@ -0,0 +1,306 @@
+;******************************************************************************
+;* Copyright (c) 2012 Michael Niedermayer
+;*
+;* This file is part of FFmpeg.
+;*
+;* FFmpeg is free software; you can redistribute it and/or
+;* modify it under the terms of the GNU Lesser General Public
+;* License as published by the Free Software Foundation; either
+;* version 2.1 of the License, or (at your option) any later version.
+;*
+;* FFmpeg is distributed in the hope that it will be useful,
+;* but WITHOUT ANY WARRANTY; without even the implied warranty of
+;* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+;* Lesser General Public License for more details.
+;*
+;* You should have received a copy of the GNU Lesser General Public
+;* License along with FFmpeg; if not, write to the Free Software
+;* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+;******************************************************************************
+
+%include "libavutil/x86/x86util.asm"
+
+%if ARCH_X86_64
+%define pointer resq
+%else
+%define pointer resd
+%endif
+
+struc ResampleContext
+ .av_class: pointer 1
+ .filter_bank: pointer 1
+ .filter_length: resd 1
+ .filter_alloc: resd 1
+ .ideal_dst_incr: resd 1
+ .dst_incr: resd 1
+ .dst_incr_div: resd 1
+ .dst_incr_mod: resd 1
+ .index: resd 1
+ .frac: resd 1
+ .src_incr: resd 1
+ .compensation_distance: resd 1
+ .phase_shift: resd 1
+ .phase_mask: resd 1
+
+ ; there are a few more fields after these, but we only need the ones above
+endstruc
+
+SECTION .text
+
+; int resample_common_float(ResampleContext *ctx, float *dst,
+; const float *src, int size, int update_ctx)
+INIT_XMM sse
+%if UNIX64
+cglobal resample_common_float, 6, 9, 2, ctx, dst, src, phase_shift, index, srcptr, frac, filter, filter_len
+ cmp dword r3m, 0
+ je .nodata
+
+%define stack_size_pushed 24
+%define orig_src_stack qword [rsp+16]
+%define size_stackd dword [rsp+8]
+%define update_ctx_stackd dword [rsp+0]
+ PUSH r2
+ PUSH r3
+ PUSH r4
+ mov phase_shiftd, dword [ctxq+ResampleContext.phase_shift]
+ mov fracd, dword [ctxq+ResampleContext.frac]
+%elif WIN64
+cglobal resample_common_float, 4, 9, 2, phase_shift, dst, src, ctx, index, srcptr, frac, filter, filter_len
+ cmp dword r3m, 0
+ je .nodata
+
+%define stack_size_pushed 16
+%define orig_src_stack qword [rsp+8]
+%define size_stackd dword [rsp]
+%define update_ctx_stackd dword r4m
+ mov ctxq, r0mp
+ PUSH r2
+ PUSH r3
+ mov phase_shiftd, dword [ctxq+ResampleContext.phase_shift]
+ mov fracd, dword [ctxq+ResampleContext.frac]
+%else ; x86-32
+cglobal resample_common_float, 0, 7, 2, filter, filter_len, src, srcptr, dst, ctx, index
+ cmp dword r3m, 0
+ je .nodata
+
+%define stack_size_pushed 4
+%define frac_stackd [rsp]
+%define orig_src_stack r2mp
+%define size_stackd dword r3m
+%define update_ctx_stackd dword r4m
+
+ mov ctxq, r0mp
+ PUSH dword [ctxq+ResampleContext.frac]
+ mov dstq, r1mp
+ mov srcq, r2mp
+%endif
+
+ mov indexd, dword [ctxq+ResampleContext.index]
+.loop:
+ mov filterd, dword [ctxq+ResampleContext.filter_alloc]
+ imul filterd, indexd
+ shl filterq, 2
+ add filterq, [ctxq+ResampleContext.filter_bank]
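+ ; filter = filter_bank + index * filter_alloc floats: the filter for this phase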
+
+ mov filter_lend, dword [ctxq+ResampleContext.filter_length]
+ lea srcptrq, [srcq +filter_lenq*4]
+ lea filterq, [filterq+filter_lenq*4]
+ neg filter_lenq
+
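+ ; accumulate the dot product of src and filter taps (val) in m0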
+ xorps m0, m0
+.inner_loop:
+ movu m1, [srcptrq+filter_lenq*4]
+ mulps m1, [filterq+filter_lenq*4]
+ addps m0, m1
+ add filter_lenq, mmsize/4
+ js .inner_loop
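+
+ ; horizontal sum of the four partial sums in m0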
+ movhlps m1, m0
+ addps m0, m1
+ movss m1, m0
+ shufps m0, m0, q0001
+ addps m0, m1
+ movss [dstq], m0
+
+%if ARCH_X86_32
+ DEFINE_ARGS frac, phase_shift, src, sample_index, dst, ctx, index
+ mov fracd, frac_stackd
+%elif UNIX64
+ DEFINE_ARGS ctx, dst, src, phase_shift, index, srcptr, frac, sample_index, filter_len
+%else ; win64
+ DEFINE_ARGS phase_shift, dst, src, ctx, index, srcptr, frac, sample_index, filter_len
+%endif
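+ ; fixed-point position update: frac += dst_incr_mod and index +=
+ ; dst_incr_div, carrying frac into index when it reaches src_incr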
+ add fracd, [ctxq+ResampleContext.dst_incr_mod]
+ add indexd, [ctxq+ResampleContext.dst_incr_div]
+ cmp fracd, [ctxq+ResampleContext.src_incr]
+ jl .skip
+ sub fracd, [ctxq+ResampleContext.src_incr]
+ inc indexd
+.skip:
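+ ; index >> phase_shift is the number of whole source samples to
+ ; advance; index & phase_mask is the filter phase for the next sample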
+ mov sample_indexd, indexd
+ and indexd, dword [ctxq+ResampleContext.phase_mask]
+%if ARCH_X86_32
+ mov frac_stackd, fracd
+ mov phase_shiftd, dword [ctxq+ResampleContext.phase_shift]
+%endif
+ shr sample_indexd, phase_shiftb
+ lea srcq, [srcq+sample_indexq*4]
+ add dstq, 4
+ dec size_stackd
+ jg .loop
+
+ cmp update_ctx_stackd, 1
+ jne .end
+ mov [ctxq+ResampleContext.frac ], fracd
+ mov [ctxq+ResampleContext.index], indexd
+.end:
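+ ; return the number of source samples consumed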
+ mov rax, srcq
+ sub rax, orig_src_stack
+ shr rax, 2
+ ADD rsp, stack_size_pushed
+ RET
+
+.nodata:
+ xor eax, eax
+ RET
+
+; int resample_linear_float(ResampleContext *ctx, float *dst,
+; const float *src, int size, int update_ctx)
+INIT_XMM sse
+%if UNIX64
+cglobal resample_linear_float, 6, 10, 4, ctx, dst, src, phase_shift, index, srcptr, frac, filter1, filter2, filter_len
+ cmp dword r3m, 0
+ je .nodata
+
+%define stack_size_pushed 24
+%define frac_stackd fracd
+%define index_stackd indexd
+%define orig_src_stack qword [rsp+16]
+%define size_stackd dword [rsp+8]
+%define update_ctx_stackd dword [rsp+0]
+ PUSH r2
+ PUSH r3
+ PUSH r4
+ mov phase_shiftd, dword [ctxq+ResampleContext.phase_shift]
+ mov fracd, dword [ctxq+ResampleContext.frac]
+ mov indexd, dword [ctxq+ResampleContext.index]
+%elif WIN64
+cglobal resample_linear_float, 4, 10, 4, phase_shift, dst, src, ctx, index, srcptr, frac, filter1, filter2, filter_len
+ cmp dword r3m, 0
+ je .nodata
+
+%define stack_size_pushed 16
+%define frac_stackd fracd
+%define index_stackd indexd
+%define orig_src_stack qword [rsp+8]
+%define size_stackd dword [rsp+0]
+%define update_ctx_stackd dword r4m
+ mov ctxq, r0mp
+ PUSH r2
+ PUSH r3
+ mov phase_shiftd, dword [ctxq+ResampleContext.phase_shift]
+ mov fracd, dword [ctxq+ResampleContext.frac]
+ mov indexd, dword [ctxq+ResampleContext.index]
+%else ; x86-32
+cglobal resample_linear_float, 0, 7, 2, filter1, filter_len, src, srcptr, dst, ctx, filter2
+ cmp dword r3m, 0
+ je .nodata
+
+%define stack_size_pushed 8
+%define frac_stackd [rsp+4]
+%define index_stackd [rsp+0]
+%define orig_src_stack r2mp
+%define size_stackd dword r3m
+%define update_ctx_stackd dword r4m
+
+ mov ctxq, r0mp
+ PUSH dword [ctxq+ResampleContext.frac]
+ PUSH dword [ctxq+ResampleContext.index]
+ mov dstq, r1mp
+ mov srcq, r2mp
+%endif
+
+.loop:
+ mov filter1d, dword [ctxq+ResampleContext.filter_alloc]
+ mov filter2d, filter1d
+ imul filter1d, index_stackd
+ shl filter1q, 2
+ add filter1q, [ctxq+ResampleContext.filter_bank]
+
+ mov filter_lend, dword [ctxq+ResampleContext.filter_length]
+ lea srcptrq, [srcq +filter_lenq*4]
+ lea filter1q, [filter1q+filter_lenq*4]
+ lea filter2q, [filter1q+filter2q*4]
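+ ; filter2 now points one phase (filter_alloc floats) past filter1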
+ neg filter_lenq
+
+ ; sum val (m0) and v2 (m2)
+ xorps m0, m0
+ xorps m2, m2
+.inner_loop:
+ movu m1, [srcptrq+filter_lenq*4]
+ mulps m3, m1, [filter2q+filter_lenq*4]
+ mulps m1, [filter1q+filter_lenq*4]
+ addps m0, m1
+ addps m2, m3
+ add filter_lenq, mmsize/4
+ js .inner_loop
+
+ ; val += (v2 - val) * (FELEML) frac / c->src_incr;
+ subps m2, m0
+ cvtsi2ss m1, frac_stackd
+ cvtsi2ss m3, dword [ctxq+ResampleContext.src_incr]
+ divss m1, m3
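+ ; broadcast frac / src_incr to all four lanes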
+ shufps m1, m1, q0000
+ mulps m2, m1
+ addps m0, m2
+
+ ; horizontal sum
+ movhlps m1, m0
+ addps m0, m1
+ movss m1, m0
+ shufps m0, m0, q0001
+ addps m0, m1
+ movss [dstq], m0
+
+%if ARCH_X86_32
+ DEFINE_ARGS frac, phase_shift, src, sample_index, dst, ctx, index
+ mov fracd, frac_stackd
+ mov indexd, index_stackd
+%elif UNIX64
+ DEFINE_ARGS ctx, dst, src, phase_shift, index, srcptr, frac, sample_index, filter2, filter_len
+%else ; win64
+ DEFINE_ARGS phase_shift, dst, src, ctx, index, srcptr, frac, sample_index, filter2, filter_len
+%endif
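+ ; same fixed-point frac/index update as in resample_common_float above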
+ add fracd, [ctxq+ResampleContext.dst_incr_mod]
+ add indexd, [ctxq+ResampleContext.dst_incr_div]
+ cmp fracd, [ctxq+ResampleContext.src_incr]
+ jl .skip
+ sub fracd, [ctxq+ResampleContext.src_incr]
+ inc indexd
+.skip:
+ mov sample_indexd, indexd
+ and indexd, dword [ctxq+ResampleContext.phase_mask]
+%if ARCH_X86_32
+ mov frac_stackd, fracd
+ mov index_stackd, indexd
+ mov phase_shiftd, dword [ctxq+ResampleContext.phase_shift]
+%endif
+ shr sample_indexd, phase_shiftb
+ lea srcq, [srcq+sample_indexq*4]
+ add dstq, 4
+ dec size_stackd
+ jg .loop
+
+ cmp update_ctx_stackd, 1
+ jne .end
+ mov [ctxq+ResampleContext.frac ], fracd
+ mov [ctxq+ResampleContext.index], indexd
+.end:
+ mov rax, srcq
+ sub rax, orig_src_stack
+ shr rax, 2
+ ADD rsp, stack_size_pushed
+ RET
+
+.nodata:
+ xor eax, eax
+ RET
diff --git a/libswresample/x86/resample_mmx.h b/libswresample/x86/resample_mmx.h
index a4da1e9..649ba31 100644
--- a/libswresample/x86/resample_mmx.h
+++ b/libswresample/x86/resample_mmx.h
@@ -132,64 +132,6 @@ __asm__ volatile(\
     XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
 );
-#define COMMON_CORE_FLT_SSE \
- x86_reg len= -4*c->filter_length;\
-__asm__ volatile(\
- "xorps %%xmm0, %%xmm0 \n\t"\
- "1: \n\t"\
- "movups (%1, %0), %%xmm1 \n\t"\
- "mulps (%2, %0), %%xmm1 \n\t"\
- "addps %%xmm1, %%xmm0 \n\t"\
- "add $16, %0 \n\t"\
- " js 1b \n\t"\
- "movhlps %%xmm0, %%xmm1 \n\t"\
- "addps %%xmm1, %%xmm0 \n\t"\
- "movss %%xmm0, %%xmm1 \n\t"\
- "shufps $1, %%xmm0, %%xmm0 \n\t"\
- "addps %%xmm1, %%xmm0 \n\t"\
- "movss %%xmm0, (%3) \n\t"\
- : "+r" (len)\
- : "r" (((uint8_t*)(src+sample_index))-len),\
- "r" (((uint8_t*)filter)-len),\
- "r" (dst+dst_index)\
- XMM_CLOBBERS_ONLY("%xmm0", "%xmm1")\
-);
-
-#define LINEAR_CORE_FLT_SSE \
- x86_reg len= -4*c->filter_length;\
-__asm__ volatile(\
- "xorps %%xmm0, %%xmm0 \n\t"\
- "xorps %%xmm2, %%xmm2 \n\t"\
- "1: \n\t"\
- "movups (%3, %0), %%xmm1 \n\t"\
- "movaps %%xmm1, %%xmm3 \n\t"\
- "mulps (%4, %0), %%xmm1 \n\t"\
- "mulps (%5, %0), %%xmm3 \n\t"\
- "addps %%xmm1, %%xmm0 \n\t"\
- "addps %%xmm3, %%xmm2 \n\t"\
- "add $16, %0 \n\t"\
- " js 1b \n\t"\
- "movhlps %%xmm0, %%xmm1 \n\t"\
- "movhlps %%xmm2, %%xmm3 \n\t"\
- "addps %%xmm1, %%xmm0 \n\t"\
- "addps %%xmm3, %%xmm2 \n\t"\
- "movss %%xmm0, %%xmm1 \n\t"\
- "movss %%xmm2, %%xmm3 \n\t"\
- "shufps $1, %%xmm0, %%xmm0 \n\t"\
- "shufps $1, %%xmm2, %%xmm2 \n\t"\
- "addps %%xmm1, %%xmm0 \n\t"\
- "addps %%xmm3, %%xmm2 \n\t"\
- "movss %%xmm0, %1 \n\t"\
- "movss %%xmm2, %2 \n\t"\
- : "+r" (len),\
- "=m" (val),\
- "=m" (v2)\
- : "r" (((uint8_t*)(src+sample_index))-len),\
- "r" (((uint8_t*)filter)-len),\
- "r" (((uint8_t*)(filter+c->filter_alloc))-len)\
- XMM_CLOBBERS_ONLY("%xmm0", "%xmm1", "%xmm2", "%xmm3")\
-);
-
#define COMMON_CORE_FLT_AVX \
x86_reg len= -4*c->filter_length;\
__asm__ volatile(\
diff --git a/libswresample/x86/resample_x86_dsp.c b/libswresample/x86/resample_x86_dsp.c
index 02a7db5..a16cd87 100644
--- a/libswresample/x86/resample_x86_dsp.c
+++ b/libswresample/x86/resample_x86_dsp.c
@@ -37,12 +37,6 @@
 #include "libswresample/resample_template.c"
 #undef TEMPLATE_RESAMPLE_S16_MMX2
-#if HAVE_SSE_INLINE
-#define TEMPLATE_RESAMPLE_FLT_SSE
-#include "libswresample/resample_template.c"
-#undef TEMPLATE_RESAMPLE_FLT_SSE
-#endif
-
 #if HAVE_SSE2_INLINE
 #define TEMPLATE_RESAMPLE_S16_SSE2
 #include "libswresample/resample_template.c"
@@ -63,6 +57,11 @@
 #endif // HAVE_MMXEXT_INLINE
+int ff_resample_common_float_sse(ResampleContext *c, uint8_t *dst,
+                                 const uint8_t *src, int sz, int upd);
+int ff_resample_linear_float_sse(ResampleContext *c, uint8_t *dst,
+                                 const uint8_t *src, int sz, int upd);
+
 void swresample_dsp_x86_init(ResampleContext *c)
 {
     int av_unused mm_flags = av_get_cpu_flags();
@@ -72,9 +71,9 @@ void swresample_dsp_x86_init(ResampleContext *c)
         c->dsp.resample_common[FNIDX(S16P)] = (resample_fn) resample_common_int16_mmx2;
         c->dsp.resample_linear[FNIDX(S16P)] = (resample_fn) resample_linear_int16_mmx2;
     }
-    if (HAVE_SSE_INLINE && mm_flags & AV_CPU_FLAG_SSE) {
-        c->dsp.resample_common[FNIDX(FLTP)] = (resample_fn) resample_common_float_sse;
-        c->dsp.resample_linear[FNIDX(FLTP)] = (resample_fn) resample_linear_float_sse;
+    if (HAVE_SSE_EXTERNAL && mm_flags & AV_CPU_FLAG_SSE) {
+        c->dsp.resample_common[FNIDX(FLTP)] = ff_resample_common_float_sse;
+        c->dsp.resample_linear[FNIDX(FLTP)] = ff_resample_linear_float_sse;
     }
     if (HAVE_SSE2_INLINE && mm_flags & AV_CPU_FLAG_SSE2) {
         c->dsp.resample_common[FNIDX(S16P)] = (resample_fn) resample_common_int16_sse2;
--
1.8.5.5