[Mplayer-cvslog] CVS: main/postproc rgb2rgb.c, 1.61, 1.62 rgb2rgb_template.c, 1.71, 1.72 swscale-example.c, 1.4, 1.5 swscale.c, 1.146, 1.147 swscale_template.c, 1.107, 1.108 yuv2rgb.c, 1.26, 1.27 yuv2rgb_template.c, 1.15, 1.16
From: Aurelien Jacobs CVS <syncmail at mplayerhq.hu>
Date: Thu Oct 21 13:55:24 CEST 2004
CVS change done by Aurelien Jacobs CVS
Update of /cvsroot/mplayer/main/postproc
In directory mail:/var2/tmp/cvs-serv9471/postproc
Modified Files:
rgb2rgb.c rgb2rgb_template.c swscale-example.c swscale.c
swscale_template.c yuv2rgb.c yuv2rgb_template.c
Log Message:
Adapt the existing MMX/MMX2/SSE/3DNow optimizations so they also work on x86_64.
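
The same pattern repeats through the whole patch: preprocessor guards widen from ARCH_X86 to ARCH_X86 || ARCH_X86_64, hardcoded 32-bit register names (%%eax, %%ebx, %%esi, %%edi, %%esp) become width-agnostic string macros (REG_a, REG_b, REG_S, REG_D, REG_SP), size-suffixed mnemonics (addl, cmpl, movl, leal) lose their suffix so the assembler derives the operand size from the register name, and int values feeding the asm are widened to long. A minimal sketch of the idea follows; the macro definitions here are hypothetical stand-ins, the real ones are presumably supplied by a shared header such as the ../cpudetect.h include added below.

/* Minimal sketch, NOT the project's actual header: two possible
 * expansions of the register-name macro this patch relies on. */
#if defined(ARCH_X86_64)
#define REG_a "rax"            /* 64-bit name on x86_64 */
#else
#define REG_a "eax"            /* classic 32-bit name */
#endif

/* One asm body that assembles at both widths: suffix-free mnemonics
 * ("add"/"cmp" instead of "addl"/"cmpl") let the assembler pick the
 * operand size from the register name spliced in at compile time.
 * Assumes len > 0 and a multiple of 8, like the loops in the patch. */
static void zero_every_8th_byte(unsigned char *p, long len)
{
    __asm__ volatile(
        "xor %%"REG_a", %%"REG_a"       \n\t"  /* index = 0 */
        "1:                             \n\t"
        "movb $0, (%0, %%"REG_a")       \n\t"  /* p[index] = 0 */
        "add $8, %%"REG_a"              \n\t"
        "cmp %1, %%"REG_a"              \n\t"
        " jb 1b                         \n\t"
        :: "r" (p), "r" (len)                  /* long: register-width */
        : "%"REG_a, "memory");
}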
Index: rgb2rgb.c
===================================================================
RCS file: /cvsroot/mplayer/main/postproc/rgb2rgb.c,v
retrieving revision 1.61
retrieving revision 1.62
diff -u -r1.61 -r1.62
--- rgb2rgb.c 26 Apr 2004 19:38:17 -0000 1.61
+++ rgb2rgb.c 21 Oct 2004 11:55:20 -0000 1.62
@@ -11,6 +11,7 @@
#include "../config.h"
#include "rgb2rgb.h"
#include "swscale.h"
+#include "../cpudetect.h"
#include "../mangle.h"
#include "../bswap.h"
#include "../libvo/fastmemcpy.h"
@@ -68,7 +69,7 @@
int srcStride1, int srcStride2,
int srcStride3, int dstStride);
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
static const uint64_t mmx_null __attribute__((aligned(8))) = 0x0000000000000000ULL;
static const uint64_t mmx_one __attribute__((aligned(8))) = 0xFFFFFFFFFFFFFFFFULL;
static const uint64_t mask32b attribute_used __attribute__((aligned(8))) = 0x000000FF000000FFULL;
@@ -152,7 +153,7 @@
#define RENAME(a) a ## _C
#include "rgb2rgb_template.c"
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
//MMX versions
#undef RENAME
@@ -181,7 +182,7 @@
#define RENAME(a) a ## _3DNOW
#include "rgb2rgb_template.c"
-#endif //ARCH_X86
+#endif //ARCH_X86 || ARCH_X86_64
/*
rgb15->rgb16 Original by Strepto/Astral
@@ -191,7 +192,7 @@
*/
void sws_rgb2rgb_init(int flags){
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
if(flags & SWS_CPU_CAPS_MMX2){
rgb15to16= rgb15to16_MMX2;
rgb15to24= rgb15to24_MMX2;
Index: rgb2rgb_template.c
===================================================================
RCS file: /cvsroot/mplayer/main/postproc/rgb2rgb_template.c,v
retrieving revision 1.71
retrieving revision 1.72
diff -u -r1.71 -r1.72
--- rgb2rgb_template.c 21 Sep 2004 17:23:49 -0000 1.71
+++ rgb2rgb_template.c 21 Oct 2004 11:55:20 -0000 1.72
@@ -349,9 +349,9 @@
"pslld $11, %%mm3 \n\t"
"por %%mm3, %%mm0 \n\t"
MOVNTQ" %%mm0, (%0) \n\t"
- "addl $16, %1 \n\t"
- "addl $8, %0 \n\t"
- "cmpl %2, %1 \n\t"
+ "add $16, %1 \n\t"
+ "add $8, %0 \n\t"
+ "cmp %2, %1 \n\t"
" jb 1b \n\t"
: "+r" (d), "+r"(s)
: "r" (mm_end), "m" (mask3216g), "m" (mask3216br), "m" (mul3216)
@@ -509,9 +509,9 @@
"pslld $10, %%mm3 \n\t"
"por %%mm3, %%mm0 \n\t"
MOVNTQ" %%mm0, (%0) \n\t"
- "addl $16, %1 \n\t"
- "addl $8, %0 \n\t"
- "cmpl %2, %1 \n\t"
+ "add $16, %1 \n\t"
+ "add $8, %0 \n\t"
+ "cmp %2, %1 \n\t"
" jb 1b \n\t"
: "+r" (d), "+r"(s)
: "r" (mm_end), "m" (mask3215g), "m" (mask3216br), "m" (mul3215)
@@ -1345,11 +1345,11 @@
#ifdef HAVE_MMX
/* TODO: unroll this loop */
asm volatile (
- "xorl %%eax, %%eax \n\t"
+ "xor %%"REG_a", %%"REG_a" \n\t"
".balign 16 \n\t"
"1: \n\t"
- PREFETCH" 32(%0, %%eax) \n\t"
- "movq (%0, %%eax), %%mm0 \n\t"
+ PREFETCH" 32(%0, %%"REG_a") \n\t"
+ "movq (%0, %%"REG_a"), %%mm0 \n\t"
"movq %%mm0, %%mm1 \n\t"
"movq %%mm0, %%mm2 \n\t"
"pslld $16, %%mm0 \n\t"
@@ -1359,12 +1359,12 @@
"pand "MANGLE(mask32b)", %%mm1 \n\t"
"por %%mm0, %%mm2 \n\t"
"por %%mm1, %%mm2 \n\t"
- MOVNTQ" %%mm2, (%1, %%eax) \n\t"
- "addl $8, %%eax \n\t"
- "cmpl %2, %%eax \n\t"
+ MOVNTQ" %%mm2, (%1, %%"REG_a") \n\t"
+ "add $8, %%"REG_a" \n\t"
+ "cmp %2, %%"REG_a" \n\t"
" jb 1b \n\t"
- :: "r" (src), "r"(dst), "r" (src_size-7)
- : "%eax"
+ :: "r" (src), "r"(dst), "r" ((long)src_size-7)
+ : "%"REG_a
);
__asm __volatile(SFENCE:::"memory");
@@ -1391,43 +1391,43 @@
{
unsigned i;
#ifdef HAVE_MMX
- int mmx_size= 23 - src_size;
+ long mmx_size= 23 - src_size;
asm volatile (
"movq "MANGLE(mask24r)", %%mm5 \n\t"
"movq "MANGLE(mask24g)", %%mm6 \n\t"
"movq "MANGLE(mask24b)", %%mm7 \n\t"
".balign 16 \n\t"
"1: \n\t"
- PREFETCH" 32(%1, %%eax) \n\t"
- "movq (%1, %%eax), %%mm0 \n\t" // BGR BGR BG
- "movq (%1, %%eax), %%mm1 \n\t" // BGR BGR BG
- "movq 2(%1, %%eax), %%mm2 \n\t" // R BGR BGR B
+ PREFETCH" 32(%1, %%"REG_a") \n\t"
+ "movq (%1, %%"REG_a"), %%mm0 \n\t" // BGR BGR BG
+ "movq (%1, %%"REG_a"), %%mm1 \n\t" // BGR BGR BG
+ "movq 2(%1, %%"REG_a"), %%mm2 \n\t" // R BGR BGR B
"psllq $16, %%mm0 \n\t" // 00 BGR BGR
"pand %%mm5, %%mm0 \n\t"
"pand %%mm6, %%mm1 \n\t"
"pand %%mm7, %%mm2 \n\t"
"por %%mm0, %%mm1 \n\t"
"por %%mm2, %%mm1 \n\t"
- "movq 6(%1, %%eax), %%mm0 \n\t" // BGR BGR BG
- MOVNTQ" %%mm1, (%2, %%eax) \n\t" // RGB RGB RG
- "movq 8(%1, %%eax), %%mm1 \n\t" // R BGR BGR B
- "movq 10(%1, %%eax), %%mm2 \n\t" // GR BGR BGR
+ "movq 6(%1, %%"REG_a"), %%mm0 \n\t" // BGR BGR BG
+ MOVNTQ" %%mm1, (%2, %%"REG_a")\n\t" // RGB RGB RG
+ "movq 8(%1, %%"REG_a"), %%mm1 \n\t" // R BGR BGR B
+ "movq 10(%1, %%"REG_a"), %%mm2 \n\t" // GR BGR BGR
"pand %%mm7, %%mm0 \n\t"
"pand %%mm5, %%mm1 \n\t"
"pand %%mm6, %%mm2 \n\t"
"por %%mm0, %%mm1 \n\t"
"por %%mm2, %%mm1 \n\t"
- "movq 14(%1, %%eax), %%mm0 \n\t" // R BGR BGR B
- MOVNTQ" %%mm1, 8(%2, %%eax) \n\t" // B RGB RGB R
- "movq 16(%1, %%eax), %%mm1 \n\t" // GR BGR BGR
- "movq 18(%1, %%eax), %%mm2 \n\t" // BGR BGR BG
+ "movq 14(%1, %%"REG_a"), %%mm0 \n\t" // R BGR BGR B
+ MOVNTQ" %%mm1, 8(%2, %%"REG_a")\n\t" // B RGB RGB R
+ "movq 16(%1, %%"REG_a"), %%mm1 \n\t" // GR BGR BGR
+ "movq 18(%1, %%"REG_a"), %%mm2 \n\t" // BGR BGR BG
"pand %%mm6, %%mm0 \n\t"
"pand %%mm7, %%mm1 \n\t"
"pand %%mm5, %%mm2 \n\t"
"por %%mm0, %%mm1 \n\t"
"por %%mm2, %%mm1 \n\t"
- MOVNTQ" %%mm1, 16(%2, %%eax) \n\t"
- "addl $24, %%eax \n\t"
+ MOVNTQ" %%mm1, 16(%2, %%"REG_a")\n\t"
+ "add $24, %%"REG_a" \n\t"
" js 1b \n\t"
: "+a" (mmx_size)
: "r" (src-mmx_size), "r"(dst-mmx_size)
@@ -1465,20 +1465,20 @@
#ifdef HAVE_MMX
//FIXME handle 2 lines a once (fewer prefetch, reuse some chrom, but very likely limited by mem anyway)
asm volatile(
- "xorl %%eax, %%eax \n\t"
+ "xor %%"REG_a", %%"REG_a" \n\t"
".balign 16 \n\t"
"1: \n\t"
- PREFETCH" 32(%1, %%eax, 2) \n\t"
- PREFETCH" 32(%2, %%eax) \n\t"
- PREFETCH" 32(%3, %%eax) \n\t"
- "movq (%2, %%eax), %%mm0 \n\t" // U(0)
+ PREFETCH" 32(%1, %%"REG_a", 2) \n\t"
+ PREFETCH" 32(%2, %%"REG_a") \n\t"
+ PREFETCH" 32(%3, %%"REG_a") \n\t"
+ "movq (%2, %%"REG_a"), %%mm0 \n\t" // U(0)
"movq %%mm0, %%mm2 \n\t" // U(0)
- "movq (%3, %%eax), %%mm1 \n\t" // V(0)
+ "movq (%3, %%"REG_a"), %%mm1 \n\t" // V(0)
"punpcklbw %%mm1, %%mm0 \n\t" // UVUV UVUV(0)
"punpckhbw %%mm1, %%mm2 \n\t" // UVUV UVUV(8)
- "movq (%1, %%eax,2), %%mm3 \n\t" // Y(0)
- "movq 8(%1, %%eax,2), %%mm5 \n\t" // Y(8)
+ "movq (%1, %%"REG_a",2), %%mm3 \n\t" // Y(0)
+ "movq 8(%1, %%"REG_a",2), %%mm5 \n\t" // Y(8)
"movq %%mm3, %%mm4 \n\t" // Y(0)
"movq %%mm5, %%mm6 \n\t" // Y(8)
"punpcklbw %%mm0, %%mm3 \n\t" // YUYV YUYV(0)
@@ -1486,16 +1486,16 @@
"punpcklbw %%mm2, %%mm5 \n\t" // YUYV YUYV(8)
"punpckhbw %%mm2, %%mm6 \n\t" // YUYV YUYV(12)
- MOVNTQ" %%mm3, (%0, %%eax, 4) \n\t"
- MOVNTQ" %%mm4, 8(%0, %%eax, 4) \n\t"
- MOVNTQ" %%mm5, 16(%0, %%eax, 4) \n\t"
- MOVNTQ" %%mm6, 24(%0, %%eax, 4) \n\t"
+ MOVNTQ" %%mm3, (%0, %%"REG_a", 4)\n\t"
+ MOVNTQ" %%mm4, 8(%0, %%"REG_a", 4)\n\t"
+ MOVNTQ" %%mm5, 16(%0, %%"REG_a", 4)\n\t"
+ MOVNTQ" %%mm6, 24(%0, %%"REG_a", 4)\n\t"
- "addl $8, %%eax \n\t"
- "cmpl %4, %%eax \n\t"
+ "add $8, %%"REG_a" \n\t"
+ "cmp %4, %%"REG_a" \n\t"
" jb 1b \n\t"
- ::"r"(dst), "r"(ysrc), "r"(usrc), "r"(vsrc), "g" (chromWidth)
- : "%eax"
+ ::"r"(dst), "r"(ysrc), "r"(usrc), "r"(vsrc), "g" ((long)chromWidth)
+ : "%"REG_a
);
#else
@@ -1618,20 +1618,20 @@
#ifdef HAVE_MMX
//FIXME handle 2 lines a once (fewer prefetch, reuse some chrom, but very likely limited by mem anyway)
asm volatile(
- "xorl %%eax, %%eax \n\t"
+ "xor %%"REG_a", %%"REG_a" \n\t"
".balign 16 \n\t"
"1: \n\t"
- PREFETCH" 32(%1, %%eax, 2) \n\t"
- PREFETCH" 32(%2, %%eax) \n\t"
- PREFETCH" 32(%3, %%eax) \n\t"
- "movq (%2, %%eax), %%mm0 \n\t" // U(0)
+ PREFETCH" 32(%1, %%"REG_a", 2) \n\t"
+ PREFETCH" 32(%2, %%"REG_a") \n\t"
+ PREFETCH" 32(%3, %%"REG_a") \n\t"
+ "movq (%2, %%"REG_a"), %%mm0 \n\t" // U(0)
"movq %%mm0, %%mm2 \n\t" // U(0)
- "movq (%3, %%eax), %%mm1 \n\t" // V(0)
+ "movq (%3, %%"REG_a"), %%mm1 \n\t" // V(0)
"punpcklbw %%mm1, %%mm0 \n\t" // UVUV UVUV(0)
"punpckhbw %%mm1, %%mm2 \n\t" // UVUV UVUV(8)
- "movq (%1, %%eax,2), %%mm3 \n\t" // Y(0)
- "movq 8(%1, %%eax,2), %%mm5 \n\t" // Y(8)
+ "movq (%1, %%"REG_a",2), %%mm3 \n\t" // Y(0)
+ "movq 8(%1, %%"REG_a",2), %%mm5 \n\t" // Y(8)
"movq %%mm0, %%mm4 \n\t" // Y(0)
"movq %%mm2, %%mm6 \n\t" // Y(8)
"punpcklbw %%mm3, %%mm0 \n\t" // YUYV YUYV(0)
@@ -1639,16 +1639,16 @@
"punpcklbw %%mm5, %%mm2 \n\t" // YUYV YUYV(8)
"punpckhbw %%mm5, %%mm6 \n\t" // YUYV YUYV(12)
- MOVNTQ" %%mm0, (%0, %%eax, 4) \n\t"
- MOVNTQ" %%mm4, 8(%0, %%eax, 4) \n\t"
- MOVNTQ" %%mm2, 16(%0, %%eax, 4) \n\t"
- MOVNTQ" %%mm6, 24(%0, %%eax, 4) \n\t"
+ MOVNTQ" %%mm0, (%0, %%"REG_a", 4)\n\t"
+ MOVNTQ" %%mm4, 8(%0, %%"REG_a", 4)\n\t"
+ MOVNTQ" %%mm2, 16(%0, %%"REG_a", 4)\n\t"
+ MOVNTQ" %%mm6, 24(%0, %%"REG_a", 4)\n\t"
- "addl $8, %%eax \n\t"
- "cmpl %4, %%eax \n\t"
+ "add $8, %%"REG_a" \n\t"
+ "cmp %4, %%"REG_a" \n\t"
" jb 1b \n\t"
- ::"r"(dst), "r"(ysrc), "r"(usrc), "r"(vsrc), "g" (chromWidth)
- : "%eax"
+ ::"r"(dst), "r"(ysrc), "r"(usrc), "r"(vsrc), "g" ((long)chromWidth)
+ : "%"REG_a
);
#else
//FIXME adapt the alpha asm code from yv12->yuy2
@@ -1740,14 +1740,14 @@
{
#ifdef HAVE_MMX
asm volatile(
- "xorl %%eax, %%eax \n\t"
+ "xor %%"REG_a", %%"REG_a" \n\t"
"pcmpeqw %%mm7, %%mm7 \n\t"
"psrlw $8, %%mm7 \n\t" // FF,00,FF,00...
".balign 16 \n\t"
"1: \n\t"
- PREFETCH" 64(%0, %%eax, 4) \n\t"
- "movq (%0, %%eax, 4), %%mm0 \n\t" // YUYV YUYV(0)
- "movq 8(%0, %%eax, 4), %%mm1 \n\t" // YUYV YUYV(4)
+ PREFETCH" 64(%0, %%"REG_a", 4) \n\t"
+ "movq (%0, %%"REG_a", 4), %%mm0 \n\t" // YUYV YUYV(0)
+ "movq 8(%0, %%"REG_a", 4), %%mm1\n\t" // YUYV YUYV(4)
"movq %%mm0, %%mm2 \n\t" // YUYV YUYV(0)
"movq %%mm1, %%mm3 \n\t" // YUYV YUYV(4)
"psrlw $8, %%mm0 \n\t" // U0V0 U0V0(0)
@@ -1757,10 +1757,10 @@
"packuswb %%mm1, %%mm0 \n\t" // UVUV UVUV(0)
"packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(0)
- MOVNTQ" %%mm2, (%1, %%eax, 2) \n\t"
+ MOVNTQ" %%mm2, (%1, %%"REG_a", 2)\n\t"
- "movq 16(%0, %%eax, 4), %%mm1 \n\t" // YUYV YUYV(8)
- "movq 24(%0, %%eax, 4), %%mm2 \n\t" // YUYV YUYV(12)
+ "movq 16(%0, %%"REG_a", 4), %%mm1\n\t" // YUYV YUYV(8)
+ "movq 24(%0, %%"REG_a", 4), %%mm2\n\t" // YUYV YUYV(12)
"movq %%mm1, %%mm3 \n\t" // YUYV YUYV(8)
"movq %%mm2, %%mm4 \n\t" // YUYV YUYV(12)
"psrlw $8, %%mm1 \n\t" // U0V0 U0V0(8)
@@ -1770,7 +1770,7 @@
"packuswb %%mm2, %%mm1 \n\t" // UVUV UVUV(8)
"packuswb %%mm4, %%mm3 \n\t" // YYYY YYYY(8)
- MOVNTQ" %%mm3, 8(%1, %%eax, 2) \n\t"
+ MOVNTQ" %%mm3, 8(%1, %%"REG_a", 2)\n\t"
"movq %%mm0, %%mm2 \n\t" // UVUV UVUV(0)
"movq %%mm1, %%mm3 \n\t" // UVUV UVUV(8)
@@ -1781,28 +1781,28 @@
"packuswb %%mm1, %%mm0 \n\t" // VVVV VVVV(0)
"packuswb %%mm3, %%mm2 \n\t" // UUUU UUUU(0)
- MOVNTQ" %%mm0, (%3, %%eax) \n\t"
- MOVNTQ" %%mm2, (%2, %%eax) \n\t"
+ MOVNTQ" %%mm0, (%3, %%"REG_a") \n\t"
+ MOVNTQ" %%mm2, (%2, %%"REG_a") \n\t"
- "addl $8, %%eax \n\t"
- "cmpl %4, %%eax \n\t"
+ "add $8, %%"REG_a" \n\t"
+ "cmp %4, %%"REG_a" \n\t"
" jb 1b \n\t"
- ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
- : "memory", "%eax"
+ ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" ((long)chromWidth)
+ : "memory", "%"REG_a
);
ydst += lumStride;
src += srcStride;
asm volatile(
- "xorl %%eax, %%eax \n\t"
+ "xor %%"REG_a", %%"REG_a" \n\t"
".balign 16 \n\t"
"1: \n\t"
- PREFETCH" 64(%0, %%eax, 4) \n\t"
- "movq (%0, %%eax, 4), %%mm0 \n\t" // YUYV YUYV(0)
- "movq 8(%0, %%eax, 4), %%mm1 \n\t" // YUYV YUYV(4)
- "movq 16(%0, %%eax, 4), %%mm2 \n\t" // YUYV YUYV(8)
- "movq 24(%0, %%eax, 4), %%mm3 \n\t" // YUYV YUYV(12)
+ PREFETCH" 64(%0, %%"REG_a", 4) \n\t"
+ "movq (%0, %%"REG_a", 4), %%mm0 \n\t" // YUYV YUYV(0)
+ "movq 8(%0, %%"REG_a", 4), %%mm1\n\t" // YUYV YUYV(4)
+ "movq 16(%0, %%"REG_a", 4), %%mm2\n\t" // YUYV YUYV(8)
+ "movq 24(%0, %%"REG_a", 4), %%mm3\n\t" // YUYV YUYV(12)
"pand %%mm7, %%mm0 \n\t" // Y0Y0 Y0Y0(0)
"pand %%mm7, %%mm1 \n\t" // Y0Y0 Y0Y0(4)
"pand %%mm7, %%mm2 \n\t" // Y0Y0 Y0Y0(8)
@@ -1810,15 +1810,15 @@
"packuswb %%mm1, %%mm0 \n\t" // YYYY YYYY(0)
"packuswb %%mm3, %%mm2 \n\t" // YYYY YYYY(8)
- MOVNTQ" %%mm0, (%1, %%eax, 2) \n\t"
- MOVNTQ" %%mm2, 8(%1, %%eax, 2) \n\t"
+ MOVNTQ" %%mm0, (%1, %%"REG_a", 2)\n\t"
+ MOVNTQ" %%mm2, 8(%1, %%"REG_a", 2)\n\t"
- "addl $8, %%eax \n\t"
- "cmpl %4, %%eax \n\t"
+ "add $8, %%"REG_a" \n\t"
+ "cmp %4, %%"REG_a" \n\t"
" jb 1b \n\t"
- ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" (chromWidth)
- : "memory", "%eax"
+ ::"r"(src), "r"(ydst), "r"(udst), "r"(vdst), "g" ((long)chromWidth)
+ : "memory", "%"REG_a
);
#else
unsigned i;
@@ -1877,16 +1877,16 @@
for(y=1; y<srcHeight; y++){
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
- const int mmxSize= srcWidth&~15;
+ const long mmxSize= srcWidth&~15;
asm volatile(
- "movl %4, %%eax \n\t"
+ "mov %4, %%"REG_a" \n\t"
"1: \n\t"
- "movq (%0, %%eax), %%mm0 \n\t"
- "movq (%1, %%eax), %%mm1 \n\t"
- "movq 1(%0, %%eax), %%mm2 \n\t"
- "movq 1(%1, %%eax), %%mm3 \n\t"
- "movq -1(%0, %%eax), %%mm4 \n\t"
- "movq -1(%1, %%eax), %%mm5 \n\t"
+ "movq (%0, %%"REG_a"), %%mm0 \n\t"
+ "movq (%1, %%"REG_a"), %%mm1 \n\t"
+ "movq 1(%0, %%"REG_a"), %%mm2 \n\t"
+ "movq 1(%1, %%"REG_a"), %%mm3 \n\t"
+ "movq -1(%0, %%"REG_a"), %%mm4 \n\t"
+ "movq -1(%1, %%"REG_a"), %%mm5 \n\t"
PAVGB" %%mm0, %%mm5 \n\t"
PAVGB" %%mm0, %%mm3 \n\t"
PAVGB" %%mm0, %%mm5 \n\t"
@@ -1902,22 +1902,22 @@
"punpcklbw %%mm2, %%mm4 \n\t"
"punpckhbw %%mm2, %%mm6 \n\t"
#if 1
- MOVNTQ" %%mm5, (%2, %%eax, 2) \n\t"
- MOVNTQ" %%mm7, 8(%2, %%eax, 2) \n\t"
- MOVNTQ" %%mm4, (%3, %%eax, 2) \n\t"
- MOVNTQ" %%mm6, 8(%3, %%eax, 2) \n\t"
-#else
- "movq %%mm5, (%2, %%eax, 2) \n\t"
- "movq %%mm7, 8(%2, %%eax, 2) \n\t"
- "movq %%mm4, (%3, %%eax, 2) \n\t"
- "movq %%mm6, 8(%3, %%eax, 2) \n\t"
+ MOVNTQ" %%mm5, (%2, %%"REG_a", 2)\n\t"
+ MOVNTQ" %%mm7, 8(%2, %%"REG_a", 2)\n\t"
+ MOVNTQ" %%mm4, (%3, %%"REG_a", 2)\n\t"
+ MOVNTQ" %%mm6, 8(%3, %%"REG_a", 2)\n\t"
+#else
+ "movq %%mm5, (%2, %%"REG_a", 2) \n\t"
+ "movq %%mm7, 8(%2, %%"REG_a", 2)\n\t"
+ "movq %%mm4, (%3, %%"REG_a", 2) \n\t"
+ "movq %%mm6, 8(%3, %%"REG_a", 2)\n\t"
#endif
- "addl $8, %%eax \n\t"
+ "add $8, %%"REG_a" \n\t"
" js 1b \n\t"
:: "r" (src + mmxSize ), "r" (src + srcStride + mmxSize ),
"r" (dst + mmxSize*2), "r" (dst + dstStride + mmxSize*2),
"g" (-mmxSize)
- : "%eax"
+ : "%"REG_a
);
#else
@@ -2107,20 +2107,20 @@
for(i=0; i<2; i++)
{
asm volatile(
- "movl %2, %%eax \n\t"
+ "mov %2, %%"REG_a" \n\t"
"movq "MANGLE(bgr2YCoeff)", %%mm6 \n\t"
"movq "MANGLE(w1111)", %%mm5 \n\t"
"pxor %%mm7, %%mm7 \n\t"
- "leal (%%eax, %%eax, 2), %%ebx \n\t"
+ "lea (%%"REG_a", %%"REG_a", 2), %%"REG_b"\n\t"
".balign 16 \n\t"
"1: \n\t"
- PREFETCH" 64(%0, %%ebx) \n\t"
- "movd (%0, %%ebx), %%mm0 \n\t"
- "movd 3(%0, %%ebx), %%mm1 \n\t"
+ PREFETCH" 64(%0, %%"REG_b") \n\t"
+ "movd (%0, %%"REG_b"), %%mm0 \n\t"
+ "movd 3(%0, %%"REG_b"), %%mm1 \n\t"
"punpcklbw %%mm7, %%mm0 \n\t"
"punpcklbw %%mm7, %%mm1 \n\t"
- "movd 6(%0, %%ebx), %%mm2 \n\t"
- "movd 9(%0, %%ebx), %%mm3 \n\t"
+ "movd 6(%0, %%"REG_b"), %%mm2 \n\t"
+ "movd 9(%0, %%"REG_b"), %%mm3 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"punpcklbw %%mm7, %%mm3 \n\t"
"pmaddwd %%mm6, %%mm0 \n\t"
@@ -2140,12 +2140,12 @@
"packssdw %%mm2, %%mm0 \n\t"
"psraw $7, %%mm0 \n\t"
- "movd 12(%0, %%ebx), %%mm4 \n\t"
- "movd 15(%0, %%ebx), %%mm1 \n\t"
+ "movd 12(%0, %%"REG_b"), %%mm4 \n\t"
+ "movd 15(%0, %%"REG_b"), %%mm1 \n\t"
"punpcklbw %%mm7, %%mm4 \n\t"
"punpcklbw %%mm7, %%mm1 \n\t"
- "movd 18(%0, %%ebx), %%mm2 \n\t"
- "movd 21(%0, %%ebx), %%mm3 \n\t"
+ "movd 18(%0, %%"REG_b"), %%mm2 \n\t"
+ "movd 21(%0, %%"REG_b"), %%mm3 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"punpcklbw %%mm7, %%mm3 \n\t"
"pmaddwd %%mm6, %%mm4 \n\t"
@@ -2162,39 +2162,39 @@
"packssdw %%mm3, %%mm2 \n\t"
"pmaddwd %%mm5, %%mm4 \n\t"
"pmaddwd %%mm5, %%mm2 \n\t"
- "addl $24, %%ebx \n\t"
+ "add $24, %%"REG_b" \n\t"
"packssdw %%mm2, %%mm4 \n\t"
"psraw $7, %%mm4 \n\t"
"packuswb %%mm4, %%mm0 \n\t"
"paddusb "MANGLE(bgr2YOffset)", %%mm0 \n\t"
- MOVNTQ" %%mm0, (%1, %%eax) \n\t"
- "addl $8, %%eax \n\t"
+ MOVNTQ" %%mm0, (%1, %%"REG_a") \n\t"
+ "add $8, %%"REG_a" \n\t"
" js 1b \n\t"
- : : "r" (src+width*3), "r" (ydst+width), "g" (-width)
- : "%eax", "%ebx"
+ : : "r" (src+width*3), "r" (ydst+width), "g" ((long)-width)
+ : "%"REG_a, "%"REG_b
);
ydst += lumStride;
src += srcStride;
}
src -= srcStride*2;
asm volatile(
- "movl %4, %%eax \n\t"
+ "mov %4, %%"REG_a" \n\t"
"movq "MANGLE(w1111)", %%mm5 \n\t"
"movq "MANGLE(bgr2UCoeff)", %%mm6 \n\t"
"pxor %%mm7, %%mm7 \n\t"
- "leal (%%eax, %%eax, 2), %%ebx \n\t"
- "addl %%ebx, %%ebx \n\t"
+ "lea (%%"REG_a", %%"REG_a", 2), %%"REG_b"\n\t"
+ "add %%"REG_b", %%"REG_b" \n\t"
".balign 16 \n\t"
"1: \n\t"
- PREFETCH" 64(%0, %%ebx) \n\t"
- PREFETCH" 64(%1, %%ebx) \n\t"
+ PREFETCH" 64(%0, %%"REG_b") \n\t"
+ PREFETCH" 64(%1, %%"REG_b") \n\t"
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
- "movq (%0, %%ebx), %%mm0 \n\t"
- "movq (%1, %%ebx), %%mm1 \n\t"
- "movq 6(%0, %%ebx), %%mm2 \n\t"
- "movq 6(%1, %%ebx), %%mm3 \n\t"
+ "movq (%0, %%"REG_b"), %%mm0 \n\t"
+ "movq (%1, %%"REG_b"), %%mm1 \n\t"
+ "movq 6(%0, %%"REG_b"), %%mm2 \n\t"
+ "movq 6(%1, %%"REG_b"), %%mm3 \n\t"
PAVGB" %%mm1, %%mm0 \n\t"
PAVGB" %%mm3, %%mm2 \n\t"
"movq %%mm0, %%mm1 \n\t"
@@ -2206,10 +2206,10 @@
"punpcklbw %%mm7, %%mm0 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
#else
- "movd (%0, %%ebx), %%mm0 \n\t"
- "movd (%1, %%ebx), %%mm1 \n\t"
- "movd 3(%0, %%ebx), %%mm2 \n\t"
- "movd 3(%1, %%ebx), %%mm3 \n\t"
+ "movd (%0, %%"REG_b"), %%mm0 \n\t"
+ "movd (%1, %%"REG_b"), %%mm1 \n\t"
+ "movd 3(%0, %%"REG_b"), %%mm2 \n\t"
+ "movd 3(%1, %%"REG_b"), %%mm3 \n\t"
"punpcklbw %%mm7, %%mm0 \n\t"
"punpcklbw %%mm7, %%mm1 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
@@ -2217,10 +2217,10 @@
"paddw %%mm1, %%mm0 \n\t"
"paddw %%mm3, %%mm2 \n\t"
"paddw %%mm2, %%mm0 \n\t"
- "movd 6(%0, %%ebx), %%mm4 \n\t"
- "movd 6(%1, %%ebx), %%mm1 \n\t"
- "movd 9(%0, %%ebx), %%mm2 \n\t"
- "movd 9(%1, %%ebx), %%mm3 \n\t"
+ "movd 6(%0, %%"REG_b"), %%mm4 \n\t"
+ "movd 6(%1, %%"REG_b"), %%mm1 \n\t"
+ "movd 9(%0, %%"REG_b"), %%mm2 \n\t"
+ "movd 9(%1, %%"REG_b"), %%mm3 \n\t"
"punpcklbw %%mm7, %%mm4 \n\t"
"punpcklbw %%mm7, %%mm1 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
@@ -2252,10 +2252,10 @@
"psraw $7, %%mm0 \n\t"
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
- "movq 12(%0, %%ebx), %%mm4 \n\t"
- "movq 12(%1, %%ebx), %%mm1 \n\t"
- "movq 18(%0, %%ebx), %%mm2 \n\t"
- "movq 18(%1, %%ebx), %%mm3 \n\t"
+ "movq 12(%0, %%"REG_b"), %%mm4 \n\t"
+ "movq 12(%1, %%"REG_b"), %%mm1 \n\t"
+ "movq 18(%0, %%"REG_b"), %%mm2 \n\t"
+ "movq 18(%1, %%"REG_b"), %%mm3 \n\t"
PAVGB" %%mm1, %%mm4 \n\t"
PAVGB" %%mm3, %%mm2 \n\t"
"movq %%mm4, %%mm1 \n\t"
@@ -2267,10 +2267,10 @@
"punpcklbw %%mm7, %%mm4 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
#else
- "movd 12(%0, %%ebx), %%mm4 \n\t"
- "movd 12(%1, %%ebx), %%mm1 \n\t"
- "movd 15(%0, %%ebx), %%mm2 \n\t"
- "movd 15(%1, %%ebx), %%mm3 \n\t"
+ "movd 12(%0, %%"REG_b"), %%mm4 \n\t"
+ "movd 12(%1, %%"REG_b"), %%mm1 \n\t"
+ "movd 15(%0, %%"REG_b"), %%mm2 \n\t"
+ "movd 15(%1, %%"REG_b"), %%mm3 \n\t"
"punpcklbw %%mm7, %%mm4 \n\t"
"punpcklbw %%mm7, %%mm1 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
@@ -2278,10 +2278,10 @@
"paddw %%mm1, %%mm4 \n\t"
"paddw %%mm3, %%mm2 \n\t"
"paddw %%mm2, %%mm4 \n\t"
- "movd 18(%0, %%ebx), %%mm5 \n\t"
- "movd 18(%1, %%ebx), %%mm1 \n\t"
- "movd 21(%0, %%ebx), %%mm2 \n\t"
- "movd 21(%1, %%ebx), %%mm3 \n\t"
+ "movd 18(%0, %%"REG_b"), %%mm5 \n\t"
+ "movd 18(%1, %%"REG_b"), %%mm1 \n\t"
+ "movd 21(%0, %%"REG_b"), %%mm2 \n\t"
+ "movd 21(%1, %%"REG_b"), %%mm3 \n\t"
"punpcklbw %%mm7, %%mm5 \n\t"
"punpcklbw %%mm7, %%mm1 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
@@ -2310,7 +2310,7 @@
"packssdw %%mm3, %%mm1 \n\t"
"pmaddwd %%mm5, %%mm4 \n\t"
"pmaddwd %%mm5, %%mm1 \n\t"
- "addl $24, %%ebx \n\t"
+ "add $24, %%"REG_b" \n\t"
"packssdw %%mm1, %%mm4 \n\t" // V3 V2 U3 U2
"psraw $7, %%mm4 \n\t"
@@ -2319,14 +2319,13 @@
"punpckhdq %%mm4, %%mm1 \n\t"
"packsswb %%mm1, %%mm0 \n\t"
"paddb "MANGLE(bgr2UVOffset)", %%mm0 \n\t"
-
- "movd %%mm0, (%2, %%eax) \n\t"
+ "movd %%mm0, (%2, %%"REG_a") \n\t"
"punpckhdq %%mm0, %%mm0 \n\t"
- "movd %%mm0, (%3, %%eax) \n\t"
- "addl $4, %%eax \n\t"
+ "movd %%mm0, (%3, %%"REG_a") \n\t"
+ "add $4, %%"REG_a" \n\t"
" js 1b \n\t"
- : : "r" (src+chromWidth*6), "r" (src+srcStride+chromWidth*6), "r" (udst+chromWidth), "r" (vdst+chromWidth), "g" (-chromWidth)
- : "%eax", "%ebx"
+ : : "r" (src+chromWidth*6), "r" (src+srcStride+chromWidth*6), "r" (udst+chromWidth), "r" (vdst+chromWidth), "g" ((long)-chromWidth)
+ : "%"REG_a, "%"REG_b
);
udst += chromStride;
@@ -2403,48 +2402,48 @@
#ifdef HAVE_MMX
#ifdef HAVE_SSE2
asm(
- "xorl %%eax, %%eax \n\t"
+ "xor %%"REG_a", %%"REG_a" \n\t"
"1: \n\t"
- PREFETCH" 64(%1, %%eax) \n\t"
- PREFETCH" 64(%2, %%eax) \n\t"
- "movdqa (%1, %%eax), %%xmm0 \n\t"
- "movdqa (%1, %%eax), %%xmm1 \n\t"
- "movdqa (%2, %%eax), %%xmm2 \n\t"
+ PREFETCH" 64(%1, %%"REG_a") \n\t"
+ PREFETCH" 64(%2, %%"REG_a") \n\t"
+ "movdqa (%1, %%"REG_a"), %%xmm0 \n\t"
+ "movdqa (%1, %%"REG_a"), %%xmm1 \n\t"
+ "movdqa (%2, %%"REG_a"), %%xmm2 \n\t"
"punpcklbw %%xmm2, %%xmm0 \n\t"
"punpckhbw %%xmm2, %%xmm1 \n\t"
- "movntdq %%xmm0, (%0, %%eax, 2) \n\t"
- "movntdq %%xmm1, 16(%0, %%eax, 2)\n\t"
- "addl $16, %%eax \n\t"
- "cmpl %3, %%eax \n\t"
+ "movntdq %%xmm0, (%0, %%"REG_a", 2)\n\t"
+ "movntdq %%xmm1, 16(%0, %%"REG_a", 2)\n\t"
+ "add $16, %%"REG_a" \n\t"
+ "cmp %3, %%"REG_a" \n\t"
" jb 1b \n\t"
- ::"r"(dest), "r"(src1), "r"(src2), "r" (width-15)
- : "memory", "%eax"
+ ::"r"(dest), "r"(src1), "r"(src2), "r" ((long)width-15)
+ : "memory", "%"REG_a""
);
#else
asm(
- "xorl %%eax, %%eax \n\t"
+ "xor %%"REG_a", %%"REG_a" \n\t"
"1: \n\t"
- PREFETCH" 64(%1, %%eax) \n\t"
- PREFETCH" 64(%2, %%eax) \n\t"
- "movq (%1, %%eax), %%mm0 \n\t"
- "movq 8(%1, %%eax), %%mm2 \n\t"
+ PREFETCH" 64(%1, %%"REG_a") \n\t"
+ PREFETCH" 64(%2, %%"REG_a") \n\t"
+ "movq (%1, %%"REG_a"), %%mm0 \n\t"
+ "movq 8(%1, %%"REG_a"), %%mm2 \n\t"
"movq %%mm0, %%mm1 \n\t"
"movq %%mm2, %%mm3 \n\t"
- "movq (%2, %%eax), %%mm4 \n\t"
- "movq 8(%2, %%eax), %%mm5 \n\t"
+ "movq (%2, %%"REG_a"), %%mm4 \n\t"
+ "movq 8(%2, %%"REG_a"), %%mm5 \n\t"
"punpcklbw %%mm4, %%mm0 \n\t"
"punpckhbw %%mm4, %%mm1 \n\t"
"punpcklbw %%mm5, %%mm2 \n\t"
"punpckhbw %%mm5, %%mm3 \n\t"
- MOVNTQ" %%mm0, (%0, %%eax, 2) \n\t"
- MOVNTQ" %%mm1, 8(%0, %%eax, 2) \n\t"
- MOVNTQ" %%mm2, 16(%0, %%eax, 2) \n\t"
- MOVNTQ" %%mm3, 24(%0, %%eax, 2) \n\t"
- "addl $16, %%eax \n\t"
- "cmpl %3, %%eax \n\t"
+ MOVNTQ" %%mm0, (%0, %%"REG_a", 2)\n\t"
+ MOVNTQ" %%mm1, 8(%0, %%"REG_a", 2)\n\t"
+ MOVNTQ" %%mm2, 16(%0, %%"REG_a", 2)\n\t"
+ MOVNTQ" %%mm3, 24(%0, %%"REG_a", 2)\n\t"
+ "add $16, %%"REG_a" \n\t"
+ "cmp %3, %%"REG_a" \n\t"
" jb 1b \n\t"
- ::"r"(dest), "r"(src1), "r"(src2), "r" (width-15)
- : "memory", "%eax"
+ ::"r"(dest), "r"(src1), "r"(src2), "r" ((long)width-15)
+ : "memory", "%"REG_a
);
#endif
for(w= (width&(~15)); w < width; w++)
@@ -2582,7 +2581,7 @@
int srcStride1, int srcStride2,
int srcStride3, int dstStride)
{
- unsigned y,x,w,h;
+ unsigned long y,x,w,h;
w=width/2; h=height;
for(y=0;y<h;y++){
const uint8_t* yp=src1+srcStride1*y;
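
Note the recurring "(long)src_size" and "(long)chromWidth" casts in the constraint lists above: an "r"-constrained int lands in a 32-bit register on x86_64, and comparing it against the 64-bit index register mixes operand sizes, which the assembler rejects. Widening the bound to long keeps both sides register-width. The same reasoning drives the int -> long change for the imm8OfPShufW*/fragmentLength* variables further down in swscale.c, where "lea 0b, %0" stores code addresses that are 64 bits wide. A sketch of the failure mode, reusing the hypothetical REG_a from the earlier sketch:

/* Illustrative only; assumes width > 15, as the callers above do.
 * Without the (long) cast, gcc would place width-15 in a 32-bit
 * register (say %ecx), and the resulting "cmp %ecx, %rax" mixes
 * operand sizes, so assembly fails on x86_64. */
static void walk_row(int width)
{
    __asm__ volatile(
        "xor %%"REG_a", %%"REG_a"       \n\t"
        "1:                             \n\t"
        "add $16, %%"REG_a"             \n\t"
        "cmp %0, %%"REG_a"              \n\t"  /* both operands 64-bit */
        " jb 1b                         \n\t"
        :: "r" ((long)width - 15)              /* widen before the asm */
        : "%"REG_a);
}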
Index: swscale-example.c
===================================================================
RCS file: /cvsroot/mplayer/main/postproc/swscale-example.c,v
retrieving revision 1.4
retrieving revision 1.5
diff -u -r1.4 -r1.5
--- swscale-example.c 23 May 2003 20:12:44 -0000 1.4
+++ swscale-example.c 21 Oct 2004 11:55:20 -0000 1.5
@@ -104,7 +104,7 @@
sws_scale(dstContext, src, srcStride, 0, srcH, dst, dstStride);
sws_scale(outContext, dst, dstStride, 0, dstH, out, refStride);
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
asm volatile ("emms\n\t");
#endif
@@ -199,14 +199,14 @@
rgb_data[ x + y*4*W]= random();
}
}
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
sws_rgb2rgb_init(SWS_CPU_CAPS_MMX*0);
#else
sws_rgb2rgb_init(0);
#endif
sws_scale(sws, rgb_src, rgb_stride, 0, H , src, stride);
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
asm volatile ("emms\n\t");
#endif
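
swscale-example.c needs nothing beyond the widened guards: the emms that clears the MMX state after scaling must now also run on x86_64, since the MMX code paths are compiled there too. A hypothetical wrapper (not part of the patch) for the same pattern:

/* emms resets the FPU tag word after MMX code so that ordinary x87
 * floating point works again, on 32- and 64-bit x86 alike. */
static inline void clear_mmx_state(void)
{
#if defined(ARCH_X86) || defined(ARCH_X86_64)
    __asm__ volatile ("emms" ::: "memory");
#endif
}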
Index: swscale.c
===================================================================
RCS file: /cvsroot/mplayer/main/postproc/swscale.c,v
retrieving revision 1.146
retrieving revision 1.147
diff -u -r1.146 -r1.147
--- swscale.c 5 Oct 2004 19:11:00 -0000 1.146
+++ swscale.c 21 Oct 2004 11:55:20 -0000 1.147
@@ -145,7 +145,7 @@
#define MIN(a,b) ((a) > (b) ? (b) : (a))
#define MAX(a,b) ((a) < (b) ? (b) : (a))
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
static uint64_t attribute_used __attribute__((aligned(8))) bF8= 0xF8F8F8F8F8F8F8F8LL;
static uint64_t attribute_used __attribute__((aligned(8))) bFC= 0xFCFCFCFCFCFCFCFCLL;
static uint64_t __attribute__((aligned(8))) w10= 0x0010001000100010LL;
@@ -204,7 +204,7 @@
extern const uint8_t dither_8x8_73[8][8];
extern const uint8_t dither_8x8_220[8][8];
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
void in_asm_used_var_warning_killer()
{
volatile int i= bF8+bFC+w10+
@@ -679,7 +679,7 @@
#endif //HAVE_ALTIVEC
#endif //ARCH_POWERPC
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
#if (defined (HAVE_MMX) && !defined (HAVE_3DNOW) && !defined (HAVE_MMX2)) || defined (RUNTIME_CPUDETECT)
#define COMPILE_MMX
@@ -692,7 +692,7 @@
#if (defined (HAVE_3DNOW) && !defined (HAVE_MMX2)) || defined (RUNTIME_CPUDETECT)
#define COMPILE_3DNOW
#endif
-#endif //ARCH_X86
+#endif //ARCH_X86 || ARCH_X86_64
#undef HAVE_MMX
#undef HAVE_MMX2
@@ -716,7 +716,7 @@
#endif
#endif //ARCH_POWERPC
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
//X86 versions
/*
@@ -758,7 +758,7 @@
#include "swscale_template.c"
#endif
-#endif //ARCH_X86
+#endif //ARCH_X86 || ARCH_X86_64
// minor note: the HAVE_xyz is messed up after that line so don't use it
@@ -783,7 +783,7 @@
int minFilterSize;
double *filter=NULL;
double *filter2=NULL;
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
if(flags & SWS_CPU_CAPS_MMX)
asm volatile("emms\n\t"::: "memory"); //FIXME this shouldnt be required but it IS (even for non mmx versions)
#endif
@@ -1142,17 +1142,17 @@
free(filter);
}
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
static void initMMX2HScaler(int dstW, int xInc, uint8_t *funnyCode, int16_t *filter, int32_t *filterPos, int numSplits)
{
uint8_t *fragmentA;
- int imm8OfPShufW1A;
- int imm8OfPShufW2A;
- int fragmentLengthA;
+ long imm8OfPShufW1A;
+ long imm8OfPShufW2A;
+ long fragmentLengthA;
uint8_t *fragmentB;
- int imm8OfPShufW1B;
- int imm8OfPShufW2B;
- int fragmentLengthB;
+ long imm8OfPShufW1B;
+ long imm8OfPShufW2B;
+ long fragmentLengthB;
int fragmentPos;
int xpos, i;
@@ -1165,9 +1165,9 @@
"jmp 9f \n\t"
// Begin
"0: \n\t"
- "movq (%%edx, %%eax), %%mm3 \n\t"
- "movd (%%ecx, %%esi), %%mm0 \n\t"
- "movd 1(%%ecx, %%esi), %%mm1 \n\t"
+ "movq (%%"REG_d", %%"REG_a"), %%mm3\n\t"
+ "movd (%%"REG_c", %%"REG_S"), %%mm0\n\t"
+ "movd 1(%%"REG_c", %%"REG_S"), %%mm1\n\t"
"punpcklbw %%mm7, %%mm1 \n\t"
"punpcklbw %%mm7, %%mm0 \n\t"
"pshufw $0xFF, %%mm1, %%mm1 \n\t"
@@ -1175,26 +1175,26 @@
"pshufw $0xFF, %%mm0, %%mm0 \n\t"
"2: \n\t"
"psubw %%mm1, %%mm0 \n\t"
- "movl 8(%%ebx, %%eax), %%esi \n\t"
+ "mov 8(%%"REG_b", %%"REG_a"), %%"REG_S"\n\t"
"pmullw %%mm3, %%mm0 \n\t"
"psllw $7, %%mm1 \n\t"
"paddw %%mm1, %%mm0 \n\t"
- "movq %%mm0, (%%edi, %%eax) \n\t"
+ "movq %%mm0, (%%"REG_D", %%"REG_a")\n\t"
- "addl $8, %%eax \n\t"
+ "add $8, %%"REG_a" \n\t"
// End
"9: \n\t"
// "int $3\n\t"
- "leal 0b, %0 \n\t"
- "leal 1b, %1 \n\t"
- "leal 2b, %2 \n\t"
- "decl %1 \n\t"
- "decl %2 \n\t"
- "subl %0, %1 \n\t"
- "subl %0, %2 \n\t"
- "leal 9b, %3 \n\t"
- "subl %0, %3 \n\t"
+ "lea 0b, %0 \n\t"
+ "lea 1b, %1 \n\t"
+ "lea 2b, %2 \n\t"
+ "dec %1 \n\t"
+ "dec %2 \n\t"
+ "sub %0, %1 \n\t"
+ "sub %0, %2 \n\t"
+ "lea 9b, %3 \n\t"
+ "sub %0, %3 \n\t"
:"=r" (fragmentA), "=r" (imm8OfPShufW1A), "=r" (imm8OfPShufW2A),
@@ -1205,34 +1205,34 @@
"jmp 9f \n\t"
// Begin
"0: \n\t"
- "movq (%%edx, %%eax), %%mm3 \n\t"
- "movd (%%ecx, %%esi), %%mm0 \n\t"
+ "movq (%%"REG_d", %%"REG_a"), %%mm3\n\t"
+ "movd (%%"REG_c", %%"REG_S"), %%mm0\n\t"
"punpcklbw %%mm7, %%mm0 \n\t"
"pshufw $0xFF, %%mm0, %%mm1 \n\t"
"1: \n\t"
"pshufw $0xFF, %%mm0, %%mm0 \n\t"
"2: \n\t"
"psubw %%mm1, %%mm0 \n\t"
- "movl 8(%%ebx, %%eax), %%esi \n\t"
+ "mov 8(%%"REG_b", %%"REG_a"), %%"REG_S"\n\t"
"pmullw %%mm3, %%mm0 \n\t"
"psllw $7, %%mm1 \n\t"
"paddw %%mm1, %%mm0 \n\t"
- "movq %%mm0, (%%edi, %%eax) \n\t"
+ "movq %%mm0, (%%"REG_D", %%"REG_a")\n\t"
- "addl $8, %%eax \n\t"
+ "add $8, %%"REG_a" \n\t"
// End
"9: \n\t"
// "int $3\n\t"
- "leal 0b, %0 \n\t"
- "leal 1b, %1 \n\t"
- "leal 2b, %2 \n\t"
- "decl %1 \n\t"
- "decl %2 \n\t"
- "subl %0, %1 \n\t"
- "subl %0, %2 \n\t"
- "leal 9b, %3 \n\t"
- "subl %0, %3 \n\t"
+ "lea 0b, %0 \n\t"
+ "lea 1b, %1 \n\t"
+ "lea 2b, %2 \n\t"
+ "dec %1 \n\t"
+ "dec %2 \n\t"
+ "sub %0, %1 \n\t"
+ "sub %0, %2 \n\t"
+ "lea 9b, %3 \n\t"
+ "sub %0, %3 \n\t"
:"=r" (fragmentB), "=r" (imm8OfPShufW1B), "=r" (imm8OfPShufW2B),
@@ -1313,7 +1313,7 @@
}
filterPos[i/2]= xpos>>16; // needed to jump to the next part
}
-#endif // ARCH_X86
+#endif // ARCH_X86 || ARCH_X86_64
static void globalInit(){
// generating tables:
@@ -1327,7 +1327,7 @@
static SwsFunc getSwsFunc(int flags){
#ifdef RUNTIME_CPUDETECT
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
// ordered per speed fasterst first
if(flags & SWS_CPU_CAPS_MMX2)
return swScale_MMX2;
@@ -1755,7 +1755,7 @@
int unscaled, needsDither;
int srcFormat, dstFormat;
SwsFilter dummyFilter= {NULL, NULL, NULL, NULL};
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
if(flags & SWS_CPU_CAPS_MMX)
asm volatile("emms\n\t"::: "memory");
#endif
@@ -1995,7 +1995,7 @@
(flags&SWS_BICUBLIN) ? (flags|SWS_BILINEAR) : flags,
srcFilter->chrH, dstFilter->chrH, c->param);
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
// can't downscale !!!
if(c->canMMX2BeUsed && (flags & SWS_FAST_BILINEAR))
{
@@ -2136,7 +2136,7 @@
}
else
{
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
MSG_V("SwScaler: using X86-Asm scaler for horizontal scaling\n");
#else
if(flags & SWS_FAST_BILINEAR)
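
The swscale_template.c diff that follows also renames the large asm macros (MOVNTQ, YSCALEYUV2RGB, WRITEBGR32, ...) to REAL_* and adds one-line forwarding wrappers. That is the standard expand-before-stringize idiom: arguments such as %%REGa are themselves macros now, and # suppresses macro expansion of its argument, so a direct stringization would emit the literal text "REGa" into the asm. The extra call level forces the argument to expand first. A self-contained sketch, with REGa as a stand-in for the project's real macro:

#define REGa rax                 /* hypothetical; eax on 32-bit x86 */

#define STR_DIRECT(x) #x         /* # blocks expansion: raw token */
#define STR(x) STR_DIRECT(x)     /* expands x, then stringizes it */

/* STR_DIRECT(REGa) -> "REGa"   (macro never expanded: broken asm)
 * STR(REGa)        -> "rax"    (expanded, then stringized: correct)
 * The REAL_MOVNTQ/MOVNTQ pair below follows exactly this pattern. */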
Index: swscale_template.c
===================================================================
RCS file: /cvsroot/mplayer/main/postproc/swscale_template.c,v
retrieving revision 1.107
retrieving revision 1.108
diff -u -r1.107 -r1.108
--- swscale_template.c 27 Jun 2004 00:07:15 -0000 1.107
+++ swscale_template.c 21 Oct 2004 11:55:20 -0000 1.108
@@ -16,6 +16,7 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#undef REAL_MOVNTQ
#undef MOVNTQ
#undef PAVGB
#undef PREFETCH
@@ -54,29 +55,30 @@
#endif
#ifdef HAVE_MMX2
-#define MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
+#define REAL_MOVNTQ(a,b) "movntq " #a ", " #b " \n\t"
#else
-#define MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
+#define REAL_MOVNTQ(a,b) "movq " #a ", " #b " \n\t"
#endif
+#define MOVNTQ(a,b) REAL_MOVNTQ(a,b)
#ifdef HAVE_ALTIVEC
#include "swscale_altivec_template.c"
#endif
#define YSCALEYUV2YV12X(x, offset) \
- "xorl %%eax, %%eax \n\t"\
+ "xor %%"REG_a", %%"REG_a" \n\t"\
"movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
"movq %%mm3, %%mm4 \n\t"\
- "leal " offset "(%0), %%edx \n\t"\
- "movl (%%edx), %%esi \n\t"\
+ "lea " offset "(%0), %%"REG_d" \n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
".balign 16 \n\t" /* FIXME Unroll? */\
"1: \n\t"\
- "movq 8(%%edx), %%mm0 \n\t" /* filterCoeff */\
- "movq " #x "(%%esi, %%eax, 2), %%mm2 \n\t" /* srcData */\
- "movq 8+" #x "(%%esi, %%eax, 2), %%mm5 \n\t" /* srcData */\
- "addl $16, %%edx \n\t"\
- "movl (%%edx), %%esi \n\t"\
- "testl %%esi, %%esi \n\t"\
+ "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
+ "movq " #x "(%%"REG_S", %%"REG_a", 2), %%mm2\n\t" /* srcData */\
+ "movq 8+" #x "(%%"REG_S", %%"REG_a", 2), %%mm5\n\t" /* srcData */\
+ "add $16, %%"REG_d" \n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
+ "test %%"REG_S", %%"REG_S" \n\t"\
"pmulhw %%mm0, %%mm2 \n\t"\
"pmulhw %%mm0, %%mm5 \n\t"\
"paddw %%mm2, %%mm3 \n\t"\
@@ -85,26 +87,26 @@
"psraw $3, %%mm3 \n\t"\
"psraw $3, %%mm4 \n\t"\
"packuswb %%mm4, %%mm3 \n\t"\
- MOVNTQ(%%mm3, (%1, %%eax))\
- "addl $8, %%eax \n\t"\
- "cmpl %2, %%eax \n\t"\
+ MOVNTQ(%%mm3, (%1, %%REGa))\
+ "add $8, %%"REG_a" \n\t"\
+ "cmp %2, %%"REG_a" \n\t"\
"movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
"movq %%mm3, %%mm4 \n\t"\
- "leal " offset "(%0), %%edx \n\t"\
- "movl (%%edx), %%esi \n\t"\
+ "lea " offset "(%0), %%"REG_d" \n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
"jb 1b \n\t"
#define YSCALEYUV2YV121 \
- "movl %2, %%eax \n\t"\
+ "mov %2, %%"REG_a" \n\t"\
".balign 16 \n\t" /* FIXME Unroll? */\
"1: \n\t"\
- "movq (%0, %%eax, 2), %%mm0 \n\t"\
- "movq 8(%0, %%eax, 2), %%mm1 \n\t"\
+ "movq (%0, %%"REG_a", 2), %%mm0 \n\t"\
+ "movq 8(%0, %%"REG_a", 2), %%mm1\n\t"\
"psraw $7, %%mm0 \n\t"\
"psraw $7, %%mm1 \n\t"\
"packuswb %%mm1, %%mm0 \n\t"\
- MOVNTQ(%%mm0, (%1, %%eax))\
- "addl $8, %%eax \n\t"\
+ MOVNTQ(%%mm0, (%1, %%REGa))\
+ "add $8, %%"REG_a" \n\t"\
"jnc 1b \n\t"
/*
@@ -115,44 +117,44 @@
: "%eax", "%ebx", "%ecx", "%edx", "%esi"
*/
#define YSCALEYUV2PACKEDX \
- "xorl %%eax, %%eax \n\t"\
+ "xor %%"REG_a", %%"REG_a" \n\t"\
".balign 16 \n\t"\
"nop \n\t"\
"1: \n\t"\
- "leal "CHR_MMX_FILTER_OFFSET"(%0), %%edx \n\t"\
- "movl (%%edx), %%esi \n\t"\
+ "lea "CHR_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
"movq "VROUNDER_OFFSET"(%0), %%mm3\n\t"\
"movq %%mm3, %%mm4 \n\t"\
".balign 16 \n\t"\
"2: \n\t"\
- "movq 8(%%edx), %%mm0 \n\t" /* filterCoeff */\
- "movq (%%esi, %%eax), %%mm2 \n\t" /* UsrcData */\
- "movq 4096(%%esi, %%eax), %%mm5 \n\t" /* VsrcData */\
- "addl $16, %%edx \n\t"\
- "movl (%%edx), %%esi \n\t"\
+ "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
+ "movq (%%"REG_S", %%"REG_a"), %%mm2 \n\t" /* UsrcData */\
+ "movq 4096(%%"REG_S", %%"REG_a"), %%mm5 \n\t" /* VsrcData */\
+ "add $16, %%"REG_d" \n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
"pmulhw %%mm0, %%mm2 \n\t"\
"pmulhw %%mm0, %%mm5 \n\t"\
"paddw %%mm2, %%mm3 \n\t"\
"paddw %%mm5, %%mm4 \n\t"\
- "testl %%esi, %%esi \n\t"\
+ "test %%"REG_S", %%"REG_S" \n\t"\
" jnz 2b \n\t"\
\
- "leal "LUM_MMX_FILTER_OFFSET"(%0), %%edx \n\t"\
- "movl (%%edx), %%esi \n\t"\
+ "lea "LUM_MMX_FILTER_OFFSET"(%0), %%"REG_d"\n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
"movq "VROUNDER_OFFSET"(%0), %%mm1\n\t"\
"movq %%mm1, %%mm7 \n\t"\
".balign 16 \n\t"\
"2: \n\t"\
- "movq 8(%%edx), %%mm0 \n\t" /* filterCoeff */\
- "movq (%%esi, %%eax, 2), %%mm2 \n\t" /* Y1srcData */\
- "movq 8(%%esi, %%eax, 2), %%mm5 \n\t" /* Y2srcData */\
- "addl $16, %%edx \n\t"\
- "movl (%%edx), %%esi \n\t"\
+ "movq 8(%%"REG_d"), %%mm0 \n\t" /* filterCoeff */\
+ "movq (%%"REG_S", %%"REG_a", 2), %%mm2 \n\t" /* Y1srcData */\
+ "movq 8(%%"REG_S", %%"REG_a", 2), %%mm5 \n\t" /* Y2srcData */\
+ "add $16, %%"REG_d" \n\t"\
+ "mov (%%"REG_d"), %%"REG_S" \n\t"\
"pmulhw %%mm0, %%mm2 \n\t"\
"pmulhw %%mm0, %%mm5 \n\t"\
"paddw %%mm2, %%mm1 \n\t"\
"paddw %%mm5, %%mm7 \n\t"\
- "testl %%esi, %%esi \n\t"\
+ "test %%"REG_S", %%"REG_S" \n\t"\
" jnz 2b \n\t"\
@@ -202,22 +204,22 @@
"movd %7, %%mm5 \n\t" /*uvalpha1*/\
"punpcklwd %%mm5, %%mm5 \n\t"\
"punpcklwd %%mm5, %%mm5 \n\t"\
- "xorl %%eax, %%eax \n\t"\
+ "xor %%"REG_a", %%"REG_a" \n\t"\
".balign 16 \n\t"\
"1: \n\t"\
- "movq (%0, %%eax, 2), %%mm0 \n\t" /*buf0[eax]*/\
- "movq (%1, %%eax, 2), %%mm1 \n\t" /*buf1[eax]*/\
- "movq (%2, %%eax,2), %%mm2 \n\t" /* uvbuf0[eax]*/\
- "movq (%3, %%eax,2), %%mm3 \n\t" /* uvbuf1[eax]*/\
+ "movq (%0, %%"REG_a", 2), %%mm0 \n\t" /*buf0[eax]*/\
+ "movq (%1, %%"REG_a", 2), %%mm1 \n\t" /*buf1[eax]*/\
+ "movq (%2, %%"REG_a",2), %%mm2 \n\t" /* uvbuf0[eax]*/\
+ "movq (%3, %%"REG_a",2), %%mm3 \n\t" /* uvbuf1[eax]*/\
"psubw %%mm1, %%mm0 \n\t" /* buf0[eax] - buf1[eax]*/\
"psubw %%mm3, %%mm2 \n\t" /* uvbuf0[eax] - uvbuf1[eax]*/\
"pmulhw %%mm6, %%mm0 \n\t" /* (buf0[eax] - buf1[eax])yalpha1>>16*/\
"pmulhw %%mm5, %%mm2 \n\t" /* (uvbuf0[eax] - uvbuf1[eax])uvalpha1>>16*/\
"psraw $4, %%mm1 \n\t" /* buf0[eax] - buf1[eax] >>4*/\
- "movq 4096(%2, %%eax,2), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
+ "movq 4096(%2, %%"REG_a",2), %%mm4 \n\t" /* uvbuf0[eax+2048]*/\
"psraw $4, %%mm3 \n\t" /* uvbuf0[eax] - uvbuf1[eax] >>4*/\
"paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
- "movq 4096(%3, %%eax,2), %%mm0 \n\t" /* uvbuf1[eax+2048]*/\
+ "movq 4096(%3, %%"REG_a",2), %%mm0 \n\t" /* uvbuf1[eax+2048]*/\
"paddw %%mm2, %%mm3 \n\t" /* uvbuf0[eax]uvalpha1 - uvbuf1[eax](1-uvalpha1)*/\
"psubw %%mm0, %%mm4 \n\t" /* uvbuf0[eax+2048] - uvbuf1[eax+2048]*/\
"psubw "MANGLE(w80)", %%mm1 \n\t" /* 8(Y-16)*/\
@@ -248,14 +250,14 @@
"packuswb %%mm1, %%mm1 \n\t"
#endif
-#define YSCALEYUV2PACKED(index, c) \
+#define REAL_YSCALEYUV2PACKED(index, c) \
"movq "CHR_MMX_FILTER_OFFSET"+8("#c"), %%mm0\n\t"\
"movq "LUM_MMX_FILTER_OFFSET"+8("#c"), %%mm1\n\t"\
"psraw $3, %%mm0 \n\t"\
"psraw $3, %%mm1 \n\t"\
"movq %%mm0, "CHR_MMX_FILTER_OFFSET"+8("#c")\n\t"\
"movq %%mm1, "LUM_MMX_FILTER_OFFSET"+8("#c")\n\t"\
- "xorl "#index", "#index" \n\t"\
+ "xor "#index", "#index" \n\t"\
".balign 16 \n\t"\
"1: \n\t"\
"movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
@@ -284,8 +286,10 @@
"paddw %%mm0, %%mm1 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
"paddw %%mm6, %%mm7 \n\t" /* buf0[eax]yalpha1 + buf1[eax](1-yalpha1) >>16*/\
-#define YSCALEYUV2RGB(index, c) \
- "xorl "#index", "#index" \n\t"\
+#define YSCALEYUV2PACKED(index, c) REAL_YSCALEYUV2PACKED(index, c)
+
+#define REAL_YSCALEYUV2RGB(index, c) \
+ "xor "#index", "#index" \n\t"\
".balign 16 \n\t"\
"1: \n\t"\
"movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
@@ -348,9 +352,10 @@
"packuswb %%mm6, %%mm5 \n\t"\
"packuswb %%mm3, %%mm4 \n\t"\
"pxor %%mm7, %%mm7 \n\t"
+#define YSCALEYUV2RGB(index, c) REAL_YSCALEYUV2RGB(index, c)
-#define YSCALEYUV2PACKED1(index, c) \
- "xorl "#index", "#index" \n\t"\
+#define REAL_YSCALEYUV2PACKED1(index, c) \
+ "xor "#index", "#index" \n\t"\
".balign 16 \n\t"\
"1: \n\t"\
"movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
@@ -362,8 +367,10 @@
"psraw $7, %%mm1 \n\t" \
"psraw $7, %%mm7 \n\t" \
-#define YSCALEYUV2RGB1(index, c) \
- "xorl "#index", "#index" \n\t"\
+#define YSCALEYUV2PACKED1(index, c) REAL_YSCALEYUV2PACKED1(index, c)
+
+#define REAL_YSCALEYUV2RGB1(index, c) \
+ "xor "#index", "#index" \n\t"\
".balign 16 \n\t"\
"1: \n\t"\
"movq (%2, "#index"), %%mm3 \n\t" /* uvbuf0[eax]*/\
@@ -409,9 +416,10 @@
"packuswb %%mm6, %%mm5 \n\t"\
"packuswb %%mm3, %%mm4 \n\t"\
"pxor %%mm7, %%mm7 \n\t"
+#define YSCALEYUV2RGB1(index, c) REAL_YSCALEYUV2RGB1(index, c)
-#define YSCALEYUV2PACKED1b(index, c) \
- "xorl "#index", "#index" \n\t"\
+#define REAL_YSCALEYUV2PACKED1b(index, c) \
+ "xor "#index", "#index" \n\t"\
".balign 16 \n\t"\
"1: \n\t"\
"movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
@@ -426,10 +434,11 @@
"movq 8(%0, "#index", 2), %%mm7 \n\t" /*buf0[eax]*/\
"psraw $7, %%mm1 \n\t" \
"psraw $7, %%mm7 \n\t"
+#define YSCALEYUV2PACKED1b(index, c) REAL_YSCALEYUV2PACKED1b(index, c)
// do vertical chrominance interpolation
-#define YSCALEYUV2RGB1b(index, c) \
- "xorl "#index", "#index" \n\t"\
+#define REAL_YSCALEYUV2RGB1b(index, c) \
+ "xor "#index", "#index" \n\t"\
".balign 16 \n\t"\
"1: \n\t"\
"movq (%2, "#index"), %%mm2 \n\t" /* uvbuf0[eax]*/\
@@ -479,8 +488,9 @@
"packuswb %%mm6, %%mm5 \n\t"\
"packuswb %%mm3, %%mm4 \n\t"\
"pxor %%mm7, %%mm7 \n\t"
+#define YSCALEYUV2RGB1b(index, c) REAL_YSCALEYUV2RGB1b(index, c)
-#define WRITEBGR32(dst, dstw, index) \
+#define REAL_WRITEBGR32(dst, dstw, index) \
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
"movq %%mm2, %%mm1 \n\t" /* B */\
"movq %%mm5, %%mm6 \n\t" /* R */\
@@ -500,11 +510,12 @@
MOVNTQ(%%mm1, 16(dst, index, 4))\
MOVNTQ(%%mm3, 24(dst, index, 4))\
\
- "addl $8, "#index" \n\t"\
- "cmpl "#dstw", "#index" \n\t"\
+ "add $8, "#index" \n\t"\
+ "cmp "#dstw", "#index" \n\t"\
" jb 1b \n\t"
+#define WRITEBGR32(dst, dstw, index) REAL_WRITEBGR32(dst, dstw, index)
-#define WRITEBGR16(dst, dstw, index) \
+#define REAL_WRITEBGR16(dst, dstw, index) \
"pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
"pand "MANGLE(bFC)", %%mm4 \n\t" /* G */\
"pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
@@ -527,11 +538,12 @@
MOVNTQ(%%mm2, (dst, index, 2))\
MOVNTQ(%%mm1, 8(dst, index, 2))\
\
- "addl $8, "#index" \n\t"\
- "cmpl "#dstw", "#index" \n\t"\
+ "add $8, "#index" \n\t"\
+ "cmp "#dstw", "#index" \n\t"\
" jb 1b \n\t"
+#define WRITEBGR16(dst, dstw, index) REAL_WRITEBGR16(dst, dstw, index)
-#define WRITEBGR15(dst, dstw, index) \
+#define REAL_WRITEBGR15(dst, dstw, index) \
"pand "MANGLE(bF8)", %%mm2 \n\t" /* B */\
"pand "MANGLE(bF8)", %%mm4 \n\t" /* G */\
"pand "MANGLE(bF8)", %%mm5 \n\t" /* R */\
@@ -555,9 +567,10 @@
MOVNTQ(%%mm2, (dst, index, 2))\
MOVNTQ(%%mm1, 8(dst, index, 2))\
\
- "addl $8, "#index" \n\t"\
- "cmpl "#dstw", "#index" \n\t"\
+ "add $8, "#index" \n\t"\
+ "cmp "#dstw", "#index" \n\t"\
" jb 1b \n\t"
+#define WRITEBGR15(dst, dstw, index) REAL_WRITEBGR15(dst, dstw, index)
#define WRITEBGR24OLD(dst, dstw, index) \
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */\
@@ -609,10 +622,10 @@
MOVNTQ(%%mm0, (dst))\
MOVNTQ(%%mm2, 8(dst))\
MOVNTQ(%%mm3, 16(dst))\
- "addl $24, "#dst" \n\t"\
+ "add $24, "#dst" \n\t"\
\
- "addl $8, "#index" \n\t"\
- "cmpl "#dstw", "#index" \n\t"\
+ "add $8, "#index" \n\t"\
+ "cmp "#dstw", "#index" \n\t"\
" jb 1b \n\t"
#define WRITEBGR24MMX(dst, dstw, index) \
@@ -662,10 +675,10 @@
"por %%mm3, %%mm5 \n\t" /* RGBRGBRG 2 */\
MOVNTQ(%%mm5, 16(dst))\
\
- "addl $24, "#dst" \n\t"\
+ "add $24, "#dst" \n\t"\
\
- "addl $8, "#index" \n\t"\
- "cmpl "#dstw", "#index" \n\t"\
+ "add $8, "#index" \n\t"\
+ "cmp "#dstw", "#index" \n\t"\
" jb 1b \n\t"
#define WRITEBGR24MMX2(dst, dstw, index) \
@@ -710,21 +723,21 @@
"por %%mm3, %%mm6 \n\t"\
MOVNTQ(%%mm6, 16(dst))\
\
- "addl $24, "#dst" \n\t"\
+ "add $24, "#dst" \n\t"\
\
- "addl $8, "#index" \n\t"\
- "cmpl "#dstw", "#index" \n\t"\
+ "add $8, "#index" \n\t"\
+ "cmp "#dstw", "#index" \n\t"\
" jb 1b \n\t"
#ifdef HAVE_MMX2
#undef WRITEBGR24
-#define WRITEBGR24 WRITEBGR24MMX2
+#define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX2(dst, dstw, index)
#else
#undef WRITEBGR24
-#define WRITEBGR24 WRITEBGR24MMX
+#define WRITEBGR24(dst, dstw, index) WRITEBGR24MMX(dst, dstw, index)
#endif
-#define WRITEYUY2(dst, dstw, index) \
+#define REAL_WRITEYUY2(dst, dstw, index) \
"packuswb %%mm3, %%mm3 \n\t"\
"packuswb %%mm4, %%mm4 \n\t"\
"packuswb %%mm7, %%mm1 \n\t"\
@@ -736,9 +749,10 @@
MOVNTQ(%%mm1, (dst, index, 2))\
MOVNTQ(%%mm7, 8(dst, index, 2))\
\
- "addl $8, "#index" \n\t"\
- "cmpl "#dstw", "#index" \n\t"\
+ "add $8, "#index" \n\t"\
+ "cmp "#dstw", "#index" \n\t"\
" jb 1b \n\t"
+#define WRITEYUY2(dst, dstw, index) REAL_WRITEYUY2(dst, dstw, index)
static inline void RENAME(yuv2yuvX)(SwsContext *c, int16_t *lumFilter, int16_t **lumSrc, int lumFilterSize,
@@ -751,23 +765,23 @@
asm volatile(
YSCALEYUV2YV12X(0, CHR_MMX_FILTER_OFFSET)
:: "r" (&c->redDither),
- "r" (uDest), "m" (chrDstW)
- : "%eax", "%edx", "%esi"
+ "r" (uDest), "m" ((long)chrDstW)
+ : "%"REG_a, "%"REG_d, "%"REG_S
);
asm volatile(
YSCALEYUV2YV12X(4096, CHR_MMX_FILTER_OFFSET)
:: "r" (&c->redDither),
- "r" (vDest), "m" (chrDstW)
- : "%eax", "%edx", "%esi"
+ "r" (vDest), "m" ((long)chrDstW)
+ : "%"REG_a, "%"REG_d, "%"REG_S
);
}
asm volatile(
YSCALEYUV2YV12X(0, LUM_MMX_FILTER_OFFSET)
:: "r" (&c->redDither),
- "r" (dest), "m" (dstW)
- : "%eax", "%edx", "%esi"
+ "r" (dest), "m" ((long)dstW)
+ : "%"REG_a, "%"REG_d, "%"REG_S
);
#else
#ifdef HAVE_ALTIVEC
@@ -791,23 +805,23 @@
asm volatile(
YSCALEYUV2YV121
:: "r" (chrSrc + chrDstW), "r" (uDest + chrDstW),
- "g" (-chrDstW)
- : "%eax"
+ "g" ((long)-chrDstW)
+ : "%"REG_a
);
asm volatile(
YSCALEYUV2YV121
:: "r" (chrSrc + 2048 + chrDstW), "r" (vDest + chrDstW),
- "g" (-chrDstW)
- : "%eax"
+ "g" ((long)-chrDstW)
+ : "%"REG_a
);
}
asm volatile(
YSCALEYUV2YV121
:: "r" (lumSrc + dstW), "r" (dest + dstW),
- "g" (-dstW)
- : "%eax"
+ "g" ((long)-dstW)
+ : "%"REG_a
);
#else
int i;
@@ -858,12 +872,12 @@
{
asm volatile(
YSCALEYUV2RGBX
- WRITEBGR32(%4, %5, %%eax)
+ WRITEBGR32(%4, %5, %%REGa)
:: "r" (&c->redDither),
"m" (dummy), "m" (dummy), "m" (dummy),
"r" (dest), "m" (dstW)
- : "%eax", "%edx", "%esi"
+ : "%"REG_a, "%"REG_d, "%"REG_S
);
}
break;
@@ -871,14 +885,14 @@
{
asm volatile(
YSCALEYUV2RGBX
- "leal (%%eax, %%eax, 2), %%ebx \n\t" //FIXME optimize
- "addl %4, %%ebx \n\t"
- WRITEBGR24(%%ebx, %5, %%eax)
+ "lea (%%"REG_a", %%"REG_a", 2), %%"REG_b"\n\t" //FIXME optimize
+ "add %4, %%"REG_b" \n\t"
+ WRITEBGR24(%%REGb, %5, %%REGa)
:: "r" (&c->redDither),
"m" (dummy), "m" (dummy), "m" (dummy),
"r" (dest), "m" (dstW)
- : "%eax", "%ebx", "%edx", "%esi" //FIXME ebx
+ : "%"REG_a, "%"REG_b, "%"REG_d, "%"REG_S //FIXME ebx
);
}
break;
@@ -893,12 +907,12 @@
"paddusb "MANGLE(r5Dither)", %%mm5\n\t"
#endif
- WRITEBGR15(%4, %5, %%eax)
+ WRITEBGR15(%4, %5, %%REGa)
:: "r" (&c->redDither),
"m" (dummy), "m" (dummy), "m" (dummy),
"r" (dest), "m" (dstW)
- : "%eax", "%edx", "%esi"
+ : "%"REG_a, "%"REG_d, "%"REG_S
);
}
break;
@@ -913,12 +927,12 @@
"paddusb "MANGLE(r5Dither)", %%mm5\n\t"
#endif
- WRITEBGR16(%4, %5, %%eax)
+ WRITEBGR16(%4, %5, %%REGa)
:: "r" (&c->redDither),
"m" (dummy), "m" (dummy), "m" (dummy),
"r" (dest), "m" (dstW)
- : "%eax", "%edx", "%esi"
+ : "%"REG_a, "%"REG_d, "%"REG_S
);
}
break;
@@ -932,12 +946,12 @@
"psraw $3, %%mm4 \n\t"
"psraw $3, %%mm1 \n\t"
"psraw $3, %%mm7 \n\t"
- WRITEYUY2(%4, %5, %%eax)
+ WRITEYUY2(%4, %5, %%REGa)
:: "r" (&c->redDither),
"m" (dummy), "m" (dummy), "m" (dummy),
"r" (dest), "m" (dstW)
- : "%eax", "%edx", "%esi"
+ : "%"REG_a, "%"REG_d, "%"REG_S
);
}
break;
@@ -984,17 +998,17 @@
"punpcklwd %%mm0, %%mm3 \n\t" // BGR0BGR0
"punpckhwd %%mm0, %%mm1 \n\t" // BGR0BGR0
- MOVNTQ(%%mm3, (%4, %%eax, 4))
- MOVNTQ(%%mm1, 8(%4, %%eax, 4))
+ MOVNTQ(%%mm3, (%4, %%REGa, 4))
+ MOVNTQ(%%mm1, 8(%4, %%REGa, 4))
- "addl $4, %%eax \n\t"
- "cmpl %5, %%eax \n\t"
+ "add $4, %%"REG_a" \n\t"
+ "cmp %5, %%"REG_a" \n\t"
" jb 1b \n\t"
- :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
+ :: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" ((long)dstW),
"m" (yalpha1), "m" (uvalpha1)
- : "%eax"
+ : "%"REG_a
);
break;
case IMGFMT_BGR24:
@@ -1024,26 +1038,26 @@
"psrlq $24, %%mm1 \n\t" // 0BGR0000
"por %%mm2, %%mm1 \n\t" // RBGRR000
- "movl %4, %%ebx \n\t"
- "addl %%eax, %%ebx \n\t"
+ "mov %4, %%"REG_b" \n\t"
+ "add %%"REG_a", %%"REG_b" \n\t"
#ifdef HAVE_MMX2
//FIXME Alignment
- "movntq %%mm3, (%%ebx, %%eax, 2)\n\t"
- "movntq %%mm1, 8(%%ebx, %%eax, 2)\n\t"
+ "movntq %%mm3, (%%"REG_b", %%"REG_a", 2)\n\t"
+ "movntq %%mm1, 8(%%"REG_b", %%"REG_a", 2)\n\t"
#else
- "movd %%mm3, (%%ebx, %%eax, 2) \n\t"
+ "movd %%mm3, (%%"REG_b", %%"REG_a", 2) \n\t"
"psrlq $32, %%mm3 \n\t"
- "movd %%mm3, 4(%%ebx, %%eax, 2) \n\t"
- "movd %%mm1, 8(%%ebx, %%eax, 2) \n\t"
+ "movd %%mm3, 4(%%"REG_b", %%"REG_a", 2) \n\t"
+ "movd %%mm1, 8(%%"REG_b", %%"REG_a", 2) \n\t"
#endif
- "addl $4, %%eax \n\t"
- "cmpl %5, %%eax \n\t"
+ "add $4, %%"REG_a" \n\t"
+ "cmp %5, %%"REG_a" \n\t"
" jb 1b \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest), "m" (dstW),
"m" (yalpha1), "m" (uvalpha1)
- : "%eax", "%ebx"
+ : "%"REG_a, "%"REG_b
);
break;
case IMGFMT_BGR15:
@@ -1068,15 +1082,15 @@
"por %%mm3, %%mm1 \n\t"
"por %%mm1, %%mm0 \n\t"
- MOVNTQ(%%mm0, (%4, %%eax, 2))
+ MOVNTQ(%%mm0, (%4, %%REGa, 2))
- "addl $4, %%eax \n\t"
- "cmpl %5, %%eax \n\t"
+ "add $4, %%"REG_a" \n\t"
+ "cmp %5, %%"REG_a" \n\t"
" jb 1b \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
"m" (yalpha1), "m" (uvalpha1)
- : "%eax"
+ : "%"REG_a
);
break;
case IMGFMT_BGR16:
@@ -1101,15 +1115,15 @@
"por %%mm3, %%mm1 \n\t"
"por %%mm1, %%mm0 \n\t"
- MOVNTQ(%%mm0, (%4, %%eax, 2))
+ MOVNTQ(%%mm0, (%4, %%REGa, 2))
- "addl $4, %%eax \n\t"
- "cmpl %5, %%eax \n\t"
+ "add $4, %%"REG_a" \n\t"
+ "cmp %5, %%"REG_a" \n\t"
" jb 1b \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "r" (dest), "m" (dstW),
"m" (yalpha1), "m" (uvalpha1)
- : "%eax"
+ : "%"REG_a
);
break;
#endif
@@ -1188,34 +1202,34 @@
//Note 8280 == DSTW_OFFSET but the preprocessor can't handle that there :(
case IMGFMT_BGR32:
asm volatile(
- "movl %%esp, "ESP_OFFSET"(%5) \n\t"
- "movl %4, %%esp \n\t"
- YSCALEYUV2RGB(%%eax, %5)
- WRITEBGR32(%%esp, 8280(%5), %%eax)
- "movl "ESP_OFFSET"(%5), %%esp \n\t"
+ "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_SP" \n\t"
+ YSCALEYUV2RGB(%%REGa, %5)
+ WRITEBGR32(%%REGSP, 8280(%5), %%REGa)
+ "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
"r" (&c->redDither)
- : "%eax"
+ : "%"REG_a
);
return;
case IMGFMT_BGR24:
asm volatile(
- "movl %%esp, "ESP_OFFSET"(%5) \n\t"
- "movl %4, %%esp \n\t"
- YSCALEYUV2RGB(%%eax, %5)
- WRITEBGR24(%%esp, 8280(%5), %%eax)
- "movl "ESP_OFFSET"(%5), %%esp \n\t"
+ "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_SP" \n\t"
+ YSCALEYUV2RGB(%%REGa, %5)
+ WRITEBGR24(%%REGSP, 8280(%5), %%REGa)
+ "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
"r" (&c->redDither)
- : "%eax"
+ : "%"REG_a
);
return;
case IMGFMT_BGR15:
asm volatile(
- "movl %%esp, "ESP_OFFSET"(%5) \n\t"
- "movl %4, %%esp \n\t"
- YSCALEYUV2RGB(%%eax, %5)
+ "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_SP" \n\t"
+ YSCALEYUV2RGB(%%REGa, %5)
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
"paddusb "MANGLE(b5Dither)", %%mm2\n\t"
@@ -1223,19 +1237,19 @@
"paddusb "MANGLE(r5Dither)", %%mm5\n\t"
#endif
- WRITEBGR15(%%esp, 8280(%5), %%eax)
- "movl "ESP_OFFSET"(%5), %%esp \n\t"
+ WRITEBGR15(%%REGSP, 8280(%5), %%REGa)
+ "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
"r" (&c->redDither)
- : "%eax"
+ : "%"REG_a
);
return;
case IMGFMT_BGR16:
asm volatile(
- "movl %%esp, "ESP_OFFSET"(%5) \n\t"
- "movl %4, %%esp \n\t"
- YSCALEYUV2RGB(%%eax, %5)
+ "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_SP" \n\t"
+ YSCALEYUV2RGB(%%REGa, %5)
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
"paddusb "MANGLE(b5Dither)", %%mm2\n\t"
@@ -1243,23 +1257,23 @@
"paddusb "MANGLE(r5Dither)", %%mm5\n\t"
#endif
- WRITEBGR16(%%esp, 8280(%5), %%eax)
- "movl "ESP_OFFSET"(%5), %%esp \n\t"
+ WRITEBGR16(%%REGSP, 8280(%5), %%REGa)
+ "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
"r" (&c->redDither)
- : "%eax"
+ : "%"REG_a
);
return;
case IMGFMT_YUY2:
asm volatile(
- "movl %%esp, "ESP_OFFSET"(%5) \n\t"
- "movl %4, %%esp \n\t"
- YSCALEYUV2PACKED(%%eax, %5)
- WRITEYUY2(%%esp, 8280(%5), %%eax)
- "movl "ESP_OFFSET"(%5), %%esp \n\t"
+ "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_SP" \n\t"
+ YSCALEYUV2PACKED(%%REGa, %5)
+ WRITEYUY2(%%REGSP, 8280(%5), %%REGa)
+ "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
"r" (&c->redDither)
- : "%eax"
+ : "%"REG_a
);
return;
default: break;
@@ -1293,54 +1307,54 @@
{
case IMGFMT_BGR32:
asm volatile(
- "movl %%esp, "ESP_OFFSET"(%5) \n\t"
- "movl %4, %%esp \n\t"
- YSCALEYUV2RGB1(%%eax, %5)
- WRITEBGR32(%%esp, 8280(%5), %%eax)
- "movl "ESP_OFFSET"(%5), %%esp \n\t"
+ "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_SP" \n\t"
+ YSCALEYUV2RGB1(%%REGa, %5)
+ WRITEBGR32(%%REGSP, 8280(%5), %%REGa)
+ "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
"r" (&c->redDither)
- : "%eax"
+ : "%"REG_a
);
return;
case IMGFMT_BGR24:
asm volatile(
- "movl %%esp, "ESP_OFFSET"(%5) \n\t"
- "movl %4, %%esp \n\t"
- YSCALEYUV2RGB1(%%eax, %5)
- WRITEBGR24(%%esp, 8280(%5), %%eax)
- "movl "ESP_OFFSET"(%5), %%esp \n\t"
+ "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_SP" \n\t"
+ YSCALEYUV2RGB1(%%REGa, %5)
+ WRITEBGR24(%%REGSP, 8280(%5), %%REGa)
+ "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
"r" (&c->redDither)
- : "%eax"
+ : "%"REG_a
);
return;
case IMGFMT_BGR15:
asm volatile(
- "movl %%esp, "ESP_OFFSET"(%5) \n\t"
- "movl %4, %%esp \n\t"
- YSCALEYUV2RGB1(%%eax, %5)
+ "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_SP" \n\t"
+ YSCALEYUV2RGB1(%%REGa, %5)
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
"paddusb "MANGLE(b5Dither)", %%mm2\n\t"
"paddusb "MANGLE(g5Dither)", %%mm4\n\t"
"paddusb "MANGLE(r5Dither)", %%mm5\n\t"
#endif
- WRITEBGR15(%%esp, 8280(%5), %%eax)
- "movl "ESP_OFFSET"(%5), %%esp \n\t"
+ WRITEBGR15(%%REGSP, 8280(%5), %%REGa)
+ "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
"r" (&c->redDither)
- : "%eax"
+ : "%"REG_a
);
return;
case IMGFMT_BGR16:
asm volatile(
- "movl %%esp, "ESP_OFFSET"(%5) \n\t"
- "movl %4, %%esp \n\t"
- YSCALEYUV2RGB1(%%eax, %5)
+ "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_SP" \n\t"
+ YSCALEYUV2RGB1(%%REGa, %5)
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
"paddusb "MANGLE(b5Dither)", %%mm2\n\t"
@@ -1348,25 +1362,25 @@
"paddusb "MANGLE(r5Dither)", %%mm5\n\t"
#endif
- WRITEBGR16(%%esp, 8280(%5), %%eax)
- "movl "ESP_OFFSET"(%5), %%esp \n\t"
+ WRITEBGR16(%%REGSP, 8280(%5), %%REGa)
+ "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
"r" (&c->redDither)
- : "%eax"
+ : "%"REG_a
);
return;
case IMGFMT_YUY2:
asm volatile(
- "movl %%esp, "ESP_OFFSET"(%5) \n\t"
- "movl %4, %%esp \n\t"
- YSCALEYUV2PACKED1(%%eax, %5)
- WRITEYUY2(%%esp, 8280(%5), %%eax)
- "movl "ESP_OFFSET"(%5), %%esp \n\t"
+ "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_SP" \n\t"
+ YSCALEYUV2PACKED1(%%REGa, %5)
+ WRITEYUY2(%%REGSP, 8280(%5), %%REGa)
+ "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
"r" (&c->redDither)
- : "%eax"
+ : "%"REG_a
);
return;
}
@@ -1377,54 +1391,54 @@
{
case IMGFMT_BGR32:
asm volatile(
- "movl %%esp, "ESP_OFFSET"(%5) \n\t"
- "movl %4, %%esp \n\t"
- YSCALEYUV2RGB1b(%%eax, %5)
- WRITEBGR32(%%esp, 8280(%5), %%eax)
- "movl "ESP_OFFSET"(%5), %%esp \n\t"
+ "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_SP" \n\t"
+ YSCALEYUV2RGB1b(%%REGa, %5)
+ WRITEBGR32(%%REGSP, 8280(%5), %%REGa)
+ "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
"r" (&c->redDither)
- : "%eax"
+ : "%"REG_a
);
return;
case IMGFMT_BGR24:
asm volatile(
- "movl %%esp, "ESP_OFFSET"(%5) \n\t"
- "movl %4, %%esp \n\t"
- YSCALEYUV2RGB1b(%%eax, %5)
- WRITEBGR24(%%esp, 8280(%5), %%eax)
- "movl "ESP_OFFSET"(%5), %%esp \n\t"
+ "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_SP" \n\t"
+ YSCALEYUV2RGB1b(%%REGa, %5)
+ WRITEBGR24(%%REGSP, 8280(%5), %%REGa)
+ "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
"r" (&c->redDither)
- : "%eax"
+ : "%"REG_a
);
return;
case IMGFMT_BGR15:
asm volatile(
- "movl %%esp, "ESP_OFFSET"(%5) \n\t"
- "movl %4, %%esp \n\t"
- YSCALEYUV2RGB1b(%%eax, %5)
+ "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_SP" \n\t"
+ YSCALEYUV2RGB1b(%%REGa, %5)
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
"paddusb "MANGLE(b5Dither)", %%mm2\n\t"
"paddusb "MANGLE(g5Dither)", %%mm4\n\t"
"paddusb "MANGLE(r5Dither)", %%mm5\n\t"
#endif
- WRITEBGR15(%%esp, 8280(%5), %%eax)
- "movl "ESP_OFFSET"(%5), %%esp \n\t"
+ WRITEBGR15(%%REGSP, 8280(%5), %%REGa)
+ "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
"r" (&c->redDither)
- : "%eax"
+ : "%"REG_a
);
return;
case IMGFMT_BGR16:
asm volatile(
- "movl %%esp, "ESP_OFFSET"(%5) \n\t"
- "movl %4, %%esp \n\t"
- YSCALEYUV2RGB1b(%%eax, %5)
+ "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_SP" \n\t"
+ YSCALEYUV2RGB1b(%%REGa, %5)
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
"paddusb "MANGLE(b5Dither)", %%mm2\n\t"
@@ -1432,25 +1446,25 @@
"paddusb "MANGLE(r5Dither)", %%mm5\n\t"
#endif
- WRITEBGR16(%%esp, 8280(%5), %%eax)
- "movl "ESP_OFFSET"(%5), %%esp \n\t"
+ WRITEBGR16(%%REGSP, 8280(%5), %%REGa)
+ "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
"r" (&c->redDither)
- : "%eax"
+ : "%"REG_a
);
return;
case IMGFMT_YUY2:
asm volatile(
- "movl %%esp, "ESP_OFFSET"(%5) \n\t"
- "movl %4, %%esp \n\t"
- YSCALEYUV2PACKED1b(%%eax, %5)
- WRITEYUY2(%%esp, 8280(%5), %%eax)
- "movl "ESP_OFFSET"(%5), %%esp \n\t"
+ "mov %%"REG_SP", "ESP_OFFSET"(%5) \n\t"
+ "mov %4, %%"REG_SP" \n\t"
+ YSCALEYUV2PACKED1b(%%REGa, %5)
+ WRITEYUY2(%%REGSP, 8280(%5), %%REGa)
+ "mov "ESP_OFFSET"(%5), %%"REG_SP" \n\t"
:: "r" (buf0), "r" (buf1), "r" (uvbuf0), "r" (uvbuf1), "m" (dest),
"r" (&c->redDither)
- : "%eax"
+ : "%"REG_a
);
return;
}
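All of these single-scanline converters borrow the stack pointer as an extra data register: the real SP is saved at ESP_OFFSET inside the context (%5), SP is then pointed at the output area from %4, the WRITE* macro stores pixels through it, and the saved value is restored before the asm ends. A stripped-down skeleton of the pattern (illustrative, not a full converter):

    asm volatile(
        "mov  %%"REG_SP", "ESP_OFFSET"(%1) \n\t" /* save real stack ptr  */
        "mov  %0, %%"REG_SP"               \n\t" /* borrow SP for pixels */
        /* ... convert one scanline, storing through REG_SP ... */
        "mov  "ESP_OFFSET"(%1), %%"REG_SP" \n\t" /* restore it           */
        :: "m" (dest), "r" (c));

The trick assumes nothing uses the stack while SP is redirected (no push, call, or signal in that window), and on x86_64 it has to move the full 64-bit rsp, which is why the movl forms could not stay.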
@@ -1471,18 +1485,18 @@
#ifdef HAVE_MMX
asm volatile(
"movq "MANGLE(bm01010101)", %%mm2\n\t"
- "movl %0, %%eax \n\t"
+ "mov %0, %%"REG_a" \n\t"
"1: \n\t"
- "movq (%1, %%eax,2), %%mm0 \n\t"
- "movq 8(%1, %%eax,2), %%mm1 \n\t"
+ "movq (%1, %%"REG_a",2), %%mm0 \n\t"
+ "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
"pand %%mm2, %%mm0 \n\t"
"pand %%mm2, %%mm1 \n\t"
"packuswb %%mm1, %%mm0 \n\t"
- "movq %%mm0, (%2, %%eax) \n\t"
- "addl $8, %%eax \n\t"
+ "movq %%mm0, (%2, %%"REG_a") \n\t"
+ "add $8, %%"REG_a" \n\t"
" js 1b \n\t"
- : : "g" (-width), "r" (src+width*2), "r" (dst+width)
- : "%eax"
+ : : "g" ((long)-width), "r" (src+width*2), "r" (dst+width)
+ : "%"REG_a
);
#else
int i;
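The asm above, like the other extraction loops in this file, uses the negative-index idiom: the source and destination operands are pre-biased by +width, and one signed counter climbs from -width toward zero so that "js 1b" doubles as the loop test. The same shape in C (sketch):

    long i = -(long)width;
    const uint8_t *s = src + 2*width;    /* biased, as in "r" (src+width*2) */
    uint8_t *d = dst + width;
    do {
        /* 16 source bytes at s + 2*i become 8 bytes at d + i */
        i += 8;                          /* add $8, REG_a */
    } while (i < 0);                     /* js 1b         */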
@@ -1496,12 +1510,12 @@
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
asm volatile(
"movq "MANGLE(bm01010101)", %%mm4\n\t"
- "movl %0, %%eax \n\t"
+ "mov %0, %%"REG_a" \n\t"
"1: \n\t"
- "movq (%1, %%eax,4), %%mm0 \n\t"
- "movq 8(%1, %%eax,4), %%mm1 \n\t"
- "movq (%2, %%eax,4), %%mm2 \n\t"
- "movq 8(%2, %%eax,4), %%mm3 \n\t"
+ "movq (%1, %%"REG_a",4), %%mm0 \n\t"
+ "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
+ "movq (%2, %%"REG_a",4), %%mm2 \n\t"
+ "movq 8(%2, %%"REG_a",4), %%mm3 \n\t"
PAVGB(%%mm2, %%mm0)
PAVGB(%%mm3, %%mm1)
"psrlw $8, %%mm0 \n\t"
@@ -1512,12 +1526,12 @@
"pand %%mm4, %%mm1 \n\t"
"packuswb %%mm0, %%mm0 \n\t"
"packuswb %%mm1, %%mm1 \n\t"
- "movd %%mm0, (%4, %%eax) \n\t"
- "movd %%mm1, (%3, %%eax) \n\t"
- "addl $4, %%eax \n\t"
+ "movd %%mm0, (%4, %%"REG_a") \n\t"
+ "movd %%mm1, (%3, %%"REG_a") \n\t"
+ "add $4, %%"REG_a" \n\t"
" js 1b \n\t"
- : : "g" (-width), "r" (src1+width*4), "r" (src2+width*4), "r" (dstU+width), "r" (dstV+width)
- : "%eax"
+ : : "g" ((long)-width), "r" (src1+width*4), "r" (src2+width*4), "r" (dstU+width), "r" (dstV+width)
+ : "%"REG_a
);
#else
int i;
@@ -1534,18 +1548,18 @@
{
#ifdef HAVE_MMX
asm volatile(
- "movl %0, %%eax \n\t"
+ "mov %0, %%"REG_a" \n\t"
"1: \n\t"
- "movq (%1, %%eax,2), %%mm0 \n\t"
- "movq 8(%1, %%eax,2), %%mm1 \n\t"
+ "movq (%1, %%"REG_a",2), %%mm0 \n\t"
+ "movq 8(%1, %%"REG_a",2), %%mm1 \n\t"
"psrlw $8, %%mm0 \n\t"
"psrlw $8, %%mm1 \n\t"
"packuswb %%mm1, %%mm0 \n\t"
- "movq %%mm0, (%2, %%eax) \n\t"
- "addl $8, %%eax \n\t"
+ "movq %%mm0, (%2, %%"REG_a") \n\t"
+ "add $8, %%"REG_a" \n\t"
" js 1b \n\t"
- : : "g" (-width), "r" (src+width*2), "r" (dst+width)
- : "%eax"
+ : : "g" ((long)-width), "r" (src+width*2), "r" (dst+width)
+ : "%"REG_a
);
#else
int i;
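The (long)-width casts added to these operand lists are not cosmetic: the value is loaded with "mov %0, REG_a" and then used as a 64-bit scaled index, so the operand must be pointer-sized for the operand widths to agree, and a 32-bit negative truncated into a 64-bit register would turn into a huge positive offset. Illustration (hypothetical values):

    int w = 720;
    unsigned int u = (unsigned int)-w;   /* 0xFFFFFD30               */
    long bad  = (long)u;                 /* 4294966576: ~4GB offset  */
    long good = (long)-w;                /* -720: the intended index */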
@@ -1559,12 +1573,12 @@
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
asm volatile(
"movq "MANGLE(bm01010101)", %%mm4\n\t"
- "movl %0, %%eax \n\t"
+ "mov %0, %%"REG_a" \n\t"
"1: \n\t"
- "movq (%1, %%eax,4), %%mm0 \n\t"
- "movq 8(%1, %%eax,4), %%mm1 \n\t"
- "movq (%2, %%eax,4), %%mm2 \n\t"
- "movq 8(%2, %%eax,4), %%mm3 \n\t"
+ "movq (%1, %%"REG_a",4), %%mm0 \n\t"
+ "movq 8(%1, %%"REG_a",4), %%mm1 \n\t"
+ "movq (%2, %%"REG_a",4), %%mm2 \n\t"
+ "movq 8(%2, %%"REG_a",4), %%mm3 \n\t"
PAVGB(%%mm2, %%mm0)
PAVGB(%%mm3, %%mm1)
"pand %%mm4, %%mm0 \n\t"
@@ -1575,12 +1589,12 @@
"pand %%mm4, %%mm1 \n\t"
"packuswb %%mm0, %%mm0 \n\t"
"packuswb %%mm1, %%mm1 \n\t"
- "movd %%mm0, (%4, %%eax) \n\t"
- "movd %%mm1, (%3, %%eax) \n\t"
- "addl $4, %%eax \n\t"
+ "movd %%mm0, (%4, %%"REG_a") \n\t"
+ "movd %%mm1, (%3, %%"REG_a") \n\t"
+ "add $4, %%"REG_a" \n\t"
" js 1b \n\t"
- : : "g" (-width), "r" (src1+width*4), "r" (src2+width*4), "r" (dstU+width), "r" (dstV+width)
- : "%eax"
+ : : "g" ((long)-width), "r" (src1+width*4), "r" (src2+width*4), "r" (dstU+width), "r" (dstV+width)
+ : "%"REG_a
);
#else
int i;
@@ -1635,20 +1649,20 @@
{
#ifdef HAVE_MMX
asm volatile(
- "movl %2, %%eax \n\t"
+ "mov %2, %%"REG_a" \n\t"
"movq "MANGLE(bgr2YCoeff)", %%mm6 \n\t"
"movq "MANGLE(w1111)", %%mm5 \n\t"
"pxor %%mm7, %%mm7 \n\t"
- "leal (%%eax, %%eax, 2), %%ebx \n\t"
+ "lea (%%"REG_a", %%"REG_a", 2), %%"REG_b"\n\t"
".balign 16 \n\t"
"1: \n\t"
- PREFETCH" 64(%0, %%ebx) \n\t"
- "movd (%0, %%ebx), %%mm0 \n\t"
- "movd 3(%0, %%ebx), %%mm1 \n\t"
+ PREFETCH" 64(%0, %%"REG_b") \n\t"
+ "movd (%0, %%"REG_b"), %%mm0 \n\t"
+ "movd 3(%0, %%"REG_b"), %%mm1 \n\t"
"punpcklbw %%mm7, %%mm0 \n\t"
"punpcklbw %%mm7, %%mm1 \n\t"
- "movd 6(%0, %%ebx), %%mm2 \n\t"
- "movd 9(%0, %%ebx), %%mm3 \n\t"
+ "movd 6(%0, %%"REG_b"), %%mm2 \n\t"
+ "movd 9(%0, %%"REG_b"), %%mm3 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"punpcklbw %%mm7, %%mm3 \n\t"
"pmaddwd %%mm6, %%mm0 \n\t"
@@ -1668,12 +1682,12 @@
"packssdw %%mm2, %%mm0 \n\t"
"psraw $7, %%mm0 \n\t"
- "movd 12(%0, %%ebx), %%mm4 \n\t"
- "movd 15(%0, %%ebx), %%mm1 \n\t"
+ "movd 12(%0, %%"REG_b"), %%mm4 \n\t"
+ "movd 15(%0, %%"REG_b"), %%mm1 \n\t"
"punpcklbw %%mm7, %%mm4 \n\t"
"punpcklbw %%mm7, %%mm1 \n\t"
- "movd 18(%0, %%ebx), %%mm2 \n\t"
- "movd 21(%0, %%ebx), %%mm3 \n\t"
+ "movd 18(%0, %%"REG_b"), %%mm2 \n\t"
+ "movd 21(%0, %%"REG_b"), %%mm3 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"punpcklbw %%mm7, %%mm3 \n\t"
"pmaddwd %%mm6, %%mm4 \n\t"
@@ -1690,18 +1704,18 @@
"packssdw %%mm3, %%mm2 \n\t"
"pmaddwd %%mm5, %%mm4 \n\t"
"pmaddwd %%mm5, %%mm2 \n\t"
- "addl $24, %%ebx \n\t"
+ "add $24, %%"REG_b" \n\t"
"packssdw %%mm2, %%mm4 \n\t"
"psraw $7, %%mm4 \n\t"
"packuswb %%mm4, %%mm0 \n\t"
"paddusb "MANGLE(bgr2YOffset)", %%mm0 \n\t"
- "movq %%mm0, (%1, %%eax) \n\t"
- "addl $8, %%eax \n\t"
+ "movq %%mm0, (%1, %%"REG_a") \n\t"
+ "add $8, %%"REG_a" \n\t"
" js 1b \n\t"
- : : "r" (src+width*3), "r" (dst+width), "g" (-width)
- : "%eax", "%ebx"
+ : : "r" (src+width*3), "r" (dst+width), "g" ((long)-width)
+ : "%"REG_a, "%"REG_b
);
#else
int i;
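The 24bpp luma loop runs two counters at once, a pixel index in REG_a and a byte index in REG_b related by the lea (i + i*2 = 3*i); both take part in addressing, so both had to become 64-bit. Control-flow sketch, with src3 standing for the biased base src+width*3 from %0:

    long i  = -(long)width;      /* pixel counter (REG_a)             */
    long bi = 3*i;               /* byte counter: lea (i,i,2) (REG_b) */
    do {
        /* read 24 BGR bytes at src3 + bi, emit 8 Y bytes at dst + i */
        bi += 24;                /* add $24, REG_b */
        i  += 8;                 /* add $8,  REG_a */
    } while (i < 0);             /* js 1b          */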
@@ -1720,21 +1734,21 @@
{
#ifdef HAVE_MMX
asm volatile(
- "movl %4, %%eax \n\t"
+ "mov %4, %%"REG_a" \n\t"
"movq "MANGLE(w1111)", %%mm5 \n\t"
"movq "MANGLE(bgr2UCoeff)", %%mm6 \n\t"
"pxor %%mm7, %%mm7 \n\t"
- "leal (%%eax, %%eax, 2), %%ebx \n\t"
- "addl %%ebx, %%ebx \n\t"
+ "lea (%%"REG_a", %%"REG_a", 2), %%"REG_b" \n\t"
+ "add %%"REG_b", %%"REG_b" \n\t"
".balign 16 \n\t"
"1: \n\t"
- PREFETCH" 64(%0, %%ebx) \n\t"
- PREFETCH" 64(%1, %%ebx) \n\t"
+ PREFETCH" 64(%0, %%"REG_b") \n\t"
+ PREFETCH" 64(%1, %%"REG_b") \n\t"
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
- "movq (%0, %%ebx), %%mm0 \n\t"
- "movq (%1, %%ebx), %%mm1 \n\t"
- "movq 6(%0, %%ebx), %%mm2 \n\t"
- "movq 6(%1, %%ebx), %%mm3 \n\t"
+ "movq (%0, %%"REG_b"), %%mm0 \n\t"
+ "movq (%1, %%"REG_b"), %%mm1 \n\t"
+ "movq 6(%0, %%"REG_b"), %%mm2 \n\t"
+ "movq 6(%1, %%"REG_b"), %%mm3 \n\t"
PAVGB(%%mm1, %%mm0)
PAVGB(%%mm3, %%mm2)
"movq %%mm0, %%mm1 \n\t"
@@ -1746,10 +1760,10 @@
"punpcklbw %%mm7, %%mm0 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
#else
- "movd (%0, %%ebx), %%mm0 \n\t"
- "movd (%1, %%ebx), %%mm1 \n\t"
- "movd 3(%0, %%ebx), %%mm2 \n\t"
- "movd 3(%1, %%ebx), %%mm3 \n\t"
+ "movd (%0, %%"REG_b"), %%mm0 \n\t"
+ "movd (%1, %%"REG_b"), %%mm1 \n\t"
+ "movd 3(%0, %%"REG_b"), %%mm2 \n\t"
+ "movd 3(%1, %%"REG_b"), %%mm3 \n\t"
"punpcklbw %%mm7, %%mm0 \n\t"
"punpcklbw %%mm7, %%mm1 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
@@ -1757,10 +1771,10 @@
"paddw %%mm1, %%mm0 \n\t"
"paddw %%mm3, %%mm2 \n\t"
"paddw %%mm2, %%mm0 \n\t"
- "movd 6(%0, %%ebx), %%mm4 \n\t"
- "movd 6(%1, %%ebx), %%mm1 \n\t"
- "movd 9(%0, %%ebx), %%mm2 \n\t"
- "movd 9(%1, %%ebx), %%mm3 \n\t"
+ "movd 6(%0, %%"REG_b"), %%mm4 \n\t"
+ "movd 6(%1, %%"REG_b"), %%mm1 \n\t"
+ "movd 9(%0, %%"REG_b"), %%mm2 \n\t"
+ "movd 9(%1, %%"REG_b"), %%mm3 \n\t"
"punpcklbw %%mm7, %%mm4 \n\t"
"punpcklbw %%mm7, %%mm1 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
@@ -1792,10 +1806,10 @@
"psraw $7, %%mm0 \n\t"
#if defined (HAVE_MMX2) || defined (HAVE_3DNOW)
- "movq 12(%0, %%ebx), %%mm4 \n\t"
- "movq 12(%1, %%ebx), %%mm1 \n\t"
- "movq 18(%0, %%ebx), %%mm2 \n\t"
- "movq 18(%1, %%ebx), %%mm3 \n\t"
+ "movq 12(%0, %%"REG_b"), %%mm4 \n\t"
+ "movq 12(%1, %%"REG_b"), %%mm1 \n\t"
+ "movq 18(%0, %%"REG_b"), %%mm2 \n\t"
+ "movq 18(%1, %%"REG_b"), %%mm3 \n\t"
PAVGB(%%mm1, %%mm4)
PAVGB(%%mm3, %%mm2)
"movq %%mm4, %%mm1 \n\t"
@@ -1807,10 +1821,10 @@
"punpcklbw %%mm7, %%mm4 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
#else
- "movd 12(%0, %%ebx), %%mm4 \n\t"
- "movd 12(%1, %%ebx), %%mm1 \n\t"
- "movd 15(%0, %%ebx), %%mm2 \n\t"
- "movd 15(%1, %%ebx), %%mm3 \n\t"
+ "movd 12(%0, %%"REG_b"), %%mm4 \n\t"
+ "movd 12(%1, %%"REG_b"), %%mm1 \n\t"
+ "movd 15(%0, %%"REG_b"), %%mm2 \n\t"
+ "movd 15(%1, %%"REG_b"), %%mm3 \n\t"
"punpcklbw %%mm7, %%mm4 \n\t"
"punpcklbw %%mm7, %%mm1 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
@@ -1818,10 +1832,10 @@
"paddw %%mm1, %%mm4 \n\t"
"paddw %%mm3, %%mm2 \n\t"
"paddw %%mm2, %%mm4 \n\t"
- "movd 18(%0, %%ebx), %%mm5 \n\t"
- "movd 18(%1, %%ebx), %%mm1 \n\t"
- "movd 21(%0, %%ebx), %%mm2 \n\t"
- "movd 21(%1, %%ebx), %%mm3 \n\t"
+ "movd 18(%0, %%"REG_b"), %%mm5 \n\t"
+ "movd 18(%1, %%"REG_b"), %%mm1 \n\t"
+ "movd 21(%0, %%"REG_b"), %%mm2 \n\t"
+ "movd 21(%1, %%"REG_b"), %%mm3 \n\t"
"punpcklbw %%mm7, %%mm5 \n\t"
"punpcklbw %%mm7, %%mm1 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
@@ -1850,7 +1864,7 @@
"packssdw %%mm3, %%mm1 \n\t"
"pmaddwd %%mm5, %%mm4 \n\t"
"pmaddwd %%mm5, %%mm1 \n\t"
- "addl $24, %%ebx \n\t"
+ "add $24, %%"REG_b" \n\t"
"packssdw %%mm1, %%mm4 \n\t" // V3 V2 U3 U2
"psraw $7, %%mm4 \n\t"
@@ -1860,13 +1874,13 @@
"packsswb %%mm1, %%mm0 \n\t"
"paddb "MANGLE(bgr2UVOffset)", %%mm0 \n\t"
- "movd %%mm0, (%2, %%eax) \n\t"
+ "movd %%mm0, (%2, %%"REG_a") \n\t"
"punpckhdq %%mm0, %%mm0 \n\t"
- "movd %%mm0, (%3, %%eax) \n\t"
- "addl $4, %%eax \n\t"
+ "movd %%mm0, (%3, %%"REG_a") \n\t"
+ "add $4, %%"REG_a" \n\t"
" js 1b \n\t"
- : : "r" (src1+width*6), "r" (src2+width*6), "r" (dstU+width), "r" (dstV+width), "g" (-width)
- : "%eax", "%ebx"
+ : : "r" (src1+width*6), "r" (src2+width*6), "r" (dstU+width), "r" (dstV+width), "g" ((long)-width)
+ : "%"REG_a, "%"REG_b
);
#else
int i;
@@ -2024,23 +2038,23 @@
assert(filterSize % 4 == 0 && filterSize>0);
if(filterSize==4) // allways true for upscaling, sometimes for down too
{
- int counter= -2*dstW;
+ long counter= -2*dstW;
filter-= counter*2;
filterPos-= counter/2;
dst-= counter/2;
asm volatile(
"pxor %%mm7, %%mm7 \n\t"
"movq "MANGLE(w02)", %%mm6 \n\t"
- "pushl %%ebp \n\t" // we use 7 regs here ...
- "movl %%eax, %%ebp \n\t"
+ "push %%"REG_BP" \n\t" // we use 7 regs here ...
+ "mov %%"REG_a", %%"REG_BP" \n\t"
".balign 16 \n\t"
"1: \n\t"
- "movzwl (%2, %%ebp), %%eax \n\t"
- "movzwl 2(%2, %%ebp), %%ebx \n\t"
- "movq (%1, %%ebp, 4), %%mm1 \n\t"
- "movq 8(%1, %%ebp, 4), %%mm3 \n\t"
- "movd (%3, %%eax), %%mm0 \n\t"
- "movd (%3, %%ebx), %%mm2 \n\t"
+ "movzxw (%2, %%"REG_BP"), %%"REG_a"\n\t"
+ "movzxw 2(%2, %%"REG_BP"), %%"REG_b"\n\t"
+ "movq (%1, %%"REG_BP", 4), %%mm1\n\t"
+ "movq 8(%1, %%"REG_BP", 4), %%mm3\n\t"
+ "movd (%3, %%"REG_a"), %%mm0 \n\t"
+ "movd (%3, %%"REG_b"), %%mm2 \n\t"
"punpcklbw %%mm7, %%mm0 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"pmaddwd %%mm1, %%mm0 \n\t"
@@ -2050,44 +2064,44 @@
"packssdw %%mm3, %%mm0 \n\t"
"pmaddwd %%mm6, %%mm0 \n\t"
"packssdw %%mm0, %%mm0 \n\t"
- "movd %%mm0, (%4, %%ebp) \n\t"
- "addl $4, %%ebp \n\t"
+ "movd %%mm0, (%4, %%"REG_BP") \n\t"
+ "add $4, %%"REG_BP" \n\t"
" jnc 1b \n\t"
- "popl %%ebp \n\t"
+ "pop %%"REG_BP" \n\t"
: "+a" (counter)
: "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
- : "%ebx"
+ : "%"REG_b
);
}
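hScale's four-tap branch needs seven general registers (hence the push/pop of REG_BP, which serves as the loop counter), and the counter switched from int to long because it is folded straight into the pointer operands. The biasing above lets one negative byte counter index all three arrays at once:

    long counter = -2*dstW;   /* dst is int16_t: 2 bytes per sample  */
    filter    -= counter*2;   /* 4 taps of 2 bytes per output sample */
    filterPos -= counter/2;   /* one 16-bit position per sample      */
    dst       -= counter/2;
    /* loop: counter += 4 per two outputs; "jnc 1b" exits on the
       carry produced when the counter crosses zero */

With an int counter the asm would mix 32-bit and 64-bit registers in one addressing mode and stop assembling on x86_64.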
else if(filterSize==8)
{
- int counter= -2*dstW;
+ long counter= -2*dstW;
filter-= counter*4;
filterPos-= counter/2;
dst-= counter/2;
asm volatile(
"pxor %%mm7, %%mm7 \n\t"
"movq "MANGLE(w02)", %%mm6 \n\t"
- "pushl %%ebp \n\t" // we use 7 regs here ...
- "movl %%eax, %%ebp \n\t"
+ "push %%"REG_BP" \n\t" // we use 7 regs here ...
+ "mov %%"REG_a", %%"REG_BP" \n\t"
".balign 16 \n\t"
"1: \n\t"
- "movzwl (%2, %%ebp), %%eax \n\t"
- "movzwl 2(%2, %%ebp), %%ebx \n\t"
- "movq (%1, %%ebp, 8), %%mm1 \n\t"
- "movq 16(%1, %%ebp, 8), %%mm3 \n\t"
- "movd (%3, %%eax), %%mm0 \n\t"
- "movd (%3, %%ebx), %%mm2 \n\t"
+ "movzxw (%2, %%"REG_BP"), %%"REG_a"\n\t"
+ "movzxw 2(%2, %%"REG_BP"), %%"REG_b"\n\t"
+ "movq (%1, %%"REG_BP", 8), %%mm1\n\t"
+ "movq 16(%1, %%"REG_BP", 8), %%mm3\n\t"
+ "movd (%3, %%"REG_a"), %%mm0 \n\t"
+ "movd (%3, %%"REG_b"), %%mm2 \n\t"
"punpcklbw %%mm7, %%mm0 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"pmaddwd %%mm1, %%mm0 \n\t"
"pmaddwd %%mm2, %%mm3 \n\t"
- "movq 8(%1, %%ebp, 8), %%mm1 \n\t"
- "movq 24(%1, %%ebp, 8), %%mm5 \n\t"
- "movd 4(%3, %%eax), %%mm4 \n\t"
- "movd 4(%3, %%ebx), %%mm2 \n\t"
+ "movq 8(%1, %%"REG_BP", 8), %%mm1\n\t"
+ "movq 24(%1, %%"REG_BP", 8), %%mm5\n\t"
+ "movd 4(%3, %%"REG_a"), %%mm4 \n\t"
+ "movd 4(%3, %%"REG_b"), %%mm2 \n\t"
"punpcklbw %%mm7, %%mm4 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"pmaddwd %%mm1, %%mm4 \n\t"
@@ -2100,19 +2114,19 @@
"packssdw %%mm3, %%mm0 \n\t"
"pmaddwd %%mm6, %%mm0 \n\t"
"packssdw %%mm0, %%mm0 \n\t"
- "movd %%mm0, (%4, %%ebp) \n\t"
- "addl $4, %%ebp \n\t"
+ "movd %%mm0, (%4, %%"REG_BP") \n\t"
+ "add $4, %%"REG_BP" \n\t"
" jnc 1b \n\t"
- "popl %%ebp \n\t"
+ "pop %%"REG_BP" \n\t"
: "+a" (counter)
: "c" (filter), "d" (filterPos), "S" (src), "D" (dst)
- : "%ebx"
+ : "%"REG_b
);
}
else
{
- int counter= -2*dstW;
+ long counter= -2*dstW;
// filter-= counter*filterSize/2;
filterPos-= counter/2;
dst-= counter/2;
@@ -2121,42 +2135,42 @@
"movq "MANGLE(w02)", %%mm6 \n\t"
".balign 16 \n\t"
"1: \n\t"
- "movl %2, %%ecx \n\t"
- "movzwl (%%ecx, %0), %%eax \n\t"
- "movzwl 2(%%ecx, %0), %%ebx \n\t"
- "movl %5, %%ecx \n\t"
+ "mov %2, %%"REG_c" \n\t"
+ "movzxw (%%"REG_c", %0), %%"REG_a"\n\t"
+ "movzxw 2(%%"REG_c", %0), %%"REG_b"\n\t"
+ "mov %5, %%"REG_c" \n\t"
"pxor %%mm4, %%mm4 \n\t"
"pxor %%mm5, %%mm5 \n\t"
"2: \n\t"
"movq (%1), %%mm1 \n\t"
"movq (%1, %6), %%mm3 \n\t"
- "movd (%%ecx, %%eax), %%mm0 \n\t"
- "movd (%%ecx, %%ebx), %%mm2 \n\t"
+ "movd (%%"REG_c", %%"REG_a"), %%mm0\n\t"
+ "movd (%%"REG_c", %%"REG_b"), %%mm2\n\t"
"punpcklbw %%mm7, %%mm0 \n\t"
"punpcklbw %%mm7, %%mm2 \n\t"
"pmaddwd %%mm1, %%mm0 \n\t"
"pmaddwd %%mm2, %%mm3 \n\t"
"paddd %%mm3, %%mm5 \n\t"
"paddd %%mm0, %%mm4 \n\t"
- "addl $8, %1 \n\t"
- "addl $4, %%ecx \n\t"
- "cmpl %4, %%ecx \n\t"
+ "add $8, %1 \n\t"
+ "add $4, %%"REG_c" \n\t"
+ "cmp %4, %%"REG_c" \n\t"
" jb 2b \n\t"
- "addl %6, %1 \n\t"
+ "add %6, %1 \n\t"
"psrad $8, %%mm4 \n\t"
"psrad $8, %%mm5 \n\t"
"packssdw %%mm5, %%mm4 \n\t"
"pmaddwd %%mm6, %%mm4 \n\t"
"packssdw %%mm4, %%mm4 \n\t"
- "movl %3, %%eax \n\t"
- "movd %%mm4, (%%eax, %0) \n\t"
- "addl $4, %0 \n\t"
+ "mov %3, %%"REG_a" \n\t"
+ "movd %%mm4, (%%"REG_a", %0) \n\t"
+ "add $4, %0 \n\t"
" jnc 1b \n\t"
: "+r" (counter), "+r" (filter)
: "m" (filterPos), "m" (dst), "m"(src+filterSize),
- "m" (src), "r" (filterSize*2)
- : "%ebx", "%eax", "%ecx"
+ "m" (src), "r" ((long)filterSize*2)
+ : "%"REG_b, "%"REG_a, "%"REG_c
);
}
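Stripped of the register juggling, all three MMX branches compute the same convolution as the scalar #else fallback; restated with explicit clipping (sketch):

    int i, j;
    for (i = 0; i < dstW; i++) {
        int val = 0;
        for (j = 0; j < filterSize; j++)
            val += src[filterPos[i] + j] * filter[filterSize*i + j];
        int v = val >> 7;
        dst[i] = v < 0 ? 0 : (v > 32767 ? 32767 : v);   /* 15-bit result */
    }

The generic branch merely does two output samples per outer pass and two taps per pmaddwd, with the filter pointer advanced by filterSize*2 bytes per sample, which is why that stride also had to be cast to long.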
#else
@@ -2241,28 +2255,28 @@
}
else // Fast Bilinear upscale / crap downscale
{
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
#ifdef HAVE_MMX2
int i;
if(canMMX2BeUsed)
{
asm volatile(
"pxor %%mm7, %%mm7 \n\t"
- "movl %0, %%ecx \n\t"
- "movl %1, %%edi \n\t"
- "movl %2, %%edx \n\t"
- "movl %3, %%ebx \n\t"
- "xorl %%eax, %%eax \n\t" // i
- PREFETCH" (%%ecx) \n\t"
- PREFETCH" 32(%%ecx) \n\t"
- PREFETCH" 64(%%ecx) \n\t"
+ "mov %0, %%"REG_c" \n\t"
+ "mov %1, %%"REG_D" \n\t"
+ "mov %2, %%"REG_d" \n\t"
+ "mov %3, %%"REG_b" \n\t"
+ "xor %%"REG_a", %%"REG_a" \n\t" // i
+ PREFETCH" (%%"REG_c") \n\t"
+ PREFETCH" 32(%%"REG_c") \n\t"
+ PREFETCH" 64(%%"REG_c") \n\t"
#define FUNNY_Y_CODE \
- "movl (%%ebx), %%esi \n\t"\
+ "mov (%%"REG_b"), %%"REG_S" \n\t"\
"call *%4 \n\t"\
- "addl (%%ebx, %%eax), %%ecx \n\t"\
- "addl %%eax, %%edi \n\t"\
- "xorl %%eax, %%eax \n\t"\
+ "addl (%%"REG_b", %%"REG_a"), %%ecx\n\t"\
+ "add %%"REG_a", %%"REG_d" \n\t"\
+ "xor %%"REG_a", %%"REG_a" \n\t"\
FUNNY_Y_CODE
FUNNY_Y_CODE
@@ -2275,7 +2289,7 @@
:: "m" (src), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
"m" (funnyYCode)
- : "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi"
+ : "%"REG_a, "%"REG_b, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_d
);
for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) dst[i] = src[srcW-1]*128;
}
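The MMX2 path does not convolve inline at all: funnyYCode is a scaler generated at runtime elsewhere in swscale.c, and each FUNNY_Y_CODE expansion just loads the next block's position and jumps into it through the indirect "call *%4". Conceptually (the typedef is hypothetical; the generated block passes its state in registers, not C arguments):

    typedef void (*funny_fn)(void);
    funny_fn f = (funny_fn)funnyYCode;   /* runtime-emitted code block */
    /* per expansion: load the position into REG_S, f(), advance the
       source/destination cursors, reset the inner counter */

Note the surviving "addl ... %%ecx" inside the macro: the 32-bit add zero-extends into the full register, so it stays correct only while the source buffer sits below 4GB, which looks like a leftover assumption rather than a deliberate choice.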
@@ -2284,43 +2298,43 @@
#endif
//NO MMX just normal asm ...
asm volatile(
- "xorl %%eax, %%eax \n\t" // i
- "xorl %%ebx, %%ebx \n\t" // xx
+ "xor %%"REG_a", %%"REG_a" \n\t" // i
+ "xor %%"REG_b", %%"REG_b" \n\t" // xx
"xorl %%ecx, %%ecx \n\t" // 2*xalpha
".balign 16 \n\t"
"1: \n\t"
- "movzbl (%0, %%ebx), %%edi \n\t" //src[xx]
- "movzbl 1(%0, %%ebx), %%esi \n\t" //src[xx+1]
+ "movzbl (%0, %%"REG_b"), %%edi \n\t" //src[xx]
+ "movzbl 1(%0, %%"REG_b"), %%esi \n\t" //src[xx+1]
"subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
"imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
"shll $16, %%edi \n\t"
"addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
- "movl %1, %%edi \n\t"
+ "mov %1, %%"REG_D" \n\t"
"shrl $9, %%esi \n\t"
- "movw %%si, (%%edi, %%eax, 2) \n\t"
+ "movw %%si, (%%"REG_D", %%"REG_a", 2)\n\t"
"addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
- "adcl %3, %%ebx \n\t" //xx+= xInc>>8 + carry
+ "adc %3, %%"REG_b" \n\t" //xx+= xInc>>8 + carry
- "movzbl (%0, %%ebx), %%edi \n\t" //src[xx]
- "movzbl 1(%0, %%ebx), %%esi \n\t" //src[xx+1]
+ "movzbl (%0, %%"REG_b"), %%edi \n\t" //src[xx]
+ "movzbl 1(%0, %%"REG_b"), %%esi \n\t" //src[xx+1]
"subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
"imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
"shll $16, %%edi \n\t"
"addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
- "movl %1, %%edi \n\t"
+ "mov %1, %%"REG_D" \n\t"
"shrl $9, %%esi \n\t"
- "movw %%si, 2(%%edi, %%eax, 2) \n\t"
+ "movw %%si, 2(%%"REG_D", %%"REG_a", 2)\n\t"
"addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
- "adcl %3, %%ebx \n\t" //xx+= xInc>>8 + carry
+ "adc %3, %%"REG_b" \n\t" //xx+= xInc>>8 + carry
- "addl $2, %%eax \n\t"
- "cmpl %2, %%eax \n\t"
+ "add $2, %%"REG_a" \n\t"
+ "cmp %2, %%"REG_a" \n\t"
" jb 1b \n\t"
:: "r" (src), "m" (dst), "m" (dstWidth), "m" (xInc>>16), "m" (xInc&0xFFFF)
- : "%eax", "%ebx", "%ecx", "%edi", "%esi"
+ : "%"REG_a, "%"REG_b, "%ecx", "%"REG_D, "%esi"
);
#ifdef HAVE_MMX2
} //if MMX2 can't be used
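The non-MMX2 path is a 16.16 fixed-point bilinear filter: %%ecx holds the fractional position, and the addw/adc pair adds xInc in two halves so the carry out of the low 16 bits bumps the integer position. One output sample in C (sketch; dst here is the int16 intermediate buffer):

    uint16_t frac = 0;                   /* 2*xalpha, low 16 bits   */
    long     xx   = 0;                   /* integer source position */
    int i;
    for (i = 0; i < dstWidth; i++) {
        int a = src[xx], b = src[xx+1];
        dst[i] = (uint16_t)(((b - a)*frac + (a << 16)) >> 9); /* value<<7 */
        unsigned sum = frac + (xInc & 0xFFFF);
        xx  += (xInc >> 16) + (sum >> 16);   /* adc folds the carry in */
        frac = (uint16_t)sum;
    }

Only the values that reach an addressing mode (the i counter, the xx position, the destination base) moved to 64-bit registers; the pixel arithmetic keeps its movzbl/imull/shrl 32-bit forms unchanged.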
@@ -2410,40 +2424,40 @@
}
else // Fast Bilinear upscale / crap downscale
{
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
#ifdef HAVE_MMX2
int i;
if(canMMX2BeUsed)
{
asm volatile(
"pxor %%mm7, %%mm7 \n\t"
- "movl %0, %%ecx \n\t"
- "movl %1, %%edi \n\t"
- "movl %2, %%edx \n\t"
- "movl %3, %%ebx \n\t"
- "xorl %%eax, %%eax \n\t" // i
- PREFETCH" (%%ecx) \n\t"
- PREFETCH" 32(%%ecx) \n\t"
- PREFETCH" 64(%%ecx) \n\t"
+ "mov %0, %%"REG_c" \n\t"
+ "mov %1, %%"REG_D" \n\t"
+ "mov %2, %%"REG_d" \n\t"
+ "mov %3, %%"REG_b" \n\t"
+ "xor %%"REG_a", %%"REG_a" \n\t" // i
+ PREFETCH" (%%"REG_c") \n\t"
+ PREFETCH" 32(%%"REG_c") \n\t"
+ PREFETCH" 64(%%"REG_c") \n\t"
#define FUNNY_UV_CODE \
- "movl (%%ebx), %%esi \n\t"\
+ "movl (%%"REG_b"), %%esi \n\t"\
"call *%4 \n\t"\
- "addl (%%ebx, %%eax), %%ecx \n\t"\
- "addl %%eax, %%edi \n\t"\
- "xorl %%eax, %%eax \n\t"\
+ "addl (%%"REG_b", %%"REG_a"), %%ecx\n\t"\
+ "add %%"REG_a", %%"REG_D" \n\t"\
+ "xor %%"REG_a", %%"REG_a" \n\t"\
FUNNY_UV_CODE
FUNNY_UV_CODE
FUNNY_UV_CODE
FUNNY_UV_CODE
- "xorl %%eax, %%eax \n\t" // i
- "movl %5, %%ecx \n\t" // src
- "movl %1, %%edi \n\t" // buf1
- "addl $4096, %%edi \n\t"
- PREFETCH" (%%ecx) \n\t"
- PREFETCH" 32(%%ecx) \n\t"
- PREFETCH" 64(%%ecx) \n\t"
+ "xor %%"REG_a", %%"REG_a" \n\t" // i
+ "mov %5, %%"REG_c" \n\t" // src
+ "mov %1, %%"REG_D" \n\t" // buf1
+ "add $4096, %%"REG_D" \n\t"
+ PREFETCH" (%%"REG_c") \n\t"
+ PREFETCH" 32(%%"REG_c") \n\t"
+ PREFETCH" 64(%%"REG_c") \n\t"
FUNNY_UV_CODE
FUNNY_UV_CODE
@@ -2452,7 +2466,7 @@
:: "m" (src1), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
"m" (funnyUVCode), "m" (src2)
- : "%eax", "%ebx", "%ecx", "%edx", "%esi", "%edi"
+ : "%"REG_a, "%"REG_b, "%"REG_c, "%"REG_d, "%esi", "%"REG_D
);
for(i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--)
{
@@ -2465,41 +2479,41 @@
{
#endif
asm volatile(
- "xorl %%eax, %%eax \n\t" // i
- "xorl %%ebx, %%ebx \n\t" // xx
+ "xor %%"REG_a", %%"REG_a" \n\t" // i
+ "xor %%"REG_b", %%"REG_b" \n\t" // xx
"xorl %%ecx, %%ecx \n\t" // 2*xalpha
".balign 16 \n\t"
"1: \n\t"
- "movl %0, %%esi \n\t"
- "movzbl (%%esi, %%ebx), %%edi \n\t" //src[xx]
- "movzbl 1(%%esi, %%ebx), %%esi \n\t" //src[xx+1]
+ "mov %0, %%"REG_S" \n\t"
+ "movzbl (%%"REG_S", %%"REG_b"), %%edi \n\t" //src[xx]
+ "movzbl 1(%%"REG_S", %%"REG_b"), %%esi \n\t" //src[xx+1]
"subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
"imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
"shll $16, %%edi \n\t"
"addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
- "movl %1, %%edi \n\t"
+ "mov %1, %%"REG_D" \n\t"
"shrl $9, %%esi \n\t"
- "movw %%si, (%%edi, %%eax, 2) \n\t"
+ "movw %%si, (%%"REG_d", %%"REG_a", 2)\n\t"
- "movzbl (%5, %%ebx), %%edi \n\t" //src[xx]
- "movzbl 1(%5, %%ebx), %%esi \n\t" //src[xx+1]
+ "movzbl (%5, %%"REG_b"), %%edi \n\t" //src[xx]
+ "movzbl 1(%5, %%"REG_b"), %%esi \n\t" //src[xx+1]
"subl %%edi, %%esi \n\t" //src[xx+1] - src[xx]
"imull %%ecx, %%esi \n\t" //(src[xx+1] - src[xx])*2*xalpha
"shll $16, %%edi \n\t"
"addl %%edi, %%esi \n\t" //src[xx+1]*2*xalpha + src[xx]*(1-2*xalpha)
- "movl %1, %%edi \n\t"
+ "mov %1, %%"REG_D" \n\t"
"shrl $9, %%esi \n\t"
- "movw %%si, 4096(%%edi, %%eax, 2)\n\t"
+ "movw %%si, 4096(%%"REG_D", %%"REG_a", 2)\n\t"
"addw %4, %%cx \n\t" //2*xalpha += xInc&0xFF
- "adcl %3, %%ebx \n\t" //xx+= xInc>>8 + carry
- "addl $1, %%eax \n\t"
- "cmpl %2, %%eax \n\t"
+ "adc %3, %%"REG_b" \n\t" //xx+= xInc>>8 + carry
+ "add $1, %%"REG_a" \n\t"
+ "cmp %2, %%"REG_a" \n\t"
" jb 1b \n\t"
- :: "m" (src1), "m" (dst), "m" (dstWidth), "m" (xInc>>16), "m" (xInc&0xFFFF),
+ :: "m" (src1), "m" (dst), "m" ((long)dstWidth), "m" ((long)(xInc>>16)), "m" ((xInc&0xFFFF)),
"r" (src2)
- : "%eax", "%ebx", "%ecx", "%edi", "%esi"
+ : "%"REG_a, "%"REG_b, "%ecx", "%"REG_D, "%esi"
);
#ifdef HAVE_MMX2
} //if MMX2 can't be used
Index: yuv2rgb.c
===================================================================
RCS file: /cvsroot/mplayer/main/postproc/yuv2rgb.c,v
retrieving revision 1.26
retrieving revision 1.27
diff -u -r1.26 -r1.27
--- yuv2rgb.c 27 Jun 2004 00:07:15 -0000 1.26
+++ yuv2rgb.c 21 Oct 2004 11:55:20 -0000 1.27
@@ -156,7 +156,7 @@
};
#endif
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
/* hope these constant values are cache line aligned */
uint64_t attribute_used __attribute__((aligned(8))) mmx_00ffw = 0x00ff00ff00ff00ffULL;
@@ -183,14 +183,12 @@
0x0004000400040004LL,};
#undef HAVE_MMX
-#undef ARCH_X86
//MMX versions
#undef RENAME
#define HAVE_MMX
#undef HAVE_MMX2
#undef HAVE_3DNOW
-#define ARCH_X86
#define RENAME(a) a ## _MMX
#include "yuv2rgb_template.c"
@@ -199,7 +197,6 @@
#define HAVE_MMX
#define HAVE_MMX2
#undef HAVE_3DNOW
-#define ARCH_X86
#define RENAME(a) a ## _MMX2
#include "yuv2rgb_template.c"
@@ -583,7 +580,7 @@
SwsFunc yuv2rgb_get_func_ptr (SwsContext *c)
{
-#ifdef ARCH_X86
+#if defined(ARCH_X86) || defined(ARCH_X86_64)
if(c->flags & SWS_CPU_CAPS_MMX2){
switch(c->dstFormat){
case IMGFMT_BGR32: return yuv420_rgb32_MMX2;
Index: yuv2rgb_template.c
===================================================================
RCS file: /cvsroot/mplayer/main/postproc/yuv2rgb_template.c,v
retrieving revision 1.15
retrieving revision 1.16
diff -u -r1.15 -r1.16
--- yuv2rgb_template.c 24 Feb 2003 00:12:30 -0000 1.15
+++ yuv2rgb_template.c 21 Oct 2004 11:55:20 -0000 1.16
@@ -143,7 +143,7 @@
uint8_t *_py = src[0] + y*srcStride[0];
uint8_t *_pu = src[1] + (y>>1)*srcStride[1];
uint8_t *_pv = src[2] + (y>>1)*srcStride[2];
- int index= -h_size/2;
+ long index= -h_size/2;
b5Dither= dither8[y&1];
g6Dither= dither4[y&1];
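The int to long change for index is forced by the addressing modes in the later hunks: index is bound with "+r" and used as a scaled index against 64-bit bases, as in "movq 8 (%5, %0, 2), %%mm6", and gas rejects a 32-bit index register paired with a 64-bit base. Minimal reproduction of the pattern (sketch):

    static void sketch(uint8_t *py, long h_size)
    {
        long index = -h_size/2;              /* must be pointer-sized */
        asm volatile(
            "movq 8(%1, %0, 2), %%mm6 \n\t"  /* 64-bit base + index   */
            : "+r" (index)
            : "r" (py - 2*index));           /* biased base, as in the diff */
    }

With int, GCC substitutes the 32-bit register name for %0 and the file simply stops assembling on x86_64.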
@@ -204,8 +204,8 @@
MOVNTQ " %%mm5, 8 (%1);" /* store pixel 4-7 */
- "addl $16, %1 \n\t"
- "addl $4, %0 \n\t"
+ "add $16, %1 \n\t"
+ "add $4, %0 \n\t"
" js 1b \n\t"
: "+r" (index), "+r" (_image)
@@ -238,7 +238,7 @@
uint8_t *_py = src[0] + y*srcStride[0];
uint8_t *_pu = src[1] + (y>>1)*srcStride[1];
uint8_t *_pv = src[2] + (y>>1)*srcStride[2];
- int index= -h_size/2;
+ long index= -h_size/2;
b5Dither= dither8[y&1];
g6Dither= dither4[y&1];
@@ -295,8 +295,8 @@
MOVNTQ " %%mm5, 8 (%1);" /* store pixel 4-7 */
- "addl $16, %1 \n\t"
- "addl $4, %0 \n\t"
+ "add $16, %1 \n\t"
+ "add $4, %0 \n\t"
" js 1b \n\t"
: "+r" (index), "+r" (_image)
: "r" (_pu - index), "r" (_pv - index), "r"(&c->redDither), "r" (_py - 2*index)
@@ -326,7 +326,7 @@
uint8_t *_py = src[0] + y*srcStride[0];
uint8_t *_pu = src[1] + (y>>1)*srcStride[1];
uint8_t *_pv = src[2] + (y>>1)*srcStride[2];
- int index= -h_size/2;
+ long index= -h_size/2;
/* this mmx assembly code deals with SINGLE scan line at a time, it convert 8
pixels in each iteration */
@@ -440,8 +440,8 @@
"pxor %%mm4, %%mm4 \n\t"
#endif
- "addl $24, %1 \n\t"
- "addl $4, %0 \n\t"
+ "add $24, %1 \n\t"
+ "add $4, %0 \n\t"
" js 1b \n\t"
: "+r" (index), "+r" (_image)
@@ -472,7 +472,7 @@
uint8_t *_py = src[0] + y*srcStride[0];
uint8_t *_pu = src[1] + (y>>1)*srcStride[1];
uint8_t *_pv = src[2] + (y>>1)*srcStride[2];
- int index= -h_size/2;
+ long index= -h_size/2;
/* this mmx assembly code deals with SINGLE scan line at a time, it convert 8
pixels in each iteration */
@@ -526,8 +526,8 @@
"pxor %%mm4, %%mm4;" /* zero mm4 */
"movq 8 (%5, %0, 2), %%mm6;" /* Load 8 Y Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0 */
- "addl $32, %1 \n\t"
- "addl $4, %0 \n\t"
+ "add $32, %1 \n\t"
+ "add $4, %0 \n\t"
" js 1b \n\t"
: "+r" (index), "+r" (_image)