Update of /cvsroot/pure-data/pd/src
In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv20643
Modified Files:
      Tag: devel_0_37
	m_simd_sse_gcc.c m_simd_sse_gcc.h
Log Message:
port of Thomas Grill's vectorized SIMD functions from VC to gcc
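For orientation (not part of the diff below): each routine follows Pd's
perform convention, t_int *perf(t_int *w), where w[1..k] carry the signal
vector pointers and the block size n, and the routine returns w advanced
past its arguments. As an illustrative sketch only, zero_perf_sse_gcc
corresponds roughly to the following C with SSE intrinsics (the function
name and typedefs are hypothetical stand-ins for Pd's m_pd.h types; assumes
16-byte-aligned vectors and n a multiple of 16, as the asm version does):

    #include <xmmintrin.h>

    typedef int t_int;      /* 4-byte, matching ".set T_INT,4" below */
    typedef float t_float;  /* 4-byte, matching ".set T_FLOAT,4" below */

    t_int *zero_perf_sse_c(t_int *w)    /* hypothetical name */
    {
        t_float *out = (t_float *)w[1];
        t_int n = w[2];
        __m128 zero = _mm_setzero_ps();     /* xorps %xmm0, %xmm0 */
        for (; n > 0; n -= 16, out += 16)   /* 16 samples per iteration */
        {
            _mm_store_ps(out,      zero);   /* movaps: needs alignment */
            _mm_store_ps(out +  4, zero);
            _mm_store_ps(out +  8, zero);
            _mm_store_ps(out + 12, zero);
        }
        return w + 3;
    }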
Index: m_simd_sse_gcc.c
===================================================================
RCS file: /cvsroot/pure-data/pd/src/Attic/m_simd_sse_gcc.c,v
retrieving revision 1.1.2.1
retrieving revision 1.1.2.2
diff -C2 -d -r1.1.2.1 -r1.1.2.2
*** m_simd_sse_gcc.c	23 Dec 2003 01:15:39 -0000	1.1.2.1
--- m_simd_sse_gcc.c	10 Jul 2004 19:57:20 -0000	1.1.2.2
***************
*** 10,12 ****
--- 10,997 ----
+ /* TB: adapted from the xxx_sse_vc routines
+    since gcc inline assembler can't access the C code the way VC can,
+    the functions themselves have been adapted to assembler */
+
+ /* zero_perf_sse_gcc (t_int * w) */
+ asm(
+ ".set T_FLOAT,4 \n" /* sizeof(t_float) */
+ ".set T_INT,4 \n" /* sizeof(t_int) */
+
+ /* header */
+ ".text \n"
+ ".align 4 \n" /* alignment */
+
+ ".globl zero_perf_sse_gcc \n"
+ "zero_perf_sse_gcc: \n"
+
+ /* head of function */
+ "pushl %ebp \n"
+ "movl %esp, %ebp \n"
+ "pushl %esi \n"
+ "movl %esi, -4(%ebp) \n"
+
+ /* get arguments */
+ "movl 8(%ebp), %esi \n" /* argument pointer */
+ "movl T_INT(%esi), %edx \n" /* out */
+ "movl 2*T_INT(%esi), %ecx \n" /* n */
+
+ "xorps %xmm0, %xmm0 \n" /* load zero */
+ "shrl $4, %ecx \n" /* divide by 16 */
+
+ /* loop: *out = 0 */
+ "zpsg_loop: \n"
+ "movaps %xmm0, (%edx) \n"
+ "movaps %xmm0, 4*T_FLOAT(%edx) \n"
+ "movaps %xmm0, 8*T_FLOAT(%edx) \n"
+ "movaps %xmm0, 12*T_FLOAT(%edx) \n"
+
+ "addl $64, %edx \n" /* out += 16 */
+ "loop zpsg_loop \n"
+
+
+ /* return w+3; */
+ "movl -4(%ebp), %esi \n"
+ "movl 8(%ebp), %eax \n"
+ "addl $12, %eax \n"
+ "movl %ebp, %esp \n"
+ "popl %ebp \n"
+ "ret \n"
+
+
+ ".align 4 \n" /* alignment */
+ ".type zero_perf_sse_gcc, @function \n"
+
+ );
+
+ /* copy_perf_sse_gcc (t_int * w) */
+ asm(
+ ".set T_FLOAT,4 \n"
+ ".set T_INT,4 \n"
+
+ /* header */
+ ".text \n"
+ ".align 4 \n" /* alignment */
+
+ ".globl copy_perf_sse_gcc \n"
+ "copy_perf_sse_gcc: \n"
+
+ /* head of function */
+ "pushl %ebp \n"
+ "movl %esp, %ebp \n"
+ "subl $8, %esp \n"
+ "movl %ebx, -4(%ebp) \n"
+ "movl %esi, -8(%ebp) \n"
+
+ /* get arguments */
+ "movl 8(%ebp), %esi \n" /* argument pointer */
+ "movl 1*T_INT(%esi), %ebx \n" /* in1 */
+ "movl 2*T_INT(%esi), %edx \n" /* out */
+ "movl 3*T_INT(%esi), %ecx \n" /* n */
+ "shrl $4, %ecx \n" /* divide by 16 */
+
+ /* loop: *out = *in */
+ "cpsg_loop: \n"
+ "movaps (%ebx), %xmm0 \n"
+ "movaps %xmm0, (%edx) \n"
+ "movaps 4*T_FLOAT(%ebx), %xmm1 \n"
+ "movaps %xmm1, 4*T_FLOAT(%edx) \n"
+
+ "movaps 8*T_FLOAT(%ebx), %xmm2 \n"
+ "movaps %xmm2, 8*T_FLOAT(%edx) \n"
+ "movaps 12*T_FLOAT(%ebx), %xmm3 \n"
+ "movaps %xmm3, 12*T_FLOAT(%edx) \n"
+
+ "addl $64, %ebx \n" /* in1 += 16 */
+ "addl $64, %edx \n" /* out += 16 */
+ "loop cpsg_loop \n"
+
+ /* return w+4; */
+ "movl -4(%ebp), %ebx \n"
+ "movl -8(%ebp), %esi \n"
+ "movl 8(%ebp), %eax \n"
+ "addl $16, %eax \n"
+ "movl %ebp, %esp \n"
+ "popl %ebp \n"
+ "ret \n"
+
+
+ ".align 4 \n" /* alignment */
+ ".type copy_perf_sse_gcc, @function \n"
+ );
+
+
+ /* sig_tilde_perf_sse_gcc(t_int * w) */
+ asm(
+ ".set T_FLOAT,4 \n"
+ ".set T_INT,4 \n"
+
+ /* header */
+ ".text \n"
+ ".align 4 \n" /* alignment */
+
+ ".globl sig_tilde_perf_sse_gcc \n"
+ "sig_tilde_perf_sse_gcc: \n"
+
+ /* head of function */
+ "pushl %ebp \n"
+ "movl %esp, %ebp \n"
+ "pushl %esi \n"
+ "movl %esi, -4(%ebp) \n"
+
+ /* get arguments */
+ "movl 8(%ebp), %esi \n" /* argument pointer */
+ "movl 2*T_INT(%esi), %edx \n" /* out */
+ "movl 1*T_INT(%esi), %eax \n" /* f */
+ "movl 3*T_INT(%esi), %ecx \n" /* n */
+
+ /* set registers to f */
+ "movss (%eax), %xmm0 \n"
+ "shufps $0, %xmm0, %xmm0 \n"
+ "movl 12(%esi), %ecx \n"
+ "shrl $4, %ecx \n" /* divide by 16 */
+
+ /* loop: *out = f */
+ "sigtpsg_loop: \n"
+ "movaps %xmm0, (%edx) \n"
+ "movaps %xmm0, 16(%edx) \n"
+ "movaps %xmm0, 32(%edx) \n"
+ "movaps %xmm0, 48(%edx) \n"
+ "addl $64, %edx \n" /* out += 16 */
+ "loop sigtpsg_loop \n"
+
+ /* return w+4; */
+ "movl -4(%ebp), %esi \n"
+ "movl 8(%ebp), %eax \n"
+ "addl $16, %eax \n"
+ "movl %ebp, %esp \n"
+ "popl %ebp \n"
"ret \n" + + ".align 4 \n" /* alignment */ + ".type sig_tilde_perf_sse_gcc, @function \n" + + ); + + + + /* plus_perf_sse_gcc (t_int * w)*/ + asm( + ".set T_FLOAT,4 \n" + ".set T_INT,4 \n" + + /* header */ + ".text \n" + ".align 4 \n" /* alignment */ + + ".globl plus_perf_sse_gcc \n" + "plus_perf_sse_gcc: \n" + + /* head of function */ + "pushl %ebp \n" + "movl %esp, %ebp \n" + "subl $8, %esp \n" + "movl %ebx, -4(%ebp) \n" + "movl %esi, -8(%ebp) \n" + + /* get arguments */ + "movl 8(%ebp), %esi \n" /* argument vector */ + + "movl 1*T_INT(%esi), %eax \n" /* in1 */ + "movl 2*T_INT(%esi), %ebx \n" /* in2 */ + "movl 3*T_INT(%esi), %edx \n" /* out */ + "movl 4*T_INT(%esi), %ecx \n" /* n */ + "shrl $4, %ecx \n" /* divide by 16 */ + "xorl %esi, %esi \n" /* reset index */ + + /* loop: *out = *in1 + *in2 */ + "ppsg_loop: \n" + "movaps (%eax,%esi), %xmm0 \n" + "movaps (%ebx,%esi), %xmm1 \n" + "addps %xmm1, %xmm0 \n" + "movaps %xmm0, (%edx,%esi) \n" + + "movaps 4*T_FLOAT(%eax,%esi), %xmm2 \n" + "movaps 4*T_FLOAT(%ebx,%esi), %xmm3 \n" + "addps %xmm3, %xmm2 \n" + "movaps %xmm2, 4*T_FLOAT(%edx,%esi) \n" + + "movaps 8*T_FLOAT(%eax,%esi), %xmm4 \n" + "movaps 8*T_FLOAT(%ebx,%esi), %xmm5 \n" + "addps %xmm5, %xmm4 \n" + "movaps %xmm4, 8*T_FLOAT(%edx,%esi) \n" + + "movaps 12*T_FLOAT(%eax,%esi), %xmm6 \n" + "movaps 12*T_FLOAT(%ebx,%esi), %xmm7 \n" + "addps %xmm7, %xmm6 \n" + "movaps %xmm6, 12*T_FLOAT(%edx,%esi) \n" + + "addl $64, %esi \n" /* out+=16; */ + "loop ppsg_loop \n" + + /* return w+5; */ + "movl -4(%ebp), %ebx \n" + "movl -8(%ebp), %esi \n" + "movl 8(%ebp), %eax \n" + "addl $20, %eax \n" + "movl %ebp, %esp \n" + "popl %ebp \n" + "ret \n" + + + ".align 4 \n" /* alignment */ + ".type plus_perf_sse_gcc, @function \n" + + ); + + /* scalarplus_perf_sse_gcc(t_int *w) */ + asm( + ".set T_FLOAT,4 \n" + ".set T_INT,4 \n" + + /* header */ + ".text \n" + ".align 4 \n" /* alignment */ + + ".globl scalarplus_perf_sse_gcc \n" + "scalarplus_perf_sse_gcc: \n" + + /* head of function */ + "pushl %ebp \n" + "movl %esp, %ebp \n" + "subl $8, %esp \n" + "movl %ebx, -4(%ebp) \n" + "movl %esi, -8(%ebp) \n" + + /* get arguments */ + "movl 8(%ebp), %esi \n" /* argument pointer */ + "movl T_INT(%esi), %ebx \n" /* in */ + "movl 3*T_INT(%esi), %edx \n" /* out */ + "movl 2*T_INT(%esi), %eax \n" /* value */ + "movl 4*T_INT(%esi), %ecx \n" /* n */ + + "movss (%eax), %xmm0 \n" + "shufps $0, %xmm0, %xmm0 \n" + "shrl $4, %ecx \n" /* divide by 16 */ + + /* loop: *out = *in + value */ + "sppsg_loop: \n" + + "movaps (%ebx), %xmm1 \n" + "addps %xmm0, %xmm1 \n" + "movaps %xmm1, (%edx) \n" + + "movaps 4*T_FLOAT(%ebx), %xmm2 \n" + "addps %xmm0, %xmm2 \n" + "movaps %xmm2, 4*T_FLOAT(%edx) \n" + + "movaps 8*T_FLOAT(%ebx), %xmm3 \n" + "addps %xmm0, %xmm3 \n" + "movaps %xmm3, 8*T_FLOAT(%edx) \n" + + "movaps 12*T_FLOAT(%ebx), %xmm4 \n" + "addps %xmm0, %xmm4 \n" + "movaps %xmm4, 12*T_FLOAT(%edx) \n" + + "addl $64, %ebx \n" /* in += 16 */ + "addl $64, %edx \n" /* out += 16 */ + "loop sppsg_loop \n" + + /* return w+5; */ + "movl -4(%ebp), %ebx \n" + "movl -8(%ebp), %esi \n" + "movl 8(%ebp), %eax \n" + "addl $20, %eax \n" + "movl %ebp, %esp \n" + "popl %ebp \n" + "ret \n" + + ".align 4 \n" /* alignment */ + ".type scalarplus_perf_sse_gcc, @function \n" + + ); + + + /* minus_perf_sse_gcc(t_int *w) */ + asm( + ".set T_FLOAT,4 \n" + ".set T_INT,4 \n" + + /* header */ + ".text \n" + ".align 4 \n" /* alignment */ + + ".globl minus_perf_sse_gcc \n" + "minus_perf_sse_gcc: \n" + + /* head of function */ + "pushl %ebp \n" + "movl %esp, %ebp \n" + "subl $8, 
+ "subl $8, %esp \n"
+ "movl %ebx, -4(%ebp) \n"
+ "movl %esi, -8(%ebp) \n"
+
+ /* get arguments */
+ "movl 8(%ebp), %esi \n" /* argument pointer */
+ "movl 1*T_INT(%esi), %eax \n" /* in1 */
+ "movl 2*T_INT(%esi), %ebx \n" /* in2 */
+ "movl 3*T_INT(%esi), %edx \n" /* out */
+ "movl 4*T_INT(%esi), %ecx \n" /* n */
+ "shrl $4, %ecx \n" /* divide by 16 */
+ "xorl %esi, %esi \n" /* reset register */
+
+
+ /* loop: *out = *in1 - *in2 */
+ "mpsg_loop: \n"
+
+ "movaps (%eax,%esi), %xmm0 \n"
+ "movaps (%ebx,%esi), %xmm1 \n"
+ "subps %xmm1, %xmm0 \n"
+ "movaps %xmm0, (%edx,%esi) \n"
+
+ "movaps 4*T_FLOAT(%eax,%esi), %xmm2 \n"
+ "movaps 4*T_FLOAT(%ebx,%esi), %xmm3 \n"
+ "subps %xmm3, %xmm2 \n"
+ "movaps %xmm2, 4*T_FLOAT(%edx,%esi) \n"
+
+ "movaps 8*T_FLOAT(%eax,%esi), %xmm4 \n"
+ "movaps 8*T_FLOAT(%ebx,%esi), %xmm5 \n"
+ "subps %xmm5, %xmm4 \n"
+ "movaps %xmm4, 8*T_FLOAT(%edx,%esi) \n"
+
+ "movaps 12*T_FLOAT(%eax,%esi), %xmm6 \n"
+ "movaps 12*T_FLOAT(%ebx,%esi), %xmm7 \n"
+ "subps %xmm7, %xmm6 \n"
+ "movaps %xmm6, 12*T_FLOAT(%edx,%esi) \n"
+ "addl $64, %esi \n" /* out += 16 */
+
+ "loop mpsg_loop \n"
+
+ /* return w+5; */
+ "movl -4(%ebp), %ebx \n"
+ "movl -8(%ebp), %esi \n"
+ "movl 8(%ebp), %eax \n"
+ "addl $20, %eax \n"
+ "movl %ebp, %esp \n"
+ "popl %ebp \n"
+ "ret \n"
+
+ ".align 4 \n" /* alignment */
+ ".type minus_perf_sse_gcc, @function \n"
+
+ );
+
+ /* scalarminus_perf_sse_gcc(t_int *w) */
+ asm(
+ ".set T_FLOAT,4 \n"
+ ".set T_INT,4 \n"
+
+ /* header */
+ ".text \n"
+ ".align 4 \n" /* alignment */
+
+ ".globl scalarminus_perf_sse_gcc \n"
+ "scalarminus_perf_sse_gcc: \n"
+
+ /* head of function */
+ "pushl %ebp \n"
+ "movl %esp, %ebp \n"
+ "subl $8, %esp \n"
+ "movl %ebx, -4(%ebp) \n"
+ "movl %esi, -8(%ebp) \n"
+
+ /* get arguments */
+ "movl 8(%ebp), %esi \n" /* argument pointer */
+ "movl T_INT(%esi), %ebx \n" /* in */
+ "movl 3*T_INT(%esi), %edx \n" /* out */
+ "movl 2*T_INT(%esi), %eax \n" /* value */
+ "movl 4*T_INT(%esi), %ecx \n" /* n */
+
+ "movss (%eax), %xmm0 \n"
+ "shufps $0, %xmm0, %xmm0 \n"
+ "shrl $4, %ecx \n" /* divide by 16 */
+
+ /* loop: *out = *in - value */
+ "smpsg_loop: \n"
+
+ "movaps (%ebx), %xmm1 \n"
+ "subps %xmm0, %xmm1 \n"
+ "movaps %xmm1, (%edx) \n"
+
+ "movaps 4*T_FLOAT(%ebx), %xmm2 \n"
+ "subps %xmm0, %xmm2 \n"
+ "movaps %xmm2, 4*T_FLOAT(%edx) \n"
+
+ "movaps 8*T_FLOAT(%ebx), %xmm3 \n"
+ "subps %xmm0, %xmm3 \n"
+ "movaps %xmm3, 8*T_FLOAT(%edx) \n"
+
+ "movaps 12*T_FLOAT(%ebx), %xmm4 \n"
+ "subps %xmm0, %xmm4 \n"
+ "movaps %xmm4, 12*T_FLOAT(%edx) \n"
+
+ "addl $64, %ebx \n" /* in += 16 */
+ "addl $64, %edx \n" /* out += 16 */
+ "loop smpsg_loop \n"
+
+ /* return w+5; */
+ "movl -4(%ebp), %ebx \n"
+ "movl -8(%ebp), %esi \n"
+ "movl 8(%ebp), %eax \n"
+ "addl $20, %eax \n"
+ "movl %ebp, %esp \n"
+ "popl %ebp \n"
+ "ret \n"
+
+ ".align 4 \n" /* alignment */
+ ".type scalarminus_perf_sse_gcc, @function\n"
+
+ );
+
+ /* times_perf_sse_gcc (t_int * w) */
+ asm(
+ ".set T_FLOAT,4 \n"
+ ".set T_INT,4 \n"
+
+ /* header */
+ ".text \n"
+ ".align 4 \n" /* alignment */
+
+ ".globl times_perf_sse_gcc \n"
+ "times_perf_sse_gcc: \n"
+
+ /* head of function */
+ "pushl %ebp \n"
+ "movl %esp, %ebp \n"
+ "subl $8, %esp \n"
+ "movl %ebx, -4(%ebp) \n"
+ "movl %esi, -8(%ebp) \n"
+
+ /* get arguments */
+ "movl 8(%ebp), %esi \n" /* argument vector */
+
+ "movl 1*T_INT(%esi), %eax \n" /* in1 */
+ "movl 2*T_INT(%esi), %ebx \n" /* in2 */
+ "movl 3*T_INT(%esi), %edx \n" /* out */
+ "movl 4*T_INT(%esi), %ecx \n" /* n */
+ "shrl $4, %ecx \n" /* divide by 16 */
+ "xorl %esi, %esi \n" /* reset index */
+
+ /* loop: *out = *in1 * *in2 */
+ "tpsg_loop: \n"
+ "movaps (%eax,%esi), %xmm0 \n"
+ "movaps (%ebx,%esi), %xmm1 \n"
+ "mulps %xmm1, %xmm0 \n"
+ "movaps %xmm0, (%edx,%esi) \n"
+
+ "movaps 4*T_FLOAT(%eax,%esi), %xmm2 \n"
+ "movaps 4*T_FLOAT(%ebx,%esi), %xmm3 \n"
+ "mulps %xmm3, %xmm2 \n"
+ "movaps %xmm2, 4*T_FLOAT(%edx,%esi) \n"
+
+ "movaps 8*T_FLOAT(%eax,%esi), %xmm4 \n"
+ "movaps 8*T_FLOAT(%ebx,%esi), %xmm5 \n"
+ "mulps %xmm5, %xmm4 \n"
+ "movaps %xmm4, 8*T_FLOAT(%edx,%esi) \n"
+
+ "movaps 12*T_FLOAT(%eax,%esi), %xmm6 \n"
+ "movaps 12*T_FLOAT(%ebx,%esi), %xmm7 \n"
+ "mulps %xmm7, %xmm6 \n"
+ "movaps %xmm6, 12*T_FLOAT(%edx,%esi) \n"
+
+ "addl $64, %esi \n" /* out += 16; */
+ "loop tpsg_loop \n"
+
+ /* return w+5; */
+ "movl -4(%ebp), %ebx \n"
+ "movl -8(%ebp), %esi \n"
+ "movl 8(%ebp), %eax \n"
+ "addl $20, %eax \n"
+ "movl %ebp, %esp \n"
+ "popl %ebp \n"
+ "ret \n"
+
+
+ ".align 4 \n" /* alignment */
+ ".type times_perf_sse_gcc, @function \n"
+
+ );
+
+ /* scalartimes_perf_sse_gcc(t_int *w) */
+ asm(
+ ".set T_FLOAT,4 \n"
+ ".set T_INT,4 \n"
+
+ /* header */
+ ".text \n"
+ ".align 4 \n" /* alignment */
+
+ ".globl scalartimes_perf_sse_gcc \n"
+ "scalartimes_perf_sse_gcc: \n"
+
+ /* head of function */
+ "pushl %ebp \n"
+ "movl %esp, %ebp \n"
+ "subl $8, %esp \n"
+ "movl %ebx, -4(%ebp) \n"
+ "movl %esi, -8(%ebp) \n"
+
+ /* get arguments */
+ "movl 8(%ebp), %esi \n" /* argument pointer */
+ "movl T_INT(%esi), %ebx \n" /* in */
+ "movl 3*T_INT(%esi), %edx \n" /* out */
+ "movl 2*T_INT(%esi), %eax \n" /* value */
+ "movl 4*T_INT(%esi), %ecx \n" /* n */
+
+ "movss (%eax), %xmm0 \n"
+ "shufps $0, %xmm0, %xmm0 \n"
+ "shrl $4, %ecx \n" /* divide by 16 */
+
+ /* loop: *out = *in * value */
+ "stpsg_loop: \n"
+
+ "movaps (%ebx), %xmm1 \n"
+ "mulps %xmm0, %xmm1 \n"
+ "movaps %xmm1, (%edx) \n"
+
+ "movaps 4*T_FLOAT(%ebx), %xmm2 \n"
+ "mulps %xmm0, %xmm2 \n"
+ "movaps %xmm2, 4*T_FLOAT(%edx) \n"
+
+ "movaps 8*T_FLOAT(%ebx), %xmm3 \n"
+ "mulps %xmm0, %xmm3 \n"
+ "movaps %xmm3, 8*T_FLOAT(%edx) \n"
+
+ "movaps 12*T_FLOAT(%ebx), %xmm4 \n"
+ "mulps %xmm0, %xmm4 \n"
+ "movaps %xmm4, 12*T_FLOAT(%edx) \n"
+
+ "addl $64, %ebx \n" /* in += 16 */
+ "addl $64, %edx \n" /* out += 16 */
+ "loop stpsg_loop \n"
+
+ /* return w+5; */
+ "movl -4(%ebp), %ebx \n"
+ "movl -8(%ebp), %esi \n"
+ "movl 8(%ebp), %eax \n"
+ "addl $20, %eax \n"
+ "movl %ebp, %esp \n"
+ "popl %ebp \n"
+ "ret \n"
+
+ ".align 4 \n" /* alignment */
+ ".type scalartimes_perf_sse_gcc, @function\n"
+
+ );
+
+ /* over_perf_sse_gcc (t_int * w) */
+ asm(
+ ".set T_FLOAT,4 \n"
+ ".set T_INT,4 \n"
+
+ /* header */
+ ".text \n"
+ ".align 4 \n" /* alignment */
+
+ ".globl over_perf_sse_gcc \n"
+ "over_perf_sse_gcc: \n"
+
+ /* head of function */
+ "pushl %ebp \n"
+ "movl %esp, %ebp \n"
+ "subl $8, %esp \n"
+ "movl %ebx, -4(%ebp) \n"
+ "movl %esi, -8(%ebp) \n"
+
+ /* get arguments */
+ "movl 8(%ebp), %esi \n" /* argument vector */
+
+ "movl 1*T_INT(%esi), %eax \n" /* in1 */
+ "movl 2*T_INT(%esi), %ebx \n" /* in2 */
+ "movl 3*T_INT(%esi), %edx \n" /* out */
+ "movl 4*T_INT(%esi), %ecx \n" /* n */
+ "shrl $4, %ecx \n" /* divide by 16 */
+ "xorl %esi, %esi \n" /* reset index */
+
+ /* loop: *out = *in1 / *in2 */
+ "opsg_loop: \n"
+ "movaps (%eax,%esi), %xmm0 \n"
+ "movaps (%ebx,%esi), %xmm1 \n"
+ "divps %xmm1, %xmm0 \n"
+ "movaps %xmm0, (%edx,%esi) \n"
+
+ "movaps 4*T_FLOAT(%eax,%esi), %xmm2 \n"
+ "movaps 4*T_FLOAT(%ebx,%esi), %xmm3 \n"
+ "divps %xmm3, %xmm2 \n"
+ "movaps %xmm2, 4*T_FLOAT(%edx,%esi) \n"
+
+ "movaps 8*T_FLOAT(%eax,%esi), %xmm4 \n"
+ "movaps 8*T_FLOAT(%ebx,%esi), %xmm5 \n"
+ "divps %xmm5, %xmm4 \n"
+ "movaps %xmm4, 8*T_FLOAT(%edx,%esi) \n"
+
+ "movaps 12*T_FLOAT(%eax,%esi), %xmm6 \n"
+ "movaps 12*T_FLOAT(%ebx,%esi), %xmm7 \n"
+ "divps %xmm7, %xmm6 \n"
+ "movaps %xmm6, 12*T_FLOAT(%edx,%esi) \n"
+
+ "addl $64, %esi \n" /* out += 16; */
+ "loop opsg_loop \n"
+
+ /* return w+5; */
+ "movl -4(%ebp), %ebx \n"
+ "movl -8(%ebp), %esi \n"
+ "movl 8(%ebp), %eax \n"
+ "addl $20, %eax \n"
+ "movl %ebp, %esp \n"
+ "popl %ebp \n"
+ "ret \n"
+
+
+ ".align 4 \n" /* alignment */
+ ".type over_perf_sse_gcc, @function \n"
+
+ );
+
+ /* scalarover_perf_sse_gcc(t_int *w) */
+ asm(
+ ".set T_FLOAT,4 \n"
+ ".set T_INT,4 \n"
+
+ /* header */
+ ".text \n"
+ ".align 4 \n" /* alignment */
+
+ ".globl scalarover_perf_sse_gcc \n"
+ "scalarover_perf_sse_gcc: \n"
+
+ /* head of function */
+ "pushl %ebp \n"
+ "movl %esp, %ebp \n"
+ "subl $8, %esp \n"
+ "movl %ebx, -4(%ebp) \n"
+ "movl %esi, -8(%ebp) \n"
+
+ /* get arguments */
+ "movl 8(%ebp), %esi \n" /* argument pointer */
+ "movl T_INT(%esi), %ebx \n" /* in */
+ "movl 3*T_INT(%esi), %edx \n" /* out */
+ "movl 2*T_INT(%esi), %eax \n" /* value */
+ "movl 4*T_INT(%esi), %ecx \n" /* n */
+
+ "movss (%eax), %xmm0 \n"
+ "shufps $0, %xmm0, %xmm0 \n"
+ "shrl $4, %ecx \n" /* divide by 16 */
+
+ /* loop: *out = *in / value */
+ "sopsg_loop: \n"
+
+ "movaps (%ebx), %xmm1 \n"
+ "divps %xmm0, %xmm1 \n"
+ "movaps %xmm1, (%edx) \n"
+
+ "movaps 4*T_FLOAT(%ebx), %xmm2 \n"
+ "divps %xmm0, %xmm2 \n"
+ "movaps %xmm2, 4*T_FLOAT(%edx) \n"
+
+ "movaps 8*T_FLOAT(%ebx), %xmm3 \n"
+ "divps %xmm0, %xmm3 \n"
+ "movaps %xmm3, 8*T_FLOAT(%edx) \n"
+
+ "movaps 12*T_FLOAT(%ebx), %xmm4 \n"
+ "divps %xmm0, %xmm4 \n"
+ "movaps %xmm4, 12*T_FLOAT(%edx) \n"
+
+ "addl $64, %ebx \n" /* in += 16 */
+ "addl $64, %edx \n" /* out += 16 */
+ "loop sopsg_loop \n"
+
+ /* return w+5; */
+ "movl -4(%ebp), %ebx \n"
+ "movl -8(%ebp), %esi \n"
+ "movl 8(%ebp), %eax \n"
+ "addl $20, %eax \n"
+ "movl %ebp, %esp \n"
+ "popl %ebp \n"
+ "ret \n"
+
+ ".align 4 \n" /* alignment */
+ ".type scalarover_perf_sse_gcc, @function \n"
+
+ );
+
+ /* min_perf_sse_gcc (t_int * w) */
+ asm(
+ ".set T_FLOAT,4 \n"
+ ".set T_INT,4 \n"
+
+ /* header */
+ ".text \n"
+ ".align 4 \n" /* alignment */
+
+ ".globl min_perf_sse_gcc \n"
+ "min_perf_sse_gcc: \n"
+
+ /* head of function */
+ "pushl %ebp \n"
+ "movl %esp, %ebp \n"
+ "subl $8, %esp \n"
+ "movl %ebx, -4(%ebp) \n"
+ "movl %esi, -8(%ebp) \n"
+
+ /* get arguments */
+ "movl 8(%ebp), %esi \n" /* argument vector */
+
+ "movl 1*T_INT(%esi), %eax \n" /* in1 */
+ "movl 2*T_INT(%esi), %ebx \n" /* in2 */
+ "movl 3*T_INT(%esi), %edx \n" /* out */
+ "movl 4*T_INT(%esi), %ecx \n" /* n */
+ "shrl $4, %ecx \n" /* divide by 16 */
+ "xorl %esi, %esi \n" /* reset index */
+
+ /* loop: *out = min(*in1, *in2) */
+ "minpsg_loop: \n"
+ "movaps (%eax,%esi), %xmm0 \n"
+ "movaps (%ebx,%esi), %xmm1 \n"
+ "minps %xmm1, %xmm0 \n"
+ "movaps %xmm0, (%edx,%esi) \n"
+
+ "movaps 4*T_FLOAT(%eax,%esi), %xmm2 \n"
+ "movaps 4*T_FLOAT(%ebx,%esi), %xmm3 \n"
+ "minps %xmm3, %xmm2 \n"
+ "movaps %xmm2, 4*T_FLOAT(%edx,%esi) \n"
+
+ "movaps 8*T_FLOAT(%eax,%esi), %xmm4 \n"
+ "movaps 8*T_FLOAT(%ebx,%esi), %xmm5 \n"
+ "minps %xmm5, %xmm4 \n"
+ "movaps %xmm4, 8*T_FLOAT(%edx,%esi) \n"
+
+ "movaps 12*T_FLOAT(%eax,%esi), %xmm6 \n"
+ "movaps 12*T_FLOAT(%ebx,%esi), %xmm7 \n"
+ "minps %xmm7, %xmm6 \n"
+ "movaps %xmm6, 12*T_FLOAT(%edx,%esi) \n"
+
+ "addl $64, %esi \n" /* out += 16; */
+ "loop minpsg_loop \n"
+
+ /* return w+5; */
+ "movl -4(%ebp), %ebx \n"
+ "movl -8(%ebp), %esi \n"
"movl 8(%ebp), %eax \n" + "addl $20, %eax \n" + "movl %ebp, %esp \n" + "popl %ebp \n" + "ret \n" + + + ".align 4 \n" /* alignment */ + ".type min_perf_sse_gcc, @function \n" + + ); + + /* scalarmin_perf_sse_gcc(t_int *w) */ + asm( + ".set T_FLOAT,4 \n" + ".set T_INT,4 \n" + + /* header */ + ".text \n" + ".align 4 \n" /* alignment */ + + ".globl scalarmin_perf_sse_gcc \n" + "scalarmin_perf_sse_gcc: \n" + + /* head of function */ + "pushl %ebp \n" + "movl %esp, %ebp \n" + "subl $8, %esp \n" + "movl %ebx, -4(%ebp) \n" + "movl %esi, -8(%ebp) \n" + + /* get arguments */ + "movl 8(%ebp), %esi \n" /* argument pointer */ + "movl T_INT(%esi), %ebx \n" /* in */ + "movl 3*T_INT(%esi), %edx \n" /* out */ + "movl 2*T_INT(%esi), %eax \n" /* value */ + "movl 4*T_INT(%esi), %ecx \n" /* n */ + + "movss (%eax), %xmm0 \n" + "shufps $0, %xmm0, %xmm0 \n" + "shrl $4, %ecx \n" /* divide by 16 */ + + /* loop: *out = min(*in,value) */ + "sminopsg_loop: \n" + + "movaps (%ebx), %xmm1 \n" + "minps %xmm0, %xmm1 \n" + "movaps %xmm1, (%edx) \n" + + "movaps 4*T_FLOAT(%ebx), %xmm2 \n" + "minps %xmm0, %xmm2 \n" + "movaps %xmm2, 4*T_FLOAT(%edx) \n" + + "movaps 8*T_FLOAT(%ebx), %xmm3 \n" + "minps %xmm0, %xmm3 \n" + "movaps %xmm3, 8*T_FLOAT(%edx) \n" + + "movaps 12*T_FLOAT(%ebx), %xmm4 \n" + "minps %xmm0, %xmm4 \n" + "movaps %xmm4, 12*T_FLOAT(%edx) \n" + + "addl $64, %ebx \n" /* in += 16 */ + "addl $64, %edx \n" /* out += 16 */ + "loop sminopsg_loop \n" + + /* return w+5; */ + "movl -4(%ebp), %ebx \n" + "movl -8(%ebp), %esi \n" + "movl 8(%ebp), %eax \n" + "addl $20, %eax \n" + "movl %ebp, %esp \n" + "popl %ebp \n" + "ret \n" + + ".align 4 \n" /* alignment */ + ".type scalarmin_perf_sse_gcc, @function \n" + + ); + + /* max_perf_sse_gcc (t_int * w)*/ + asm( + ".set T_FLOAT,4 \n" + ".set T_INT,4 \n" + + /* header */ + ".text \n" + ".align 4 \n" /* alignment */ + + ".globl max_perf_sse_gcc \n" + "max_perf_sse_gcc: \n" + + /* head of function */ + "pushl %ebp \n" + "movl %esp, %ebp \n" + "subl $8, %esp \n" + "movl %ebx, -4(%ebp) \n" + "movl %esi, -8(%ebp) \n" + + /* get arguments */ + "movl 8(%ebp), %esi \n" /* argument vector */ + + "movl 1*T_INT(%esi), %eax \n" /* in1 */ + "movl 2*T_INT(%esi), %ebx \n" /* in2 */ + "movl 3*T_INT(%esi), %edx \n" /* out */ + "movl 4*T_INT(%esi), %ecx \n" /* n */ + "shrl $4, %ecx \n" /* divide by 16 */ + "xorl %esi, %esi \n" /* reset index */ + + /* loop: *out = max(*in1, *in2) */ + "maxpsg_loop: \n" + "movaps (%eax,%esi), %xmm0 \n" + "movaps (%ebx,%esi), %xmm1 \n" + "maxps %xmm1, %xmm0 \n" + "movaps %xmm0, (%edx,%esi) \n" + + "movaps 4*T_FLOAT(%eax,%esi), %xmm2 \n" + "movaps 4*T_FLOAT(%ebx,%esi), %xmm3 \n" + "maxps %xmm3, %xmm2 \n" + "movaps %xmm2, 4*T_FLOAT(%edx,%esi) \n" + + "movaps 8*T_FLOAT(%eax,%esi), %xmm4 \n" + "movaps 8*T_FLOAT(%ebx,%esi), %xmm5 \n" + "maxps %xmm5, %xmm4 \n" + "movaps %xmm4, 8*T_FLOAT(%edx,%esi) \n" + + "movaps 12*T_FLOAT(%eax,%esi), %xmm6 \n" + "movaps 12*T_FLOAT(%ebx,%esi), %xmm7 \n" + "maxps %xmm7, %xmm6 \n" + "movaps %xmm6, 12*T_FLOAT(%edx,%esi) \n" + + "addl $64, %esi \n" /* out+=16; */ + "loop maxpsg_loop \n" + + /* return w+5; */ + "movl -4(%ebp), %ebx \n" + "movl -8(%ebp), %esi \n" + "movl 8(%ebp), %eax \n" + "addl $20, %eax \n" + "movl %ebp, %esp \n" + "popl %ebp \n" + "ret \n" + + + ".align 4 \n" /* alignment */ + ".type max_perf_sse_gcc, @function \n" + + ); + + /* scalarmax_perf_sse_gcc(t_int *w) */ + asm( + ".set T_FLOAT,4 \n" + ".set T_INT,4 \n" + + /* header */ + ".text \n" + ".align 4 \n" /* alignment */ + + ".globl scalarmax_perf_sse_gcc \n" + 
"scalarmax_perf_sse_gcc: \n" + + /* head of function */ + "pushl %ebp \n" + "movl %esp, %ebp \n" + "subl $8, %esp \n" + "movl %ebx, -4(%ebp) \n" + "movl %esi, -8(%ebp) \n" + + /* get arguments */ + "movl 8(%ebp), %esi \n" /* argument pointer */ + "movl T_INT(%esi), %ebx \n" /* in */ + "movl 3*T_INT(%esi), %edx \n" /* out */ + "movl 2*T_INT(%esi), %eax \n" /* value */ + "movl 4*T_INT(%esi), %ecx \n" /* n */ + + "movss (%eax), %xmm0 \n" + "shufps $0, %xmm0, %xmm0 \n" + "shrl $4, %ecx \n" /* divide by 16 */ + + /* loop: *out = max(*in,value) */ + "smaxopsg_loop: \n" + + "movaps (%ebx), %xmm1 \n" + "maxps %xmm0, %xmm1 \n" + "movaps %xmm1, (%edx) \n" + + "movaps 4*T_FLOAT(%ebx), %xmm2 \n" + "maxps %xmm0, %xmm2 \n" + "movaps %xmm2, 4*T_FLOAT(%edx) \n" + + "movaps 8*T_FLOAT(%ebx), %xmm3 \n" + "maxps %xmm0, %xmm3 \n" + "movaps %xmm3, 8*T_FLOAT(%edx) \n" + + "movaps 12*T_FLOAT(%ebx), %xmm4 \n" + "maxps %xmm0, %xmm4 \n" + "movaps %xmm4, 12*T_FLOAT(%edx) \n" + + "addl $64, %ebx \n" /* in += 16 */ + "addl $64, %edx \n" /* out += 16 */ + "loop smaxopsg_loop \n" + + /* return w+5; */ + "movl -4(%ebp), %ebx \n" + "movl -8(%ebp), %esi \n" + "movl 8(%ebp), %eax \n" + "addl $20, %eax \n" + "movl %ebp, %esp \n" + "popl %ebp \n" + "ret \n" + + ".align 4 \n" /* alignment */ + ".type scalarmax_perf_sse_gcc, @function \n" + + ); + + #endif +
Index: m_simd_sse_gcc.h
===================================================================
RCS file: /cvsroot/pure-data/pd/src/Attic/m_simd_sse_gcc.h,v
retrieving revision 1.1.2.3
retrieving revision 1.1.2.4
diff -C2 -d -r1.1.2.3 -r1.1.2.4
*** m_simd_sse_gcc.h	29 Dec 2003 02:01:57 -0000	1.1.2.3
--- m_simd_sse_gcc.h	10 Jul 2004 19:57:21 -0000	1.1.2.4
***************
*** 33,58 ****
  /* functions in d_ugen.c */
! #define zero_perf_simd zero_perf8 /* SIMD not implemented */
  /* functions in d_dac.c */
! #define copy_perf_simd copy_perf8 /* SIMD not implemented */
  /* functions in d_ctl.c */
! #define sig_tilde_perf_simd sig_tilde_perf8 /* SIMD not implemented */
  /* functions in d_arithmetic.c */
! #define plus_perf_simd plus_perf8 /* SIMD not implemented */
! #define scalarplus_perf_simd scalarplus_perf8 /* SIMD not implemented */
! #define minus_perf_simd minus_perf8 /* SIMD not implemented */
! #define scalarminus_perf_simd scalarminus_perf8 /* SIMD not implemented */
! #define times_perf_simd times_perf8 /* SIMD not implemented */
! #define scalartimes_perf_simd scalartimes_perf8 /* SIMD not implemented */
  #define sqr_perf_simd sqr_perf8 /* SIMD not implemented */
! #define over_perf_simd over_perf8 /* SIMD not implemented */
! #define scalarover_perf_simd scalarover_perf8 /* SIMD not implemented */
! #define min_perf_simd min_perf8 /* SIMD not implemented */
! #define scalarmin_perf_simd scalarmin_perf8 /* SIMD not implemented */
! #define max_perf_simd max_perf8 /* SIMD not implemented */
! #define scalarmax_perf_simd scalarmax_perf8 /* SIMD not implemented */
  /* functions in d_math.c */
--- 33,58 ----
  /* functions in d_ugen.c */
! #define zero_perf_simd zero_perf_sse_gcc
  /* functions in d_dac.c */
! #define copy_perf_simd copy_perf_sse_gcc
  /* functions in d_ctl.c */
! #define sig_tilde_perf_simd sig_tilde_perf_sse_gcc
  /* functions in d_arithmetic.c */
! #define plus_perf_simd plus_perf_sse_gcc
! #define scalarplus_perf_simd scalarplus_perf_sse_gcc
! #define minus_perf_simd minus_perf_sse_gcc
! #define scalarminus_perf_simd scalarminus_perf_sse_gcc
! #define times_perf_simd times_perf_sse_gcc
! #define scalartimes_perf_simd scalartimes_perf_sse_gcc
  #define sqr_perf_simd sqr_perf8 /* SIMD not implemented */
! #define over_perf_simd over_perf_sse_gcc
! #define scalarover_perf_simd scalarover_perf_sse_gcc
! #define min_perf_simd min_perf_sse_gcc
! #define scalarmin_perf_simd scalarmin_perf_sse_gcc
! #define max_perf_simd max_perf_sse_gcc
! #define scalarmax_perf_simd scalarmax_perf_sse_gcc
/* functions in d_math.c */
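The scalar* macros above now resolve to the new SSE routines, which all
broadcast the scalar once (movss followed by shufps $0, i.e. _mm_set1_ps)
and then stream through the vector. Under the same assumptions as the
sketches above, scalartimes_perf_sse_gcc corresponds roughly to:

    t_int *scalartimes_perf_sse_c(t_int *w)    /* hypothetical name */
    {
        const t_float *in = (const t_float *)w[1];
        __m128 g = _mm_set1_ps(*(t_float *)w[2]);   /* broadcast value */
        t_float *out = (t_float *)w[3];
        t_int n = w[4];
        for (; n > 0; n -= 16, in += 16, out += 16)
        {
            _mm_store_ps(out,      _mm_mul_ps(_mm_load_ps(in),      g));
            _mm_store_ps(out +  4, _mm_mul_ps(_mm_load_ps(in +  4), g));
            _mm_store_ps(out +  8, _mm_mul_ps(_mm_load_ps(in +  8), g));
            _mm_store_ps(out + 12, _mm_mul_ps(_mm_load_ps(in + 12), g));
        }
        return w + 5;
    }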