Update of /cvsroot/pure-data/externals/grill/flext/source
In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv27306/source

Modified Files:
	flsimd.cpp 
Log Message:
Fixes for Mac
fixed autoconf files
updates for batch mode
some more SIMD optimized functions
Index: flsimd.cpp
===================================================================
RCS file: /cvsroot/pure-data/externals/grill/flext/source/flsimd.cpp,v
retrieving revision 1.17
retrieving revision 1.18
diff -C2 -d -r1.17 -r1.18
*** flsimd.cpp	26 Jan 2005 05:02:01 -0000	1.17
--- flsimd.cpp	27 Jan 2005 04:57:26 -0000	1.18
***************
*** 247,250 ****
--- 247,280 ----
      return feature;
  }
+ 
+ inline bool IsVectorAligned(const void *where) 
+ {
+     return (reinterpret_cast<size_t>(where)&(__alignof(__m128)-1)) == 0; 
+ }
+ 
+ inline bool VectorsAligned(const void *v1,const void *v2) 
+ {
+     return (
+         (reinterpret_cast<size_t>(v1)|reinterpret_cast<size_t>(v2))
+         &(__alignof(__m128)-1)
+     ) == 0; 
+ }
+ 
+ inline bool VectorsAligned(const void *v1,const void *v2,const void *v3) 
+ {
+     return (
+         (reinterpret_cast<size_t>(v1)|reinterpret_cast<size_t>(v2)|reinterpret_cast<size_t>(v3))
+         &(__alignof(__m128)-1)
+     ) == 0; 
+ }
+ 
+ inline bool VectorsAligned(const void *v1,const void *v2,const void *v3,const void *v4) 
+ {
+     return (
+         (reinterpret_cast<size_t>(v1)|reinterpret_cast<size_t>(v2)|reinterpret_cast<size_t>(v3)|reinterpret_cast<size_t>(v4))
+         &(__alignof(__m128)-1)
+     ) == 0; 
+ }
+ 
  #else
  // not MSVC
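
The IsVectorAligned()/VectorsAligned() helpers added above boil down to masking the low address bits against the 16-byte SIMD alignment; OR-ing several addresses first lets a single mask test cover all of them. A minimal standalone sketch of the same test (illustrative names, not the flext API):

#include <cstddef>

static const size_t VECALIGN = 16;   // __alignof(__m128) resp. sizeof(vector float)

// a pointer is fit for aligned SIMD loads iff its low 4 address bits are zero
inline bool is_vec_aligned(const void *p)
{
    return (reinterpret_cast<size_t>(p) & (VECALIGN-1)) == 0;
}

// OR the addresses, then one mask test covers both pointers at once
inline bool vecs_aligned(const void *a,const void *b)
{
    return ((reinterpret_cast<size_t>(a)|reinterpret_cast<size_t>(b)) & (VECALIGN-1)) == 0;
}
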
***************
*** 348,351 ****
--- 378,405 ----
  }
  
+ inline bool VectorsAligned(const void *v1,const void *v2) 
+ {
+     return (
+         (reinterpret_cast<size_t>(v1)|reinterpret_cast<size_t>(v2))
+         &(sizeof(vector float)-1)
+     ) == 0; 
+ }
+ 
+ inline bool VectorsAligned(const void *v1,const void *v2,const void *v3) 
+ {
+     return (
+         (reinterpret_cast<size_t>(v1)|reinterpret_cast<size_t>(v2)|reinterpret_cast<size_t>(v3))
+         &(sizeof(vector float)-1)
+     ) == 0; 
+ }
+ 
+ inline bool VectorsAligned(const void *v1,const void *v2,const void *v3,const void *v4) 
+ {
+     return (
+         (reinterpret_cast<size_t>(v1)|reinterpret_cast<size_t>(v2)|reinterpret_cast<size_t>(v3)|reinterpret_cast<size_t>(v4))
+         &(sizeof(vector float)-1)
+     ) == 0; 
+ }
+ 
  inline vector float LoadValue(const float &f)
  {
***************
*** 386,391 ****
  }
  
!     if((reinterpret_cast<size_t>(src)&(__alignof(__m128)-1)) == 0) {
!         if((reinterpret_cast<size_t>(dst)&(__alignof(__m128)-1)) == 0) {
              // aligned src, aligned dst
              __asm {
--- 440,445 ----
  }
  
!     if(IsVectorAligned(src)) {
!         if(IsVectorAligned(dst)) {
              // aligned src, aligned dst
              __asm {
***************
*** 435,439 ****
      }
      else {
!         if((reinterpret_cast<size_t>(dst)&(__alignof(__m128)-1)) == 0) {
              // unaligned src, aligned dst
              __asm {
--- 489,493 ----
      }
      else {
!         if(IsVectorAligned(dst)) {
              // unaligned src, aligned dst
              __asm {
***************
*** 639,660 ****
  
      for(; n--; src += 16,dst += 16) {
!         vector float a1 = vec_ld( 0,src);
!         vector float a2 = vec_ld(16,src);
!         vector float a3 = vec_ld(32,src);
!         vector float a4 = vec_ld(48,src);
! 
!         a1 = vec_madd(a1,argmul,argadd);
!         a2 = vec_madd(a2,argmul,argadd);
!         a3 = vec_madd(a3,argmul,argadd);
!         a4 = vec_madd(a4,argmul,argadd);
! 
!         vec_st(a1, 0,dst);
!         vec_st(a2,16,dst);
!         vec_st(a3,32,dst);
!         vec_st(a4,48,dst);
      }
  
      while(cnt--) *(dst++) = *(src++)*opmul+opadd;
  }
  
  #endif
--- 693,735 ----
  
      for(; n--; src += 16,dst += 16) {
!         vec_st(vec_madd(vec_ld( 0,src),argmul,argadd), 0,dst);
!         vec_st(vec_madd(vec_ld(16,src),argmul,argadd),16,dst);
!         vec_st(vec_madd(vec_ld(32,src),argmul,argadd),32,dst);
!         vec_st(vec_madd(vec_ld(48,src),argmul,argadd),48,dst);
      }
  
      while(cnt--) *(dst++) = *(src++)*opmul+opadd;
  }
+ 
+ static void ScaleAltivec(t_sample *dst,const t_sample *src,t_sample opmul,const t_sample *add,int cnt) 
+ {
+     const vector float argmul = LoadValue(opmul);
+     int n = cnt>>4;
+     cnt -= n<<4;
+ 
+     for(; n--; src += 16,dst += 16,add += 16) {
+         vec_st(vec_madd(vec_ld( 0,src),argmul,vec_ld( 0,add)), 0,dst);
+         vec_st(vec_madd(vec_ld(16,src),argmul,vec_ld(16,add)),16,dst);
+         vec_st(vec_madd(vec_ld(32,src),argmul,vec_ld(32,add)),32,dst);
+         vec_st(vec_madd(vec_ld(48,src),argmul,vec_ld(48,add)),48,dst);
+     }
+ 
+     while(cnt--) *(dst++) = *(src++) * opmul + *(add++);
+ }
+ 
+ static void ScaleAltivec(t_sample *dst,const t_sample *src,const t_sample *mul,const t_sample *add,int cnt) 
+ {
+     int n = cnt>>4;
+     cnt -= n<<4;
+ 
+     for(; n--; src += 16,dst += 16,mul += 16,add += 16) {
+         vec_st(vec_madd(vec_ld( 0,src),vec_ld( 0,mul),vec_ld( 0,add)), 0,dst);
+         vec_st(vec_madd(vec_ld(16,src),vec_ld(16,mul),vec_ld(16,add)),16,dst);
+         vec_st(vec_madd(vec_ld(32,src),vec_ld(32,mul),vec_ld(32,add)),32,dst);
+         vec_st(vec_madd(vec_ld(48,src),vec_ld(48,mul),vec_ld(48,add)),48,dst);
+     }
+ 
+     while(cnt--) *(dst++) = *(src++) * *(mul++) + *(add++);
+ }
  
  #endif
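
The new ScaleAltivec() overloads compute dst[i] = src[i]*mul + add[i] sixteen samples per loop iteration, with a scalar loop picking up the remainder. A plain C++ reference model of what the vec_madd kernels do (sketch only, hypothetical name, not part of flext):

// reference model of the blocked scale-and-add kernel:
// full 16-sample blocks first, then a scalar tail for the rest
static void scale_ref(float *dst,const float *src,float mul,const float *add,int cnt)
{
    int n = cnt>>4;     // number of complete 16-sample blocks
    cnt -= n<<4;        // samples left over for the tail loop

    for(; n--; src += 16,dst += 16,add += 16)
        for(int i = 0; i < 16; ++i)
            dst[i] = src[i]*mul+add[i];   // per-lane equivalent of vec_madd

    while(cnt--) *(dst++) = *(src++)*mul+*(add++);
}
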
***************
*** 683,687 ****
  }
  
!     if((reinterpret_cast<size_t>(dst)&(__alignof(__m128)-1)) == 0) {
          // aligned version
          __asm {
--- 758,762 ----
  }
  
!     if(IsVectorAligned(dst)) {
          // aligned version
          __asm {
***************
*** 769,775 ****
  }
  
!     if((reinterpret_cast<size_t>(src)&(__alignof(__m128)-1)) == 0
!         && (reinterpret_cast<size_t>(dst)&(__alignof(__m128)-1)) == 0
!     ) {
          // aligned version
          __asm {
--- 844,848 ----
  }
  
!     if(VectorsAligned(src,dst)) {
          // aligned version
          __asm {
***************
*** 843,847 ****
      else
  #elif FLEXT_CPU == FLEXT_CPU_PPC && defined(__VEC__)
!     if(GetSIMDCapabilities()&simd_altivec && IsVectorAligned(src) && IsVectorAligned(dst)) 
          MulAltivec(dst,src,op,cnt);
      else
--- 916,920 ----
      else
  #elif FLEXT_CPU == FLEXT_CPU_PPC && defined(__VEC__)
!     if(GetSIMDCapabilities()&simd_altivec && VectorsAligned(src,dst)) 
          MulAltivec(dst,src,op,cnt);
      else
***************
*** 905,912 ****
  }
  
!     if((reinterpret_cast<size_t>(src)&(__alignof(__m128)-1)) == 0
!         && (reinterpret_cast<size_t>(dst)&(__alignof(__m128)-1)) == 0
!     ) {
!         if((reinterpret_cast<size_t>(op)&(__alignof(__m128)-1)) == 0) {
              __asm {
                  mov     ecx,[n]
--- 978,983 ----
  }
  
!     if(VectorsAligned(src,dst)) {
!         if(IsVectorAligned(op)) {
              __asm {
                  mov     ecx,[n]
***************
*** 986,990 ****
          }
          else {
!             if((reinterpret_cast<size_t>(op)&(__alignof(__m128)-1)) == 0) {
                  __asm {
                      mov     ecx,[n]
--- 1057,1061 ----
          }
          else {
!             if(IsVectorAligned(op)) {
                  __asm {
                      mov     ecx,[n]
***************
*** 1073,1077 ****
      else
  #elif FLEXT_CPU == FLEXT_CPU_PPC && defined(__VEC__)
!     if(GetSIMDCapabilities()&simd_altivec && IsVectorAligned(src) && IsVectorAligned(op) && IsVectorAligned(dst)) 
          MulAltivec(dst,src,op,cnt);
      else
--- 1144,1148 ----
      else
  #elif FLEXT_CPU == FLEXT_CPU_PPC && defined(__VEC__)
!     if(GetSIMDCapabilities()&simd_altivec && VectorsAligned(src,op,dst)) 
          MulAltivec(dst,src,op,cnt);
      else
***************
*** 1135,1141 ****
  }
  
!     if((reinterpret_cast<size_t>(src)&(__alignof(__m128)-1)) == 0
!         && (reinterpret_cast<size_t>(dst)&(__alignof(__m128)-1)) == 0
!     ) {
          // aligned version
          __asm {
--- 1206,1210 ----
  }
  
!     if(VectorsAligned(src,dst)) {
          // aligned version
          __asm {
***************
*** 1203,1207 ****
      else
  #elif FLEXT_CPU == FLEXT_CPU_PPC && defined(__VEC__)
!     if(GetSIMDCapabilities()&simd_altivec && IsVectorAligned(src) && IsVectorAligned(dst)) 
          AddAltivec(dst,src,op,cnt);
      else
--- 1272,1276 ----
      else
  #elif FLEXT_CPU == FLEXT_CPU_PPC && defined(__VEC__)
!     if(GetSIMDCapabilities()&simd_altivec && VectorsAligned(src,dst)) 
          AddAltivec(dst,src,op,cnt);
      else
***************
*** 1264,1271 ****
      cnt -= n<<4;
  
!     if((reinterpret_cast<size_t>(src)&(__alignof(__m128)-1)) == 0
!         && (reinterpret_cast<size_t>(dst)&(__alignof(__m128)-1)) == 0
!     ) {
!         if((reinterpret_cast<size_t>(op)&(__alignof(__m128)-1)) == 0) {
              __asm {
                  mov     ecx,dword ptr [n]
--- 1333,1338 ----
      cnt -= n<<4;
  
!     if(VectorsAligned(src,dst)) {
!         if(IsVectorAligned(op)) {
              __asm {
                  mov     ecx,dword ptr [n]
***************
*** 1345,1349 ****
          }
          else {
!             if((reinterpret_cast<size_t>(op)&(__alignof(__m128)-1)) == 0) {
                  __asm {
                      mov     ecx,dword ptr [n]
--- 1412,1416 ----
          }
          else {
!             if(IsVectorAligned(op)) {
                  __asm {
                      mov     ecx,dword ptr [n]
***************
*** 1431,1435 ****
      else
  #elif FLEXT_CPU == FLEXT_CPU_PPC && defined(__VEC__)
!     if(GetSIMDCapabilities()&simd_altivec && IsVectorAligned(src) && IsVectorAligned(op) && IsVectorAligned(dst)) 
          AddAltivec(dst,src,op,cnt);
      else
--- 1498,1502 ----
      else
  #elif FLEXT_CPU == FLEXT_CPU_PPC && defined(__VEC__)
!     if(GetSIMDCapabilities()&simd_altivec && VectorsAligned(src,op,dst)) 
          AddAltivec(dst,src,op,cnt);
      else
***************
*** 1497,1503 ****
  }
  
!     if((reinterpret_cast<size_t>(src)&(__alignof(__m128)-1)) == 0
!         && (reinterpret_cast<size_t>(dst)&(__alignof(__m128)-1)) == 0
!     ) {
          // aligned version
          __asm {
--- 1564,1568 ----
  }
  
!     if(VectorsAligned(src,dst)) {
          // aligned version
          __asm {
***************
*** 1573,1577 ****
      else
  #elif FLEXT_CPU == FLEXT_CPU_PPC && defined(__VEC__)
!     if(GetSIMDCapabilities()&simd_altivec && IsVectorAligned(src) && IsVectorAligned(dst)) 
          ScaleAltivec(dst,src,opmul,opadd,cnt);
      else
--- 1638,1642 ----
      else
  #elif FLEXT_CPU == FLEXT_CPU_PPC && defined(__VEC__)
!     if(GetSIMDCapabilities()&simd_altivec && VectorsAligned(src,dst)) 
          ScaleAltivec(dst,src,opmul,opadd,cnt);
      else
***************
*** 1593,1602 ****
  }
! void flext::ScaleSamples(t_sample *dst,const t_sample *src,t_sample opmul,const t_sample *add,int cnt) 
  {
      {
          int n = cnt>>3;
          cnt -= n<<3;
  
!         if(dst == add) {
              while(n--) {
                  dst[0] += src[0]*opmul; dst[1] += src[1]*opmul; 
--- 1658,1790 ----
! void flext::ScaleSamples(t_sample *dst,const t_sample *src,t_sample opmul,const t_sample *opadd,int cnt) 
  {
+ #ifdef FLEXT_USE_IPP
+     if(sizeof(t_sample) == 4) {
+         ippsMulC_32f((const float *)src,(float)opmul,(float *)dst,cnt); 
+         ippsAdd_32f_I((float *)opadd,(float *)dst,cnt); 
+     }
+     else if(sizeof(t_sample) == 8) {
+         ippsMulC_64f((const double *)src,(double)opmul,(double *)dst,cnt); 
+         ippsAdd_64f_I((double *)opadd,(double *)dst,cnt); 
+     }
+     else
+         ERRINTERNAL();
+ #else
+ #ifdef FLEXT_USE_SIMD
+ #ifdef _MSC_VER
+     if(GetSIMDCapabilities()&simd_sse) {
+         // single precision
+         int n = cnt>>4;
+         cnt -= n<<4;
+ 
+         __asm {
+             mov         eax,dword ptr [src]
+             prefetcht0  [eax+0]
+             prefetcht0  [eax+32]
+ 
+             movss       xmm0,xmmword ptr [opmul]
+             shufps      xmm0,xmm0,0
+         }
+ 
+         if(VectorsAligned(src,dst,opadd)) {
+             // aligned version
+             __asm {
+                 mov         ecx,dword ptr [n]
+                 mov         eax,dword ptr [src]
+                 mov         edx,dword ptr [dst]
+                 mov         ebx,dword ptr [opadd]
+ loopa:
+                 prefetcht0  [eax+64]
+                 prefetcht0  [ebx+64]
+                 prefetcht0  [eax+96]
+                 prefetcht0  [ebx+96]
+ 
+                 movaps      xmm2,xmmword ptr[eax]
+                 movaps      xmm1,xmmword ptr[ebx]
+                 mulps       xmm2,xmm0
+                 addps       xmm2,xmm1
+                 movaps      xmmword ptr[edx],xmm2
+ 
+                 movaps      xmm3,xmmword ptr[eax+4*4]
+                 movaps      xmm1,xmmword ptr[ebx+4*4]
+                 mulps       xmm3,xmm0
+                 addps       xmm3,xmm1
+                 movaps      xmmword ptr[edx+4*4],xmm3
+ 
+                 movaps      xmm4,xmmword ptr[eax+8*4]
+                 movaps      xmm1,xmmword ptr[ebx+8*4]
+                 mulps       xmm4,xmm0
+                 addps       xmm4,xmm1
+                 movaps      xmmword ptr[edx+8*4],xmm4
+ 
+                 movaps      xmm5,xmmword ptr[eax+12*4]
+                 movaps      xmm1,xmmword ptr[ebx+12*4]
+                 mulps       xmm5,xmm0
+                 addps       xmm5,xmm1
+                 movaps      xmmword ptr[edx+12*4],xmm5
+ 
+                 add         eax,16*4
+                 add         edx,16*4
+                 add         ebx,16*4
+                 loop        loopa
+             }
+         }
+         else {
+             // unaligned version
+             __asm {
+                 mov         ecx,dword ptr [n]
+                 mov         eax,dword ptr [src]
+                 mov         edx,dword ptr [dst]
+                 mov         ebx,dword ptr [opadd]
+ loopu:
+                 prefetcht0  [eax+64]
+                 prefetcht0  [ebx+64]
+                 prefetcht0  [eax+96]
+                 prefetcht0  [ebx+96]
+ 
+                 movups      xmm2,xmmword ptr[eax]
+                 movups      xmm1,xmmword ptr[ebx]
+                 mulps       xmm2,xmm0
+                 addps       xmm2,xmm1
+                 movups      xmmword ptr[edx],xmm2
+ 
+                 movups      xmm3,xmmword ptr[eax+4*4]
+                 movups      xmm1,xmmword ptr[ebx+4*4]
+                 mulps       xmm3,xmm0
+                 addps       xmm3,xmm1
+                 movups      xmmword ptr[edx+4*4],xmm3
+ 
+                 movups      xmm4,xmmword ptr[eax+8*4]
+                 movups      xmm1,xmmword ptr[ebx+8*4]
+                 mulps       xmm4,xmm0
+                 addps       xmm4,xmm1
+                 movups      xmmword ptr[edx+8*4],xmm4
+ 
+                 movups      xmm5,xmmword ptr[eax+12*4]
+                 movups      xmm1,xmmword ptr[ebx+12*4]
+                 mulps       xmm5,xmm0
+                 addps       xmm5,xmm1
+                 movups      xmmword ptr[edx+12*4],xmm5
+ 
+                 add         eax,16*4
+                 add         edx,16*4
+                 add         ebx,16*4
+                 loop        loopu
+             }
+         }
+         while(cnt--) *(dst++) = *(src++) * opmul + *(opadd++);
+     }
+     else
+ #elif FLEXT_CPU == FLEXT_CPU_PPC && defined(__VEC__)
+     if(GetSIMDCapabilities()&simd_altivec && VectorsAligned(src,dst,opadd)) 
+         ScaleAltivec(dst,src,opmul,opadd,cnt);
+     else
+ #endif // _MSC_VER
+ #endif // FLEXT_USE_SIMD
      {
          int n = cnt>>3;
          cnt -= n<<3;
  
!         if(dst == opadd) {
              while(n--) {
                  dst[0] += src[0]*opmul; dst[1] += src[1]*opmul; 
***************
*** 1610,1649 ****
          else {
              while(n--) {
!                 dst[0] = src[0]*opmul+add[0]; dst[1] = src[1]*opmul+add[1]; 
!                 dst[2] = src[2]*opmul+add[2]; dst[3] = src[3]*opmul+add[3]; 
!                 dst[4] = src[4]*opmul+add[4]; dst[5] = src[5]*opmul+add[5]; 
!                 dst[6] = src[6]*opmul+add[6]; dst[7] = src[7]*opmul+add[7]; 
!                 src += 8,dst += 8,add += 8;
              }
!             while(cnt--) *(dst++) = *(src++)*opmul+*(add++);
          }
      }
  }
  
! void flext::ScaleSamples(t_sample *dst,const t_sample *src,const t_sample *mul,const t_sample *add,int cnt) 
  {
      {
          int n = cnt>>3;
          cnt -= n<<3;
  
!         if(dst == add) {
              while(n--) {
!                 dst[0] += src[0]*mul[0]; dst[1] += src[1]*mul[1]; 
!                 dst[2] += src[2]*mul[2]; dst[3] += src[3]*mul[3]; 
!                 dst[4] += src[4]*mul[4]; dst[5] += src[5]*mul[5]; 
!                 dst[6] += src[6]*mul[6]; dst[7] += src[7]*mul[7]; 
!                 src += 8,dst += 8,mul += 8;
              }
!             while(cnt--) *(dst++) += *(src++) * *(mul++);
          }
          else {
              while(n--) {
!                 dst[0] = src[0]*mul[0]+add[0]; dst[1] = src[1]*mul[1]+add[1]; 
!                 dst[2] = src[2]*mul[2]+add[2]; dst[3] = src[3]*mul[3]+add[3]; 
!                 dst[4] = src[4]*mul[4]+add[4]; dst[5] = src[5]*mul[5]+add[5]; 
!                 dst[6] = src[6]*mul[6]+add[6]; dst[7] = src[7]*mul[7]+add[7]; 
!                 src += 8,dst += 8,mul += 8,add += 8;
              }
!             while(cnt--) *(dst++) = *(src++)* *(mul++) + *(add++);
          }
      }
  }
--- 1798,1975 ----
          else {
              while(n--) {
!                 dst[0] = src[0]*opmul+opadd[0]; dst[1] = src[1]*opmul+opadd[1]; 
!                 dst[2] = src[2]*opmul+opadd[2]; dst[3] = src[3]*opmul+opadd[3]; 
!                 dst[4] = src[4]*opmul+opadd[4]; dst[5] = src[5]*opmul+opadd[5]; 
!                 dst[6] = src[6]*opmul+opadd[6]; dst[7] = src[7]*opmul+opadd[7]; 
!                 src += 8,dst += 8,opadd += 8;
              }
!             while(cnt--) *(dst++) = *(src++)*opmul+*(opadd++);
          }
      }
+ #endif
  }
  
! void flext::ScaleSamples(t_sample *dst,const t_sample *src,const t_sample *opmul,const t_sample *opadd,int cnt) 
  {
+ #ifdef FLEXT_USE_IPP
+     if(sizeof(t_sample) == 4) {
+         ippsMul_32f((const float *)src,(const float *)opmul,(float *)dst,cnt); 
+         ippsAdd_32f_I((const float *)opadd,(float *)dst,cnt); 
+     }
+     else if(sizeof(t_sample) == 8) {
+         ippsMul_64f((const double *)src,(const double *)opmul,(double *)dst,cnt); 
+         ippsAdd_64f_I((const double *)opadd,(double *)dst,cnt); 
+     }
+     else
+         ERRINTERNAL();
+ #else
+ #ifdef FLEXT_USE_SIMD
+ #ifdef _MSC_VER
+     if(GetSIMDCapabilities()&simd_sse) {
+         // single precision
+         int n = cnt>>4;
+         cnt -= n<<4;
+ 
+         __asm {
+             mov         eax,dword ptr [src]
+             prefetcht0  [eax+0]
+             prefetcht0  [eax+32]
+         }
+ 
+         if(VectorsAligned(src,dst,opmul,opadd)) {
+             // aligned version
+             __asm {
+                 mov         ecx,dword ptr [n]
+                 mov         eax,dword ptr [src]
+                 mov         edx,dword ptr [dst]
+                 mov         esi,dword ptr [opmul]
+                 mov         ebx,dword ptr [opadd]
+ loopa:
+                 prefetcht0  [eax+64]
+                 prefetcht0  [ebx+64]
+                 prefetcht0  [esi+64]
+                 prefetcht0  [eax+96]
+                 prefetcht0  [ebx+96]
+                 prefetcht0  [esi+96]
+ 
+                 movaps      xmm2,xmmword ptr[eax]
+                 movaps      xmm0,xmmword ptr[esi]
+                 movaps      xmm1,xmmword ptr[ebx]
+                 mulps       xmm2,xmm0
+                 addps       xmm2,xmm1
+                 movaps      xmmword ptr[edx],xmm2
+ 
+                 movaps      xmm3,xmmword ptr[eax+4*4]
+                 movaps      xmm0,xmmword ptr[esi+4*4]
+                 movaps      xmm1,xmmword ptr[ebx+4*4]
+                 mulps       xmm3,xmm0
+                 addps       xmm3,xmm1
+                 movaps      xmmword ptr[edx+4*4],xmm3
+ 
+                 movaps      xmm4,xmmword ptr[eax+8*4]
+                 movaps      xmm0,xmmword ptr[esi+8*4]
+                 movaps      xmm1,xmmword ptr[ebx+8*4]
+                 mulps       xmm4,xmm0
+                 addps       xmm4,xmm1
+                 movaps      xmmword ptr[edx+8*4],xmm4
+ 
+                 movaps      xmm5,xmmword ptr[eax+12*4]
+                 movaps      xmm0,xmmword ptr[esi+12*4]
+                 movaps      xmm1,xmmword ptr[ebx+12*4]
+                 mulps       xmm5,xmm0
+                 addps       xmm5,xmm1
+                 movaps      xmmword ptr[edx+12*4],xmm5
+ 
+                 add         eax,16*4
+                 add         edx,16*4
+                 add         ebx,16*4
+                 add         esi,16*4
+                 loop        loopa
+             }
+         }
+         else {
+             // unaligned version
+             __asm {
+                 mov         ecx,dword ptr [n]
+                 mov         eax,dword ptr [src]
+                 mov         edx,dword ptr [dst]
+                 mov         esi,dword ptr [opmul]
+                 mov         ebx,dword ptr [opadd]
+ loopu:
+                 prefetcht0  [eax+64]
+                 prefetcht0  [ebx+64]
+                 prefetcht0  [esi+64]
+                 prefetcht0  [eax+96]
+                 prefetcht0  [ebx+96]
+                 prefetcht0  [esi+96]
+ 
+                 movups      xmm2,xmmword ptr[eax]
+                 movups      xmm0,xmmword ptr[esi]
+                 movups      xmm1,xmmword ptr[ebx]
+                 mulps       xmm2,xmm0
+                 addps       xmm2,xmm1
+                 movups      xmmword ptr[edx],xmm2
+ 
+                 movups      xmm3,xmmword ptr[eax+4*4]
+                 movups      xmm0,xmmword ptr[esi+4*4]
+                 movups      xmm1,xmmword ptr[ebx+4*4]
+                 mulps       xmm3,xmm0
+                 addps       xmm3,xmm1
+                 movups      xmmword ptr[edx+4*4],xmm3
+ 
+                 movups      xmm4,xmmword ptr[eax+8*4]
+                 movups      xmm0,xmmword ptr[esi+8*4]
+                 movups      xmm1,xmmword ptr[ebx+8*4]
+                 mulps       xmm4,xmm0
+                 addps       xmm4,xmm1
+                 movups      xmmword ptr[edx+8*4],xmm4
+ 
+                 movups      xmm5,xmmword ptr[eax+12*4]
+                 movups      xmm0,xmmword ptr[esi+12*4]
+                 movups      xmm1,xmmword ptr[ebx+12*4]
+                 mulps       xmm5,xmm0
+                 addps       xmm5,xmm1
+                 movups      xmmword ptr[edx+12*4],xmm5
+ 
+                 add         eax,16*4
+                 add         edx,16*4
+                 add         ebx,16*4
+                 add         esi,16*4
+                 loop        loopu
+             }
+         }
+         while(cnt--) *(dst++) = *(src++) * *(opmul++) + *(opadd++);
+     }
+     else
+ #elif FLEXT_CPU == FLEXT_CPU_PPC && defined(__VEC__)
+     if(GetSIMDCapabilities()&simd_altivec && VectorsAligned(src,dst,opmul,opadd)) 
+         ScaleAltivec(dst,src,opmul,opadd,cnt);
+     else
+ #endif // _MSC_VER
+ #endif // FLEXT_USE_SIMD
      {
          int n = cnt>>3;
          cnt -= n<<3;
  
!         if(dst == opadd) {
              while(n--) {
!                 dst[0] += src[0]*opmul[0]; dst[1] += src[1]*opmul[1]; 
!                 dst[2] += src[2]*opmul[2]; dst[3] += src[3]*opmul[3]; 
!                 dst[4] += src[4]*opmul[4]; dst[5] += src[5]*opmul[5]; 
!                 dst[6] += src[6]*opmul[6]; dst[7] += src[7]*opmul[7]; 
!                 src += 8,dst += 8,opmul += 8;
              }
!             while(cnt--) *(dst++) += *(src++) * *(opmul++);
          }
          else {
              while(n--) {
!                 dst[0] = src[0]*opmul[0]+opadd[0]; dst[1] = src[1]*opmul[1]+opadd[1]; 
!                 dst[2] = src[2]*opmul[2]+opadd[2]; dst[3] = src[3]*opmul[3]+opadd[3]; 
!                 dst[4] = src[4]*opmul[4]+opadd[4]; dst[5] = src[5]*opmul[5]+opadd[5]; 
!                 dst[6] = src[6]*opmul[6]+opadd[6]; dst[7] = src[7]*opmul[7]+opadd[7]; 
!                 src += 8,dst += 8,opmul += 8,opadd += 8;
              }
!             while(cnt--) *(dst++) = *(src++)* *(opmul++) + *(opadd++);
          }
      }
+ #endif
  }
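
The two new ScaleSamples() SSE paths above are MSVC inline assembly; the same scale-and-add kernel can also be expressed with SSE intrinsics, which is roughly how a port to other x86 compilers could look. A sketch (not part of this commit, name hypothetical) of the unaligned dst = src*opmul + opadd variant, four floats per step:

#include <xmmintrin.h>   // SSE intrinsics

static void scale_sse(float *dst,const float *src,float opmul,const float *opadd,int cnt)
{
    const __m128 mul4 = _mm_set1_ps(opmul);   // broadcast, like shufps xmm0,xmm0,0
    int n = cnt>>2;
    cnt -= n<<2;

    for(; n--; src += 4,dst += 4,opadd += 4) {
        __m128 v = _mm_mul_ps(_mm_loadu_ps(src),mul4);   // mulps
        v = _mm_add_ps(v,_mm_loadu_ps(opadd));           // addps
        _mm_storeu_ps(dst,v);                            // unaligned store
    }
    while(cnt--) *(dst++) = *(src++)*opmul+*(opadd++);
}

An aligned variant would use _mm_load_ps/_mm_store_ps instead, matching the movaps vs. movups split in the assembly above.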