X-Git-Url: https://oss.titaniummirror.com/gitweb?a=blobdiff_plain;f=gmp%2Fmpn%2Fx86%2Fpentium%2Fmod_1.asm;fp=gmp%2Fmpn%2Fx86%2Fpentium%2Fmod_1.asm;h=408242e7a9c521a1d349c8a6c997fc65629f220f;hb=6fed43773c9b0ce596dca5686f37ac3fc0fa11c0;hp=0000000000000000000000000000000000000000;hpb=27b11d56b743098deb193d510b337ba22dc52e5c;p=msp430-gcc.git

diff --git a/gmp/mpn/x86/pentium/mod_1.asm b/gmp/mpn/x86/pentium/mod_1.asm
new file mode 100644
index 00000000..408242e7
--- /dev/null
+++ b/gmp/mpn/x86/pentium/mod_1.asm
@@ -0,0 +1,454 @@
+dnl Intel P5 mpn_mod_1 -- mpn by limb remainder.
+
+dnl Copyright 1999, 2000, 2002 Free Software Foundation, Inc.
+dnl
+dnl This file is part of the GNU MP Library.
+dnl
+dnl The GNU MP Library is free software; you can redistribute it and/or
+dnl modify it under the terms of the GNU Lesser General Public License as
+dnl published by the Free Software Foundation; either version 3 of the
+dnl License, or (at your option) any later version.
+dnl
+dnl The GNU MP Library is distributed in the hope that it will be useful,
+dnl but WITHOUT ANY WARRANTY; without even the implied warranty of
+dnl MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+dnl Lesser General Public License for more details.
+dnl
+dnl You should have received a copy of the GNU Lesser General Public License
+dnl along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.
+
+include(`../config.m4')
+
+
+C P5: 28.0 cycles/limb
+
+
+C mp_limb_t mpn_mod_1 (mp_srcptr src, mp_size_t size, mp_limb_t divisor);
+C mp_limb_t mpn_mod_1c (mp_srcptr src, mp_size_t size, mp_limb_t divisor,
+C                       mp_limb_t carry);
+C mp_limb_t mpn_preinv_mod_1 (mp_srcptr src, mp_size_t size, mp_limb_t divisor,
+C                             mp_limb_t inverse);
+C
+C This code is not unlike mpn/x86/p6/mod_1.asm, it does the same sort of
+C multiply by inverse without on-the-fly shifts.  See that code for some
+C general comments.
+C
+C Alternatives:
+C
+C P5 shldl is 4 cycles, so shifting on the fly would be at least 5 cycles
+C slower, probably more depending what it did to register usage.  Using MMX
+C on P55 would be better, but still at least 4 or 5 instructions and so 2 or
+C 3 cycles.
+
+
+dnl These thresholds are the sizes where the multiply by inverse method is
+dnl used, rather than plain "divl"s.  Minimum value 2.
+dnl
+dnl MUL_NORM_THRESHOLD is for an already normalized divisor (high bit set),
+dnl MUL_UNNORM_THRESHOLD for an unnormalized divisor.
+dnl
+dnl With the divl loop at 44 c/l and the inverse at 28 c/l with about 70
+dnl cycles to setup, the threshold should be about ceil(70/16)==5, which is
+dnl what happens in practice.
+dnl
+dnl An unnormalized divisor gets an extra 40 cycles at the end for the
+dnl final (r*2^n)%(d*2^n) and shift.  This increases the threshold by about
+dnl 40/16=3.
+dnl
+dnl PIC adds between 4 and 7 cycles (not sure why it varies), but this
+dnl doesn't change the thresholds.
+dnl
+dnl The entry sequence code that chooses between MUL_NORM_THRESHOLD and
+dnl MUL_UNNORM_THRESHOLD is a bit horrible, but it adds only 2 cycles
+dnl (branch free) and ensures the choice between div or mul is optimal.
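For reference, the plain "divl" loop that these thresholds are measured against simply folds one source limb into the running remainder per division, working from the most significant limb down. The following rough C model is not part of the diff; it assumes 32-bit limbs, and the function name and types are purely illustrative.

#include <stdint.h>
#include <stddef.h>

/* Remainder of src[size-1..0] divided by d, one 64/32 division ("divl")
   per limb.  Illustrative model only, not GMP code. */
static uint32_t mod_1_by_divl(const uint32_t *src, size_t size, uint32_t d)
{
    uint32_t r = 0;
    for (size_t i = size; i-- > 0; )
        r = (uint32_t)((((uint64_t)r << 32) | src[i]) % d);
    return r;
}

The multiply-by-inverse loop further below computes the same remainder but replaces each divl with multiplications by a precomputed inverse of the divisor.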
+
+deflit(MUL_NORM_THRESHOLD,   ifdef(`PIC',5,5))
+deflit(MUL_UNNORM_THRESHOLD, ifdef(`PIC',8,8))
+
+deflit(MUL_NORM_DELTA, eval(MUL_NORM_THRESHOLD - MUL_UNNORM_THRESHOLD))
+
+
+defframe(PARAM_INVERSE, 16)   dnl mpn_preinv_mod_1
+defframe(PARAM_CARRY,   16)   dnl mpn_mod_1c
+defframe(PARAM_DIVISOR, 12)
+defframe(PARAM_SIZE,     8)
+defframe(PARAM_SRC,      4)
+
+dnl re-using parameter space
+define(VAR_NORM,    `PARAM_DIVISOR')
+define(VAR_INVERSE, `PARAM_SIZE')
+
+	TEXT
+
+	ALIGN(8)
+PROLOGUE(mpn_preinv_mod_1)
+deflit(`FRAME',0)
+
+	pushl	%ebp	FRAME_pushl()
+	pushl	%esi	FRAME_pushl()
+
+	movl	PARAM_SRC, %esi
+	movl	PARAM_SIZE, %edx
+
+	pushl	%edi	FRAME_pushl()
+	pushl	%ebx	FRAME_pushl()
+
+	movl	PARAM_DIVISOR, %ebp
+	movl	PARAM_INVERSE, %eax
+
+	movl	-4(%esi,%edx,4), %edi	C src high limb
+	leal	-8(%esi,%edx,4), %esi	C &src[size-2]
+
+	movl	$0, VAR_NORM
+	decl	%edx
+
+	jnz	L(start_preinv)
+
+	subl	%ebp, %edi		C src-divisor
+	popl	%ebx
+
+	sbbl	%ecx, %ecx		C -1 if underflow
+	movl	%edi, %eax		C src-divisor
+
+	andl	%ebp, %ecx		C d if underflow
+	popl	%edi
+
+	addl	%ecx, %eax		C remainder, with possible addback
+	popl	%esi
+
+	popl	%ebp
+
+	ret
+
+EPILOGUE()
+
+
+	ALIGN(8)
+PROLOGUE(mpn_mod_1c)
+deflit(`FRAME',0)
+
+	movl	PARAM_DIVISOR, %eax
+	movl	PARAM_SIZE, %ecx
+
+	sarl	$31, %eax		C d highbit
+	movl	PARAM_CARRY, %edx
+
+	orl	%ecx, %ecx
+	jz	L(done_edx)		C result==carry if size==0
+
+	andl	$MUL_NORM_DELTA, %eax
+	pushl	%ebp	FRAME_pushl()
+
+	addl	$MUL_UNNORM_THRESHOLD, %eax	C norm or unnorm thresh
+	pushl	%esi	FRAME_pushl()
+
+	movl	PARAM_SRC, %esi
+	movl	PARAM_DIVISOR, %ebp
+
+	cmpl	%eax, %ecx
+	jb	L(divide_top)
+
+	movl	%edx, %eax		C carry as pretend src high limb
+	leal	1(%ecx), %edx		C size+1
+
+	cmpl	$0x1000000, %ebp
+	jmp	L(mul_by_inverse_1c)
+
+EPILOGUE()
+
+
+	ALIGN(8)
+PROLOGUE(mpn_mod_1)
+deflit(`FRAME',0)
+
+	movl	PARAM_SIZE, %ecx
+	pushl	%ebp	FRAME_pushl()
+
+	orl	%ecx, %ecx
+	jz	L(done_zero)
+
+	movl	PARAM_SRC, %eax
+	movl	PARAM_DIVISOR, %ebp
+
+	sarl	$31, %ebp		C -1 if divisor normalized
+	movl	-4(%eax,%ecx,4), %eax	C src high limb
+
+	movl	PARAM_DIVISOR, %edx
+	pushl	%esi	FRAME_pushl()
+
+	andl	$MUL_NORM_DELTA, %ebp
+	cmpl	%edx, %eax		C carry flag if high<divisor
+
+	leal	25(,%ecx,8), %ecx	C 0,-1,-2,-3 -> 25,17,9,1
+
+	shrl	%cl, %ebx
+	addl	$_GLOBAL_OFFSET_TABLE_+[.-L(here)], %edi
+
+	C AGI
+	movl	__clz_tab@GOT(%edi), %edi
+	addl	$-34, %ecx
+
+	C AGI
+	movb	(%ebx,%edi), %bl
+
+',`
+	leal	25(,%ecx,8), %ecx	C 0,-1,-2,-3 -> 25,17,9,1
+
+	shrl	%cl, %ebx
+	addl	$-34, %ecx
+
+	C AGI
+	movb	__clz_tab(%ebx), %bl
+')
+	movl	%eax, %edi		C carry -> n1
+
+	addl	%ebx, %ecx		C -34 + c + __clz_tab[d>>c] = -clz-1
+	leal	-8(%esi,%edx,4), %esi	C &src[size-2]
+
+	xorl	$-1, %ecx		C clz
+	movl	$-1, %edx
+
+	ASSERT(e,`pushl	%eax	C clz calculation same as bsrl
+	bsrl	%ebp, %eax
+	xorl	$31, %eax
+	cmpl	%eax, %ecx
+	popl	%eax')
+
+	shll	%cl, %ebp		C d normalized
+	movl	%ecx, VAR_NORM
+
+	subl	%ebp, %edx		C (b-d)-1, so edx:eax = b*(b-d)-1
+	movl	$-1, %eax
+
+	divl	%ebp			C floor (b*(b-d)-1) / d
+
+L(start_preinv):
+	movl	%eax, VAR_INVERSE
+	movl	%ebp, %eax		C d
+
+	movl	%ecx, %edx		C fake high, will cancel
+
+
+C For mpn_mod_1 and mpn_preinv_mod_1, the initial carry in %edi is the src
+C high limb, and this may be greater than the divisor and may need one copy
+C of the divisor subtracted (only one, because the divisor is normalized).
+C This is accomplished by having the initial ecx:edi act as a fake previous
+C n2:n10.  The initial edx:eax is d, acting as a fake (q1+1)*d which is
+C subtracted from ecx:edi, with the usual addback if it produces an
+C underflow.
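The comment above describes a fake first iteration of the loop that follows: L(inverse_top) repeats the same subtract-and-addback pattern with a real (q1+1)*d each time. As a rough C model, not part of the diff and with all names invented for illustration, the inverse computed by the "divl %ebp" above and one step of the loop look approximately like this, assuming 32-bit limbs and a divisor d whose high bit is set:

#include <stdint.h>
#include <stddef.h>

/* inv = floor((2^32*(2^32-d) - 1) / d), as left in %eax by the divl above. */
static uint32_t mod_1_inverse(uint32_t d)
{
    return (uint32_t)((((uint64_t)(uint32_t)~d << 32) | 0xFFFFFFFFu) / d);
}

/* Fold the next source limb n10 into the running remainder n2 (n2 < d),
   using multiplications by inv instead of a division. */
static uint32_t mod_1_step(uint32_t n2, uint32_t n10, uint32_t d, uint32_t inv)
{
    uint32_t n1   = n10 >> 31;               /* top bit of n10 */
    uint32_t nadj = n10 + (n1 ? d : 0);      /* n10 + (-n1 & d), mod 2^32 */

    /* q1+1 = 1 + n2 + high(inv*(n2+n1) + nadj); the adcl/sbbl pair in the
       loop below drops back to q1 when this wraps past 2^32. */
    uint64_t t    = (uint64_t)inv * (n2 + n1) + nadj;
    uint64_t sum  = (uint64_t)n2 + 1 + (t >> 32);
    uint32_t q1p1 = (uint32_t)sum - (uint32_t)(sum >> 32);

    /* n2:n10 - (q1+1)*d lies in [-d, d); add back one d on underflow. */
    uint64_t diff = (((uint64_t)n2 << 32) | n10) - (uint64_t)q1p1 * d;
    if (diff >> 63)
        diff += d;
    return (uint32_t)diff;
}

/* Whole remainder for size >= 1 and an already normalized d (high bit set). */
static uint32_t mod_1_norm(const uint32_t *src, size_t size, uint32_t d)
{
    uint32_t inv = mod_1_inverse(d);
    uint32_t r = src[size - 1];
    if (r >= d)                  /* the fake first iteration described above */
        r -= d;
    for (size_t i = size - 1; i-- > 0; )
        r = mod_1_step(r, src[i], d, inv);
    return r;
}

For an unnormalized divisor the file instead works modulo d*2^n and performs the final (r*2^n)%(d*2^n) and shift mentioned in the header comments; that fix-up is omitted from this sketch.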
+
+
+L(inverse_top):
+	C eax	scratch (n10, n1, q1, etc)
+	C ebx	scratch (nadj, src limit)
+	C ecx	old n2
+	C edx	scratch
+	C esi	src pointer, &src[size-2] to &src[0]
+	C edi	old n10
+	C ebp	d
+
+	subl	%eax, %edi		C low n - (q1+1)*d
+	movl	(%esi), %eax		C new n10
+
+	sbbl	%edx, %ecx		C high n - (q1+1)*d, 0 or -1
+	movl	%ebp, %ebx		C d
+
+	sarl	$31, %eax		C -n1
+	andl	%ebp, %ecx		C d if underflow
+
+	addl	%edi, %ecx		C remainder -> n2, and possible addback
+	ASSERT(b,`cmpl %ebp, %ecx')
+	andl	%eax, %ebx		C -n1 & d
+
+	movl	(%esi), %edi		C n10
+	andl	$1, %eax		C n1
+
+	addl	%ecx, %eax		C n2+n1
+	addl	%edi, %ebx		C nadj = n10 + (-n1 & d), ignoring overflow
+
+	mull	VAR_INVERSE		C m*(n2+n1)
+
+	addl	%eax, %ebx		C low(m*(n2+n1) + nadj), giving carry flag
+	leal	1(%ecx), %eax		C 1+n2
+
+	adcl	%edx, %eax		C 1 + high[n2<<32 + m*(n2+n1) + nadj] = q1+1
+	movl	PARAM_SRC, %ebx
+
+	sbbl	$0, %eax		C use q1 if q1+1 overflows
+	subl	$4, %esi		C step src ptr
+
+	mull	%ebp			C (q1+1)*d
+
+	cmpl	%ebx, %esi
+	jae	L(inverse_top)
+
+
+
+	C %edi (after subtract and addback) is the remainder modulo d*2^n
+	C and must be reduced to 0<=r