X-Git-Url: https://oss.titaniummirror.com/gitweb?a=blobdiff_plain;f=gmp%2Fmpn%2Fpyr%2Fadd_n.s;fp=gmp%2Fmpn%2Fpyr%2Fadd_n.s;h=7ac02e6b4d2031e2d077cdc20ba42ec0506192bb;hb=6fed43773c9b0ce596dca5686f37ac3fc0fa11c0;hp=0000000000000000000000000000000000000000;hpb=27b11d56b743098deb193d510b337ba22dc52e5c;p=msp430-gcc.git

diff --git a/gmp/mpn/pyr/add_n.s b/gmp/mpn/pyr/add_n.s
new file mode 100644
index 00000000..7ac02e6b
--- /dev/null
+++ b/gmp/mpn/pyr/add_n.s
@@ -0,0 +1,74 @@
+# Pyramid __gmpn_add_n -- Add two limb vectors of the same length > 0 and store
+# sum in a third limb vector.
+
+# Copyright 1995, 2000 Free Software Foundation, Inc.
+
+# This file is part of the GNU MP Library.
+
+# The GNU MP Library is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or (at your
+# option) any later version.
+
+# The GNU MP Library is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public
+# License for more details.
+
+# You should have received a copy of the GNU Lesser General Public License
+# along with the GNU MP Library.  If not, see http://www.gnu.org/licenses/.
+
+.text
+	.align	2
+.globl	___gmpn_add_n
+___gmpn_add_n:
+	movw	$-1,tr0		# representation for carry clear
+
+	movw	pr3,tr2
+	andw	$3,tr2
+	beq	Lend0
+	subw	tr2,pr3
+
+Loop0:	rsubw	$0,tr0		# restore carry bit from carry-save register
+
+	movw	(pr1),tr1
+	addwc	(pr2),tr1
+	movw	tr1,(pr0)
+
+	subwb	tr0,tr0
+	addw	$4,pr0
+	addw	$4,pr1
+	addw	$4,pr2
+	addw	$-1,tr2
+	bne	Loop0
+
+	mtstw	pr3,pr3
+	beq	Lend
+Lend0:
+Loop:	rsubw	$0,tr0		# restore carry bit from carry-save register
+
+	movw	(pr1),tr1
+	addwc	(pr2),tr1
+	movw	tr1,(pr0)
+
+	movw	4(pr1),tr1
+	addwc	4(pr2),tr1
+	movw	tr1,4(pr0)
+
+	movw	8(pr1),tr1
+	addwc	8(pr2),tr1
+	movw	tr1,8(pr0)
+
+	movw	12(pr1),tr1
+	addwc	12(pr2),tr1
+	movw	tr1,12(pr0)
+
+	subwb	tr0,tr0
+	addw	$16,pr0
+	addw	$16,pr1
+	addw	$16,pr2
+	addw	$-4,pr3
+	bne	Loop
+Lend:
+	mnegw	tr0,pr0
+	ret
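
Note on the routine above: the file's own comments describe __gmpn_add_n as adding two limb vectors of equal length and storing the sum in a third vector, returning the final carry. Structurally, Loop0 first handles the leftover n mod 4 limbs one at a time, the unrolled main loop (Loop) then processes four limbs per pass, and the carry state is parked in tr0 (the "carry-save register" named in the comments) between blocks before being returned through pr0. For readers unfamiliar with Pyramid assembly, the following portable C sketch shows the same limb-wise add-with-carry semantics; the names limb_t and ref_add_n are illustrative assumptions, standing in for GMP's mp_limb_t and the real __gmpn_add_n entry point, not code from this diff.

    #include <stddef.h>

    /* Hypothetical reference implementation of the mpn_add_n semantics:
       rp[0..n-1] = s1p[0..n-1] + s2p[0..n-1], returning the final carry. */
    typedef unsigned long limb_t;      /* stand-in for one machine word (limb) */

    limb_t
    ref_add_n (limb_t *rp, const limb_t *s1p, const limb_t *s2p, size_t n)
    {
      limb_t cy = 0;                   /* carry propagated between limbs */
      for (size_t i = 0; i < n; i++)
        {
          limb_t a = s1p[i];
          limb_t b = s2p[i];
          limb_t s = a + b;            /* wraps on overflow; wrap == carry out */
          limb_t c1 = s < a;           /* carry out of a + b */
          limb_t r = s + cy;
          limb_t c2 = r < s;           /* carry out of adding the old carry */
          rp[i] = r;
          cy = c1 | c2;                /* at most one of c1, c2 can be set */
        }
      return cy;                       /* final carry, 0 or 1 */
    }

The assembly obtains the same effect with the hardware carry flag (addwc) instead of the explicit comparisons, which is why it must save and restore that flag around the pointer-update instructions at the end of each loop iteration.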