/* libgcc routines for 68000 w/o floating-point hardware.
- Copyright (C) 1994, 1996, 1997, 1998 Free Software Foundation, Inc.
+ Copyright (C) 1994, 1996, 1997, 1998, 2008, 2009 Free Software Foundation, Inc.
-This file is part of GNU CC.
+This file is part of GCC.
-GNU CC is free software; you can redistribute it and/or modify it
+GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
-Free Software Foundation; either version 2, or (at your option) any
+Free Software Foundation; either version 3, or (at your option) any
later version.
-In addition to the permissions in the GNU General Public License, the
-Free Software Foundation gives you unlimited permission to link the
-compiled version of this file with other programs, and to distribute
-those programs without any restriction coming from the use of this
-file. (The General Public License restrictions do apply in other
-respects; for example, they cover modification of the file, and
-distribution when not linked into another program.)
-
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
-You should have received a copy of the GNU General Public License
-along with this program; see the file COPYING. If not, write to
-the Free Software Foundation, 59 Temple Place - Suite 330,
-Boston, MA 02111-1307, USA. */
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
-/* As a special exception, if you link this library with files
- compiled with GCC to produce an executable, this does not cause
- the resulting executable to be covered by the GNU General Public License.
- This exception does not however invalidate any other reasons why
- the executable file might be covered by the GNU General Public License. */
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
/* Use this one for any 680x0; assumes no floating point hardware.
The trailing " '" appearing on some lines is for ANSI preprocessors. Yuk.
#define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
+/* Note that X is a function. */
+
+#ifdef __ELF__
+#define FUNC(x) .type SYM(x),function
+#else
+/* The .proc pseudo-op is accepted, but ignored, by GAS. We could just
+ define this to the empty string for non-ELF systems, but defining it
+ to .proc means that the information is available to the assembler if
+ the need arises. */
+#define FUNC(x) .proc
+#endif
+
/* Use the right prefix for registers. */
#define REG(x) CONCAT1 (__REGISTER_PREFIX__, x)
#define a6 REG (a6)
#define fp REG (fp)
#define sp REG (sp)
+#define pc REG (pc)
+
+/* Provide a few macros to allow for PIC code support.
+ * With PIC, data is stored A5 relative so we've got to take a bit of special
+ * care to ensure that all loads of global data are via A5. PIC also requires
+ * jumps and subroutine calls to be PC relative rather than absolute. We cheat
+ * a little on this and in the PIC case, we use short offset branches and
+ * hope that the final object code is within range (which it should be).
+ */
+#ifndef __PIC__
+
+ /* Non PIC (absolute/relocatable) versions */
+
+ .macro PICCALL addr
+ jbsr \addr
+ .endm
+
+ .macro PICJUMP addr
+ jmp \addr
+ .endm
+
+ .macro PICLEA sym, reg
+ lea \sym, \reg
+ .endm
+
+ .macro PICPEA sym, areg
+ pea \sym
+ .endm
+
+#else /* __PIC__ */
+
+# if defined (__uClinux__)
+
+ /* Versions for uClinux */
+
+# if defined(__ID_SHARED_LIBRARY__)
+
+ /* -mid-shared-library versions */
+
+ .macro PICLEA sym, reg
+ movel a5@(_current_shared_library_a5_offset_), \reg
+ movel \sym@GOT(\reg), \reg
+ .endm
+
+ .macro PICPEA sym, areg
+ movel a5@(_current_shared_library_a5_offset_), \areg
+ movel \sym@GOT(\areg), sp@-
+ .endm
+
+ .macro PICCALL addr
+ PICLEA \addr,a0
+ jsr a0@
+ .endm
+
+ .macro PICJUMP addr
+ PICLEA \addr,a0
+ jmp a0@
+ .endm
+
+# else /* !__ID_SHARED_LIBRARY__ */
+
+ /* Versions for -msep-data */
+
+ .macro PICLEA sym, reg
+ movel \sym@GOT(a5), \reg
+ .endm
+
+ .macro PICPEA sym, areg
+ movel \sym@GOT(a5), sp@-
+ .endm
+
+ .macro PICCALL addr
+#if defined (__mcoldfire__) && !defined (__mcfisab__) && !defined (__mcfisac__)
+ lea \addr-.-8,a0
+ jsr pc@(a0)
+#else
+ bsr \addr
+#endif
+ .endm
+
+ .macro PICJUMP addr
+ /* ISA C has no bra.l instruction, and since this assembly file
+ gets assembled into multiple object files, we avoid the
+ bra instruction entirely. */
+#if defined (__mcoldfire__) && !defined (__mcfisab__)
+ lea \addr-.-8,a0
+ jmp pc@(a0)
+#else
+ bra \addr
+#endif
+ .endm
+
+# endif
+
+# else /* !__uClinux__ */
+
+ /* Versions for Linux */
+
+ .macro PICLEA sym, reg
+ movel #_GLOBAL_OFFSET_TABLE_@GOTPC, \reg
+ lea (-6, pc, \reg), \reg
+ movel \sym@GOT(\reg), \reg
+ .endm
+
+ .macro PICPEA sym, areg
+ movel #_GLOBAL_OFFSET_TABLE_@GOTPC, \areg
+ lea (-6, pc, \areg), \areg
+ movel \sym@GOT(\areg), sp@-
+ .endm
+
+ .macro PICCALL addr
+#if defined (__mcoldfire__) && !defined (__mcfisab__) && !defined (__mcfisac__)
+ lea \addr-.-8,a0
+ jsr pc@(a0)
+#else
+ bsr \addr
+#endif
+ .endm
+
+ .macro PICJUMP addr
+ /* ISA C has no bra.l instruction, and since this assembly file
+ gets assembled into multiple object files, we avoid the
+ bra instruction entirely. */
+#if defined (__mcoldfire__) && !defined (__mcfisab__)
+ lea \addr-.-8,a0
+ jmp pc@(a0)
+#else
+ bra \addr
+#endif
+ .endm
+
+# endif
+#endif /* __PIC__ */
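+
+/* The macros above replace the direct lea/pea/jbsr/jmp forms used
+   throughout the rest of this file, e.g.:
+
+        PICLEA  SYM (_fpCCR),a0
+        PICCALL SYM (__udivsi3)
+        PICJUMP $_exception_handler
+
+   so each ABI (absolute, -mid-shared-library, -msep-data, ELF PIC)
+   picks up the matching GOT-relative or PC-relative addressing.  */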
+
#ifdef L_floatex
| void __clear_sticky_bit(void);
SYM (__clear_sticky_bit):
- lea SYM (_fpCCR),a0
-#ifndef __mcf5200__
+ PICLEA SYM (_fpCCR),a0
+#ifndef __mcoldfire__
movew IMM (0),a0@(STICK)
#else
clr.w a0@(STICK)
FPTRAP = 15
$_exception_handler:
- lea SYM (_fpCCR),a0
+ PICLEA SYM (_fpCCR),a0
movew d7,a0@(EBITS) | set __exception_bits
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
orw d7,a0@(STICK) | and __sticky_bits
#else
movew a0@(STICK),d4
movew d5,a0@(LASTO) | and __last_operation
| Now put the operands in place:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (SINGLE_FLOAT),d6
#else
cmpl IMM (SINGLE_FLOAT),d6
movel a6@(12),a0@(OPER2)
2:
| And check whether the exception is trap-enabled:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
andw a0@(TRAPE),d7 | is exception trap-enabled?
#else
clrl d6
andl d6,d7
#endif
beq 1f | no, exit
- pea SYM (_fpCCR) | yes, push address of _fpCCR
+ PICPEA SYM (_fpCCR),a1 | yes, push address of _fpCCR
trap IMM (FPTRAP) | and trap
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
1: moveml sp@+,d2-d7 | restore data registers
#else
1: moveml sp@,d2-d7
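+
+/* In C terms the handler above does roughly the following (a sketch;
+   the field names are illustrative stand-ins for the EBITS, STICK,
+   LASTO, OPER2 and TRAPE offsets into _fpCCR):
+
+   fpCCR.exception_bits  = bits;     // EBITS, from d7
+   fpCCR.sticky_bits    |= bits;     // accumulated in STICK
+   fpCCR.last_operation  = op;       // LASTO, from d5
+   fpCCR.operand2        = y;        // OPER2 (and its sibling slots)
+   if (bits & fpCCR.trap_enable)     // TRAPE: is this trap enabled?
+     trap (FPTRAP);                  // with &_fpCCR pushed first
+*/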
#ifdef L_mulsi3
.text
- .proc
+ FUNC(__mulsi3)
.globl SYM (__mulsi3)
SYM (__mulsi3):
movew sp@(4), d0 /* x0 -> d0 */
muluw sp@(10), d0 /* x0*y1 */
movew sp@(6), d1 /* x1 -> d1 */
muluw sp@(8), d1 /* x1*y0 */
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
addw d1, d0
#else
addl d1, d0
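+
+/* For reference, the same computation in C (a sketch; mulsi3 is an
+   illustrative name).  The x1*y1 term falls entirely outside 32 bits,
+   and only the low 16 bits of the cross sum survive the shift, which
+   is why the plain addw suffices on the 68000:
+
+   unsigned long
+   mulsi3 (unsigned long x, unsigned long y)
+   {
+     unsigned long x0 = x & 0xffff, x1 = x >> 16;
+     unsigned long y0 = y & 0xffff, y1 = y >> 16;
+     return ((x0 * y1 + x1 * y0) << 16) + x0 * y0;
+   }
+*/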
#ifdef L_udivsi3
.text
- .proc
+ FUNC(__udivsi3)
.globl SYM (__udivsi3)
SYM (__udivsi3):
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
movel d2, sp@-
movel sp@(12), d1 /* d1 = divisor */
movel sp@(8), d0 /* d0 = dividend */
lsrl IMM (1), d0 /* shift dividend */
cmpl IMM (0x10000), d1 /* still divisor >= 2 ^ 16 ? */
jcc L4
- divu d1, d0 /* now we have 16 bit divisor */
+ divu d1, d0 /* now we have 16-bit divisor */
andl IMM (0xffff), d0 /* mask out divisor, ignore remainder */
-/* Multiply the 16 bit tentative quotient with the 32 bit divisor. Because of
- the operand ranges, this might give a 33 bit product. If this product is
+/* Multiply the 16-bit tentative quotient with the 32-bit divisor. Because of
+ the operand ranges, this might give a 33-bit product. If this product is
greater than the dividend, the tentative quotient was too large. */
movel d2, d1
mulu d0, d1 /* low part, 32 bits */
L6: movel sp@+, d2
rts
-#else /* __mcf5200__ */
+#else /* __mcoldfire__ */
-/* Coldfire implementation of non-restoring division algorithm from
+/* ColdFire implementation of non-restoring division algorithm from
Hennessy & Patterson, Appendix A. */
link a6,IMM (-12)
moveml d2-d4,sp@
moveml sp@,d2-d4 | restore data registers
unlk a6 | and return
rts
-#endif /* __mcf5200__ */
+#endif /* __mcoldfire__ */
#endif /* L_udivsi3 */
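+
+/* The 68000 path above in C, for reference (a sketch; udivsi3 is an
+   illustrative name, and the asm does the 33-bit product test with a
+   carry check rather than a 64-bit multiply):
+
+   unsigned long
+   udivsi3 (unsigned long a, unsigned long b)
+   {
+     unsigned long a1 = a, b1 = b, q;
+     if (b1 < 0x10000)                   // divisor fits: two divu steps
+       {
+         unsigned long hi = a >> 16, lo = a & 0xffff;
+         q = (hi / b1) << 16;            // high word of the quotient
+         lo |= (hi % b1) << 16;          // remainder:low is the new dividend
+         return q | (lo / b1);           // low word of the quotient
+       }
+     while (b1 >= 0x10000)               // shift until the divisor is 16-bit
+       { a1 >>= 1; b1 >>= 1; }
+     q = a1 / b1;                        // 16-bit tentative quotient
+     if ((unsigned long long) q * b > a) // tentative quotient too large?
+       q--;                              // then it is exactly one too large
+     return q;
+   }
+*/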
#ifdef L_divsi3
.text
- .proc
+ FUNC(__divsi3)
.globl SYM (__divsi3)
SYM (__divsi3):
movel d2, sp@-
movel sp@(12), d1 /* d1 = divisor */
jpl L1
negl d1
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
negb d2 /* change sign because divisor <0 */
#else
negl d2 /* change sign because divisor <0 */
L1: movel sp@(8), d0 /* d0 = dividend */
jpl L2
negl d0
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
negb d2
#else
negl d2
L2: movel d1, sp@-
movel d0, sp@-
- jbsr SYM (__udivsi3) /* divide abs(dividend) by abs(divisor) */
+ PICCALL SYM (__udivsi3) /* divide abs(dividend) by abs(divisor) */
addql IMM (8), sp
tstb d2
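+
+/* I.e., in C (a sketch; the sign bookkeeping lives in d2 above, and
+   the usual LONG_MIN negation caveat applies):
+
+   long
+   divsi3 (long a, long b)
+   {
+     int neg = 0;
+     unsigned long q;
+     if (b < 0) { b = -b; neg = !neg; }
+     if (a < 0) { a = -a; neg = !neg; }
+     q = udivsi3 (a, b);                 // divide the magnitudes
+     return neg ? -(long) q : (long) q;
+   }
+*/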
#ifdef L_umodsi3
.text
- .proc
+ FUNC(__umodsi3)
.globl SYM (__umodsi3)
SYM (__umodsi3):
movel sp@(8), d1 /* d1 = divisor */
movel sp@(4), d0 /* d0 = dividend */
movel d1, sp@-
movel d0, sp@-
- jbsr SYM (__udivsi3)
+ PICCALL SYM (__udivsi3)
addql IMM (8), sp
movel sp@(8), d1 /* d1 = divisor */
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
movel d1, sp@-
movel d0, sp@-
- jbsr SYM (__mulsi3) /* d0 = (a/b)*b */
+ PICCALL SYM (__mulsi3) /* d0 = (a/b)*b */
addql IMM (8), sp
#else
mulsl d1,d0
#ifdef L_modsi3
.text
- .proc
+ FUNC(__modsi3)
.globl SYM (__modsi3)
SYM (__modsi3):
movel sp@(8), d1 /* d1 = divisor */
movel sp@(4), d0 /* d0 = dividend */
movel d1, sp@-
movel d0, sp@-
- jbsr SYM (__divsi3)
+ PICCALL SYM (__divsi3)
addql IMM (8), sp
movel sp@(8), d1 /* d1 = divisor */
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
movel d1, sp@-
movel d0, sp@-
- jbsr SYM (__mulsi3) /* d0 = (a/b)*b */
+ PICCALL SYM (__mulsi3) /* d0 = (a/b)*b */
addql IMM (8), sp
#else
mulsl d1,d0
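+
+/* Both remainder routines reduce to the identity in the comments above
+   (a sketch; __modsi3 is the same with divsi3 and signed types):
+
+   unsigned long
+   umodsi3 (unsigned long a, unsigned long b)
+   {
+     return a - udivsi3 (a, b) * b;      // a - (a/b)*b
+   }
+*/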
.globl SYM (__divdf3)
.globl SYM (__negdf2)
.globl SYM (__cmpdf2)
+ .globl SYM (__cmpdf2_internal)
+ .hidden SYM (__cmpdf2_internal)
.text
.even
orl d7,d0
movew IMM (INEXACT_RESULT+UNDERFLOW),d7
moveq IMM (DOUBLE_FLOAT),d6
- jmp $_exception_handler
+ PICJUMP $_exception_handler
Ld$infty:
Ld$overflow:
orl d7,d0
movew IMM (INEXACT_RESULT+OVERFLOW),d7
moveq IMM (DOUBLE_FLOAT),d6
- jmp $_exception_handler
+ PICJUMP $_exception_handler
Ld$underflow:
| Return 0 and set the exception flags
movel d0,d1
movew IMM (INEXACT_RESULT+UNDERFLOW),d7
moveq IMM (DOUBLE_FLOAT),d6
- jmp $_exception_handler
+ PICJUMP $_exception_handler
Ld$inop:
| Return a quiet NaN and set the exception flags
movel d0,d1
movew IMM (INEXACT_RESULT+INVALID_OPERATION),d7
moveq IMM (DOUBLE_FLOAT),d6
- jmp $_exception_handler
+ PICJUMP $_exception_handler
Ld$div$0:
| Return a properly signed INFINITY and set the exception flags
orl d7,d0
movew IMM (INEXACT_RESULT+DIVIDE_BY_ZERO),d7
moveq IMM (DOUBLE_FLOAT),d6
- jmp $_exception_handler
+ PICJUMP $_exception_handler
|=============================================================================
|=============================================================================
|=============================================================================
| double __subdf3(double, double);
+ FUNC(__subdf3)
SYM (__subdf3):
bchg IMM (31),sp@(12) | change sign of second operand
| and fall through, so we always add
|=============================================================================
| double __adddf3(double, double);
+ FUNC(__adddf3)
SYM (__adddf3):
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
link a6,IMM (0) | everything will be done in registers
moveml d2-d7,sp@- | save all data registers and a2 (but d0-d1)
#else
andl IMM (0x80000000),d7 | isolate a's sign bit '
swap d6 | and also b's sign bit '
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
andw IMM (0x8000),d6 |
orw d6,d7 | and combine them into d7, so that a's sign '
| bit is in the high word and b's is in the '
orl d7,d0 | and put hidden bit back
Ladddf$1:
swap d4 | shift right exponent so that it starts
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrw IMM (5),d4 | in bit 0 and not bit 20
#else
lsrl IMM (5),d4 | in bit 0 and not bit 20
orl d7,d2 | and put hidden bit back
Ladddf$2:
swap d5 | shift right exponent so that it starts
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrw IMM (5),d5 | in bit 0 and not bit 20
#else
lsrl IMM (5),d5 | in bit 0 and not bit 20
| and d4-d5-d6-d7 for the second. To do this we store (temporarily) the
| exponents in a2-a3.
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml a2-a3,sp@- | save the address registers
#else
movel a2,sp@-
| Here we shift the numbers until the exponents are the same, and put
| the largest exponent in a2.
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
exg d4,a2 | get exponents back
exg d5,a3 |
cmpw d4,d5 | compare the exponents
| Here we have a's exponent larger than b's, so we have to shift b. We do
| this by using as counter d2:
1: movew d4,d2 | move largest exponent to d2
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
subw d5,d2 | and subtract second exponent
exg d4,a2 | get back the longs we saved
exg d5,a3 |
movel a4,a3
#endif
| if difference is too large we don't shift (actually, we can just exit) '
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (DBL_MANT_DIG+2),d2
#else
cmpl IMM (DBL_MANT_DIG+2),d2
#endif
bge Ladddf$b$small
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (32),d2 | if difference >= 32, shift by longs
#else
cmpl IMM (32),d2 | if difference >= 32, shift by longs
#endif
bge 5f
2:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (16),d2 | if difference >= 16, shift by words
#else
cmpl IMM (16),d2 | if difference >= 16, shift by words
bra 3f | enter dbra loop
4:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrl IMM (1),d4
roxrl IMM (1),d5
roxrl IMM (1),d6
12: lsrl IMM (1),d4
#endif
3:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
dbra d2,4b
#else
subql IMM (1),d2
movel d5,d6
movel d4,d5
movel IMM (0),d4
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
subw IMM (32),d2
#else
subl IMM (32),d2
swap d5
movew IMM (0),d4
swap d4
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
subw IMM (16),d2
#else
subl IMM (16),d2
bra 3b
9:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
exg d4,d5
movew d4,d6
subw d5,d6 | keep d5 (largest exponent) in d4
movel a4,a3
#endif
| if difference is too large we don't shift (actually, we can just exit) '
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (DBL_MANT_DIG+2),d6
#else
cmpl IMM (DBL_MANT_DIG+2),d6
#endif
bge Ladddf$a$small
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (32),d6 | if difference >= 32, shift by longs
#else
cmpl IMM (32),d6 | if difference >= 32, shift by longs
#endif
bge 5f
2:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (16),d6 | if difference >= 16, shift by words
#else
cmpl IMM (16),d6 | if difference >= 16, shift by words
bra 3f | enter dbra loop
4:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
roxrl IMM (1),d2
12: lsrl IMM (1),d0
#endif
3:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
dbra d6,4b
#else
subql IMM (1),d6
movel d1,d2
movel d0,d1
movel IMM (0),d0
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
subw IMM (32),d6
#else
subl IMM (32),d6
swap d1
movew IMM (0),d0
swap d0
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
subw IMM (16),d6
#else
subl IMM (16),d6
#endif
bra 3b
Ladddf$3:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
exg d4,a2
exg d5,a3
#else
| the signs in a4.
| Here we have to decide whether to add or subtract the numbers:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
exg d7,a0 | get the signs
exg d6,a3 | a3 is free to be used
#else
eorl d7,d6 | compare the signs
bmi Lsubdf$0 | if the signs are different we have
| to subtract
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
exg d7,a0 | else we add the numbers
exg d6,a3 |
#else
movel a0,d7 |
andl IMM (0x80000000),d7 | d7 now has the sign
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,a2-a3
#else
movel sp@+,a4
| one more bit we check this:
btst IMM (DBL_MANT_DIG+1),d0
beq 1f
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
roxrl IMM (1),d2
addl IMM (1),d4
#endif
1:
- lea Ladddf$5,a0 | to return from rounding routine
- lea SYM (_fpCCR),a1 | check the rounding mode
-#ifdef __mcf5200__
+ lea pc@(Ladddf$5),a0 | to return from rounding routine
+ PICLEA SYM (_fpCCR),a1 | check the rounding mode
+#ifdef __mcoldfire__
clrl d6
#endif
movew a1@(6),d6 | rounding mode in d6
beq Lround$to$nearest
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (ROUND_TO_PLUS),d6
#else
cmpl IMM (ROUND_TO_PLUS),d6
bra Lround$to$plus
Ladddf$5:
| Put back the exponent and check for overflow
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (0x7ff),d4 | is the exponent big?
#else
cmpl IMM (0x7ff),d4 | is the exponent big?
#endif
bge 1f
bclr IMM (DBL_MANT_DIG-1),d0
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lslw IMM (4),d4 | put exponent back into position
#else
lsll IMM (4),d4 | put exponent back into position
#endif
swap d0 |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
orw d4,d0 |
#else
orl d4,d0 |
swap d0 |
bra Ladddf$ret
1:
- movew IMM (ADD),d5
+ moveq IMM (ADD),d5
bra Ld$overflow
Lsubdf$0:
| Here we do the subtraction.
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
exg d7,a0 | put sign back in a0
exg d6,a3 |
#else
movel a2,d4 | return exponent to d4
movel a0,d7
andl IMM (0x80000000),d7 | isolate sign bit
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,a2-a3 |
#else
movel sp@+,a4
| one more bit we check this:
btst IMM (DBL_MANT_DIG+1),d0
beq 1f
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
roxrl IMM (1),d2
addl IMM (1),d4
#endif
1:
- lea Lsubdf$1,a0 | to return from rounding routine
- lea SYM (_fpCCR),a1 | check the rounding mode
-#ifdef __mcf5200__
+ lea pc@(Lsubdf$1),a0 | to return from rounding routine
+ PICLEA SYM (_fpCCR),a1 | check the rounding mode
+#ifdef __mcoldfire__
clrl d6
#endif
movew a1@(6),d6 | rounding mode in d6
beq Lround$to$nearest
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (ROUND_TO_PLUS),d6
#else
cmpl IMM (ROUND_TO_PLUS),d6
Lsubdf$1:
| Put back the exponent and sign (we don't have overflow). '
bclr IMM (DBL_MANT_DIG-1),d0
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lslw IMM (4),d4 | put exponent back into position
#else
lsll IMM (4),d4 | put exponent back into position
#endif
swap d0 |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
orw d4,d0 |
#else
orl d4,d0 |
| DBL_MANT_DIG+1) we return the other (and now we don't have to '
| check for finiteness or zero).
Ladddf$a$small:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,a2-a3
#else
movel sp@+,a4
#endif
movel a6@(16),d0
movel a6@(20),d1
- lea SYM (_fpCCR),a0
+ PICLEA SYM (_fpCCR),a0
movew IMM (0),a0@
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,d2-d7 | restore data registers
#else
moveml sp@,d2-d7
rts
Ladddf$b$small:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,a2-a3
#else
movel sp@+,a4
#endif
movel a6@(8),d0
movel a6@(12),d1
- lea SYM (_fpCCR),a0
+ PICLEA SYM (_fpCCR),a0
movew IMM (0),a0@
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,d2-d7 | restore data registers
#else
moveml sp@,d2-d7
| Return b (if a is zero)
movel d2,d0
movel d3,d1
- bra 1f
+ bne 1f | Check if b is -0
+ cmpl IMM (0x80000000),d0
+ bne 1f
+ andl IMM (0x80000000),d7 | Use the sign of a
+ clrl d0
+ bra Ladddf$ret
Ladddf$a:
movel a6@(8),d0
movel a6@(12),d1
1:
- movew IMM (ADD),d5
+ moveq IMM (ADD),d5
| Check for NaN and +/-INFINITY.
movel d0,d7 |
andl IMM (0x80000000),d7 |
bra Ld$infty |
Ladddf$ret$1:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,a2-a3 | restore regs and exit
#else
movel sp@+,a4
Ladddf$ret:
| Normal exit.
- lea SYM (_fpCCR),a0
+ PICLEA SYM (_fpCCR),a0
movew IMM (0),a0@
orl d7,d0 | put sign bit back
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,d2-d7
#else
moveml sp@,d2-d7
Ladddf$ret$den:
| Return a denormalized number.
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrl IMM (1),d0 | shift right once more
roxrl IMM (1),d1 |
#else
bra Ladddf$ret
Ladddf$nf:
- movew IMM (ADD),d5
+ moveq IMM (ADD),d5
| This could be faster but it is not worth the effort, since it is not
| executed very often. We sacrifice speed for clarity here.
movel a6@(8),d0 | get the numbers back (remember that we
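+
+/* The alignment step described above ("shift the numbers until the
+   exponents are the same"), in C terms (a sketch; the fractions are
+   really two-long quantities with guard bits, shifted 32, 16 or 1
+   bits at a time as in the loops above):
+
+   diff = exp_a - exp_b;                // assume exp_a >= exp_b, else swap
+   if (diff >= DBL_MANT_DIG + 2)        // too far apart: the sum is just a
+     return a;
+   frac_b >>= diff;                     // denormalize the smaller operand
+   frac = (sign_a == sign_b)            // same sign: add the fractions;
+          ? frac_a + frac_b             // different signs: subtract, then
+          : frac_a - frac_b;            // round and pack with exp_a
+*/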
|=============================================================================
| double __muldf3(double, double);
+ FUNC(__muldf3)
SYM (__muldf3):
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
link a6,IMM (0)
moveml d2-d7,sp@-
#else
andl d6,d0 | isolate fraction
orl IMM (0x00100000),d0 | and put hidden bit back
swap d4 | I like exponents in the first byte
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrw IMM (4),d4 |
#else
lsrl IMM (4),d4 |
andl d6,d2 |
orl IMM (0x00100000),d2 | and put hidden bit back
swap d5 |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrw IMM (4),d5 |
#else
lsrl IMM (4),d5 |
#endif
Lmuldf$2: |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
addw d5,d4 | add exponents
subw IMM (D_BIAS+1),d4 | and subtract bias (plus one)
#else
| enough to keep everything in them. So we use the address registers to keep
| some intermediate data.
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml a2-a3,sp@- | save a2 and a3 for temporary use
#else
movel a2,sp@-
movel d4,a3 | and a3 will preserve the exponent
| First, shift d2-d3 so bit 20 becomes bit 31:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
rorl IMM (5),d2 | rotate d2 5 places right
swap d2 | and swap it
rorl IMM (5),d3 | do the same thing with d3
| We use a1 as counter:
movel IMM (DBL_MANT_DIG-1),a1
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
exg d7,a1
#else
movel d7,a4
#endif
1:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
exg d7,a1 | put counter back in a1
#else
movel d7,a4
addl d7,d7 |
addxl d6,d6 |
bcc 2f | if bit clear skip the following
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
exg d7,a2 |
#else
movel d7,a4
addxl d4,d2 |
addxl d7,d1 |
addxl d7,d0 |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
exg d7,a2 |
#else
movel d7,a4
movel a4,a2
#endif
2:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
exg d7,a1 | put counter in d7
dbf d7,1b | decrement and branch
#else
#endif
movel a3,d4 | restore exponent
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,a2-a3
#else
movel sp@+,a4
swap d3
movew d3,d2
movew IMM (0),d3
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
roxrl IMM (1),d2
| Now round, check for over- and underflow, and exit.
movel a0,d7 | get sign bit back into d7
- movew IMM (MULTIPLY),d5
+ moveq IMM (MULTIPLY),d5
btst IMM (DBL_MANT_DIG+1-32),d0
beq Lround$exit
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
addw IMM (1),d4
bra Lround$exit
Lmuldf$inop:
- movew IMM (MULTIPLY),d5
+ moveq IMM (MULTIPLY),d5
bra Ld$inop
Lmuldf$b$nf:
- movew IMM (MULTIPLY),d5
+ moveq IMM (MULTIPLY),d5
movel a0,d7 | get sign bit back into d7
tstl d3 | we know d2 == 0x7ff00000, so check d3
bne Ld$inop | if d3 <> 0 b is NaN
bra Ld$overflow | else we have overflow (since a is finite)
Lmuldf$a$nf:
- movew IMM (MULTIPLY),d5
+ moveq IMM (MULTIPLY),d5
movel a0,d7 | get sign bit back into d7
tstl d1 | we know d0 == 0x7ff00000, so check d1
bne Ld$inop | if d1 <> 0 a is NaN
| If either number is zero return zero, unless the other is +/-INFINITY or
| NaN, in which case we return NaN.
Lmuldf$b$0:
- movew IMM (MULTIPLY),d5
-#ifndef __mcf5200__
+ moveq IMM (MULTIPLY),d5
+#ifndef __mcoldfire__
exg d2,d0 | put b (==0) into d0-d1
exg d3,d1 | and a (with sign bit cleared) into d2-d3
+ movel a0,d0 | set result sign
#else
- movel d2,d7
- movel d0,d2
- movel d7,d0
- movel d3,d7
+ movel d0,d2 | put a into d2-d3
movel d1,d3
- movel d7,d1
+ movel a0,d0 | put result zero into d0-d1
+ moveq IMM (0),d1
#endif
bra 1f
Lmuldf$a$0:
+ movel a0,d0 | set result sign
movel a6@(16),d2 | put b into d2-d3 again
movel a6@(20),d3 |
bclr IMM (31),d2 | clear sign bit
1: cmpl IMM (0x7ff00000),d2 | check for non-finiteness
bge Ld$inop | in case NaN or +/-INFINITY return NaN
- lea SYM (_fpCCR),a0
+ PICLEA SYM (_fpCCR),a0
movew IMM (0),a0@
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,d2-d7
#else
moveml sp@,d2-d7
andl d6,d0
1: addl d1,d1 | shift a left until bit 20 is set
addxl d0,d0 |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
subw IMM (1),d4 | and adjust exponent
#else
subl IMM (1),d4 | and adjust exponent
andl d6,d2
1: addl d3,d3 | shift b left until bit 20 is set
addxl d2,d2 |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
subw IMM (1),d5 | and adjust exponent
#else
subql IMM (1),d5 | and adjust exponent
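+
+/* The multiply loop above is plain shift-and-add over the mantissa
+   bits (a sketch; acc, a and b stand for the multi-long values kept
+   in d0-d3, a2/d4-d5 and d6-d7):
+
+   for (i = 0; i < DBL_MANT_DIG; i++)
+     {
+       acc <<= 1;                       // shift the partial product
+       bit = b & TOP_BIT;  b <<= 1;     // the addl/addxl carry above
+       if (bit)
+         acc += a;                      // conditionally add the multiplicand
+     }
+*/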
|=============================================================================
| double __divdf3(double, double);
+ FUNC(__divdf3)
SYM (__divdf3):
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
link a6,IMM (0)
moveml d2-d7,sp@-
#else
andl d6,d0 | and isolate fraction
orl IMM (0x00100000),d0 | and put hidden bit back
swap d4 | I like exponents in the first byte
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrw IMM (4),d4 |
#else
lsrl IMM (4),d4 |
andl d6,d2 |
orl IMM (0x00100000),d2
swap d5 |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrw IMM (4),d5 |
#else
lsrl IMM (4),d5 |
#endif
Ldivdf$2: |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
subw d5,d4 | subtract exponents
addw IMM (D_BIAS),d4 | and add bias
#else
bset d5,d6 | set the corresponding bit in d6
3: addl d1,d1 | shift a by 1
addxl d0,d0 |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
dbra d5,1b | and branch back
#else
subql IMM (1), d5
bset d5,d7 | set the corresponding bit in d7
3: addl d1,d1 | shift a by 1
addxl d0,d0 |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
dbra d5,1b | and branch back
#else
subql IMM (1), d5
beq 3f | if d0==d2 check d1 and d3
2: addl d1,d1 | shift a by 1
addxl d0,d0 |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
dbra d5,1b | and branch back
#else
subql IMM (1), d5
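+
+/* The quotient loops above are all steps of the same restoring
+   division (a sketch; a and b are the multi-long fractions in d0-d1
+   and d2-d3, and the quotient bits land in d6/d7 via bset):
+
+   while (bits-- > 0)
+     {
+       q <<= 1;
+       if (a >= b)                      // the cmpl chains above
+         { a -= b; q |= 1; }            // set this quotient bit
+       a <<= 1;                         // shift the dividend
+     }
+*/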
| to it; if you don't do this the algorithm loses in some cases). '
movel IMM (0),d2
movel d2,d3
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
subw IMM (DBL_MANT_DIG),d5
addw IMM (63),d5
cmpw IMM (31),d5
bhi 2f
1: bset d5,d3
bra 5f
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
subw IMM (32),d5
#else
subl IMM (32),d5
| not set:
btst IMM (DBL_MANT_DIG-32+1),d0
beq 1f
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
roxrl IMM (1),d2
1:
| Now round, check for over- and underflow, and exit.
movel a0,d7 | restore sign bit to d7
- movew IMM (DIVIDE),d5
+ moveq IMM (DIVIDE),d5
bra Lround$exit
Ldivdf$inop:
- movew IMM (DIVIDE),d5
+ moveq IMM (DIVIDE),d5
bra Ld$inop
Ldivdf$a$0:
| If a is zero check to see whether b is zero also. In that case return
| NaN; then check if b is NaN, and return NaN also in that case. Else
-| return zero.
- movew IMM (DIVIDE),d5
+| return a properly signed zero.
+ moveq IMM (DIVIDE),d5
bclr IMM (31),d2 |
movel d2,d4 |
orl d3,d4 |
blt 1f |
tstl d3 |
bne Ld$inop |
-1: movel IMM (0),d0 | else return zero
- movel d0,d1 |
- lea SYM (_fpCCR),a0 | clear exception flags
+1: movel a0,d0 | else return signed zero
+ moveq IMM (0),d1 |
+ PICLEA SYM (_fpCCR),a0 | clear exception flags
movew IMM (0),a0@ |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,d2-d7 |
#else
moveml sp@,d2-d7 |
rts |
Ldivdf$b$0:
- movew IMM (DIVIDE),d5
+ moveq IMM (DIVIDE),d5
| If we got here a is not zero. Check if a is NaN; in that case return NaN,
| else return +/-INFINITY. Remember that a is in d0 with the sign bit
| cleared already.
bra Ld$div$0 | else signal DIVIDE_BY_ZERO
Ldivdf$b$nf:
- movew IMM (DIVIDE),d5
+ moveq IMM (DIVIDE),d5
| If d2 == 0x7ff00000 we have to check d3.
tstl d3 |
bne Ld$inop | if d3 <> 0, b is NaN
bra Ld$underflow | else b is +/-INFINITY, so signal underflow
Ldivdf$a$nf:
- movew IMM (DIVIDE),d5
+ moveq IMM (DIVIDE),d5
| If d0 == 0x7ff00000 we have to check d1.
tstl d1 |
bne Ld$inop | if d1 <> 0, a is NaN
andl d6,d0
1: addl d1,d1 | shift a left until bit 20 is set
addxl d0,d0
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
subw IMM (1),d4 | and adjust exponent
#else
subl IMM (1),d4 | and adjust exponent
andl d6,d2
1: addl d3,d3 | shift b left until bit 20 is set
addxl d2,d2
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
subw IMM (1),d5 | and adjust exponent
#else
subql IMM (1),d5 | and adjust exponent
| so that 2^21 <= d0 < 2^22, and the exponent is in the lower byte of d4.
| First check for underflow in the exponent:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (-DBL_MANT_DIG-1),d4
#else
cmpl IMM (-DBL_MANT_DIG-1),d4
movel d7,a0 |
movel IMM (0),d6 | use d6-d7 to collect bits flushed right
movel d6,d7 | use d6-d7 to collect bits flushed right
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (1),d4 | if the exponent is less than 1 we
#else
cmpl IMM (1),d4 | if the exponent is less than 1 we
#endif
bge 2f | have to shift right (denormalize)
1:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
addw IMM (1),d4 | adjust the exponent
lsrl IMM (1),d0 | shift right once
roxrl IMM (1),d1 |
orl d7,d3 | the bits which were flushed right
movel a0,d7 | get back sign bit into d7
| Now call the rounding routine (which takes care of denormalized numbers):
- lea Lround$0,a0 | to return from rounding routine
- lea SYM (_fpCCR),a1 | check the rounding mode
-#ifdef __mcf5200__
+ lea pc@(Lround$0),a0 | to return from rounding routine
+ PICLEA SYM (_fpCCR),a1 | check the rounding mode
+#ifdef __mcoldfire__
clrl d6
#endif
movew a1@(6),d6 | rounding mode in d6
beq Lround$to$nearest
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (ROUND_TO_PLUS),d6
#else
cmpl IMM (ROUND_TO_PLUS),d6
| check again for underflow!). We have to check for overflow or for a
| denormalized number (which also signals underflow).
| Check for overflow (i.e., exponent >= 0x7ff).
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (0x07ff),d4
#else
cmpl IMM (0x07ff),d4
beq Ld$den
1:
| Put back the exponents and sign and return.
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lslw IMM (4),d4 | exponent back to fourth byte
#else
lsll IMM (4),d4 | exponent back to fourth byte
#endif
bclr IMM (DBL_MANT_DIG-32-1),d0
swap d0 | and put back exponent
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
orw d4,d0 |
#else
orl d4,d0 |
swap d0 |
orl d7,d0 | and sign also
- lea SYM (_fpCCR),a0
+ PICLEA SYM (_fpCCR),a0
movew IMM (0),a0@
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,d2-d7
#else
moveml sp@,d2-d7
|=============================================================================
| double __negdf2(double, double);
+ FUNC(__negdf2)
SYM (__negdf2):
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
link a6,IMM (0)
moveml d2-d7,sp@-
#else
link a6,IMM (-24)
moveml d2-d7,sp@
#endif
- movew IMM (NEGATE),d5
+ moveq IMM (NEGATE),d5
movel a6@(8),d0 | get number to negate in d0-d1
movel a6@(12),d1 |
bchg IMM (31),d0 | negate
movel d0,d7 | else get sign and return INFINITY
andl IMM (0x80000000),d7
bra Ld$infty
-1: lea SYM (_fpCCR),a0
+1: PICLEA SYM (_fpCCR),a0
movew IMM (0),a0@
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,d2-d7
#else
moveml sp@,d2-d7
LESS = -1
EQUAL = 0
-| int __cmpdf2(double, double);
-SYM (__cmpdf2):
-#ifndef __mcf5200__
+| int __cmpdf2_internal(double, double, int);
+SYM (__cmpdf2_internal):
+#ifndef __mcoldfire__
link a6,IMM (0)
moveml d2-d7,sp@- | save registers
#else
link a6,IMM (-24)
moveml d2-d7,sp@
#endif
- movew IMM (COMPARE),d5
+ moveq IMM (COMPARE),d5
movel a6@(8),d0 | get first operand
movel a6@(12),d1 |
movel a6@(16),d2 | get second operand
bclr IMM (31),d0 | and clear signs in d0 and d2
movel d2,d7 |
bclr IMM (31),d2 |
- cmpl IMM (0x7fff0000),d0 | check for a == NaN
- bhi Ld$inop | if d0 > 0x7ff00000, a is NaN
+ cmpl IMM (0x7ff00000),d0 | check for a == NaN
+ bhi Lcmpd$inop | if d0 > 0x7ff00000, a is NaN
beq Lcmpdf$a$nf | if equal can be INFINITY, so check d1
movel d0,d4 | copy into d4 to test for zero
orl d1,d4 |
beq Lcmpdf$a$0 |
Lcmpdf$0:
- cmpl IMM (0x7fff0000),d2 | check for b == NaN
- bhi Ld$inop | if d2 > 0x7ff00000, b is NaN
+ cmpl IMM (0x7ff00000),d2 | check for b == NaN
+ bhi Lcmpd$inop | if d2 > 0x7ff00000, b is NaN
beq Lcmpdf$b$nf | if equal can be INFINITY, so check d3
movel d2,d4 |
orl d3,d4 |
tstl d6
bpl 1f
| If both are negative exchange them
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
exg d0,d2
exg d1,d3
#else
bne Lcmpdf$a$gt$b | |b| < |a|
| If we got here a == b.
movel IMM (EQUAL),d0
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,d2-d7 | put back the registers
#else
moveml sp@,d2-d7
rts
Lcmpdf$a$gt$b:
movel IMM (GREATER),d0
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,d2-d7 | put back the registers
#else
moveml sp@,d2-d7
rts
Lcmpdf$b$gt$a:
movel IMM (LESS),d0
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,d2-d7 | put back the registers
#else
moveml sp@,d2-d7
bne Ld$inop
bra Lcmpdf$1
+Lcmpd$inop:
+ movl a6@(24),d0
+ moveq IMM (INEXACT_RESULT+INVALID_OPERATION),d7
+ moveq IMM (DOUBLE_FLOAT),d6
+ PICJUMP $_exception_handler
+
+| int __cmpdf2(double, double);
+ FUNC(__cmpdf2)
+SYM (__cmpdf2):
+ link a6,IMM (0)
+ pea 1
+ movl a6@(20),sp@-
+ movl a6@(16),sp@-
+ movl a6@(12),sp@-
+ movl a6@(8),sp@-
+ PICCALL SYM (__cmpdf2_internal)
+ unlk a6
+ rts
+
|=============================================================================
| rounding routines
|=============================================================================
| Normalize shifting left until bit #DBL_MANT_DIG-32 is set or the exponent
| is one (remember that a denormalized number corresponds to an
| exponent of -D_BIAS+1).
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (1),d4 | remember that the exponent is at least one
#else
cmpl IMM (1),d4 | remember that the exponent is at least one
addxl d2,d2 |
addxl d1,d1 |
addxl d0,d0 |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
dbra d4,1b |
#else
subql IMM (1), d4
addxl d2,d0
| Shift right once (because we used bit #DBL_MANT_DIG-32!).
2:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
#else
| 'fraction overflow' ...).
btst IMM (DBL_MANT_DIG-32),d0
beq 1f
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
addw IMM (1),d4
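+
+/* Shape of the shared rounding code above, for reference (a sketch):
+   each caller loads its continuation into a0 with a PC-relative lea,
+   as in "lea pc@(Ladddf$5),a0", then branches in with the fraction in
+   d0-d3, the exponent in d4 and the rounding mode from _fpCCR in d6:
+
+   while (top fraction bit clear && exp > 1)   // normalize, stopping
+     { frac <<= 1; exp--; }                    // at the denormal boundary
+   round the guard bits per d6 (to nearest, ROUND_TO_PLUS, ...);
+   if (rounding carried out: 'fraction overflow')
+     { frac >>= 1; exp++; }                    // renormalize
+*/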
.globl SYM (__divsf3)
.globl SYM (__negsf2)
.globl SYM (__cmpsf2)
+ .globl SYM (__cmpsf2_internal)
+ .hidden SYM (__cmpsf2_internal)
| These are common routines to return and signal exceptions.
Lf$den:
| Return and signal a denormalized number
orl d7,d0
- movew IMM (INEXACT_RESULT+UNDERFLOW),d7
+ moveq IMM (INEXACT_RESULT+UNDERFLOW),d7
moveq IMM (SINGLE_FLOAT),d6
- jmp $_exception_handler
+ PICJUMP $_exception_handler
Lf$infty:
Lf$overflow:
| Return a properly signed INFINITY and set the exception flags
movel IMM (INFINITY),d0
orl d7,d0
- movew IMM (INEXACT_RESULT+OVERFLOW),d7
+ moveq IMM (INEXACT_RESULT+OVERFLOW),d7
moveq IMM (SINGLE_FLOAT),d6
- jmp $_exception_handler
+ PICJUMP $_exception_handler
Lf$underflow:
| Return 0 and set the exception flags
- movel IMM (0),d0
- movew IMM (INEXACT_RESULT+UNDERFLOW),d7
+ moveq IMM (0),d0
+ moveq IMM (INEXACT_RESULT+UNDERFLOW),d7
moveq IMM (SINGLE_FLOAT),d6
- jmp $_exception_handler
+ PICJUMP $_exception_handler
Lf$inop:
| Return a quiet NaN and set the exception flags
movel IMM (QUIET_NaN),d0
- movew IMM (INEXACT_RESULT+INVALID_OPERATION),d7
+ moveq IMM (INEXACT_RESULT+INVALID_OPERATION),d7
moveq IMM (SINGLE_FLOAT),d6
- jmp $_exception_handler
+ PICJUMP $_exception_handler
Lf$div$0:
| Return a properly signed INFINITY and set the exception flags
movel IMM (INFINITY),d0
orl d7,d0
- movew IMM (INEXACT_RESULT+DIVIDE_BY_ZERO),d7
+ moveq IMM (INEXACT_RESULT+DIVIDE_BY_ZERO),d7
moveq IMM (SINGLE_FLOAT),d6
- jmp $_exception_handler
+ PICJUMP $_exception_handler
|=============================================================================
|=============================================================================
|=============================================================================
| float __subsf3(float, float);
+ FUNC(__subsf3)
SYM (__subsf3):
bchg IMM (31),sp@(8) | change sign of second operand
| and fall through
|=============================================================================
| float __addsf3(float, float);
+ FUNC(__addsf3)
SYM (__addsf3):
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
link a6,IMM (0) | everything will be done in registers
moveml d2-d7,sp@- | save all data registers but d0-d1
#else
#endif
movel a6@(8),d0 | get first operand
movel a6@(12),d1 | get second operand
- movel d0,d6 | get d0's sign bit '
+ movel d0,a0 | get d0's sign bit '
addl d0,d0 | check and clear sign bit of a
beq Laddsf$b | if zero return second operand
- movel d1,d7 | save b's sign bit '
+ movel d1,a1 | save b's sign bit '
addl d1,d1 | get rid of sign bit
beq Laddsf$a | if zero return first operand
- movel d6,a0 | save signs in address registers
- movel d7,a1 | so we can use d6 and d7
-
| Get the exponents and check for denormalized and/or infinity.
movel IMM (0x00ffffff),d4 | mask to get fraction
| same, and put the largest exponent in d6. Note that we are using two
| registers for each number (see the discussion by D. Knuth in "Seminumerical
| Algorithms").
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw d6,d7 | compare exponents
#else
cmpl d6,d7 | compare exponents
1:
subl d6,d7 | keep the largest exponent
negl d7
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrw IMM (8),d7 | put difference in lower byte
#else
lsrl IMM (8),d7 | put difference in lower byte
#endif
| if difference is too large we don't shift (actually, we can just exit) '
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (FLT_MANT_DIG+2),d7
#else
cmpl IMM (FLT_MANT_DIG+2),d7
#endif
bge Laddsf$b$small
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (16),d7 | if difference >= 16 swap
#else
cmpl IMM (16),d7 | if difference >= 16 swap
#endif
bge 4f
2:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
subw IMM (1),d7
#else
subql IMM (1), d7
#endif
3:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrl IMM (1),d2 | shift right second operand
roxrl IMM (1),d3
dbra d7,3b
swap d3
movew d3,d2
swap d2
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
subw IMM (16),d7
#else
subl IMM (16),d7
bne 2b | if still more bits, go back to normal case
bra Laddsf$3
5:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
exg d6,d7 | exchange the exponents
#else
eorl d6,d7
#endif
subl d6,d7 | keep the largest exponent
negl d7 |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrw IMM (8),d7 | put difference in lower byte
#else
lsrl IMM (8),d7 | put difference in lower byte
#endif
| if difference is too large we don't shift (and exit!) '
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (FLT_MANT_DIG+2),d7
#else
cmpl IMM (FLT_MANT_DIG+2),d7
#endif
bge Laddsf$a$small
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (16),d7 | if difference >= 16 swap
#else
cmpl IMM (16),d7 | if difference >= 16 swap
#endif
bge 8f
6:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
subw IMM (1),d7
#else
subl IMM (1),d7
#endif
7:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrl IMM (1),d0 | shift right first operand
roxrl IMM (1),d1
dbra d7,7b
swap d1
movew d1,d0
swap d0
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
subw IMM (16),d7
#else
subl IMM (16),d7
Laddsf$3:
| Here we have to decide whether to add or subtract the numbers
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
exg d6,a0 | get signs back
exg d7,a1 | and save the exponents
#else
| numbers
| Here we have both positive or both negative
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
exg d6,a0 | now we have the exponent in d6
#else
movel d6,d4
| Put the exponent, in the first byte, in d2, to use the "standard" rounding
| routines:
movel d6,d2
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrw IMM (8),d2
#else
lsrl IMM (8),d2
| one more bit we check this:
btst IMM (FLT_MANT_DIG+1),d0
beq 1f
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
#else
#endif
addl IMM (1),d2
1:
- lea Laddsf$4,a0 | to return from rounding routine
- lea SYM (_fpCCR),a1 | check the rounding mode
-#ifdef __mcf5200__
+ lea pc@(Laddsf$4),a0 | to return from rounding routine
+ PICLEA SYM (_fpCCR),a1 | check the rounding mode
+#ifdef __mcoldfire__
clrl d6
#endif
movew a1@(6),d6 | rounding mode in d6
beq Lround$to$nearest
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (ROUND_TO_PLUS),d6
#else
cmpl IMM (ROUND_TO_PLUS),d6
bra Lround$to$plus
Laddsf$4:
| Put back the exponent, but check for overflow.
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (0xff),d2
#else
cmpl IMM (0xff),d2
#endif
bhi 1f
bclr IMM (FLT_MANT_DIG-1),d0
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lslw IMM (7),d2
#else
lsll IMM (7),d2
orl d2,d0
bra Laddsf$ret
1:
- movew IMM (ADD),d5
+ moveq IMM (ADD),d5
bra Lf$overflow
Lsubsf$0:
negl d1
negxl d0
1:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
exg d2,a0 | now we have the exponent in d2
lsrw IMM (8),d2 | put it in the first byte
#else
| Note that we do not have to normalize, since in the subtraction bit
| #FLT_MANT_DIG+1 is never set, and denormalized numbers are handled by
| the rounding routines themselves.
- lea Lsubsf$1,a0 | to return from rounding routine
- lea SYM (_fpCCR),a1 | check the rounding mode
-#ifdef __mcf5200__
+ lea pc@(Lsubsf$1),a0 | to return from rounding routine
+ PICLEA SYM (_fpCCR),a1 | check the rounding mode
+#ifdef __mcoldfire__
clrl d6
#endif
movew a1@(6),d6 | rounding mode in d6
beq Lround$to$nearest
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (ROUND_TO_PLUS),d6
#else
cmpl IMM (ROUND_TO_PLUS),d6
Lsubsf$1:
| Put back the exponent (we can't have overflow!). '
bclr IMM (FLT_MANT_DIG-1),d0
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lslw IMM (7),d2
#else
lsll IMM (7),d2
| check for finiteness or zero).
Laddsf$a$small:
movel a6@(12),d0
- lea SYM (_fpCCR),a0
+ PICLEA SYM (_fpCCR),a0
movew IMM (0),a0@
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,d2-d7 | restore data registers
#else
moveml sp@,d2-d7
Laddsf$b$small:
movel a6@(8),d0
- lea SYM (_fpCCR),a0
+ PICLEA SYM (_fpCCR),a0
movew IMM (0),a0@
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,d2-d7 | restore data registers
#else
moveml sp@,d2-d7
Laddsf$b:
| Return b (if a is zero).
movel a6@(12),d0
- bra 1f
+ cmpl IMM (0x80000000),d0 | Check if b is -0
+ bne 1f
+ movel a0,d7
+ andl IMM (0x80000000),d7 | Use the sign of a
+ clrl d0
+ bra Laddsf$ret
Laddsf$a:
| Return a (if b is zero).
movel a6@(8),d0
1:
- movew IMM (ADD),d5
+ moveq IMM (ADD),d5
| We have to check for NaN and +/-infty.
movel d0,d7
andl IMM (0x80000000),d7 | put sign in d7
Laddsf$ret:
| Normal exit (a and b nonzero, result is not NaN nor +/-infty).
| We have to clear the exception flags (just the exception type).
- lea SYM (_fpCCR),a0
+ PICLEA SYM (_fpCCR),a0
movew IMM (0),a0@
orl d7,d0 | put sign bit
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,d2-d7 | restore data registers
#else
moveml sp@,d2-d7
| NaN, but if it is finite we return INFINITY with the corresponding sign.
Laddsf$nf:
- movew IMM (ADD),d5
+ moveq IMM (ADD),d5
| This could be faster but it is not worth the effort, since it is not
| executed very often. We sacrifice speed for clarity here.
movel a6@(8),d0 | get the numbers back (remember that we
|=============================================================================
| float __mulsf3(float, float);
+ FUNC(__mulsf3)
SYM (__mulsf3):
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
link a6,IMM (0)
moveml d2-d7,sp@-
#else
andl d5,d0 | and isolate fraction
orl d4,d0 | and put hidden bit back
swap d2 | I like exponents in the first byte
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrw IMM (7),d2 |
#else
lsrl IMM (7),d2 |
andl d5,d1 |
orl d4,d1 |
swap d3 |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrw IMM (7),d3 |
#else
lsrl IMM (7),d3 |
#endif
Lmulsf$2: |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
addw d3,d2 | add exponents
subw IMM (F_BIAS+1),d2 | and subtract bias (plus one)
#else
lsll IMM (31-FLT_MANT_DIG+1),d6
| Start the loop (we loop #FLT_MANT_DIG times):
- movew IMM (FLT_MANT_DIG-1),d3
+ moveq IMM (FLT_MANT_DIG-1),d3
1: addl d1,d1 | shift sum
addxl d0,d0
lsll IMM (1),d6 | get bit bn
addl d5,d1 | add a
addxl d4,d0
2:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
dbf d3,1b | loop back
#else
subql IMM (1),d3
| Now we have the product in d0-d1, with bit (FLT_MANT_DIG - 1) + FLT_MANT_DIG
| (mod 32) of d0 set. The first thing to do now is to normalize it so bit
| FLT_MANT_DIG is set (to do the rounding).
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
rorl IMM (6),d1
swap d1
movew d1,d3
lsll IMM (8),d0
addl d0,d0
addl d0,d0
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
orw d3,d0
#else
orl d3,d0
#endif
- movew IMM (MULTIPLY),d5
+ moveq IMM (MULTIPLY),d5
btst IMM (FLT_MANT_DIG+1),d0
beq Lround$exit
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrl IMM (1),d0
roxrl IMM (1),d1
addw IMM (1),d2
bra Lround$exit
Lmulsf$inop:
- movew IMM (MULTIPLY),d5
+ moveq IMM (MULTIPLY),d5
bra Lf$inop
Lmulsf$overflow:
- movew IMM (MULTIPLY),d5
+ moveq IMM (MULTIPLY),d5
bra Lf$overflow
Lmulsf$inf:
- movew IMM (MULTIPLY),d5
+ moveq IMM (MULTIPLY),d5
| If either is NaN return NaN; else both are (maybe infinite) numbers, so
| return INFINITY with the correct sign (which is in d7).
cmpl d6,d1 | is b NaN?
| or NaN, in which case we return NaN.
Lmulsf$b$0:
| Here d1 (==b) is zero.
- movel d1,d0 | put b into d0 (just a zero)
movel a6@(8),d1 | get a again to check for non-finiteness
bra 1f
Lmulsf$a$0:
1: bclr IMM (31),d1 | clear sign bit
cmpl IMM (INFINITY),d1 | and check for a large exponent
bge Lf$inop | if b is +/-INFINITY or NaN return NaN
- lea SYM (_fpCCR),a0 | else return zero
+ movel d7,d0 | else return signed zero
+ PICLEA SYM (_fpCCR),a0 |
movew IMM (0),a0@ |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,d2-d7 |
#else
moveml sp@,d2-d7
movel IMM (1),d2
andl d5,d0
1: addl d0,d0 | shift a left (until bit 23 is set)
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
subw IMM (1),d2 | and adjust exponent
#else
subql IMM (1),d2 | and adjust exponent
movel IMM (1),d3
andl d5,d1
1: addl d1,d1 | shift b left until bit 23 is set
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
subw IMM (1),d3 | and adjust exponent
#else
- subl IMM (1),d3 | and adjust exponent
+ subql IMM (1),d3 | and adjust exponent
#endif
btst IMM (FLT_MANT_DIG-1),d1
bne Lmulsf$2 |
|=============================================================================
| float __divsf3(float, float);
+ FUNC(__divsf3)
SYM (__divsf3):
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
link a6,IMM (0)
moveml d2-d7,sp@-
#else
andl d5,d0 | and isolate fraction
orl d4,d0 | and put hidden bit back
swap d2 | I like exponents in the first byte
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrw IMM (7),d2 |
#else
lsrl IMM (7),d2 |
andl d5,d1 |
orl d4,d1 |
swap d3 |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lsrw IMM (7),d3 |
#else
lsrl IMM (7),d3 |
#endif
Ldivsf$2: |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
subw d3,d2 | subtract exponents
addw IMM (F_BIAS),d2 | and add bias
#else
movel IMM (0),d6 |
movel d6,d7
- movew IMM (FLT_MANT_DIG+1),d3
+ moveq IMM (FLT_MANT_DIG+1),d3
1: cmpl d0,d1 | is a < b?
bhi 2f |
bset d3,d6 | set a bit in d6
subl d1,d0 | if a >= b a <-- a-b
beq 3f | if a is zero, exit
2: addl d0,d0 | multiply a by 2
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
dbra d3,1b
#else
subql IMM (1),d3
#endif
| Now we keep going to set the sticky bit ...
- movew IMM (FLT_MANT_DIG),d3
+ moveq IMM (FLT_MANT_DIG),d3
1: cmpl d0,d1
ble 2f
addl d0,d0
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
dbra d3,1b
#else
subql IMM(1),d3
movel IMM (0),d1
bra 3f
2: movel IMM (0),d1
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
subw IMM (FLT_MANT_DIG),d3
addw IMM (31),d3
#else
btst IMM (FLT_MANT_DIG+1),d0
beq 1f | if it is not set, then bit 24 is set
lsrl IMM (1),d0 |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
addw IMM (1),d2 |
#else
addl IMM (1),d2 |
#endif
1:
| Now round, check for over- and underflow, and exit.
- movew IMM (DIVIDE),d5
+ moveq IMM (DIVIDE),d5
bra Lround$exit
Ldivsf$inop:
- movew IMM (DIVIDE),d5
+ moveq IMM (DIVIDE),d5
bra Lf$inop
Ldivsf$overflow:
- movew IMM (DIVIDE),d5
+ moveq IMM (DIVIDE),d5
bra Lf$overflow
Ldivsf$underflow:
- movew IMM (DIVIDE),d5
+ moveq IMM (DIVIDE),d5
bra Lf$underflow
Ldivsf$a$0:
- movew IMM (DIVIDE),d5
+ moveq IMM (DIVIDE),d5
| If a is zero check to see whether b is zero also. In that case return
| NaN; then check if b is NaN, and return NaN also in that case. Else
-| return zero.
+| return a properly signed zero.
andl IMM (0x7fffffff),d1 | clear sign bit and test b
beq Lf$inop | if b is also zero return NaN
cmpl IMM (INFINITY),d1 | check for NaN
bhi Lf$inop |
- movel IMM (0),d0 | else return zero
- lea SYM (_fpCCR),a0 |
+ movel d7,d0 | else return signed zero
+ PICLEA SYM (_fpCCR),a0 |
movew IMM (0),a0@ |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,d2-d7 |
#else
moveml sp@,d2-d7 |
rts |
Ldivsf$b$0:
- movew IMM (DIVIDE),d5
+ moveq IMM (DIVIDE),d5
| If we got here a is not zero. Check if a is NaN; in that case return NaN,
| else return +/-INFINITY. Remember that a is in d0 with the sign bit
| cleared already.
bra Lf$div$0 | else signal DIVIDE_BY_ZERO
Ldivsf$inf:
- movew IMM (DIVIDE),d5
+ moveq IMM (DIVIDE),d5
| If a is INFINITY we have to check b
cmpl IMM (INFINITY),d1 | compare b with INFINITY
bge Lf$inop | if b is NaN or INFINITY return NaN
movel IMM (1),d2
andl d5,d0
1: addl d0,d0 | shift a left until bit FLT_MANT_DIG-1 is set
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
subw IMM (1),d2 | and adjust exponent
#else
subl IMM (1),d2 | and adjust exponent
movel IMM (1),d3
andl d5,d1
1: addl d1,d1 | shift b left until bit FLT_MANT_DIG is set
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
subw IMM (1),d3 | and adjust exponent
#else
subl IMM (1),d3 | and adjust exponent
| This is a common exit point for __mulsf3 and __divsf3.
| First check for underflow in the exponent:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (-FLT_MANT_DIG-1),d2
#else
cmpl IMM (-FLT_MANT_DIG-1),d2
| exponent until it becomes 1 or the fraction is zero (in the latter case
| we signal underflow and return zero).
movel IMM (0),d6 | d6 is used temporarily
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (1),d2 | if the exponent is less than 1 we
#else
cmpl IMM (1),d2 | if the exponent is less than 1 we
#endif
bge 2f | have to shift right (denormalize)
1:
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
addw IMM (1),d2 | adjust the exponent
lsrl IMM (1),d0 | shift right once
roxrl IMM (1),d1 |
2: orl d6,d1 | this is a trick so we don't lose '
| the extra bits which were flushed right
| Now call the rounding routine (which takes care of denormalized numbers):
- lea Lround$0,a0 | to return from rounding routine
- lea SYM (_fpCCR),a1 | check the rounding mode
-#ifdef __mcf5200__
+ lea pc@(Lround$0),a0 | to return from rounding routine
+ PICLEA SYM (_fpCCR),a1 | check the rounding mode
+#ifdef __mcoldfire__
clrl d6
#endif
movew a1@(6),d6 | rounding mode in d6
beq Lround$to$nearest
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (ROUND_TO_PLUS),d6
#else
cmpl IMM (ROUND_TO_PLUS),d6
| check again for underflow!). We have to check for overflow or for a
| denormalized number (which also signals underflow).
| Check for overflow (i.e., exponent >= 255).
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (0x00ff),d2
#else
cmpl IMM (0x00ff),d2
beq Lf$den
1:
| Put back the exponents and sign and return.
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
lslw IMM (7),d2 | exponent back to fourth byte
#else
lsll IMM (7),d2 | exponent back to fourth byte
#endif
bclr IMM (FLT_MANT_DIG-1),d0
swap d0 | and put back exponent
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
orw d2,d0 |
#else
orl d2,d0
swap d0 |
orl d7,d0 | and sign also
- lea SYM (_fpCCR),a0
+ PICLEA SYM (_fpCCR),a0
movew IMM (0),a0@
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,d2-d7
#else
moveml sp@,d2-d7
| and +/-INFINITY.
| float __negsf2(float);
+ FUNC(__negsf2)
SYM (__negsf2):
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
link a6,IMM (0)
moveml d2-d7,sp@-
#else
link a6,IMM (-24)
moveml d2-d7,sp@
#endif
- movew IMM (NEGATE),d5
+ moveq IMM (NEGATE),d5
movel a6@(8),d0 | get number to negate in d0
bchg IMM (31),d0 | negate
movel d0,d1 | make a positive copy
movel d0,d7 | else get sign and return INFINITY
andl IMM (0x80000000),d7
bra Lf$infty
-1: lea SYM (_fpCCR),a0
+1: PICLEA SYM (_fpCCR),a0
movew IMM (0),a0@
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,d2-d7
#else
moveml sp@,d2-d7
LESS = -1
EQUAL = 0
-| int __cmpsf2(float, float);
-SYM (__cmpsf2):
-#ifndef __mcf5200__
+| int __cmpsf2_internal(float, float, int);
+SYM (__cmpsf2_internal):
+#ifndef __mcoldfire__
link a6,IMM (0)
moveml d2-d7,sp@- | save registers
#else
link a6,IMM (-24)
moveml d2-d7,sp@
#endif
- movew IMM (COMPARE),d5
+ moveq IMM (COMPARE),d5
movel a6@(8),d0 | get first operand
movel a6@(12),d1 | get second operand
| Check if either is NaN, and in that case return garbage and signal
andl IMM (0x7fffffff),d0
beq Lcmpsf$a$0
cmpl IMM (0x7f800000),d0
- bhi Lf$inop
+ bhi Lcmpf$inop
Lcmpsf$1:
movel d1,d7
andl IMM (0x7fffffff),d1
beq Lcmpsf$b$0
cmpl IMM (0x7f800000),d1
- bhi Lf$inop
+ bhi Lcmpf$inop
Lcmpsf$2:
| Check the signs
eorl d6,d7
tstl d6
bpl 1f
| If both are negative exchange them
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
exg d0,d1
#else
movel d0,d7
bne Lcmpsf$a$gt$b | |b| < |a|
| If we got here a == b.
movel IMM (EQUAL),d0
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,d2-d7 | put back the registers
#else
moveml sp@,d2-d7
rts
Lcmpsf$a$gt$b:
movel IMM (GREATER),d0
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,d2-d7 | put back the registers
#else
moveml sp@,d2-d7
rts
Lcmpsf$b$gt$a:
movel IMM (LESS),d0
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
moveml sp@+,d2-d7 | put back the registers
#else
moveml sp@,d2-d7
bclr IMM (31),d7
bra Lcmpsf$2
+Lcmpf$inop:
+ movl a6@(16),d0
+ moveq IMM (INEXACT_RESULT+INVALID_OPERATION),d7
+ moveq IMM (SINGLE_FLOAT),d6
+ PICJUMP $_exception_handler
+
+| int __cmpsf2(float, float);
+ FUNC(__cmpsf2)
+SYM (__cmpsf2):
+ link a6,IMM (0)
+ pea 1
+ movl a6@(12),sp@-
+ movl a6@(8),sp@-
+ PICCALL SYM (__cmpsf2_internal)
+ unlk a6
+ rts
+
|=============================================================================
| rounding routines
|=============================================================================
| Normalize shifting left until bit #FLT_MANT_DIG is set or the exponent
| is one (remember that a denormalized number corresponds to an
| exponent of -F_BIAS+1).
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
cmpw IMM (1),d2 | remember that the exponent is at least one
#else
cmpl IMM (1),d2 | remember that the exponent is at least one
beq 2f | an exponent of one means denormalized
addl d1,d1 | else shift and adjust the exponent
addxl d0,d0 |
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
dbra d2,1b |
#else
subql IMM (1),d2
btst IMM (FLT_MANT_DIG),d0
beq 1f
lsrl IMM (1),d0
-#ifndef __mcf5200__
+#ifndef __mcoldfire__
addw IMM (1),d2
#else
addql IMM (1),d2
| simply calls __cmpdf2. It would be more efficient to give the
| __cmpdf2 routine several names, but separating them out will make it
| easier to write efficient versions of these routines someday.
+| If the operands compare unordered, __gtdf2 and __gedf2 return -1.
+| The other routines return 1.
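+|
+| In C terms each wrapper is just (a sketch):
+|
+|   int __gtdf2 (double a, double b) { return __cmpdf2_internal (a, b, -1); }
+|   int __ledf2 (double a, double b) { return __cmpdf2_internal (a, b, 1); }
+|
+| where __cmpdf2_internal returns its third argument when either operand
+| is a NaN (see Lcmpd$inop above).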
#ifdef L_eqdf2
.text
- .proc
+ FUNC(__eqdf2)
.globl SYM (__eqdf2)
SYM (__eqdf2):
link a6,IMM (0)
+ pea 1
movl a6@(20),sp@-
movl a6@(16),sp@-
movl a6@(12),sp@-
movl a6@(8),sp@-
- jbsr SYM (__cmpdf2)
+ PICCALL SYM (__cmpdf2_internal)
unlk a6
rts
#endif /* L_eqdf2 */
#ifdef L_nedf2
.text
- .proc
+ FUNC(__nedf2)
.globl SYM (__nedf2)
SYM (__nedf2):
link a6,IMM (0)
+ pea 1
movl a6@(20),sp@-
movl a6@(16),sp@-
movl a6@(12),sp@-
movl a6@(8),sp@-
- jbsr SYM (__cmpdf2)
+ PICCALL SYM (__cmpdf2_internal)
unlk a6
rts
#endif /* L_nedf2 */
#ifdef L_gtdf2
.text
- .proc
+ FUNC(__gtdf2)
.globl SYM (__gtdf2)
SYM (__gtdf2):
link a6,IMM (0)
+ pea -1
movl a6@(20),sp@-
movl a6@(16),sp@-
movl a6@(12),sp@-
movl a6@(8),sp@-
- jbsr SYM (__cmpdf2)
+ PICCALL SYM (__cmpdf2_internal)
unlk a6
rts
#endif /* L_gtdf2 */
#ifdef L_gedf2
.text
- .proc
+ FUNC(__gedf2)
.globl SYM (__gedf2)
SYM (__gedf2):
link a6,IMM (0)
+ pea -1
movl a6@(20),sp@-
movl a6@(16),sp@-
movl a6@(12),sp@-
movl a6@(8),sp@-
- jbsr SYM (__cmpdf2)
+ PICCALL SYM (__cmpdf2_internal)
unlk a6
rts
#endif /* L_gedf2 */
#ifdef L_ltdf2
.text
- .proc
+ FUNC(__ltdf2)
.globl SYM (__ltdf2)
SYM (__ltdf2):
link a6,IMM (0)
+ pea 1
movl a6@(20),sp@-
movl a6@(16),sp@-
movl a6@(12),sp@-
movl a6@(8),sp@-
- jbsr SYM (__cmpdf2)
+ PICCALL SYM (__cmpdf2_internal)
unlk a6
rts
#endif /* L_ltdf2 */
#ifdef L_ledf2
.text
- .proc
+ FUNC(__ledf2)
.globl SYM (__ledf2)
SYM (__ledf2):
link a6,IMM (0)
+ pea 1
movl a6@(20),sp@-
movl a6@(16),sp@-
movl a6@(12),sp@-
movl a6@(8),sp@-
- jbsr SYM (__cmpdf2)
+ PICCALL SYM (__cmpdf2_internal)
unlk a6
rts
#endif /* L_ledf2 */
#ifdef L_eqsf2
.text
- .proc
+ FUNC(__eqsf2)
.globl SYM (__eqsf2)
SYM (__eqsf2):
link a6,IMM (0)
+ pea 1
movl a6@(12),sp@-
movl a6@(8),sp@-
- jbsr SYM (__cmpsf2)
+ PICCALL SYM (__cmpsf2_internal)
unlk a6
rts
#endif /* L_eqsf2 */
#ifdef L_nesf2
.text
- .proc
+ FUNC(__nesf2)
.globl SYM (__nesf2)
SYM (__nesf2):
link a6,IMM (0)
+ pea 1
movl a6@(12),sp@-
movl a6@(8),sp@-
- jbsr SYM (__cmpsf2)
+ PICCALL SYM (__cmpsf2_internal)
unlk a6
rts
#endif /* L_nesf2 */
#ifdef L_gtsf2
.text
- .proc
+ FUNC(__gtsf2)
.globl SYM (__gtsf2)
SYM (__gtsf2):
link a6,IMM (0)
+ pea -1
movl a6@(12),sp@-
movl a6@(8),sp@-
- jbsr SYM (__cmpsf2)
+ PICCALL SYM (__cmpsf2_internal)
unlk a6
rts
#endif /* L_gtsf2 */
#ifdef L_gesf2
.text
- .proc
+ FUNC(__gesf2)
.globl SYM (__gesf2)
SYM (__gesf2):
link a6,IMM (0)
+ pea -1
movl a6@(12),sp@-
movl a6@(8),sp@-
- jbsr SYM (__cmpsf2)
+ PICCALL SYM (__cmpsf2_internal)
unlk a6
rts
#endif /* L_gesf2 */
#ifdef L_ltsf2
.text
- .proc
+ FUNC(__ltsf2)
.globl SYM (__ltsf2)
SYM (__ltsf2):
link a6,IMM (0)
+ pea 1
movl a6@(12),sp@-
movl a6@(8),sp@-
- jbsr SYM (__cmpsf2)
+ PICCALL SYM (__cmpsf2_internal)
unlk a6
rts
#endif /* L_ltsf2 */
#ifdef L_lesf2
.text
- .proc
+ FUNC(__lesf2)
.globl SYM (__lesf2)
SYM (__lesf2):
link a6,IMM (0)
+ pea 1
movl a6@(12),sp@-
movl a6@(8),sp@-
- jbsr SYM (__cmpsf2)
+ PICCALL SYM (__cmpsf2_internal)
unlk a6
rts
#endif /* L_lesf2 */
+
+#if defined (__ELF__) && defined (__linux__)
+ /* Make stack non-executable for ELF linux targets. */
+ .section .note.GNU-stack,"",@progbits
+#endif