;; Machine description the Motorola MCore
-;; Copyright (C) 1993, 1999, 2000 Free Software Foundation, Inc.
+;; Copyright (C) 1993, 1999, 2000, 2004, 2005, 2007
+;; Free Software Foundation, Inc.
;; Contributed by Motorola.
-;; This file is part of GNU CC.
+;; This file is part of GCC.
-;; GNU CC is free software; you can redistribute it and/or modify
+;; GCC is free software; you can redistribute it and/or modify
;; it under the terms of the GNU General Public License as published by
-;; the Free Software Foundation; either version 2, or (at your option)
+;; the Free Software Foundation; either version 3, or (at your option)
;; any later version.
-;; GNU CC is distributed in the hope that it will be useful,
+;; GCC is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
;; GNU General Public License for more details.
;; You should have received a copy of the GNU General Public License
-;; along with GNU CC; see the file COPYING. If not, write to
-;; the Free Software Foundation, 59 Temple Place - Suite 330,
-;; Boston, MA 02111-1307, USA.
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
;;- See file "rtl.def" for documentation on define_insn, match_*, et. al.
;; calculations and the literal table placement into the assembler,
;; where their interactions can be managed in a single place.
-; All MCORE instructions are two bytes long.
+;; All MCORE instructions are two bytes long.
(define_attr "length" "" (const_int 2))
-;; (define_function_unit {name} {num-units} {n-users} {test}
-;; {ready-delay} {issue-delay} [{conflict-list}])
-
+;; Scheduling. We only model a simple load latency.
+(define_insn_reservation "any_insn" 1
+ (eq_attr "type" "!load")
+ "nothing")
+(define_insn_reservation "memory" 2
+ (eq_attr "type" "load")
+ "nothing")
-(define_function_unit "memory" 1 1 (eq_attr "type" "load") 2 0)
+(include "predicates.md")
;; -------------------------------------------------------------------------
;; Test and bit test
;; ; This is done to allow bit field masks to fold together in combine.
;; ; The reload phase will force the immediate into a register at the
;; ; very end. This helps in some cases, but hurts in others: we'd
-;; ; really like to cse these immediates. However, there is an phase
+;; ; really like to cse these immediates. However, there is a phase
;; ; ordering problem here. cse picks up individual masks and cse's
;; ; those, but not folded masks (cse happens before combine). It's
;; ; not clear what the best solution is because we really want cse
(define_split
[(parallel[
(set (reg:CC 17)
- (ne:CC (ne:SI (leu:CC (match_operand:SI 0 "mcore_arith_reg_operand" "r")
- (match_operand:SI 1 "mcore_arith_reg_operand" "r"))
+ (ne:CC (ne:SI (leu:CC (match_operand:SI 0 "mcore_arith_reg_operand" "")
+ (match_operand:SI 1 "mcore_arith_reg_operand" ""))
(const_int 0))
(const_int 0)))
- (clobber (match_operand:CC 2 "mcore_arith_reg_operand" "=r"))])]
+ (clobber (match_operand:CC 2 "mcore_arith_reg_operand" ""))])]
""
[(set (reg:CC 17) (ne:SI (match_dup 0) (const_int 0)))
(set (reg:CC 17) (leu:CC (match_dup 0) (match_dup 1)))])
if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) < 0
&& ! mcore_arith_S_operand (operands[2]))
{
- int not_value = ~ INTVAL (operands[2]);
+ HOST_WIDE_INT not_value = ~ INTVAL (operands[2]);
+
if ( CONST_OK_FOR_I (not_value)
|| CONST_OK_FOR_M (not_value)
|| CONST_OK_FOR_N (not_value))
case 2: return \"and %0,%1\";
/* case -1: return \"bclri %0,%Q2\"; will not happen */
case 3: return mcore_output_bclri (operands[0], INTVAL (operands[2]));
- default: abort ();
+ default: gcc_unreachable ();
}
}")
case 1: return \"andi %0,%2\";
case 2: return \"and %0,%1\";
case 3: return mcore_output_bclri (operands[0], INTVAL (operands[2]));
- default: abort ();
+ default: gcc_unreachable ();
}
}")
case 0: return \"or %0,%2\";
case 1: return \"bseti %0,%P2\";
case 2: return mcore_output_bseti (operands[0], INTVAL (operands[2]));
- default: abort ();
+ default: gcc_unreachable ();
}
}")
case 0: return \"or %0,%2\";
case 1: return \"bseti %0,%P2\";
case 2: return mcore_output_bseti (operands[0], INTVAL (operands[2]));
- default: abort ();
+ default: gcc_unreachable ();
}
}")
;;
;; Other sizes may be handy for indexing.
;; the tradeoffs to consider when adding these are
-;; codesize, execution time [vs. mul it is easy to win],
+;; code size, execution time [vs. mul it is easy to win],
;; and register pressure -- these patterns don't use an extra
;; register to build the offset from the base
;; and whether the compiler will not come up with some other idiom.
/* Convert adds to subtracts if this makes loading the constant cheaper.
But only if we are allowed to generate new pseudos. */
if (! (reload_in_progress || reload_completed)
- && GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) < -32)
+ && GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) < -32)
{
- int neg_value = - INTVAL (operands[2]);
+ HOST_WIDE_INT neg_value = - INTVAL (operands[2]);
+
if ( CONST_OK_FOR_I (neg_value)
|| CONST_OK_FOR_M (neg_value)
|| CONST_OK_FOR_N (neg_value))
;; || (INTVAL (operands[2]) < -32 && INTVAL(operands[2]) >= -64))"
;; "*
;; {
-;; int n = INTVAL(operands[2]);
+;; HOST_WIDE_INT n = INTVAL(operands[2]);
;; if (n > 0)
;; {
;; operands[2] = GEN_INT(n - 32);
;; || (INTVAL (operands[2]) < -32 && INTVAL(operands[2]) >= -64))"
;; "*
;; {
-;; int n = INTVAL(operands[2]);
+;; HOST_WIDE_INT n = INTVAL(operands[2]);
;; if ( n > 0)
;; {
;; operands[2] = GEN_INT( n - 32);
&& INTVAL (operands[2]) > 0 && ! (INTVAL (operands[2]) & 0x80000000)"
"*
{
- if (GET_MODE (operands[2]) != SImode)
- abort ();
+ gcc_assert (GET_MODE (operands[2]) == SImode);
if (TARGET_LITTLE_END)
return \"addu %0,%2\;cmphs %0,%2\;incf %R0\";
return \"addu %R0,%2\;cmphs %R0,%2\;incf %0\";
{
if (GET_CODE (operands[0]) == MEM)
operands[1] = force_reg (SImode, operands[1]);
- else if (CONSTANT_P (operands[1])
- && (GET_CODE (operands[1]) != CONST_INT
- || ( ! CONST_OK_FOR_I (INTVAL (operands[1]))
- && ! CONST_OK_FOR_M (INTVAL (operands[1]))
- && ! CONST_OK_FOR_N (INTVAL (operands[1]))
- && (! TARGET_HARDLIT ||
- ! mcore_const_ok_for_inline (INTVAL (operands[1])))))
- && ! reload_completed
- && ! reload_in_progress
- && GET_CODE (operands[0]) == REG
- && REGNO (operands[0]) < FIRST_PSEUDO_REGISTER
- && (REGNO (operands[0]) == STACK_POINTER_REGNUM
- || REGNO (operands[0]) == LK_REG))
- operands[1] = force_reg (SImode, operands[1]);
}")
-;;; Must put a/i before r/r so that it will be preferred when the dest is
-;;; a hard register. Must put a/R before r/m.
-;;; DO WE NEED a/i ANYMORE?
-
(define_insn ""
- [(set (match_operand:SI 0 "mcore_general_movdst_operand" "=r,r,r,a,r,r,a,r,m")
- (match_operand:SI 1 "mcore_general_movsrc_operand" "I,M,N,i,r,c,R,m,r"))]
+ [(set (match_operand:SI 0 "mcore_general_movdst_operand" "=r,r,a,r,a,r,m")
+ (match_operand:SI 1 "mcore_general_movsrc_operand" "r,P,i,c,R,m,r"))]
"(register_operand (operands[0], SImode)
- || register_operand (operands[1], SImode))
- && ! (CONSTANT_P (operands[1])
- && (GET_CODE (operands[1]) != CONST_INT
- || ( ! CONST_OK_FOR_I (INTVAL (operands[1]))
- && ! CONST_OK_FOR_M (INTVAL (operands[1]))
- && ! CONST_OK_FOR_N (INTVAL (operands[1]))))
- && GET_CODE (operands[0]) == REG
- && REGNO (operands[0]) < FIRST_PSEUDO_REGISTER
- && (REGNO (operands[0]) == STACK_POINTER_REGNUM
- || REGNO (operands[0]) == LK_REG))"
+ || register_operand (operands[1], SImode))"
"* return mcore_output_move (insn, operands, SImode);"
- [(set_attr "type" "move,move,move,move,move,move,load,load,store")])
+ [(set_attr "type" "move,move,move,move,load,load,store")])
-;; This is to work around a bug in reload.
-(define_insn ""
- [(set (match_operand:SI 0 "register_operand" "=r")
- (match_operand:SI 1 "immediate_operand" "i"))]
- "((reload_in_progress || reload_completed)
- && CONSTANT_P (operands[1])
- && GET_CODE (operands[1]) == CONST_INT
- && ! CONST_OK_FOR_I (INTVAL (operands[1]))
- && ! CONST_OK_FOR_M (INTVAL (operands[1]))
- && ! CONST_OK_FOR_N (INTVAL (operands[1]))
- && GET_CODE (operands[0]) == REG
- && REGNO (operands[0]) == LK_REG)"
- "* return mcore_output_inline_const_forced (insn, operands, SImode);"
- [(set_attr "type" "load")])
-
-;; (define_expand "reload_insi"
-;; [(parallel [(match_operand:SI 0 "register_operand" "=r")
-;; (match_operand:SI 1 "general_operand" "")
-;; (match_operand:DI 2 "register_operand" "=&r")])]
-;; ""
-;; "
-;; {
-;; if (CONSTANT_P (operands[1])
-;; && GET_CODE (operands[1]) == CONST_INT
-;; && ! CONST_OK_FOR_I (INTVAL (operands[1]))
-;; && ! CONST_OK_FOR_M (INTVAL (operands[1]))
-;; && ! CONST_OK_FOR_N (INTVAL (operands[1]))
-;; && GET_CODE (operands[0]) == REG
-;; && (REGNO (operands[0]) == STACK_POINTER_REGNUM
-;; || REGNO (operands[0]) == LK_REG))
-;; {
-;; rtx tmp;
-;;
-;; if ( REGNO (operands[2]) == REGNO (operands[0])
-;; || REGNO (operands[2]) == STACK_POINTER_REGNUM
-;; || REGNO (operands[2]) == LK_REG)
-;; tmp = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
-;; else
-;; tmp = gen_rtx_REG (SImode, REGNO (operands[2]));
-;;
-;; emit_insn (gen_movsi (tmp, operands[1]));
-;; emit_insn (gen_movsi (operands[0], tmp));
-;; DONE;
-;; }
-;; emit_insn (gen_movsi (operands[0], operands[1]));
-;; DONE;
-;; }"
-;; )
-
-
-
;;
;; HImode
;;
-;;; ??? This isn't guaranteed to work. It should be more like the SImode
-;;; patterns.
-
(define_expand "movhi"
[(set (match_operand:HI 0 "general_operand" "")
(match_operand:HI 1 "general_operand" ""))]
{
rtx reg = gen_reg_rtx (SImode);
emit_insn (gen_movsi (reg, operands[1]));
- operands[1] = gen_rtx (SUBREG, HImode, reg, 0);
+ operands[1] = gen_lowpart (HImode, reg);
}
}")
(define_insn ""
- [(set (match_operand:HI 0 "mcore_general_movdst_operand" "=r,r,r,r,r,r,m")
- (match_operand:HI 1 "mcore_general_movsrc_operand" "r,I,M,N,c,m,r"))]
+ [(set (match_operand:HI 0 "mcore_general_movdst_operand" "=r,r,a,r,r,m")
+ (match_operand:HI 1 "mcore_general_movsrc_operand" "r,P,i,c,m,r"))]
"(register_operand (operands[0], HImode)
- || register_operand (operands[1], HImode))
- && (GET_CODE (operands[1]) != CONST_INT
- || CONST_OK_FOR_M (INTVAL (operands[1]))
- || CONST_OK_FOR_N (INTVAL (operands[1]))
- || CONST_OK_FOR_I (INTVAL (operands[1])))"
- "@
- mov %0,%1
- movi %0,%1
- bgeni %0,%P1
- bmaski %0,%N1
- mvc %0
- ld.h %0,%1
- st.h %1,%0"
- [(set_attr "type" "move,move,move,move,move,load,store")])
-
-;; Like movhi, but the const_int source can't be synthesized in
-;; a single-instruction. Fall back to the same things that
-;; are done for movsi in such cases. Presumes that we can
-;; modify any parts of the register that we wish.
-
-(define_insn ""
- [(set (match_operand:HI 0 "mcore_general_movdst_operand" "=r,a")
- (match_operand:HI 1 "const_int_operand" "P,i"))]
- "GET_CODE (operands[1]) == CONST_INT
- && INTVAL (operands[1]) > 127 && INTVAL (operands[1]) < 65536"
- "*
-{
- if (GET_CODE (operands[0])== REG && REGNO (operands[0]) == 15
- && !mcore_const_ok_for_inline (INTVAL (operands[1])))
- {
- /* mcore_output_move would generate lrw r15 -- a forbidden combo */
- return mcore_output_inline_const_forced (insn, operands, SImode);
- }
- else
- return mcore_output_move (insn, operands, SImode);
-}"
- [(set_attr "type" "move")])
-
-
-;; if we're still looking around for things to use, here's a last
-;; ditch effort that just calls the move. We only let this happen
-;; if we're in the reload pass.
-;;
-(define_insn ""
- [(set (match_operand:HI 0 "mcore_general_movdst_operand" "=r,a")
- (match_operand:HI 1 "const_int_operand" "P,i"))]
- "reload_in_progress || reload_completed"
- "*
-{
- if (GET_CODE (operands[0])== REG && REGNO (operands[0]) == 15
- && !mcore_const_ok_for_inline (INTVAL (operands[1])))
- {
- /* mcore_output_move would generate lrw r15 -- a forbidden combo */
- return mcore_output_inline_const_forced (insn, operands, SImode);
- }
- else
- return mcore_output_move (insn, operands, HImode);
-}"
- [(set_attr "type" "move")])
+ || register_operand (operands[1], HImode))"
+ "* return mcore_output_move (insn, operands, HImode);"
+ [(set_attr "type" "move,move,move,move,load,store")])
;;
;; QImode
{
rtx reg = gen_reg_rtx (SImode);
emit_insn (gen_movsi (reg, operands[1]));
- operands[1] = gen_rtx (SUBREG, QImode, reg, 0);
+ operands[1] = gen_lowpart (QImode, reg);
}
}")
(define_insn ""
- [(set (match_operand:QI 0 "mcore_general_movdst_operand" "=r,r,r,r,r,r,m")
- (match_operand:QI 1 "mcore_general_movsrc_operand" "r,I,M,N,c,m,r"))]
+ [(set (match_operand:QI 0 "mcore_general_movdst_operand" "=r,r,a,r,r,m")
+ (match_operand:QI 1 "mcore_general_movsrc_operand" "r,P,i,c,m,r"))]
"(register_operand (operands[0], QImode)
- || register_operand (operands[1], QImode))
- && (GET_CODE (operands[1]) != CONST_INT
- || CONST_OK_FOR_M (INTVAL (operands[1]))
- || CONST_OK_FOR_N (INTVAL (operands[1]))
- || CONST_OK_FOR_I (INTVAL (operands[1])))"
- "@
- mov %0,%1
- movi %0,%1
- bgeni %0,%P1
- bmaski %0,%N1
- mvc %0
- ld.b %0,%1
- st.b %1,%0"
- [(set_attr "type" "move,move,move,move,move,load,store")])
-
-;; cover the case where the constant is 128..255; this isn't handled
-;; in the above case. We could if we wanted to mess with adding a
-;; new constraint class like M,N,I.
-(define_insn ""
- [(set (match_operand:QI 0 "mcore_general_movdst_operand" "=r")
- (match_operand:QI 1 "const_int_operand" ""))]
- "GET_CODE (operands[1]) == CONST_INT
- && INTVAL (operands[1]) > 127 && INTVAL (operands[1]) < 256"
- "*
-{
- /* have a constant in range 128..255; have to do 2 insns; we can
- * do this with a movi followed by a bseti
- */
- operands[2] = GEN_INT (INTVAL (operands[1]) & 0x7f);
- return \"movi\\t%0,%2\;bseti\\t%0,7\";
-}"
- [(set_attr "type" "move")])
+ || register_operand (operands[1], QImode))"
+ "* return mcore_output_move (insn, operands, QImode);"
+ [(set_attr "type" "move,move,move,move,load,store")])
-;; if we're still looking around for things to use, here's a last
-;; ditch effort that just calls the move. We only let this happen
-;; if we're in the reload pass.
-;;
-(define_insn ""
- [(set (match_operand:QI 0 "mcore_general_movdst_operand" "=r,a")
- (match_operand:QI 1 "const_int_operand" "P,i"))]
- "(reload_in_progress || reload_completed)"
- "*
-{
- if (GET_CODE (operands[0])== REG && REGNO (operands[0]) == 15
- && ! mcore_const_ok_for_inline (INTVAL (operands[1])))
- {
- /* mcore_output_move would generate lrw r15 -- a forbidden combo */
- return mcore_output_inline_const_forced (insn, operands, SImode);
- }
- else
- return mcore_output_move (insn, operands, QImode);
-}"
- [(set_attr "type" "move")])
;; DImode
else if (GET_CODE (operands[1]) == CONST_INT
&& ! CONST_OK_FOR_I (INTVAL (operands[1]))
&& ! CONST_OK_FOR_M (INTVAL (operands[1]))
- && ! CONST_OK_FOR_N (INTVAL (operands[1]))
- && ! reload_completed
- && ! reload_in_progress
- && GET_CODE (operands[0]) == REG)
+ && ! CONST_OK_FOR_N (INTVAL (operands[1])))
{
- emit_move_insn (operand_subword (operands[0], 0, 1, DImode),
- operand_subword_force (operands[1], 0, DImode));
- emit_move_insn (operand_subword (operands[0], 1, 1, DImode),
- operand_subword_force (operands[1], 1, DImode));
+ int i;
+ for (i = 0; i < UNITS_PER_WORD * 2; i += UNITS_PER_WORD)
+ emit_move_insn (simplify_gen_subreg (SImode, operands[0], DImode, i),
+ simplify_gen_subreg (SImode, operands[1], DImode, i));
DONE;
}
}")
count = INTVAL (operands[2]);
regno = REGNO (operands[0]);
- operands[3] = gen_rtx (PARALLEL, VOIDmode, rtvec_alloc (count));
+ operands[3] = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
for (i = 0; i < count; i++)
XVECEXP (operands[3], 0, i)
- = gen_rtx (SET, VOIDmode,
- gen_rtx (REG, SImode, regno + i),
- gen_rtx (MEM, SImode, plus_constant (stack_pointer_rtx,
+ = gen_rtx_SET (VOIDmode,
+ gen_rtx_REG (SImode, regno + i),
+ gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx,
i * 4)));
}")
count = INTVAL (operands[2]);
regno = REGNO (operands[1]);
- operands[3] = gen_rtx (PARALLEL, VOIDmode, rtvec_alloc (count));
+ operands[3] = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
for (i = 0; i < count; i++)
XVECEXP (operands[3], 0, i)
- = gen_rtx (SET, VOIDmode,
- gen_rtx (MEM, SImode, plus_constant (stack_pointer_rtx,
+ = gen_rtx_SET (VOIDmode,
+ gen_rtx_MEM (SImode, plus_constant (stack_pointer_rtx,
i * 4)),
- gen_rtx (REG, SImode, regno + i));
+ gen_rtx_REG (SImode, regno + i));
}")
(define_insn ""
""
"
{
- emit_insn (gen_jump_real (operand0));
+ emit_jump_insn (gen_jump_real (operand0));
DONE;
}
")
if (GET_CODE (operands[0]) == MEM
&& ! register_operand (XEXP (operands[0], 0), SImode)
&& ! mcore_symbolic_address_p (XEXP (operands[0], 0)))
- operands[0] = gen_rtx (MEM, GET_MODE (operands[0]),
+ operands[0] = gen_rtx_MEM (GET_MODE (operands[0]),
force_reg (Pmode, XEXP (operands[0], 0)));
}")
if (GET_CODE (operands[0]) == MEM
&& ! register_operand (XEXP (operands[0], 0), SImode)
&& ! mcore_symbolic_address_p (XEXP (operands[0], 0)))
- operands[1] = gen_rtx (MEM, GET_MODE (operands[1]),
+ operands[1] = gen_rtx_MEM (GET_MODE (operands[1]),
force_reg (Pmode, XEXP (operands[1], 0)));
}")
; rtx lshft = GEN_INT (32 - (INTVAL (operands[2]) + INTVAL (operands[3])));
; rtx rshft = GEN_INT (32 - INTVAL (operands[2]));
;
-; emit_insn (gen_rtx (SET, SImode, operands[0], operands[1]));
-; emit_insn (gen_rtx (SET, SImode, operands[0],
-; gen_rtx (ASHIFT, SImode, operands[0], lshft)));
-; emit_insn (gen_rtx (SET, SImode, operands[0],
-; gen_rtx (ASHIFTRT, SImode, operands[0], rshft)));
+; emit_insn (gen_rtx_SET (SImode, operands[0], operands[1]));
+; emit_insn (gen_rtx_SET (SImode, operands[0],
+; gen_rtx_ASHIFT (SImode, operands[0], lshft)));
+; emit_insn (gen_rtx_SET (SImode, operands[0],
+; gen_rtx_ASHIFTRT (SImode, operands[0], rshft)));
; DONE;
; }
; else
{
if (INTVAL (operands[2]) == 8 && INTVAL (operands[3]) % 8 == 0)
{
- /* 8 bit field, aligned properly, use the xtrb[0123]+sext sequence */
- /* not DONE, not FAIL, but let the RTL get generated... */
+ /* 8-bit field, aligned properly, use the xtrb[0123]+sext sequence. */
+      /* Not DONE, not FAIL, but let the RTL get generated....  */
}
else if (TARGET_W_FIELD)
{
rtx tmp1 = gen_reg_rtx (SImode);
rtx tmp2 = gen_reg_rtx (SImode);
- emit_insn (gen_rtx (SET, SImode, tmp1, operands[1]));
- emit_insn (gen_rtx (SET, SImode, tmp2,
- gen_rtx (ASHIFT, SImode, tmp1, lshft)));
- emit_insn (gen_rtx (SET, SImode, operands[0],
- gen_rtx (ASHIFTRT, SImode, tmp2, rshft)));
+ emit_insn (gen_rtx_SET (SImode, tmp1, operands[1]));
+ emit_insn (gen_rtx_SET (SImode, tmp2,
+ gen_rtx_ASHIFT (SImode, tmp1, lshft)));
+ emit_insn (gen_rtx_SET (SImode, operands[0],
+ gen_rtx_ASHIFTRT (SImode, tmp2, rshft)));
DONE;
}
else
{
- /* let the caller choose an alternate sequence */
+ /* Let the caller choose an alternate sequence. */
FAIL;
}
}")
{
if (INTVAL (operands[2]) == 8 && INTVAL (operands[3]) % 8 == 0)
{
- /* 8 bit field, aligned properly, use the xtrb[0123] sequence */
- /* let the template generate some RTL.... */
+ /* 8-bit field, aligned properly, use the xtrb[0123] sequence. */
+ /* Let the template generate some RTL.... */
}
else if (CONST_OK_FOR_K ((1 << INTVAL (operands[2])) - 1))
{
- /* A narrow bitfield (<=5 bits) means we can do a shift to put
+ /* A narrow bit-field (<=5 bits) means we can do a shift to put
it in place and then use an andi to extract it.
- This is as good as a shiftleft/shiftright. */
+ This is as good as a shiftleft/shiftright. */
rtx shifted;
rtx mask = GEN_INT ((1 << INTVAL (operands[2])) - 1);
{
rtx rshft = GEN_INT (INTVAL (operands[3]));
shifted = gen_reg_rtx (SImode);
- emit_insn (gen_rtx (SET, SImode, shifted,
- gen_rtx (LSHIFTRT, SImode, operands[1], rshft)));
+ emit_insn (gen_rtx_SET (SImode, shifted,
+ gen_rtx_LSHIFTRT (SImode, operands[1], rshft)));
}
- emit_insn (gen_rtx (SET, SImode, operands[0],
- gen_rtx (AND, SImode, shifted, mask)));
+ emit_insn (gen_rtx_SET (SImode, operands[0],
+ gen_rtx_AND (SImode, shifted, mask)));
DONE;
}
else if (TARGET_W_FIELD)
rtx tmp1 = gen_reg_rtx (SImode);
rtx tmp2 = gen_reg_rtx (SImode);
- emit_insn (gen_rtx (SET, SImode, tmp1, operands[1]));
- emit_insn (gen_rtx (SET, SImode, tmp2,
- gen_rtx (ASHIFT, SImode, tmp1, lshft)));
- emit_insn (gen_rtx (SET, SImode, operands[0],
- gen_rtx (LSHIFTRT, SImode, tmp2, rshft)));
+ emit_insn (gen_rtx_SET (SImode, tmp1, operands[1]));
+ emit_insn (gen_rtx_SET (SImode, tmp2,
+ gen_rtx_ASHIFT (SImode, tmp1, lshft)));
+ emit_insn (gen_rtx_SET (SImode, operands[0],
+ gen_rtx_LSHIFTRT (SImode, tmp2, rshft)));
DONE;
}
else
"xtrb2 %0,%1"
[(set_attr "type" "shift")])
-;; this can be peepholed if it follows a ldb ...
+;; This can be peepholed if it follows a ldb ...
(define_insn ""
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "=r,b")
(zero_extract:SI (match_operand:SI 1 "mcore_arith_reg_operand" "0,r") (const_int 8) (const_int 0)))]
;; Block move - adapted from m88k.md
;; ------------------------------------------------------------------------
-(define_expand "movstrsi"
+(define_expand "movmemsi"
[(parallel [(set (mem:BLK (match_operand:BLK 0 "" ""))
(mem:BLK (match_operand:BLK 1 "" "")))
(use (match_operand:SI 2 "general_operand" ""))
""
"
{
- rtx dest_mem = operands[0];
- rtx src_mem = operands[1];
- operands[0] = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
- operands[1] = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
- mcore_expand_block_move (dest_mem, src_mem, operands);
- DONE;
+ if (mcore_expand_block_move (operands))
+ DONE;
+ else
+ FAIL;
}")
;; ;;; ??? These patterns are meant to be generated from expand_block_move,
(match_operand:SI 1 "const_int_operand" ""))
(set (match_operand:SI 2 "mcore_arith_reg_operand" "")
(ior:SI (match_dup 2) (match_dup 0)))]
- "TARGET_HARDLIT && mcore_num_ones (INTVAL (operands[1])) == 2 &&
- mcore_is_dead (insn, operands[0])"
+ "TARGET_HARDLIT
+ && mcore_num_ones (INTVAL (operands[1])) == 2
+ && mcore_is_dead (insn, operands[0])"
"* return mcore_output_bseti (operands[2], INTVAL (operands[1]));")
(define_peephole
output_asm_insn (\"mov\\t%2,%3\", operands);
return mcore_output_bclri (operands[2], INTVAL (operands[1]) | 0xffffff00);")
-/* do not fold these together -- mode is lost at final output phase */
+/* Do not fold these together -- mode is lost at final output phase. */
(define_peephole
[(set (match_operand:SI 0 "mcore_arith_reg_operand" "")
else if ((ofs = mcore_halfword_offset (INTVAL (operands[3]))) > -1)
mode = HImode;
else
- abort ();
+ gcc_unreachable ();
if (ofs > 0)
- operands[4] = gen_rtx (MEM, mode,
- gen_rtx (PLUS, SImode, base_reg, GEN_INT(ofs)));
+ operands[4] = gen_rtx_MEM (mode,
+ gen_rtx_PLUS (SImode, base_reg, GEN_INT(ofs)));
else
- operands[4] = gen_rtx (MEM, mode, base_reg);
+ operands[4] = gen_rtx_MEM (mode, base_reg);
if (mode == QImode)
return \"movi %0,0\\n\\tst.b %0,%4\";
""
"*
{
- int op0 = REGNO (operands[0]);
+ unsigned int op0 = REGNO (operands[0]);
if (GET_CODE (operands[3]) == REG)
{
return \"btsti %1,%2\\n\\tmovt %0,%4\";
}
- abort ();
+ gcc_unreachable ();
}
else if (GET_CODE (operands[3]) == CONST_INT
&& INTVAL (operands[3]) == 0
&& GET_CODE (operands[4]) == REG)
return \"btsti %1,%2\\n\\tclrt %0\";
- abort ();
- return \"\";
+ gcc_unreachable ();
}")
; experimental - do the constant folding ourselves. note that this isn't
-; re-applied like we'd really want. ie., four ands collapse into two
+; re-applied like we'd really want. i.e., four ands collapse into two
; instead of one. this is because peepholes are applied as a sliding
; window. the peephole does not generate new rtl's, but instead slides
; across the rtl's generating machine instructions. it would be nice
""
"
{
- /* if he wants no probing, just do it for him. */
+  /* If the caller wants no probing, just adjust the stack directly.  */
if (mcore_stack_increment == 0)
{
emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,operands[1]));
DONE;
}
- /* for small constant growth, we unroll the code */
+ /* For small constant growth, we unroll the code. */
if (GET_CODE (operands[1]) == CONST_INT
- && INTVAL (operands[1]) < 8*STACK_UNITS_MAXSTEP)
+ && INTVAL (operands[1]) < 8 * STACK_UNITS_MAXSTEP)
{
- int left = INTVAL(operands[1]);
+ HOST_WIDE_INT left = INTVAL(operands[1]);
- /* if it's a long way, get close enough for a last shot */
+ /* If it's a long way, get close enough for a last shot. */
if (left >= STACK_UNITS_MAXSTEP)
{
rtx tmp = gen_reg_rtx (Pmode);
- emit_insn (gen_movsi (tmp, GEN_INT(STACK_UNITS_MAXSTEP)));
+ emit_insn (gen_movsi (tmp, GEN_INT (STACK_UNITS_MAXSTEP)));
do
{
- rtx memref = gen_rtx (MEM, SImode, stack_pointer_rtx);
- MEM_VOLATILE_P (memref) = 1;
- emit_insn(gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
- emit_insn(gen_movsi (memref, stack_pointer_rtx));
- left -= STACK_UNITS_MAXSTEP;
- } while (left > STACK_UNITS_MAXSTEP);
+ rtx memref = gen_rtx_MEM (SImode, stack_pointer_rtx);
+
+ MEM_VOLATILE_P (memref) = 1;
+ emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
+ emit_insn (gen_movsi (memref, stack_pointer_rtx));
+ left -= STACK_UNITS_MAXSTEP;
+ }
+ while (left > STACK_UNITS_MAXSTEP);
}
- /* performs the final adjustment */
- emit_insn(gen_addsi3(stack_pointer_rtx,stack_pointer_rtx,GEN_INT(-left)));
+ /* Perform the final adjustment. */
+ emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (-left)));
;; emit_move_insn (operands[0], virtual_stack_dynamic_rtx);
DONE;
}
rtx memref;
#if 1
- emit_insn(gen_movsi(tmp, operands[1]));
- emit_insn(gen_movsi(step, GEN_INT(STACK_UNITS_MAXSTEP)));
+ emit_insn (gen_movsi (tmp, operands[1]));
+ emit_insn (gen_movsi (step, GEN_INT (STACK_UNITS_MAXSTEP)));
if (GET_CODE (operands[1]) != CONST_INT)
{
emit_jump_insn (gen_bgeu (out_label));
}
- /* run a loop that steps it incrementally */
+ /* Run a loop that steps it incrementally. */
emit_label (loop_label);
- /* extend a step, probe, and adjust remaining count */
+ /* Extend a step, probe, and adjust remaining count. */
emit_insn(gen_subsi3(stack_pointer_rtx, stack_pointer_rtx, step));
- memref = gen_rtx (MEM, SImode, stack_pointer_rtx);
+ memref = gen_rtx_MEM (SImode, stack_pointer_rtx);
MEM_VOLATILE_P (memref) = 1;
emit_insn(gen_movsi(memref, stack_pointer_rtx));
emit_insn(gen_subsi3(tmp, tmp, step));
- /* loop condition -- going back up */
+ /* Loop condition -- going back up. */
emit_insn (gen_cmpsi (step, tmp));
emit_jump_insn (gen_bltu (loop_label));
if (out_label)
emit_label (out_label);
- /* bump the residual */
- emit_insn(gen_subsi3(stack_pointer_rtx, stack_pointer_rtx, tmp));
+ /* Bump the residual. */
+ emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
;; emit_move_insn (operands[0], virtual_stack_dynamic_rtx);
DONE;
#else
/* simple one-shot -- ensure register and do a subtract.
- * this does NOT comply with the ABI. */
- emit_insn(gen_movsi(tmp, operands[1]));
- emit_insn(gen_subsi3(stack_pointer_rtx, stack_pointer_rtx, tmp));
+ * This does NOT comply with the ABI. */
+ emit_insn (gen_movsi (tmp, operands[1]));
+ emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
;; emit_move_insn (operands[0], virtual_stack_dynamic_rtx);
DONE;
#endif