+
+/* Return true when blocks A and B can be safely merged. */
+
+static bool
+cfg_layout_can_merge_blocks_p (basic_block a, basic_block b)
+{
+ /* If we are partitioning hot/cold basic blocks, we don't want to
+ mess up unconditional or indirect jumps that cross between hot
+ and cold sections.
+
+ Basic block partitioning may result in some jumps that appear to
+ be optimizable (or blocks that appear to be mergeable), but which really
+ must be left untouched (they are required to make it safely across
+ partition boundaries). See the comments at the top of
+ bb-reorder.c:partition_hot_cold_basic_blocks for complete details. */
+
+ if (BB_PARTITION (a) != BB_PARTITION (b))
+ return false;
+
+ /* There must be exactly one edge in between the blocks. */
+ return (single_succ_p (a)
+ && single_succ (a) == b
+ && single_pred_p (b)
+ && a != b
+ /* Must be simple edge. */
+ && !(single_succ_edge (a)->flags & EDGE_COMPLEX)
+ && a != ENTRY_BLOCK_PTR && b != EXIT_BLOCK_PTR
+ /* If the jump insn has side effects, we can't kill the edge.
+ When not optimizing, try_redirect_by_replacing_jump will
+ not allow us to redirect an edge by replacing a table jump. */
+ && (!JUMP_P (BB_END (a))
+ || ((!optimize || reload_completed)
+ ? simplejump_p (BB_END (a)) : onlyjump_p (BB_END (a)))));
+}
+
+/* Merge blocks A and B. The blocks must be mergeable. */
+
+static void
+cfg_layout_merge_blocks (basic_block a, basic_block b)
+{
+#ifdef ENABLE_CHECKING
+ gcc_assert (cfg_layout_can_merge_blocks_p (a, b));
+#endif
+
+ if (dump_file)
+ fprintf (dump_file, "merging block %d into block %d\n", b->index, a->index);
+
+ /* If there was a CODE_LABEL beginning B, delete it. */
+ if (LABEL_P (BB_HEAD (b)))
+ {
+ /* This might have been an EH label that no longer has incoming
+ EH edges. Update data structures to match. */
+ maybe_remove_eh_handler (BB_HEAD (b));
+
+ delete_insn (BB_HEAD (b));
+ }
+
+ /* A should end with a fallthru edge; if it still ends in a jump, do a
+ dummy redirection to get that cleaned up. */
+ if (JUMP_P (BB_END (a)))
+ try_redirect_by_replacing_jump (EDGE_SUCC (a, 0), b, true);
+ gcc_assert (!JUMP_P (BB_END (a)));
+
+ /* When not optimizing, if the edge is the only place in RTL which holds
+ some unique locus, emit a nop with that locus between the blocks. */
+ if (!optimize && EDGE_SUCC (a, 0)->goto_locus)
+ {
+ rtx insn = BB_END (a), end = PREV_INSN (BB_HEAD (a));
+ int goto_locus = EDGE_SUCC (a, 0)->goto_locus;
+
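+ /* Walk back from the end of A to the last real insn that carries a
+ locator; if it already has the edge's locus, no nop is needed. */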
+ while (insn != end && (!INSN_P (insn) || INSN_LOCATOR (insn) == 0))
+ insn = PREV_INSN (insn);
+ if (insn != end && locator_eq (INSN_LOCATOR (insn), goto_locus))
+ goto_locus = 0;
+ else
+ {
+ insn = BB_HEAD (b);
+ end = NEXT_INSN (BB_END (b));
+ while (insn != end && !INSN_P (insn))
+ insn = NEXT_INSN (insn);
+ if (insn != end && INSN_LOCATOR (insn) != 0
+ && locator_eq (INSN_LOCATOR (insn), goto_locus))
+ goto_locus = 0;
+ }
+ if (goto_locus)
+ {
+ BB_END (a) = emit_insn_after_noloc (gen_nop (), BB_END (a), a);
+ INSN_LOCATOR (BB_END (a)) = goto_locus;
+ }
+ }
+
+ /* Possible line number notes from B's header should appear in between. */
+ if (b->il.rtl->header)
+ {
+ rtx first = BB_END (a), last;
+
+ last = emit_insn_after_noloc (b->il.rtl->header, BB_END (a), a);
+ delete_insn_chain (NEXT_INSN (first), last, false);
+ b->il.rtl->header = NULL;
+ }
+
+ /* If the basic blocks are not adjacent, move B's insns to just after
+ the end of A. */
+ if (NEXT_INSN (BB_END (a)) != BB_HEAD (b))
+ {
+ rtx first = unlink_insn_chain (BB_HEAD (b), BB_END (b));
+
+ emit_insn_after_noloc (first, BB_END (a), a);
+ /* Skip possible DELETED_LABEL insn. */
+ if (!NOTE_INSN_BASIC_BLOCK_P (first))
+ first = NEXT_INSN (first);
+ gcc_assert (NOTE_INSN_BASIC_BLOCK_P (first));
+ BB_HEAD (b) = NULL;
+
+ /* emit_insn_after_noloc doesn't call df_insn_change_bb.
+ We need to call it explicitly. */
+ update_bb_for_insn_chain (NEXT_INSN (first),
+ BB_END (b),
+ a);
+
+ delete_insn (first);
+ }
+ /* Otherwise just re-associate the instructions. */
+ else
+ {
+ rtx insn;
+
+ update_bb_for_insn_chain (BB_HEAD (b), BB_END (b), a);
+
+ insn = BB_HEAD (b);
+ /* Skip possible DELETED_LABEL insn. */
+ if (!NOTE_INSN_BASIC_BLOCK_P (insn))
+ insn = NEXT_INSN (insn);
+ gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
+ BB_HEAD (b) = NULL;
+ BB_END (a) = BB_END (b);
+ delete_insn (insn);
+ }
+
+ df_bb_delete (b->index);
+
+ /* Possible tablejumps and barriers should appear after the block. */
+ if (b->il.rtl->footer)
+ {
+ if (!a->il.rtl->footer)
+ a->il.rtl->footer = b->il.rtl->footer;
+ else
+ {
+ rtx last = a->il.rtl->footer;
+
+ while (NEXT_INSN (last))
+ last = NEXT_INSN (last);
+ NEXT_INSN (last) = b->il.rtl->footer;
+ PREV_INSN (b->il.rtl->footer) = last;
+ }
+ b->il.rtl->footer = NULL;
+ }
+
+ if (dump_file)
+ fprintf (dump_file, "Merged blocks %d and %d.\n",
+ a->index, b->index);
+}
+
+/* Split edge E. */
+
+static basic_block
+cfg_layout_split_edge (edge e)
+{
+ basic_block new_bb =
+ create_basic_block (e->src != ENTRY_BLOCK_PTR
+ ? NEXT_INSN (BB_END (e->src)) : get_insns (),
+ NULL_RTX, e->src);
+
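+ /* Keep the new block in the same hot/cold partition as the block it
+ falls through to, or as the source when splitting an edge to EXIT. */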
+ if (e->dest == EXIT_BLOCK_PTR)
+ BB_COPY_PARTITION (new_bb, e->src);
+ else
+ BB_COPY_PARTITION (new_bb, e->dest);
+ make_edge (new_bb, e->dest, EDGE_FALLTHRU);
+ redirect_edge_and_branch_force (e, new_bb);
+
+ return new_bb;
+}
+
+/* Do postprocessing after making a forwarder block joined by edge FALLTHRU. */
+
+static void
+rtl_make_forwarder_block (edge fallthru ATTRIBUTE_UNUSED)
+{
+}
+
+/* Return true if BB ends with a call, possibly followed by some
+ instructions that must stay with the call; false otherwise. */
+
+static bool
+rtl_block_ends_with_call_p (basic_block bb)
+{
+ rtx insn = BB_END (bb);
+
+ while (!CALL_P (insn)
+ && insn != BB_HEAD (bb)
+ && (keep_with_call_p (insn)
+ || NOTE_P (insn)))
+ insn = PREV_INSN (insn);
+ return (CALL_P (insn));
+}
+
+/* Return true if BB ends with a conditional branch, false otherwise. */
+
+static bool
+rtl_block_ends_with_condjump_p (const_basic_block bb)
+{
+ return any_condjump_p (BB_END (bb));
+}
+
+/* Return true if we need to add a fake edge to the exit block.
+ Helper function for rtl_flow_call_edges_add. */
+
+static bool
+need_fake_edge_p (const_rtx insn)
+{
+ if (!INSN_P (insn))
+ return false;
+
+ if (CALL_P (insn)
+ && !SIBLING_CALL_P (insn)
+ && !find_reg_note (insn, REG_NORETURN, NULL)
+ && !RTL_CONST_OR_PURE_CALL_P (insn))
+ return true;
+
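+ /* A volatile asm has unknown side effects and, like a call, may
+ prevent control from reaching the end of the block. */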
+ return ((GET_CODE (PATTERN (insn)) == ASM_OPERANDS
+ && MEM_VOLATILE_P (PATTERN (insn)))
+ || (GET_CODE (PATTERN (insn)) == PARALLEL
+ && asm_noperands (insn) != -1
+ && MEM_VOLATILE_P (XVECEXP (PATTERN (insn), 0, 0)))
+ || GET_CODE (PATTERN (insn)) == ASM_INPUT);
+}
+
+/* Add fake edges to the function exit for any non-constant and non-noreturn
+ calls, or volatile inline assembly, in the bitmap of blocks specified by
+ BLOCKS, or to the whole CFG if BLOCKS is zero. Return the number of blocks
+ that were split.
+
+ The goal is to expose cases in which entering a basic block does not imply
+ that all subsequent instructions must be executed. */
+
+static int
+rtl_flow_call_edges_add (sbitmap blocks)
+{
+ int i;
+ int blocks_split = 0;
+ int last_bb = last_basic_block;
+ bool check_last_block = false;
+
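+ /* An empty function contains only the fixed entry and exit blocks. */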
+ if (n_basic_blocks == NUM_FIXED_BLOCKS)
+ return 0;
+
+ if (! blocks)
+ check_last_block = true;
+ else
+ check_last_block = TEST_BIT (blocks, EXIT_BLOCK_PTR->prev_bb->index);
+
+ /* In the last basic block, before epilogue generation, there will be
+ a fallthru edge to EXIT. Special care is required if the last insn
+ of the last basic block is a call because make_edge folds duplicate
+ edges, which would result in the fallthru edge also being marked
+ fake, which would result in the fallthru edge being removed by
+ remove_fake_edges, which would result in an invalid CFG.
+
+ Moreover, we can't elide the outgoing fake edge, since the block
+ profiler needs to take this into account in order to solve the minimal
+ spanning tree in the case that the call doesn't return.
+
+ Handle this by adding a dummy instruction in a new last basic block. */
+ if (check_last_block)
+ {
+ basic_block bb = EXIT_BLOCK_PTR->prev_bb;
+ rtx insn = BB_END (bb);
+
+ /* Back up past insns that must be kept in the same block as a call. */
+ while (insn != BB_HEAD (bb)
+ && keep_with_call_p (insn))
+ insn = PREV_INSN (insn);
+
+ if (need_fake_edge_p (insn))
+ {
+ edge e;
+
+ e = find_edge (bb, EXIT_BLOCK_PTR);
+ if (e)
+ {
+ insert_insn_on_edge (gen_use (const0_rtx), e);
+ commit_edge_insertions ();
+ }
+ }
+ }
+
+ /* Now add fake edges to the function exit for any non-constant
+ calls, since there is no way to determine whether or not they
+ will return. */
+
+ for (i = NUM_FIXED_BLOCKS; i < last_bb; i++)
+ {
+ basic_block bb = BASIC_BLOCK (i);
+ rtx insn;
+ rtx prev_insn;
+
+ if (!bb)
+ continue;
+
+ if (blocks && !TEST_BIT (blocks, i))
+ continue;
+
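+ /* Scan the block backwards, from its last insn to its head. */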
+ for (insn = BB_END (bb); ; insn = prev_insn)
+ {
+ prev_insn = PREV_INSN (insn);
+ if (need_fake_edge_p (insn))
+ {
+ edge e;
+ rtx split_at_insn = insn;
+
+ /* Don't split the block between a call and an insn that should
+ remain in the same block as the call. */
+ if (CALL_P (insn))
+ while (split_at_insn != BB_END (bb)
+ && keep_with_call_p (NEXT_INSN (split_at_insn)))
+ split_at_insn = NEXT_INSN (split_at_insn);
+
+ /* The handling above of the final block before the epilogue
+ should be enough to verify that there is no edge to the exit
+ block in the CFG already. Calling make_edge in such a case
+ would cause us to mark that edge as fake and remove it later. */
+
+#ifdef ENABLE_CHECKING
+ if (split_at_insn == BB_END (bb))
+ {
+ e = find_edge (bb, EXIT_BLOCK_PTR);
+ gcc_assert (e == NULL);
+ }
+#endif
+
+ /* Note that the following may create a new basic block
+ and renumber the existing basic blocks. */
+ if (split_at_insn != BB_END (bb))
+ {
+ e = split_block (bb, split_at_insn);
+ if (e)
+ blocks_split++;
+ }
+
+ make_edge (bb, EXIT_BLOCK_PTR, EDGE_FAKE);
+ }
+
+ if (insn == BB_HEAD (bb))
+ break;
+ }
+ }
+
+ if (blocks_split)
+ verify_flow_info ();
+
+ return blocks_split;
+}
+
+/* Add COMP_RTX as a condition at the end of COND_BB. FIRST_HEAD is
+ the block whose label is the target of the conditional branch;
+ SECOND_HEAD is the fall-thru block, which need not be handled here
+ because the loop versioning code takes care of it. SECOND_HEAD
+ remains a parameter because the tree-level hook needs it, and both
+ hooks should have the same signature. */
+static void
+rtl_lv_add_condition_to_bb (basic_block first_head,
+ basic_block second_head ATTRIBUTE_UNUSED,
+ basic_block cond_bb, void *comp_rtx)
+{
+ rtx label, seq, jump;
+ rtx op0 = XEXP ((rtx)comp_rtx, 0);
+ rtx op1 = XEXP ((rtx)comp_rtx, 1);
+ enum rtx_code comp = GET_CODE ((rtx)comp_rtx);
+ enum machine_mode mode;
+
+ label = block_label (first_head);
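+ /* Pick the comparison mode from OP0, falling back to OP1 when OP0
+ has VOIDmode (e.g. when it is a constant). */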
+ mode = GET_MODE (op0);
+ if (mode == VOIDmode)
+ mode = GET_MODE (op1);
+
+ start_sequence ();
+ op0 = force_operand (op0, NULL_RTX);
+ op1 = force_operand (op1, NULL_RTX);
+ do_compare_rtx_and_jump (op0, op1, comp, 0,
+ mode, NULL_RTX, NULL_RTX, label);
+ jump = get_last_insn ();
+ JUMP_LABEL (jump) = label;
+ LABEL_NUSES (label)++;
+ seq = get_insns ();
+ end_sequence ();
+
+ /* Add the new condition at the end of the new head COND_BB. */
+ emit_insn_after (seq, BB_END (cond_bb));
+}
+
+
+/* Given a block B that ends with a conditional branch, store the
+ branch edge and the fall-thru edge in *BRANCH_EDGE and
+ *FALLTHRU_EDGE respectively. */
+static void
+rtl_extract_cond_bb_edges (basic_block b, edge *branch_edge,
+ edge *fallthru_edge)
+{
+ edge e = EDGE_SUCC (b, 0);
+
+ if (e->flags & EDGE_FALLTHRU)
+ {
+ *fallthru_edge = e;
+ *branch_edge = EDGE_SUCC (b, 1);
+ }
+ else
+ {
+ *branch_edge = e;
+ *fallthru_edge = EDGE_SUCC (b, 1);
+ }
+}
+
+void
+init_rtl_bb_info (basic_block bb)
+{
+ gcc_assert (!bb->il.rtl);
+ bb->il.rtl = GGC_CNEW (struct rtl_bb_info);
+}
+
+
+/* Add the insn chain PAT to the end of basic block BB. */
+
+rtx
+insert_insn_end_bb_new (rtx pat, basic_block bb)
+{
+ rtx insn = BB_END (bb);
+ rtx new_insn;
+ rtx pat_end = pat;
+
+ while (NEXT_INSN (pat_end) != NULL_RTX)
+ pat_end = NEXT_INSN (pat_end);
+
+ /* If the last insn is a jump, insert PAT in front [taking care to
+ handle cc0, etc. properly]. Similarly we need to take care of
+ trapping instructions in the presence of non-call exceptions. */
+
+ if (JUMP_P (insn)
+ || (NONJUMP_INSN_P (insn)
+ && (!single_succ_p (bb)
+ || single_succ_edge (bb)->flags & EDGE_ABNORMAL)))
+ {
+#ifdef HAVE_cc0
+ rtx note;
+#endif
+ /* If this is a jump table, then we can't insert stuff here. Since
+ we know the previous real insn must be the tablejump, we insert
+ the new instruction just before the tablejump. */
+ if (GET_CODE (PATTERN (insn)) == ADDR_VEC
+ || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
+ insn = prev_real_insn (insn);
+
+#ifdef HAVE_cc0
+ /* FIXME: 'twould be nice to call prev_cc0_setter here but it aborts
+ if cc0 isn't set. */
+ note = find_reg_note (insn, REG_CC_SETTER, NULL_RTX);
+ if (note)
+ insn = XEXP (note, 0);
+ else
+ {
+ rtx maybe_cc0_setter = prev_nonnote_insn (insn);
+ if (maybe_cc0_setter
+ && INSN_P (maybe_cc0_setter)
+ && sets_cc0_p (PATTERN (maybe_cc0_setter)))
+ insn = maybe_cc0_setter;
+ }
+#endif
+ /* FIXME: What if something in cc0/jump uses value set in new
+ insn? */
+ new_insn = emit_insn_before_noloc (pat, insn, bb);
+ }
+
+ /* Likewise if the last insn is a call, as will happen in the presence
+ of exception handling. */
+ else if (CALL_P (insn)
+ && (!single_succ_p (bb)
+ || single_succ_edge (bb)->flags & EDGE_ABNORMAL))
+ {
+ /* Keeping in mind SMALL_REGISTER_CLASSES and parameters in registers,
+ we search backward and place the instructions before the first
+ parameter is loaded. Do this for everyone for consistency and a
+ presumption that we'll get better code elsewhere as well. */
+
+ /* Since different machines initialize their parameter registers
+ in different orders, assume nothing. Collect the set of all
+ parameter registers. */
+ insn = find_first_parameter_load (insn, BB_HEAD (bb));
+
+ /* If we found all the parameter loads, then we want to insert
+ before the first parameter load.
+
+ If we did not find all the parameter loads, then we might have
+ stopped on the head of the block, which could be a CODE_LABEL.
+ If we inserted before the CODE_LABEL, then we would be putting
+ the insn in the wrong basic block. In that case, put the insn
+ after the CODE_LABEL. Also, respect NOTE_INSN_BASIC_BLOCK. */
+ while (LABEL_P (insn)
+ || NOTE_INSN_BASIC_BLOCK_P (insn))
+ insn = NEXT_INSN (insn);
+
+ new_insn = emit_insn_before_noloc (pat, insn, bb);
+ }
+ else
+ new_insn = emit_insn_after_noloc (pat, insn, bb);
+
+ return new_insn;
+}
+
+/* Returns true if it is possible to remove edge E by redirecting
+ it to the destination of the other edge from E->src. */
+
+static bool
+rtl_can_remove_branch_p (const_edge e)
+{
+ const_basic_block src = e->src;
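+ /* The other successor of SRC: index 1 if E is successor 0, and
+ index 0 otherwise. */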
+ const_basic_block target = EDGE_SUCC (src, EDGE_SUCC (src, 0) == e)->dest;
+ const_rtx insn = BB_END (src), set;
+
+ /* The conditions are taken from try_redirect_by_replacing_jump. */
+ if (target == EXIT_BLOCK_PTR)
+ return false;
+
+ if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
+ return false;
+
+ if (find_reg_note (insn, REG_CROSSING_JUMP, NULL_RTX)
+ || BB_PARTITION (src) != BB_PARTITION (target))
+ return false;
+
+ if (!onlyjump_p (insn)
+ || tablejump_p (insn, NULL, NULL))
+ return false;
+
+ set = single_set (insn);
+ if (!set || side_effects_p (set))
+ return false;
+
+ return true;
+}
+
+/* Implementation of CFG manipulation for linearized RTL. */
+struct cfg_hooks rtl_cfg_hooks = {
+ "rtl",
+ rtl_verify_flow_info,
+ rtl_dump_bb,
+ rtl_create_basic_block,
+ rtl_redirect_edge_and_branch,
+ rtl_redirect_edge_and_branch_force,
+ rtl_can_remove_branch_p,
+ rtl_delete_block,
+ rtl_split_block,
+ rtl_move_block_after,
+ rtl_can_merge_blocks, /* can_merge_blocks_p */
+ rtl_merge_blocks,
+ rtl_predict_edge,
+ rtl_predicted_by_p,
+ NULL, /* can_duplicate_block_p */
+ NULL, /* duplicate_block */
+ rtl_split_edge,
+ rtl_make_forwarder_block,
+ rtl_tidy_fallthru_edge,
+ rtl_block_ends_with_call_p,
+ rtl_block_ends_with_condjump_p,
+ rtl_flow_call_edges_add,
+ NULL, /* execute_on_growing_pred */
+ NULL, /* execute_on_shrinking_pred */
+ NULL, /* duplicate loop for trees */
+ NULL, /* lv_add_condition_to_bb */
+ NULL, /* lv_adjust_loop_header_phi */
+ NULL, /* extract_cond_bb_edges */
+ NULL /* flush_pending_stmts */
+};
+
+/* Implementation of CFG manipulation for cfg layout RTL, where
+ basic blocks connected via fallthru edges do not have to be adjacent.
+ This representation will hopefully become the default one in a future
+ version of the compiler. */
+
+/* We do not want to declare these functions in a header file, since they
+ should only be used through the cfghooks interface, and we do not want to
+ move them here since it would require also moving quite a lot of related
+ code. They are in cfglayout.c. */
+extern bool cfg_layout_can_duplicate_bb_p (const_basic_block);
+extern basic_block cfg_layout_duplicate_bb (basic_block);
+
+struct cfg_hooks cfg_layout_rtl_cfg_hooks = {
+ "cfglayout mode",
+ rtl_verify_flow_info_1,
+ rtl_dump_bb,
+ cfg_layout_create_basic_block,
+ cfg_layout_redirect_edge_and_branch,
+ cfg_layout_redirect_edge_and_branch_force,
+ rtl_can_remove_branch_p,
+ cfg_layout_delete_block,
+ cfg_layout_split_block,
+ rtl_move_block_after,
+ cfg_layout_can_merge_blocks_p,
+ cfg_layout_merge_blocks,
+ rtl_predict_edge,
+ rtl_predicted_by_p,
+ cfg_layout_can_duplicate_bb_p,
+ cfg_layout_duplicate_bb,
+ cfg_layout_split_edge,
+ rtl_make_forwarder_block,
+ NULL, /* tidy_fallthru_edge */
+ rtl_block_ends_with_call_p,
+ rtl_block_ends_with_condjump_p,
+ rtl_flow_call_edges_add,
+ NULL, /* execute_on_growing_pred */
+ NULL, /* execute_on_shrinking_pred */
+ duplicate_loop_to_header_edge, /* duplicate loop for trees */
+ rtl_lv_add_condition_to_bb, /* lv_add_condition_to_bb */
+ NULL, /* lv_adjust_loop_header_phi */
+ rtl_extract_cond_bb_edges, /* extract_cond_bb_edges */
+ NULL /* flush_pending_stmts */
+};