?? loop.c
字號:
{ scan_start = loop_top; loop_top = 0; } else /* We really do enter with a jump; scan the loop from the place where the jump jumps to. */ scan_start = p; } /* Count number of times each reg is set during this loop. Set may_not_optimize[I] if it is not safe to move out the setting of register I. */ bzero (n_times_set, nregs * sizeof (short)); bzero (may_not_optimize, nregs); count_loop_regs_set (loop_top ? loop_top : loop_start, end, may_not_optimize, &insn_count, nregs); for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) may_not_optimize[i] = 1, n_times_set[i] = 1; bcopy (n_times_set, n_times_used, nregs * sizeof (short)); if (loop_dump_stream) { fprintf (loop_dump_stream, "\nLoop from %d to %d: %d real insns.\n", INSN_UID (loop_start), INSN_UID (end), insn_count); if (loop_continue) fprintf (loop_dump_stream, "Continue at insn %d.\n", INSN_UID (loop_continue)); } /* Scan through the loop finding insns that are safe to move. In each such insn, store QImode as the mode, to mark it. Then set n_times_set to -1 for the reg being set, so that this reg will be considered invariant for subsequent insns. We consider whether subsequent insns use the reg in deciding whether it is worth actually moving. MAYBE_NEVER is nonzero if we have passed a conditional jump insn and therefore it is possible that the insns we are scanning would never be executed. At such times, we must make sure that it is safe to execute the insn once instead of zero times. When MAYBE_NEVER is 0, all insns will be executed at least once so that is not a problem. */ p = scan_start; while (1) { p = NEXT_INSN (p); /* At end of a straight-in loop, we are done. At end of a loop entered at the bottom, scan the top. */ if (p == scan_start) break; if (p == end) { if (loop_top != 0) p = NEXT_INSN (loop_top); else break; if (p == scan_start) break; } if (GET_CODE (p) == INSN && GET_CODE (PATTERN (p)) == SET && GET_CODE (SET_DEST (PATTERN (p))) == REG && ! 
may_not_optimize[REGNO (SET_DEST (PATTERN (p)))]) { int tem1 = 0; /* Don't try to optimize a register that was made by loop-optimization for an inner loop. We don't know its life-span, so we can't compute the benefit. */ if (REGNO (SET_DEST (PATTERN (p))) >= old_max_reg) ; /* IN order to move a register, we need to have one of three cases: (1) it is used only in the same basic block as the set (2) it is not a user variable. (3) the set is guaranteed to be executed once the loop starts, and the reg is not used until after that. */ else if (! ((! maybe_never && ! loop_reg_used_before_p (p, loop_start, scan_start, end)) || ! REG_USERVAR_P (SET_DEST (PATTERN (p))) || reg_in_basic_block_p (p, SET_DEST (PATTERN (p))))) ; else if (((tem = invariant_p (SET_SRC (PATTERN (p)))) || ((temp = loop_find_reg_equal (p)) && (tem = invariant_p (XEXP (temp, 0))))) && (n_times_set[REGNO (SET_DEST (PATTERN (p)))] == 1 || (tem1 = consec_sets_invariant_p (SET_DEST (PATTERN (p)), n_times_set[REGNO (SET_DEST (PATTERN (p)))], p))) /* If the insn can cause a trap (such as divide by zero), can't move it unless it's guaranteed to be executed once loop is entered. Even a function call might prevent the trap insn from being reached (since it might exit!) */ && ! ((maybe_never || call_passed) && (may_trap_p (SET_SRC (PATTERN (p))) || ((temp = loop_find_reg_equal (p)) && may_trap_p (XEXP (temp, 0)))))) { register struct movable *m; register int regno = REGNO (SET_DEST (PATTERN (p))); int count; m = (struct movable *) alloca (sizeof (struct movable)); m->next = 0; m->insn = p; temp = loop_find_reg_equal (p); if (temp) m->set_src = XEXP (temp, 0); else m->set_src = SET_SRC (PATTERN (p)); m->force = 0; m->consec = n_times_set[REGNO (SET_DEST (PATTERN (p)))] - 1; m->done = 0; m->forces = 0; m->partial = 0; m->savemode = VOIDmode; m->regno = regno; /* Set M->cond if either invariant_p or consec_sets_invariant_p returned 2 (only conditionally invariant). 
*/ m->cond = ((tem | tem1) > 1); m->global = (uid_luid[regno_last_uid[regno]] > INSN_LUID (end) || uid_luid[regno_first_uid[regno]] < INSN_LUID (loop_start)); m->match = 0; m->lifetime = (uid_luid[regno_last_uid[regno]] - uid_luid[regno_first_uid[regno]]); m->savings = n_times_used[regno]; n_times_set[regno] = -1; /* Add M to the end of the chain MOVABLES. */ if (movables == 0) movables = m; else last_movable->next = m; last_movable = m; if (m->consec > 0) { /* Skip this insn, not checking REG_LIBCALL notes. */ p = NEXT_INSN (p); /* Skip the consecutive insns, if there are any. */ p = skip_consec_insns (p, m->consec); /* Back up, so the main loop will advance to the right place. */ p = PREV_INSN (p); } } /* If this register is always set within a STRICT_LOW_PART or set to zero, then its high bytes are constant. So clear them outside the loop and within the loop just load the low bytes. We must check that the machine has an instruction to do so. Also, if the value loaded into the register depends on the same register, this cannot be done. */ else if (SET_SRC (PATTERN (p)) == const0_rtx && GET_CODE (NEXT_INSN (p)) == INSN && GET_CODE (PATTERN (NEXT_INSN (p))) == SET && (GET_CODE (SET_DEST (PATTERN (NEXT_INSN (p)))) == STRICT_LOW_PART) && (GET_CODE (XEXP (SET_DEST (PATTERN (NEXT_INSN (p))), 0)) == SUBREG) && (SUBREG_REG (XEXP (SET_DEST (PATTERN (NEXT_INSN (p))), 0)) == SET_DEST (PATTERN (p))) && !reg_mentioned_p (SET_DEST (PATTERN (p)), SET_SRC (PATTERN (NEXT_INSN (p))))) { register int regno = REGNO (SET_DEST (PATTERN (p))); if (n_times_set[regno] == 2) { register struct movable *m; int count; m = (struct movable *) alloca (sizeof (struct movable)); m->next = 0; m->insn = p; m->force = 0; m->consec = 0; m->done = 0; m->forces = 0; m->partial = 1; /* If the insn may not be executed on some cycles, we can't clear the whole reg; clear just high part. Not even if the reg is used only within this loop. 
Consider this: while (1) while (s != t) { if (foo ()) x = *s; use (x); } Clearing x before the inner loop could clobber a value being saved from the last time around the outer loop. However, if the reg is not used outside this loop and all uses of the register are in the same basic block as the store, there is no problem. */ m->global = (uid_luid[regno_last_uid[regno]] > INSN_LUID (end) || uid_luid[regno_first_uid[regno]] < INSN_LUID (p) || (labels_in_range_p (p, uid_luid[regno_first_uid[regno]]))); if (maybe_never && m->global) m->savemode = GET_MODE (SET_SRC (PATTERN (NEXT_INSN (p)))); else m->savemode = VOIDmode; m->regno = regno; m->cond = 0; m->match = 0; m->lifetime = (uid_luid[regno_last_uid[regno]] - uid_luid[regno_first_uid[regno]]); m->savings = 1; n_times_set[regno] = -1; /* Add M to the end of the chain MOVABLES. */ if (movables == 0) movables = m; else last_movable->next = m; last_movable = m; } } } /* Past a call insn, we get to insns which might not be executed because the call might exit. This matters for insns that trap. */ else if (GET_CODE (p) == CALL_INSN) call_passed = 1; /* Past a label or a jump, we get to insns for which we can't count on whether or how many times they will be executed during each iteration. Therefore, we can only move out sets of trivial variables (those not used after the loop). */ else if ((GET_CODE (p) == CODE_LABEL || GET_CODE (p) == JUMP_INSN) /* If we enter the loop in the middle, and scan around to the beginning, don't set maybe_never for that. */ && ! (NEXT_INSN (p) == end && GET_CODE (p) == JUMP_INSN && simplejump_p (p))) maybe_never = 1; } /* If one movable subsumes another, ignore that other. */ ignore_some_movables (movables); /* For each movable insn, see if the reg that it loads leads when it dies right into another conditionally movable insn. If so, record that the second insn "forces" the first one, since the second can be moved only if the first is. 
*/
  force_movables (movables);

  /* See if there are multiple movable insns that load the same value.
     If there are, make all but the first point at the first one
     through the `match' field, and add the priorities of them all
     together as the priority of the first.  */

  combine_movables (movables, nregs);

  /* Now consider each movable insn to decide whether it is worth moving.
     Store 0 in n_times_set for each reg that is moved.  */

  move_movables (movables, threshold, insn_count, loop_start, end, nregs);

  /* Now candidates that still have -1 are those not moved.
     Change n_times_set to indicate that those are not actually
     invariant.  */
  for (i = 0; i < nregs; i++)
    if (n_times_set[i] < 0)
      n_times_set[i] = n_times_used[i];

  if (flag_strength_reduce)
    strength_reduce (scan_start, end, loop_top, insn_count,
		     loop_start, end, nregs);
}

/* Return 1 if all uses of REG
   are between INSN and the end of the basic block.  */

static int 
reg_in_basic_block_p (insn, reg)
     rtx insn, reg;
{
  int regno = REGNO (reg);
  rtx p;

  /* Fail unless INSN is recorded as the first insn that mentions REG;
     otherwise REG has an earlier use we cannot account for here.  */
  if (regno_first_uid[regno] != INSN_UID (insn))
    return 0;

  /* Search this basic block for the already recorded last use of the reg.  */
  for (p = insn; p; p = NEXT_INSN (p))
    {
      switch (GET_CODE (p))
	{
	case NOTE:
	  /* A note does not end a basic block; keep scanning.  */
	  break;

	case INSN:
	case CALL_INSN:
	  /* Ordinary insn: if this is the last use, we win.  */
	  if (regno_last_uid[regno] == INSN_UID (p))
	    return 1;
	  break;

	case JUMP_INSN:
	  /* Jump insn: if this is the last use, we win.  */
	  if (regno_last_uid[regno] == INSN_UID (p))
	    return 1;
	  /* Otherwise, it's the end of the basic block, so we lose.  */
	  return 0;

	case CODE_LABEL:
	case BARRIER:
	  /* It's the end of the basic block, so we lose.  */
	  return 0;
	}
    }

  /* The "last use" doesn't follow the "first use"??  */
  abort ();
}

/* Skip COUNT insns from INSN, counting library calls as 1 insn.
*/static rtxskip_consec_insns (insn, count) rtx insn; int count;{ for (; count > 0; count--) { if (GET_CODE (insn) == NOTE) insn = NEXT_INSN (insn); else if (GET_CODE (insn) == BARRIER || GET_CODE (insn) == CODE_LABEL) abort (); else { rtx i1, temp; /* If first insn of gnulib call sequence, skip to end. */ /* Do this at start of loop, since INSN is guaranteed to be an insn here. */ if (temp = find_reg_note (insn, REG_LIBCALL, 0)) insn = XEXP (temp, 0); do insn = NEXT_INSN (insn); while (GET_CODE (insn) == NOTE); } } return insn;}/* Find a REG_EQUAL note in INSN but only if it is safe to use for our purposes. Those put in by CSE are not safe since they may fail to use the registers that appear in the actual insn source. */static rtxloop_find_reg_equal (insn) rtx insn;{ return (find_reg_note (insn, REG_RETVAL, 0) ? find_reg_note (insn, REG_EQUAL, 0) : 0);}/* Ignore any movable whose insn falls within a libcall which is part of another movable. We make use of the fact that the movable for the libcall value was made later and so appears later on the chain. */static voidignore_some_movables (movables) struct movable *movables;{ register struct movable *m, *m1; for (m = movables; m; m = m->next) { /* Is this a movable for the value of a libcall? */ rtx note = find_reg_note (m->insn, REG_RETVAL, 0); if (note) { /* Find the beginning of that libcall. */ rtx first_insn = XEXP (note, 0); /* Check for earlier movables inside that range, and mark them invalid. */ for (m1 = movables; m1 != m; m1 = m1->next) if (INSN_LUID (m1->insn) >= INSN_LUID (first_insn) && INSN_LUID (m1->insn) < INSN_LUID (m->insn)) m1->done = 1; } }} /* For each movable insn, see if the reg that it loads leads when it dies right into another conditionally movable insn. If so, record that the second insn "forces" the first one, since the second can be moved only if the first is. 
*/static voidforce_movables (movables) struct movable *movables;{ register struct movable *m, *m1; for (m1 = movables; m1; m1 = m1->next) /* Omit this if moving just the (SET (REG) 0) of a zero-extend. */ if (!m1->partial && !m1->done) { int regno = m1->regno; for (m = m1->next; m; m = m->next) /* ??? Could this be a bug? What if CSE caused the register of M1 to be used after this insn? Since CSE does not update regno_last_uid, this insn M->insn might not be where it dies. But very likely this doesn't matter; what matters is that M's reg is computed from M1's reg. */ if (INSN_UID (m->insn) == regno_last_uid[regno] && !m->done) break; if (m != 0 && m->set_src == SET_DEST (PATTERN (m1->insn))) m = 0; /* Increase the priority of the moving the first insn since it permits the second to be moved as well. */ if (m != 0) { m->forces = m1; m1->lifetime += m->lifetime; m1->savings += m1->savings; } }}/* Find invariant expressions that are equal and can be combined into one register. */static voidcombine_movables (movables, nregs) struct movable *movables; int nregs;{ register struct movable *m; char *matched_regs = (char *) alloca (nregs); enum machine_mode mode; /* Regs that are set more than once are not allowed to match or be matched. I'm no longer sure why not. */ /* Perhaps testing m->consec_sets would be more appropriate here? */ for (m = movables; m; m = m->next)
?? 快捷鍵說明
復制代碼
Ctrl + C
搜索代碼
Ctrl + F
全屏模式
F11
切換主題
Ctrl + Shift + D
顯示快捷鍵
?
增大字號
Ctrl + =
減小字號
Ctrl + -