/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "parallel_move_resolver.h"
#include "nodes.h"
#include "locations.h"

namespace art {

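// Entry point of the resolver: emits code for all moves in `parallel_move`.
// Moves with non-constant sources are performed first (resolving cycles with
// swaps); moves with constant sources are emitted last since they never block
// other moves.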
void ParallelMoveResolver::EmitNativeCode(HParallelMove* parallel_move) {
  DCHECK(moves_.IsEmpty());
  // Build up a worklist of moves.
  BuildInitialMoveList(parallel_move);

  for (size_t i = 0; i < moves_.Size(); ++i) {
    const MoveOperands& move = *moves_.Get(i);
    // Skip constants to perform them last. They don't block other moves
    // and skipping such moves with register destinations keeps those
    // registers free for the whole algorithm.
    if (!move.IsEliminated() && !move.GetSource().IsConstant()) {
      PerformMove(i);
    }
  }

  // Perform the moves with constant sources.
  for (size_t i = 0; i < moves_.Size(); ++i) {
    const MoveOperands& move = *moves_.Get(i);
    if (!move.IsEliminated()) {
      DCHECK(move.GetSource().IsConstant());
      EmitMove(i);
    }
  }

  moves_.Reset();
}


void ParallelMoveResolver::BuildInitialMoveList(HParallelMove* parallel_move) {
  // Perform a linear sweep of the moves to add them to the initial list of
  // moves to perform, ignoring any move that is redundant (the source is
  // the same as the destination, the destination is ignored and
  // unallocated, or the move was already eliminated).
  for (size_t i = 0; i < parallel_move->NumMoves(); ++i) {
    MoveOperands* move = parallel_move->MoveOperandsAt(i);
    if (!move->IsRedundant()) {
      moves_.Add(move);
    }
  }
}


void ParallelMoveResolver::PerformMove(size_t index) {
  // Each call to this function performs a move and deletes it from the move
  // graph. We first recursively perform any move blocking this one. We
  // mark a move as "pending" on entry to PerformMove in order to detect
  // cycles in the move graph. We use operand swaps to resolve cycles,
  // which means that a call to PerformMove could change any source operand
  // in the move graph.
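  //
  // For example, with the cycle { r0 -> r1, r1 -> r0 }, performing the first
  // move recursively performs the second one, which detects the cycle (the
  // first move is pending) and emits a swap of r0 and r1; after the sources
  // are updated, the first move becomes redundant and is eliminated.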

  DCHECK(!moves_.Get(index)->IsPending());
  DCHECK(!moves_.Get(index)->IsRedundant());

  // Clear this move's destination to indicate a pending move. The actual
  // destination is saved in a stack-allocated local. Recursion may allow
  // multiple moves to be pending.
  DCHECK(!moves_.Get(index)->GetSource().IsInvalid());
  Location destination = moves_.Get(index)->MarkPending();

  // Perform a depth-first traversal of the move graph to resolve
  // dependencies. Any unperformed, unpending move with a source the same
  // as this one's destination blocks this one so recursively perform all
  // such moves.
  for (size_t i = 0; i < moves_.Size(); ++i) {
    const MoveOperands& other_move = *moves_.Get(i);
    if (other_move.Blocks(destination) && !other_move.IsPending()) {
      // Though PerformMove can change any source operand in the move graph,
      // this call cannot create a blocking move via a swap (this loop does
      // not miss any). Assume there is a non-blocking move with source A
      // and this move is blocked on source B and there is a swap of A and
      // B. Then A and B must be involved in the same cycle (or they would
      // not be swapped). Since this move's destination is B and there is
      // only a single incoming edge to an operand, this move must also be
      // involved in the same cycle. In that case, the blocking move will
      // be created but will be "pending" when we return from PerformMove.
      PerformMove(i);
    }
  }
  MoveOperands* move = moves_.Get(index);

  // We are about to resolve this move and don't need it marked as
  // pending, so restore its destination.
  move->ClearPending(destination);

  // This move's source may have changed due to swaps to resolve cycles and
  // so it may now be the last move in the cycle. If so remove it.
  if (move->GetSource().Equals(destination)) {
    move->Eliminate();
    return;
  }

  // The move may be blocked on a pending move (at most one), in which case
  // we have a cycle. Search for such a blocking move and perform a swap to
  // resolve it.
  bool do_swap = false;
  for (size_t i = 0; i < moves_.Size(); ++i) {
    const MoveOperands& other_move = *moves_.Get(i);
    if (other_move.Blocks(destination)) {
      DCHECK(other_move.IsPending());
      do_swap = true;
      break;
    }
  }

  if (do_swap) {
    EmitSwap(index);
    // Any unperformed (including pending) move with a source of either
    // this move's source or destination needs to have its source
    // changed to reflect the state of affairs after the swap.
    Location source = move->GetSource();
    Location destination = move->GetDestination();
    move->Eliminate();
    for (size_t i = 0; i < moves_.Size(); ++i) {
      const MoveOperands& other_move = *moves_.Get(i);
      if (other_move.Blocks(source)) {
        moves_.Get(i)->SetSource(destination);
      } else if (other_move.Blocks(destination)) {
        moves_.Get(i)->SetSource(source);
      }
    }
  } else {
    // This move is not blocked.
    EmitMove(index);
    move->Eliminate();
  }
}

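// A location can be used as a scratch location if no unperformed move uses
// it as a source and at least one unperformed move writes to it, so that
// clobbering it is harmless.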
bool ParallelMoveResolver::IsScratchLocation(Location loc) {
  for (size_t i = 0; i < moves_.Size(); ++i) {
    if (moves_.Get(i)->Blocks(loc)) {
      return false;
    }
  }

  for (size_t i = 0; i < moves_.Size(); ++i) {
    if (moves_.Get(i)->GetDestination().Equals(loc)) {
      return true;
    }
  }

  return false;
}

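// Returns a register among the first `register_count` registers that is
// different from `blocked` and currently usable as a scratch location. If
// none qualifies, falls back to `if_scratch` and sets `*spilled` so the
// caller knows the register must be saved and restored around its use.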
int ParallelMoveResolver::AllocateScratchRegister(int blocked,
                                                  int register_count,
                                                  int if_scratch,
                                                  bool* spilled) {
  DCHECK_NE(blocked, if_scratch);
  int scratch = -1;
  for (int reg = 0; reg < register_count; ++reg) {
    if ((blocked != reg) &&
        IsScratchLocation(Location::RegisterLocation(ManagedRegister(reg)))) {
      scratch = reg;
      break;
    }
  }

  if (scratch == -1) {
    *spilled = true;
    scratch = if_scratch;
  } else {
    *spilled = false;
  }

  return scratch;
}


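// RAII helper that reserves a scratch register for the duration of its scope:
// the constructor allocates the register (spilling it if no free register is
// available) and the destructor restores it when it had to be spilled.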
ParallelMoveResolver::ScratchRegisterScope::ScratchRegisterScope(
    ParallelMoveResolver* resolver, int blocked, int if_scratch, int number_of_registers)
    : resolver_(resolver),
      reg_(kNoRegister),
      spilled_(false) {
  reg_ = resolver_->AllocateScratchRegister(blocked, number_of_registers, if_scratch, &spilled_);

  if (spilled_) {
    resolver->SpillScratch(reg_);
  }
}


ParallelMoveResolver::ScratchRegisterScope::~ScratchRegisterScope() {
  if (spilled_) {
    resolver_->RestoreScratch(reg_);
  }
}

}  // namespace art