/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "compiler_internals.h"
#include "dataflow_iterator-inl.h"
#include "utils/scoped_arena_containers.h"

#define NOTVISITED (-1)

namespace art {

void MIRGraph::ClearAllVisitedFlags() {
  AllNodesIterator iter(this);
  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
    bb->visited = false;
  }
}

BasicBlock* MIRGraph::NeedsVisit(BasicBlock* bb) {
  if (bb != NULL) {
    if (bb->visited || bb->hidden) {
      bb = NULL;
    }
  }
  return bb;
}

BasicBlock* MIRGraph::NextUnvisitedSuccessor(BasicBlock* bb) {
  BasicBlock* res = NeedsVisit(GetBasicBlock(bb->fall_through));
  if (res == NULL) {
    res = NeedsVisit(GetBasicBlock(bb->taken));
    if (res == NULL) {
      if (bb->successor_block_list_type != kNotUsed) {
        GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bb->successor_blocks);
        while (true) {
          SuccessorBlockInfo *sbi = iterator.Next();
          if (sbi == NULL) {
            break;
          }
          res = NeedsVisit(GetBasicBlock(sbi->block));
          if (res != NULL) {
            break;
          }
        }
      }
    }
  }
  return res;
}

void MIRGraph::MarkPreOrder(BasicBlock* block) {
  block->visited = true;
  /* Enqueue the pre_order block id */
  if (block->id != NullBasicBlockId) {
    dfs_order_->Insert(block->id);
  }
}

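/*
 * Record the DFS pre-order and post-order of the reachable blocks using an
 * explicit work stack instead of recursion: a block is added to dfs_order_
 * when it is first pushed, and to dfs_post_order_ (with its dfs_id set to its
 * post-order position) once all of its successors have been visited.
 */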
void MIRGraph::RecordDFSOrders(BasicBlock* block) {
  DCHECK(temp_scoped_alloc_.get() != nullptr);
  ScopedArenaVector<BasicBlock*> succ(temp_scoped_alloc_->Adapter());
  MarkPreOrder(block);
  succ.push_back(block);
  while (!succ.empty()) {
    BasicBlock* curr = succ.back();
    BasicBlock* next_successor = NextUnvisitedSuccessor(curr);
    if (next_successor != NULL) {
      MarkPreOrder(next_successor);
      succ.push_back(next_successor);
      continue;
    }
    curr->dfs_id = dfs_post_order_->Size();
    if (curr->id != NullBasicBlockId) {
      dfs_post_order_->Insert(curr->id);
    }
    succ.pop_back();
  }
}

/* Compute the depth-first search (DFS) pre- and post-order of the blocks. */
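/*
 * Blocks not reached by the DFS from the entry block are hidden, so later
 * passes only see the num_reachable_blocks_ live blocks.
 */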
void MIRGraph::ComputeDFSOrders() {
  /* Initialize or reset the DFS pre_order list */
  if (dfs_order_ == NULL) {
    dfs_order_ = new (arena_) GrowableArray<BasicBlockId>(arena_, GetNumBlocks(),
                                                          kGrowableArrayDfsOrder);
  } else {
    /* Just reset the used length on the counter */
    dfs_order_->Reset();
  }

  /* Initialize or reset the DFS post_order list */
  if (dfs_post_order_ == NULL) {
    dfs_post_order_ = new (arena_) GrowableArray<BasicBlockId>(arena_, GetNumBlocks(),
                                                               kGrowableArrayDfsPostOrder);
  } else {
    /* Just reset the used length on the counter */
    dfs_post_order_->Reset();
  }

  // Reset visited flags of all nodes.
  ClearAllVisitedFlags();

  // Record dfs orders.
  RecordDFSOrders(GetEntryBlock());

  num_reachable_blocks_ = dfs_order_->Size();

  if (num_reachable_blocks_ != num_blocks_) {
    // Hide all unreachable blocks.
    AllNodesIterator iter(this);
    for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
      if (!bb->visited) {
        bb->Hide(cu_);
      }
    }
  }
}

/*
 * Mark the bit for block bb in the per-Dalvik-register vector to denote that
 * Dalvik register idx is defined in BasicBlock bb.
 */
bool MIRGraph::FillDefBlockMatrix(BasicBlock* bb) {
  if (bb->data_flow_info == NULL) {
    return false;
  }

  for (uint32_t idx : bb->data_flow_info->def_v->Indexes()) {
    /* Block bb defines register idx */
    def_block_matrix_[idx]->SetBit(bb->id);
  }
  return true;
}

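/*
 * Build def_block_matrix_: one bit vector per Dalvik register, where bit b is
 * set if basic block b defines that register. Incoming method parameters are
 * treated as defs in the entry block. The matrix seeds the phi-insertion pass
 * in InsertPhiNodes().
 */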
void MIRGraph::ComputeDefBlockMatrix() {
  int num_registers = cu_->num_dalvik_registers;
  /* Allocate num_dalvik_registers bit vector pointers */
  def_block_matrix_ = static_cast<ArenaBitVector**>
      (arena_->Alloc(sizeof(ArenaBitVector *) * num_registers,
                     kArenaAllocDFInfo));
  int i;

  /* Initialize num_registers vectors with num_blocks bits each */
  for (i = 0; i < num_registers; i++) {
    def_block_matrix_[i] =
        new (arena_) ArenaBitVector(arena_, GetNumBlocks(), false, kBitMapBMatrix);
  }
  AllNodesIterator iter(this);
  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
    FindLocalLiveIn(bb);
  }
  AllNodesIterator iter2(this);
  for (BasicBlock* bb = iter2.Next(); bb != NULL; bb = iter2.Next()) {
    FillDefBlockMatrix(bb);
  }

  /*
   * Also set the incoming parameters as defs in the entry block.
   * Only need to handle the parameters for the outer method.
   */
  int num_regs = cu_->num_dalvik_registers;
  int in_reg = num_regs - cu_->num_ins;
  for (; in_reg < num_regs; in_reg++) {
    def_block_matrix_[in_reg]->SetBit(GetEntryBlock()->id);
  }
}

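/*
 * Compute a post-order traversal of the dominator tree (rooted at bb, using
 * the i_dominated sets as child links) without recursion. The resulting
 * dom_post_order_traversal_ drives the PostOrderDOMIterator used when
 * computing dominance frontiers.
 */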
void MIRGraph::ComputeDomPostOrderTraversal(BasicBlock* bb) {
  if (dom_post_order_traversal_ == NULL || max_num_reachable_blocks_ < num_reachable_blocks_) {
    // First time or too small - create the array.
    dom_post_order_traversal_ =
        new (arena_) GrowableArray<BasicBlockId>(arena_, num_reachable_blocks_,
                                                 kGrowableArrayDomPostOrderTraversal);
  } else {
    dom_post_order_traversal_->Reset();
  }
  ClearAllVisitedFlags();
  DCHECK(temp_scoped_alloc_.get() != nullptr);
  ScopedArenaVector<std::pair<BasicBlock*, ArenaBitVector::IndexIterator>> work_stack(
      temp_scoped_alloc_->Adapter());
  bb->visited = true;
  work_stack.push_back(std::make_pair(bb, bb->i_dominated->Indexes().begin()));
  while (!work_stack.empty()) {
    std::pair<BasicBlock*, ArenaBitVector::IndexIterator>* curr = &work_stack.back();
    BasicBlock* curr_bb = curr->first;
    ArenaBitVector::IndexIterator* curr_idom_iter = &curr->second;
    while (!curr_idom_iter->Done() && (NeedsVisit(GetBasicBlock(**curr_idom_iter)) == nullptr)) {
      ++*curr_idom_iter;
    }
    // NOTE: work_stack.push_back()/pop_back() invalidate curr and curr_idom_iter.
    if (!curr_idom_iter->Done()) {
      BasicBlock* new_bb = GetBasicBlock(**curr_idom_iter);
      ++*curr_idom_iter;
      new_bb->visited = true;
      work_stack.push_back(std::make_pair(new_bb, new_bb->i_dominated->Indexes().begin()));
    } else {
      // No unvisited successor/next.
      if (curr_bb->id != NullBasicBlockId) {
        dom_post_order_traversal_->Insert(curr_bb->id);
      }
      work_stack.pop_back();

      /* hacky loop detection */
      if ((curr_bb->taken != NullBasicBlockId) && curr_bb->dominators->IsBitSet(curr_bb->taken)) {
        curr_bb->nesting_depth++;
        attributes_ |= METHOD_HAS_LOOP;
      }
    }
  }
}

void MIRGraph::CheckForDominanceFrontier(BasicBlock* dom_bb,
                                         const BasicBlock* succ_bb) {
  /*
   * TODO - evaluate whether phi will ever need to be inserted into exit
   * blocks.
   */
  if (succ_bb->i_dom != dom_bb->id &&
      succ_bb->block_type == kDalvikByteCode &&
      succ_bb->hidden == false) {
    dom_bb->dom_frontier->SetBit(succ_bb->id);
  }
}

/* Worker function to compute the dominance frontier */
bool MIRGraph::ComputeDominanceFrontier(BasicBlock* bb) {
  /* Calculate DF_local */
  if (bb->taken != NullBasicBlockId) {
    CheckForDominanceFrontier(bb, GetBasicBlock(bb->taken));
  }
  if (bb->fall_through != NullBasicBlockId) {
    CheckForDominanceFrontier(bb, GetBasicBlock(bb->fall_through));
  }
  if (bb->successor_block_list_type != kNotUsed) {
    GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bb->successor_blocks);
    while (true) {
      SuccessorBlockInfo *successor_block_info = iterator.Next();
      if (successor_block_info == NULL) {
        break;
      }
      BasicBlock* succ_bb = GetBasicBlock(successor_block_info->block);
      CheckForDominanceFrontier(bb, succ_bb);
    }
  }

  /* Calculate DF_up */
  for (uint32_t dominated_idx : bb->i_dominated->Indexes()) {
    BasicBlock* dominated_bb = GetBasicBlock(dominated_idx);
    for (uint32_t df_up_block_idx : dominated_bb->dom_frontier->Indexes()) {
      BasicBlock* df_up_block = GetBasicBlock(df_up_block_idx);
      CheckForDominanceFrontier(bb, df_up_block);
    }
  }

  return true;
}

/* Worker function for initializing domination-related data structures */
void MIRGraph::InitializeDominationInfo(BasicBlock* bb) {
  int num_total_blocks = GetBasicBlockListCount();

  if (bb->dominators == NULL) {
    bb->dominators = new (arena_) ArenaBitVector(arena_, num_total_blocks,
                                                 false /* expandable */, kBitMapDominators);
    bb->i_dominated = new (arena_) ArenaBitVector(arena_, num_total_blocks,
                                                  false /* expandable */, kBitMapIDominated);
    bb->dom_frontier = new (arena_) ArenaBitVector(arena_, num_total_blocks,
                                                   false /* expandable */, kBitMapDomFrontier);
  } else {
    bb->dominators->ClearAllBits();
    bb->i_dominated->ClearAllBits();
    bb->dom_frontier->ClearAllBits();
  }
  /* Set all bits in the dominator vector */
  bb->dominators->SetInitialBits(num_total_blocks);

  return;
}

/*
 * Walk through the ordered i_dom list until we reach the common parent.
 * Given the ordering of i_dom_list, this common parent represents the
 * last element of the intersection of block1 and block2 dominators.
 */
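/*
 * Blocks are identified here by dfs_id (their DFS post-order index), so the
 * entry block has the highest index and following i_dom_list_ from the
 * lower-numbered block always moves toward the entry block. This is the
 * intersection step of the classic iterative dominance algorithm
 * (Cooper, Harvey & Kennedy).
 */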
int MIRGraph::FindCommonParent(int block1, int block2) {
  while (block1 != block2) {
    while (block1 < block2) {
      block1 = i_dom_list_[block1];
      DCHECK_NE(block1, NOTVISITED);
    }
    while (block2 < block1) {
      block2 = i_dom_list_[block2];
      DCHECK_NE(block2, NOTVISITED);
    }
  }
  return block1;
}

/* Worker function to compute each block's immediate dominator */
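/*
 * One step of the iterative computation: intersect the i_doms of all
 * already-processed predecessors via FindCommonParent(). Returns true if bb's
 * entry in i_dom_list_ changed, so the repeating iterator runs another pass.
 */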
bool MIRGraph::ComputeblockIDom(BasicBlock* bb) {
  /* Special-case entry block */
  if ((bb->id == NullBasicBlockId) || (bb == GetEntryBlock())) {
    return false;
  }

  /* Iterate through the predecessors */
  GrowableArray<BasicBlockId>::Iterator iter(bb->predecessors);

  /* Find the first processed predecessor */
  int idom = -1;
  while (true) {
    BasicBlock* pred_bb = GetBasicBlock(iter.Next());
    CHECK(pred_bb != NULL);
    if (i_dom_list_[pred_bb->dfs_id] != NOTVISITED) {
      idom = pred_bb->dfs_id;
      break;
    }
  }

  /* Scan the rest of the predecessors */
  while (true) {
    BasicBlock* pred_bb = GetBasicBlock(iter.Next());
    if (!pred_bb) {
      break;
    }
    if (i_dom_list_[pred_bb->dfs_id] == NOTVISITED) {
      continue;
    } else {
      idom = FindCommonParent(pred_bb->dfs_id, idom);
    }
  }

  DCHECK_NE(idom, NOTVISITED);

  /* Did something change? */
  if (i_dom_list_[bb->dfs_id] != idom) {
    i_dom_list_[bb->dfs_id] = idom;
    return true;
  }
  return false;
}

/* Worker function to compute each block's dominators */
bool MIRGraph::ComputeBlockDominators(BasicBlock* bb) {
  if (bb == GetEntryBlock()) {
    bb->dominators->ClearAllBits();
  } else {
    bb->dominators->Copy(GetBasicBlock(bb->i_dom)->dominators);
  }
  bb->dominators->SetBit(bb->id);
  return false;
}

bool MIRGraph::SetDominators(BasicBlock* bb) {
  if (bb != GetEntryBlock()) {
    int idom_dfs_idx = i_dom_list_[bb->dfs_id];
    DCHECK_NE(idom_dfs_idx, NOTVISITED);
    int i_dom_idx = dfs_post_order_->Get(idom_dfs_idx);
    BasicBlock* i_dom = GetBasicBlock(i_dom_idx);
    bb->i_dom = i_dom->id;
    /* Add bb to the i_dominated set of the immediate dominator block */
    i_dom->i_dominated->SetBit(bb->id);
  }
  return false;
}

/* Compute dominators, immediate dominator, and dominance frontier */
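/*
 * The computation proceeds in phases:
 *   1) initialize per-block dominator bit vectors and clear i_dom_list_,
 *   2) iterate ComputeblockIDom() over the reverse post-order until the
 *      immediate dominators reach a fixed point,
 *   3) translate i_dom_list_ indices back to block ids (SetDominators) and
 *      build each block's full dominator set (ComputeBlockDominators),
 *   4) compute dominance frontiers over the dominator-tree post-order.
 */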
void MIRGraph::ComputeDominators() {
  int num_reachable_blocks = num_reachable_blocks_;

  /* Initialize domination-related data structures */
  PreOrderDfsIterator iter(this);
  for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
    InitializeDominationInfo(bb);
  }

  /* Initialize & Clear i_dom_list */
  if (max_num_reachable_blocks_ < num_reachable_blocks_) {
    i_dom_list_ = static_cast<int*>(arena_->Alloc(sizeof(int) * num_reachable_blocks,
                                                  kArenaAllocDFInfo));
  }
  for (int i = 0; i < num_reachable_blocks; i++) {
    i_dom_list_[i] = NOTVISITED;
  }

  /* For post-order, the last block is the entry block. Set its i_dom to itself. */
  DCHECK_EQ(GetEntryBlock()->dfs_id, num_reachable_blocks-1);
  i_dom_list_[GetEntryBlock()->dfs_id] = GetEntryBlock()->dfs_id;

  /* Compute the immediate dominators */
  RepeatingReversePostOrderDfsIterator iter2(this);
  bool change = false;
  for (BasicBlock* bb = iter2.Next(false); bb != NULL; bb = iter2.Next(change)) {
    change = ComputeblockIDom(bb);
  }

  /* Set the dominator for the root node */
  GetEntryBlock()->dominators->ClearAllBits();
  GetEntryBlock()->dominators->SetBit(GetEntryBlock()->id);

  GetEntryBlock()->i_dom = 0;

  PreOrderDfsIterator iter3(this);
  for (BasicBlock* bb = iter3.Next(); bb != NULL; bb = iter3.Next()) {
    SetDominators(bb);
  }

  ReversePostOrderDfsIterator iter4(this);
  for (BasicBlock* bb = iter4.Next(); bb != NULL; bb = iter4.Next()) {
    ComputeBlockDominators(bb);
  }

  // Compute the dominance frontier for each block.
  ComputeDomPostOrderTraversal(GetEntryBlock());
  PostOrderDOMIterator iter5(this);
  for (BasicBlock* bb = iter5.Next(); bb != NULL; bb = iter5.Next()) {
    ComputeDominanceFrontier(bb);
  }
}

/*
 * Perform dest U= src1 ^ ~src2
 * This is probably not general enough to be placed in BitVector.[ch].
 */
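/*
 * In other words, dest |= src1 & ~src2, word by word. The caller passes a
 * successor's live-in set as src1 and this block's def set as src2, so only
 * registers that are live into the successor and not defined in this block
 * are propagated into dest.
 */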
void MIRGraph::ComputeSuccLineIn(ArenaBitVector* dest, const ArenaBitVector* src1,
                                 const ArenaBitVector* src2) {
  if (dest->GetStorageSize() != src1->GetStorageSize() ||
      dest->GetStorageSize() != src2->GetStorageSize() ||
      dest->IsExpandable() != src1->IsExpandable() ||
      dest->IsExpandable() != src2->IsExpandable()) {
    LOG(FATAL) << "Incompatible set properties";
  }

  unsigned int idx;
  for (idx = 0; idx < dest->GetStorageSize(); idx++) {
    dest->GetRawStorage()[idx] |= src1->GetRawStorageWord(idx) & ~(src2->GetRawStorageWord(idx));
  }
}

/*
 * Iterate through all successor blocks and propagate up the live-in sets.
 * The calculated result is used for phi-node pruning - where we only need to
 * insert a phi node if the variable is live-in to the block.
 */
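/*
 * Returns true if bb's live-in set changed, so the enclosing repeating
 * iterator keeps re-running the pass until a fixed point is reached.
 */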
bool MIRGraph::ComputeBlockLiveIns(BasicBlock* bb) {
  DCHECK_EQ(temp_bit_vector_size_, cu_->num_dalvik_registers);
  ArenaBitVector* temp_dalvik_register_v = temp_bit_vector_;

  if (bb->data_flow_info == NULL) {
    return false;
  }
  temp_dalvik_register_v->Copy(bb->data_flow_info->live_in_v);
  BasicBlock* bb_taken = GetBasicBlock(bb->taken);
  BasicBlock* bb_fall_through = GetBasicBlock(bb->fall_through);
  if (bb_taken && bb_taken->data_flow_info)
    ComputeSuccLineIn(temp_dalvik_register_v, bb_taken->data_flow_info->live_in_v,
                      bb->data_flow_info->def_v);
  if (bb_fall_through && bb_fall_through->data_flow_info)
    ComputeSuccLineIn(temp_dalvik_register_v, bb_fall_through->data_flow_info->live_in_v,
                      bb->data_flow_info->def_v);
  if (bb->successor_block_list_type != kNotUsed) {
    GrowableArray<SuccessorBlockInfo*>::Iterator iterator(bb->successor_blocks);
    while (true) {
      SuccessorBlockInfo *successor_block_info = iterator.Next();
      if (successor_block_info == NULL) {
        break;
      }
      BasicBlock* succ_bb = GetBasicBlock(successor_block_info->block);
      if (succ_bb->data_flow_info) {
        ComputeSuccLineIn(temp_dalvik_register_v, succ_bb->data_flow_info->live_in_v,
                          bb->data_flow_info->def_v);
      }
    }
  }
  if (!temp_dalvik_register_v->Equal(bb->data_flow_info->live_in_v)) {
    bb->data_flow_info->live_in_v->Copy(temp_dalvik_register_v);
    return true;
  }
  return false;
}

/* Insert phi nodes for each variable into its dominance frontier */
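/*
 * For each Dalvik register, the set of phi blocks is the iterated dominance
 * frontier of the blocks that define it (def_block_matrix_), computed by
 * unioning dom_frontier sets until input_blocks stops growing. A phi is then
 * inserted only where the register is live-in (pruned SSA form).
 */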
void MIRGraph::InsertPhiNodes() {
  int dalvik_reg;
  ArenaBitVector* phi_blocks = new (temp_scoped_alloc_.get()) ArenaBitVector(
      temp_scoped_alloc_.get(), GetNumBlocks(), false, kBitMapPhi);
  ArenaBitVector* input_blocks = new (temp_scoped_alloc_.get()) ArenaBitVector(
      temp_scoped_alloc_.get(), GetNumBlocks(), false, kBitMapInputBlocks);

  RepeatingPostOrderDfsIterator iter(this);
  bool change = false;
  for (BasicBlock* bb = iter.Next(false); bb != NULL; bb = iter.Next(change)) {
    change = ComputeBlockLiveIns(bb);
  }

  /* Iterate through each Dalvik register */
  for (dalvik_reg = cu_->num_dalvik_registers - 1; dalvik_reg >= 0; dalvik_reg--) {
    input_blocks->Copy(def_block_matrix_[dalvik_reg]);
    phi_blocks->ClearAllBits();
    do {
      // TUNING: When we repeat this, we could skip indexes from the previous pass.
      for (uint32_t idx : input_blocks->Indexes()) {
        BasicBlock* def_bb = GetBasicBlock(idx);
        if (def_bb->dom_frontier != nullptr) {
          phi_blocks->Union(def_bb->dom_frontier);
        }
      }
    } while (input_blocks->Union(phi_blocks));

    /*
     * Insert a phi node for dalvik_reg in the phi_blocks if the Dalvik
     * register is in the live-in set.
     */
    for (uint32_t idx : phi_blocks->Indexes()) {
      BasicBlock* phi_bb = GetBasicBlock(idx);
      /* Variable will be clobbered before being used - no need for phi */
      if (!phi_bb->data_flow_info->live_in_v->IsBitSet(dalvik_reg)) {
        continue;
      }
      MIR *phi = NewMIR();
      phi->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpPhi);
      phi->dalvikInsn.vA = dalvik_reg;
      phi->offset = phi_bb->start_offset;
      phi->m_unit_index = 0;  // Arbitrarily assign all Phi nodes to outermost method.
      phi_bb->PrependMIR(phi);
    }
  }
}

/*
 * Worker function to insert phi-operands with latest SSA names from
 * predecessor blocks
 */
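/*
 * For every kMirOpPhi at the head of the block, one use is allocated per
 * predecessor: uses[i] is the SSA name of the vreg at that predecessor's exit
 * (vreg_to_ssa_map_exit) and meta.phi_incoming[i] records the predecessor id.
 */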
bool MIRGraph::InsertPhiNodeOperands(BasicBlock* bb) {
  /* Phi nodes are at the beginning of each block */
  for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
    if (mir->dalvikInsn.opcode != static_cast<Instruction::Code>(kMirOpPhi))
      return true;
    int ssa_reg = mir->ssa_rep->defs[0];
    DCHECK_GE(ssa_reg, 0);   // Shouldn't see compiler temps here
    int v_reg = SRegToVReg(ssa_reg);

    /* Iterate through the predecessors */
    GrowableArray<BasicBlockId>::Iterator iter(bb->predecessors);
    size_t num_uses = bb->predecessors->Size();
    AllocateSSAUseData(mir, num_uses);
    int* uses = mir->ssa_rep->uses;
    BasicBlockId* incoming =
        static_cast<BasicBlockId*>(arena_->Alloc(sizeof(BasicBlockId) * num_uses,
                                                 kArenaAllocDFInfo));
    mir->meta.phi_incoming = incoming;
    int idx = 0;
    while (true) {
      BasicBlock* pred_bb = GetBasicBlock(iter.Next());
      if (!pred_bb) {
        break;
      }
      int ssa_reg = pred_bb->data_flow_info->vreg_to_ssa_map_exit[v_reg];
      uses[idx] = ssa_reg;
      incoming[idx] = pred_bb->id;
      idx++;
    }
  }

  return true;
}

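/*
 * Rename registers into SSA form with a depth-first walk from the entry
 * block. The current vreg_to_ssa_map_ is snapshotted before descending into
 * the successors and restored after each recursive call, so every successor
 * is renamed with the SSA names that were live at the end of this block.
 */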
void MIRGraph::DoDFSPreOrderSSARename(BasicBlock* block) {
  if (block->visited || block->hidden) {
    return;
  }
  block->visited = true;

  /* Process this block */
  DoSSAConversion(block);
  int map_size = sizeof(int) * cu_->num_dalvik_registers;

  /* Save SSA map snapshot */
  ScopedArenaAllocator allocator(&cu_->arena_stack);
  int* saved_ssa_map =
      static_cast<int*>(allocator.Alloc(map_size, kArenaAllocDalvikToSSAMap));
  memcpy(saved_ssa_map, vreg_to_ssa_map_, map_size);

  if (block->fall_through != NullBasicBlockId) {
    DoDFSPreOrderSSARename(GetBasicBlock(block->fall_through));
    /* Restore SSA map snapshot */
    memcpy(vreg_to_ssa_map_, saved_ssa_map, map_size);
  }
  if (block->taken != NullBasicBlockId) {
    DoDFSPreOrderSSARename(GetBasicBlock(block->taken));
    /* Restore SSA map snapshot */
    memcpy(vreg_to_ssa_map_, saved_ssa_map, map_size);
  }
  if (block->successor_block_list_type != kNotUsed) {
    GrowableArray<SuccessorBlockInfo*>::Iterator iterator(block->successor_blocks);
    while (true) {
      SuccessorBlockInfo *successor_block_info = iterator.Next();
      if (successor_block_info == NULL) {
        break;
      }
      BasicBlock* succ_bb = GetBasicBlock(successor_block_info->block);
      DoDFSPreOrderSSARename(succ_bb);
      /* Restore SSA map snapshot */
      memcpy(vreg_to_ssa_map_, saved_ssa_map, map_size);
    }
  }
  return;
}

}  // namespace art