/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "base/bit_vector-inl.h"
#include "base/logging.h"
#include "base/scoped_arena_containers.h"
#include "compiler_ir.h"
#include "dataflow_iterator-inl.h"

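// Sentinel value stored in i_dom_list_ for blocks whose immediate dominator
// has not been computed yet.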
#define NOTVISITED (-1)

namespace art {

void MIRGraph::ClearAllVisitedFlags() {
  AllNodesIterator iter(this);
  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
    bb->visited = false;
  }
}

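// Returns bb if it still needs to be visited (not yet visited and not hidden), null otherwise.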
BasicBlock* MIRGraph::NeedsVisit(BasicBlock* bb) {
  if (bb != nullptr) {
    if (bb->visited || bb->hidden) {
      bb = nullptr;
    }
  }
  return bb;
}

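// Returns the first unvisited successor of bb, checking the fall-through target
// first, then the taken target, then any additional successors (switch targets
// and catch handlers) in the successor block list.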
BasicBlock* MIRGraph::NextUnvisitedSuccessor(BasicBlock* bb) {
  BasicBlock* res = NeedsVisit(GetBasicBlock(bb->fall_through));
  if (res == nullptr) {
    res = NeedsVisit(GetBasicBlock(bb->taken));
    if (res == nullptr) {
      if (bb->successor_block_list_type != kNotUsed) {
        for (SuccessorBlockInfo* sbi : bb->successor_blocks) {
          res = NeedsVisit(GetBasicBlock(sbi->block));
          if (res != nullptr) {
            break;
          }
        }
      }
    }
  }
  return res;
}

void MIRGraph::MarkPreOrder(BasicBlock* block) {
  block->visited = true;
  /* Enqueue the pre_order block id */
  if (block->id != NullBasicBlockId) {
    dfs_order_.push_back(block->id);
  }
}

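/*
 * Compute the DFS orders iteratively with an explicit stack, avoiding recursion
 * on potentially deep CFGs: a block's id is appended to dfs_order_ when it is
 * first pushed (pre-order) and to dfs_post_order_ when all of its successors
 * have been visited and it is popped (post-order). Each block's dfs_id is its
 * post-order index.
 */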
void MIRGraph::RecordDFSOrders(BasicBlock* block) {
  ScopedArenaAllocator allocator(&cu_->arena_stack);
  ScopedArenaVector<BasicBlock*> succ(allocator.Adapter());
  succ.reserve(GetNumBlocks());
  MarkPreOrder(block);
  succ.push_back(block);
  while (!succ.empty()) {
    BasicBlock* curr = succ.back();
    BasicBlock* next_successor = NextUnvisitedSuccessor(curr);
    if (next_successor != nullptr) {
      MarkPreOrder(next_successor);
      succ.push_back(next_successor);
      continue;
    }
    curr->dfs_id = dfs_post_order_.size();
    if (curr->id != NullBasicBlockId) {
      dfs_post_order_.push_back(curr->id);
    }
    succ.pop_back();
  }
}

/* Compute the DFS pre-order and post-order of the blocks. */
void MIRGraph::ComputeDFSOrders() {
  /* Clear the DFS pre-order and post-order lists. */
  dfs_order_.clear();
  dfs_order_.reserve(GetNumBlocks());
  dfs_post_order_.clear();
  dfs_post_order_.reserve(GetNumBlocks());

  // Reset the visited flags on all nodes.
  ClearAllVisitedFlags();

  // Record the DFS orders.
  RecordDFSOrders(GetEntryBlock());

  num_reachable_blocks_ = dfs_order_.size();

  if (num_reachable_blocks_ != GetNumBlocks()) {
    // Kill all unreachable blocks.
    AllNodesIterator iter(this);
    for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
      if (!bb->visited) {
        bb->Kill(this);
      }
    }
  }
  dfs_orders_up_to_date_ = true;
}

/*
 * Mark block bit on the per-Dalvik register vector to denote that Dalvik
 * register idx is defined in BasicBlock bb.
 */
bool MIRGraph::FillDefBlockMatrix(BasicBlock* bb) {
  if (bb->data_flow_info == nullptr) {
    return false;
  }

  for (uint32_t idx : bb->data_flow_info->def_v->Indexes()) {
    /* Block bb defines register idx */
    temp_.ssa.def_block_matrix[idx]->SetBit(bb->id);
  }
  return true;
}

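/*
 * Build def_block_matrix: one bit vector per Dalvik register, where bit b of
 * vector v is set iff basic block b defines register v. This def-site
 * information is what later drives phi placement on dominance frontiers.
 */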
void MIRGraph::ComputeDefBlockMatrix() {
  int num_registers = GetNumOfCodeAndTempVRs();
  /* Allocate num_registers bit vector pointers */
  DCHECK(temp_scoped_alloc_ != nullptr);
  DCHECK(temp_.ssa.def_block_matrix == nullptr);
  temp_.ssa.def_block_matrix =
      temp_scoped_alloc_->AllocArray<ArenaBitVector*>(num_registers, kArenaAllocDFInfo);

  /* Initialize num_registers vectors with num_blocks bits each */
  for (int i = 0; i < num_registers; i++) {
    temp_.ssa.def_block_matrix[i] = new (temp_scoped_alloc_.get()) ArenaBitVector(
        arena_, GetNumBlocks(), false, kBitMapBMatrix);
    temp_.ssa.def_block_matrix[i]->ClearAllBits();
  }

  AllNodesIterator iter(this);
  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
    FindLocalLiveIn(bb);
  }
  AllNodesIterator iter2(this);
  for (BasicBlock* bb = iter2.Next(); bb != nullptr; bb = iter2.Next()) {
    FillDefBlockMatrix(bb);
  }

  /*
   * Also set the incoming parameters as defs in the entry block.
   * Only need to handle the parameters for the outer method.
   */
  int num_regs = GetNumOfCodeVRs();
  int in_reg = GetFirstInVR();
  for (; in_reg < num_regs; in_reg++) {
    temp_.ssa.def_block_matrix[in_reg]->SetBit(GetEntryBlock()->id);
  }
}

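/*
 * Compute a post-order traversal of the dominator tree rooted at bb,
 * iteratively, using a work stack of (block, next-child iterator) pairs. The
 * result is stored in dom_post_order_traversal_ and drives the
 * dominance-frontier computation below.
 */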
void MIRGraph::ComputeDomPostOrderTraversal(BasicBlock* bb) {
  // Clear the dominator post-order list.
  dom_post_order_traversal_.clear();
  dom_post_order_traversal_.reserve(num_reachable_blocks_);

  ClearAllVisitedFlags();
  ScopedArenaAllocator allocator(&cu_->arena_stack);
  ScopedArenaVector<std::pair<BasicBlock*, ArenaBitVector::IndexIterator>> work_stack(
      allocator.Adapter());
  bb->visited = true;
  work_stack.push_back(std::make_pair(bb, bb->i_dominated->Indexes().begin()));
  while (!work_stack.empty()) {
    std::pair<BasicBlock*, ArenaBitVector::IndexIterator>* curr = &work_stack.back();
    BasicBlock* curr_bb = curr->first;
    ArenaBitVector::IndexIterator* curr_idom_iter = &curr->second;
    while (!curr_idom_iter->Done() && (NeedsVisit(GetBasicBlock(**curr_idom_iter)) == nullptr)) {
      ++*curr_idom_iter;
    }
    // NOTE: work_stack.push_back()/pop_back() invalidate curr and curr_idom_iter.
    if (!curr_idom_iter->Done()) {
      BasicBlock* new_bb = GetBasicBlock(**curr_idom_iter);
      ++*curr_idom_iter;
      new_bb->visited = true;
      work_stack.push_back(std::make_pair(new_bb, new_bb->i_dominated->Indexes().begin()));
    } else {
      // No unvisited children remain in the dominator tree; emit curr_bb in post-order.
      if (curr_bb->id != NullBasicBlockId) {
        dom_post_order_traversal_.push_back(curr_bb->id);
      }
      work_stack.pop_back();
    }
  }
}

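/*
 * Helper for the Cytron et al. style dominance-frontier computation: succ_bb is
 * added to dom_bb's frontier unless dom_bb is succ_bb's immediate dominator.
 * For example, in the diamond CFG
 *
 *        A
 *       / \
 *      B   C
 *       \ /
 *        D
 *
 * D ends up in the dominance frontier of both B and C, since each has D as a
 * successor but D's immediate dominator is A.
 */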
void MIRGraph::CheckForDominanceFrontier(BasicBlock* dom_bb,
                                         const BasicBlock* succ_bb) {
  /*
   * TODO - evaluate whether phi will ever need to be inserted into exit
   * blocks.
   */
  if (succ_bb->i_dom != dom_bb->id &&
      succ_bb->block_type == kDalvikByteCode &&
      succ_bb->hidden == false) {
    dom_bb->dom_frontier->SetBit(succ_bb->id);
  }
}

/* Worker function to compute the dominance frontier */
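/*
 * DF(bb) = DF_local(bb) U (union of DF_up(c) over all blocks c immediately
 * dominated by bb), where DF_local comes from bb's direct CFG successors and
 * DF_up filters the frontiers of bb's dominator-tree children.
 */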
bool MIRGraph::ComputeDominanceFrontier(BasicBlock* bb) {
  /* Calculate DF_local */
  if (bb->taken != NullBasicBlockId) {
    CheckForDominanceFrontier(bb, GetBasicBlock(bb->taken));
  }
  if (bb->fall_through != NullBasicBlockId) {
    CheckForDominanceFrontier(bb, GetBasicBlock(bb->fall_through));
  }
  if (bb->successor_block_list_type != kNotUsed) {
    for (SuccessorBlockInfo* successor_block_info : bb->successor_blocks) {
      BasicBlock* succ_bb = GetBasicBlock(successor_block_info->block);
      CheckForDominanceFrontier(bb, succ_bb);
    }
  }

  /* Calculate DF_up */
  for (uint32_t dominated_idx : bb->i_dominated->Indexes()) {
    BasicBlock* dominated_bb = GetBasicBlock(dominated_idx);
    for (uint32_t df_up_block_idx : dominated_bb->dom_frontier->Indexes()) {
      BasicBlock* df_up_block = GetBasicBlock(df_up_block_idx);
      CheckForDominanceFrontier(bb, df_up_block);
    }
  }

  return true;
}

/* Worker function for initializing domination-related data structures */
void MIRGraph::InitializeDominationInfo(BasicBlock* bb) {
  int num_total_blocks = GetBasicBlockListCount();

  if (bb->dominators == nullptr) {
    bb->dominators = new (arena_) ArenaBitVector(arena_, num_total_blocks,
                                                 true /* expandable */, kBitMapDominators);
    bb->i_dominated = new (arena_) ArenaBitVector(arena_, num_total_blocks,
                                                  true /* expandable */, kBitMapIDominated);
    bb->dom_frontier = new (arena_) ArenaBitVector(arena_, num_total_blocks,
                                                   true /* expandable */, kBitMapDomFrontier);
  } else {
    bb->dominators->ClearAllBits();
    bb->i_dominated->ClearAllBits();
    bb->dom_frontier->ClearAllBits();
  }
  /* Set all bits in the dominator vector */
  bb->dominators->SetInitialBits(num_total_blocks);
}

/*
 * Walk through the ordered i_dom list until we reach the common parent.
 * Given the ordering of i_dom_list, this common parent represents the
 * last element of the intersection of block1 and block2's dominators.
 */
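/*
 * This is the "intersect" step of Cooper, Harvey & Kennedy's "A Simple, Fast
 * Dominance Algorithm": because dfs_id is a post-order index and every block's
 * immediate dominator is one of its DFS-tree ancestors, following i_dom_list_
 * strictly increases the index, so repeatedly walking the lower-numbered block
 * upwards converges on the common dominator.
 */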
int MIRGraph::FindCommonParent(int block1, int block2) {
  while (block1 != block2) {
    while (block1 < block2) {
      block1 = i_dom_list_[block1];
      DCHECK_NE(block1, NOTVISITED);
    }
    while (block2 < block1) {
      block2 = i_dom_list_[block2];
      DCHECK_NE(block2, NOTVISITED);
    }
  }
  return block1;
}

/* Worker function to compute each block's immediate dominator */
bool MIRGraph::ComputeblockIDom(BasicBlock* bb) {
  /* Special-case entry block */
  if ((bb->id == NullBasicBlockId) || (bb == GetEntryBlock())) {
    return false;
  }

  /* Iterate through the predecessors */
  auto it = bb->predecessors.begin(), end = bb->predecessors.end();

  /* Find the first processed predecessor */
  int idom = -1;
  for ( ; ; ++it) {
    CHECK(it != end);
    BasicBlock* pred_bb = GetBasicBlock(*it);
    DCHECK(pred_bb != nullptr);
    if (i_dom_list_[pred_bb->dfs_id] != NOTVISITED) {
      idom = pred_bb->dfs_id;
      break;
    }
  }

  /* Scan the rest of the predecessors */
  for ( ; it != end; ++it) {
    BasicBlock* pred_bb = GetBasicBlock(*it);
    DCHECK(pred_bb != nullptr);
    if (i_dom_list_[pred_bb->dfs_id] == NOTVISITED) {
      continue;
    } else {
      idom = FindCommonParent(pred_bb->dfs_id, idom);
    }
  }

  DCHECK_NE(idom, NOTVISITED);

  /* Did something change? */
  if (i_dom_list_[bb->dfs_id] != idom) {
    i_dom_list_[bb->dfs_id] = idom;
    return true;
  }
  return false;
}

/* Worker function to compute each block's dominators */
bool MIRGraph::ComputeBlockDominators(BasicBlock* bb) {
  if (bb == GetEntryBlock()) {
    bb->dominators->ClearAllBits();
  } else {
    bb->dominators->Copy(GetBasicBlock(bb->i_dom)->dominators);
  }
  bb->dominators->SetBit(bb->id);
  return false;
}

bool MIRGraph::SetDominators(BasicBlock* bb) {
  if (bb != GetEntryBlock()) {
    int idom_dfs_idx = i_dom_list_[bb->dfs_id];
    DCHECK_NE(idom_dfs_idx, NOTVISITED);
    int i_dom_idx = dfs_post_order_[idom_dfs_idx];
    BasicBlock* i_dom = GetBasicBlock(i_dom_idx);
    bb->i_dom = i_dom->id;
    /* Add bb to the i_dominated set of the immediate dominator block */
    i_dom->i_dominated->SetBit(bb->id);
  }
  return false;
}

/* Compute dominators, immediate dominator, and dominance frontier */
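/*
 * Immediate dominators are found by iterating ComputeblockIDom over the blocks
 * in reverse post-order until no i_dom entry changes (the Cooper/Harvey/Kennedy
 * fixed-point iteration); the dominator sets, dominator-tree children and
 * dominance frontiers are then derived from the resulting i_dom_list_.
 */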
void MIRGraph::ComputeDominators() {
  int num_reachable_blocks = num_reachable_blocks_;

  /* Initialize domination-related data structures */
  PreOrderDfsIterator iter(this);
  for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
    InitializeDominationInfo(bb);
  }

  /* Initialize & clear i_dom_list */
  if (max_num_reachable_blocks_ < num_reachable_blocks_) {
    i_dom_list_ = arena_->AllocArray<int>(num_reachable_blocks, kArenaAllocDFInfo);
  }
  for (int i = 0; i < num_reachable_blocks; i++) {
    i_dom_list_[i] = NOTVISITED;
  }

  /* For post-order, the last block is the entry block. Set its i_dom to itself. */
  DCHECK_EQ(GetEntryBlock()->dfs_id, num_reachable_blocks - 1);
  i_dom_list_[GetEntryBlock()->dfs_id] = GetEntryBlock()->dfs_id;

  /* Compute the immediate dominators */
  RepeatingReversePostOrderDfsIterator iter2(this);
  bool change = false;
  for (BasicBlock* bb = iter2.Next(false); bb != nullptr; bb = iter2.Next(change)) {
    change = ComputeblockIDom(bb);
  }

  /* Set the dominator for the root node */
  GetEntryBlock()->dominators->ClearAllBits();
  GetEntryBlock()->dominators->SetBit(GetEntryBlock()->id);

  GetEntryBlock()->i_dom = 0;

  PreOrderDfsIterator iter3(this);
  for (BasicBlock* bb = iter3.Next(); bb != nullptr; bb = iter3.Next()) {
    SetDominators(bb);
  }

  ReversePostOrderDfsIterator iter4(this);
  for (BasicBlock* bb = iter4.Next(); bb != nullptr; bb = iter4.Next()) {
    ComputeBlockDominators(bb);
  }

  // Compute the dominance frontier for each block.
  ComputeDomPostOrderTraversal(GetEntryBlock());
  PostOrderDOMIterator iter5(this);
  for (BasicBlock* bb = iter5.Next(); bb != nullptr; bb = iter5.Next()) {
    ComputeDominanceFrontier(bb);
  }

  domination_up_to_date_ = true;
}

/*
 * Perform dest |= src1 & ~src2, i.e. add to dest every bit that is set in src1
 * but not in src2. This is probably not general enough to be placed in
 * BitVector.[ch].
 */
void MIRGraph::ComputeSuccLineIn(ArenaBitVector* dest, const ArenaBitVector* src1,
                                 const ArenaBitVector* src2) {
  if (dest->GetStorageSize() != src1->GetStorageSize() ||
      dest->GetStorageSize() != src2->GetStorageSize() ||
      dest->IsExpandable() != src1->IsExpandable() ||
      dest->IsExpandable() != src2->IsExpandable()) {
    LOG(FATAL) << "Incompatible set properties";
  }

  for (unsigned int idx = 0; idx < dest->GetStorageSize(); idx++) {
    dest->GetRawStorage()[idx] |= src1->GetRawStorageWord(idx) & ~(src2->GetRawStorageWord(idx));
  }
}

/*
 * Iterate through all successor blocks and propagate up the live-in sets.
 * The calculated result is used for phi-node pruning: we only need to
 * insert a phi node if the variable is live-in to the block.
 */
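/*
 * This implements the standard backward live-variable equation
 *   live_in(bb) = use(bb) U (union over successors s of (live_in(s) - def(bb)))
 * where the upward-exposed uses were seeded into live_in_v by FindLocalLiveIn.
 * Returns true when bb's live-in set changed, so the repeating iterator knows
 * to run another pass.
 */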
bool MIRGraph::ComputeBlockLiveIns(BasicBlock* bb) {
  DCHECK_EQ(temp_.ssa.num_vregs, cu_->mir_graph.get()->GetNumOfCodeAndTempVRs());
  ArenaBitVector* temp_live_vregs = temp_.ssa.work_live_vregs;

  if (bb->data_flow_info == nullptr) {
    return false;
  }
  temp_live_vregs->Copy(bb->data_flow_info->live_in_v);
  BasicBlock* bb_taken = GetBasicBlock(bb->taken);
  BasicBlock* bb_fall_through = GetBasicBlock(bb->fall_through);
  if (bb_taken && bb_taken->data_flow_info) {
    ComputeSuccLineIn(temp_live_vregs, bb_taken->data_flow_info->live_in_v,
                      bb->data_flow_info->def_v);
  }
  if (bb_fall_through && bb_fall_through->data_flow_info) {
    ComputeSuccLineIn(temp_live_vregs, bb_fall_through->data_flow_info->live_in_v,
                      bb->data_flow_info->def_v);
  }
  if (bb->successor_block_list_type != kNotUsed) {
    for (SuccessorBlockInfo* successor_block_info : bb->successor_blocks) {
      BasicBlock* succ_bb = GetBasicBlock(successor_block_info->block);
      if (succ_bb->data_flow_info) {
        ComputeSuccLineIn(temp_live_vregs, succ_bb->data_flow_info->live_in_v,
                          bb->data_flow_info->def_v);
      }
    }
  }
  if (!temp_live_vregs->Equal(bb->data_flow_info->live_in_v)) {
    bb->data_flow_info->live_in_v->Copy(temp_live_vregs);
    return true;
  }
  return false;
}

/* For each Dalvik reg, find the blocks that need phi nodes according to the dominance frontiers. */
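/*
 * The phi-node blocks for a register form its iterated dominance frontier:
 * starting from the blocks that define the register, repeatedly union in the
 * dominance frontiers of all blocks in the set until it stops growing (the
 * do/while below terminates once input_blocks->Union() reports no change).
 */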
void MIRGraph::FindPhiNodeBlocks() {
  RepeatingPostOrderDfsIterator iter(this);
  bool change = false;
  for (BasicBlock* bb = iter.Next(false); bb != nullptr; bb = iter.Next(change)) {
    change = ComputeBlockLiveIns(bb);
  }

  ArenaBitVector* phi_blocks = new (temp_scoped_alloc_.get()) ArenaBitVector(
      temp_scoped_alloc_.get(), GetNumBlocks(), false, kBitMapBMatrix);

  // Reuse the def_block_matrix storage for phi_node_blocks.
  ArenaBitVector** def_block_matrix = temp_.ssa.def_block_matrix;
  ArenaBitVector** phi_node_blocks = def_block_matrix;
  DCHECK(temp_.ssa.phi_node_blocks == nullptr);
  temp_.ssa.phi_node_blocks = phi_node_blocks;
  temp_.ssa.def_block_matrix = nullptr;

  /* Iterate through each Dalvik register */
  for (int dalvik_reg = GetNumOfCodeAndTempVRs() - 1; dalvik_reg >= 0; dalvik_reg--) {
    phi_blocks->ClearAllBits();
    ArenaBitVector* input_blocks = def_block_matrix[dalvik_reg];
    do {
      // TUNING: When we repeat this, we could skip indexes from the previous pass.
      for (uint32_t idx : input_blocks->Indexes()) {
        BasicBlock* def_bb = GetBasicBlock(idx);
        if (def_bb->dom_frontier != nullptr) {
          phi_blocks->Union(def_bb->dom_frontier);
        }
      }
    } while (input_blocks->Union(phi_blocks));

    def_block_matrix[dalvik_reg] = phi_blocks;
    phi_blocks = input_blocks;  // Reuse the bit vector in the next iteration.
  }
}

/*
 * Worker function to insert phi-operands with the latest SSA names from
 * predecessor blocks.
 */
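/*
 * Each phi at the top of bb gets one use per predecessor: the SSA name that the
 * phi's Dalvik register carries at that predecessor's exit, recorded together
 * with the predecessor's id in meta.phi_incoming.
 */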
bool MIRGraph::InsertPhiNodeOperands(BasicBlock* bb) {
  /* Phi nodes are at the beginning of each block */
  for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
    if (mir->dalvikInsn.opcode != static_cast<Instruction::Code>(kMirOpPhi)) {
      return true;
    }
    int ssa_reg = mir->ssa_rep->defs[0];
    DCHECK_GE(ssa_reg, 0);   // Shouldn't see compiler temps here.
    int v_reg = SRegToVReg(ssa_reg);

    /* Iterate through the predecessors */
    size_t num_uses = bb->predecessors.size();
    AllocateSSAUseData(mir, num_uses);
    int* uses = mir->ssa_rep->uses;
    BasicBlockId* incoming = arena_->AllocArray<BasicBlockId>(num_uses, kArenaAllocDFInfo);
    mir->meta.phi_incoming = incoming;
    int idx = 0;
    for (BasicBlockId pred_id : bb->predecessors) {
      BasicBlock* pred_bb = GetBasicBlock(pred_id);
      DCHECK(pred_bb != nullptr);
      uses[idx] = pred_bb->data_flow_info->vreg_to_ssa_map_exit[v_reg];
      incoming[idx] = pred_id;
      idx++;
    }
  }

  return true;
}

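/*
 * Rename registers recursively along a DFS pre-order of the CFG. The
 * vreg-to-SSA-name map is snapshotted before descending into each successor and
 * restored afterwards, so sibling branches see the names that were live at the
 * end of this block rather than those created in an already-renamed branch.
 */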
void MIRGraph::DoDFSPreOrderSSARename(BasicBlock* block) {
  if (block->visited || block->hidden) {
    return;
  }
  block->visited = true;

  /* Process this block */
  DoSSAConversion(block);

  /* Save SSA map snapshot */
  ScopedArenaAllocator allocator(&cu_->arena_stack);
  uint32_t num_vregs = GetNumOfCodeAndTempVRs();
  int32_t* saved_ssa_map = allocator.AllocArray<int32_t>(num_vregs, kArenaAllocDalvikToSSAMap);
  size_t map_size = sizeof(saved_ssa_map[0]) * num_vregs;
  memcpy(saved_ssa_map, vreg_to_ssa_map_, map_size);

  if (block->fall_through != NullBasicBlockId) {
    DoDFSPreOrderSSARename(GetBasicBlock(block->fall_through));
    /* Restore SSA map snapshot */
    memcpy(vreg_to_ssa_map_, saved_ssa_map, map_size);
  }
  if (block->taken != NullBasicBlockId) {
    DoDFSPreOrderSSARename(GetBasicBlock(block->taken));
    /* Restore SSA map snapshot */
    memcpy(vreg_to_ssa_map_, saved_ssa_map, map_size);
  }
  if (block->successor_block_list_type != kNotUsed) {
    for (SuccessorBlockInfo* successor_block_info : block->successor_blocks) {
      BasicBlock* succ_bb = GetBasicBlock(successor_block_info->block);
      DoDFSPreOrderSSARename(succ_bb);
      /* Restore SSA map snapshot */
      memcpy(vreg_to_ssa_map_, saved_ssa_map, map_size);
    }
  }
}

}  // namespace art