1 /*
2  * Copyright (C) 2011 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "base/bit_vector-inl.h"
18 #include "base/logging.h"
19 #include "base/scoped_arena_containers.h"
20 #include "dataflow_iterator-inl.h"
21 #include "dex/verified_method.h"
22 #include "dex_flags.h"
23 #include "driver/compiler_driver.h"
24 #include "driver/dex_compilation_unit.h"
25 #include "global_value_numbering.h"
26 #include "gvn_dead_code_elimination.h"
27 #include "local_value_numbering.h"
28 #include "mir_field_info.h"
29 #include "mirror/string.h"
30 #include "quick/dex_file_method_inliner.h"
31 #include "quick/dex_file_to_method_inliner_map.h"
32 #include "stack.h"
33 #include "type_inference.h"
34 #include "utils.h"
35 
36 namespace art {
37 
38 static unsigned int Predecessors(BasicBlock* bb) {
39   return bb->predecessors.size();
40 }
41 
42 /* Set up a constant value for opcodes that have the DF_SETS_CONST attribute */
43 void MIRGraph::SetConstant(int32_t ssa_reg, int32_t value) {
44   is_constant_v_->SetBit(ssa_reg);
45   constant_values_[ssa_reg] = value;
46   reg_location_[ssa_reg].is_const = true;
47 }
48 
49 void MIRGraph::SetConstantWide(int32_t ssa_reg, int64_t value) {
50   is_constant_v_->SetBit(ssa_reg);
51   is_constant_v_->SetBit(ssa_reg + 1);
52   constant_values_[ssa_reg] = Low32Bits(value);
53   constant_values_[ssa_reg + 1] = High32Bits(value);
54   reg_location_[ssa_reg].is_const = true;
55   reg_location_[ssa_reg + 1].is_const = true;
56 }
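// Illustrative example (hypothetical values, not from any particular method): a const/high16
// whose 16-bit literal is 0x4120 is recorded by SetConstant() as 0x4120 << 16 = 0x41200000,
// and a const-wide/high16 with literal 0x4010 is recorded by SetConstantWide() as
// 0x4010LL << 48, matching the DF_SETS_CONST handling in DoConstantPropagation() below.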
57 
58 void MIRGraph::DoConstantPropagation(BasicBlock* bb) {
59   MIR* mir;
60 
61   for (mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
62     // Skip pass if BB has MIR without SSA representation.
63     if (mir->ssa_rep == nullptr) {
64        return;
65     }
66 
67     uint64_t df_attributes = GetDataFlowAttributes(mir);
68 
69     MIR::DecodedInstruction* d_insn = &mir->dalvikInsn;
70 
71     if (!(df_attributes & DF_HAS_DEFS)) continue;
72 
73     /* Handle instructions that set up constants directly */
74     if (df_attributes & DF_SETS_CONST) {
75       if (df_attributes & DF_DA) {
76         int32_t vB = static_cast<int32_t>(d_insn->vB);
77         switch (d_insn->opcode) {
78           case Instruction::CONST_4:
79           case Instruction::CONST_16:
80           case Instruction::CONST:
81             SetConstant(mir->ssa_rep->defs[0], vB);
82             break;
83           case Instruction::CONST_HIGH16:
84             SetConstant(mir->ssa_rep->defs[0], vB << 16);
85             break;
86           case Instruction::CONST_WIDE_16:
87           case Instruction::CONST_WIDE_32:
88             SetConstantWide(mir->ssa_rep->defs[0], static_cast<int64_t>(vB));
89             break;
90           case Instruction::CONST_WIDE:
91             SetConstantWide(mir->ssa_rep->defs[0], d_insn->vB_wide);
92             break;
93           case Instruction::CONST_WIDE_HIGH16:
94             SetConstantWide(mir->ssa_rep->defs[0], static_cast<int64_t>(vB) << 48);
95             break;
96           default:
97             break;
98         }
99       }
101     } else if (df_attributes & DF_IS_MOVE) {
102       int i;
103 
104       for (i = 0; i < mir->ssa_rep->num_uses; i++) {
105         if (!is_constant_v_->IsBitSet(mir->ssa_rep->uses[i])) break;
106       }
107       /* Move a register holding a constant to another register */
108       if (i == mir->ssa_rep->num_uses) {
109         SetConstant(mir->ssa_rep->defs[0], constant_values_[mir->ssa_rep->uses[0]]);
110         if (df_attributes & DF_A_WIDE) {
111           SetConstant(mir->ssa_rep->defs[1], constant_values_[mir->ssa_rep->uses[1]]);
112         }
113       }
114     }
115   }
116   /* TODO: implement code to handle arithmetic operations */
117 }
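// Sketch of the DF_IS_MOVE propagation above (hypothetical bytecode; v0_ssa/v1_ssa stand for
// the corresponding SSA names):
//   const v0, 42     -> SetConstant(v0_ssa, 42)
//   move v1, v0      -> every use is a known constant, so SetConstant(v1_ssa, 42)
// Afterwards IsConst() is true for v1's SSA name and ConstantValue() yields 42.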
118 
119 /* Advance to next strictly dominated MIR node in an extended basic block */
120 MIR* MIRGraph::AdvanceMIR(BasicBlock** p_bb, MIR* mir) {
121   BasicBlock* bb = *p_bb;
122   if (mir != nullptr) {
123     mir = mir->next;
124     while (mir == nullptr) {
125       bb = GetBasicBlock(bb->fall_through);
126       if ((bb == nullptr) || Predecessors(bb) != 1) {
127         // mir is null and we cannot proceed further.
128         break;
129       } else {
130         *p_bb = bb;
131         mir = bb->first_mir_insn;
132       }
133     }
134   }
135   return mir;
136 }
137 
138 /*
139  * To be used at an invoke mir.  If the logically next mir node represents
140  * a move-result, return it.  Else, return nullptr.  If a move-result exists,
141  * it is required to immediately follow the invoke with no intervening
142  * opcodes or incoming arcs.  However, if the result of the invoke is not
143  * used, a move-result may not be present.
144  */
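// Typical shape handled below (illustrative):
//   invoke-virtual {v0}, LFoo;->bar()I
//   move-result v1        <- returned by FindMoveResult()
// If the invoke's result is unused, there is no move-result and nullptr is returned.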
145 MIR* MIRGraph::FindMoveResult(BasicBlock* bb, MIR* mir) {
146   BasicBlock* tbb = bb;
147   mir = AdvanceMIR(&tbb, mir);
148   while (mir != nullptr) {
149     if ((mir->dalvikInsn.opcode == Instruction::MOVE_RESULT) ||
150         (mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_OBJECT) ||
151         (mir->dalvikInsn.opcode == Instruction::MOVE_RESULT_WIDE)) {
152       break;
153     }
154     // Keep going if pseudo op, otherwise terminate
155     if (MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)) {
156       mir = AdvanceMIR(&tbb, mir);
157     } else {
158       mir = nullptr;
159     }
160   }
161   return mir;
162 }
163 
164 BasicBlock* MIRGraph::NextDominatedBlock(BasicBlock* bb) {
165   if (bb->block_type == kDead) {
166     return nullptr;
167   }
168   DCHECK((bb->block_type == kEntryBlock) || (bb->block_type == kDalvikByteCode)
169       || (bb->block_type == kExitBlock));
170   BasicBlock* bb_taken = GetBasicBlock(bb->taken);
171   BasicBlock* bb_fall_through = GetBasicBlock(bb->fall_through);
172   if (((bb_fall_through == nullptr) && (bb_taken != nullptr)) &&
173       ((bb_taken->block_type == kDalvikByteCode) || (bb_taken->block_type == kExitBlock))) {
174     // Follow simple unconditional branches.
175     bb = bb_taken;
176   } else {
177     // Follow simple fallthrough
178     bb = (bb_taken != nullptr) ? nullptr : bb_fall_through;
179   }
180   if (bb == nullptr || (Predecessors(bb) != 1)) {
181     return nullptr;
182   }
183   DCHECK((bb->block_type == kDalvikByteCode) || (bb->block_type == kExitBlock));
184   return bb;
185 }
186 
187 static MIR* FindPhi(BasicBlock* bb, int ssa_name) {
188   for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
189     if (static_cast<int>(mir->dalvikInsn.opcode) == kMirOpPhi) {
190       for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
191         if (mir->ssa_rep->uses[i] == ssa_name) {
192           return mir;
193         }
194       }
195     }
196   }
197   return nullptr;
198 }
199 
200 static SelectInstructionKind SelectKind(MIR* mir) {
201   // Handle the case when mir is null.
202   if (mir == nullptr) {
203     return kSelectNone;
204   }
205   switch (mir->dalvikInsn.opcode) {
206     case Instruction::MOVE:
207     case Instruction::MOVE_OBJECT:
208     case Instruction::MOVE_16:
209     case Instruction::MOVE_OBJECT_16:
210     case Instruction::MOVE_FROM16:
211     case Instruction::MOVE_OBJECT_FROM16:
212       return kSelectMove;
213     case Instruction::CONST:
214     case Instruction::CONST_4:
215     case Instruction::CONST_16:
216       return kSelectConst;
217     case Instruction::GOTO:
218     case Instruction::GOTO_16:
219     case Instruction::GOTO_32:
220       return kSelectGoto;
221     default:
222       return kSelectNone;
223   }
224 }
225 
226 static constexpr ConditionCode kIfCcZConditionCodes[] = {
227     kCondEq, kCondNe, kCondLt, kCondGe, kCondGt, kCondLe
228 };
229 
230 static_assert(arraysize(kIfCcZConditionCodes) == Instruction::IF_LEZ - Instruction::IF_EQZ + 1,
231               "if_ccz_ccodes_size1");
232 
233 static constexpr ConditionCode ConditionCodeForIfCcZ(Instruction::Code opcode) {
234   return kIfCcZConditionCodes[opcode - Instruction::IF_EQZ];
235 }
236 
237 static_assert(ConditionCodeForIfCcZ(Instruction::IF_EQZ) == kCondEq, "if_eqz ccode");
238 static_assert(ConditionCodeForIfCcZ(Instruction::IF_NEZ) == kCondNe, "if_nez ccode");
239 static_assert(ConditionCodeForIfCcZ(Instruction::IF_LTZ) == kCondLt, "if_ltz ccode");
240 static_assert(ConditionCodeForIfCcZ(Instruction::IF_GEZ) == kCondGe, "if_gez ccode");
241 static_assert(ConditionCodeForIfCcZ(Instruction::IF_GTZ) == kCondGt, "if_gtz ccode");
242 static_assert(ConditionCodeForIfCcZ(Instruction::IF_LEZ) == kCondLe, "if_lez ccode");
243 
244 int MIRGraph::GetSSAUseCount(int s_reg) {
245   DCHECK_LT(static_cast<size_t>(s_reg), ssa_subscripts_.size());
246   return raw_use_counts_[s_reg];
247 }
248 
249 size_t MIRGraph::GetNumBytesForSpecialTemps() const {
250   // This logic is written with the assumption that Method* is the only special temp.
251   DCHECK_EQ(max_available_special_compiler_temps_, 1u);
252   return InstructionSetPointerSize(cu_->instruction_set);
253 }
254 
255 size_t MIRGraph::GetNumAvailableVRTemps() {
256   // First take into account all temps reserved for backend.
257   if (max_available_non_special_compiler_temps_ < reserved_temps_for_backend_) {
258     return 0;
259   }
260 
261   // Calculate remaining ME temps available.
262   size_t remaining_me_temps = max_available_non_special_compiler_temps_ -
263       reserved_temps_for_backend_;
264 
265   if (num_non_special_compiler_temps_ >= remaining_me_temps) {
266     return 0;
267   } else {
268     return remaining_me_temps - num_non_special_compiler_temps_;
269   }
270 }
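// Worked example with made-up numbers: if max_available_non_special_compiler_temps_ == 10,
// reserved_temps_for_backend_ == 4 and num_non_special_compiler_temps_ == 3, then
// GetNumAvailableVRTemps() returns (10 - 4) - 3 == 3.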
271 
272 // FIXME - will probably need to revisit all uses of this, as type not defined.
273 static const RegLocation temp_loc = {kLocCompilerTemp,
274                                      0, 1 /*defined*/, 0, 0, 0, 0, 0, 1 /*home*/,
275                                      RegStorage(), INVALID_SREG, INVALID_SREG};
276 
277 CompilerTemp* MIRGraph::GetNewCompilerTemp(CompilerTempType ct_type, bool wide) {
278   // Once the compiler temps have been committed, new ones cannot be requested anymore.
279   DCHECK_EQ(compiler_temps_committed_, false);
280   // Make sure that reserved for BE set is sane.
281   DCHECK_LE(reserved_temps_for_backend_, max_available_non_special_compiler_temps_);
282 
283   bool verbose = cu_->verbose;
284   const char* ct_type_str = nullptr;
285 
286   if (verbose) {
287     switch (ct_type) {
288       case kCompilerTempBackend:
289         ct_type_str = "backend";
290         break;
291       case kCompilerTempSpecialMethodPtr:
292         ct_type_str = "method*";
293         break;
294       case kCompilerTempVR:
295         ct_type_str = "VR";
296         break;
297       default:
298         ct_type_str = "unknown";
299         break;
300     }
301     LOG(INFO) << "CompilerTemps: A compiler temp of type " << ct_type_str << " that is "
302         << (wide ? "wide is being requested." : "not wide is being requested.");
303   }
304 
305   CompilerTemp *compiler_temp = static_cast<CompilerTemp *>(arena_->Alloc(sizeof(CompilerTemp),
306                                                             kArenaAllocRegAlloc));
307 
308   // Create the type of temp requested. Special temps need special handling because
309   // they have a specific virtual register assignment.
310   if (ct_type == kCompilerTempSpecialMethodPtr) {
311     // This has a special location on the stack which is 32-bit or 64-bit depending
312     // on the mode. However, we don't want to overlap with the non-special section
313     // and thus even for 64-bit, we allow only a non-wide temp to be requested.
314     DCHECK_EQ(wide, false);
315 
316     // The vreg is always the first special temp for method ptr.
317     compiler_temp->v_reg = GetFirstSpecialTempVR();
318 
319     CHECK(reg_location_ == nullptr);
320   } else if (ct_type == kCompilerTempBackend) {
321     requested_backend_temp_ = true;
322 
323     // Make sure that we are not exceeding temps reserved for the BE.
324     // Since VR temps cannot be requested once the BE temps are requested, we
325     // also allow the BE to use any VR temps that are still available.
326     size_t available_temps = reserved_temps_for_backend_ + GetNumAvailableVRTemps();
327     size_t needed_temps = wide ? 2u : 1u;
328     if (available_temps < needed_temps) {
329       if (verbose) {
330         LOG(INFO) << "CompilerTemps: Not enough temp(s) of type " << ct_type_str
331             << " are available.";
332       }
333       return nullptr;
334     }
335 
336     // Update the remaining reserved temps since we have now used them.
337     // Note that the code below subtracts the claimed temps from the reserve
338     // once they have been claimed, being careful not to go below zero.
339     reserved_temps_for_backend_ =
340         std::max(reserved_temps_for_backend_, needed_temps) - needed_temps;
341 
342     // The new non-special compiler temp must receive a unique v_reg.
343     compiler_temp->v_reg = GetFirstNonSpecialTempVR() + num_non_special_compiler_temps_;
344     num_non_special_compiler_temps_++;
345   } else if (ct_type == kCompilerTempVR) {
346     // Once we start giving out BE temps, we don't allow any more ME temps to be requested.
347     // This is done in order to prevent problems with SSA since these structures are allocated
348     // and managed by the ME.
349     DCHECK_EQ(requested_backend_temp_, false);
350 
351     // There is a limit to the number of non-special temps so check to make sure it wasn't exceeded.
352     size_t available_temps = GetNumAvailableVRTemps();
353     if (available_temps <= 0 || (available_temps <= 1 && wide)) {
354       if (verbose) {
355         LOG(INFO) << "CompilerTemps: Not enough temp(s) of type " << ct_type_str
356             << " are available.";
357       }
358       return nullptr;
359     }
360 
361     // The new non-special compiler temp must receive a unique v_reg.
362     compiler_temp->v_reg = GetFirstNonSpecialTempVR() + num_non_special_compiler_temps_;
363     num_non_special_compiler_temps_++;
364   } else {
365     UNIMPLEMENTED(FATAL) << "No handling for compiler temp type " << ct_type_str << ".";
366   }
367 
368   // We allocate an sreg as well to make developer life easier.
369   // However, if this is requested from an ME pass that will recalculate ssa afterwards,
370   // this sreg is no longer valid. The caller should be aware of this.
371   compiler_temp->s_reg_low = AddNewSReg(compiler_temp->v_reg);
372 
373   if (verbose) {
374     LOG(INFO) << "CompilerTemps: New temp of type " << ct_type_str << " with v"
375         << compiler_temp->v_reg << " and s" << compiler_temp->s_reg_low << " has been created.";
376   }
377 
378   if (wide) {
379     // Only non-special temps are handled as wide for now.
380     // Note that the number of non special temps is incremented below.
381     DCHECK(ct_type == kCompilerTempBackend || ct_type == kCompilerTempVR);
382 
383     // Ensure that the two registers are consecutive.
384     int ssa_reg_low = compiler_temp->s_reg_low;
385     int ssa_reg_high = AddNewSReg(compiler_temp->v_reg + 1);
386     num_non_special_compiler_temps_++;
387 
388     if (verbose) {
389       LOG(INFO) << "CompilerTemps: The wide part of temp of type " << ct_type_str << " is v"
390           << compiler_temp->v_reg + 1 << " and s" << ssa_reg_high << ".";
391     }
392 
393     if (reg_location_ != nullptr) {
394       reg_location_[ssa_reg_high] = temp_loc;
395       reg_location_[ssa_reg_high].high_word = true;
396       reg_location_[ssa_reg_high].s_reg_low = ssa_reg_low;
397       reg_location_[ssa_reg_high].wide = true;
398     }
399   }
400 
401   // If the register locations have already been allocated, add the information
402   // about the temp. We will not overflow because they have been initialized
403   // to support the maximum number of temps. For ME temps that have multiple
404   // SSA versions, the structures below will be expanded during the post-pass cleanup.
405   if (reg_location_ != nullptr) {
406     int ssa_reg_low = compiler_temp->s_reg_low;
407     reg_location_[ssa_reg_low] = temp_loc;
408     reg_location_[ssa_reg_low].s_reg_low = ssa_reg_low;
409     reg_location_[ssa_reg_low].wide = wide;
410   }
411 
412   return compiler_temp;
413 }
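// Sketch of the assumed caller pattern (not taken from this file): callers must handle a
// null result, e.g.
//   CompilerTemp* tmp = GetNewCompilerTemp(kCompilerTempVR, /* wide */ false);
//   if (tmp != nullptr) { /* use tmp->v_reg and tmp->s_reg_low */ }
// since a null return simply means no temp could be reserved.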
414 
415 void MIRGraph::RemoveLastCompilerTemp(CompilerTempType ct_type, bool wide, CompilerTemp* temp) {
416   // Once the compiler temps have been committed, it's too late for any modifications.
417   DCHECK_EQ(compiler_temps_committed_, false);
418 
419   size_t used_temps = wide ? 2u : 1u;
420 
421   if (ct_type == kCompilerTempBackend) {
422     DCHECK(requested_backend_temp_);
423 
424     // Make the temps available to backend again.
425     reserved_temps_for_backend_ += used_temps;
426   } else if (ct_type == kCompilerTempVR) {
427     DCHECK(!requested_backend_temp_);
428   } else {
429     UNIMPLEMENTED(FATAL) << "No handling for compiler temp type " << static_cast<int>(ct_type);
430   }
431 
432   // Reduce the number of non-special compiler temps.
433   DCHECK_LE(used_temps, num_non_special_compiler_temps_);
434   num_non_special_compiler_temps_ -= used_temps;
435 
436   // Check that this was really the last temp.
437   DCHECK_EQ(static_cast<size_t>(temp->v_reg),
438             GetFirstNonSpecialTempVR() + num_non_special_compiler_temps_);
439 
440   if (cu_->verbose) {
441     LOG(INFO) << "Last temporary has been removed.";
442   }
443 }
444 
445 static bool EvaluateBranch(Instruction::Code opcode, int32_t src1, int32_t src2) {
446   bool is_taken;
447   switch (opcode) {
448     case Instruction::IF_EQ: is_taken = (src1 == src2); break;
449     case Instruction::IF_NE: is_taken = (src1 != src2); break;
450     case Instruction::IF_LT: is_taken = (src1 < src2); break;
451     case Instruction::IF_GE: is_taken = (src1 >= src2); break;
452     case Instruction::IF_GT: is_taken = (src1 > src2); break;
453     case Instruction::IF_LE: is_taken = (src1 <= src2); break;
454     case Instruction::IF_EQZ: is_taken = (src1 == 0); break;
455     case Instruction::IF_NEZ: is_taken = (src1 != 0); break;
456     case Instruction::IF_LTZ: is_taken = (src1 < 0); break;
457     case Instruction::IF_GEZ: is_taken = (src1 >= 0); break;
458     case Instruction::IF_GTZ: is_taken = (src1 > 0); break;
459     case Instruction::IF_LEZ: is_taken = (src1 <= 0); break;
460     default:
461       LOG(FATAL) << "Unexpected opcode " << opcode;
462       UNREACHABLE();
463   }
464   return is_taken;
465 }
466 
467 /* Do some MIR-level extended basic block optimizations */
468 bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
469   if (bb->block_type == kDead) {
470     return true;
471   }
472   // Currently, multiply-accumulate backend support is only available on arm32 and arm64.
473   if (cu_->instruction_set == kArm64 || cu_->instruction_set == kThumb2) {
474     MultiplyAddOpt(bb);
475   }
476   bool use_lvn = bb->use_lvn && (cu_->disable_opt & (1u << kLocalValueNumbering)) == 0u;
477   std::unique_ptr<ScopedArenaAllocator> allocator;
478   std::unique_ptr<GlobalValueNumbering> global_valnum;
479   std::unique_ptr<LocalValueNumbering> local_valnum;
480   if (use_lvn) {
481     allocator.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
482     global_valnum.reset(new (allocator.get()) GlobalValueNumbering(cu_, allocator.get(),
483                                                                    GlobalValueNumbering::kModeLvn));
484     local_valnum.reset(new (allocator.get()) LocalValueNumbering(global_valnum.get(), bb->id,
485                                                                  allocator.get()));
486   }
487   while (bb != nullptr) {
488     for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
489       // TUNING: use the returned value number for CSE.
490       if (use_lvn) {
491         local_valnum->GetValueNumber(mir);
492       }
493       // Look for interesting opcodes, skip otherwise
494       Instruction::Code opcode = mir->dalvikInsn.opcode;
495       switch (opcode) {
496         case Instruction::IF_EQ:
497         case Instruction::IF_NE:
498         case Instruction::IF_LT:
499         case Instruction::IF_GE:
500         case Instruction::IF_GT:
501         case Instruction::IF_LE:
502           if (!IsConst(mir->ssa_rep->uses[1])) {
503             break;
504           }
505           FALLTHROUGH_INTENDED;
506         case Instruction::IF_EQZ:
507         case Instruction::IF_NEZ:
508         case Instruction::IF_LTZ:
509         case Instruction::IF_GEZ:
510         case Instruction::IF_GTZ:
511         case Instruction::IF_LEZ:
512           // Result known at compile time?
513           if (IsConst(mir->ssa_rep->uses[0])) {
514             int32_t rhs = (mir->ssa_rep->num_uses == 2) ? ConstantValue(mir->ssa_rep->uses[1]) : 0;
515             bool is_taken = EvaluateBranch(opcode, ConstantValue(mir->ssa_rep->uses[0]), rhs);
516             BasicBlockId edge_to_kill = is_taken ? bb->fall_through : bb->taken;
517             if (is_taken) {
518               // Replace with GOTO.
519               bb->fall_through = NullBasicBlockId;
520               mir->dalvikInsn.opcode = Instruction::GOTO;
521               mir->dalvikInsn.vA =
522                   IsInstructionIfCc(opcode) ? mir->dalvikInsn.vC : mir->dalvikInsn.vB;
523             } else {
524               // Make NOP.
525               bb->taken = NullBasicBlockId;
526               mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
527             }
528             mir->ssa_rep->num_uses = 0;
529             BasicBlock* successor_to_unlink = GetBasicBlock(edge_to_kill);
530             successor_to_unlink->ErasePredecessor(bb->id);
531             // We have changed the graph structure.
532             dfs_orders_up_to_date_ = false;
533             domination_up_to_date_ = false;
534             topological_order_up_to_date_ = false;
535             // Keep MIR SSA rep, the worst that can happen is a Phi with just 1 input.
536           }
537           break;
538         case Instruction::CMPL_FLOAT:
539         case Instruction::CMPL_DOUBLE:
540         case Instruction::CMPG_FLOAT:
541         case Instruction::CMPG_DOUBLE:
542         case Instruction::CMP_LONG:
543           if ((cu_->disable_opt & (1 << kBranchFusing)) != 0) {
544             // Bitcode doesn't allow this optimization.
545             break;
546           }
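          // Illustrative fusion (hypothetical bytecode): the pair
          //   cmp-long vX, vA, vB
          //   if-gez vX, :target
          // becomes a single kMirOpFusedCmpLong (with ccode kCondGe) when vX has no other
          // use, so the backend can emit one compare-and-branch sequence.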
547           if (mir->next != nullptr) {
548             MIR* mir_next = mir->next;
549             // Make sure result of cmp is used by next insn and nowhere else
550             if (IsInstructionIfCcZ(mir_next->dalvikInsn.opcode) &&
551                 (mir->ssa_rep->defs[0] == mir_next->ssa_rep->uses[0]) &&
552                 (GetSSAUseCount(mir->ssa_rep->defs[0]) == 1)) {
553               mir_next->meta.ccode = ConditionCodeForIfCcZ(mir_next->dalvikInsn.opcode);
554               switch (opcode) {
555                 case Instruction::CMPL_FLOAT:
556                   mir_next->dalvikInsn.opcode =
557                       static_cast<Instruction::Code>(kMirOpFusedCmplFloat);
558                   break;
559                 case Instruction::CMPL_DOUBLE:
560                   mir_next->dalvikInsn.opcode =
561                       static_cast<Instruction::Code>(kMirOpFusedCmplDouble);
562                   break;
563                 case Instruction::CMPG_FLOAT:
564                   mir_next->dalvikInsn.opcode =
565                       static_cast<Instruction::Code>(kMirOpFusedCmpgFloat);
566                   break;
567                 case Instruction::CMPG_DOUBLE:
568                   mir_next->dalvikInsn.opcode =
569                       static_cast<Instruction::Code>(kMirOpFusedCmpgDouble);
570                   break;
571                 case Instruction::CMP_LONG:
572                   mir_next->dalvikInsn.opcode =
573                       static_cast<Instruction::Code>(kMirOpFusedCmpLong);
574                   break;
575                 default: LOG(ERROR) << "Unexpected opcode: " << opcode;
576               }
577               mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
578               // Clear use count of temp VR.
579               use_counts_[mir->ssa_rep->defs[0]] = 0;
580               raw_use_counts_[mir->ssa_rep->defs[0]] = 0;
581               // Copy the SSA information that is relevant.
582               mir_next->ssa_rep->num_uses = mir->ssa_rep->num_uses;
583               mir_next->ssa_rep->uses = mir->ssa_rep->uses;
584               mir_next->ssa_rep->num_defs = 0;
585               mir->ssa_rep->num_uses = 0;
586               mir->ssa_rep->num_defs = 0;
587               // Copy in the decoded instruction information for potential SSA re-creation.
588               mir_next->dalvikInsn.vA = mir->dalvikInsn.vB;
589               mir_next->dalvikInsn.vB = mir->dalvikInsn.vC;
590             }
591           }
592           break;
593         default:
594           break;
595       }
596       // Is this the select pattern?
597       // TODO: flesh out support for Mips.  NOTE: llvm's select op doesn't quite work here.
598       // TUNING: expand to support IF_xx compare & branches
599       if ((cu_->instruction_set == kArm64 || cu_->instruction_set == kThumb2 ||
600            cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) &&
601           IsInstructionIfCcZ(mir->dalvikInsn.opcode)) {
602         BasicBlock* ft = GetBasicBlock(bb->fall_through);
603         DCHECK(ft != nullptr);
604         BasicBlock* ft_ft = GetBasicBlock(ft->fall_through);
605         BasicBlock* ft_tk = GetBasicBlock(ft->taken);
606 
607         BasicBlock* tk = GetBasicBlock(bb->taken);
608         DCHECK(tk != nullptr);
609         BasicBlock* tk_ft = GetBasicBlock(tk->fall_through);
610         BasicBlock* tk_tk = GetBasicBlock(tk->taken);
611 
612         /*
613          * In the select pattern, the taken edge goes to a block that unconditionally
614          * transfers to the rejoin block and the fall_through edge goes to a block that
615          * unconditionally falls through to the rejoin block.
616          */
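        /*
         * Illustrative CFG shape that the checks below look for (assuming the pattern holds):
         *
         *               bb: if-eqz / if-nez
         *                /               \
         *   ft (fall_through,          tk (taken,
         *    one move/const)            move/const + goto)
         *                \               /
         *           ft_ft == tk_tk (rejoin block with the Phi)
         */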
617         if ((tk_ft == nullptr) && (ft_tk == nullptr) && (tk_tk == ft_ft) &&
618             (Predecessors(tk) == 1) && (Predecessors(ft) == 1)) {
619           /*
620            * Okay - we have the basic diamond shape.
621            */
622 
623           // TODO: Add logic for LONG.
624           // Are the block bodies something we can handle?
625           if ((ft->first_mir_insn == ft->last_mir_insn) &&
626               (tk->first_mir_insn != tk->last_mir_insn) &&
627               (tk->first_mir_insn->next == tk->last_mir_insn) &&
628               ((SelectKind(ft->first_mir_insn) == kSelectMove) ||
629               (SelectKind(ft->first_mir_insn) == kSelectConst)) &&
630               (SelectKind(ft->first_mir_insn) == SelectKind(tk->first_mir_insn)) &&
631               (SelectKind(tk->last_mir_insn) == kSelectGoto)) {
632             // Almost there.  Are the instructions targeting the same vreg?
633             MIR* if_true = tk->first_mir_insn;
634             MIR* if_false = ft->first_mir_insn;
635             // It's possible that the target of the select isn't used - skip those (rare) cases.
636             MIR* phi = FindPhi(tk_tk, if_true->ssa_rep->defs[0]);
637             if ((phi != nullptr) && (if_true->dalvikInsn.vA == if_false->dalvikInsn.vA)) {
638               /*
639                * We'll convert the IF_EQZ/IF_NEZ to a SELECT.  We need to find the
640                * Phi node in the merge block and delete it (while using the SSA name
641                * of the merge as the target of the SELECT).  Delete both taken and
642                * fallthrough blocks, and set fallthrough to merge block.
643                * NOTE: not updating other dataflow info (no longer used at this point).
644                * If this changes, need to update i_dom, etc. here (and in CombineBlocks).
645                */
646               mir->meta.ccode = ConditionCodeForIfCcZ(mir->dalvikInsn.opcode);
647               mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpSelect);
648               bool const_form = (SelectKind(if_true) == kSelectConst);
649               if ((SelectKind(if_true) == kSelectMove)) {
650                 if (IsConst(if_true->ssa_rep->uses[0]) &&
651                     IsConst(if_false->ssa_rep->uses[0])) {
652                     const_form = true;
653                     if_true->dalvikInsn.vB = ConstantValue(if_true->ssa_rep->uses[0]);
654                     if_false->dalvikInsn.vB = ConstantValue(if_false->ssa_rep->uses[0]);
655                 }
656               }
657               if (const_form) {
658                 /*
659                  * TODO: If both constants are the same value, then instead of generating
660                  * a select, we should simply generate a const bytecode. This should be
661                  * considered after inlining which can lead to CFG of this form.
662                  */
663                 // "true" set val in vB
664                 mir->dalvikInsn.vB = if_true->dalvikInsn.vB;
665                 // "false" set val in vC
666                 mir->dalvikInsn.vC = if_false->dalvikInsn.vB;
667               } else {
668                 DCHECK_EQ(SelectKind(if_true), kSelectMove);
669                 DCHECK_EQ(SelectKind(if_false), kSelectMove);
670                 int32_t* src_ssa = arena_->AllocArray<int32_t>(3, kArenaAllocDFInfo);
671                 src_ssa[0] = mir->ssa_rep->uses[0];
672                 src_ssa[1] = if_true->ssa_rep->uses[0];
673                 src_ssa[2] = if_false->ssa_rep->uses[0];
674                 mir->ssa_rep->uses = src_ssa;
675                 mir->ssa_rep->num_uses = 3;
676               }
677               AllocateSSADefData(mir, 1);
678               /*
679                * There is usually a Phi node in the join block for our two cases.  If the
680                * Phi node only contains our two cases as input, we will use the result
681                * SSA name of the Phi node as our select result and delete the Phi.  If
682                * the Phi node has more than two operands, we will arbitrarily use the SSA
683                * name of the "false" path, delete the SSA name of the "true" path from the
684                * Phi node (and fix up the incoming arc list).
685                */
686               if (phi->ssa_rep->num_uses == 2) {
687                 mir->ssa_rep->defs[0] = phi->ssa_rep->defs[0];
688                 // Rather than changing the Phi to kMirOpNop, remove it completely.
689                 // This avoids leaving other Phis after kMirOpNop (i.e. a non-Phi) insn.
690                 tk_tk->RemoveMIR(phi);
691                 int dead_false_def = if_false->ssa_rep->defs[0];
692                 raw_use_counts_[dead_false_def] = use_counts_[dead_false_def] = 0;
693               } else {
694                 int live_def = if_false->ssa_rep->defs[0];
695                 mir->ssa_rep->defs[0] = live_def;
696               }
697               int dead_true_def = if_true->ssa_rep->defs[0];
698               raw_use_counts_[dead_true_def] = use_counts_[dead_true_def] = 0;
699               // Update ending vreg->sreg map for GC maps generation.
700               int def_vreg = SRegToVReg(mir->ssa_rep->defs[0]);
701               bb->data_flow_info->vreg_to_ssa_map_exit[def_vreg] = mir->ssa_rep->defs[0];
702               // We want to remove ft and tk and link bb directly to ft_ft. First, we need
703               // to update all Phi inputs correctly with UpdatePredecessor(ft->id, bb->id)
704               // since the live_def above comes from ft->first_mir_insn (if_false).
705               DCHECK(if_false == ft->first_mir_insn);
706               ft_ft->UpdatePredecessor(ft->id, bb->id);
707               // Correct the rest of the links between bb, ft and ft_ft.
708               ft->ErasePredecessor(bb->id);
709               ft->fall_through = NullBasicBlockId;
710               bb->fall_through = ft_ft->id;
711               // Now we can kill tk and ft.
712               tk->Kill(this);
713               ft->Kill(this);
714               // NOTE: DFS order, domination info and topological order are still usable
715               // despite the newly dead blocks.
716             }
717           }
718         }
719       }
720     }
721     bb = ((cu_->disable_opt & (1 << kSuppressExceptionEdges)) != 0) ? NextDominatedBlock(bb) :
722         nullptr;
723   }
724   if (use_lvn && UNLIKELY(!global_valnum->Good())) {
725     LOG(WARNING) << "LVN overflow in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
726   }
727 
728   return true;
729 }
730 
731 /* Collect stats on number of checks removed */
732 void MIRGraph::CountChecks(class BasicBlock* bb) {
733   if (bb->data_flow_info != nullptr) {
734     for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
735       if (mir->ssa_rep == nullptr) {
736         continue;
737       }
738       uint64_t df_attributes = GetDataFlowAttributes(mir);
739       if (df_attributes & DF_HAS_NULL_CHKS) {
740         checkstats_->null_checks++;
741         if (mir->optimization_flags & MIR_IGNORE_NULL_CHECK) {
742           checkstats_->null_checks_eliminated++;
743         }
744       }
745       if (df_attributes & DF_HAS_RANGE_CHKS) {
746         checkstats_->range_checks++;
747         if (mir->optimization_flags & MIR_IGNORE_RANGE_CHECK) {
748           checkstats_->range_checks_eliminated++;
749         }
750       }
751     }
752   }
753 }
754 
755 /* Try to make common case the fallthrough path. */
756 bool MIRGraph::LayoutBlocks(BasicBlock* bb) {
757   // TODO: For now, just looking for direct throws.  Consider generalizing for profile feedback.
758   if (!bb->explicit_throw) {
759     return false;
760   }
761 
762   // If we visited it, we are done.
763   if (bb->visited) {
764     return false;
765   }
766   bb->visited = true;
767 
768   BasicBlock* walker = bb;
769   while (true) {
770     // Check termination conditions.
771     if ((walker->block_type == kEntryBlock) || (Predecessors(walker) != 1)) {
772       break;
773     }
774     DCHECK(!walker->predecessors.empty());
775     BasicBlock* prev = GetBasicBlock(walker->predecessors[0]);
776 
777     // If we visited the predecessor, we are done.
778     if (prev->visited) {
779       return false;
780     }
781     prev->visited = true;
782 
783     if (prev->conditional_branch) {
784       if (GetBasicBlock(prev->fall_through) == walker) {
785         // Already done - return.
786         break;
787       }
788       DCHECK_EQ(walker, GetBasicBlock(prev->taken));
789       // Got one.  Flip it and exit.
790       Instruction::Code opcode = prev->last_mir_insn->dalvikInsn.opcode;
791       switch (opcode) {
792         case Instruction::IF_EQ: opcode = Instruction::IF_NE; break;
793         case Instruction::IF_NE: opcode = Instruction::IF_EQ; break;
794         case Instruction::IF_LT: opcode = Instruction::IF_GE; break;
795         case Instruction::IF_GE: opcode = Instruction::IF_LT; break;
796         case Instruction::IF_GT: opcode = Instruction::IF_LE; break;
797         case Instruction::IF_LE: opcode = Instruction::IF_GT; break;
798         case Instruction::IF_EQZ: opcode = Instruction::IF_NEZ; break;
799         case Instruction::IF_NEZ: opcode = Instruction::IF_EQZ; break;
800         case Instruction::IF_LTZ: opcode = Instruction::IF_GEZ; break;
801         case Instruction::IF_GEZ: opcode = Instruction::IF_LTZ; break;
802         case Instruction::IF_GTZ: opcode = Instruction::IF_LEZ; break;
803         case Instruction::IF_LEZ: opcode = Instruction::IF_GTZ; break;
804         default: LOG(FATAL) << "Unexpected opcode " << opcode;
805       }
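      // Note: only the opcode and the taken/fall_through ids are swapped below; the branch
      // offset encoded in dalvikInsn is left untouched, since successor edges are tracked
      // through BasicBlockIds at this stage.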
806       prev->last_mir_insn->dalvikInsn.opcode = opcode;
807       BasicBlockId t_bb = prev->taken;
808       prev->taken = prev->fall_through;
809       prev->fall_through = t_bb;
810       break;
811     }
812     walker = prev;
813   }
814   return false;
815 }
816 
817 /* Combine any basic blocks terminated by instructions that we now know can't throw */
818 void MIRGraph::CombineBlocks(class BasicBlock* bb) {
819   // Loop here to allow combining a sequence of blocks
820   while ((bb->block_type == kDalvikByteCode) &&
821       (bb->last_mir_insn != nullptr) &&
822       (static_cast<int>(bb->last_mir_insn->dalvikInsn.opcode) == kMirOpCheck)) {
823     MIR* mir = bb->last_mir_insn;
824     DCHECK(bb->first_mir_insn !=  nullptr);
825 
826     // Get the paired insn and check if it can still throw.
827     MIR* throw_insn = mir->meta.throw_insn;
828     if (CanThrow(throw_insn)) {
829       break;
830     }
831 
832     // OK - got one.  Combine
833     BasicBlock* bb_next = GetBasicBlock(bb->fall_through);
834     DCHECK(!bb_next->catch_entry);
835     DCHECK_EQ(bb_next->predecessors.size(), 1u);
836 
837     // Now move instructions from bb_next to bb. Start off by sanity checking that the
838     // kMirOpCheck's paired throw instruction is the first one in bb_next.
839     DCHECK_EQ(bb_next->first_mir_insn, throw_insn);
840     // Now move all instructions (throw instruction to last one) from bb_next to bb.
841     MIR* last_to_move = bb_next->last_mir_insn;
842     bb_next->RemoveMIRList(throw_insn, last_to_move);
843     bb->InsertMIRListAfter(bb->last_mir_insn, throw_insn, last_to_move);
844     // The kMirOpCheck instruction is not needed anymore.
845     mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
846     bb->RemoveMIR(mir);
847 
848     // Before we overwrite successors, remove their predecessor links to bb.
849     bb_next->ErasePredecessor(bb->id);
850     if (bb->taken != NullBasicBlockId) {
851       DCHECK_EQ(bb->successor_block_list_type, kNotUsed);
852       BasicBlock* bb_taken = GetBasicBlock(bb->taken);
853       // bb->taken will be overwritten below.
854       DCHECK_EQ(bb_taken->block_type, kExceptionHandling);
855       DCHECK_EQ(bb_taken->predecessors.size(), 1u);
856       DCHECK_EQ(bb_taken->predecessors[0], bb->id);
857       bb_taken->predecessors.clear();
858       bb_taken->block_type = kDead;
859       DCHECK(bb_taken->data_flow_info == nullptr);
860     } else {
861       DCHECK_EQ(bb->successor_block_list_type, kCatch);
862       for (SuccessorBlockInfo* succ_info : bb->successor_blocks) {
863         if (succ_info->block != NullBasicBlockId) {
864           BasicBlock* succ_bb = GetBasicBlock(succ_info->block);
865           DCHECK(succ_bb->catch_entry);
866           succ_bb->ErasePredecessor(bb->id);
867         }
868       }
869     }
870     // Use the successor info from the next block
871     bb->successor_block_list_type = bb_next->successor_block_list_type;
872     bb->successor_blocks.swap(bb_next->successor_blocks);  // Swap instead of copying.
873     bb_next->successor_block_list_type = kNotUsed;
874     // Use the ending block linkage from the next block
875     bb->fall_through = bb_next->fall_through;
876     bb_next->fall_through = NullBasicBlockId;
877     bb->taken = bb_next->taken;
878     bb_next->taken = NullBasicBlockId;
879     /*
880      * If lower-half of pair of blocks to combine contained
881      * a return or a conditional branch or an explicit throw,
882      * move the flag to the newly combined block.
883      */
884     bb->terminated_by_return = bb_next->terminated_by_return;
885     bb->conditional_branch = bb_next->conditional_branch;
886     bb->explicit_throw = bb_next->explicit_throw;
887     // Merge the use_lvn flag.
888     bb->use_lvn |= bb_next->use_lvn;
889 
890     // Kill the unused block.
891     bb_next->data_flow_info = nullptr;
892 
893     /*
894      * NOTE: we aren't updating all dataflow info here.  Should either make sure this pass
895      * happens after uses of i_dominated, dom_frontier or update the dataflow info here.
896      * NOTE: GVN uses bb->data_flow_info->live_in_v which is unaffected by the block merge.
897      */
898 
899     // Kill bb_next and remap now-dead id to parent.
900     bb_next->block_type = kDead;
901     bb_next->data_flow_info = nullptr;  // Must be null for dead blocks. (Relied on by the GVN.)
902     block_id_map_.Overwrite(bb_next->id, bb->id);
903     // Update predecessors in children.
904     ChildBlockIterator iter(bb, this);
905     for (BasicBlock* child = iter.Next(); child != nullptr; child = iter.Next()) {
906       child->UpdatePredecessor(bb_next->id, bb->id);
907     }
908 
909     // DFS orders, domination and topological order are not up to date anymore.
910     dfs_orders_up_to_date_ = false;
911     domination_up_to_date_ = false;
912     topological_order_up_to_date_ = false;
913 
914     // Now, loop back and see if we can keep going
915   }
916 }
917 
918 bool MIRGraph::EliminateNullChecksGate() {
919   if ((cu_->disable_opt & (1 << kNullCheckElimination)) != 0 ||
920       (merged_df_flags_ & DF_HAS_NULL_CHKS) == 0) {
921     return false;
922   }
923 
924   DCHECK(temp_scoped_alloc_.get() == nullptr);
925   temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
926   temp_.nce.num_vregs = GetNumOfCodeAndTempVRs();
927   temp_.nce.work_vregs_to_check = new (temp_scoped_alloc_.get()) ArenaBitVector(
928       temp_scoped_alloc_.get(), temp_.nce.num_vregs, false, kBitMapNullCheck);
929   temp_.nce.ending_vregs_to_check_matrix =
930       temp_scoped_alloc_->AllocArray<ArenaBitVector*>(GetNumBlocks(), kArenaAllocMisc);
931   std::fill_n(temp_.nce.ending_vregs_to_check_matrix, GetNumBlocks(), nullptr);
932 
933   // reset MIR_MARK
934   AllNodesIterator iter(this);
935   for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
936     for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
937       mir->optimization_flags &= ~MIR_MARK;
938     }
939   }
940 
941   return true;
942 }
943 
944 /*
945  * Eliminate unnecessary null checks for a basic block.
946  */
947 bool MIRGraph::EliminateNullChecks(BasicBlock* bb) {
948   if (bb->block_type != kDalvikByteCode && bb->block_type != kEntryBlock) {
949     // Ignore the kExitBlock as well.
950     DCHECK(bb->first_mir_insn == nullptr);
951     return false;
952   }
953 
954   ArenaBitVector* vregs_to_check = temp_.nce.work_vregs_to_check;
955   /*
956    * Set initial state. Catch blocks don't need any special treatment.
957    */
958   if (bb->block_type == kEntryBlock) {
959     vregs_to_check->ClearAllBits();
960     // Assume all ins are objects.
961     for (uint16_t in_reg = GetFirstInVR();
962          in_reg < GetNumOfCodeVRs(); in_reg++) {
963       vregs_to_check->SetBit(in_reg);
964     }
965     if ((cu_->access_flags & kAccStatic) == 0) {
966       // If non-static method, mark "this" as non-null.
967       int this_reg = GetFirstInVR();
968       vregs_to_check->ClearBit(this_reg);
969     }
970   } else {
971     DCHECK_EQ(bb->block_type, kDalvikByteCode);
972     // Starting state is union of all incoming arcs.
973     bool copied_first = false;
974     for (BasicBlockId pred_id : bb->predecessors) {
975       if (temp_.nce.ending_vregs_to_check_matrix[pred_id] == nullptr) {
976         continue;
977       }
978       BasicBlock* pred_bb = GetBasicBlock(pred_id);
979       DCHECK(pred_bb != nullptr);
980       MIR* null_check_insn = nullptr;
981       // Check to see if predecessor had an explicit null-check.
982       if (pred_bb->BranchesToSuccessorOnlyIfNotZero(bb->id)) {
983         // Remember the null check insn if there's no other predecessor requiring null check.
984         if (!copied_first || !vregs_to_check->IsBitSet(pred_bb->last_mir_insn->dalvikInsn.vA)) {
985           null_check_insn = pred_bb->last_mir_insn;
986           DCHECK(null_check_insn != nullptr);
987         }
988       }
989       if (!copied_first) {
990         copied_first = true;
991         vregs_to_check->Copy(temp_.nce.ending_vregs_to_check_matrix[pred_id]);
992       } else {
993         vregs_to_check->Union(temp_.nce.ending_vregs_to_check_matrix[pred_id]);
994       }
995       if (null_check_insn != nullptr) {
996         vregs_to_check->ClearBit(null_check_insn->dalvikInsn.vA);
997       }
998     }
999     DCHECK(copied_first);  // At least one predecessor must have been processed before this bb.
1000   }
1001   // At this point, vregs_to_check shows which VRs may hold an object reference that
1002   // still needs a null check on entry to this block.
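  // Illustrative entry-block state (hypothetical virtual method with one reference argument):
  // the bit for the argument's VR stays set (it may still need a null check), while the bit
  // for "this" is cleared, since "this" can never be null here.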
1003 
1004   // Walk through the instructions in the block, updating as necessary
1005   for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
1006     uint64_t df_attributes = GetDataFlowAttributes(mir);
1007 
1008     if ((df_attributes & DF_NULL_TRANSFER_N) != 0u) {
1009       // The algorithm was written in a phi agnostic way.
1010       continue;
1011     }
1012 
1013     // Might need a null check?
1014     if (df_attributes & DF_HAS_NULL_CHKS) {
1015       int src_vreg;
1016       if (df_attributes & DF_NULL_CHK_OUT0) {
1017         DCHECK_NE(df_attributes & DF_IS_INVOKE, 0u);
1018         src_vreg = mir->dalvikInsn.vC;
1019       } else if (df_attributes & DF_NULL_CHK_B) {
1020         DCHECK_NE(df_attributes & DF_REF_B, 0u);
1021         src_vreg = mir->dalvikInsn.vB;
1022       } else {
1023         DCHECK_NE(df_attributes & DF_NULL_CHK_A, 0u);
1024         DCHECK_NE(df_attributes & DF_REF_A, 0u);
1025         src_vreg = mir->dalvikInsn.vA;
1026       }
1027       if (!vregs_to_check->IsBitSet(src_vreg)) {
1028         // Eliminate the null check.
1029         mir->optimization_flags |= MIR_MARK;
1030       } else {
1031         // Do the null check.
1032         mir->optimization_flags &= ~MIR_MARK;
1033         // Mark src_vreg as null-checked.
1034         vregs_to_check->ClearBit(src_vreg);
1035       }
1036     }
1037 
1038     if ((df_attributes & DF_A_WIDE) ||
1039         (df_attributes & (DF_REF_A | DF_SETS_CONST | DF_NULL_TRANSFER)) == 0) {
1040       continue;
1041     }
1042 
1043     /*
1044      * First, mark all object definitions as requiring null check.
1045      * Note: we can't tell if a CONST definition might be used as an object, so treat
1046      * them all as object definitions.
1047      */
1048     if ((df_attributes & (DF_DA | DF_REF_A)) == (DF_DA | DF_REF_A) ||
1049         (df_attributes & DF_SETS_CONST))  {
1050       vregs_to_check->SetBit(mir->dalvikInsn.vA);
1051     }
1052 
1053     // Then, remove mark from all object definitions we know are non-null.
1054     if (df_attributes & DF_NON_NULL_DST) {
1055       // Mark target of NEW* as non-null
1056       DCHECK_NE(df_attributes & DF_REF_A, 0u);
1057       vregs_to_check->ClearBit(mir->dalvikInsn.vA);
1058     }
1059 
1060     // Mark non-null returns from invoke-style NEW*
1061     if (df_attributes & DF_NON_NULL_RET) {
1062       MIR* next_mir = mir->next;
1063       // Next should be a MOVE_RESULT_OBJECT.
1064       if (UNLIKELY(next_mir == nullptr)) {
1065         // The MethodVerifier makes sure there's no MOVE_RESULT at the catch entry or branch
1066         // target, so the MOVE_RESULT cannot be broken away into another block.
1067         LOG(WARNING) << "Unexpected end of block following new";
1068       } else if (UNLIKELY(next_mir->dalvikInsn.opcode != Instruction::MOVE_RESULT_OBJECT)) {
1069         LOG(WARNING) << "Unexpected opcode following new: " << next_mir->dalvikInsn.opcode;
1070       } else {
1071         // Mark as null checked.
1072         vregs_to_check->ClearBit(next_mir->dalvikInsn.vA);
1073       }
1074     }
1075 
1076     // Propagate null check state on register copies.
1077     if (df_attributes & DF_NULL_TRANSFER_0) {
1078       DCHECK_EQ(df_attributes | ~(DF_DA | DF_REF_A | DF_UB | DF_REF_B), static_cast<uint64_t>(-1));
1079       if (vregs_to_check->IsBitSet(mir->dalvikInsn.vB)) {
1080         vregs_to_check->SetBit(mir->dalvikInsn.vA);
1081       } else {
1082         vregs_to_check->ClearBit(mir->dalvikInsn.vA);
1083       }
1084     }
1085   }
1086 
1087   // Did anything change?
1088   bool nce_changed = false;
1089   ArenaBitVector* old_ending_ssa_regs_to_check = temp_.nce.ending_vregs_to_check_matrix[bb->id];
1090   if (old_ending_ssa_regs_to_check == nullptr) {
1091     DCHECK(temp_scoped_alloc_.get() != nullptr);
1092     nce_changed = vregs_to_check->GetHighestBitSet() != -1;
1093     temp_.nce.ending_vregs_to_check_matrix[bb->id] = vregs_to_check;
1094     // Create a new vregs_to_check for next BB.
1095     temp_.nce.work_vregs_to_check = new (temp_scoped_alloc_.get()) ArenaBitVector(
1096         temp_scoped_alloc_.get(), temp_.nce.num_vregs, false, kBitMapNullCheck);
1097   } else if (!vregs_to_check->SameBitsSet(old_ending_ssa_regs_to_check)) {
1098     nce_changed = true;
1099     temp_.nce.ending_vregs_to_check_matrix[bb->id] = vregs_to_check;
1100     temp_.nce.work_vregs_to_check = old_ending_ssa_regs_to_check;  // Reuse for next BB.
1101   }
1102   return nce_changed;
1103 }
1104 
1105 void MIRGraph::EliminateNullChecksEnd() {
1106   // Clean up temporaries.
1107   temp_.nce.num_vregs = 0u;
1108   temp_.nce.work_vregs_to_check = nullptr;
1109   temp_.nce.ending_vregs_to_check_matrix = nullptr;
1110   DCHECK(temp_scoped_alloc_.get() != nullptr);
1111   temp_scoped_alloc_.reset();
1112 
1113   // converge MIR_MARK with MIR_IGNORE_NULL_CHECK
1114   AllNodesIterator iter(this);
1115   for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
1116     for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
1117       constexpr int kMarkToIgnoreNullCheckShift = kMIRMark - kMIRIgnoreNullCheck;
1118       static_assert(kMarkToIgnoreNullCheckShift > 0, "Not a valid right-shift");
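      // MIR_MARK sits above MIR_IGNORE_NULL_CHECK, so shifting the masked bit right by
      // their distance lands it exactly on the ignore-null-check flag.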
1119       uint16_t mirMarkAdjustedToIgnoreNullCheck =
1120           (mir->optimization_flags & MIR_MARK) >> kMarkToIgnoreNullCheckShift;
1121       mir->optimization_flags |= mirMarkAdjustedToIgnoreNullCheck;
1122     }
1123   }
1124 }
1125 
1126 void MIRGraph::InferTypesStart() {
1127   DCHECK(temp_scoped_alloc_ != nullptr);
1128   temp_.ssa.ti = new (temp_scoped_alloc_.get()) TypeInference(this, temp_scoped_alloc_.get());
1129 }
1130 
1131 /*
1132  * Perform type and size inference for a basic block.
1133  */
1134 bool MIRGraph::InferTypes(BasicBlock* bb) {
1135   if (bb->data_flow_info == nullptr) return false;
1136 
1137   DCHECK(temp_.ssa.ti != nullptr);
1138   return temp_.ssa.ti->Apply(bb);
1139 }
1140 
1141 void MIRGraph::InferTypesEnd() {
1142   DCHECK(temp_.ssa.ti != nullptr);
1143   temp_.ssa.ti->Finish();
1144   delete temp_.ssa.ti;
1145   temp_.ssa.ti = nullptr;
1146 }
1147 
1148 bool MIRGraph::EliminateClassInitChecksGate() {
1149   if ((cu_->disable_opt & (1 << kClassInitCheckElimination)) != 0 ||
1150       (merged_df_flags_ & DF_CLINIT) == 0) {
1151     return false;
1152   }
1153 
1154   DCHECK(temp_scoped_alloc_.get() == nullptr);
1155   temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
1156 
1157   // Each insn we use here has at least 2 code units, offset/2 will be a unique index.
1158   const size_t end = (GetNumDalvikInsns() + 1u) / 2u;
1159   temp_.cice.indexes = temp_scoped_alloc_->AllocArray<uint16_t>(end, kArenaAllocGrowableArray);
1160   std::fill_n(temp_.cice.indexes, end, 0xffffu);
1161 
1162   uint32_t unique_class_count = 0u;
1163   {
1164     // Get unique_class_count and store indexes in temp_.cice.indexes using a map on a nested
1165     // ScopedArenaAllocator.
1166 
1167     // Embed the map value in the entry to save space.
1168     struct MapEntry {
1169       // Map key: the class identified by the declaring dex file and type index.
1170       const DexFile* declaring_dex_file;
1171       uint16_t declaring_class_idx;
1172       // Map value: index into bit vectors of classes requiring initialization checks.
1173       uint16_t index;
1174     };
1175     struct MapEntryComparator {
1176       bool operator()(const MapEntry& lhs, const MapEntry& rhs) const {
1177         if (lhs.declaring_class_idx != rhs.declaring_class_idx) {
1178           return lhs.declaring_class_idx < rhs.declaring_class_idx;
1179         }
1180         return lhs.declaring_dex_file < rhs.declaring_dex_file;
1181       }
1182     };
1183 
1184     ScopedArenaAllocator allocator(&cu_->arena_stack);
1185     ScopedArenaSet<MapEntry, MapEntryComparator> class_to_index_map(MapEntryComparator(),
1186                                                                     allocator.Adapter());
1187 
1188     // First, find all SGET/SPUTs that may need class initialization checks, record INVOKE_STATICs.
1189     AllNodesIterator iter(this);
1190     for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
1191       if (bb->block_type == kDalvikByteCode) {
1192         for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
1193           if (IsInstructionSGetOrSPut(mir->dalvikInsn.opcode)) {
1194             const MirSFieldLoweringInfo& field_info = GetSFieldLoweringInfo(mir);
1195             if (!field_info.IsReferrersClass()) {
1196               DCHECK_LT(class_to_index_map.size(), 0xffffu);
1197               MapEntry entry = {
1198                   // Treat unresolved fields as if each had its own class.
1199                   field_info.IsResolved() ? field_info.DeclaringDexFile()
1200                                           : nullptr,
1201                   field_info.IsResolved() ? field_info.DeclaringClassIndex()
1202                                           : field_info.FieldIndex(),
1203                   static_cast<uint16_t>(class_to_index_map.size())
1204               };
1205               uint16_t index = class_to_index_map.insert(entry).first->index;
1206               // Using offset/2 for index into temp_.cice.indexes.
1207               temp_.cice.indexes[mir->offset / 2u] = index;
1208             }
1209           } else if (IsInstructionInvokeStatic(mir->dalvikInsn.opcode)) {
1210             const MirMethodLoweringInfo& method_info = GetMethodLoweringInfo(mir);
1211             DCHECK(method_info.IsStatic());
1212             if (method_info.FastPath() && !method_info.IsReferrersClass()) {
1213               MapEntry entry = {
1214                   method_info.DeclaringDexFile(),
1215                   method_info.DeclaringClassIndex(),
1216                   static_cast<uint16_t>(class_to_index_map.size())
1217               };
1218               uint16_t index = class_to_index_map.insert(entry).first->index;
1219               // Using offset/2 for index into temp_.cice.indexes.
1220               temp_.cice.indexes[mir->offset / 2u] = index;
1221             }
1222           }
1223         }
1224       }
1225     }
1226     unique_class_count = static_cast<uint32_t>(class_to_index_map.size());
1227   }
1228 
1229   if (unique_class_count == 0u) {
1230     // All SGET/SPUTs refer to initialized classes. Nothing to do.
1231     temp_.cice.indexes = nullptr;
1232     temp_scoped_alloc_.reset();
1233     return false;
1234   }
1235 
1236   // 2 bits for each class: is class initialized, is class in dex cache.
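  // (For the class with index i, bit 2*i tracks the initialization check and bit 2*i + 1
  // tracks the dex cache check; a set bit means the corresponding check is still required.)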
1237   temp_.cice.num_class_bits = 2u * unique_class_count;
1238   temp_.cice.work_classes_to_check = new (temp_scoped_alloc_.get()) ArenaBitVector(
1239       temp_scoped_alloc_.get(), temp_.cice.num_class_bits, false, kBitMapClInitCheck);
1240   temp_.cice.ending_classes_to_check_matrix =
1241       temp_scoped_alloc_->AllocArray<ArenaBitVector*>(GetNumBlocks(), kArenaAllocMisc);
1242   std::fill_n(temp_.cice.ending_classes_to_check_matrix, GetNumBlocks(), nullptr);
1243   DCHECK_GT(temp_.cice.num_class_bits, 0u);
1244   return true;
1245 }
1246 
1247 /*
1248  * Eliminate unnecessary class initialization checks for a basic block.
1249  */
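// Dataflow summary: temp_.cice.work_classes_to_check holds two bits per unique class, and a set
// bit means the corresponding check is still required. For the class with map index k:
//   bit 2*k     - a class initialization (clinit) check may still be needed;
//   bit 2*k + 1 - the class may still need to be loaded into the dex cache.
// The entry block starts with all bits set; other blocks start with the union of the ending
// states of their already-processed predecessors. When a bit is found clear at an SGET/SPUT or
// INVOKE_STATIC, the check is marked eliminated on the MIR (MIR_CLASS_IS_INITIALIZED or
// MIR_CLASS_IS_IN_DEX_CACHE); otherwise the check is kept and the bit is cleared for the rest
// of the block.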
1250 bool MIRGraph::EliminateClassInitChecks(BasicBlock* bb) {
1251   DCHECK_EQ((cu_->disable_opt & (1 << kClassInitCheckElimination)), 0u);
1252   if (bb->block_type != kDalvikByteCode && bb->block_type != kEntryBlock) {
1253     // Ignore the kExitBlock as well.
1254     DCHECK(bb->first_mir_insn == nullptr);
1255     return false;
1256   }
1257 
1258   /*
1259    * Set initial state.  Catch blocks don't need any special treatment.
1260    */
1261   ArenaBitVector* classes_to_check = temp_.cice.work_classes_to_check;
1262   DCHECK(classes_to_check != nullptr);
1263   if (bb->block_type == kEntryBlock) {
1264     classes_to_check->SetInitialBits(temp_.cice.num_class_bits);
1265   } else {
1266     // Starting state is union of all incoming arcs.
1267     bool copied_first = false;
1268     for (BasicBlockId pred_id : bb->predecessors) {
1269       if (temp_.cice.ending_classes_to_check_matrix[pred_id] == nullptr) {
1270         continue;
1271       }
1272       if (!copied_first) {
1273         copied_first = true;
1274         classes_to_check->Copy(temp_.cice.ending_classes_to_check_matrix[pred_id]);
1275       } else {
1276         classes_to_check->Union(temp_.cice.ending_classes_to_check_matrix[pred_id]);
1277       }
1278     }
1279     DCHECK(copied_first);  // At least one predecessor must have been processed before this bb.
1280   }
1281   // At this point, classes_to_check shows which classes need clinit checks.
1282 
1283   // Walk through the instructions in the block, updating the state as necessary.
1284   for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
1285     uint16_t index = temp_.cice.indexes[mir->offset / 2u];
1286     if (index != 0xffffu) {
1287       bool check_initialization = false;
1288       bool check_dex_cache = false;
1289 
1290       // NOTE: index != 0xffff does not guarantee that this is an SGET/SPUT/INVOKE_STATIC.
1291       // Dex instructions with width 1 can have the same offset/2.
1292 
1293       if (IsInstructionSGetOrSPut(mir->dalvikInsn.opcode)) {
1294         check_initialization = true;
1295         check_dex_cache = true;
1296       } else if (IsInstructionInvokeStatic(mir->dalvikInsn.opcode)) {
1297         check_initialization = true;
1298         // NOTE: INVOKE_STATIC doesn't guarantee that the type will be in the dex cache.
1299       }
1300 
1301       if (check_dex_cache) {
1302         uint32_t check_dex_cache_index = 2u * index + 1u;
1303         if (!classes_to_check->IsBitSet(check_dex_cache_index)) {
1304           // Eliminate the class init check.
1305           mir->optimization_flags |= MIR_CLASS_IS_IN_DEX_CACHE;
1306         } else {
1307           // Do the class init check.
1308           mir->optimization_flags &= ~MIR_CLASS_IS_IN_DEX_CACHE;
1309         }
1310         classes_to_check->ClearBit(check_dex_cache_index);
1311       }
1312       if (check_initialization) {
1313         uint32_t check_clinit_index = 2u * index;
1314         if (!classes_to_check->IsBitSet(check_clinit_index)) {
1315           // Eliminate the class init check.
1316           mir->optimization_flags |= MIR_CLASS_IS_INITIALIZED;
1317         } else {
1318           // Do the class init check.
1319           mir->optimization_flags &= ~MIR_CLASS_IS_INITIALIZED;
1320         }
1321         // Mark the class as initialized.
1322         classes_to_check->ClearBit(check_clinit_index);
1323       }
1324     }
1325   }
1326 
1327   // Did anything change?
1328   bool changed = false;
1329   ArenaBitVector* old_ending_classes_to_check = temp_.cice.ending_classes_to_check_matrix[bb->id];
1330   if (old_ending_classes_to_check == nullptr) {
1331     DCHECK(temp_scoped_alloc_.get() != nullptr);
1332     changed = classes_to_check->GetHighestBitSet() != -1;
1333     temp_.cice.ending_classes_to_check_matrix[bb->id] = classes_to_check;
1334     // Create a new classes_to_check for next BB.
1335     temp_.cice.work_classes_to_check = new (temp_scoped_alloc_.get()) ArenaBitVector(
1336         temp_scoped_alloc_.get(), temp_.cice.num_class_bits, false, kBitMapClInitCheck);
1337   } else if (!classes_to_check->Equal(old_ending_classes_to_check)) {
1338     changed = true;
1339     temp_.cice.ending_classes_to_check_matrix[bb->id] = classes_to_check;
1340     temp_.cice.work_classes_to_check = old_ending_classes_to_check;  // Reuse for next BB.
1341   }
1342   return changed;
1343 }
1344 
1345 void MIRGraph::EliminateClassInitChecksEnd() {
1346   // Clean up temporaries.
1347   temp_.cice.num_class_bits = 0u;
1348   temp_.cice.work_classes_to_check = nullptr;
1349   temp_.cice.ending_classes_to_check_matrix = nullptr;
1350   DCHECK(temp_.cice.indexes != nullptr);
1351   temp_.cice.indexes = nullptr;
1352   DCHECK(temp_scoped_alloc_.get() != nullptr);
1353   temp_scoped_alloc_.reset();
1354 }
1355 
1356 static void DisableGVNDependentOptimizations(CompilationUnit* cu) {
1357   cu->disable_opt |= (1u << kGvnDeadCodeElimination);
1358 }
1359 
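// GVN is driven as a gate / per-block / end triple. The gate allocates the GlobalValueNumbering
// instance (and the GVN field id maps) on the temporary scoped allocator; the per-block pass
// value-numbers every MIR and reports whether the block's state changed, so the pass driver can
// presumably iterate to a fixed point; ApplyGlobalValueNumberingEnd() then re-runs the LVN in
// post-processing mode to apply the results, expecting no further changes.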
1360 bool MIRGraph::ApplyGlobalValueNumberingGate() {
1361   if (GlobalValueNumbering::Skip(cu_)) {
1362     DisableGVNDependentOptimizations(cu_);
1363     return false;
1364   }
1365 
1366   DCHECK(temp_scoped_alloc_ == nullptr);
1367   temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
1368   temp_.gvn.ifield_ids =
1369       GlobalValueNumbering::PrepareGvnFieldIds(temp_scoped_alloc_.get(), ifield_lowering_infos_);
1370   temp_.gvn.sfield_ids =
1371       GlobalValueNumbering::PrepareGvnFieldIds(temp_scoped_alloc_.get(), sfield_lowering_infos_);
1372   DCHECK(temp_.gvn.gvn == nullptr);
1373   temp_.gvn.gvn = new (temp_scoped_alloc_.get()) GlobalValueNumbering(
1374       cu_, temp_scoped_alloc_.get(), GlobalValueNumbering::kModeGvn);
1375   return true;
1376 }
1377 
1378 bool MIRGraph::ApplyGlobalValueNumbering(BasicBlock* bb) {
1379   DCHECK(temp_.gvn.gvn != nullptr);
1380   LocalValueNumbering* lvn = temp_.gvn.gvn->PrepareBasicBlock(bb);
1381   if (lvn != nullptr) {
1382     for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
1383       lvn->GetValueNumber(mir);
1384     }
1385   }
1386   bool change = (lvn != nullptr) && temp_.gvn.gvn->FinishBasicBlock(bb);
1387   return change;
1388 }
1389 
1390 void MIRGraph::ApplyGlobalValueNumberingEnd() {
1391   // Perform modifications.
1392   DCHECK(temp_.gvn.gvn != nullptr);
1393   if (temp_.gvn.gvn->Good()) {
1394     temp_.gvn.gvn->StartPostProcessing();
1395     if (max_nested_loops_ != 0u) {
1396       TopologicalSortIterator iter(this);
1397       for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
1398         ScopedArenaAllocator allocator(&cu_->arena_stack);  // Reclaim memory after each LVN.
1399         LocalValueNumbering* lvn = temp_.gvn.gvn->PrepareBasicBlock(bb, &allocator);
1400         if (lvn != nullptr) {
1401           for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
1402             lvn->GetValueNumber(mir);
1403           }
1404           bool change = temp_.gvn.gvn->FinishBasicBlock(bb);
1405           DCHECK(!change) << PrettyMethod(cu_->method_idx, *cu_->dex_file);
1406         }
1407       }
1408     }
1409     // GVN was successful; running the LVN afterwards would be redundant.
1410     cu_->disable_opt |= (1u << kLocalValueNumbering);
1411   } else {
1412     LOG(WARNING) << "GVN failed for " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
1413     DisableGVNDependentOptimizations(cu_);
1414   }
1415 }
1416 
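// GVN-based dead code elimination piggybacks on the GVN results: the gate only fires when GVN
// ran (temp_.gvn.gvn != nullptr) and the optimization is not disabled, each bytecode block is
// processed once by GvnDeadCodeElimination::Apply(), and the debug-only End pass clears the now
// stale live-in vectors so they cannot be used by later passes.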
1417 bool MIRGraph::EliminateDeadCodeGate() {
1418   if ((cu_->disable_opt & (1 << kGvnDeadCodeElimination)) != 0 || temp_.gvn.gvn == nullptr) {
1419     return false;
1420   }
1421   DCHECK(temp_scoped_alloc_ != nullptr);
1422   temp_.gvn.dce = new (temp_scoped_alloc_.get()) GvnDeadCodeElimination(temp_.gvn.gvn,
1423                                                                         temp_scoped_alloc_.get());
1424   return true;
1425 }
1426 
1427 bool MIRGraph::EliminateDeadCode(BasicBlock* bb) {
1428   DCHECK(temp_scoped_alloc_ != nullptr);
1429   DCHECK(temp_.gvn.gvn != nullptr);
1430   if (bb->block_type != kDalvikByteCode) {
1431     return false;
1432   }
1433   DCHECK(temp_.gvn.dce != nullptr);
1434   temp_.gvn.dce->Apply(bb);
1435   return false;  // No need to repeat.
1436 }
1437 
1438 void MIRGraph::EliminateDeadCodeEnd() {
1439   if (kIsDebugBuild) {
1440     // DCE can make some previously dead vregs alive again. Make sure the obsolete
1441     // live-in information is not used anymore.
1442     AllNodesIterator iter(this);
1443     for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
1444       if (bb->data_flow_info != nullptr) {
1445         bb->data_flow_info->live_in_v = nullptr;
1446       }
1447     }
1448   }
1449 }
1450 
1451 void MIRGraph::GlobalValueNumberingCleanup() {
1452   // If the GVN didn't run, these pointers should be null and everything is effectively a no-op.
1453   delete temp_.gvn.dce;
1454   temp_.gvn.dce = nullptr;
1455   delete temp_.gvn.gvn;
1456   temp_.gvn.gvn = nullptr;
1457   temp_.gvn.ifield_ids = nullptr;
1458   temp_.gvn.sfield_ids = nullptr;
1459   temp_scoped_alloc_.reset();
1460 }
1461 
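// For an IGET/IPUT inlined from a getter/setter, the field must be resolved in the context of
// the callee's dex file, not the caller's. The result is cached per method_lowering_info index
// (temp_.smi.processed_indexes / temp_.smi.lowering_infos) so that repeated invokes of the same
// special method reuse the already appended MirIFieldLoweringInfo.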
1462 void MIRGraph::ComputeInlineIFieldLoweringInfo(uint16_t field_idx, MIR* invoke, MIR* iget_or_iput) {
1463   uint32_t method_index = invoke->meta.method_lowering_info;
1464   if (temp_.smi.processed_indexes->IsBitSet(method_index)) {
1465     iget_or_iput->meta.ifield_lowering_info = temp_.smi.lowering_infos[method_index];
1466     DCHECK_EQ(field_idx, GetIFieldLoweringInfo(iget_or_iput).FieldIndex());
1467     return;
1468   }
1469 
1470   const MirMethodLoweringInfo& method_info = GetMethodLoweringInfo(invoke);
1471   MethodReference target = method_info.GetTargetMethod();
1472   DexCompilationUnit inlined_unit(
1473       cu_, cu_->class_loader, cu_->class_linker, *target.dex_file,
1474       nullptr /* code_item not used */, 0u /* class_def_idx not used */, target.dex_method_index,
1475       0u /* access_flags not used */, nullptr /* verified_method not used */);
1476   DexMemAccessType type = IGetOrIPutMemAccessType(iget_or_iput->dalvikInsn.opcode);
1477   MirIFieldLoweringInfo inlined_field_info(field_idx, type, false);
1478   MirIFieldLoweringInfo::Resolve(cu_->compiler_driver, &inlined_unit, &inlined_field_info, 1u);
1479   DCHECK(inlined_field_info.IsResolved());
1480 
1481   uint32_t field_info_index = ifield_lowering_infos_.size();
1482   ifield_lowering_infos_.push_back(inlined_field_info);
1483   temp_.smi.processed_indexes->SetBit(method_index);
1484   temp_.smi.lowering_infos[method_index] = field_info_index;
1485   iget_or_iput->meta.ifield_lowering_info = field_info_index;
1486 }
1487 
1488 bool MIRGraph::InlineSpecialMethodsGate() {
1489   if ((cu_->disable_opt & (1 << kSuppressMethodInlining)) != 0 ||
1490       method_lowering_infos_.size() == 0u) {
1491     return false;
1492   }
1493   if (cu_->compiler_driver->GetMethodInlinerMap() == nullptr) {
1494     // This isn't the Quick compiler.
1495     return false;
1496   }
1497   return true;
1498 }
1499 
1500 void MIRGraph::InlineSpecialMethodsStart() {
1501   // Prepare for inlining getters/setters. Since we're inlining at most 1 IGET/IPUT from
1502   // each INVOKE, we can index the data by the MIR::meta::method_lowering_info index.
1503 
1504   DCHECK(temp_scoped_alloc_.get() == nullptr);
1505   temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
1506   temp_.smi.num_indexes = method_lowering_infos_.size();
1507   temp_.smi.processed_indexes = new (temp_scoped_alloc_.get()) ArenaBitVector(
1508       temp_scoped_alloc_.get(), temp_.smi.num_indexes, false, kBitMapMisc);
1509   temp_.smi.processed_indexes->ClearAllBits();
1510   temp_.smi.lowering_infos =
1511       temp_scoped_alloc_->AllocArray<uint16_t>(temp_.smi.num_indexes, kArenaAllocGrowableArray);
1512 }
1513 
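// A candidate invoke is inlined only if the resolved method is a fast-path "special" method
// (typically a trivial getter, setter, or similarly simple method), its sharp type is direct or
// static, and, for static targets, the declaring class is known to be initialized or the clinit
// check has already been eliminated for this call site. The actual rewrite is delegated to
// DexFileMethodInliner::GenInline().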
1514 void MIRGraph::InlineSpecialMethods(BasicBlock* bb) {
1515   if (bb->block_type != kDalvikByteCode) {
1516     return;
1517   }
1518   for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
1519     if (MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)) {
1520       continue;
1521     }
1522     if (!(mir->dalvikInsn.FlagsOf() & Instruction::kInvoke)) {
1523       continue;
1524     }
1525     const MirMethodLoweringInfo& method_info = GetMethodLoweringInfo(mir);
1526     if (!method_info.FastPath() || !method_info.IsSpecial()) {
1527       continue;
1528     }
1529 
1530     InvokeType sharp_type = method_info.GetSharpType();
1531     if ((sharp_type != kDirect) && (sharp_type != kStatic)) {
1532       continue;
1533     }
1534 
1535     if (sharp_type == kStatic) {
1536       bool needs_clinit = !method_info.IsClassInitialized() &&
1537           ((mir->optimization_flags & MIR_CLASS_IS_INITIALIZED) == 0);
1538       if (needs_clinit) {
1539         continue;
1540       }
1541     }
1542 
1543     DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
1544     MethodReference target = method_info.GetTargetMethod();
1545     if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(target.dex_file)
1546             ->GenInline(this, bb, mir, target.dex_method_index)) {
1547       if (cu_->verbose || cu_->print_pass) {
1548         LOG(INFO) << "SpecialMethodInliner: Inlined " << method_info.GetInvokeType() << " ("
1549             << sharp_type << ") call to \"" << PrettyMethod(target.dex_method_index,
1550                                                             *target.dex_file)
1551             << "\" from \"" << PrettyMethod(cu_->method_idx, *cu_->dex_file)
1552             << "\" @0x" << std::hex << mir->offset;
1553       }
1554     }
1555   }
1556 }
1557 
1558 void MIRGraph::InlineSpecialMethodsEnd() {
1559   // Clean up temporaries.
1560   DCHECK(temp_.smi.lowering_infos != nullptr);
1561   temp_.smi.lowering_infos = nullptr;
1562   temp_.smi.num_indexes = 0u;
1563   DCHECK(temp_.smi.processed_indexes != nullptr);
1564   temp_.smi.processed_indexes = nullptr;
1565   DCHECK(temp_scoped_alloc_.get() != nullptr);
1566   temp_scoped_alloc_.reset();
1567 }
1568 
1569 void MIRGraph::DumpCheckStats() {
1570   Checkstats* stats =
1571       static_cast<Checkstats*>(arena_->Alloc(sizeof(Checkstats), kArenaAllocDFInfo));
1572   checkstats_ = stats;
1573   AllNodesIterator iter(this);
1574   for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
1575     CountChecks(bb);
1576   }
1577   if (stats->null_checks > 0) {
1578     float eliminated = static_cast<float>(stats->null_checks_eliminated);
1579     float checks = static_cast<float>(stats->null_checks);
1580     LOG(INFO) << "Null Checks: " << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
1581               << stats->null_checks_eliminated << " of " << stats->null_checks << " -> "
1582               << (eliminated/checks) * 100.0 << "%";
1583     }
1584   if (stats->range_checks > 0) {
1585     float eliminated = static_cast<float>(stats->range_checks_eliminated);
1586     float checks = static_cast<float>(stats->range_checks);
1587     LOG(INFO) << "Range Checks: " << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " "
1588               << stats->range_checks_eliminated << " of " << stats->range_checks << " -> "
1589               << (eliminated/checks) * 100.0 << "%";
1590   }
1591 }
1592 
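// Extended basic blocks are built by taking an unvisited head block and following the chain of
// blocks returned by NextDominatedBlock(). Only the head is recorded in extended_basic_blocks_;
// if any member of the chain requests LVN or ends in a return, the corresponding flags
// (use_lvn, dominates_return) are propagated to every block in the chain.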
1593 bool MIRGraph::BuildExtendedBBList(class BasicBlock* bb) {
1594   if (bb->visited) return false;
1595   if (!((bb->block_type == kEntryBlock) || (bb->block_type == kDalvikByteCode)
1596       || (bb->block_type == kExitBlock))) {
1597     // Ignore special blocks
1598     bb->visited = true;
1599     return false;
1600   }
1601   // Must be head of extended basic block.
1602   BasicBlock* start_bb = bb;
1603   extended_basic_blocks_.push_back(bb->id);
1604   bool terminated_by_return = false;
1605   bool do_local_value_numbering = false;
1606   // Visit blocks strictly dominated by this head.
1607   while (bb != nullptr) {
1608     bb->visited = true;
1609     terminated_by_return |= bb->terminated_by_return;
1610     do_local_value_numbering |= bb->use_lvn;
1611     bb = NextDominatedBlock(bb);
1612   }
1613   if (terminated_by_return || do_local_value_numbering) {
1614     // Propagate the LVN and return-domination flags to all blocks in this extended set.
1615     bb = start_bb;
1616     while (bb != nullptr) {
1617       bb->use_lvn = do_local_value_numbering;
1618       bb->dominates_return = terminated_by_return;
1619       bb = NextDominatedBlock(bb);
1620     }
1621   }
1622   return false;  // Not iterative; the return value will be ignored.
1623 }
1624 
1625 void MIRGraph::BasicBlockOptimizationStart() {
1626   if ((cu_->disable_opt & (1 << kLocalValueNumbering)) == 0) {
1627     temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
1628     temp_.gvn.ifield_ids =
1629         GlobalValueNumbering::PrepareGvnFieldIds(temp_scoped_alloc_.get(), ifield_lowering_infos_);
1630     temp_.gvn.sfield_ids =
1631         GlobalValueNumbering::PrepareGvnFieldIds(temp_scoped_alloc_.get(), sfield_lowering_infos_);
1632   }
1633 }
1634 
1635 void MIRGraph::BasicBlockOptimization() {
1636   if ((cu_->disable_opt & (1 << kSuppressExceptionEdges)) != 0) {
1637     ClearAllVisitedFlags();
1638     PreOrderDfsIterator iter2(this);
1639     for (BasicBlock* bb = iter2.Next(); bb != nullptr; bb = iter2.Next()) {
1640       BuildExtendedBBList(bb);
1641     }
1642     // Perform extended basic block optimizations.
1643     for (unsigned int i = 0; i < extended_basic_blocks_.size(); i++) {
1644       BasicBlockOpt(GetBasicBlock(extended_basic_blocks_[i]));
1645     }
1646   } else {
1647     PreOrderDfsIterator iter(this);
1648     for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
1649       BasicBlockOpt(bb);
1650     }
1651   }
1652 }
1653 
1654 void MIRGraph::BasicBlockOptimizationEnd() {
1655   // Clean up after LVN.
1656   temp_.gvn.ifield_ids = nullptr;
1657   temp_.gvn.sfield_ids = nullptr;
1658   temp_scoped_alloc_.reset();
1659 }
1660 
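// Rewrites String construction into factory-call form: NEW_INSTANCE of java.lang.String becomes
// a CONST_4 0 placeholder, the matching String.<init> invoke-direct is turned into an
// invoke-static with the "this" argument dropped, and a MOVE_RESULT_OBJECT (plus MOVE_OBJECTs
// for any copies recorded by the verifier) stores the returned String into the registers that
// previously held the "this" reference. Roughly, for the non-range form:
//   new-instance v0, Ljava/lang/String;                 =>  const/4 v0, #0
//   invoke-direct {v0, v1}, Ljava/lang/String;.<init>   =>  invoke-static {v1}, String.<init>
//                                                           move-result-object v0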
1661 void MIRGraph::StringChange() {
1662   AllNodesIterator iter(this);
1663   for (BasicBlock* bb = iter.Next(); bb != nullptr; bb = iter.Next()) {
1664     for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
1665       // Look for String NEW_INSTANCE and String.<init> invoke opcodes; skip everything else.
1666       Instruction::Code opcode = mir->dalvikInsn.opcode;
1667       if (opcode == Instruction::NEW_INSTANCE) {
1668         uint32_t type_idx = mir->dalvikInsn.vB;
1669         if (cu_->compiler_driver->IsStringTypeIndex(type_idx, cu_->dex_file)) {
1670           // Change NEW_INSTANCE into CONST_4 of 0
1671           mir->dalvikInsn.opcode = Instruction::CONST_4;
1672           mir->dalvikInsn.vB = 0;
1673         }
1674       } else if ((opcode == Instruction::INVOKE_DIRECT) ||
1675                  (opcode == Instruction::INVOKE_DIRECT_RANGE)) {
1676         uint32_t method_idx = mir->dalvikInsn.vB;
1677         DexFileMethodInliner* inliner =
1678             cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file);
1679         if (inliner->IsStringInitMethodIndex(method_idx)) {
1680           bool is_range = (opcode == Instruction::INVOKE_DIRECT_RANGE);
1681           uint32_t orig_this_reg = is_range ? mir->dalvikInsn.vC : mir->dalvikInsn.arg[0];
1682           // Remove this pointer from string init and change to static call.
1683           mir->dalvikInsn.vA--;
1684           if (!is_range) {
1685             mir->dalvikInsn.opcode = Instruction::INVOKE_STATIC;
1686             for (uint32_t i = 0; i < mir->dalvikInsn.vA; i++) {
1687               mir->dalvikInsn.arg[i] = mir->dalvikInsn.arg[i + 1];
1688             }
1689           } else {
1690             mir->dalvikInsn.opcode = Instruction::INVOKE_STATIC_RANGE;
1691             mir->dalvikInsn.vC++;
1692           }
1693           // Insert a move-result instruction to the original this pointer reg.
1694           MIR* move_result_mir = static_cast<MIR *>(arena_->Alloc(sizeof(MIR), kArenaAllocMIR));
1695           move_result_mir->dalvikInsn.opcode = Instruction::MOVE_RESULT_OBJECT;
1696           move_result_mir->dalvikInsn.vA = orig_this_reg;
1697           move_result_mir->offset = mir->offset;
1698           move_result_mir->m_unit_index = mir->m_unit_index;
1699           bb->InsertMIRAfter(mir, move_result_mir);
1700           // Add additional moves if this pointer was copied to other registers.
1701           const VerifiedMethod* verified_method =
1702               cu_->compiler_driver->GetVerifiedMethod(cu_->dex_file, cu_->method_idx);
1703           DCHECK(verified_method != nullptr);
1704           const SafeMap<uint32_t, std::set<uint32_t>>& string_init_map =
1705               verified_method->GetStringInitPcRegMap();
1706           auto map_it = string_init_map.find(mir->offset);
1707           if (map_it != string_init_map.end()) {
1708             const std::set<uint32_t>& reg_set = map_it->second;
1709             for (auto set_it = reg_set.begin(); set_it != reg_set.end(); ++set_it) {
1710               MIR* move_mir = static_cast<MIR *>(arena_->Alloc(sizeof(MIR), kArenaAllocMIR));
1711               move_mir->dalvikInsn.opcode = Instruction::MOVE_OBJECT;
1712               move_mir->dalvikInsn.vA = *set_it;
1713               move_mir->dalvikInsn.vB = orig_this_reg;
1714               move_mir->offset = mir->offset;
1715               move_mir->m_unit_index = mir->m_unit_index;
1716               bb->InsertMIRAfter(move_result_mir, move_mir);
1717             }
1718           }
1719         }
1720       }
1721     }
1722   }
1723 }
1724 
1725 
1726 bool MIRGraph::EliminateSuspendChecksGate() {
1727   if (kLeafOptimization ||           // Incompatible (could create loops without suspend checks).
1728       (cu_->disable_opt & (1 << kSuspendCheckElimination)) != 0 ||  // Disabled.
1729       GetMaxNestedLoops() == 0u ||   // Nothing to do.
1730       GetMaxNestedLoops() >= 32u ||  // Only 32 bits in suspend_checks_in_loops_[.].
1731                                      // Exclude 32 as well to keep bit shifts well-defined.
1732       !HasInvokes()) {               // No invokes to actually eliminate any suspend checks.
1733     return false;
1734   }
1735   suspend_checks_in_loops_ = arena_->AllocArray<uint32_t>(GetNumBlocks(), kArenaAllocMisc);
1736   return true;
1737 }
1738 
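// Per-block state is a 32-bit mask with one bit per enclosing loop (hence the depth < 32 check
// in the gate). A set bit means that, on every path through this block since entering that
// loop, a suspend point has been seen, either a non-intrinsic invoke in this block or, by
// intersection, in all forward predecessors. The final mask is used (via IsSuspendCheckEdge(),
// defined elsewhere) to decide whether the block-ending branch can be flagged with
// MIR_IGNORE_SUSPEND_CHECK, or whether an explicit GOTO must be added to keep the suspend check
// on a fall-through back edge.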
1739 bool MIRGraph::EliminateSuspendChecks(BasicBlock* bb) {
1740   if (bb->block_type != kDalvikByteCode) {
1741     return false;
1742   }
1743   DCHECK_EQ(GetTopologicalSortOrderLoopHeadStack()->size(), bb->nesting_depth);
1744   if (bb->nesting_depth == 0u) {
1745     // Out of loops.
1746     DCHECK_EQ(suspend_checks_in_loops_[bb->id], 0u);  // The array was zero-initialized.
1747     return false;
1748   }
1749   uint32_t suspend_checks_in_loops = (1u << bb->nesting_depth) - 1u;  // Start with all loop heads.
1750   bool found_invoke = false;
1751   for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
1752     if ((IsInstructionInvoke(mir->dalvikInsn.opcode) ||
1753         IsInstructionQuickInvoke(mir->dalvikInsn.opcode)) &&
1754         !GetMethodLoweringInfo(mir).IsIntrinsic()) {
1755       // Non-intrinsic invoke, rely on a suspend point in the invoked method.
1756       found_invoke = true;
1757       break;
1758     }
1759   }
1760   if (!found_invoke) {
1761     // Intersect suspend checks from predecessors.
1762     uint16_t bb_topo_idx = topological_order_indexes_[bb->id];
1763     uint32_t pred_mask_union = 0u;
1764     for (BasicBlockId pred_id : bb->predecessors) {
1765       uint16_t pred_topo_idx = topological_order_indexes_[pred_id];
1766       if (pred_topo_idx < bb_topo_idx) {
1767         // Determine the loop depth of this predecessor relative to this block.
1768         size_t pred_loop_depth = topological_order_loop_head_stack_.size();
1769         while (pred_loop_depth != 0u &&
1770             pred_topo_idx < topological_order_loop_head_stack_[pred_loop_depth - 1].first) {
1771           --pred_loop_depth;
1772         }
1773         DCHECK_LE(pred_loop_depth, GetBasicBlock(pred_id)->nesting_depth);
1774         uint32_t pred_mask = (1u << pred_loop_depth) - 1u;
1775         // Intersect pred_mask bits in suspend_checks_in_loops with
1776         // suspend_checks_in_loops_[pred_id].
1777         uint32_t pred_loops_without_checks = pred_mask & ~suspend_checks_in_loops_[pred_id];
1778         suspend_checks_in_loops = suspend_checks_in_loops & ~pred_loops_without_checks;
1779         pred_mask_union |= pred_mask;
1780       }
1781     }
1782     // DCHECK_EQ() may not hold for unnatural loop heads, so use DCHECK_GE().
1783   DCHECK_GE(((1u << (IsLoopHead(bb->id) ? bb->nesting_depth - 1u : bb->nesting_depth)) - 1u),
1784               pred_mask_union);
1785     suspend_checks_in_loops &= pred_mask_union;
1786   }
1787   suspend_checks_in_loops_[bb->id] = suspend_checks_in_loops;
1788   if (suspend_checks_in_loops == 0u) {
1789     return false;
1790   }
1791   // Apply MIR_IGNORE_SUSPEND_CHECK if appropriate.
1792   if (bb->taken != NullBasicBlockId) {
1793     DCHECK(bb->last_mir_insn != nullptr);
1794     DCHECK(IsInstructionIfCc(bb->last_mir_insn->dalvikInsn.opcode) ||
1795            IsInstructionIfCcZ(bb->last_mir_insn->dalvikInsn.opcode) ||
1796            IsInstructionGoto(bb->last_mir_insn->dalvikInsn.opcode) ||
1797            (static_cast<int>(bb->last_mir_insn->dalvikInsn.opcode) >= kMirOpFusedCmplFloat &&
1798             static_cast<int>(bb->last_mir_insn->dalvikInsn.opcode) <= kMirOpFusedCmpLong));
1799     if (!IsSuspendCheckEdge(bb, bb->taken) &&
1800         (bb->fall_through == NullBasicBlockId || !IsSuspendCheckEdge(bb, bb->fall_through))) {
1801       bb->last_mir_insn->optimization_flags |= MIR_IGNORE_SUSPEND_CHECK;
1802     }
1803   } else if (bb->fall_through != NullBasicBlockId && IsSuspendCheckEdge(bb, bb->fall_through)) {
1804     // We've got a fall-through suspend edge. Add an artificial GOTO to force suspend check.
1805     MIR* mir = NewMIR();
1806     mir->dalvikInsn.opcode = Instruction::GOTO;
1807     mir->dalvikInsn.vA = 0;  // Branch offset.
1808     mir->offset = GetBasicBlock(bb->fall_through)->start_offset;
1809     mir->m_unit_index = current_method_;
1810     mir->ssa_rep = reinterpret_cast<SSARepresentation*>(
1811         arena_->Alloc(sizeof(SSARepresentation), kArenaAllocDFInfo));  // Zero-initialized.
1812     bb->AppendMIR(mir);
1813     std::swap(bb->fall_through, bb->taken);  // The fall-through has become taken.
1814   }
1815   return true;
1816 }
1817 
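// Conservative throw analysis: starts from the instruction's kThrow flag and then rules out
// only those throws whose checks have been proven unnecessary (eliminated null checks, range
// checks, class init checks, check-casts) or whose field accesses are known to use the fast
// path. Anything it cannot prove safe is reported as throwing.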
1818 bool MIRGraph::CanThrow(MIR* mir) const {
1819   if ((mir->dalvikInsn.FlagsOf() & Instruction::kThrow) == 0) {
1820     return false;
1821   }
1822   const int opt_flags = mir->optimization_flags;
1823   uint64_t df_attributes = GetDataFlowAttributes(mir);
1824 
1825   // First, check if the insn can still throw NPE.
1826   if (((df_attributes & DF_HAS_NULL_CHKS) != 0) && ((opt_flags & MIR_IGNORE_NULL_CHECK) == 0)) {
1827     return true;
1828   }
1829 
1830   // Now process specific instructions.
1831   if ((df_attributes & DF_IFIELD) != 0) {
1832     // The IGET/IPUT family. We have processed the IGET/IPUT null check above.
1833     DCHECK_NE(opt_flags & MIR_IGNORE_NULL_CHECK, 0);
1834     // If the access is not fast-path, the slow path may be taken and the insn can throw.
1835     const MirIFieldLoweringInfo& field_info = GetIFieldLoweringInfo(mir);
1836     bool fast = (df_attributes & DF_DA) != 0 ? field_info.FastGet() : field_info.FastPut();
1837     return !fast;
1838   } else if ((df_attributes & DF_SFIELD) != 0) {
1839     // The SGET/SPUT family. Check for potentially throwing class initialization.
1840     // Also, if the access is not fast-path, the slow path may be taken and the insn can throw.
1841     const MirSFieldLoweringInfo& field_info = GetSFieldLoweringInfo(mir);
1842     bool fast = (df_attributes & DF_DA) != 0 ? field_info.FastGet() : field_info.FastPut();
1843     bool is_class_initialized = field_info.IsClassInitialized() ||
1844         ((mir->optimization_flags & MIR_CLASS_IS_INITIALIZED) != 0);
1845     return !(fast && is_class_initialized);
1846   } else if ((df_attributes & DF_HAS_RANGE_CHKS) != 0) {
1847     // Only AGET/APUT have range checks. We have processed the AGET/APUT null check above.
1848     DCHECK_NE(opt_flags & MIR_IGNORE_NULL_CHECK, 0);
1849     // Non-throwing only if range check has been eliminated.
1850     return ((opt_flags & MIR_IGNORE_RANGE_CHECK) == 0);
1851   } else if (mir->dalvikInsn.opcode == Instruction::CHECK_CAST &&
1852       (opt_flags & MIR_IGNORE_CHECK_CAST) != 0) {
1853     return false;
1854   } else if (mir->dalvikInsn.opcode == Instruction::ARRAY_LENGTH ||
1855       static_cast<int>(mir->dalvikInsn.opcode) == kMirOpNullCheck) {
1856     // No more checks for these (null check was processed above).
1857     return false;
1858   }
1859   return true;
1860 }
1861 
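// Returns true if 'second' defines a vreg that 'first' uses, i.e. the later MIR would overwrite
// an operand of the earlier candidate multiply before the fused multiply-add could read it.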
1862 bool MIRGraph::HasAntiDependency(MIR* first, MIR* second) {
1863   DCHECK(first->ssa_rep != nullptr);
1864   DCHECK(second->ssa_rep != nullptr);
1865   if ((second->ssa_rep->num_defs > 0) && (first->ssa_rep->num_uses > 0)) {
1866     int vreg0 = SRegToVReg(second->ssa_rep->defs[0]);
1867     int vreg1 = (second->ssa_rep->num_defs == 2) ?
1868         SRegToVReg(second->ssa_rep->defs[1]) : INVALID_VREG;
1869     for (int i = 0; i < first->ssa_rep->num_uses; i++) {
1870       int32_t use = SRegToVReg(first->ssa_rep->uses[i]);
1871       if (use == vreg0 || use == vreg1) {
1872         return true;
1873       }
1874     }
1875   }
1876   return false;
1877 }
1878 
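// Fuses a multiply and a following add/sub into a single extended MIR (kMirOpMaddInt,
// kMirOpMsubInt, kMirOpMaddLong or kMirOpMsubLong). The add/sub MIR is rewritten in place: its
// uses become the multiply's two operands followed by the remaining addend (three SSA uses for
// the int form, six for the long form), vB/vC/arg[0] are refilled with the matching vregs, and
// the original multiply is turned into a nop.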
1879 void MIRGraph::CombineMultiplyAdd(MIR* mul_mir, MIR* add_mir, bool mul_is_first_addend,
1880                                   bool is_wide, bool is_sub) {
1881   if (is_wide) {
1882     if (is_sub) {
1883       add_mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpMsubLong);
1884     } else {
1885       add_mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpMaddLong);
1886     }
1887   } else {
1888     if (is_sub) {
1889       add_mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpMsubInt);
1890     } else {
1891       add_mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpMaddInt);
1892     }
1893   }
1894   add_mir->ssa_rep->num_uses = is_wide ? 6 : 3;
1895   int32_t addend0 = INVALID_SREG;
1896   int32_t addend1 = INVALID_SREG;
1897   if (is_wide) {
1898     addend0 = mul_is_first_addend ? add_mir->ssa_rep->uses[2] : add_mir->ssa_rep->uses[0];
1899     addend1 = mul_is_first_addend ? add_mir->ssa_rep->uses[3] : add_mir->ssa_rep->uses[1];
1900   } else {
1901     addend0 = mul_is_first_addend ? add_mir->ssa_rep->uses[1] : add_mir->ssa_rep->uses[0];
1902   }
1903 
1904   AllocateSSAUseData(add_mir, add_mir->ssa_rep->num_uses);
1905   add_mir->ssa_rep->uses[0] = mul_mir->ssa_rep->uses[0];
1906   add_mir->ssa_rep->uses[1] = mul_mir->ssa_rep->uses[1];
1907   // Clear the use counts for the original multiply's product, as it is no longer used.
1908   raw_use_counts_[mul_mir->ssa_rep->defs[0]] = 0;
1909   use_counts_[mul_mir->ssa_rep->defs[0]] = 0;
1910   if (is_wide) {
1911     DCHECK_EQ(add_mir->ssa_rep->num_uses, 6);
1912     add_mir->ssa_rep->uses[2] = mul_mir->ssa_rep->uses[2];
1913     add_mir->ssa_rep->uses[3] = mul_mir->ssa_rep->uses[3];
1914     add_mir->ssa_rep->uses[4] = addend0;
1915     add_mir->ssa_rep->uses[5] = addend1;
1916     raw_use_counts_[mul_mir->ssa_rep->defs[1]] = 0;
1917     use_counts_[mul_mir->ssa_rep->defs[1]] = 0;
1918   } else {
1919     DCHECK_EQ(add_mir->ssa_rep->num_uses, 3);
1920     add_mir->ssa_rep->uses[2] = addend0;
1921   }
1922   // Copy in the decoded instruction information.
1923   add_mir->dalvikInsn.vB = SRegToVReg(add_mir->ssa_rep->uses[0]);
1924   if (is_wide) {
1925     add_mir->dalvikInsn.vC = SRegToVReg(add_mir->ssa_rep->uses[2]);
1926     add_mir->dalvikInsn.arg[0] = SRegToVReg(add_mir->ssa_rep->uses[4]);
1927   } else {
1928     add_mir->dalvikInsn.vC = SRegToVReg(add_mir->ssa_rep->uses[1]);
1929     add_mir->dalvikInsn.arg[0] = SRegToVReg(add_mir->ssa_rep->uses[2]);
1930   }
1931   // Original multiply MIR is set to Nop.
1932   mul_mir->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
1933 }
1934 
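// Scans a block for multiply-add/sub patterns: candidate multiplies whose product has exactly
// one use are remembered in ssa_mul_map keyed by the product's SSA reg; when a later ADD/SUB
// consumes that product in a fusible position, CombineMultiplyAdd() rewrites the pair. The map
// is cleared across potentially throwing instructions and pruned whenever a later MIR clobbers
// one of a candidate's operands (HasAntiDependency).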
1935 void MIRGraph::MultiplyAddOpt(BasicBlock* bb) {
1936   if (bb->block_type == kDead) {
1937     return;
1938   }
1939   ScopedArenaAllocator allocator(&cu_->arena_stack);
1940   ScopedArenaSafeMap<uint32_t, MIR*> ssa_mul_map(std::less<uint32_t>(), allocator.Adapter());
1941   ScopedArenaSafeMap<uint32_t, MIR*>::iterator map_it;
1942   for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
1943     Instruction::Code opcode = mir->dalvikInsn.opcode;
1944     bool is_sub = true;
1945     bool is_candidate_multiply = false;
1946     switch (opcode) {
1947       case Instruction::MUL_INT:
1948       case Instruction::MUL_INT_2ADDR:
1949         is_candidate_multiply = true;
1950         break;
1951       case Instruction::MUL_LONG:
1952       case Instruction::MUL_LONG_2ADDR:
1953         if (cu_->target64) {
1954           is_candidate_multiply = true;
1955         }
1956         break;
1957       case Instruction::ADD_INT:
1958       case Instruction::ADD_INT_2ADDR:
1959         is_sub = false;
1960         FALLTHROUGH_INTENDED;
1961       case Instruction::SUB_INT:
1962       case Instruction::SUB_INT_2ADDR:
1963         if (((map_it = ssa_mul_map.find(mir->ssa_rep->uses[0])) != ssa_mul_map.end()) && !is_sub) {
1964           // a*b+c
1965           CombineMultiplyAdd(map_it->second, mir, true /* product is the first addend */,
1966                              false /* is_wide */, false /* is_sub */);
1967           ssa_mul_map.erase(mir->ssa_rep->uses[0]);
1968         } else if ((map_it = ssa_mul_map.find(mir->ssa_rep->uses[1])) != ssa_mul_map.end()) {
1969           // c+a*b or c-a*b
1970           CombineMultiplyAdd(map_it->second, mir, false /* product is the second addend */,
1971                              false /* is_wide */, is_sub);
1972           ssa_mul_map.erase(map_it);
1973         }
1974         break;
1975       case Instruction::ADD_LONG:
1976       case Instruction::ADD_LONG_2ADDR:
1977         is_sub = false;
1978         FALLTHROUGH_INTENDED;
1979       case Instruction::SUB_LONG:
1980       case Instruction::SUB_LONG_2ADDR:
1981         if (!cu_->target64) {
1982           break;
1983         }
1984         if ((map_it = ssa_mul_map.find(mir->ssa_rep->uses[0])) != ssa_mul_map.end() && !is_sub) {
1985           // a*b+c
1986           CombineMultiplyAdd(map_it->second, mir, true /* product is the first addend */,
1987                              true /* is_wide */, false /* is_sub */);
1988           ssa_mul_map.erase(map_it);
1989         } else if ((map_it = ssa_mul_map.find(mir->ssa_rep->uses[2])) != ssa_mul_map.end()) {
1990           // c+a*b or c-a*b
1991           CombineMultiplyAdd(map_it->second, mir, false /* product is the second addend */,
1992                              true /* is_wide */, is_sub);
1993           ssa_mul_map.erase(map_it);
1994         }
1995         break;
1996       default:
1997         if (!ssa_mul_map.empty() && CanThrow(mir)) {
1998           // Should not combine multiply and add MIRs across potential exception.
1999           ssa_mul_map.clear();
2000         }
2001         break;
2002     }
2003 
2004     // Exclude the case where a later MIR writes a vreg that is used by a recorded candidate
2005     // multiply MIR. The register allocator may assign the same physical register to both, so
2006     // the multiplier could already have been overwritten and must not feed the fused
2007     // multiply-add insn.
2008     if (ssa_mul_map.size() > 0) {
2009       for (auto it = ssa_mul_map.begin(); it != ssa_mul_map.end();) {
2010         MIR* mul = it->second;
2011         if (HasAntiDependency(mul, mir)) {
2012           it = ssa_mul_map.erase(it);
2013         } else {
2014           ++it;
2015         }
2016       }
2017     }
2018 
2019     if (is_candidate_multiply &&
2020         (GetRawUseCount(mir->ssa_rep->defs[0]) == 1) && (mir->next != nullptr)) {
2021       ssa_mul_map.Put(mir->ssa_rep->defs[0], mir);
2022     }
2023   }
2024 }
2025 
2026 }  // namespace art
2027