/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "dex/compiler_internals.h"
#include "dex/dataflow_iterator-inl.h"
#include "dex/quick/dex_file_method_inliner.h"
#include "mir_to_lir-inl.h"
#include "thread-inl.h"

namespace art {

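// Map a shorty type character to the register class holding its value:
// 'L' (reference) -> kRefReg, 'F'/'D' (floating point) -> kFPReg, everything else -> kCoreReg.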
RegisterClass Mir2Lir::ShortyToRegClass(char shorty_type) {
  RegisterClass res;
  switch (shorty_type) {
    case 'L':
      res = kRefReg;
      break;
    case 'F':
      // Expected fallthrough.
    case 'D':
      res = kFPReg;
      break;
    default:
      res = kCoreReg;
  }
  return res;
}

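// Derive the register class from a RegLocation's ref/fp flags; at most one of the two may be
// set, and plain values go to kCoreReg.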
RegisterClass Mir2Lir::LocToRegClass(RegLocation loc) {
  RegisterClass res;
  if (loc.fp) {
    DCHECK(!loc.ref) << "At most, one of ref/fp may be set";
    res = kFPReg;
  } else if (loc.ref) {
    res = kRefReg;
  } else {
    res = kCoreReg;
  }
  return res;
}

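// Lock the physical register(s) holding the incoming argument at in_position (and the next
// position for a wide argument) so they cannot be handed out as temps.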
void Mir2Lir::LockArg(int in_position, bool wide) {
  RegStorage reg_arg_low = GetArgMappingToPhysicalReg(in_position);
  RegStorage reg_arg_high = wide ? GetArgMappingToPhysicalReg(in_position + 1) :
      RegStorage::InvalidReg();

  if (reg_arg_low.Valid()) {
    LockTemp(reg_arg_low);
  }
  if (reg_arg_high.Valid() && reg_arg_low.NotExactlyEquals(reg_arg_high)) {
    LockTemp(reg_arg_high);
  }
}

// TODO: simplify when 32-bit targets go hard-float.
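// Return the incoming argument at in_position in a register of reg_class: use the mapped
// physical argument register when there is one, otherwise load from the frame, copying to a
// fresh temp if the register class does not match.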
RegStorage Mir2Lir::LoadArg(int in_position, RegisterClass reg_class, bool wide) {
  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
  int offset = StackVisitor::GetOutVROffset(in_position, cu_->instruction_set);

  if (cu_->instruction_set == kX86) {
    /*
     * A call on x86 pushes the return address, moving the stack pointer.
     * Thus, we add another 4 bytes to locate the caller's outs (the callee's ins).
     */
    offset += sizeof(uint32_t);
  }

  if (cu_->instruction_set == kX86_64) {
    /*
     * A call on x86-64 pushes the return address, moving the stack pointer.
     * Thus, we add another 8 bytes to locate the caller's outs (the callee's ins).
     */
    offset += sizeof(uint64_t);
  }

  if (cu_->target64) {
    RegStorage reg_arg = GetArgMappingToPhysicalReg(in_position);
    if (!reg_arg.Valid()) {
      RegStorage new_reg =
          wide ? AllocTypedTempWide(false, reg_class) : AllocTypedTemp(false, reg_class);
      LoadBaseDisp(TargetPtrReg(kSp), offset, new_reg, wide ? k64 : k32, kNotVolatile);
      return new_reg;
    } else {
      // Check if we need to copy the arg to a different reg_class.
      if (!RegClassMatches(reg_class, reg_arg)) {
        if (wide) {
          RegStorage new_reg = AllocTypedTempWide(false, reg_class);
          OpRegCopyWide(new_reg, reg_arg);
          reg_arg = new_reg;
        } else {
          RegStorage new_reg = AllocTypedTemp(false, reg_class);
          OpRegCopy(new_reg, reg_arg);
          reg_arg = new_reg;
        }
      }
    }
    return reg_arg;
  }

  RegStorage reg_arg_low = GetArgMappingToPhysicalReg(in_position);
  RegStorage reg_arg_high = wide ? GetArgMappingToPhysicalReg(in_position + 1) :
      RegStorage::InvalidReg();

  // If the VR is wide and there is no register for high part, we need to load it.
  if (wide && !reg_arg_high.Valid()) {
    // If the low part is not in a reg, we allocate a pair. Otherwise, we just load to high reg.
    if (!reg_arg_low.Valid()) {
      RegStorage new_regs = AllocTypedTempWide(false, reg_class);
      LoadBaseDisp(TargetPtrReg(kSp), offset, new_regs, k64, kNotVolatile);
      return new_regs;  // The reg_class is OK, we can return.
    } else {
      // Assume that no ABI allows splitting a wide fp reg between a narrow fp reg and memory,
      // i.e. the low part is in a core reg. Load the second part in a core reg as well for now.
      DCHECK(!reg_arg_low.IsFloat());
      reg_arg_high = AllocTemp();
      int offset_high = offset + sizeof(uint32_t);
      Load32Disp(TargetPtrReg(kSp), offset_high, reg_arg_high);
      // Continue below to check the reg_class.
    }
  }

  // If the low part is not in a register yet, we need to load it.
  if (!reg_arg_low.Valid()) {
    // Assume that if the low part of a wide arg is passed in memory, so is the high part,
    // thus we don't get here for wide args as it's handled above. Big-endian ABIs could
    // conceivably break this assumption but Android supports only little-endian architectures.
    DCHECK(!wide);
    reg_arg_low = AllocTypedTemp(false, reg_class);
    Load32Disp(TargetPtrReg(kSp), offset, reg_arg_low);
    return reg_arg_low;  // The reg_class is OK, we can return.
  }

  RegStorage reg_arg = wide ? RegStorage::MakeRegPair(reg_arg_low, reg_arg_high) : reg_arg_low;
  // Check if we need to copy the arg to a different reg_class.
  if (!RegClassMatches(reg_class, reg_arg)) {
    if (wide) {
      RegStorage new_regs = AllocTypedTempWide(false, reg_class);
      OpRegCopyWide(new_regs, reg_arg);
      reg_arg = new_regs;
    } else {
      RegStorage new_reg = AllocTypedTemp(false, reg_class);
      OpRegCopy(new_reg, reg_arg);
      reg_arg = new_reg;
    }
  }
  return reg_arg;
}

// TODO: simplify when 32-bit targets go hard-float.
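// Load the incoming argument at in_position directly into the register(s) named by rl_dest,
// from the mapped physical argument register(s) when available, otherwise from the frame.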
void Mir2Lir::LoadArgDirect(int in_position, RegLocation rl_dest) {
  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
  int offset = StackVisitor::GetOutVROffset(in_position, cu_->instruction_set);
  if (cu_->instruction_set == kX86) {
    /*
     * A call on x86 pushes the return address, moving the stack pointer.
     * Thus, we add another 4 bytes to locate the caller's outs (the callee's ins).
     */
    offset += sizeof(uint32_t);
  }

  if (cu_->instruction_set == kX86_64) {
    /*
     * A call on x86-64 pushes the return address, moving the stack pointer.
     * Thus, we add another 8 bytes to locate the caller's outs (the callee's ins).
     */
    offset += sizeof(uint64_t);
  }

  if (!rl_dest.wide) {
    RegStorage reg = GetArgMappingToPhysicalReg(in_position);
    if (reg.Valid()) {
      OpRegCopy(rl_dest.reg, reg);
    } else {
      Load32Disp(TargetPtrReg(kSp), offset, rl_dest.reg);
    }
  } else {
    if (cu_->target64) {
      RegStorage reg = GetArgMappingToPhysicalReg(in_position);
      if (reg.Valid()) {
        OpRegCopy(rl_dest.reg, reg);
      } else {
        LoadBaseDisp(TargetPtrReg(kSp), offset, rl_dest.reg, k64, kNotVolatile);
      }
      return;
    }

    RegStorage reg_arg_low = GetArgMappingToPhysicalReg(in_position);
    RegStorage reg_arg_high = GetArgMappingToPhysicalReg(in_position + 1);

    if (cu_->instruction_set == kX86) {
      // Can't handle double split between reg & memory.  Flush reg half to memory.
      if (rl_dest.reg.IsDouble() && (reg_arg_low.Valid() != reg_arg_high.Valid())) {
        DCHECK(reg_arg_low.Valid());
        DCHECK(!reg_arg_high.Valid());
        Store32Disp(TargetPtrReg(kSp), offset, reg_arg_low);
        reg_arg_low = RegStorage::InvalidReg();
      }
    }

    if (reg_arg_low.Valid() && reg_arg_high.Valid()) {
      OpRegCopyWide(rl_dest.reg, RegStorage::MakeRegPair(reg_arg_low, reg_arg_high));
    } else if (reg_arg_low.Valid() && !reg_arg_high.Valid()) {
      OpRegCopy(rl_dest.reg, reg_arg_low);
      int offset_high = offset + sizeof(uint32_t);
      Load32Disp(TargetPtrReg(kSp), offset_high, rl_dest.reg.GetHigh());
    } else if (!reg_arg_low.Valid() && reg_arg_high.Valid()) {
      OpRegCopy(rl_dest.reg.GetHigh(), reg_arg_high);
      Load32Disp(TargetPtrReg(kSp), offset, rl_dest.reg.GetLow());
    } else {
      LoadBaseDisp(TargetPtrReg(kSp), offset, rl_dest.reg, k64, kNotVolatile);
    }
  }
}

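// Generate a special-case getter that loads an instance field of "this" into the return
// register(s). Returns false, declining the special case, if the object is not "this" and
// would therefore need a null check.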
bool Mir2Lir::GenSpecialIGet(MIR* mir, const InlineMethod& special) {
  // FastInstance() already checked by DexFileMethodInliner.
  const InlineIGetIPutData& data = special.d.ifield_data;
  if (data.method_is_static != 0u || data.object_arg != 0u) {
    // The object is not "this" and has to be null-checked.
    return false;
  }

  bool wide = (data.op_variant == InlineMethodAnalyser::IGetVariant(Instruction::IGET_WIDE));
  bool ref = (data.op_variant == InlineMethodAnalyser::IGetVariant(Instruction::IGET_OBJECT));
  OpSize size = LoadStoreOpSize(wide, ref);

  // Point of no return - no aborts after this.
  GenPrintLabel(mir);
  LockArg(data.object_arg);
  RegStorage reg_obj = LoadArg(data.object_arg, kRefReg);
  RegisterClass reg_class = RegClassForFieldLoadStore(size, data.is_volatile);
  RegisterClass ret_reg_class = ShortyToRegClass(cu_->shorty[0]);
  RegLocation rl_dest = wide ? GetReturnWide(ret_reg_class) : GetReturn(ret_reg_class);
  RegStorage r_result = rl_dest.reg;
  if (!RegClassMatches(reg_class, r_result)) {
    r_result = wide ? AllocTypedTempWide(rl_dest.fp, reg_class)
                    : AllocTypedTemp(rl_dest.fp, reg_class);
  }
  if (ref) {
    LoadRefDisp(reg_obj, data.field_offset, r_result, data.is_volatile ? kVolatile : kNotVolatile);
  } else {
    LoadBaseDisp(reg_obj, data.field_offset, r_result, size, data.is_volatile ? kVolatile :
        kNotVolatile);
  }
  if (r_result.NotExactlyEquals(rl_dest.reg)) {
    if (wide) {
      OpRegCopyWide(rl_dest.reg, r_result);
    } else {
      OpRegCopy(rl_dest.reg, r_result);
    }
  }
  return true;
}

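// Generate a special-case setter that stores a method argument into an instance field of
// "this". Declines the special case when the object would need a null check or when the
// setter returns one of its arguments.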
bool Mir2Lir::GenSpecialIPut(MIR* mir, const InlineMethod& special) {
  // FastInstance() already checked by DexFileMethodInliner.
  const InlineIGetIPutData& data = special.d.ifield_data;
  if (data.method_is_static != 0u || data.object_arg != 0u) {
    // The object is not "this" and has to be null-checked.
    return false;
  }
  if (data.return_arg_plus1 != 0u) {
    // The setter returns a method argument which we don't support here.
    return false;
  }

  bool wide = (data.op_variant == InlineMethodAnalyser::IPutVariant(Instruction::IPUT_WIDE));
  bool ref = (data.op_variant == InlineMethodAnalyser::IPutVariant(Instruction::IPUT_OBJECT));
  OpSize size = LoadStoreOpSize(wide, ref);

  // Point of no return - no aborts after this.
  GenPrintLabel(mir);
  LockArg(data.object_arg);
  LockArg(data.src_arg, wide);
  RegStorage reg_obj = LoadArg(data.object_arg, kRefReg);
  RegisterClass reg_class = RegClassForFieldLoadStore(size, data.is_volatile);
  RegStorage reg_src = LoadArg(data.src_arg, reg_class, wide);
  if (ref) {
    StoreRefDisp(reg_obj, data.field_offset, reg_src, data.is_volatile ? kVolatile : kNotVolatile);
  } else {
    StoreBaseDisp(reg_obj, data.field_offset, reg_src, size, data.is_volatile ? kVolatile :
        kNotVolatile);
  }
  if (ref) {
    MarkGCCard(reg_src, reg_obj);
  }
  return true;
}

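// Generate a special-case method that simply returns one of its incoming arguments.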
bool Mir2Lir::GenSpecialIdentity(MIR* mir, const InlineMethod& special) {
  const InlineReturnArgData& data = special.d.return_data;
  bool wide = (data.is_wide != 0u);

  // Point of no return - no aborts after this.
  GenPrintLabel(mir);
  LockArg(data.arg, wide);
  RegisterClass reg_class = ShortyToRegClass(cu_->shorty[0]);
  RegLocation rl_dest = wide ? GetReturnWide(reg_class) : GetReturn(reg_class);
  LoadArgDirect(data.arg, rl_dest);
  return true;
}

/*
 * Special-case code generation for simple non-throwing leaf methods.
 */
bool Mir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special) {
  DCHECK(special.flags & kInlineSpecial);
  current_dalvik_offset_ = mir->offset;
  MIR* return_mir = nullptr;
  bool successful = false;

  switch (special.opcode) {
    case kInlineOpNop:
      successful = true;
      DCHECK_EQ(mir->dalvikInsn.opcode, Instruction::RETURN_VOID);
      return_mir = mir;
      break;
    case kInlineOpNonWideConst: {
      successful = true;
      RegLocation rl_dest = GetReturn(ShortyToRegClass(cu_->shorty[0]));
      GenPrintLabel(mir);
      LoadConstant(rl_dest.reg, static_cast<int>(special.d.data));
      return_mir = bb->GetNextUnconditionalMir(mir_graph_, mir);
      break;
    }
    case kInlineOpReturnArg:
      successful = GenSpecialIdentity(mir, special);
      return_mir = mir;
      break;
    case kInlineOpIGet:
      successful = GenSpecialIGet(mir, special);
      return_mir = bb->GetNextUnconditionalMir(mir_graph_, mir);
      break;
    case kInlineOpIPut:
      successful = GenSpecialIPut(mir, special);
      return_mir = bb->GetNextUnconditionalMir(mir_graph_, mir);
      break;
    default:
      break;
  }

  if (successful) {
    if (kIsDebugBuild) {
      // Clear unreachable catch entries.
      mir_graph_->catches_.clear();
    }

    // Handle verbosity for return MIR.
    if (return_mir != nullptr) {
      current_dalvik_offset_ = return_mir->offset;
      // Not handling special identity case because it already generated code as part
      // of the return. The label should have been added before any code was generated.
      if (special.opcode != kInlineOpReturnArg) {
        GenPrintLabel(return_mir);
      }
    }
    GenSpecialExitSequence();

    core_spill_mask_ = 0;
    num_core_spills_ = 0;
    fp_spill_mask_ = 0;
    num_fp_spills_ = 0;
    frame_size_ = 0;
    core_vmap_table_.clear();
    fp_vmap_table_.clear();
  }

  return successful;
}

/*
 * Target-independent code generation.  Use only high-level
 * load/store utilities here, or target-dependent genXX() handlers
 * when necessary.
 */
void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list) {
  RegLocation rl_src[3];
  RegLocation rl_dest = mir_graph_->GetBadLoc();
  RegLocation rl_result = mir_graph_->GetBadLoc();
  Instruction::Code opcode = mir->dalvikInsn.opcode;
  int opt_flags = mir->optimization_flags;
  uint32_t vB = mir->dalvikInsn.vB;
  uint32_t vC = mir->dalvikInsn.vC;
  DCHECK(CheckCorePoolSanity()) << PrettyMethod(cu_->method_idx, *cu_->dex_file) << " @ 0x:"
                                << std::hex << current_dalvik_offset_;

  // Prep Src and Dest locations.
  int next_sreg = 0;
  int next_loc = 0;
  uint64_t attrs = MIRGraph::GetDataFlowAttributes(opcode);
  rl_src[0] = rl_src[1] = rl_src[2] = mir_graph_->GetBadLoc();
  if (attrs & DF_UA) {
    if (attrs & DF_A_WIDE) {
      rl_src[next_loc++] = mir_graph_->GetSrcWide(mir, next_sreg);
      next_sreg += 2;
    } else {
      rl_src[next_loc++] = mir_graph_->GetSrc(mir, next_sreg);
      next_sreg++;
    }
  }
  if (attrs & DF_UB) {
    if (attrs & DF_B_WIDE) {
      rl_src[next_loc++] = mir_graph_->GetSrcWide(mir, next_sreg);
      next_sreg += 2;
    } else {
      rl_src[next_loc++] = mir_graph_->GetSrc(mir, next_sreg);
      next_sreg++;
    }
  }
  if (attrs & DF_UC) {
    if (attrs & DF_C_WIDE) {
      rl_src[next_loc++] = mir_graph_->GetSrcWide(mir, next_sreg);
    } else {
      rl_src[next_loc++] = mir_graph_->GetSrc(mir, next_sreg);
    }
  }
  if (attrs & DF_DA) {
    if (attrs & DF_A_WIDE) {
      rl_dest = mir_graph_->GetDestWide(mir);
    } else {
      rl_dest = mir_graph_->GetDest(mir);
    }
  }
  switch (opcode) {
    case Instruction::NOP:
      break;

    case Instruction::MOVE_EXCEPTION:
      GenMoveException(rl_dest);
      break;

    case Instruction::RETURN_VOID:
      if (((cu_->access_flags & kAccConstructor) != 0) &&
          cu_->compiler_driver->RequiresConstructorBarrier(Thread::Current(), cu_->dex_file,
                                                           cu_->class_def_idx)) {
        GenMemBarrier(kStoreStore);
      }
      if (!kLeafOptimization || !mir_graph_->MethodIsLeaf()) {
        GenSuspendTest(opt_flags);
      }
      break;

    case Instruction::RETURN_OBJECT:
      DCHECK(rl_src[0].ref);
      // Intentional fallthrough.
    case Instruction::RETURN:
      if (!kLeafOptimization || !mir_graph_->MethodIsLeaf()) {
        GenSuspendTest(opt_flags);
      }
      DCHECK_EQ(LocToRegClass(rl_src[0]), ShortyToRegClass(cu_->shorty[0]));
      StoreValue(GetReturn(LocToRegClass(rl_src[0])), rl_src[0]);
      break;

    case Instruction::RETURN_WIDE:
      if (!kLeafOptimization || !mir_graph_->MethodIsLeaf()) {
        GenSuspendTest(opt_flags);
      }
      DCHECK_EQ(LocToRegClass(rl_src[0]), ShortyToRegClass(cu_->shorty[0]));
      StoreValueWide(GetReturnWide(LocToRegClass(rl_src[0])), rl_src[0]);
      break;

    case Instruction::MOVE_RESULT_WIDE:
      if ((opt_flags & MIR_INLINED) != 0) {
        break;  // Nop - combined w/ previous invoke.
      }
      StoreValueWide(rl_dest, GetReturnWide(LocToRegClass(rl_dest)));
      break;

    case Instruction::MOVE_RESULT:
    case Instruction::MOVE_RESULT_OBJECT:
      if ((opt_flags & MIR_INLINED) != 0) {
        break;  // Nop - combined w/ previous invoke.
      }
      StoreValue(rl_dest, GetReturn(LocToRegClass(rl_dest)));
      break;

    case Instruction::MOVE:
    case Instruction::MOVE_OBJECT:
    case Instruction::MOVE_16:
    case Instruction::MOVE_OBJECT_16:
    case Instruction::MOVE_FROM16:
    case Instruction::MOVE_OBJECT_FROM16:
      StoreValue(rl_dest, rl_src[0]);
      break;

    case Instruction::MOVE_WIDE:
    case Instruction::MOVE_WIDE_16:
    case Instruction::MOVE_WIDE_FROM16:
      StoreValueWide(rl_dest, rl_src[0]);
      break;

    case Instruction::CONST:
    case Instruction::CONST_4:
    case Instruction::CONST_16:
      GenConst(rl_dest, vB);
      break;

    case Instruction::CONST_HIGH16:
      GenConst(rl_dest, vB << 16);
      break;

    case Instruction::CONST_WIDE_16:
    case Instruction::CONST_WIDE_32:
      GenConstWide(rl_dest, static_cast<int64_t>(static_cast<int32_t>(vB)));
      break;

    case Instruction::CONST_WIDE:
      GenConstWide(rl_dest, mir->dalvikInsn.vB_wide);
      break;

    case Instruction::CONST_WIDE_HIGH16:
      rl_result = EvalLoc(rl_dest, kAnyReg, true);
      LoadConstantWide(rl_result.reg, static_cast<int64_t>(vB) << 48);
      StoreValueWide(rl_dest, rl_result);
      break;

    case Instruction::MONITOR_ENTER:
      GenMonitorEnter(opt_flags, rl_src[0]);
      break;

    case Instruction::MONITOR_EXIT:
      GenMonitorExit(opt_flags, rl_src[0]);
      break;

    case Instruction::CHECK_CAST: {
      GenCheckCast(mir->offset, vB, rl_src[0]);
      break;
    }
    case Instruction::INSTANCE_OF:
      GenInstanceof(vC, rl_dest, rl_src[0]);
      break;

    case Instruction::NEW_INSTANCE:
      GenNewInstance(vB, rl_dest);
      break;

    case Instruction::THROW:
      GenThrow(rl_src[0]);
      break;

    case Instruction::ARRAY_LENGTH:
      int len_offset;
      len_offset = mirror::Array::LengthOffset().Int32Value();
      rl_src[0] = LoadValue(rl_src[0], kRefReg);
      GenNullCheck(rl_src[0].reg, opt_flags);
      rl_result = EvalLoc(rl_dest, kCoreReg, true);
      Load32Disp(rl_src[0].reg, len_offset, rl_result.reg);
      MarkPossibleNullPointerException(opt_flags);
      StoreValue(rl_dest, rl_result);
      break;

    case Instruction::CONST_STRING:
    case Instruction::CONST_STRING_JUMBO:
      GenConstString(vB, rl_dest);
      break;

    case Instruction::CONST_CLASS:
      GenConstClass(vB, rl_dest);
      break;

    case Instruction::FILL_ARRAY_DATA:
      GenFillArrayData(vB, rl_src[0]);
      break;

    case Instruction::FILLED_NEW_ARRAY:
      GenFilledNewArray(mir_graph_->NewMemCallInfo(bb, mir, kStatic,
                        false /* not range */));
      break;

    case Instruction::FILLED_NEW_ARRAY_RANGE:
      GenFilledNewArray(mir_graph_->NewMemCallInfo(bb, mir, kStatic,
                        true /* range */));
      break;

    case Instruction::NEW_ARRAY:
      GenNewArray(vC, rl_dest, rl_src[0]);
      break;

    case Instruction::GOTO:
    case Instruction::GOTO_16:
    case Instruction::GOTO_32:
      if (mir_graph_->IsBackedge(bb, bb->taken) &&
          (kLeafOptimization || !mir_graph_->HasSuspendTestBetween(bb, bb->taken))) {
        GenSuspendTestAndBranch(opt_flags, &label_list[bb->taken]);
      } else {
        OpUnconditionalBranch(&label_list[bb->taken]);
      }
      break;

    case Instruction::PACKED_SWITCH:
      GenPackedSwitch(mir, vB, rl_src[0]);
      break;

    case Instruction::SPARSE_SWITCH:
      GenSparseSwitch(mir, vB, rl_src[0]);
      break;

    case Instruction::CMPL_FLOAT:
    case Instruction::CMPG_FLOAT:
    case Instruction::CMPL_DOUBLE:
    case Instruction::CMPG_DOUBLE:
      GenCmpFP(opcode, rl_dest, rl_src[0], rl_src[1]);
      break;

    case Instruction::CMP_LONG:
      GenCmpLong(rl_dest, rl_src[0], rl_src[1]);
      break;

    case Instruction::IF_EQ:
    case Instruction::IF_NE:
    case Instruction::IF_LT:
    case Instruction::IF_GE:
    case Instruction::IF_GT:
    case Instruction::IF_LE: {
      LIR* taken = &label_list[bb->taken];
      LIR* fall_through = &label_list[bb->fall_through];
      // Result known at compile time?
      if (rl_src[0].is_const && rl_src[1].is_const) {
        bool is_taken = EvaluateBranch(opcode, mir_graph_->ConstantValue(rl_src[0].orig_sreg),
                                       mir_graph_->ConstantValue(rl_src[1].orig_sreg));
        BasicBlockId target_id = is_taken ? bb->taken : bb->fall_through;
        if (mir_graph_->IsBackedge(bb, target_id) &&
            (kLeafOptimization || !mir_graph_->HasSuspendTestBetween(bb, target_id))) {
          GenSuspendTest(opt_flags);
        }
        OpUnconditionalBranch(&label_list[target_id]);
      } else {
        if (mir_graph_->IsBackwardsBranch(bb) &&
            (kLeafOptimization || !mir_graph_->HasSuspendTestBetween(bb, bb->taken) ||
             !mir_graph_->HasSuspendTestBetween(bb, bb->fall_through))) {
          GenSuspendTest(opt_flags);
        }
        GenCompareAndBranch(opcode, rl_src[0], rl_src[1], taken, fall_through);
      }
      break;
    }

    case Instruction::IF_EQZ:
    case Instruction::IF_NEZ:
    case Instruction::IF_LTZ:
    case Instruction::IF_GEZ:
    case Instruction::IF_GTZ:
    case Instruction::IF_LEZ: {
      LIR* taken = &label_list[bb->taken];
      LIR* fall_through = &label_list[bb->fall_through];
      // Result known at compile time?
      if (rl_src[0].is_const) {
        bool is_taken = EvaluateBranch(opcode, mir_graph_->ConstantValue(rl_src[0].orig_sreg), 0);
        BasicBlockId target_id = is_taken ? bb->taken : bb->fall_through;
        if (mir_graph_->IsBackedge(bb, target_id) &&
            (kLeafOptimization || !mir_graph_->HasSuspendTestBetween(bb, target_id))) {
          GenSuspendTest(opt_flags);
        }
        OpUnconditionalBranch(&label_list[target_id]);
      } else {
        if (mir_graph_->IsBackwardsBranch(bb) &&
            (kLeafOptimization || !mir_graph_->HasSuspendTestBetween(bb, bb->taken) ||
             !mir_graph_->HasSuspendTestBetween(bb, bb->fall_through))) {
          GenSuspendTest(opt_flags);
        }
        GenCompareZeroAndBranch(opcode, rl_src[0], taken, fall_through);
      }
      break;
    }

    case Instruction::AGET_WIDE:
      GenArrayGet(opt_flags, k64, rl_src[0], rl_src[1], rl_dest, 3);
      break;
    case Instruction::AGET_OBJECT:
      GenArrayGet(opt_flags, kReference, rl_src[0], rl_src[1], rl_dest, 2);
      break;
    case Instruction::AGET:
      GenArrayGet(opt_flags, k32, rl_src[0], rl_src[1], rl_dest, 2);
      break;
    case Instruction::AGET_BOOLEAN:
      GenArrayGet(opt_flags, kUnsignedByte, rl_src[0], rl_src[1], rl_dest, 0);
      break;
    case Instruction::AGET_BYTE:
      GenArrayGet(opt_flags, kSignedByte, rl_src[0], rl_src[1], rl_dest, 0);
      break;
    case Instruction::AGET_CHAR:
      GenArrayGet(opt_flags, kUnsignedHalf, rl_src[0], rl_src[1], rl_dest, 1);
      break;
    case Instruction::AGET_SHORT:
      GenArrayGet(opt_flags, kSignedHalf, rl_src[0], rl_src[1], rl_dest, 1);
      break;
    case Instruction::APUT_WIDE:
      GenArrayPut(opt_flags, k64, rl_src[1], rl_src[2], rl_src[0], 3, false);
      break;
    case Instruction::APUT:
      GenArrayPut(opt_flags, k32, rl_src[1], rl_src[2], rl_src[0], 2, false);
      break;
    case Instruction::APUT_OBJECT: {
      bool is_null = mir_graph_->IsConstantNullRef(rl_src[0]);
      bool is_safe = is_null;  // Always safe to store null.
      if (!is_safe) {
        // Check safety from verifier type information.
        const DexCompilationUnit* unit = mir_graph_->GetCurrentDexCompilationUnit();
        is_safe = cu_->compiler_driver->IsSafeCast(unit, mir->offset);
      }
      if (is_null || is_safe) {
        // Store of constant null doesn't require an assignability test and can be generated inline
        // without fixed register usage or a card mark.
        GenArrayPut(opt_flags, kReference, rl_src[1], rl_src[2], rl_src[0], 2, !is_null);
      } else {
        GenArrayObjPut(opt_flags, rl_src[1], rl_src[2], rl_src[0]);
      }
      break;
    }
    case Instruction::APUT_SHORT:
    case Instruction::APUT_CHAR:
      GenArrayPut(opt_flags, kUnsignedHalf, rl_src[1], rl_src[2], rl_src[0], 1, false);
      break;
    case Instruction::APUT_BYTE:
    case Instruction::APUT_BOOLEAN:
      GenArrayPut(opt_flags, kUnsignedByte, rl_src[1], rl_src[2], rl_src[0], 0, false);
      break;

    case Instruction::IGET_OBJECT:
      GenIGet(mir, opt_flags, kReference, rl_dest, rl_src[0], false, true);
      break;

    case Instruction::IGET_WIDE:
      GenIGet(mir, opt_flags, k64, rl_dest, rl_src[0], true, false);
      break;

    case Instruction::IGET:
      GenIGet(mir, opt_flags, k32, rl_dest, rl_src[0], false, false);
      break;

    case Instruction::IGET_CHAR:
      GenIGet(mir, opt_flags, kUnsignedHalf, rl_dest, rl_src[0], false, false);
      break;

    case Instruction::IGET_SHORT:
      GenIGet(mir, opt_flags, kSignedHalf, rl_dest, rl_src[0], false, false);
      break;

    case Instruction::IGET_BOOLEAN:
    case Instruction::IGET_BYTE:
      GenIGet(mir, opt_flags, kUnsignedByte, rl_dest, rl_src[0], false, false);
      break;

    case Instruction::IPUT_WIDE:
      GenIPut(mir, opt_flags, k64, rl_src[0], rl_src[1], true, false);
      break;

    case Instruction::IPUT_OBJECT:
      GenIPut(mir, opt_flags, kReference, rl_src[0], rl_src[1], false, true);
      break;

    case Instruction::IPUT:
      GenIPut(mir, opt_flags, k32, rl_src[0], rl_src[1], false, false);
      break;

    case Instruction::IPUT_BOOLEAN:
    case Instruction::IPUT_BYTE:
      GenIPut(mir, opt_flags, kUnsignedByte, rl_src[0], rl_src[1], false, false);
      break;

    case Instruction::IPUT_CHAR:
      GenIPut(mir, opt_flags, kUnsignedHalf, rl_src[0], rl_src[1], false, false);
      break;

    case Instruction::IPUT_SHORT:
      GenIPut(mir, opt_flags, kSignedHalf, rl_src[0], rl_src[1], false, false);
      break;

    case Instruction::SGET_OBJECT:
      GenSget(mir, rl_dest, false, true);
      break;
    case Instruction::SGET:
    case Instruction::SGET_BOOLEAN:
    case Instruction::SGET_BYTE:
    case Instruction::SGET_CHAR:
    case Instruction::SGET_SHORT:
      GenSget(mir, rl_dest, false, false);
      break;

    case Instruction::SGET_WIDE:
      GenSget(mir, rl_dest, true, false);
      break;

    case Instruction::SPUT_OBJECT:
      GenSput(mir, rl_src[0], false, true);
      break;

    case Instruction::SPUT:
    case Instruction::SPUT_BOOLEAN:
    case Instruction::SPUT_BYTE:
    case Instruction::SPUT_CHAR:
    case Instruction::SPUT_SHORT:
      GenSput(mir, rl_src[0], false, false);
      break;

    case Instruction::SPUT_WIDE:
      GenSput(mir, rl_src[0], true, false);
      break;

    case Instruction::INVOKE_STATIC_RANGE:
      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kStatic, true));
      if (!kLeafOptimization && (opt_flags & MIR_INLINED) == 0) {
        // If the invocation is not inlined, we can assume there is already a
        // suspend check at the return site.
        mir_graph_->AppendGenSuspendTestList(bb);
      }
      break;
    case Instruction::INVOKE_STATIC:
      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kStatic, false));
      if (!kLeafOptimization && (opt_flags & MIR_INLINED) == 0) {
        mir_graph_->AppendGenSuspendTestList(bb);
      }
      break;

    case Instruction::INVOKE_DIRECT:
      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kDirect, false));
      if (!kLeafOptimization && (opt_flags & MIR_INLINED) == 0) {
        mir_graph_->AppendGenSuspendTestList(bb);
      }
      break;
    case Instruction::INVOKE_DIRECT_RANGE:
      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kDirect, true));
      if (!kLeafOptimization && (opt_flags & MIR_INLINED) == 0) {
        mir_graph_->AppendGenSuspendTestList(bb);
      }
      break;

    case Instruction::INVOKE_VIRTUAL:
      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kVirtual, false));
      if (!kLeafOptimization && (opt_flags & MIR_INLINED) == 0) {
        mir_graph_->AppendGenSuspendTestList(bb);
      }
      break;
    case Instruction::INVOKE_VIRTUAL_RANGE:
      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kVirtual, true));
      if (!kLeafOptimization && (opt_flags & MIR_INLINED) == 0) {
        mir_graph_->AppendGenSuspendTestList(bb);
      }
      break;

    case Instruction::INVOKE_SUPER:
      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kSuper, false));
      if (!kLeafOptimization && (opt_flags & MIR_INLINED) == 0) {
        mir_graph_->AppendGenSuspendTestList(bb);
      }
      break;
    case Instruction::INVOKE_SUPER_RANGE:
      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kSuper, true));
      if (!kLeafOptimization && (opt_flags & MIR_INLINED) == 0) {
        mir_graph_->AppendGenSuspendTestList(bb);
      }
      break;

    case Instruction::INVOKE_INTERFACE:
      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kInterface, false));
      if (!kLeafOptimization && (opt_flags & MIR_INLINED) == 0) {
        mir_graph_->AppendGenSuspendTestList(bb);
      }
      break;
    case Instruction::INVOKE_INTERFACE_RANGE:
      GenInvoke(mir_graph_->NewMemCallInfo(bb, mir, kInterface, true));
      if (!kLeafOptimization && (opt_flags & MIR_INLINED) == 0) {
        mir_graph_->AppendGenSuspendTestList(bb);
      }
      break;

    case Instruction::NEG_INT:
    case Instruction::NOT_INT:
      GenArithOpInt(opcode, rl_dest, rl_src[0], rl_src[0]);
      break;

    case Instruction::NEG_LONG:
    case Instruction::NOT_LONG:
      GenArithOpLong(opcode, rl_dest, rl_src[0], rl_src[0]);
      break;

    case Instruction::NEG_FLOAT:
      GenArithOpFloat(opcode, rl_dest, rl_src[0], rl_src[0]);
      break;

    case Instruction::NEG_DOUBLE:
      GenArithOpDouble(opcode, rl_dest, rl_src[0], rl_src[0]);
      break;

    case Instruction::INT_TO_LONG:
      GenIntToLong(rl_dest, rl_src[0]);
      break;

    case Instruction::LONG_TO_INT:
      rl_src[0] = UpdateLocWide(rl_src[0]);
      rl_src[0] = NarrowRegLoc(rl_src[0]);
      StoreValue(rl_dest, rl_src[0]);
      break;

    case Instruction::INT_TO_BYTE:
    case Instruction::INT_TO_SHORT:
    case Instruction::INT_TO_CHAR:
      GenIntNarrowing(opcode, rl_dest, rl_src[0]);
      break;

    case Instruction::INT_TO_FLOAT:
    case Instruction::INT_TO_DOUBLE:
    case Instruction::LONG_TO_FLOAT:
    case Instruction::LONG_TO_DOUBLE:
    case Instruction::FLOAT_TO_INT:
    case Instruction::FLOAT_TO_LONG:
    case Instruction::FLOAT_TO_DOUBLE:
    case Instruction::DOUBLE_TO_INT:
    case Instruction::DOUBLE_TO_LONG:
    case Instruction::DOUBLE_TO_FLOAT:
      GenConversion(opcode, rl_dest, rl_src[0]);
      break;

    case Instruction::ADD_INT:
    case Instruction::ADD_INT_2ADDR:
    case Instruction::MUL_INT:
    case Instruction::MUL_INT_2ADDR:
    case Instruction::AND_INT:
    case Instruction::AND_INT_2ADDR:
    case Instruction::OR_INT:
    case Instruction::OR_INT_2ADDR:
    case Instruction::XOR_INT:
    case Instruction::XOR_INT_2ADDR:
      if (rl_src[0].is_const &&
          InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src[0]), opcode)) {
        GenArithOpIntLit(opcode, rl_dest, rl_src[1],
                         mir_graph_->ConstantValue(rl_src[0].orig_sreg));
      } else if (rl_src[1].is_const &&
                 InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src[1]), opcode)) {
        GenArithOpIntLit(opcode, rl_dest, rl_src[0],
                         mir_graph_->ConstantValue(rl_src[1].orig_sreg));
      } else {
        GenArithOpInt(opcode, rl_dest, rl_src[0], rl_src[1]);
      }
      break;

    case Instruction::SUB_INT:
    case Instruction::SUB_INT_2ADDR:
    case Instruction::DIV_INT:
    case Instruction::DIV_INT_2ADDR:
    case Instruction::REM_INT:
    case Instruction::REM_INT_2ADDR:
    case Instruction::SHL_INT:
    case Instruction::SHL_INT_2ADDR:
    case Instruction::SHR_INT:
    case Instruction::SHR_INT_2ADDR:
    case Instruction::USHR_INT:
    case Instruction::USHR_INT_2ADDR:
      if (rl_src[1].is_const &&
          InexpensiveConstantInt(mir_graph_->ConstantValue(rl_src[1]), opcode)) {
        GenArithOpIntLit(opcode, rl_dest, rl_src[0], mir_graph_->ConstantValue(rl_src[1]));
      } else {
        GenArithOpInt(opcode, rl_dest, rl_src[0], rl_src[1]);
      }
      break;

    case Instruction::ADD_LONG:
    case Instruction::SUB_LONG:
    case Instruction::AND_LONG:
    case Instruction::OR_LONG:
    case Instruction::XOR_LONG:
    case Instruction::ADD_LONG_2ADDR:
    case Instruction::SUB_LONG_2ADDR:
    case Instruction::AND_LONG_2ADDR:
    case Instruction::OR_LONG_2ADDR:
    case Instruction::XOR_LONG_2ADDR:
      if (rl_src[0].is_const || rl_src[1].is_const) {
        GenArithImmOpLong(opcode, rl_dest, rl_src[0], rl_src[1]);
        break;
      }
      // Note: intentional fallthrough.

    case Instruction::MUL_LONG:
    case Instruction::DIV_LONG:
    case Instruction::REM_LONG:
    case Instruction::MUL_LONG_2ADDR:
    case Instruction::DIV_LONG_2ADDR:
    case Instruction::REM_LONG_2ADDR:
      GenArithOpLong(opcode, rl_dest, rl_src[0], rl_src[1]);
      break;

    case Instruction::SHL_LONG:
    case Instruction::SHR_LONG:
    case Instruction::USHR_LONG:
    case Instruction::SHL_LONG_2ADDR:
    case Instruction::SHR_LONG_2ADDR:
    case Instruction::USHR_LONG_2ADDR:
      if (rl_src[1].is_const) {
        GenShiftImmOpLong(opcode, rl_dest, rl_src[0], rl_src[1]);
      } else {
        GenShiftOpLong(opcode, rl_dest, rl_src[0], rl_src[1]);
      }
      break;

    case Instruction::ADD_FLOAT:
    case Instruction::SUB_FLOAT:
    case Instruction::MUL_FLOAT:
    case Instruction::DIV_FLOAT:
    case Instruction::REM_FLOAT:
    case Instruction::ADD_FLOAT_2ADDR:
    case Instruction::SUB_FLOAT_2ADDR:
    case Instruction::MUL_FLOAT_2ADDR:
    case Instruction::DIV_FLOAT_2ADDR:
    case Instruction::REM_FLOAT_2ADDR:
      GenArithOpFloat(opcode, rl_dest, rl_src[0], rl_src[1]);
      break;

    case Instruction::ADD_DOUBLE:
    case Instruction::SUB_DOUBLE:
    case Instruction::MUL_DOUBLE:
    case Instruction::DIV_DOUBLE:
    case Instruction::REM_DOUBLE:
    case Instruction::ADD_DOUBLE_2ADDR:
    case Instruction::SUB_DOUBLE_2ADDR:
    case Instruction::MUL_DOUBLE_2ADDR:
    case Instruction::DIV_DOUBLE_2ADDR:
    case Instruction::REM_DOUBLE_2ADDR:
      GenArithOpDouble(opcode, rl_dest, rl_src[0], rl_src[1]);
      break;

    case Instruction::RSUB_INT:
    case Instruction::ADD_INT_LIT16:
    case Instruction::MUL_INT_LIT16:
    case Instruction::DIV_INT_LIT16:
    case Instruction::REM_INT_LIT16:
    case Instruction::AND_INT_LIT16:
    case Instruction::OR_INT_LIT16:
    case Instruction::XOR_INT_LIT16:
    case Instruction::ADD_INT_LIT8:
    case Instruction::RSUB_INT_LIT8:
    case Instruction::MUL_INT_LIT8:
    case Instruction::DIV_INT_LIT8:
    case Instruction::REM_INT_LIT8:
    case Instruction::AND_INT_LIT8:
    case Instruction::OR_INT_LIT8:
    case Instruction::XOR_INT_LIT8:
    case Instruction::SHL_INT_LIT8:
    case Instruction::SHR_INT_LIT8:
    case Instruction::USHR_INT_LIT8:
      GenArithOpIntLit(opcode, rl_dest, rl_src[0], vC);
      break;

    default:
      LOG(FATAL) << "Unexpected opcode: " << opcode;
  }
  DCHECK(CheckCorePoolSanity());
}  // NOLINT(readability/fn_size)

// Process extended MIR instructions.
void Mir2Lir::HandleExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
  switch (static_cast<ExtendedMIROpcode>(mir->dalvikInsn.opcode)) {
    case kMirOpCopy: {
      RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
      RegLocation rl_dest = mir_graph_->GetDest(mir);
      StoreValue(rl_dest, rl_src);
      break;
    }
    case kMirOpFusedCmplFloat:
      GenFusedFPCmpBranch(bb, mir, false /*gt bias*/, false /*double*/);
      break;
    case kMirOpFusedCmpgFloat:
      GenFusedFPCmpBranch(bb, mir, true /*gt bias*/, false /*double*/);
      break;
    case kMirOpFusedCmplDouble:
      GenFusedFPCmpBranch(bb, mir, false /*gt bias*/, true /*double*/);
      break;
    case kMirOpFusedCmpgDouble:
      GenFusedFPCmpBranch(bb, mir, true /*gt bias*/, true /*double*/);
      break;
    case kMirOpFusedCmpLong:
      GenFusedLongCmpBranch(bb, mir);
      break;
    case kMirOpSelect:
      GenSelect(bb, mir);
      break;
    case kMirOpPhi:
    case kMirOpNop:
    case kMirOpNullCheck:
    case kMirOpRangeCheck:
    case kMirOpDivZeroCheck:
    case kMirOpCheck:
    case kMirOpCheckPart2:
      // Ignore these known opcodes.
      break;
    default:
      // Give the backends a chance to handle unknown extended MIR opcodes.
      GenMachineSpecificExtendedMethodMIR(bb, mir);
      break;
  }
}

void Mir2Lir::GenPrintLabel(MIR* mir) {
  // Mark the beginning of a Dalvik instruction for line tracking.
  if (cu_->verbose) {
    char* inst_str = mir_graph_->GetDalvikDisassembly(mir);
    MarkBoundary(mir->offset, inst_str);
  }
}

// Handle the content in each basic block.
bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb) {
  if (bb->block_type == kDead) return false;
  current_dalvik_offset_ = bb->start_offset;
  MIR* mir;
  int block_id = bb->id;

  block_label_list_[block_id].operands[0] = bb->start_offset;

  // Insert the block label.
  block_label_list_[block_id].opcode = kPseudoNormalBlockLabel;
  block_label_list_[block_id].flags.fixup = kFixupLabel;
  AppendLIR(&block_label_list_[block_id]);

  LIR* head_lir = NULL;

  // If this is a catch block, export the start address.
  if (bb->catch_entry) {
    head_lir = NewLIR0(kPseudoExportedPC);
  }

  // Free temp registers and reset redundant store tracking.
  ClobberAllTemps();

  if (bb->block_type == kEntryBlock) {
    ResetRegPool();
    int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
    GenEntrySequence(&mir_graph_->reg_location_[start_vreg],
                     mir_graph_->reg_location_[mir_graph_->GetMethodSReg()]);
  } else if (bb->block_type == kExitBlock) {
    ResetRegPool();
    GenExitSequence();
  }

  for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
    ResetRegPool();
    if (cu_->disable_opt & (1 << kTrackLiveTemps)) {
      ClobberAllTemps();
      // Reset temp allocation to minimize differences when A/B testing.
      reg_pool_->ResetNextTemp();
    }

    if (cu_->disable_opt & (1 << kSuppressLoads)) {
      ResetDefTracking();
    }

    // Reset temp tracking sanity check.
    if (kIsDebugBuild) {
      live_sreg_ = INVALID_SREG;
    }

    current_dalvik_offset_ = mir->offset;
    int opcode = mir->dalvikInsn.opcode;

    GenPrintLabel(mir);

    // Remember the first LIR for this block.
    if (head_lir == NULL) {
      head_lir = &block_label_list_[bb->id];
      // Set the first label as a scheduling barrier.
      DCHECK(!head_lir->flags.use_def_invalid);
      head_lir->u.m.def_mask = &kEncodeAll;
    }

    if (opcode == kMirOpCheck) {
      // Combine check and work halves of throwing instruction.
      MIR* work_half = mir->meta.throw_insn;
      mir->dalvikInsn.opcode = work_half->dalvikInsn.opcode;
      mir->meta = work_half->meta;  // Whatever the work_half had, we need to copy it.
      opcode = work_half->dalvikInsn.opcode;
      SSARepresentation* ssa_rep = work_half->ssa_rep;
      work_half->ssa_rep = mir->ssa_rep;
      mir->ssa_rep = ssa_rep;
      work_half->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpCheckPart2);
      work_half->meta.throw_insn = mir;
    }

    if (MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
      HandleExtendedMethodMIR(bb, mir);
      continue;
    }

    CompileDalvikInstruction(mir, bb, block_label_list_);
  }

  if (head_lir) {
    // Eliminate redundant loads/stores and delay stores into later slots.
    ApplyLocalOptimizations(head_lir, last_lir_insn_);
  }
  return false;
}

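// Compile a method recognized as a special case: find the first DalvikByteCode block in DFS
// order and generate the special-case code from its first instruction.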
bool Mir2Lir::SpecialMIR2LIR(const InlineMethod& special) {
  cu_->NewTimingSplit("SpecialMIR2LIR");
  // Find the first DalvikByteCode block.
  int num_reachable_blocks = mir_graph_->GetNumReachableBlocks();
  BasicBlock* bb = NULL;
  for (int idx = 0; idx < num_reachable_blocks; idx++) {
    // TODO: no direct access of growable lists.
    int dfs_index = mir_graph_->GetDfsOrder()->Get(idx);
    bb = mir_graph_->GetBasicBlock(dfs_index);
    if (bb->block_type == kDalvikByteCode) {
      break;
    }
  }
  if (bb == NULL) {
    return false;
  }
  DCHECK_EQ(bb->start_offset, 0);
  DCHECK(bb->first_mir_insn != NULL);

  // Get the first instruction.
  MIR* mir = bb->first_mir_insn;

  // Free temp registers and reset redundant store tracking.
  ResetRegPool();
  ResetDefTracking();
  ClobberAllTemps();

  return GenSpecialCase(bb, mir, special);
}

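// Convert the method's MIR to LIR, visiting blocks in pre-order DFS layout order and inserting
// an unconditional branch wherever a fall-through successor is not laid out next; finish by
// generating any pending slow paths.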
void Mir2Lir::MethodMIR2LIR() {
  cu_->NewTimingSplit("MIR2LIR");

  // Hold the labels of each block.
  block_label_list_ =
      static_cast<LIR*>(arena_->Alloc(sizeof(LIR) * mir_graph_->GetNumBlocks(),
                                      kArenaAllocLIR));

  PreOrderDfsIterator iter(mir_graph_);
  BasicBlock* curr_bb = iter.Next();
  BasicBlock* next_bb = iter.Next();
  while (curr_bb != NULL) {
    MethodBlockCodeGen(curr_bb);
    // If the fall_through block is no longer laid out consecutively, drop in a branch.
    BasicBlock* curr_bb_fall_through = mir_graph_->GetBasicBlock(curr_bb->fall_through);
    if ((curr_bb_fall_through != NULL) && (curr_bb_fall_through != next_bb)) {
      OpUnconditionalBranch(&block_label_list_[curr_bb->fall_through]);
    }
    curr_bb = next_bb;
    do {
      next_bb = iter.Next();
    } while ((next_bb != NULL) && (next_bb->block_type == kDead));
  }
  HandleSlowPaths();
}

//
// LIR Slow Path
//

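// Emit and return the label that begins this slow path, updating the current dex pc and
// pointing the fast path's branch at the new label.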
LIR* Mir2Lir::LIRSlowPath::GenerateTargetLabel(int opcode) {
  m2l_->SetCurrentDexPc(current_dex_pc_);
  LIR* target = m2l_->NewLIR0(opcode);
  fromfast_->target = target;
  return target;
}

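// Verify that |rs| satisfies the requested wideness/reference/FP constraints. Depending on
// |fail| and |report|, a violation either CHECK-fails or logs a warning.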
void Mir2Lir::CheckRegStorageImpl(RegStorage rs, WidenessCheck wide, RefCheck ref, FPCheck fp,
                                  bool fail, bool report) const {
  if (rs.Valid()) {
    if (ref == RefCheck::kCheckRef) {
      if (cu_->target64 && !rs.Is64Bit()) {
        if (fail) {
          CHECK(false) << "Reg storage not 64b for ref.";
        } else if (report) {
          LOG(WARNING) << "Reg storage not 64b for ref.";
        }
      }
    }
    if (wide == WidenessCheck::kCheckWide) {
      if (!rs.Is64Bit()) {
        if (fail) {
          CHECK(false) << "Reg storage not 64b for wide.";
        } else if (report) {
          LOG(WARNING) << "Reg storage not 64b for wide.";
        }
      }
    }
    // A tighter check would be nice, but for now soft-float will not check float at all.
    if (fp == FPCheck::kCheckFP && cu_->instruction_set != kArm) {
      if (!rs.IsFloat()) {
        if (fail) {
          CHECK(false) << "Reg storage not float for fp.";
        } else if (report) {
          LOG(WARNING) << "Reg storage not float for fp.";
        }
      }
    } else if (fp == FPCheck::kCheckNotFP) {
      if (rs.IsFloat()) {
        if (fail) {
          CHECK(false) << "Reg storage float for not-fp.";
        } else if (report) {
          LOG(WARNING) << "Reg storage float for not-fp.";
        }
      }
    }
  }
}

void Mir2Lir::CheckRegLocationImpl(RegLocation rl, bool fail, bool report) const {
  // Regrettably can't use the fp part of rl, as that is not really indicative of where a value
  // will be stored.
  CheckRegStorageImpl(rl.reg, rl.wide ? WidenessCheck::kCheckWide : WidenessCheck::kCheckNotWide,
      rl.ref ? RefCheck::kCheckRef : RefCheck::kCheckNotRef, FPCheck::kIgnoreFP, fail, report);
}

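// Default stub for targets that do not provide instruction offsets; targets that support them
// are expected to override this.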
size_t Mir2Lir::GetInstructionOffset(LIR* lir) {
  UNIMPLEMENTED(FATAL) << "Unsupported GetInstructionOffset()";
  return 0;
}

}  // namespace art