/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mir_to_lir-inl.h"

#include "arm/codegen_arm.h"
#include "dex/compiler_ir.h"
#include "dex/dex_flags.h"
#include "dex/mir_graph.h"
#include "dex/quick/dex_file_method_inliner.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
#include "dex_file-inl.h"
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "invoke_type.h"
#include "mirror/array.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache.h"
#include "mirror/object_array-inl.h"
#include "mirror/string.h"
#include "scoped_thread_state_change.h"

namespace art {

// Shortcuts to repeatedly used long types.
typedef mirror::ObjectArray<mirror::Object> ObjArray;

/*
 * This source file contains "gen" codegen routines that should
 * be applicable to most targets.  Only mid-level support utilities
 * and "op" calls may be used here.
 */

void Mir2Lir::AddIntrinsicSlowPath(CallInfo* info, LIR* branch, LIR* resume) {
  class IntrinsicSlowPathPath : public Mir2Lir::LIRSlowPath {
   public:
    IntrinsicSlowPathPath(Mir2Lir* m2l, CallInfo* info_in, LIR* branch_in, LIR* resume_in)
        : LIRSlowPath(m2l, branch_in, resume_in), info_(info_in) {
      DCHECK_EQ(info_in->offset, current_dex_pc_);
    }

    void Compile() {
      m2l_->ResetRegPool();
      m2l_->ResetDefTracking();
      GenerateTargetLabel(kPseudoIntrinsicRetry);
      // NOTE: GenInvokeNoInline() handles MarkSafepointPC.
      m2l_->GenInvokeNoInline(info_);
      if (cont_ != nullptr) {
        m2l_->OpUnconditionalBranch(cont_);
      }
    }

   private:
    CallInfo* const info_;
  };

  AddSlowPath(new (arena_) IntrinsicSlowPathPath(this, info, branch, resume));
}

/*
 * To save scheduling time, helper calls are broken into two parts: generation of
 * the helper target address, and the actual call to the helper.  Because x86
 * has a memory call operation, part 1 is a NOP for x86.  For other targets,
 * load arguments between the two parts.
 */
// template <size_t pointer_size>
RegStorage Mir2Lir::CallHelperSetup(QuickEntrypointEnum trampoline) {
  if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
    return RegStorage::InvalidReg();
  } else {
    return LoadHelper(trampoline);
  }
}

LIR* Mir2Lir::CallHelper(RegStorage r_tgt, QuickEntrypointEnum trampoline, bool safepoint_pc,
                         bool use_link) {
  LIR* call_inst = InvokeTrampoline(use_link ? kOpBlx : kOpBx, r_tgt, trampoline);

  if (r_tgt.Valid()) {
    FreeTemp(r_tgt);
  }

  if (safepoint_pc) {
    MarkSafepointPC(call_inst);
  }
  return call_inst;
}

void Mir2Lir::CallRuntimeHelper(QuickEntrypointEnum trampoline, bool safepoint_pc) {
  RegStorage r_tgt = CallHelperSetup(trampoline);
  ClobberCallerSave();
  CallHelper(r_tgt, trampoline, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImm(QuickEntrypointEnum trampoline, int arg0, bool safepoint_pc) {
  RegStorage r_tgt = CallHelperSetup(trampoline);
  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
  ClobberCallerSave();
  CallHelper(r_tgt, trampoline, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperReg(QuickEntrypointEnum trampoline, RegStorage arg0,
                                   bool safepoint_pc) {
  RegStorage r_tgt = CallHelperSetup(trampoline);
  OpRegCopy(TargetReg(kArg0, arg0.GetWideKind()), arg0);
  ClobberCallerSave();
  CallHelper(r_tgt, trampoline, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegLocation(QuickEntrypointEnum trampoline, RegLocation arg0,
                                           bool safepoint_pc) {
  RegStorage r_tgt = CallHelperSetup(trampoline);
  if (arg0.wide == 0) {
    LoadValueDirectFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, arg0));
  } else {
    LoadValueDirectWideFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, kWide));
  }
  ClobberCallerSave();
  CallHelper(r_tgt, trampoline, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmImm(QuickEntrypointEnum trampoline, int arg0, int arg1,
                                      bool safepoint_pc) {
  RegStorage r_tgt = CallHelperSetup(trampoline);
  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
  LoadConstant(TargetReg(kArg1, kNotWide), arg1);
  ClobberCallerSave();
  CallHelper(r_tgt, trampoline, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmRegLocation(QuickEntrypointEnum trampoline, int arg0,
                                              RegLocation arg1, bool safepoint_pc) {
  RegStorage r_tgt = CallHelperSetup(trampoline);
  if (arg1.wide == 0) {
    LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
  } else {
    RegStorage r_tmp = TargetReg(cu_->instruction_set == kMips ? kArg2 : kArg1, kWide);
    LoadValueDirectWideFixed(arg1, r_tmp);
  }
  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
  ClobberCallerSave();
  CallHelper(r_tgt, trampoline, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegLocationImm(QuickEntrypointEnum trampoline, RegLocation arg0,
                                              int arg1, bool safepoint_pc) {
  RegStorage r_tgt = CallHelperSetup(trampoline);
  DCHECK(!arg0.wide);
  LoadValueDirectFixed(arg0, TargetReg(kArg0, arg0));
  LoadConstant(TargetReg(kArg1, kNotWide), arg1);
  ClobberCallerSave();
  CallHelper(r_tgt, trampoline, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmReg(QuickEntrypointEnum trampoline, int arg0, RegStorage arg1,
                                      bool safepoint_pc) {
  RegStorage r_tgt = CallHelperSetup(trampoline);
  OpRegCopy(TargetReg(kArg1, arg1.GetWideKind()), arg1);
  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
  ClobberCallerSave();
  CallHelper(r_tgt, trampoline, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegImm(QuickEntrypointEnum trampoline, RegStorage arg0, int arg1,
                                      bool safepoint_pc) {
  RegStorage r_tgt = CallHelperSetup(trampoline);
  OpRegCopy(TargetReg(kArg0, arg0.GetWideKind()), arg0);
  LoadConstant(TargetReg(kArg1, kNotWide), arg1);
  ClobberCallerSave();
  CallHelper(r_tgt, trampoline, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmMethod(QuickEntrypointEnum trampoline, int arg0,
                                         bool safepoint_pc) {
  RegStorage r_tgt = CallHelperSetup(trampoline);
  LoadCurrMethodDirect(TargetReg(kArg1, kRef));
  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
  ClobberCallerSave();
  CallHelper(r_tgt, trampoline, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegMethod(QuickEntrypointEnum trampoline, RegStorage arg0,
                                         bool safepoint_pc) {
  RegStorage r_tgt = CallHelperSetup(trampoline);
  DCHECK(!IsSameReg(TargetReg(kArg1, arg0.GetWideKind()), arg0));
  RegStorage r_tmp = TargetReg(kArg0, arg0.GetWideKind());
  if (r_tmp.NotExactlyEquals(arg0)) {
    OpRegCopy(r_tmp, arg0);
  }
  LoadCurrMethodDirect(TargetReg(kArg1, kRef));
  ClobberCallerSave();
  CallHelper(r_tgt, trampoline, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegRegLocationMethod(QuickEntrypointEnum trampoline, RegStorage arg0,
                                                    RegLocation arg1, bool safepoint_pc) {
  RegStorage r_tgt = CallHelperSetup(trampoline);
  DCHECK(!IsSameReg(TargetReg(kArg2, arg0.GetWideKind()), arg0));
  RegStorage r_tmp = TargetReg(kArg0, arg0.GetWideKind());
  if (r_tmp.NotExactlyEquals(arg0)) {
    OpRegCopy(r_tmp, arg0);
  }
  LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
  LoadCurrMethodDirect(TargetReg(kArg2, kRef));
  ClobberCallerSave();
  CallHelper(r_tgt, trampoline, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(QuickEntrypointEnum trampoline,
                                                      RegLocation arg0, RegLocation arg1,
                                                      bool safepoint_pc) {
  RegStorage r_tgt = CallHelperSetup(trampoline);
  if (cu_->instruction_set == kArm64 || cu_->instruction_set == kMips64 ||
      cu_->instruction_set == kX86_64) {
    RegStorage arg0_reg = TargetReg((arg0.fp) ? kFArg0 : kArg0, arg0);

    RegStorage arg1_reg;
    if (arg1.fp == arg0.fp) {
      arg1_reg = TargetReg((arg1.fp) ? kFArg1 : kArg1, arg1);
    } else {
      arg1_reg = TargetReg((arg1.fp) ? kFArg0 : kArg0, arg1);
    }

    if (arg0.wide == 0) {
      LoadValueDirectFixed(arg0, arg0_reg);
    } else {
      LoadValueDirectWideFixed(arg0, arg0_reg);
    }

    if (arg1.wide == 0) {
      LoadValueDirectFixed(arg1, arg1_reg);
    } else {
      LoadValueDirectWideFixed(arg1, arg1_reg);
    }
  } else {
    DCHECK(!cu_->target64);
    if (arg0.wide == 0) {
      LoadValueDirectFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, kNotWide));
      if (arg1.wide == 0) {
        // For Mips, when the 1st arg is integral, the remaining args are passed in core regs.
        if (cu_->instruction_set == kMips) {
          LoadValueDirectFixed(arg1, TargetReg((arg1.fp && arg0.fp) ? kFArg2 : kArg1, kNotWide));
        } else {
          LoadValueDirectFixed(arg1, TargetReg(arg1.fp ? kFArg1 : kArg1, kNotWide));
        }
      } else {
        // For Mips, when the 1st arg is integral, the remaining args are passed in core regs.
        if (cu_->instruction_set == kMips) {
          LoadValueDirectWideFixed(arg1, TargetReg((arg1.fp && arg0.fp) ? kFArg2 : kArg2, kWide));
        } else {
          LoadValueDirectWideFixed(arg1, TargetReg(arg1.fp ? kFArg1 : kArg1, kWide));
        }
      }
    } else {
      LoadValueDirectWideFixed(arg0, TargetReg(arg0.fp ? kFArg0 : kArg0, kWide));
      if (arg1.wide == 0) {
        // For Mips, when the 1st arg is integral, the remaining args are passed in core regs.
        if (cu_->instruction_set == kMips) {
          LoadValueDirectFixed(arg1, TargetReg((arg1.fp && arg0.fp) ? kFArg2 : kArg2, kNotWide));
        } else {
          LoadValueDirectFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kNotWide));
        }
      } else {
        // For Mips, when the 1st arg is integral, the remaining args are passed in core regs.
        if (cu_->instruction_set == kMips) {
          LoadValueDirectWideFixed(arg1, TargetReg((arg1.fp && arg0.fp) ? kFArg2 : kArg2, kWide));
        } else {
          LoadValueDirectWideFixed(arg1, TargetReg(arg1.fp ? kFArg2 : kArg2, kWide));
        }
      }
    }
  }
  ClobberCallerSave();
  CallHelper(r_tgt, trampoline, safepoint_pc);
}

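// Move two values into the fixed kArg0/kArg1 argument registers, handling the case where one
// (or both) of the sources already sits in the other's destination: kArg2 is then used as a
// scratch register so neither value is clobbered.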
void Mir2Lir::CopyToArgumentRegs(RegStorage arg0, RegStorage arg1) {
  WideKind arg0_kind = arg0.GetWideKind();
  WideKind arg1_kind = arg1.GetWideKind();
  if (IsSameReg(arg1, TargetReg(kArg0, arg1_kind))) {
    if (IsSameReg(arg0, TargetReg(kArg1, arg0_kind))) {
      // Swap kArg0 and kArg1 with kArg2 as temp.
      OpRegCopy(TargetReg(kArg2, arg1_kind), arg1);
      OpRegCopy(TargetReg(kArg0, arg0_kind), arg0);
      OpRegCopy(TargetReg(kArg1, arg1_kind), TargetReg(kArg2, arg1_kind));
    } else {
      OpRegCopy(TargetReg(kArg1, arg1_kind), arg1);
      OpRegCopy(TargetReg(kArg0, arg0_kind), arg0);
    }
  } else {
    OpRegCopy(TargetReg(kArg0, arg0_kind), arg0);
    OpRegCopy(TargetReg(kArg1, arg1_kind), arg1);
  }
}

void Mir2Lir::CallRuntimeHelperRegReg(QuickEntrypointEnum trampoline, RegStorage arg0,
                                      RegStorage arg1, bool safepoint_pc) {
  RegStorage r_tgt = CallHelperSetup(trampoline);
  CopyToArgumentRegs(arg0, arg1);
  ClobberCallerSave();
  CallHelper(r_tgt, trampoline, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegRegImm(QuickEntrypointEnum trampoline, RegStorage arg0,
                                         RegStorage arg1, int arg2, bool safepoint_pc) {
  RegStorage r_tgt = CallHelperSetup(trampoline);
  CopyToArgumentRegs(arg0, arg1);
  LoadConstant(TargetReg(kArg2, kNotWide), arg2);
  ClobberCallerSave();
  CallHelper(r_tgt, trampoline, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmRegLocationMethod(QuickEntrypointEnum trampoline, int arg0,
                                                    RegLocation arg1, bool safepoint_pc) {
  RegStorage r_tgt = CallHelperSetup(trampoline);
  LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
  LoadCurrMethodDirect(TargetReg(kArg2, kRef));
  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
  ClobberCallerSave();
  CallHelper(r_tgt, trampoline, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmImmMethod(QuickEntrypointEnum trampoline, int arg0, int arg1,
                                            bool safepoint_pc) {
  RegStorage r_tgt = CallHelperSetup(trampoline);
  LoadCurrMethodDirect(TargetReg(kArg2, kRef));
  LoadConstant(TargetReg(kArg1, kNotWide), arg1);
  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
  ClobberCallerSave();
  CallHelper(r_tgt, trampoline, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation(QuickEntrypointEnum trampoline, int arg0,
                                                         RegLocation arg1,
                                                         RegLocation arg2, bool safepoint_pc) {
  RegStorage r_tgt = CallHelperSetup(trampoline);
  DCHECK_EQ(static_cast<unsigned int>(arg1.wide), 0U);  // The static_cast works around an
                                                        // instantiation bug in GCC.
  LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
  if (arg2.wide == 0) {
    LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
  } else {
    LoadValueDirectWideFixed(arg2, TargetReg(kArg2, kWide));
  }
  LoadConstant(TargetReg(kArg0, kNotWide), arg0);
  ClobberCallerSave();
  CallHelper(r_tgt, trampoline, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegLocationRegLocationRegLocation(
    QuickEntrypointEnum trampoline,
    RegLocation arg0,
    RegLocation arg1,
    RegLocation arg2,
    bool safepoint_pc) {
  RegStorage r_tgt = CallHelperSetup(trampoline);
  LoadValueDirectFixed(arg0, TargetReg(kArg0, arg0));
  LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
  LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
  ClobberCallerSave();
  CallHelper(r_tgt, trampoline, safepoint_pc);
}

void Mir2Lir::CallRuntimeHelperRegLocationRegLocationRegLocationRegLocation(
    QuickEntrypointEnum trampoline, RegLocation arg0, RegLocation arg1, RegLocation arg2,
    RegLocation arg3, bool safepoint_pc) {
  RegStorage r_tgt = CallHelperSetup(trampoline);
  LoadValueDirectFixed(arg0, TargetReg(kArg0, arg0));
  LoadValueDirectFixed(arg1, TargetReg(kArg1, arg1));
  LoadValueDirectFixed(arg2, TargetReg(kArg2, arg2));
  LoadValueDirectFixed(arg3, TargetReg(kArg3, arg3));
  ClobberCallerSave();
  CallHelper(r_tgt, trampoline, safepoint_pc);
}

/*
 * If there are any ins passed in registers that have not been promoted
 * to a callee-save register, flush them to the frame.  Perform initial
 * assignment of promoted arguments.
 *
 * ArgLocs is an array of location records describing the incoming arguments
 * with one location record per word of argument.
 */
// TODO: Support 64-bit argument registers.
void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
  /*
   * Dummy up a RegLocation for the incoming ArtMethod*
   * It will attempt to keep kArg0 live (or copy it to home location
   * if promoted).
   */
  RegLocation rl_src = rl_method;
  rl_src.location = kLocPhysReg;
  rl_src.reg = TargetReg(kArg0, kRef);
  rl_src.home = false;
  MarkLive(rl_src);
  if (cu_->target64) {
    DCHECK(rl_method.wide);
    StoreValueWide(rl_method, rl_src);
  } else {
    StoreValue(rl_method, rl_src);
  }
  // If Method* has been promoted, explicitly flush
  if (rl_method.location == kLocPhysReg) {
    StoreBaseDisp(TargetPtrReg(kSp), 0, rl_src.reg, kWord, kNotVolatile);
  }

  if (mir_graph_->GetNumOfInVRs() == 0) {
    return;
  }

  int start_vreg = mir_graph_->GetFirstInVR();
  /*
   * Copy incoming arguments to their proper home locations.
   * NOTE: an older version of dx had an issue in which
   * it would reuse static method argument registers.
   * This could result in the same Dalvik virtual register
   * being promoted to both core and fp regs. To account for this,
   * we only copy to the corresponding promoted physical register
   * if it matches the type of the SSA name for the incoming
   * argument.  It is also possible that long and double arguments
   * end up half-promoted.  In those cases, we must flush the promoted
   * half to memory as well.
   */
  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
  RegLocation* t_loc = nullptr;
  EnsureInitializedArgMappingToPhysicalReg();
  for (uint32_t i = 0; i < mir_graph_->GetNumOfInVRs(); i += t_loc->wide ? 2 : 1) {
    // Get reg corresponding to input.
    RegStorage reg = in_to_reg_storage_mapping_.GetReg(i);
    t_loc = &ArgLocs[i];

    // If the wide input appeared as a single (32-bit) register, treat it
    // as coming from memory.
    if (t_loc->wide && reg.Valid() && !reg.Is64Bit()) {
      // The memory already holds the half. Don't do anything.
      reg = RegStorage::InvalidReg();
    }

    if (reg.Valid()) {
      // If arriving in register.

      // We have already updated the arg location with promoted info
      // so we can rely on it.
      if (t_loc->location == kLocPhysReg) {
        // Just copy it.
        if (t_loc->wide) {
          OpRegCopyWide(t_loc->reg, reg);
        } else {
          OpRegCopy(t_loc->reg, reg);
        }
      } else {
        // Needs flush.
        int offset = SRegOffset(start_vreg + i);
        if (t_loc->ref) {
          StoreRefDisp(TargetPtrReg(kSp), offset, reg, kNotVolatile);
        } else {
          StoreBaseDisp(TargetPtrReg(kSp), offset, reg, t_loc->wide ? k64 : k32, kNotVolatile);
        }
      }
    } else {
      // If arriving in frame & promoted.
      if (t_loc->location == kLocPhysReg) {
        int offset = SRegOffset(start_vreg + i);
        if (t_loc->ref) {
          LoadRefDisp(TargetPtrReg(kSp), offset, t_loc->reg, kNotVolatile);
        } else {
          LoadBaseDisp(TargetPtrReg(kSp), offset, t_loc->reg, t_loc->wide ? k64 : k32,
                       kNotVolatile);
        }
      }
    }
  }
}

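// Common building blocks for the invoke sequences below: load "this" into kArg1, load
// this->klass_ into kArg0 (with a null check), and load the compiled-code entrypoint into
// kInvokeTgt (skipped on x86/x86-64, which can call through memory directly).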
static void CommonCallCodeLoadThisIntoArg1(const CallInfo* info, Mir2Lir* cg) {
  RegLocation rl_arg = info->args[0];
  cg->LoadValueDirectFixed(rl_arg, cg->TargetReg(kArg1, kRef));
}

static void CommonCallCodeLoadClassIntoArg0(const CallInfo* info, Mir2Lir* cg) {
  cg->GenNullCheck(cg->TargetReg(kArg1, kRef), info->opt_flags);
  // get this->klass_ [use kArg1, set kArg0]
  cg->LoadRefDisp(cg->TargetReg(kArg1, kRef), mirror::Object::ClassOffset().Int32Value(),
                  cg->TargetReg(kArg0, kRef),
                  kNotVolatile);
  cg->MarkPossibleNullPointerException(info->opt_flags);
}

static bool CommonCallCodeLoadCodePointerIntoInvokeTgt(const RegStorage* alt_from,
                                                       const CompilationUnit* cu, Mir2Lir* cg) {
  if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
    int32_t offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(
        InstructionSetPointerSize(cu->instruction_set)).Int32Value();
    // Get the compiled code address [use *alt_from or kArg0, set kInvokeTgt]
    cg->LoadWordDisp(alt_from == nullptr ? cg->TargetReg(kArg0, kRef) : *alt_from, offset,
                     cg->TargetPtrReg(kInvokeTgt));
    return true;
  }
  return false;
}

/*
 * Bit of a hack here - in the absence of a real scheduling pass,
 * emit the next instruction in a virtual invoke sequence.
 * We can use kLr as a temp prior to target address loading.
 * Note also that we'll load the first argument ("this") into
 * kArg1 here rather than the standard GenDalvikArgs.
 */
static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
                         int state, const MethodReference& target_method,
                         uint32_t method_idx, uintptr_t, uintptr_t,
                         InvokeType) {
  UNUSED(target_method);
  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
  /*
   * This is the fast path in which the target virtual method is
   * fully resolved at compile time.
   */
  switch (state) {
    case 0:
      CommonCallCodeLoadThisIntoArg1(info, cg);   // kArg1 := this
      break;
    case 1:
      CommonCallCodeLoadClassIntoArg0(info, cg);  // kArg0 := kArg1->class
                                                  // Includes a null-check.
      break;
    case 2: {
      // Get this->klass_.embedded_vtable[method_idx] [use kArg0, set kArg0]
      const size_t pointer_size = InstructionSetPointerSize(
          cu->compiler_driver->GetInstructionSet());
      int32_t offset = mirror::Class::EmbeddedVTableEntryOffset(
          method_idx, pointer_size).Uint32Value();
      // Load target method from embedded vtable to kArg0 [use kArg0, set kArg0]
      cg->LoadWordDisp(cg->TargetPtrReg(kArg0), offset, cg->TargetPtrReg(kArg0));
      break;
    }
    case 3:
      if (CommonCallCodeLoadCodePointerIntoInvokeTgt(nullptr, cu, cg)) {
        break;                                    // kInvokeTgt := kArg0->entrypoint
      }
      DCHECK(cu->instruction_set == kX86 || cu->instruction_set == kX86_64);
      FALLTHROUGH_INTENDED;
    default:
      return -1;
  }
  return state + 1;
}

/*
 * Emit the next instruction in an invoke interface sequence. This will do a lookup in the
 * class's IMT, calling either the actual method or art_quick_imt_conflict_trampoline if
 * more than one interface method maps to the same index. Note also that we'll load the first
 * argument ("this") into kArg1 here rather than the standard GenDalvikArgs.
 */
static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
                                 const MethodReference& target_method,
                                 uint32_t method_idx, uintptr_t, uintptr_t, InvokeType) {
  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());

  switch (state) {
    case 0:  // Set target method index in case of conflict [set kHiddenArg, kHiddenFpArg (x86)]
      CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
      cg->LoadConstant(cg->TargetReg(kHiddenArg, kNotWide), target_method.dex_method_index);
      if (cu->instruction_set == kX86) {
        cg->OpRegCopy(cg->TargetReg(kHiddenFpArg, kNotWide), cg->TargetReg(kHiddenArg, kNotWide));
      }
      break;
    case 1:
      CommonCallCodeLoadThisIntoArg1(info, cg);   // kArg1 := this
      break;
    case 2:
      CommonCallCodeLoadClassIntoArg0(info, cg);  // kArg0 := kArg1->class
                                                  // Includes a null-check.
      break;
    case 3: {  // Get target method [use kInvokeTgt, set kArg0]
      const size_t pointer_size = InstructionSetPointerSize(
          cu->compiler_driver->GetInstructionSet());
      int32_t offset = mirror::Class::EmbeddedImTableEntryOffset(
          method_idx % mirror::Class::kImtSize, pointer_size).Uint32Value();
      // Load target method from embedded imtable to kArg0 [use kArg0, set kArg0]
      cg->LoadWordDisp(cg->TargetPtrReg(kArg0), offset, cg->TargetPtrReg(kArg0));
      break;
    }
    case 4:
      if (CommonCallCodeLoadCodePointerIntoInvokeTgt(nullptr, cu, cg)) {
        break;                                    // kInvokeTgt := kArg0->entrypoint
      }
      DCHECK(cu->instruction_set == kX86 || cu->instruction_set == kX86_64);
      FALLTHROUGH_INTENDED;
    default:
      return -1;
  }
  return state + 1;
}

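// Shared slow-path invoke sequence: load the trampoline address into kInvokeTgt (not needed on
// x86/x86-64) and the target method index into kArg0, then let the runtime resolve the callee.
// Used by the access-check variants below.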
static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info,
                            QuickEntrypointEnum trampoline, int state,
                            const MethodReference& target_method, uint32_t method_idx) {
  UNUSED(info, method_idx);
  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());

  /*
   * This handles the case in which the base method is not fully
   * resolved at compile time; we bail to a runtime helper.
   */
  if (state == 0) {
    if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
      // Load trampoline target
      int32_t disp;
      if (cu->target64) {
        disp = GetThreadOffset<8>(trampoline).Int32Value();
      } else {
        disp = GetThreadOffset<4>(trampoline).Int32Value();
      }
      cg->LoadWordDisp(cg->TargetPtrReg(kSelf), disp, cg->TargetPtrReg(kInvokeTgt));
    }
    // Load kArg0 with method index
    CHECK_EQ(cu->dex_file, target_method.dex_file);
    cg->LoadConstant(cg->TargetReg(kArg0, kNotWide), target_method.dex_method_index);
    return 1;
  }
  return -1;
}

static int NextStaticCallInsnSP(CompilationUnit* cu, CallInfo* info,
                                int state,
                                const MethodReference& target_method,
                                uint32_t, uintptr_t, uintptr_t, InvokeType) {
  return NextInvokeInsnSP(cu, info, kQuickInvokeStaticTrampolineWithAccessCheck, state,
                          target_method, 0);
}

static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
                                const MethodReference& target_method,
                                uint32_t, uintptr_t, uintptr_t, InvokeType) {
  return NextInvokeInsnSP(cu, info, kQuickInvokeDirectTrampolineWithAccessCheck, state,
                          target_method, 0);
}

static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
                               const MethodReference& target_method,
                               uint32_t, uintptr_t, uintptr_t, InvokeType) {
  return NextInvokeInsnSP(cu, info, kQuickInvokeSuperTrampolineWithAccessCheck, state,
                          target_method, 0);
}

static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
                           const MethodReference& target_method,
                           uint32_t, uintptr_t, uintptr_t, InvokeType) {
  return NextInvokeInsnSP(cu, info, kQuickInvokeVirtualTrampolineWithAccessCheck, state,
                          target_method, 0);
}

static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* cu,
                                                CallInfo* info, int state,
                                                const MethodReference& target_method,
                                                uint32_t, uintptr_t, uintptr_t, InvokeType) {
  return NextInvokeInsnSP(cu, info, kQuickInvokeInterfaceTrampolineWithAccessCheck, state,
                          target_method, 0);
}

// Default implementation of implicit null pointer check.
// Overridden by arch specific as necessary.
void Mir2Lir::GenImplicitNullCheck(RegStorage reg, int opt_flags) {
  if (!(cu_->disable_opt & (1 << kNullCheckElimination)) && (opt_flags & MIR_IGNORE_NULL_CHECK)) {
    return;
  }
  RegStorage tmp = AllocTemp();
  Load32Disp(reg, 0, tmp);
  MarkPossibleNullPointerException(opt_flags);
  FreeTemp(tmp);
}

/**
 * @brief Used to flush promoted registers if they are used as arguments
 * in an invocation.
 * @param info the information about arguments for invocation.
 * @param start the first argument we should start to look from.
 */
void Mir2Lir::GenDalvikArgsFlushPromoted(CallInfo* info, int start) {
  if (cu_->disable_opt & (1 << kPromoteRegs)) {
    // This makes sense only if promotion is enabled.
    return;
  }
  ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
  // Scan the rest of the args - if in phys_reg flush to memory
  for (size_t next_arg = start; next_arg < info->num_arg_words;) {
    RegLocation loc = info->args[next_arg];
    if (loc.wide) {
      loc = UpdateLocWide(loc);
      if (loc.location == kLocPhysReg) {
        StoreBaseDisp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k64, kNotVolatile);
      }
      next_arg += 2;
    } else {
      loc = UpdateLoc(loc);
      if (loc.location == kLocPhysReg) {
        if (loc.ref) {
          StoreRefDisp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, kNotVolatile);
        } else {
          StoreBaseDisp(TargetPtrReg(kSp), SRegOffset(loc.s_reg_low), loc.reg, k32,
                        kNotVolatile);
        }
      }
      next_arg++;
    }
  }
}

/**
 * @brief Used to optimize the copying of VRs which are arguments of invocation.
 * Please note that you should flush promoted registers first if you copy.
 * If the implementation does copying, it may skip several of the first VRs but must copy
 * through to the end, and must return the number of skipped VRs
 * (it might be all VRs).
 * @see GenDalvikArgsFlushPromoted
 * @param info the information about arguments for invocation.
 * @param first the first argument we should start to look from.
 * @param count the number of remaining arguments we can handle.
 * @return the number of arguments which we did not handle. Unhandled arguments
 * must be attached to the first one.
 */
int Mir2Lir::GenDalvikArgsBulkCopy(CallInfo* info, int first, int count) {
  // The memcpy call is pretty expensive, so only use it if count is big.
  if (count > 16) {
    GenDalvikArgsFlushPromoted(info, first);
    int start_offset = SRegOffset(info->args[first].s_reg_low);
    int outs_offset = StackVisitor::GetOutVROffset(first, cu_->instruction_set);

    OpRegRegImm(kOpAdd, TargetReg(kArg0, kRef), TargetPtrReg(kSp), outs_offset);
    OpRegRegImm(kOpAdd, TargetReg(kArg1, kRef), TargetPtrReg(kSp), start_offset);
    CallRuntimeHelperRegRegImm(kQuickMemcpy, TargetReg(kArg0, kRef), TargetReg(kArg1, kRef),
                               count * 4, false);
    count = 0;
  }
  return count;
}

int Mir2Lir::GenDalvikArgs(CallInfo* info, int call_state,
                           LIR** pcrLabel, NextCallInsn next_call_insn,
                           const MethodReference& target_method,
                           uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method,
                           InvokeType type, bool skip_this) {
  // If no arguments, just return.
  if (info->num_arg_words == 0u)
    return call_state;

  const size_t start_index = skip_this ? 1 : 0;

  // Get the architecture-dependent mapping between output VRs and physical registers,
  // based on the shorty of the method to call.
  InToRegStorageMapping in_to_reg_storage_mapping(arena_);
  {
    const char* target_shorty = mir_graph_->GetShortyFromMethodReference(target_method);
    ShortyIterator shorty_iterator(target_shorty, type == kStatic);
    in_to_reg_storage_mapping.Initialize(&shorty_iterator, GetResetedInToRegStorageMapper());
  }

  size_t stack_map_start = std::max(in_to_reg_storage_mapping.GetEndMappedIn(), start_index);
  if ((stack_map_start < info->num_arg_words) && info->args[stack_map_start].high_word) {
    // It is possible that the last mapped reg is 32-bit while the arg is 64-bit.
    // It will be handled together with the low part mapped to a register.
    stack_map_start++;
  }
  size_t regs_left_to_pass_via_stack = info->num_arg_words - stack_map_start;

  // If it is a range case we can try to copy the remaining VRs (not mapped to physical
  // registers) using a more optimal algorithm.
  if (info->is_range && regs_left_to_pass_via_stack > 1) {
    regs_left_to_pass_via_stack = GenDalvikArgsBulkCopy(info, stack_map_start,
                                                        regs_left_to_pass_via_stack);
  }

  // Now handle any remaining VRs mapped to stack.
  if (in_to_reg_storage_mapping.HasArgumentsOnStack()) {
    // Two temps, but do not use kArg1; it might hold "this", which we can skip.
    // Separate single and wide - it can give some advantage.
    RegStorage regRef = TargetReg(kArg3, kRef);
    RegStorage regSingle = TargetReg(kArg3, kNotWide);
    RegStorage regWide = TargetReg(kArg2, kWide);
    for (size_t i = start_index; i < stack_map_start + regs_left_to_pass_via_stack; i++) {
      RegLocation rl_arg = info->args[i];
      rl_arg = UpdateRawLoc(rl_arg);
      RegStorage reg = in_to_reg_storage_mapping.GetReg(i);
      if (!reg.Valid()) {
        int out_offset = StackVisitor::GetOutVROffset(i, cu_->instruction_set);
        {
          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
          if (rl_arg.wide) {
            if (rl_arg.location == kLocPhysReg) {
              StoreBaseDisp(TargetPtrReg(kSp), out_offset, rl_arg.reg, k64, kNotVolatile);
            } else {
              LoadValueDirectWideFixed(rl_arg, regWide);
              StoreBaseDisp(TargetPtrReg(kSp), out_offset, regWide, k64, kNotVolatile);
            }
          } else {
            if (rl_arg.location == kLocPhysReg) {
              if (rl_arg.ref) {
                StoreRefDisp(TargetPtrReg(kSp), out_offset, rl_arg.reg, kNotVolatile);
              } else {
                StoreBaseDisp(TargetPtrReg(kSp), out_offset, rl_arg.reg, k32, kNotVolatile);
              }
            } else {
              if (rl_arg.ref) {
                LoadValueDirectFixed(rl_arg, regRef);
                StoreRefDisp(TargetPtrReg(kSp), out_offset, regRef, kNotVolatile);
              } else {
                LoadValueDirectFixed(rl_arg, regSingle);
                StoreBaseDisp(TargetPtrReg(kSp), out_offset, regSingle, k32, kNotVolatile);
              }
            }
          }
        }
        call_state = next_call_insn(cu_, info, call_state, target_method,
                                    vtable_idx, direct_code, direct_method, type);
      }
      if (rl_arg.wide) {
        i++;
      }
    }
  }

  // Finish with VRs mapped to physical registers.
  for (size_t i = start_index; i < stack_map_start; i++) {
    RegLocation rl_arg = info->args[i];
    rl_arg = UpdateRawLoc(rl_arg);
    RegStorage reg = in_to_reg_storage_mapping.GetReg(i);
    if (reg.Valid()) {
      if (rl_arg.wide) {
        // If reg is not 64-bit (it holds only half of the 64-bit value), handle it separately.
        if (!reg.Is64Bit()) {
          ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
          if (rl_arg.location == kLocPhysReg) {
            int out_offset = StackVisitor::GetOutVROffset(i, cu_->instruction_set);
            // Dump it to memory.
            StoreBaseDisp(TargetPtrReg(kSp), out_offset, rl_arg.reg, k64, kNotVolatile);
            LoadBaseDisp(TargetPtrReg(kSp), out_offset, reg, k32, kNotVolatile);
          } else {
            int high_offset = StackVisitor::GetOutVROffset(i + 1, cu_->instruction_set);
            // First, use target reg for high part.
            LoadBaseDisp(TargetPtrReg(kSp), SRegOffset(rl_arg.s_reg_low + 1), reg, k32,
                         kNotVolatile);
            StoreBaseDisp(TargetPtrReg(kSp), high_offset, reg, k32, kNotVolatile);
            // Now, use target reg for low part.
            LoadBaseDisp(TargetPtrReg(kSp), SRegOffset(rl_arg.s_reg_low), reg, k32, kNotVolatile);
            int low_offset = StackVisitor::GetOutVROffset(i, cu_->instruction_set);
            // And store it to the expected memory location.
            StoreBaseDisp(TargetPtrReg(kSp), low_offset, reg, k32, kNotVolatile);
          }
        } else {
          LoadValueDirectWideFixed(rl_arg, reg);
        }
      } else {
        LoadValueDirectFixed(rl_arg, reg);
      }
      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                               direct_code, direct_method, type);
    }
    if (rl_arg.wide) {
      i++;
    }
  }

  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                           direct_code, direct_method, type);
  if (pcrLabel) {
    if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
      *pcrLabel = GenExplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
    } else {
      *pcrLabel = nullptr;
      GenImplicitNullCheck(TargetReg(kArg1, kRef), info->opt_flags);
    }
  }
  return call_state;
}

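// Lazily initialize the incoming-argument-to-physical-register mapping from the current
// method's shorty.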
void Mir2Lir::EnsureInitializedArgMappingToPhysicalReg() {
  if (!in_to_reg_storage_mapping_.IsInitialized()) {
    ShortyIterator shorty_iterator(cu_->shorty, cu_->invoke_type == kStatic);
    in_to_reg_storage_mapping_.Initialize(&shorty_iterator, GetResetedInToRegStorageMapper());
  }
}

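// Pick the destination location for an inlined intrinsic's result. If the result is unused,
// return a sink location based on the return type of the invoked method so the value can still
// be materialized and discarded.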
RegLocation Mir2Lir::InlineTarget(CallInfo* info) {
  RegLocation res;
  if (info->result.location == kLocInvalid) {
    // If result is unused, return a sink target based on type of invoke target.
    res = GetReturn(
        ShortyToRegClass(mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
  } else {
    res = info->result;
  }
  return res;
}

RegLocation Mir2Lir::InlineTargetWide(CallInfo* info) {
  RegLocation res;
  if (info->result.location == kLocInvalid) {
    // If result is unused, return a sink target based on type of invoke target.
    res = GetReturnWide(ShortyToRegClass(
        mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]));
  } else {
    res = info->result;
  }
  return res;
}

bool Mir2Lir::GenInlinedReferenceGetReferent(CallInfo* info) {
  if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
    // TODO: add Mips and Mips64 implementations.
    return false;
  }

  bool use_direct_type_ptr;
  uintptr_t direct_type_ptr;
  ClassReference ref;
  if (!cu_->compiler_driver->CanEmbedReferenceTypeInCode(&ref,
        &use_direct_type_ptr, &direct_type_ptr)) {
    return false;
  }

  RegStorage reg_class = TargetReg(kArg1, kRef);
  Clobber(reg_class);
  LockTemp(reg_class);
  if (use_direct_type_ptr) {
    LoadConstant(reg_class, direct_type_ptr);
  } else {
    uint16_t type_idx = ref.first->GetClassDef(ref.second).class_idx_;
    LoadClassType(*ref.first, type_idx, kArg1);
  }

  uint32_t slow_path_flag_offset = cu_->compiler_driver->GetReferenceSlowFlagOffset();
  uint32_t disable_flag_offset = cu_->compiler_driver->GetReferenceDisableFlagOffset();
  CHECK(slow_path_flag_offset && disable_flag_offset &&
        (slow_path_flag_offset != disable_flag_offset));

  // Intrinsic logic start.
  RegLocation rl_obj = info->args[0];
  rl_obj = LoadValue(rl_obj, kRefReg);

  RegStorage reg_slow_path = AllocTemp();
  RegStorage reg_disabled = AllocTemp();
  LoadBaseDisp(reg_class, slow_path_flag_offset, reg_slow_path, kSignedByte, kNotVolatile);
  LoadBaseDisp(reg_class, disable_flag_offset, reg_disabled, kSignedByte, kNotVolatile);
  FreeTemp(reg_class);
  LIR* or_inst = OpRegRegReg(kOpOr, reg_slow_path, reg_slow_path, reg_disabled);
  FreeTemp(reg_disabled);

  // If slow path, jump to the JNI path target.
  LIR* slow_path_branch;
  if (or_inst->u.m.def_mask->HasBit(ResourceMask::kCCode)) {
    // Generate conditional branch only, as the OR sets the condition flags (we are interested in the 'Z' flag).
    slow_path_branch = OpCondBranch(kCondNe, nullptr);
  } else {
    // Generate compare and branch.
    slow_path_branch = OpCmpImmBranch(kCondNe, reg_slow_path, 0, nullptr);
  }
  FreeTemp(reg_slow_path);

  // Slow path not enabled; simply load the referent of the reference object.
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
  GenNullCheck(rl_obj.reg, info->opt_flags);
  LoadRefDisp(rl_obj.reg, mirror::Reference::ReferentOffset().Int32Value(), rl_result.reg,
              kNotVolatile);
  MarkPossibleNullPointerException(info->opt_flags);
  StoreValue(rl_dest, rl_result);

  LIR* intrinsic_finish = NewLIR0(kPseudoTargetLabel);
  AddIntrinsicSlowPath(info, slow_path_branch, intrinsic_finish);
  ClobberCallerSave();  // We must clobber everything because slow path will return here
  return true;
}

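// Inline String.charAt(): null-check the string, bounds-check the index against the count
// field (bounds failures take the intrinsic slow path), then load the 16-bit char from the
// string's character data.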
bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
  // Location of char array data
  int value_offset = mirror::String::ValueOffset().Int32Value();
  // Location of count
  int count_offset = mirror::String::CountOffset().Int32Value();

  RegLocation rl_obj = info->args[0];
  RegLocation rl_idx = info->args[1];
  rl_obj = LoadValue(rl_obj, kRefReg);
  rl_idx = LoadValue(rl_idx, kCoreReg);
  RegStorage reg_max;
  GenNullCheck(rl_obj.reg, info->opt_flags);
  bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK));
  LIR* range_check_branch = nullptr;
  if (range_check) {
    reg_max = AllocTemp();
    Load32Disp(rl_obj.reg, count_offset, reg_max);
    MarkPossibleNullPointerException(info->opt_flags);
    // Set up a slow path to allow retry in case of bounds violation
    OpRegReg(kOpCmp, rl_idx.reg, reg_max);
    FreeTemp(reg_max);
    range_check_branch = OpCondBranch(kCondUge, nullptr);
  }
  RegStorage reg_ptr = AllocTempRef();
  OpRegRegImm(kOpAdd, reg_ptr, rl_obj.reg, value_offset);
  FreeTemp(rl_obj.reg);
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  LoadBaseIndexed(reg_ptr, rl_idx.reg, rl_result.reg, 1, kUnsignedHalf);
  FreeTemp(reg_ptr);
  StoreValue(rl_dest, rl_result);
  if (range_check) {
    DCHECK(range_check_branch != nullptr);
    info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've already null checked.
    AddIntrinsicSlowPath(info, range_check_branch);
  }
  return true;
}

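// Inline String.getCharsNoCheck(): compute the source and destination pointers and the byte
// length in the fixed argument registers, then call the memcpy entrypoint directly (as noted
// below, this call is not a safepoint).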
bool Mir2Lir::GenInlinedStringGetCharsNoCheck(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  size_t char_component_size = Primitive::ComponentSize(Primitive::kPrimChar);
  // Location of data in char array buffer
  int data_offset = mirror::Array::DataOffset(char_component_size).Int32Value();
  // Location of char array data in string
  int value_offset = mirror::String::ValueOffset().Int32Value();

  RegLocation rl_obj = info->args[0];
  RegLocation rl_start = info->args[1];
  RegLocation rl_end = info->args[2];
  RegLocation rl_buffer = info->args[3];
  RegLocation rl_index = info->args[4];

  ClobberCallerSave();
  LockCallTemps();  // Using fixed registers
  RegStorage reg_dst_ptr = TargetReg(kArg0, kRef);
  RegStorage reg_src_ptr = TargetReg(kArg1, kRef);
  RegStorage reg_length = TargetReg(kArg2, kNotWide);
  RegStorage reg_tmp = TargetReg(kArg3, kNotWide);
  RegStorage reg_tmp_ptr = RegStorage(RegStorage::k64BitSolo, reg_tmp.GetRawBits() & RegStorage::kRegTypeMask);

  LoadValueDirectFixed(rl_buffer, reg_dst_ptr);
  OpRegImm(kOpAdd, reg_dst_ptr, data_offset);
  LoadValueDirectFixed(rl_index, reg_tmp);
  OpRegRegImm(kOpLsl, reg_tmp, reg_tmp, 1);
  OpRegReg(kOpAdd, reg_dst_ptr, cu_->instruction_set == kArm64 ? reg_tmp_ptr : reg_tmp);

  LoadValueDirectFixed(rl_start, reg_tmp);
  LoadValueDirectFixed(rl_end, reg_length);
  OpRegReg(kOpSub, reg_length, reg_tmp);
  OpRegRegImm(kOpLsl, reg_length, reg_length, 1);
  LoadValueDirectFixed(rl_obj, reg_src_ptr);

  OpRegImm(kOpAdd, reg_src_ptr, value_offset);
  OpRegRegImm(kOpLsl, reg_tmp, reg_tmp, 1);
  OpRegReg(kOpAdd, reg_src_ptr, cu_->instruction_set == kArm64 ? reg_tmp_ptr : reg_tmp);

  RegStorage r_tgt;
  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
    r_tgt = LoadHelper(kQuickMemcpy);
  } else {
    r_tgt = RegStorage::InvalidReg();
  }
  // NOTE: not a safepoint
  CallHelper(r_tgt, kQuickMemcpy, false, true);

  return true;
}

// Generates an inlined String.is_empty or String.length.
bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty) {
  if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
    // TODO: add Mips and Mips64 implementations.
    return false;
  }
  // dst = src.length();
  RegLocation rl_obj = info->args[0];
  rl_obj = LoadValue(rl_obj, kRefReg);
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  GenNullCheck(rl_obj.reg, info->opt_flags);
  Load32Disp(rl_obj.reg, mirror::String::CountOffset().Int32Value(), rl_result.reg);
  MarkPossibleNullPointerException(info->opt_flags);
  if (is_empty) {
    // dst = (dst == 0);
    if (cu_->instruction_set == kThumb2) {
      RegStorage t_reg = AllocTemp();
      OpRegReg(kOpNeg, t_reg, rl_result.reg);
      OpRegRegReg(kOpAdc, rl_result.reg, rl_result.reg, t_reg);
    } else if (cu_->instruction_set == kArm64) {
      OpRegImm(kOpSub, rl_result.reg, 1);
      OpRegRegImm(kOpLsr, rl_result.reg, rl_result.reg, 31);
    } else {
      DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
      OpRegImm(kOpSub, rl_result.reg, 1);
      OpRegImm(kOpLsr, rl_result.reg, 31);
    }
  }
  StoreValue(rl_dest, rl_result);
  return true;
}

bool Mir2Lir::GenInlinedStringFactoryNewStringFromBytes(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  RegLocation rl_data = info->args[0];
  RegLocation rl_high = info->args[1];
  RegLocation rl_offset = info->args[2];
  RegLocation rl_count = info->args[3];
  rl_data = LoadValue(rl_data, kRefReg);
  LIR* data_null_check_branch = OpCmpImmBranch(kCondEq, rl_data.reg, 0, nullptr);
  AddIntrinsicSlowPath(info, data_null_check_branch);
  CallRuntimeHelperRegLocationRegLocationRegLocationRegLocation(
      kQuickAllocStringFromBytes, rl_data, rl_high, rl_offset, rl_count, true);
  RegLocation rl_return = GetReturn(kRefReg);
  RegLocation rl_dest = InlineTarget(info);
  StoreValue(rl_dest, rl_return);
  return true;
}

bool Mir2Lir::GenInlinedStringFactoryNewStringFromChars(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  RegLocation rl_offset = info->args[0];
  RegLocation rl_count = info->args[1];
  RegLocation rl_data = info->args[2];
  CallRuntimeHelperRegLocationRegLocationRegLocation(
      kQuickAllocStringFromChars, rl_offset, rl_count, rl_data, true);
  RegLocation rl_return = GetReturn(kRefReg);
  RegLocation rl_dest = InlineTarget(info);
  StoreValue(rl_dest, rl_return);
  return true;
}

bool Mir2Lir::GenInlinedStringFactoryNewStringFromString(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  RegLocation rl_string = info->args[0];
  rl_string = LoadValue(rl_string, kRefReg);
  LIR* string_null_check_branch = OpCmpImmBranch(kCondEq, rl_string.reg, 0, nullptr);
  AddIntrinsicSlowPath(info, string_null_check_branch);
  CallRuntimeHelperRegLocation(kQuickAllocStringFromString, rl_string, true);
  RegLocation rl_return = GetReturn(kRefReg);
  RegLocation rl_dest = InlineTarget(info);
  StoreValue(rl_dest, rl_return);
  return true;
}

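// Inline Integer/Long/Short.reverseBytes(). 64-bit targets can reverse a wide value with a
// single kOpRev; 32-bit targets reverse each half and swap the halves.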
GenInlinedReverseBytes(CallInfo * info,OpSize size)1166 bool Mir2Lir::GenInlinedReverseBytes(CallInfo* info, OpSize size) {
1167   if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
1168     // TODO: add Mips and Mips64 implementations.
1169     return false;
1170   }
1171   RegLocation rl_dest = IsWide(size) ? InlineTargetWide(info) : InlineTarget(info);  // result reg
1172   if (rl_dest.s_reg_low == INVALID_SREG) {
1173     // Result is unused, the code is dead. Inlining successful, no code generated.
1174     return true;
1175   }
1176   RegLocation rl_src_i = info->args[0];
1177   RegLocation rl_i = IsWide(size) ? LoadValueWide(rl_src_i, kCoreReg) : LoadValue(rl_src_i, kCoreReg);
1178   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1179   if (IsWide(size)) {
1180     if (cu_->instruction_set == kArm64 || cu_->instruction_set == kX86_64) {
1181       OpRegReg(kOpRev, rl_result.reg, rl_i.reg);
1182       StoreValueWide(rl_dest, rl_result);
1183       return true;
1184     }
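    // On 32-bit targets a 64-bit byte swap is done as two 32-bit REVs with the halves
    // exchanged: result.lo = rev(src.hi), result.hi = rev(src.lo).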
1185     RegStorage r_i_low = rl_i.reg.GetLow();
1186     if (rl_i.reg.GetLowReg() == rl_result.reg.GetLowReg()) {
1187       // The first REV clobbers the shared low register; save rl_i's low half in a temp for the second REV.
1188       r_i_low = AllocTemp();
1189       OpRegCopy(r_i_low, rl_i.reg);
1190     }
1191     OpRegReg(kOpRev, rl_result.reg.GetLow(), rl_i.reg.GetHigh());
1192     OpRegReg(kOpRev, rl_result.reg.GetHigh(), r_i_low);
1193     if (rl_i.reg.GetLowReg() == rl_result.reg.GetLowReg()) {
1194       FreeTemp(r_i_low);
1195     }
1196     StoreValueWide(rl_dest, rl_result);
1197   } else {
1198     DCHECK(size == k32 || size == kSignedHalf);
1199     OpKind op = (size == k32) ? kOpRev : kOpRevsh;
1200     OpRegReg(op, rl_result.reg, rl_i.reg);
1201     StoreValue(rl_dest, rl_result);
1202   }
1203   return true;
1204 }
1205 
1206 bool Mir2Lir::GenInlinedAbsInt(CallInfo* info) {
1207   RegLocation rl_dest = InlineTarget(info);
1208   if (rl_dest.s_reg_low == INVALID_SREG) {
1209     // Result is unused, the code is dead. Inlining successful, no code generated.
1210     return true;
1211   }
1212   RegLocation rl_src = info->args[0];
1213   rl_src = LoadValue(rl_src, kCoreReg);
1214   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1215   RegStorage sign_reg = AllocTemp();
1216   // abs(x): y = x >> 31 (arithmetic shift), abs = (x + y) ^ y.
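  // Worked example (plain two's-complement arithmetic, not emitted code): for x = -5,
  // y = x >> 31 = -1 (all ones), so (x + y) ^ y = (-6) ^ (-1) = 5; for x >= 0, y = 0 and
  // the value is unchanged. As with Java's Math.abs, abs(INT_MIN) stays INT_MIN.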
1217   OpRegRegImm(kOpAsr, sign_reg, rl_src.reg, 31);
1218   OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, sign_reg);
1219   OpRegReg(kOpXor, rl_result.reg, sign_reg);
1220   StoreValue(rl_dest, rl_result);
1221   return true;
1222 }
1223 
1224 bool Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
1225   RegLocation rl_dest = InlineTargetWide(info);
1226   if (rl_dest.s_reg_low == INVALID_SREG) {
1227     // Result is unused, the code is dead. Inlining successful, no code generated.
1228     return true;
1229   }
1230   RegLocation rl_src = info->args[0];
1231   rl_src = LoadValueWide(rl_src, kCoreReg);
1232   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1233 
1234   // If on x86 or if we would clobber a register needed later, just copy the source first.
1235   if (cu_->instruction_set != kX86_64 &&
1236       (cu_->instruction_set == kX86 ||
1237        rl_result.reg.GetLowReg() == rl_src.reg.GetHighReg())) {
1238     OpRegCopyWide(rl_result.reg, rl_src.reg);
1239     if (rl_result.reg.GetLowReg() != rl_src.reg.GetLowReg() &&
1240         rl_result.reg.GetLowReg() != rl_src.reg.GetHighReg() &&
1241         rl_result.reg.GetHighReg() != rl_src.reg.GetLowReg() &&
1242         rl_result.reg.GetHighReg() != rl_src.reg.GetHighReg()) {
1243       // Reuse source registers to avoid running out of temps.
1244       FreeTemp(rl_src.reg);
1245     }
1246     rl_src = rl_result;
1247   }
1248 
1249   // abs(x): y = x >> 31 (arithmetic shift), abs = (x + y) ^ y.
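  // On x86-64 the identity is applied directly to wide registers; the 32-bit path below
  // broadcasts the sign of the high word into sign_reg, adds with carry across the halves
  // (kOpAdd then kOpAdc), and XORs both halves with the sign mask.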
1250   RegStorage sign_reg;
1251   if (cu_->instruction_set == kX86_64) {
1252     sign_reg = AllocTempWide();
1253     OpRegRegImm(kOpAsr, sign_reg, rl_src.reg, 63);
1254     OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, sign_reg);
1255     OpRegReg(kOpXor, rl_result.reg, sign_reg);
1256   } else {
1257     sign_reg = AllocTemp();
1258     OpRegRegImm(kOpAsr, sign_reg, rl_src.reg.GetHigh(), 31);
1259     OpRegRegReg(kOpAdd, rl_result.reg.GetLow(), rl_src.reg.GetLow(), sign_reg);
1260     OpRegRegReg(kOpAdc, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), sign_reg);
1261     OpRegReg(kOpXor, rl_result.reg.GetLow(), sign_reg);
1262     OpRegReg(kOpXor, rl_result.reg.GetHigh(), sign_reg);
1263   }
1264   FreeTemp(sign_reg);
1265   StoreValueWide(rl_dest, rl_result);
1266   return true;
1267 }
1268 
1269 bool Mir2Lir::GenInlinedReverseBits(CallInfo* info, OpSize size) {
1270   // Currently implemented only for ARM64.
1271   UNUSED(info, size);
1272   return false;
1273 }
1274 
1275 bool Mir2Lir::GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double) {
1276   // Currently implemented only for ARM64.
1277   UNUSED(info, is_min, is_double);
1278   return false;
1279 }
1280 
1281 bool Mir2Lir::GenInlinedCeil(CallInfo* info) {
1282   UNUSED(info);
1283   return false;
1284 }
1285 
1286 bool Mir2Lir::GenInlinedFloor(CallInfo* info) {
1287   UNUSED(info);
1288   return false;
1289 }
1290 
1291 bool Mir2Lir::GenInlinedRint(CallInfo* info) {
1292   UNUSED(info);
1293   return false;
1294 }
1295 
1296 bool Mir2Lir::GenInlinedRound(CallInfo* info, bool is_double) {
1297   UNUSED(info, is_double);
1298   return false;
1299 }
1300 
1301 bool Mir2Lir::GenInlinedFloatCvt(CallInfo* info) {
1302   if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
1303     // TODO: add Mips and Mips64 implementations.
1304     return false;
1305   }
1306   RegLocation rl_dest = InlineTarget(info);
1307   if (rl_dest.s_reg_low == INVALID_SREG) {
1308     // Result is unused, the code is dead. Inlining successful, no code generated.
1309     return true;
1310   }
1311   RegLocation rl_src = info->args[0];
1312   StoreValue(rl_dest, rl_src);
1313   return true;
1314 }
1315 
1316 bool Mir2Lir::GenInlinedDoubleCvt(CallInfo* info) {
1317   if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
1318     // TODO: add Mips and Mips64 implementations.
1319     return false;
1320   }
1321   RegLocation rl_dest = InlineTargetWide(info);
1322   if (rl_dest.s_reg_low == INVALID_SREG) {
1323     // Result is unused, the code is dead. Inlining successful, no code generated.
1324     return true;
1325   }
1326   RegLocation rl_src = info->args[0];
1327   StoreValueWide(rl_dest, rl_src);
1328   return true;
1329 }
1330 
1331 bool Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) {
1332   UNUSED(info);
1333   return false;
1334 }
1335 
1336 
1337 /*
1338  * Fast String.indexOf(I) & (II).  Handles the simple case of a char <= 0xFFFF;
1339  * otherwise bails out to the standard library code.
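 * The zero_based flag selects the (I) flavor, which always searches from index 0;
 * otherwise the start index is taken from the third argument.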
1340  */
1341 bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
1342   RegLocation rl_obj = info->args[0];
1343   RegLocation rl_char = info->args[1];
1344   if (rl_char.is_const && (mir_graph_->ConstantValue(rl_char) & ~0xFFFF) != 0) {
1345     // Code point beyond 0xFFFF. Punt to the real String.indexOf().
1346     return false;
1347   }
1348 
1349   ClobberCallerSave();
1350   LockCallTemps();  // Using fixed registers
1351   RegStorage reg_ptr = TargetReg(kArg0, kRef);
1352   RegStorage reg_char = TargetReg(kArg1, kNotWide);
1353   RegStorage reg_start = TargetReg(kArg2, kNotWide);
1354 
1355   LoadValueDirectFixed(rl_obj, reg_ptr);
1356   LoadValueDirectFixed(rl_char, reg_char);
1357   if (zero_based) {
1358     LoadConstant(reg_start, 0);
1359   } else {
1360     RegLocation rl_start = info->args[2];     // 3rd arg only present in III flavor of IndexOf.
1361     LoadValueDirectFixed(rl_start, reg_start);
1362   }
1363   RegStorage r_tgt = LoadHelper(kQuickIndexOf);
1364   GenExplicitNullCheck(reg_ptr, info->opt_flags);
1365   LIR* high_code_point_branch =
1366       rl_char.is_const ? nullptr : OpCmpImmBranch(kCondGt, reg_char, 0xFFFF, nullptr);
1367   // NOTE: not a safepoint
1368   OpReg(kOpBlx, r_tgt);
1369   if (!rl_char.is_const) {
1370     // Add the slow path for code points beyond 0xFFFF.
1371     DCHECK(high_code_point_branch != nullptr);
1372     LIR* resume_tgt = NewLIR0(kPseudoTargetLabel);
1373     info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.
1374     AddIntrinsicSlowPath(info, high_code_point_branch, resume_tgt);
1375     ClobberCallerSave();  // We must clobber everything because the slow path will return here.
1376   } else {
1377     DCHECK_EQ(mir_graph_->ConstantValue(rl_char) & ~0xFFFF, 0);
1378     DCHECK(high_code_point_branch == nullptr);
1379   }
1380   RegLocation rl_return = GetReturn(kCoreReg);
1381   RegLocation rl_dest = InlineTarget(info);
1382   StoreValue(rl_dest, rl_return);
1383   return true;
1384 }
1385 
1386 /* Fast String.compareTo(Ljava/lang/String;)I. */
1387 bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) {
1388   if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
1389     // TODO: add Mips and Mips64 implementations.
1390     return false;
1391   }
1392   ClobberCallerSave();
1393   LockCallTemps();  // Using fixed registers
1394   RegStorage reg_this = TargetReg(kArg0, kRef);
1395   RegStorage reg_cmp = TargetReg(kArg1, kRef);
1396 
1397   RegLocation rl_this = info->args[0];
1398   RegLocation rl_cmp = info->args[1];
1399   LoadValueDirectFixed(rl_this, reg_this);
1400   LoadValueDirectFixed(rl_cmp, reg_cmp);
1401   RegStorage r_tgt;
1402   if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
1403     r_tgt = LoadHelper(kQuickStringCompareTo);
1404   } else {
1405     r_tgt = RegStorage::InvalidReg();
1406   }
1407   GenExplicitNullCheck(reg_this, info->opt_flags);
1408   info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.
1409   // TUNING: check if rl_cmp.s_reg_low is already null checked
1410   LIR* cmp_null_check_branch = OpCmpImmBranch(kCondEq, reg_cmp, 0, nullptr);
1411   AddIntrinsicSlowPath(info, cmp_null_check_branch);
1412   // NOTE: not a safepoint
1413   CallHelper(r_tgt, kQuickStringCompareTo, false, true);
1414   RegLocation rl_return = GetReturn(kCoreReg);
1415   RegLocation rl_dest = InlineTarget(info);
1416   StoreValue(rl_dest, rl_return);
1417   return true;
1418 }
1419 
1420 bool Mir2Lir::GenInlinedCurrentThread(CallInfo* info) {
1421   RegLocation rl_dest = InlineTarget(info);
1422 
1423   // Early exit if the result is unused.
1424   if (rl_dest.orig_sreg < 0) {
1425     return true;
1426   }
1427 
1428   RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
1429 
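  // Thread.currentThread() reduces to a load of the managed peer object from the native
  // Thread (kSelf); only the field offset differs between 64-bit and 32-bit runtimes.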
1430   if (cu_->target64) {
1431     LoadRefDisp(TargetPtrReg(kSelf), Thread::PeerOffset<8>().Int32Value(), rl_result.reg,
1432                 kNotVolatile);
1433   } else {
1434     Load32Disp(TargetPtrReg(kSelf), Thread::PeerOffset<4>().Int32Value(), rl_result.reg);
1435   }
1436 
1437   StoreValue(rl_dest, rl_result);
1438   return true;
1439 }
1440 
1441 bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info,
1442                                   bool is_long, bool is_object, bool is_volatile) {
1443   if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
1444     // TODO: add Mips and Mips64 implementations.
1445     return false;
1446   }
1447   // Unused - RegLocation rl_src_unsafe = info->args[0];
1448   RegLocation rl_src_obj = info->args[1];  // Object
1449   RegLocation rl_src_offset = info->args[2];  // long low
1450   rl_src_offset = NarrowRegLoc(rl_src_offset);  // ignore high half in info->args[3]
1451   RegLocation rl_dest = is_long ? InlineTargetWide(info) : InlineTarget(info);  // result reg
1452 
1453   RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
1454   RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
1455   RegLocation rl_result = EvalLoc(rl_dest, is_object ? kRefReg : kCoreReg, true);
1456   if (is_long) {
1457     if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64
1458         || cu_->instruction_set == kArm64) {
1459       LoadBaseIndexed(rl_object.reg, rl_offset.reg, rl_result.reg, 0, k64);
1460     } else {
1461       RegStorage rl_temp_offset = AllocTemp();
1462       OpRegRegReg(kOpAdd, rl_temp_offset, rl_object.reg, rl_offset.reg);
1463       LoadBaseDisp(rl_temp_offset, 0, rl_result.reg, k64, kNotVolatile);
1464       FreeTemp(rl_temp_offset);
1465     }
1466   } else {
1467     if (rl_result.ref) {
1468       LoadRefIndexed(rl_object.reg, rl_offset.reg, rl_result.reg, 0);
1469     } else {
1470       LoadBaseIndexed(rl_object.reg, rl_offset.reg, rl_result.reg, 0, k32);
1471     }
1472   }
1473 
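  // For a volatile get, the kLoadAny barrier below provides acquire semantics: later loads
  // and stores may not be reordered ahead of the load emitted above.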
1474   if (is_volatile) {
1475     GenMemBarrier(kLoadAny);
1476   }
1477 
1478   if (is_long) {
1479     StoreValueWide(rl_dest, rl_result);
1480   } else {
1481     StoreValue(rl_dest, rl_result);
1482   }
1483   return true;
1484 }
1485 
1486 bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long,
1487                                   bool is_object, bool is_volatile, bool is_ordered) {
1488   if (cu_->instruction_set == kMips || cu_->instruction_set == kMips64) {
1489     // TODO: add Mips and Mips64 implementations.
1490     return false;
1491   }
1492   // Unused - RegLocation rl_src_unsafe = info->args[0];
1493   RegLocation rl_src_obj = info->args[1];  // Object
1494   RegLocation rl_src_offset = info->args[2];  // long low
1495   rl_src_offset = NarrowRegLoc(rl_src_offset);  // ignore high half in info->args[3]
1496   RegLocation rl_src_value = info->args[4];  // value to store
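  // Both volatile and ordered puts need release-style ordering: the kAnyStore barrier keeps
  // earlier accesses from being reordered past the store emitted below.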
1497   if (is_volatile || is_ordered) {
1498     GenMemBarrier(kAnyStore);
1499   }
1500   RegLocation rl_object = LoadValue(rl_src_obj, kRefReg);
1501   RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
1502   RegLocation rl_value;
1503   if (is_long) {
1504     rl_value = LoadValueWide(rl_src_value, kCoreReg);
1505     if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64
1506         || cu_->instruction_set == kArm64) {
1507       StoreBaseIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0, k64);
1508     } else {
1509       RegStorage rl_temp_offset = AllocTemp();
1510       OpRegRegReg(kOpAdd, rl_temp_offset, rl_object.reg, rl_offset.reg);
1511       StoreBaseDisp(rl_temp_offset, 0, rl_value.reg, k64, kNotVolatile);
1512       FreeTemp(rl_temp_offset);
1513     }
1514   } else {
1515     rl_value = LoadValue(rl_src_value, is_object ? kRefReg : kCoreReg);
1516     if (rl_value.ref) {
1517       StoreRefIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0);
1518     } else {
1519       StoreBaseIndexed(rl_object.reg, rl_offset.reg, rl_value.reg, 0, k32);
1520     }
1521   }
1522 
1523   // Free up the temp early, to ensure x86 doesn't run out of temporaries in MarkGCCard.
1524   FreeTemp(rl_offset.reg);
1525 
1526   if (is_volatile) {
1527     // Prevent reordering with a subsequent volatile load.
1528     // May also be needed to address store atomicity issues.
1529     GenMemBarrier(kAnyAny);
1530   }
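  // Storing a reference also needs a card mark (GC write barrier) so the collector will
  // revisit the holding object.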
1531   if (is_object) {
1532     MarkGCCard(0, rl_value.reg, rl_object.reg);
1533   }
1534   return true;
1535 }
1536 
1537 void Mir2Lir::GenInvoke(CallInfo* info) {
1538   DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
1539   if (mir_graph_->GetMethodLoweringInfo(info->mir).IsIntrinsic()) {
1540     const DexFile* dex_file = info->method_ref.dex_file;
1541     auto* inliner = cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(dex_file);
1542     if (inliner->GenIntrinsic(this, info)) {
1543       return;
1544     }
1545   }
1546   GenInvokeNoInline(info);
1547 }
1548 
1549 void Mir2Lir::GenInvokeNoInline(CallInfo* info) {
1550   int call_state = 0;
1551   LIR* null_ck;
1552   LIR** p_null_ck = nullptr;
1553   NextCallInsn next_call_insn;
1554   FlushAllRegs();  /* Everything to home location */
1555   // Explicit register usage
1556   LockCallTemps();
1557 
1558   const MirMethodLoweringInfo& method_info = mir_graph_->GetMethodLoweringInfo(info->mir);
1559   MethodReference target_method = method_info.GetTargetMethod();
1560   cu_->compiler_driver->ProcessedInvoke(method_info.GetInvokeType(), method_info.StatsFlags());
1561   InvokeType original_type = static_cast<InvokeType>(method_info.GetInvokeType());
1562   info->type = method_info.GetSharpType();
1563   bool is_string_init = false;
1564   if (method_info.IsSpecial()) {
1565     DexFileMethodInliner* inliner = cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(
1566         target_method.dex_file);
1567     if (inliner->IsStringInitMethodIndex(target_method.dex_method_index)) {
1568       is_string_init = true;
1569       size_t pointer_size = GetInstructionSetPointerSize(cu_->instruction_set);
1570       info->string_init_offset = inliner->GetOffsetForStringInit(target_method.dex_method_index,
1571                                                                  pointer_size);
1572       info->type = kStatic;
1573     }
1574   }
1575   bool fast_path = method_info.FastPath();
1576   bool skip_this;
1577 
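  // Select the per-invoke-kind "next call instruction" state machine. GenDalvikArgs
  // interleaves these steps with argument loading; skip_this marks fast paths where the
  // call sequence itself loads the receiver, so the generic argument code skips it.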
1578   if (info->type == kInterface) {
1579     next_call_insn = fast_path ? NextInterfaceCallInsn : NextInterfaceCallInsnWithAccessCheck;
1580     skip_this = fast_path;
1581   } else if (info->type == kDirect) {
1582     if (fast_path) {
1583       p_null_ck = &null_ck;
1584     }
1585     next_call_insn = fast_path ? GetNextSDCallInsn() : NextDirectCallInsnSP;
1586     skip_this = false;
1587   } else if (info->type == kStatic) {
1588     next_call_insn = fast_path ? GetNextSDCallInsn() : NextStaticCallInsnSP;
1589     skip_this = false;
1590   } else if (info->type == kSuper) {
1591     DCHECK(!fast_path);  // Fast path is a direct call.
1592     next_call_insn = NextSuperCallInsnSP;
1593     skip_this = false;
1594   } else {
1595     DCHECK_EQ(info->type, kVirtual);
1596     next_call_insn = fast_path ? NextVCallInsn : NextVCallInsnSP;
1597     skip_this = fast_path;
1598   }
1599   call_state = GenDalvikArgs(info, call_state, p_null_ck,
1600                              next_call_insn, target_method, method_info.VTableIndex(),
1601                              method_info.DirectCode(), method_info.DirectMethod(),
1602                              original_type, skip_this);
1603   // Finish any part of the call sequence that was not interleaved with argument loading.
1604   while (call_state >= 0) {
1605     call_state = next_call_insn(cu_, info, call_state, target_method, method_info.VTableIndex(),
1606                                 method_info.DirectCode(), method_info.DirectMethod(),
1607                                 original_type);
1608   }
1609   LIR* call_insn = GenCallInsn(method_info);
1610   MarkSafepointPC(call_insn);
1611 
1612   FreeCallTemps();
1613   if (info->result.location != kLocInvalid) {
1614     // We have a following MOVE_RESULT - do it now.
1615     RegisterClass reg_class = is_string_init ? kRefReg :
1616         ShortyToRegClass(mir_graph_->GetShortyFromMethodReference(info->method_ref)[0]);
1617     if (info->result.wide) {
1618       RegLocation ret_loc = GetReturnWide(reg_class);
1619       StoreValueWide(info->result, ret_loc);
1620     } else {
1621       RegLocation ret_loc = GetReturn(reg_class);
1622       StoreValue(info->result, ret_loc);
1623     }
1624   }
1625 }
1626 
1627 }  // namespace art
1628