1 /*
2  * Copyright (C) 2012 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #ifndef ART_COMPILER_DEX_QUICK_MIR_TO_LIR_H_
18 #define ART_COMPILER_DEX_QUICK_MIR_TO_LIR_H_
19 
20 #include "invoke_type.h"
21 #include "compiled_method.h"
22 #include "dex/compiler_enums.h"
23 #include "dex/compiler_ir.h"
24 #include "dex/reg_location.h"
25 #include "dex/reg_storage.h"
26 #include "dex/backend.h"
27 #include "dex/quick/resource_mask.h"
28 #include "driver/compiler_driver.h"
29 #include "instruction_set.h"
30 #include "leb128.h"
31 #include "entrypoints/quick/quick_entrypoints_enum.h"
32 #include "safe_map.h"
33 #include "utils/array_ref.h"
34 #include "utils/arena_allocator.h"
35 #include "utils/arena_containers.h"
36 #include "utils/growable_array.h"
37 #include "utils/stack_checks.h"
38 
39 namespace art {
40 
41 /*
42  * TODO: refactoring pass to move these (and other) typedefs towards usage style of runtime to
43  * add type safety (see runtime/offsets.h).
44  */
45 typedef uint32_t DexOffset;          // Dex offset in code units.
46 typedef uint16_t NarrowDexOffset;    // For use in structs, Dex offsets range from 0 .. 0xffff.
47 typedef uint32_t CodeOffset;         // Native code offset in bytes.
48 
49 // Set to 1 to measure cost of suspend check.
50 #define NO_SUSPEND 0
51 
52 #define IS_BINARY_OP         (1ULL << kIsBinaryOp)
53 #define IS_BRANCH            (1ULL << kIsBranch)
54 #define IS_IT                (1ULL << kIsIT)
55 #define IS_MOVE              (1ULL << kIsMoveOp)
56 #define IS_LOAD              (1ULL << kMemLoad)
57 #define IS_QUAD_OP           (1ULL << kIsQuadOp)
58 #define IS_QUIN_OP           (1ULL << kIsQuinOp)
59 #define IS_SEXTUPLE_OP       (1ULL << kIsSextupleOp)
60 #define IS_STORE             (1ULL << kMemStore)
61 #define IS_TERTIARY_OP       (1ULL << kIsTertiaryOp)
62 #define IS_UNARY_OP          (1ULL << kIsUnaryOp)
63 #define IS_VOLATILE          (1ULL << kMemVolatile)
64 #define NEEDS_FIXUP          (1ULL << kPCRelFixup)
65 #define NO_OPERAND           (1ULL << kNoOperand)
66 #define REG_DEF0             (1ULL << kRegDef0)
67 #define REG_DEF1             (1ULL << kRegDef1)
68 #define REG_DEF2             (1ULL << kRegDef2)
69 #define REG_DEFA             (1ULL << kRegDefA)
70 #define REG_DEFD             (1ULL << kRegDefD)
71 #define REG_DEF_FPCS_LIST0   (1ULL << kRegDefFPCSList0)
72 #define REG_DEF_FPCS_LIST2   (1ULL << kRegDefFPCSList2)
73 #define REG_DEF_LIST0        (1ULL << kRegDefList0)
74 #define REG_DEF_LIST1        (1ULL << kRegDefList1)
75 #define REG_DEF_LR           (1ULL << kRegDefLR)
76 #define REG_DEF_SP           (1ULL << kRegDefSP)
77 #define REG_USE0             (1ULL << kRegUse0)
78 #define REG_USE1             (1ULL << kRegUse1)
79 #define REG_USE2             (1ULL << kRegUse2)
80 #define REG_USE3             (1ULL << kRegUse3)
81 #define REG_USE4             (1ULL << kRegUse4)
82 #define REG_USEA             (1ULL << kRegUseA)
83 #define REG_USEC             (1ULL << kRegUseC)
84 #define REG_USED             (1ULL << kRegUseD)
85 #define REG_USEB             (1ULL << kRegUseB)
86 #define REG_USE_FPCS_LIST0   (1ULL << kRegUseFPCSList0)
87 #define REG_USE_FPCS_LIST2   (1ULL << kRegUseFPCSList2)
88 #define REG_USE_LIST0        (1ULL << kRegUseList0)
89 #define REG_USE_LIST1        (1ULL << kRegUseList1)
90 #define REG_USE_LR           (1ULL << kRegUseLR)
91 #define REG_USE_PC           (1ULL << kRegUsePC)
92 #define REG_USE_SP           (1ULL << kRegUseSP)
93 #define SETS_CCODES          (1ULL << kSetsCCodes)
94 #define USES_CCODES          (1ULL << kUsesCCodes)
95 #define USE_FP_STACK         (1ULL << kUseFpStack)
96 #define REG_USE_LO           (1ULL << kUseLo)
97 #define REG_USE_HI           (1ULL << kUseHi)
98 #define REG_DEF_LO           (1ULL << kDefLo)
99 #define REG_DEF_HI           (1ULL << kDefHi)
100 #define SCALED_OFFSET_X0     (1ULL << kMemScaledx0)
101 #define SCALED_OFFSET_X2     (1ULL << kMemScaledx2)
102 #define SCALED_OFFSET_X4     (1ULL << kMemScaledx4)
103 
104 // Special load/stores
105 #define IS_LOADX             (IS_LOAD | IS_VOLATILE)
106 #define IS_LOAD_OFF          (IS_LOAD | SCALED_OFFSET_X0)
107 #define IS_LOAD_OFF2         (IS_LOAD | SCALED_OFFSET_X2)
108 #define IS_LOAD_OFF4         (IS_LOAD | SCALED_OFFSET_X4)
109 
110 #define IS_STOREX            (IS_STORE | IS_VOLATILE)
111 #define IS_STORE_OFF         (IS_STORE | SCALED_OFFSET_X0)
112 #define IS_STORE_OFF2        (IS_STORE | SCALED_OFFSET_X2)
113 #define IS_STORE_OFF4        (IS_STORE | SCALED_OFFSET_X4)
114 
115 // Common combo register usage patterns.
116 #define REG_DEF01            (REG_DEF0 | REG_DEF1)
117 #define REG_DEF012           (REG_DEF0 | REG_DEF1 | REG_DEF2)
118 #define REG_DEF01_USE2       (REG_DEF0 | REG_DEF1 | REG_USE2)
119 #define REG_DEF0_USE01       (REG_DEF0 | REG_USE01)
120 #define REG_DEF0_USE0        (REG_DEF0 | REG_USE0)
121 #define REG_DEF0_USE12       (REG_DEF0 | REG_USE12)
122 #define REG_DEF0_USE123      (REG_DEF0 | REG_USE123)
123 #define REG_DEF0_USE1        (REG_DEF0 | REG_USE1)
124 #define REG_DEF0_USE2        (REG_DEF0 | REG_USE2)
125 #define REG_DEFAD_USEAD      (REG_DEFAD_USEA | REG_USED)
126 #define REG_DEFAD_USEA       (REG_DEFA_USEA | REG_DEFD)
127 #define REG_DEFA_USEA        (REG_DEFA | REG_USEA)
128 #define REG_USE012           (REG_USE01 | REG_USE2)
129 #define REG_USE014           (REG_USE01 | REG_USE4)
130 #define REG_USE01            (REG_USE0 | REG_USE1)
131 #define REG_USE02            (REG_USE0 | REG_USE2)
132 #define REG_USE12            (REG_USE1 | REG_USE2)
133 #define REG_USE23            (REG_USE2 | REG_USE3)
134 #define REG_USE123           (REG_USE1 | REG_USE2 | REG_USE3)
135 
136 // TODO: #includes need a cleanup
137 #ifndef INVALID_SREG
138 #define INVALID_SREG (-1)
139 #endif
140 
141 struct BasicBlock;
142 struct CallInfo;
143 struct CompilationUnit;
144 struct InlineMethod;
145 struct MIR;
146 struct LIR;
147 struct RegisterInfo;
148 class DexFileMethodInliner;
149 class MIRGraph;
150 class Mir2Lir;
151 
152 typedef int (*NextCallInsn)(CompilationUnit*, CallInfo*, int,
153                             const MethodReference& target_method,
154                             uint32_t method_idx, uintptr_t direct_code,
155                             uintptr_t direct_method, InvokeType type);
156 
157 typedef std::vector<uint8_t> CodeBuffer;
158 
159 struct UseDefMasks {
160   const ResourceMask* use_mask;        // Resource mask for use.
161   const ResourceMask* def_mask;        // Resource mask for def.
162 };
163 
164 struct AssemblyInfo {
165   LIR* pcrel_next;           // Chain of LIR nodes needing pc relative fixups.
166 };
167 
168 struct LIR {
169   CodeOffset offset;             // Offset of this instruction.
170   NarrowDexOffset dalvik_offset;   // Offset of Dalvik opcode in code units (16-bit words).
171   int16_t opcode;
172   LIR* next;
173   LIR* prev;
174   LIR* target;
175   struct {
176     unsigned int alias_info:17;  // For Dalvik register disambiguation.
177     bool is_nop:1;               // LIR is optimized away.
178     unsigned int size:4;         // Note: size of encoded instruction is in bytes.
179     bool use_def_invalid:1;      // If true, masks should not be used.
180     unsigned int generation:1;   // Used to track visitation state during fixup pass.
181     unsigned int fixup:8;        // Fixup kind.
182   } flags;
183   union {
184     UseDefMasks m;               // Use & Def masks used during optimization.
185     AssemblyInfo a;              // Instruction info used during assembly phase.
186   } u;
187   int32_t operands[5];           // [0..4] = [dest, src1, src2, extra, extra2].
188 };
189 
190 // Target-specific initialization.
191 Mir2Lir* ArmCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
192                           ArenaAllocator* const arena);
193 Mir2Lir* Arm64CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
194                             ArenaAllocator* const arena);
195 Mir2Lir* MipsCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
196                           ArenaAllocator* const arena);
197 Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
198                           ArenaAllocator* const arena);
199 
200 // Utility macros to traverse the LIR list.
201 #define NEXT_LIR(lir) (lir->next)
202 #define PREV_LIR(lir) (lir->prev)
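/*
 * Illustrative sketch (not part of the original header): walking the doubly linked LIR
 * list with the macros above, e.g. to count instructions that survived optimization.
 * "head_lir" is a hypothetical starting node.
 *
 *   int count = 0;
 *   for (LIR* lir = head_lir; lir != nullptr; lir = NEXT_LIR(lir)) {
 *     if (!lir->flags.is_nop) {
 *       count++;
 *     }
 *   }
 */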
203 
204 // Defines for alias_info (tracks Dalvik register references).
205 #define DECODE_ALIAS_INFO_REG(X)        (X & 0xffff)
206 #define DECODE_ALIAS_INFO_WIDE_FLAG     (0x10000)
207 #define DECODE_ALIAS_INFO_WIDE(X)       ((X & DECODE_ALIAS_INFO_WIDE_FLAG) ? 1 : 0)
208 #define ENCODE_ALIAS_INFO(REG, ISWIDE)  (REG | (ISWIDE ? DECODE_ALIAS_INFO_WIDE_FLAG : 0))
209 
210 #define ENCODE_REG_PAIR(low_reg, high_reg) ((low_reg & 0xff) | ((high_reg & 0xff) << 8))
211 #define DECODE_REG_PAIR(both_regs, low_reg, high_reg) \
212   do { \
213     low_reg = both_regs & 0xff; \
214     high_reg = (both_regs >> 8) & 0xff; \
215   } while (false)
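/*
 * Illustrative sketch (not part of the original header): how the alias_info and
 * register-pair macros above round-trip their encodings.  The register numbers are
 * made up for the example.
 *
 *   uint32_t alias = ENCODE_ALIAS_INFO(5, true);   // Wide Dalvik value starting at v5.
 *   int v_reg = DECODE_ALIAS_INFO_REG(alias);      // 5
 *   int wide  = DECODE_ALIAS_INFO_WIDE(alias);     // 1
 *
 *   int both = ENCODE_REG_PAIR(2, 3);              // Pack low/high reg numbers into one operand.
 *   int low, high;
 *   DECODE_REG_PAIR(both, low, high);              // low == 2, high == 3
 */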
216 
217 // Mask to denote sreg as the start of a 64-bit item.  Must not interfere with low 16 bits.
218 #define STARTING_WIDE_SREG 0x10000
219 
220 // TODO: replace these macros
221 #define SLOW_FIELD_PATH (cu_->enable_debug & (1 << kDebugSlowFieldPath))
222 #define SLOW_INVOKE_PATH (cu_->enable_debug & (1 << kDebugSlowInvokePath))
223 #define SLOW_STRING_PATH (cu_->enable_debug & (1 << kDebugSlowStringPath))
224 #define SLOW_TYPE_PATH (cu_->enable_debug & (1 << kDebugSlowTypePath))
225 #define EXERCISE_SLOWEST_STRING_PATH (cu_->enable_debug & (1 << kDebugSlowestStringPath))
226 
227 class Mir2Lir : public Backend {
228   public:
229     static constexpr bool kFailOnSizeError = true && kIsDebugBuild;
230     static constexpr bool kReportSizeError = true && kIsDebugBuild;
231 
232     // TODO: If necessary, this could be made target-dependent.
233     static constexpr uint16_t kSmallSwitchThreshold = 5;
234 
235     /*
236      * Auxiliary information describing the location of data embedded in the Dalvik
237      * byte code stream.
238      */
239     struct EmbeddedData {
240       CodeOffset offset;        // Code offset of data block.
241       const uint16_t* table;      // Original dex data.
242       DexOffset vaddr;            // Dalvik offset of parent opcode.
243     };
244 
245     struct FillArrayData : EmbeddedData {
246       int32_t size;
247     };
248 
249     struct SwitchTable : EmbeddedData {
250       LIR* anchor;                // Reference instruction for relative offsets.
251       LIR** targets;              // Array of case targets.
252     };
253 
254     /* Static register use counts */
255     struct RefCounts {
256       int count;
257       int s_reg;
258     };
259 
260     /*
261      * Data structure tracking the mapping between a Dalvik value (32 or 64 bits)
262      * and native register storage.  The primary purpose is to reuse previously
263      * loaded values, if possible, and otherwise to keep the value in register
264      * storage as long as possible.
265      *
266      * NOTE 1: wide_value refers to the width of the Dalvik value contained in
267      * this register (or pair).  For example, a 64-bit register containing a 32-bit
268      * Dalvik value would have wide_value==false even though the storage container itself
269      * is wide.  Similarly, a 32-bit register containing half of a 64-bit Dalvik value
270      * would have wide_value==true (and additionally would have its partner field set to the
271      * other half whose wide_value field would also be true).
272      *
273      * NOTE 2: In the case of a register pair, you can determine which of the partners
274      * is the low half by looking at the s_reg names.  The high s_reg will equal low_sreg + 1.
275      *
276      * NOTE 3: In the case of a 64-bit register holding a Dalvik wide value, wide_value
277      * will be true and partner==self.  s_reg refers to the low-order word of the Dalvik
278      * value, and the s_reg of the high word is implied (s_reg + 1).
279      *
280      * NOTE 4: The reg and is_temp fields should always be correct.  If is_temp is false no
281      * other fields have meaning. [perhaps not true, wide should work for promoted regs?]
282      * If is_temp==true and live==false, no other fields have
283      * meaning.  If is_temp==true and live==true, wide_value, partner, dirty, s_reg, def_start
284      * and def_end describe the relationship between the temp register/register pair and
285      * the Dalvik value[s] described by s_reg/s_reg+1.
286      *
287      * The fields used_storage, master_storage and storage_mask are used to track allocation
288      * in light of potential aliasing.  For example, consider Arm's d2, which overlaps s4 & s5.
289      * d2's storage mask would be 0x00000003, the two low-order bits denoting 64 bits of
290      * storage use.  For s4, it would be 0x00000001; for s5, 0x00000002.  These values should not
291      * change once initialized.  The "used_storage" field tracks current allocation status.
292      * Although each record contains this field, only the field from the largest member of
293      * an aliased group is used.  In our case, it would be d2's.  The master_storage pointer
294      * of d2, s4 and s5 would all point to d2's used_storage field.  Each bit in used_storage
295      * represents 32 bits of storage.  d2's used_storage would be initialized to 0xfffffffc.
296      * Then, if we wanted to determine whether s4 could be allocated, we would "and"
297      * s4's storage_mask with s4's *master_storage.  If the result is zero, s4 is free;
298      * to allocate it: *master_storage |= storage_mask.  To free: *master_storage &= ~storage_mask.
299      *
300      * For an X86 vector register example, storage_mask would be:
301      *    0x00000001 for 32-bit view of xmm1
302      *    0x00000003 for 64-bit view of xmm1
303      *    0x0000000f for 128-bit view of xmm1
304      *    0x000000ff for 256-bit view of ymm1   // future expansion, if needed
305      *    0x0000ffff for 512-bit view of ymm1   // future expansion, if needed
306      *    0xffffffff for 1024-bit view of ymm1  // future expansion, if needed
307      *
308      * The "liveness" of a register is handled in a similar way.  The liveness_ storage is
309      * held in the widest member of an aliased set.  Note, though, that for a temp register to
310      * be reused as live, it must both be marked live and the associated SReg() must match the
311      * desired s_reg.  This gets a little complicated when dealing with aliased registers.  All
312      * members of an aliased set will share the same liveness flags, but each will individually
313      * maintain s_reg_.  In this way we can know that at least one member of an
314      * aliased set is live, but will only fully match on the appropriate alias view.  For example,
315      * if Arm d1 is live as a double and has s_reg_ set to Dalvik v8 (which also implies v9
316      * because it is wide), its aliases s2 and s3 will show as live, but will have
317      * s_reg_ == INVALID_SREG.  An attempt to later AllocLiveReg() of v9 with a single-precision
318      * view will fail because although s3's liveness bit is set, its s_reg_ will not match v9.
319      * This will cause all members of the aliased set to be clobbered and AllocLiveReg() will
320      * report that v9 is currently not live as a single (which is what we want).
321      *
322      * NOTE: the x86 usage is still somewhat in flux.  There are competing notions of how
323      * to treat xmm registers:
324      *     1. Treat them all as 128 bits wide, but denote how much data is used via the bytes field.
325      *         o This more closely matches reality, but means you'd need to be able to get
326      *           to the associated RegisterInfo struct to figure out how it's being used.
327      *         o This is how 64-bit core registers will be used - always 64 bits, but the
328      *           "bytes" field will be 4 for 32-bit usage and 8 for 64-bit usage.
329      *     2. View the xmm registers based on contents.
330      *         o A single in an xmm2 register would be k32BitVector, while a double in xmm2 would
331      *           be a k64BitVector.
332      *         o Note that the two uses above would be considered distinct registers (but with
333      *           the aliasing mechanism, we could detect interference).
334      *         o This is how aliased double and single float registers will be handled on
335      *           Arm and MIPS.
336      * Working plan is, for all targets, to follow mechanism 1 for 64-bit core registers, and
337      * mechanism 2 for aliased float registers and x86 vector registers.
338      */
339     class RegisterInfo {
340      public:
341       RegisterInfo(RegStorage r, const ResourceMask& mask = kEncodeAll);
342       ~RegisterInfo() {}
343       static void* operator new(size_t size, ArenaAllocator* arena) {
344         return arena->Alloc(size, kArenaAllocRegAlloc);
345       }
346 
347       static const uint32_t k32SoloStorageMask     = 0x00000001;
348       static const uint32_t kLowSingleStorageMask  = 0x00000001;
349       static const uint32_t kHighSingleStorageMask = 0x00000002;
350       static const uint32_t k64SoloStorageMask     = 0x00000003;
351       static const uint32_t k128SoloStorageMask    = 0x0000000f;
352       static const uint32_t k256SoloStorageMask    = 0x000000ff;
353       static const uint32_t k512SoloStorageMask    = 0x0000ffff;
354       static const uint32_t k1024SoloStorageMask   = 0xffffffff;
355 
356       bool InUse() { return (storage_mask_ & master_->used_storage_) != 0; }
357       void MarkInUse() { master_->used_storage_ |= storage_mask_; }
358       void MarkFree() { master_->used_storage_ &= ~storage_mask_; }
359       // No part of the containing storage is live in this view.
360       bool IsDead() { return (master_->liveness_ & storage_mask_) == 0; }
361       // Liveness of this view matches.  Note: not equivalent to !IsDead().
362       bool IsLive() { return (master_->liveness_ & storage_mask_) == storage_mask_; }
363       void MarkLive(int s_reg) {
364         // TODO: Anything useful to assert here?
365         s_reg_ = s_reg;
366         master_->liveness_ |= storage_mask_;
367       }
368       void MarkDead() {
369         if (SReg() != INVALID_SREG) {
370           s_reg_ = INVALID_SREG;
371           master_->liveness_ &= ~storage_mask_;
372           ResetDefBody();
373         }
374       }
375       RegStorage GetReg() { return reg_; }
376       void SetReg(RegStorage reg) { reg_ = reg; }
377       bool IsTemp() { return is_temp_; }
378       void SetIsTemp(bool val) { is_temp_ = val; }
379       bool IsWide() { return wide_value_; }
380       void SetIsWide(bool val) {
381         wide_value_ = val;
382         if (!val) {
383           // If not wide, reset partner to self.
384           SetPartner(GetReg());
385         }
386       }
387       bool IsDirty() { return dirty_; }
388       void SetIsDirty(bool val) { dirty_ = val; }
389       RegStorage Partner() { return partner_; }
390       void SetPartner(RegStorage partner) { partner_ = partner; }
391       int SReg() { return (!IsTemp() || IsLive()) ? s_reg_ : INVALID_SREG; }
392       const ResourceMask& DefUseMask() { return def_use_mask_; }
393       void SetDefUseMask(const ResourceMask& def_use_mask) { def_use_mask_ = def_use_mask; }
394       RegisterInfo* Master() { return master_; }
395       void SetMaster(RegisterInfo* master) {
396         master_ = master;
397         if (master != this) {
398           master_->aliased_ = true;
399           DCHECK(alias_chain_ == nullptr);
400           alias_chain_ = master_->alias_chain_;
401           master_->alias_chain_ = this;
402         }
403       }
404       bool IsAliased() { return aliased_; }
405       RegisterInfo* GetAliasChain() { return alias_chain_; }
406       uint32_t StorageMask() { return storage_mask_; }
407       void SetStorageMask(uint32_t storage_mask) { storage_mask_ = storage_mask; }
408       LIR* DefStart() { return def_start_; }
409       void SetDefStart(LIR* def_start) { def_start_ = def_start; }
410       LIR* DefEnd() { return def_end_; }
411       void SetDefEnd(LIR* def_end) { def_end_ = def_end; }
412       void ResetDefBody() { def_start_ = def_end_ = nullptr; }
413       // Find member of aliased set matching storage_used; return nullptr if none.
414       RegisterInfo* FindMatchingView(uint32_t storage_used) {
415         RegisterInfo* res = Master();
416         for (; res != nullptr; res = res->GetAliasChain()) {
417           if (res->StorageMask() == storage_used)
418             break;
419         }
420         return res;
421       }
422 
423      private:
424       RegStorage reg_;
425       bool is_temp_;               // Can allocate as temp?
426       bool wide_value_;            // Holds a Dalvik wide value (either itself, or part of a pair).
427       bool dirty_;                 // If live, is it dirty?
428       bool aliased_;               // Is this the master for other aliased RegisterInfo's?
429       RegStorage partner_;         // If wide_value, other reg of pair or self if 64-bit register.
430       int s_reg_;                  // Name of live value.
431       ResourceMask def_use_mask_;  // Resources for this element.
432       uint32_t used_storage_;      // 1 bit per 4 bytes of storage. Unused by aliases.
433       uint32_t liveness_;          // 1 bit per 4 bytes of storage. Unused by aliases.
434       RegisterInfo* master_;       // Pointer to controlling storage mask.
435       uint32_t storage_mask_;      // Track allocation of sub-units.
436       LIR *def_start_;             // Starting inst in last def sequence.
437       LIR *def_end_;               // Ending inst in last def sequence.
438       RegisterInfo* alias_chain_;  // Chain of aliased registers.
439     };
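    /*
     * Illustrative sketch (not part of the original header): the aliased-storage
     * bookkeeping described in the comment above, spelled out for Arm's d2/s4/s5.
     * The RegisterInfo pointers are assumed to come from a target's register pool.
     *
     *   RegisterInfo* d2 = ...;                // Master of the set, storage mask 0x00000003.
     *   RegisterInfo* s4 = ...;                // Storage mask 0x00000001, s4->Master() == d2.
     *
     *   bool s4_free = !s4->InUse();           // (storage_mask_ & master_->used_storage_) == 0?
     *   s4->MarkInUse();                       // *master_storage |= storage_mask
     *   s4->MarkFree();                        // *master_storage &= ~storage_mask
     *
     *   // Liveness works the same way: if d2 is live as a double, s4/s5 report IsLive(),
     *   // but their s_reg_ stays INVALID_SREG, so a single-precision AllocLiveReg() will
     *   // not match and the set gets clobbered instead.
     */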
440 
441     class RegisterPool {
442      public:
443       RegisterPool(Mir2Lir* m2l, ArenaAllocator* arena,
444                    const ArrayRef<const RegStorage>& core_regs,
445                    const ArrayRef<const RegStorage>& core64_regs,
446                    const ArrayRef<const RegStorage>& sp_regs,
447                    const ArrayRef<const RegStorage>& dp_regs,
448                    const ArrayRef<const RegStorage>& reserved_regs,
449                    const ArrayRef<const RegStorage>& reserved64_regs,
450                    const ArrayRef<const RegStorage>& core_temps,
451                    const ArrayRef<const RegStorage>& core64_temps,
452                    const ArrayRef<const RegStorage>& sp_temps,
453                    const ArrayRef<const RegStorage>& dp_temps);
454       ~RegisterPool() {}
455       static void* operator new(size_t size, ArenaAllocator* arena) {
456         return arena->Alloc(size, kArenaAllocRegAlloc);
457       }
458       void ResetNextTemp() {
459         next_core_reg_ = 0;
460         next_sp_reg_ = 0;
461         next_dp_reg_ = 0;
462       }
463       GrowableArray<RegisterInfo*> core_regs_;
464       int next_core_reg_;
465       GrowableArray<RegisterInfo*> core64_regs_;
466       int next_core64_reg_;
467       GrowableArray<RegisterInfo*> sp_regs_;    // Single precision float.
468       int next_sp_reg_;
469       GrowableArray<RegisterInfo*> dp_regs_;    // Double precision float.
470       int next_dp_reg_;
471       GrowableArray<RegisterInfo*>* ref_regs_;  // Points to core_regs_ or core64_regs_
472       int* next_ref_reg_;
473 
474      private:
475       Mir2Lir* const m2l_;
476     };
477 
478     struct PromotionMap {
479       RegLocationType core_location:3;
480       uint8_t core_reg;
481       RegLocationType fp_location:3;
482       uint8_t fp_reg;
483       bool first_in_pair;
484     };
485 
486     //
487     // Slow paths.  This object is used to generate a sequence of code that is executed in the
488     // slow path.  For example, resolving a string or class is slow, and it will only be executed
489     // once (after that it is resolved and doesn't need to be done again).  We want slow paths
490     // to be placed out-of-line, and not require a (probably mispredicted) conditional forward
491     // branch over them.
492     //
493     // If you want to create a slow path, declare a class derived from LIRSlowPath and provide
494     // the Compile() function that will be called near the end of the code generated by the
495     // method.
496     //
497     // The basic flow for a slow path is:
498     //
499     //     CMP reg, #value
500     //     BEQ fromfast
501     //   cont:
502     //     ...
503     //     fast path code
504     //     ...
505     //     more code
506     //     ...
507     //     RETURN
508     //
509     //   fromfast:
510     //     ...
511     //     slow path code
512     //     ...
513     //     B cont
514     //
515     // So we need two labels and two branches.  The first label (called fromfast) is the target
516     // of the conditional branch into the slow path code.  The second label (called cont) is used
517     // as an unconditional branch target for getting back to the code after the slow path
518     // has completed.
519     //
520 
521     class LIRSlowPath {
522      public:
523       LIRSlowPath(Mir2Lir* m2l, const DexOffset dexpc, LIR* fromfast,
524                   LIR* cont = nullptr) :
525         m2l_(m2l), cu_(m2l->cu_), current_dex_pc_(dexpc), fromfast_(fromfast), cont_(cont) {
526           m2l->StartSlowPath(this);
527       }
528       virtual ~LIRSlowPath() {}
529       virtual void Compile() = 0;
530 
531       static void* operator new(size_t size, ArenaAllocator* arena) {
532         return arena->Alloc(size, kArenaAllocData);
533       }
534 
535       LIR *GetContinuationLabel() {
536         return cont_;
537       }
538 
539       LIR *GetFromFast() {
540         return fromfast_;
541       }
542 
543      protected:
544       LIR* GenerateTargetLabel(int opcode = kPseudoTargetLabel);
545 
546       Mir2Lir* const m2l_;
547       CompilationUnit* const cu_;
548       const DexOffset current_dex_pc_;
549       LIR* const fromfast_;
550       LIR* const cont_;
551     };
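    /*
     * Illustrative sketch (not part of the original header): the shape of a typical
     * LIRSlowPath subclass as described above.  The entrypoint is a placeholder and
     * OpUnconditionalBranch is one of the target-dependent helpers declared further
     * down; real slow paths live in the gen_* files.
     *
     *   class ExampleSlowPath : public LIRSlowPath {
     *    public:
     *     ExampleSlowPath(Mir2Lir* m2l, DexOffset dexpc, LIR* fromfast, LIR* cont)
     *         : LIRSlowPath(m2l, dexpc, fromfast, cont) {}
     *     void Compile() OVERRIDE {
     *       m2l_->ResetRegPool();
     *       m2l_->ResetDefTracking();
     *       GenerateTargetLabel();                                 // Bind the "fromfast" label.
     *       m2l_->CallRuntimeHelper(kQuickTestSuspend, true);      // Placeholder entrypoint.
     *       m2l_->OpUnconditionalBranch(GetContinuationLabel());   // Back to "cont".
     *     }
     *   };
     */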
552 
553     // Helper class for changing mem_ref_type_ until the end of current scope. See mem_ref_type_.
554     class ScopedMemRefType {
555      public:
556       ScopedMemRefType(Mir2Lir* m2l, ResourceMask::ResourceBit new_mem_ref_type)
557           : m2l_(m2l),
558             old_mem_ref_type_(m2l->mem_ref_type_) {
559         m2l_->mem_ref_type_ = new_mem_ref_type;
560       }
561 
562       ~ScopedMemRefType() {
563         m2l_->mem_ref_type_ = old_mem_ref_type_;
564       }
565 
566      private:
567       Mir2Lir* const m2l_;
568       ResourceMask::ResourceBit old_mem_ref_type_;
569 
570       DISALLOW_COPY_AND_ASSIGN(ScopedMemRefType);
571     };
572 
573     virtual ~Mir2Lir() {}
574 
575     /**
576      * @brief Decodes the LIR offset.
577      * @return Returns the scaled offset of LIR.
578      */
579     virtual size_t GetInstructionOffset(LIR* lir);
580 
581     int32_t s4FromSwitchData(const void* switch_data) {
582       return *reinterpret_cast<const int32_t*>(switch_data);
583     }
584 
585     /*
586      * TODO: this is a trace JIT vestige, and its use should be reconsidered.  At the time
587      * it was introduced, it was intended to be a quick best guess of type without having to
588      * take the time to do type analysis.  Currently, though, we have a much better idea of
589      * the types of Dalvik virtual registers.  Instead of using this for a best guess, why not
590      * just use our knowledge of type to select the most appropriate register class?
591      */
592     RegisterClass RegClassBySize(OpSize size) {
593       if (size == kReference) {
594         return kRefReg;
595       } else {
596         return (size == kUnsignedHalf || size == kSignedHalf || size == kUnsignedByte ||
597                 size == kSignedByte) ? kCoreReg : kAnyReg;
598       }
599     }
600 
601     size_t CodeBufferSizeInBytes() {
602       return code_buffer_.size() / sizeof(code_buffer_[0]);
603     }
604 
605     static bool IsPseudoLirOp(int opcode) {
606       return (opcode < 0);
607     }
608 
609     /*
610      * LIR operands are 32-bit integers.  Sometimes (especially for managing
611      * instructions which require PC-relative fixups), we need the operands to carry
612      * pointers.  To do this, we assign these pointers an index in pointer_storage_, and
613      * hold that index in the operand array.
614      * TUNING: If use of these utilities becomes more common on 32-bit builds, it
615      * may be worth conditionally-compiling a set of identity functions here.
616      */
617     uint32_t WrapPointer(void* pointer) {
618       uint32_t res = pointer_storage_.Size();
619       pointer_storage_.Insert(pointer);
620       return res;
621     }
622 
623     void* UnwrapPointer(size_t index) {
624       return pointer_storage_.Get(index);
625     }
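    /*
     * Illustrative sketch (not part of the original header): round-tripping a pointer
     * through a 32-bit LIR operand with the helpers above.  "anchor" is hypothetical.
     *
     *   LIR* anchor = ...;                     // Instruction that a later fixup must find.
     *   uint32_t idx = WrapPointer(anchor);    // Index into pointer_storage_, fits in an operand.
     *   ...
     *   LIR* restored = reinterpret_cast<LIR*>(UnwrapPointer(idx));
     */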
626 
627     // strdup(), but allocates from the arena.
628     char* ArenaStrdup(const char* str) {
629       size_t len = strlen(str) + 1;
630       char* res = reinterpret_cast<char*>(arena_->Alloc(len, kArenaAllocMisc));
631       if (res != NULL) {
632         strncpy(res, str, len);
633       }
634       return res;
635     }
636 
637     // Shared by all targets - implemented in codegen_util.cc
638     void AppendLIR(LIR* lir);
639     void InsertLIRBefore(LIR* current_lir, LIR* new_lir);
640     void InsertLIRAfter(LIR* current_lir, LIR* new_lir);
641 
642     /**
643      * @brief Provides the maximum number of compiler temporaries that the backend can/wants
644      * to place in a frame.
645      * @return Returns the maximum number of compiler temporaries.
646      */
647     size_t GetMaxPossibleCompilerTemps() const;
648 
649     /**
650      * @brief Provides the number of bytes needed in frame for spilling of compiler temporaries.
651      * @return Returns the size in bytes for space needed for compiler temporary spill region.
652      */
653     size_t GetNumBytesForCompilerTempSpillRegion();
654 
655     DexOffset GetCurrentDexPc() const {
656       return current_dalvik_offset_;
657     }
658 
659     RegisterClass ShortyToRegClass(char shorty_type);
660     RegisterClass LocToRegClass(RegLocation loc);
661     int ComputeFrameSize();
662     virtual void Materialize();
663     virtual CompiledMethod* GetCompiledMethod();
664     void MarkSafepointPC(LIR* inst);
665     void MarkSafepointPCAfter(LIR* after);
666     void SetupResourceMasks(LIR* lir);
667     void SetMemRefType(LIR* lir, bool is_load, int mem_type);
668     void AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load, bool is64bit);
669     void SetupRegMask(ResourceMask* mask, int reg);
670     void ClearRegMask(ResourceMask* mask, int reg);
671     void DumpLIRInsn(LIR* arg, unsigned char* base_addr);
672     void EliminateLoad(LIR* lir, int reg_id);
673     void DumpDependentInsnPair(LIR* check_lir, LIR* this_lir, const char* type);
674     void DumpPromotionMap();
675     void CodegenDump();
676     LIR* RawLIR(DexOffset dalvik_offset, int opcode, int op0 = 0, int op1 = 0,
677                 int op2 = 0, int op3 = 0, int op4 = 0, LIR* target = NULL);
678     LIR* NewLIR0(int opcode);
679     LIR* NewLIR1(int opcode, int dest);
680     LIR* NewLIR2(int opcode, int dest, int src1);
681     LIR* NewLIR2NoDest(int opcode, int src, int info);
682     LIR* NewLIR3(int opcode, int dest, int src1, int src2);
683     LIR* NewLIR4(int opcode, int dest, int src1, int src2, int info);
684     LIR* NewLIR5(int opcode, int dest, int src1, int src2, int info1, int info2);
685     LIR* ScanLiteralPool(LIR* data_target, int value, unsigned int delta);
686     LIR* ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi);
687     LIR* ScanLiteralPoolMethod(LIR* data_target, const MethodReference& method);
688     LIR* AddWordData(LIR* *constant_list_p, int value);
689     LIR* AddWideData(LIR* *constant_list_p, int val_lo, int val_hi);
690     void ProcessSwitchTables();
691     void DumpSparseSwitchTable(const uint16_t* table);
692     void DumpPackedSwitchTable(const uint16_t* table);
693     void MarkBoundary(DexOffset offset, const char* inst_str);
694     void NopLIR(LIR* lir);
695     void UnlinkLIR(LIR* lir);
696     bool EvaluateBranch(Instruction::Code opcode, int src1, int src2);
697     bool IsInexpensiveConstant(RegLocation rl_src);
698     ConditionCode FlipComparisonOrder(ConditionCode before);
699     ConditionCode NegateComparison(ConditionCode before);
700     virtual void InstallLiteralPools();
701     void InstallSwitchTables();
702     void InstallFillArrayData();
703     bool VerifyCatchEntries();
704     void CreateMappingTables();
705     void CreateNativeGcMap();
706     int AssignLiteralOffset(CodeOffset offset);
707     int AssignSwitchTablesOffset(CodeOffset offset);
708     int AssignFillArrayDataOffset(CodeOffset offset);
709     virtual LIR* InsertCaseLabel(DexOffset vaddr, int keyVal);
710     void MarkPackedCaseLabels(Mir2Lir::SwitchTable* tab_rec);
711     void MarkSparseCaseLabels(Mir2Lir::SwitchTable* tab_rec);
712 
713     virtual void StartSlowPath(LIRSlowPath* slowpath) {}
714     virtual void BeginInvoke(CallInfo* info) {}
715     virtual void EndInvoke(CallInfo* info) {}
716 
717 
718     // Handle bookkeeping to convert a wide RegLocation to a narrow RegLocation.  No code generated.
719     virtual RegLocation NarrowRegLoc(RegLocation loc);
720 
721     // Shared by all targets - implemented in local_optimizations.cc
722     void ConvertMemOpIntoMove(LIR* orig_lir, RegStorage dest, RegStorage src);
723     void ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir);
724     void ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir);
725     virtual void ApplyLocalOptimizations(LIR* head_lir, LIR* tail_lir);
726 
727     // Shared by all targets - implemented in ralloc_util.cc
728     int GetSRegHi(int lowSreg);
729     bool LiveOut(int s_reg);
730     void SimpleRegAlloc();
731     void ResetRegPool();
732     void CompilerInitPool(RegisterInfo* info, RegStorage* regs, int num);
733     void DumpRegPool(GrowableArray<RegisterInfo*>* regs);
734     void DumpCoreRegPool();
735     void DumpFpRegPool();
736     void DumpRegPools();
737     /* Mark a temp register as dead.  Does not affect allocation state. */
738     void Clobber(RegStorage reg);
739     void ClobberSReg(int s_reg);
740     void ClobberAliases(RegisterInfo* info, uint32_t clobber_mask);
741     int SRegToPMap(int s_reg);
742     void RecordCorePromotion(RegStorage reg, int s_reg);
743     RegStorage AllocPreservedCoreReg(int s_reg);
744     void RecordFpPromotion(RegStorage reg, int s_reg);
745     RegStorage AllocPreservedFpReg(int s_reg);
746     virtual RegStorage AllocPreservedSingle(int s_reg);
747     virtual RegStorage AllocPreservedDouble(int s_reg);
748     RegStorage AllocTempBody(GrowableArray<RegisterInfo*> &regs, int* next_temp, bool required);
749     virtual RegStorage AllocTemp(bool required = true);
750     virtual RegStorage AllocTempWide(bool required = true);
751     virtual RegStorage AllocTempRef(bool required = true);
752     virtual RegStorage AllocTempSingle(bool required = true);
753     virtual RegStorage AllocTempDouble(bool required = true);
754     virtual RegStorage AllocTypedTemp(bool fp_hint, int reg_class, bool required = true);
755     virtual RegStorage AllocTypedTempWide(bool fp_hint, int reg_class, bool required = true);
756     void FlushReg(RegStorage reg);
757     void FlushRegWide(RegStorage reg);
758     RegStorage AllocLiveReg(int s_reg, int reg_class, bool wide);
759     RegStorage FindLiveReg(GrowableArray<RegisterInfo*> &regs, int s_reg);
760     virtual void FreeTemp(RegStorage reg);
761     virtual void FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free);
762     virtual bool IsLive(RegStorage reg);
763     virtual bool IsTemp(RegStorage reg);
764     bool IsPromoted(RegStorage reg);
765     bool IsDirty(RegStorage reg);
766     virtual void LockTemp(RegStorage reg);
767     void ResetDef(RegStorage reg);
768     void NullifyRange(RegStorage reg, int s_reg);
769     void MarkDef(RegLocation rl, LIR *start, LIR *finish);
770     void MarkDefWide(RegLocation rl, LIR *start, LIR *finish);
771     void ResetDefLoc(RegLocation rl);
772     void ResetDefLocWide(RegLocation rl);
773     void ResetDefTracking();
774     void ClobberAllTemps();
775     void FlushSpecificReg(RegisterInfo* info);
776     void FlushAllRegs();
777     bool RegClassMatches(int reg_class, RegStorage reg);
778     void MarkLive(RegLocation loc);
779     void MarkTemp(RegStorage reg);
780     void UnmarkTemp(RegStorage reg);
781     void MarkWide(RegStorage reg);
782     void MarkNarrow(RegStorage reg);
783     void MarkClean(RegLocation loc);
784     void MarkDirty(RegLocation loc);
785     void MarkInUse(RegStorage reg);
786     bool CheckCorePoolSanity();
787     virtual RegLocation UpdateLoc(RegLocation loc);
788     virtual RegLocation UpdateLocWide(RegLocation loc);
789     RegLocation UpdateRawLoc(RegLocation loc);
790 
791     /**
792      * @brief Used to prepare a register location to receive a wide value.
793      * @see EvalLoc
794      * @param loc the location where the value will be stored.
795      * @param reg_class Type of register needed.
796      * @param update Whether the liveness information should be updated.
797      * @return Returns the properly typed temporary in physical register pairs.
798      */
799     virtual RegLocation EvalLocWide(RegLocation loc, int reg_class, bool update);
800 
801     /**
802      * @brief Used to prepare a register location to receive a value.
803      * @param loc the location where the value will be stored.
804      * @param reg_class Type of register needed.
805      * @param update Whether the liveness information should be updated.
806      * @return Returns the properly typed temporary in physical register.
807      */
808     virtual RegLocation EvalLoc(RegLocation loc, int reg_class, bool update);
809 
810     void CountRefs(RefCounts* core_counts, RefCounts* fp_counts, size_t num_regs);
811     void DumpCounts(const RefCounts* arr, int size, const char* msg);
812     void DoPromotion();
813     int VRegOffset(int v_reg);
814     int SRegOffset(int s_reg);
815     RegLocation GetReturnWide(RegisterClass reg_class);
816     RegLocation GetReturn(RegisterClass reg_class);
817     RegisterInfo* GetRegInfo(RegStorage reg);
818 
819     // Shared by all targets - implemented in gen_common.cc.
820     void AddIntrinsicSlowPath(CallInfo* info, LIR* branch, LIR* resume = nullptr);
821     virtual bool HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
822                                   RegLocation rl_src, RegLocation rl_dest, int lit);
823     bool HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit);
824     virtual void HandleSlowPaths();
825     void GenBarrier();
826     void GenDivZeroException();
827     // c_code holds condition code that's generated from testing divisor against 0.
828     void GenDivZeroCheck(ConditionCode c_code);
829     // reg holds divisor.
830     void GenDivZeroCheck(RegStorage reg);
831     void GenArrayBoundsCheck(RegStorage index, RegStorage length);
832     void GenArrayBoundsCheck(int32_t index, RegStorage length);
833     LIR* GenNullCheck(RegStorage reg);
834     void MarkPossibleNullPointerException(int opt_flags);
835     void MarkPossibleNullPointerExceptionAfter(int opt_flags, LIR* after);
836     void MarkPossibleStackOverflowException();
837     void ForceImplicitNullCheck(RegStorage reg, int opt_flags);
838     LIR* GenNullCheck(RegStorage m_reg, int opt_flags);
839     LIR* GenExplicitNullCheck(RegStorage m_reg, int opt_flags);
840     virtual void GenImplicitNullCheck(RegStorage reg, int opt_flags);
841     void GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
842                              RegLocation rl_src2, LIR* taken, LIR* fall_through);
843     void GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src,
844                                  LIR* taken, LIR* fall_through);
845     virtual void GenIntToLong(RegLocation rl_dest, RegLocation rl_src);
846     void GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
847                          RegLocation rl_src);
848     void GenNewArray(uint32_t type_idx, RegLocation rl_dest,
849                      RegLocation rl_src);
850     void GenFilledNewArray(CallInfo* info);
851     void GenSput(MIR* mir, RegLocation rl_src,
852                  bool is_long_or_double, bool is_object);
853     void GenSget(MIR* mir, RegLocation rl_dest,
854                  bool is_long_or_double, bool is_object);
855     void GenIGet(MIR* mir, int opt_flags, OpSize size,
856                  RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double, bool is_object);
857     void GenIPut(MIR* mir, int opt_flags, OpSize size,
858                  RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double, bool is_object);
859     void GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index,
860                         RegLocation rl_src);
861 
862     void GenConstClass(uint32_t type_idx, RegLocation rl_dest);
863     void GenConstString(uint32_t string_idx, RegLocation rl_dest);
864     void GenNewInstance(uint32_t type_idx, RegLocation rl_dest);
865     void GenThrow(RegLocation rl_src);
866     void GenInstanceof(uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src);
867     void GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_src);
868     void GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest,
869                       RegLocation rl_src1, RegLocation rl_src2);
870     virtual void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
871                         RegLocation rl_src1, RegLocation rl_shift);
872     void GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest,
873                           RegLocation rl_src, int lit);
874     virtual void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
875                                 RegLocation rl_src1, RegLocation rl_src2);
876     void GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest, RegLocation rl_src);
877     virtual void GenSuspendTest(int opt_flags);
878     virtual void GenSuspendTestAndBranch(int opt_flags, LIR* target);
879 
880     // This will be overridden by x86 implementation.
881     virtual void GenConstWide(RegLocation rl_dest, int64_t value);
882     virtual void GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
883                        RegLocation rl_src1, RegLocation rl_src2);
884 
885     // Shared by all targets - implemented in gen_invoke.cc.
886     LIR* CallHelper(RegStorage r_tgt, QuickEntrypointEnum trampoline, bool safepoint_pc,
887                     bool use_link = true);
888     RegStorage CallHelperSetup(QuickEntrypointEnum trampoline);
889 
890     void CallRuntimeHelper(QuickEntrypointEnum trampoline, bool safepoint_pc);
891     void CallRuntimeHelperImm(QuickEntrypointEnum trampoline, int arg0, bool safepoint_pc);
892     void CallRuntimeHelperReg(QuickEntrypointEnum trampoline, RegStorage arg0, bool safepoint_pc);
893     void CallRuntimeHelperRegLocation(QuickEntrypointEnum trampoline, RegLocation arg0,
894                                       bool safepoint_pc);
895     void CallRuntimeHelperImmImm(QuickEntrypointEnum trampoline, int arg0, int arg1,
896                                  bool safepoint_pc);
897     void CallRuntimeHelperImmRegLocation(QuickEntrypointEnum trampoline, int arg0, RegLocation arg1,
898                                          bool safepoint_pc);
899     void CallRuntimeHelperRegLocationImm(QuickEntrypointEnum trampoline, RegLocation arg0, int arg1,
900                                          bool safepoint_pc);
901     void CallRuntimeHelperImmReg(QuickEntrypointEnum trampoline, int arg0, RegStorage arg1,
902                                  bool safepoint_pc);
903     void CallRuntimeHelperRegImm(QuickEntrypointEnum trampoline, RegStorage arg0, int arg1,
904                                  bool safepoint_pc);
905     void CallRuntimeHelperImmMethod(QuickEntrypointEnum trampoline, int arg0, bool safepoint_pc);
906     void CallRuntimeHelperRegMethod(QuickEntrypointEnum trampoline, RegStorage arg0,
907                                     bool safepoint_pc);
908     void CallRuntimeHelperRegMethodRegLocation(QuickEntrypointEnum trampoline, RegStorage arg0,
909                                                RegLocation arg2, bool safepoint_pc);
910     void CallRuntimeHelperRegLocationRegLocation(QuickEntrypointEnum trampoline, RegLocation arg0,
911                                                  RegLocation arg1, bool safepoint_pc);
912     void CallRuntimeHelperRegReg(QuickEntrypointEnum trampoline, RegStorage arg0, RegStorage arg1,
913                                  bool safepoint_pc);
914     void CallRuntimeHelperRegRegImm(QuickEntrypointEnum trampoline, RegStorage arg0,
915                                     RegStorage arg1, int arg2, bool safepoint_pc);
916     void CallRuntimeHelperImmMethodRegLocation(QuickEntrypointEnum trampoline, int arg0,
917                                                RegLocation arg2, bool safepoint_pc);
918     void CallRuntimeHelperImmMethodImm(QuickEntrypointEnum trampoline, int arg0, int arg2,
919                                        bool safepoint_pc);
920     void CallRuntimeHelperImmRegLocationRegLocation(QuickEntrypointEnum trampoline, int arg0,
921                                                     RegLocation arg1, RegLocation arg2,
922                                                     bool safepoint_pc);
923     void CallRuntimeHelperRegLocationRegLocationRegLocation(QuickEntrypointEnum trampoline,
924                                                             RegLocation arg0, RegLocation arg1,
925                                                             RegLocation arg2,
926                                                             bool safepoint_pc);
927     void GenInvoke(CallInfo* info);
928     void GenInvokeNoInline(CallInfo* info);
929     virtual void FlushIns(RegLocation* ArgLocs, RegLocation rl_method);
930     virtual int GenDalvikArgsNoRange(CallInfo* info, int call_state, LIR** pcrLabel,
931                              NextCallInsn next_call_insn,
932                              const MethodReference& target_method,
933                              uint32_t vtable_idx,
934                              uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
935                              bool skip_this);
936     virtual int GenDalvikArgsRange(CallInfo* info, int call_state, LIR** pcrLabel,
937                            NextCallInsn next_call_insn,
938                            const MethodReference& target_method,
939                            uint32_t vtable_idx,
940                            uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
941                            bool skip_this);
942 
943     /**
944      * @brief Used to determine the register location of destination.
945      * @details This is needed during generation of inline intrinsics because it finds the
946      * destination of the return, either the physical register or the target of
947      * move-result.
948      * @param info Information about the invoke.
949      * @return Returns the destination location.
950      */
951     RegLocation InlineTarget(CallInfo* info);
952 
953     /**
954      * @brief Used to determine the wide register location of destination.
955      * @see InlineTarget
956      * @param info Information about the invoke.
957      * @return Returns the destination location.
958      */
959     RegLocation InlineTargetWide(CallInfo* info);
960 
961     bool GenInlinedReferenceGetReferent(CallInfo* info);
962     virtual bool GenInlinedCharAt(CallInfo* info);
963     bool GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty);
964     virtual bool GenInlinedReverseBits(CallInfo* info, OpSize size);
965     bool GenInlinedReverseBytes(CallInfo* info, OpSize size);
966     bool GenInlinedAbsInt(CallInfo* info);
967     virtual bool GenInlinedAbsLong(CallInfo* info);
968     virtual bool GenInlinedAbsFloat(CallInfo* info) = 0;
969     virtual bool GenInlinedAbsDouble(CallInfo* info) = 0;
970     bool GenInlinedFloatCvt(CallInfo* info);
971     bool GenInlinedDoubleCvt(CallInfo* info);
972     virtual bool GenInlinedCeil(CallInfo* info);
973     virtual bool GenInlinedFloor(CallInfo* info);
974     virtual bool GenInlinedRint(CallInfo* info);
975     virtual bool GenInlinedRound(CallInfo* info, bool is_double);
976     virtual bool GenInlinedArrayCopyCharArray(CallInfo* info);
977     virtual bool GenInlinedIndexOf(CallInfo* info, bool zero_based);
978     bool GenInlinedStringCompareTo(CallInfo* info);
979     virtual bool GenInlinedCurrentThread(CallInfo* info);
980     bool GenInlinedUnsafeGet(CallInfo* info, bool is_long, bool is_volatile);
981     bool GenInlinedUnsafePut(CallInfo* info, bool is_long, bool is_object,
982                              bool is_volatile, bool is_ordered);
983     virtual int LoadArgRegs(CallInfo* info, int call_state,
984                     NextCallInsn next_call_insn,
985                     const MethodReference& target_method,
986                     uint32_t vtable_idx,
987                     uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
988                     bool skip_this);
989 
990     // Shared by all targets - implemented in gen_loadstore.cc.
991     RegLocation LoadCurrMethod();
992     void LoadCurrMethodDirect(RegStorage r_tgt);
993     virtual LIR* LoadConstant(RegStorage r_dest, int value);
994     // Natural word size.
995     virtual LIR* LoadWordDisp(RegStorage r_base, int displacement, RegStorage r_dest) {
996       return LoadBaseDisp(r_base, displacement, r_dest, kWord, kNotVolatile);
997     }
998     // Load 32 bits, regardless of target.
999     virtual LIR* Load32Disp(RegStorage r_base, int displacement, RegStorage r_dest) {
1000       return LoadBaseDisp(r_base, displacement, r_dest, k32, kNotVolatile);
1001     }
1002     // Load a reference at base + displacement and decompress into register.
1003     virtual LIR* LoadRefDisp(RegStorage r_base, int displacement, RegStorage r_dest,
1004                              VolatileKind is_volatile) {
1005       return LoadBaseDisp(r_base, displacement, r_dest, kReference, is_volatile);
1006     }
1007     // Load a reference at base + index and decompress into register.
1008     virtual LIR* LoadRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
1009                                 int scale) {
1010       return LoadBaseIndexed(r_base, r_index, r_dest, scale, kReference);
1011     }
1012     // Load Dalvik value with 32-bit memory storage.  If compressed object reference, decompress.
1013     virtual RegLocation LoadValue(RegLocation rl_src, RegisterClass op_kind);
1014     // Same as above, but derive the target register class from the location record.
1015     virtual RegLocation LoadValue(RegLocation rl_src);
1016     // Load Dalvik value with 64-bit memory storage.
1017     virtual RegLocation LoadValueWide(RegLocation rl_src, RegisterClass op_kind);
1018     // Load Dalvik value with 32-bit memory storage.  If compressed object reference, decompress.
1019     virtual void LoadValueDirect(RegLocation rl_src, RegStorage r_dest);
1020     // Load Dalvik value with 32-bit memory storage.  If compressed object reference, decompress.
1021     virtual void LoadValueDirectFixed(RegLocation rl_src, RegStorage r_dest);
1022     // Load Dalvik value with 64-bit memory storage.
1023     virtual void LoadValueDirectWide(RegLocation rl_src, RegStorage r_dest);
1024     // Load Dalvik value with 64-bit memory storage.
1025     virtual void LoadValueDirectWideFixed(RegLocation rl_src, RegStorage r_dest);
1026     // Store an item of natural word size.
1027     virtual LIR* StoreWordDisp(RegStorage r_base, int displacement, RegStorage r_src) {
1028       return StoreBaseDisp(r_base, displacement, r_src, kWord, kNotVolatile);
1029     }
1030     // Store an uncompressed reference into a compressed 32-bit container.
1031     virtual LIR* StoreRefDisp(RegStorage r_base, int displacement, RegStorage r_src,
1032                               VolatileKind is_volatile) {
1033       return StoreBaseDisp(r_base, displacement, r_src, kReference, is_volatile);
1034     }
1035     // Store an uncompressed reference into a compressed 32-bit container by index.
1036     virtual LIR* StoreRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
1037                                  int scale) {
1038       return StoreBaseIndexed(r_base, r_index, r_src, scale, kReference);
1039     }
1040     // Store 32 bits, regardless of target.
1041     virtual LIR* Store32Disp(RegStorage r_base, int displacement, RegStorage r_src) {
1042       return StoreBaseDisp(r_base, displacement, r_src, k32, kNotVolatile);
1043     }
1044 
1045     /**
1046      * @brief Used to do the final store in the destination as per bytecode semantics.
1047      * @param rl_dest The destination dalvik register location.
1048      * @param rl_src The source register location. Can be either physical register or dalvik register.
1049      */
1050     virtual void StoreValue(RegLocation rl_dest, RegLocation rl_src);
1051 
1052     /**
1053      * @brief Used to do the final store in a wide destination as per bytecode semantics.
1054      * @see StoreValue
1055      * @param rl_dest The destination dalvik register location.
1056      * @param rl_src The source register location. Can be either physical register or dalvik
1057      *  register.
1058      */
1059     virtual void StoreValueWide(RegLocation rl_dest, RegLocation rl_src);
1060 
1061     /**
1062      * @brief Used to do the final store to a destination as per bytecode semantics.
1063      * @see StoreValue
1064      * @param rl_dest The destination dalvik register location.
1065      * @param rl_src The source register location. It must be kLocPhysReg
1066      *
1067      * This is used for x86 two operand computations, where we have computed the correct
1068      * register value that now needs to be properly registered.  This is used to avoid an
1069      * extra register copy that would result if StoreValue was called.
1070      */
1071     virtual void StoreFinalValue(RegLocation rl_dest, RegLocation rl_src);
1072 
1073     /**
1074      * @brief Used to do the final store in a wide destination as per bytecode semantics.
1075      * @see StoreValueWide
1076      * @param rl_dest The destination dalvik register location.
1077      * @param rl_src The source register location. It must be kLocPhysReg
1078      *
1079      * This is used for x86 two operand computations, where we have computed the correct
1080      * register values that now need to be properly registered.  This is used to avoid an
1081      * extra pair of register copies that would result if StoreValueWide was called.
1082      */
1083     virtual void StoreFinalValueWide(RegLocation rl_dest, RegLocation rl_src);
1084 
1085     // Shared by all targets - implemented in mir_to_lir.cc.
1086     void CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list);
1087     virtual void HandleExtendedMethodMIR(BasicBlock* bb, MIR* mir);
1088     bool MethodBlockCodeGen(BasicBlock* bb);
1089     bool SpecialMIR2LIR(const InlineMethod& special);
1090     virtual void MethodMIR2LIR();
1091     // Update LIR for verbose listings.
1092     void UpdateLIROffsets();
1093 
1094     /*
1095      * @brief Load the address of the dex method into the register.
1096      * @param target_method The MethodReference of the method to be invoked.
1097      * @param type How the method will be invoked.
1098      * @param symbolic_reg Symbolic register that will contain the code address.
1099      * @note symbolic_reg will be passed to TargetReg to get the physical register.
1100      */
1101     void LoadCodeAddress(const MethodReference& target_method, InvokeType type,
1102                          SpecialTargetRegister symbolic_reg);
1103 
1104     /*
1105      * @brief Load the Method* of a dex method into the register.
1106      * @param target_method The MethodReference of the method to be invoked.
1107      * @param type How the method will be invoked.
1108      * @param symbolic_reg Symbolic register that will contain the Method* address.
1109      * @note symbolic_reg will be passed to TargetReg to get the physical register.
1110      */
1111     virtual void LoadMethodAddress(const MethodReference& target_method, InvokeType type,
1112                                    SpecialTargetRegister symbolic_reg);
1113 
1114     /*
1115      * @brief Load the Class* of a Dex Class type into the register.
1116      * @param type_idx The index of the class type whose Class* is to be loaded.
1117      * @param symbolic_reg Symbolic register that will contain the Class* address.
1118      * @note symbolic_reg will be passed to TargetReg to get the physical register.
1119      */
1120     virtual void LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg);
1121 
1122     // Load a string
1123     virtual void LoadString(uint32_t string_idx, RegStorage target_reg);
1124 
1125     // Routines that work for the generic case, but may be overridden by target.
1126     /*
1127      * @brief Compare memory to immediate, and branch if condition true.
1128      * @param cond The condition code that when true will branch to the target.
1129      * @param temp_reg A temporary register that can be used if compare to memory is not
1130      * supported by the architecture.
1131      * @param base_reg The register holding the base address.
1132      * @param offset The offset from the base.
1133      * @param check_value The immediate to compare to.
1134      * @param target Branch target (or nullptr).
1135      * @param compare Output parameter receiving the LIR for the comparison (or nullptr).
1136      * @returns The branch instruction that was generated.
1137      */
1138     virtual LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
1139                                    int offset, int check_value, LIR* target, LIR** compare);
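    // Illustrative sketch (hypothetical operands): branch to a slow path when the 32-bit
    // value at [r_base + offset] equals zero, discarding the comparison LIR:
    //   LIR* branch = OpCmpMemImmBranch(kCondEq, r_temp, r_base, offset, 0, slow_path_label, nullptr);
    // On targets without a compare-to-memory instruction, r_temp is used to load the value first.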
1140 
1141     // Required for target - codegen helpers.
1142     virtual bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
1143                                     RegLocation rl_src, RegLocation rl_dest, int lit) = 0;
1144     virtual bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) = 0;
1145     virtual LIR* CheckSuspendUsingLoad() = 0;
1146 
1147     virtual RegStorage LoadHelper(QuickEntrypointEnum trampoline) = 0;
1148 
1149     virtual LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
1150                               OpSize size, VolatileKind is_volatile) = 0;
1151     virtual LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
1152                                  int scale, OpSize size) = 0;
1153     virtual LIR* LoadConstantNoClobber(RegStorage r_dest, int value) = 0;
1154     virtual LIR* LoadConstantWide(RegStorage r_dest, int64_t value) = 0;
1155     virtual LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
1156                                OpSize size, VolatileKind is_volatile) = 0;
1157     virtual LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
1158                                   int scale, OpSize size) = 0;
1159     virtual void MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) = 0;
1160 
1161     // Required for target - register utilities.
1162 
1163     bool IsSameReg(RegStorage reg1, RegStorage reg2) {
1164       RegisterInfo* info1 = GetRegInfo(reg1);
1165       RegisterInfo* info2 = GetRegInfo(reg2);
1166       return (info1->Master() == info2->Master() &&
1167              (info1->StorageMask() & info2->StorageMask()) != 0);
1168     }
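    // Note (explanatory, not from the original header): two RegStorage values count as the
    // "same" register when they alias the same physical storage, e.g. a 32-bit solo view and
    // the 64-bit solo view of one physical register share a master and have overlapping
    // storage masks, so IsSameReg() returns true for them.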
1169 
1170     /**
1171      * @brief Portable way of getting special registers from the backend.
1172      * @param reg Enumeration describing the purpose of the register.
1173      * @return Return the #RegStorage corresponding to the given purpose @p reg.
1174      * @note This function is currently allowed to return any suitable view of the registers
1175      *   (e.g. this could be 64-bit solo or 32-bit solo for 64-bit backends).
1176      */
1177     virtual RegStorage TargetReg(SpecialTargetRegister reg) = 0;
1178 
1179     /**
1180      * @brief Portable way of getting special registers from the backend.
1181      * @param reg Enumeration describing the purpose of the register.
1182      * @param wide_kind What kind of view of the special register is required.
1183      * @return Return the #RegStorage corresponding to the given purpose @p reg.
1184      *
1185      * @note For a 32-bit system, wide (kWide) views only make sense for the argument registers and the
1186      *       return. In that case, this function should return a pair where the first component of
1187      *       the result will be the indicated special register.
1188      */
1189     virtual RegStorage TargetReg(SpecialTargetRegister reg, WideKind wide_kind) {
1190       if (wide_kind == kWide) {
1191         DCHECK((kArg0 <= reg && reg < kArg7) || (kFArg0 <= reg && reg < kFArg7) || (kRet0 == reg));
1192         COMPILE_ASSERT((kArg1 == kArg0 + 1) && (kArg2 == kArg1 + 1) && (kArg3 == kArg2 + 1) &&
1193                        (kArg4 == kArg3 + 1) && (kArg5 == kArg4 + 1) && (kArg6 == kArg5 + 1) &&
1194                        (kArg7 == kArg6 + 1), kargs_range_unexpected);
1195         COMPILE_ASSERT((kFArg1 == kFArg0 + 1) && (kFArg2 == kFArg1 + 1) && (kFArg3 == kFArg2 + 1) &&
1196                        (kFArg4 == kFArg3 + 1) && (kFArg5 == kFArg4 + 1) && (kFArg6 == kFArg5 + 1) &&
1197                        (kFArg7 == kFArg6 + 1), kfargs_range_unexpected);
1198         COMPILE_ASSERT(kRet1 == kRet0 + 1, kret_range_unexpected);
1199         return RegStorage::MakeRegPair(TargetReg(reg),
1200                                        TargetReg(static_cast<SpecialTargetRegister>(reg + 1)));
1201       } else {
1202         return TargetReg(reg);
1203       }
1204     }
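    // Illustrative example (assuming a backend where arguments live in consecutive special
    // registers): TargetReg(kArg0, kWide) returns the pair (TargetReg(kArg0), TargetReg(kArg1)),
    // while TargetReg(kArg0, kNotWide) returns just TargetReg(kArg0).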
1205 
1206     /**
1207      * @brief Portable way of getting a special register for storing a pointer.
1208      * @see TargetReg()
1209      */
1210     virtual RegStorage TargetPtrReg(SpecialTargetRegister reg) {
1211       return TargetReg(reg);
1212     }
1213 
1214     // Get a reg storage corresponding to the wide & ref flags of the reg location.
1215     virtual RegStorage TargetReg(SpecialTargetRegister reg, RegLocation loc) {
1216       if (loc.ref) {
1217         return TargetReg(reg, kRef);
1218       } else {
1219         return TargetReg(reg, loc.wide ? kWide : kNotWide);
1220       }
1221     }
1222 
1223     virtual RegStorage GetArgMappingToPhysicalReg(int arg_num) = 0;
1224     virtual RegLocation GetReturnAlt() = 0;
1225     virtual RegLocation GetReturnWideAlt() = 0;
1226     virtual RegLocation LocCReturn() = 0;
1227     virtual RegLocation LocCReturnRef() = 0;
1228     virtual RegLocation LocCReturnDouble() = 0;
1229     virtual RegLocation LocCReturnFloat() = 0;
1230     virtual RegLocation LocCReturnWide() = 0;
1231     virtual ResourceMask GetRegMaskCommon(const RegStorage& reg) const = 0;
1232     virtual void AdjustSpillMask() = 0;
1233     virtual void ClobberCallerSave() = 0;
1234     virtual void FreeCallTemps() = 0;
1235     virtual void LockCallTemps() = 0;
1236     virtual void CompilerInitializeRegAlloc() = 0;
1237 
1238     // Required for target - miscellaneous.
1239     virtual void AssembleLIR() = 0;
1240     virtual void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) = 0;
1241     virtual void SetupTargetResourceMasks(LIR* lir, uint64_t flags,
1242                                           ResourceMask* use_mask, ResourceMask* def_mask) = 0;
1243     virtual const char* GetTargetInstFmt(int opcode) = 0;
1244     virtual const char* GetTargetInstName(int opcode) = 0;
1245     virtual std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr) = 0;
1246 
1247     // Note: This may return kEncodeNone on architectures that do not expose a PC. The caller must
1248     //       take care of this.
1249     virtual ResourceMask GetPCUseDefEncoding() const = 0;
1250     virtual uint64_t GetTargetInstFlags(int opcode) = 0;
1251     virtual size_t GetInsnSize(LIR* lir) = 0;
1252     virtual bool IsUnconditionalBranch(LIR* lir) = 0;
1253 
1254     // Get the register class for load/store of a field.
1255     virtual RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) = 0;
1256 
1257     // Required for target - Dalvik-level generators.
1258     virtual void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
1259                                    RegLocation rl_src1, RegLocation rl_src2) = 0;
1260     virtual void GenArithOpDouble(Instruction::Code opcode,
1261                                   RegLocation rl_dest, RegLocation rl_src1,
1262                                   RegLocation rl_src2) = 0;
1263     virtual void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
1264                                  RegLocation rl_src1, RegLocation rl_src2) = 0;
1265     virtual void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest,
1266                           RegLocation rl_src1, RegLocation rl_src2) = 0;
1267     virtual void GenConversion(Instruction::Code opcode, RegLocation rl_dest,
1268                                RegLocation rl_src) = 0;
1269     virtual bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object) = 0;
1270 
1271     /**
1272      * @brief Used to generate code for intrinsic java\.lang\.Math methods min and max.
1273      * @details This is also applicable for java\.lang\.StrictMath since it is a simple algorithm
1274      * that applies on integers. The generated code will write the smallest or largest value
1275      * directly into the destination register as specified by the invoke information.
1276      * @param info Information about the invoke.
1277      * @param is_min If true generates code that computes minimum. Otherwise computes maximum.
1278      * @param is_long If true the value is Long. Otherwise the value is Int.
1279      * @return Returns true if successfully generated.
1280      */
1281     virtual bool GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) = 0;
1282     virtual bool GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double);
1283 
1284     virtual bool GenInlinedSqrt(CallInfo* info) = 0;
1285     virtual bool GenInlinedPeek(CallInfo* info, OpSize size) = 0;
1286     virtual bool GenInlinedPoke(CallInfo* info, OpSize size) = 0;
1287     virtual RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi,
1288                                   bool is_div) = 0;
1289     virtual RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit,
1290                                      bool is_div) = 0;
1291     /*
1292      * @brief Generate an integer div or rem operation.
1293      * @param rl_dest Destination Location.
1294      * @param rl_src1 Numerator Location.
1295      * @param rl_src2 Divisor Location.
1296      * @param is_div 'true' if this is a division, 'false' for a remainder.
1297      * @param check_zero 'true' if an exception should be generated if the divisor is 0.
1298      */
1299     virtual RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
1300                                   RegLocation rl_src2, bool is_div, bool check_zero) = 0;
1301     /*
1302      * @brief Generate an integer div or rem operation by a literal.
1303      * @param rl_dest Destination Location.
1304      * @param rl_src1 Numerator Location.
1305      * @param lit Divisor.
1306      * @param is_div 'true' if this is a division, 'false' for a remainder.
1307      */
1308     virtual RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit,
1309                                      bool is_div) = 0;
1310     virtual void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) = 0;
1311 
1312     /**
1313      * @brief Used for generating code that throws ArithmeticException if both registers are zero.
1314      * @details This is used for generating DivideByZero checks when divisor is held in two
1315      *  separate registers.
1316      * @param reg The register holding the pair of 32-bit values.
1317      */
1318     virtual void GenDivZeroCheckWide(RegStorage reg) = 0;
1319 
1320     virtual void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) = 0;
1321     virtual void GenExitSequence() = 0;
1322     virtual void GenFillArrayData(DexOffset table_offset, RegLocation rl_src) = 0;
1323     virtual void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double) = 0;
1324     virtual void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) = 0;
1325 
1326     /*
1327      * @brief Handle Machine Specific MIR Extended opcodes.
1328      * @param bb The basic block in which the MIR is from.
1329      * @param mir The MIR whose opcode is not standard extended MIR.
1330      * @note Base class implementation will abort for unknown opcodes.
1331      */
1332     virtual void GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir);
1333 
1334     /**
1335      * @brief Lowers the kMirOpSelect MIR into LIR.
1336      * @param bb The basic block in which the MIR is from.
1337      * @param mir The MIR whose opcode is kMirOpSelect.
1338      */
1339     virtual void GenSelect(BasicBlock* bb, MIR* mir) = 0;
1340 
1341     /**
1342      * @brief Generates code to select one of the given constants depending on the given opcode.
1343      */
1344     virtual void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
1345                                   int32_t true_val, int32_t false_val, RegStorage rs_dest,
1346                                   int dest_reg_class) = 0;
1347 
1348     /**
1349      * @brief Used to generate a memory barrier in an architecture specific way.
1350      * @details The last generated LIR will be considered for use as barrier. Namely,
1351      * if the last LIR can be updated in a way where it will serve the semantics of
1352      * barrier, then it will be used as such. Otherwise, a new LIR will be generated
1353      * that can keep the semantics.
1354      * @param barrier_kind The kind of memory barrier to generate.
1355      * @return whether a new instruction was generated.
1356      */
1357     virtual bool GenMemBarrier(MemBarrierKind barrier_kind) = 0;
1358 
1359     virtual void GenMoveException(RegLocation rl_dest) = 0;
1360     virtual void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
1361                                                int first_bit, int second_bit) = 0;
1362     virtual void GenNegDouble(RegLocation rl_dest, RegLocation rl_src) = 0;
1363     virtual void GenNegFloat(RegLocation rl_dest, RegLocation rl_src) = 0;
1364 
1365     // Create code for switch statements. Will decide between short and long versions below.
1366     void GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
1367     void GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
1368 
1369     // Potentially backend-specific versions of switch instructions for shorter switch statements.
1370     // The default implementation will create a chained compare-and-branch.
1371     virtual void GenSmallPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
1372     virtual void GenSmallSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
1373     // Backend-specific versions of switch instructions for longer switch statements.
1374     virtual void GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) = 0;
1375     virtual void GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) = 0;
1376 
1377     virtual void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
1378                              RegLocation rl_index, RegLocation rl_dest, int scale) = 0;
1379     virtual void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
1380                              RegLocation rl_index, RegLocation rl_src, int scale,
1381                              bool card_mark) = 0;
1382     virtual void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
1383                                    RegLocation rl_src1, RegLocation rl_shift) = 0;
1384 
1385     // Required for target - single operation generators.
1386     virtual LIR* OpUnconditionalBranch(LIR* target) = 0;
1387     virtual LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) = 0;
1388     virtual LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value,
1389                                 LIR* target) = 0;
1390     virtual LIR* OpCondBranch(ConditionCode cc, LIR* target) = 0;
1391     virtual LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) = 0;
1392     virtual LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src) = 0;
1393     virtual LIR* OpIT(ConditionCode cond, const char* guide) = 0;
1394     virtual void OpEndIT(LIR* it) = 0;
1395     virtual LIR* OpMem(OpKind op, RegStorage r_base, int disp) = 0;
1396     virtual LIR* OpPcRelLoad(RegStorage reg, LIR* target) = 0;
1397     virtual LIR* OpReg(OpKind op, RegStorage r_dest_src) = 0;
1398     virtual void OpRegCopy(RegStorage r_dest, RegStorage r_src) = 0;
1399     virtual LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) = 0;
1400     virtual LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value) = 0;
1401     virtual LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) = 0;
1402 
1403     /**
1404      * @brief Used to generate an LIR that does a load from mem to reg.
1405      * @param r_dest The destination physical register.
1406      * @param r_base The base physical register for memory operand.
1407      * @param offset The displacement for memory operand.
1408      * @param move_type Specification on the move desired (size, alignment, register kind).
1409      * @return Returns the generated move LIR.
1410      */
1411     virtual LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset,
1412                              MoveType move_type) = 0;
1413 
1414     /**
1415      * @brief Used to generate an LIR that does a store from reg to mem.
1416      * @param r_base The base physical register for memory operand.
1417      * @param offset The displacement for memory operand.
1418      * @param r_src The source physical register.
1419      * @param move_type Specification on the move desired (size, alignment,
1420      *        register kind).
1421      * @return Returns the generated move LIR.
1422      */
1423     virtual LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src,
1424                              MoveType move_type) = 0;
1425 
1426     /**
1427      * @brief Used for generating a conditional register to register operation.
1428      * @param op The opcode kind.
1429      * @param cc The condition code that when true will perform the opcode.
1430      * @param r_dest The destination physical register.
1431      * @param r_src The source physical register.
1432      * @return Returns the newly created LIR or null in case of creation failure.
1433      */
1434     virtual LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) = 0;
1435 
1436     virtual LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) = 0;
1437     virtual LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1,
1438                              RegStorage r_src2) = 0;
1439     virtual LIR* OpTestSuspend(LIR* target) = 0;
1440     virtual LIR* OpVldm(RegStorage r_base, int count) = 0;
1441     virtual LIR* OpVstm(RegStorage r_base, int count) = 0;
1442     virtual void OpRegCopyWide(RegStorage dest, RegStorage src) = 0;
1443     virtual bool InexpensiveConstantInt(int32_t value) = 0;
1444     virtual bool InexpensiveConstantFloat(int32_t value) = 0;
1445     virtual bool InexpensiveConstantLong(int64_t value) = 0;
1446     virtual bool InexpensiveConstantDouble(int64_t value) = 0;
1447     virtual bool InexpensiveConstantInt(int32_t value, Instruction::Code opcode) {
1448       return InexpensiveConstantInt(value);
1449     }
1450 
1451     // May be optimized by targets.
1452     virtual void GenMonitorEnter(int opt_flags, RegLocation rl_src);
1453     virtual void GenMonitorExit(int opt_flags, RegLocation rl_src);
1454 
1455     // Temp workaround
1456     void Workaround7250540(RegLocation rl_dest, RegStorage zero_reg);
1457 
1458     virtual LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) = 0;
1459 
1460   protected:
1461     Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
1462 
1463     CompilationUnit* GetCompilationUnit() {
1464       return cu_;
1465     }
1466     /*
1467      * @brief Returns the index of the lowest set bit in 'x'.
1468      * @param x Value to be examined.
1469      * @returns The bit number of the lowest bit set in the value.
1470      */
1471     int32_t LowestSetBit(uint64_t x);
1472     /*
1473      * @brief Is this value a power of two?
1474      * @param x Value to be examined.
1475      * @returns 'true' if only 1 bit is set in the value.
1476      */
1477     bool IsPowerOfTwo(uint64_t x);
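    // Worked examples for the two bit utilities above (values chosen for illustration):
    //   LowestSetBit(0x18) == 3      // 0x18 == 0b11000; the lowest set bit is bit 3.
    //   IsPowerOfTwo(64) == true;  IsPowerOfTwo(48) == false.
    // A common implementation of IsPowerOfTwo is (x != 0) && ((x & (x - 1)) == 0).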
1478     /*
1479      * @brief Do these SRs overlap?
1480      * @param rl_op1 One RegLocation
1481      * @param rl_op2 The other RegLocation
1482      * @return 'true' if the VR pairs overlap
1483      *
1484      * Check to see if a result pair has a misaligned overlap with an operand pair.  This
1485      * is not usual for dx to generate, but it is legal (for now).  In a future rev of
1486      * dex, we'll want to make this case illegal.
1487      */
1488     bool BadOverlap(RegLocation rl_op1, RegLocation rl_op2);
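    // Illustrative example (hypothetical virtual registers): a wide result in the pair
    // (v2, v3) and a wide operand in (v1, v2) overlap but are misaligned, so BadOverlap()
    // reports true; a result and operand occupying exactly the same pair, e.g. both (v2, v3),
    // are aligned and not considered a bad overlap.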
1489 
1490     /*
1491      * @brief Force a location (in a register) into a temporary register
1492      * @param loc location of result
1493      * @returns updated location
1494      */
1495     virtual RegLocation ForceTemp(RegLocation loc);
1496 
1497     /*
1498      * @brief Force a wide location (in registers) into temporary registers
1499      * @param loc location of result
1500      * @returns updated location
1501      */
1502     virtual RegLocation ForceTempWide(RegLocation loc);
1503 
1504     static constexpr OpSize LoadStoreOpSize(bool wide, bool ref) {
1505       return wide ? k64 : ref ? kReference : k32;
1506     }
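    // The constexpr helper above maps a location's wide/ref flags to an OpSize:
    //   LoadStoreOpSize(/* wide */ true,  /* ref */ false) == k64
    //   LoadStoreOpSize(/* wide */ false, /* ref */ true)  == kReference
    //   LoadStoreOpSize(/* wide */ false, /* ref */ false) == k32
    // (wide takes precedence over ref, per the conditional expression above).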
1507 
1508     virtual void GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx,
1509                                     RegLocation rl_dest, RegLocation rl_src);
1510 
1511     void AddSlowPath(LIRSlowPath* slowpath);
1512 
1513     /*
1514      *
1515      * @brief Set up an instanceof check for a class.
1516      * @param needs_access_check 'true' if we must check the access.
1517      * @param type_known_final 'true' if the type is known to be a final class.
1518      * @param type_known_abstract 'true' if the type is known to be an abstract class.
1519      * @param use_declaring_class 'true' if the type can be loaded off the current Method*.
1520      * @param can_assume_type_is_in_dex_cache 'true' if the type is known to be in the cache.
1521      * @param type_idx Type index to use if use_declaring_class is 'false'.
1522      * @param rl_dest Result to be set to 0 or 1.
1523      * @param rl_src Object to be tested.
1524      */
1525     void GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
1526                                     bool type_known_abstract, bool use_declaring_class,
1527                                     bool can_assume_type_is_in_dex_cache,
1528                                     uint32_t type_idx, RegLocation rl_dest,
1529                                     RegLocation rl_src);
1530     /*
1531      * @brief Generate the debug_frame FDE information if possible.
1532      * @returns pointer to vector containing the FDE information, or NULL.
1533      */
1534     virtual std::vector<uint8_t>* ReturnCallFrameInformation();
1535 
1536     /**
1537      * @brief Used to insert marker that can be used to associate MIR with LIR.
1538      * @details Only inserts marker if verbosity is enabled.
1539      * @param mir The mir that is currently being generated.
1540      */
1541     void GenPrintLabel(MIR* mir);
1542 
1543     /**
1544      * @brief Used to generate return sequence when there is no frame.
1545      * @details Assumes that the return registers have already been populated.
1546      */
1547     virtual void GenSpecialExitSequence() = 0;
1548 
1549     /**
1550      * @brief Used to generate code for special methods that are known to be
1551      * small enough to work in frameless mode.
1552      * @param bb The basic block of the first MIR.
1553      * @param mir The first MIR of the special method.
1554      * @param special Information about the special method.
1555      * @return Returns whether or not this was handled successfully. Returns false
1556      * if caller should punt to normal MIR2LIR conversion.
1557      */
1558     virtual bool GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special);
1559 
1560   protected:
1561     void ClobberBody(RegisterInfo* p);
1562     void SetCurrentDexPc(DexOffset dexpc) {
1563       current_dalvik_offset_ = dexpc;
1564     }
1565 
1566     /**
1567      * @brief Used to lock register if argument at in_position was passed that way.
1568      * @details Does nothing if the argument is passed via stack.
1569      * @param in_position The argument number whose register to lock.
1570      * @param wide Whether the argument is wide.
1571      */
1572     void LockArg(int in_position, bool wide = false);
1573 
1574     /**
1575      * @brief Used to load VR argument to a physical register.
1576      * @details The load is only done if the argument is not already in physical register.
1577      * LockArg must have been previously called.
1578      * @param in_position The argument number to load.
1579      * @param wide Whether the argument is 64-bit or not.
1580      * @return Returns the register (or register pair) for the loaded argument.
1581      */
1582     RegStorage LoadArg(int in_position, RegisterClass reg_class, bool wide = false);
1583 
1584     /**
1585      * @brief Used to load a VR argument directly to a specified register location.
1586      * @param in_position The argument number to place in register.
1587      * @param rl_dest The register location where to place argument.
1588      */
1589     void LoadArgDirect(int in_position, RegLocation rl_dest);
1590 
1591     /**
1592      * @brief Used to generate LIR for special getter method.
1593      * @param mir The mir that represents the iget.
1594      * @param special Information about the special getter method.
1595      * @return Returns whether LIR was successfully generated.
1596      */
1597     bool GenSpecialIGet(MIR* mir, const InlineMethod& special);
1598 
1599     /**
1600      * @brief Used to generate LIR for special setter method.
1601      * @param mir The mir that represents the iput.
1602      * @param special Information about the special setter method.
1603      * @return Returns whether LIR was successfully generated.
1604      */
1605     bool GenSpecialIPut(MIR* mir, const InlineMethod& special);
1606 
1607     /**
1608      * @brief Used to generate LIR for special return-args method.
1609      * @param mir The mir that represents the return of argument.
1610      * @param special Information about the special return-args method.
1611      * @return Returns whether LIR was successfully generated.
1612      */
1613     bool GenSpecialIdentity(MIR* mir, const InlineMethod& special);
1614 
1615     void AddDivZeroCheckSlowPath(LIR* branch);
1616 
1617     // Copy arg0 and arg1 to kArg0 and kArg1 safely, possibly using
1618     // kArg2 as temp.
1619     virtual void CopyToArgumentRegs(RegStorage arg0, RegStorage arg1);
1620 
1621     /**
1622      * @brief Load Constant into RegLocation
1623      * @param rl_dest Destination RegLocation
1624      * @param value Constant value
1625      */
1626     virtual void GenConst(RegLocation rl_dest, int value);
1627 
1628     /**
1629      * Returns true iff wide GPRs are just different views on the same physical register.
1630      */
1631     virtual bool WideGPRsAreAliases() = 0;
1632 
1633     /**
1634      * Returns true iff wide FPRs are just different views on the same physical register.
1635      */
1636     virtual bool WideFPRsAreAliases() = 0;
1637 
1638 
1639     enum class WidenessCheck {  // private
1640       kIgnoreWide,
1641       kCheckWide,
1642       kCheckNotWide
1643     };
1644 
1645     enum class RefCheck {  // private
1646       kIgnoreRef,
1647       kCheckRef,
1648       kCheckNotRef
1649     };
1650 
1651     enum class FPCheck {  // private
1652       kIgnoreFP,
1653       kCheckFP,
1654       kCheckNotFP
1655     };
1656 
1657     /**
1658      * Check whether a reg storage seems well-formed, that is, if a reg storage is valid,
1659      * that it has the expected form for the given check flags.
1660      * Each flag may ignore the property (kIgnore*), require it (kCheck*), or require its absence (kCheckNot*).
1661      */
1662     void CheckRegStorageImpl(RegStorage rs, WidenessCheck wide, RefCheck ref, FPCheck fp, bool fail,
1663                              bool report)
1664         const;
1665 
1666     /**
1667      * Check whether a reg location seems well-formed, that is, if a reg storage is encoded,
1668      * that it has the expected size.
1669      */
1670     void CheckRegLocationImpl(RegLocation rl, bool fail, bool report) const;
1671 
1672     // See CheckRegStorageImpl. Will print or fail depending on kFailOnSizeError and
1673     // kReportSizeError.
1674     void CheckRegStorage(RegStorage rs, WidenessCheck wide, RefCheck ref, FPCheck fp) const;
1675     // See CheckRegLocationImpl.
1676     void CheckRegLocation(RegLocation rl) const;
1677 
1678   public:
1679     // TODO: add accessors for these.
1680     LIR* literal_list_;                        // Constants.
1681     LIR* method_literal_list_;                 // Method literals requiring patching.
1682     LIR* class_literal_list_;                  // Class literals requiring patching.
1683     LIR* string_literal_list_;                 // String literals requiring patching.
1684     LIR* code_literal_list_;                   // Code literals requiring patching.
1685     LIR* first_fixup_;                         // Doubly-linked list of LIR nodes requiring fixups.
1686 
1687   protected:
1688     CompilationUnit* const cu_;
1689     MIRGraph* const mir_graph_;
1690     GrowableArray<SwitchTable*> switch_tables_;
1691     GrowableArray<FillArrayData*> fill_array_data_;
1692     GrowableArray<RegisterInfo*> tempreg_info_;
1693     GrowableArray<RegisterInfo*> reginfo_map_;
1694     GrowableArray<void*> pointer_storage_;
1695     CodeOffset current_code_offset_;    // Working byte offset of machine instructions.
1696     CodeOffset data_offset_;            // Starting offset of the literal pool.
1697     size_t total_size_;                   // Header + code size.
1698     LIR* block_label_list_;
1699     PromotionMap* promotion_map_;
1700     /*
1701      * TODO: The code generation utilities don't have a built-in
1702      * mechanism to propagate the original Dalvik opcode address to the
1703      * associated generated instructions.  For the trace compiler, this wasn't
1704      * necessary because the interpreter handled all throws and debugging
1705      * requests.  For now we'll handle this by placing the Dalvik offset
1706      * in the CompilationUnit struct before codegen for each instruction.
1707      * The low-level LIR creation utilities will pull it from here.  Rework this.
1708      */
1709     DexOffset current_dalvik_offset_;
1710     size_t estimated_native_code_size_;     // Just an estimate; used to reserve code_buffer_ size.
1711     RegisterPool* reg_pool_;
1712     /*
1713      * Sanity checking for the register temp tracking.  The same SSA
1714      * name should never be associated with more than one temp register
1715      * during the compilation of a single instruction.
1716      */
1717     int live_sreg_;
1718     CodeBuffer code_buffer_;
1719     // The encoding mapping table data (dex -> pc offset and pc offset -> dex) with a size prefix.
1720     std::vector<uint8_t> encoded_mapping_table_;
1721     ArenaVector<uint32_t> core_vmap_table_;
1722     ArenaVector<uint32_t> fp_vmap_table_;
1723     std::vector<uint8_t> native_gc_map_;
1724     int num_core_spills_;
1725     int num_fp_spills_;
1726     int frame_size_;
1727     unsigned int core_spill_mask_;
1728     unsigned int fp_spill_mask_;
1729     LIR* first_lir_insn_;
1730     LIR* last_lir_insn_;
1731 
1732     GrowableArray<LIRSlowPath*> slow_paths_;
1733 
1734     // The memory reference type for new LIRs.
1735     // NOTE: Passing this as an explicit parameter by all functions that directly or indirectly
1736     // invoke RawLIR() would clutter the code and reduce the readability.
1737     ResourceMask::ResourceBit mem_ref_type_;
1738 
1739     // Each resource mask now takes 16 bytes, so having both use/def masks directly in a LIR
1740     // would consume 32 bytes per LIR. Instead, the LIR now holds only pointers to the masks
1741     // (i.e. 8 bytes on 32-bit arch, 16 bytes on 64-bit arch) and we use ResourceMaskCache
1742     // to deduplicate the masks.
1743     ResourceMaskCache mask_cache_;
1744 };  // Class Mir2Lir
1745 
1746 }  // namespace art
1747 
1748 #endif  // ART_COMPILER_DEX_QUICK_MIR_TO_LIR_H_
1749