/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_DEX_QUICK_MIR_TO_LIR_H_
#define ART_COMPILER_DEX_QUICK_MIR_TO_LIR_H_

#include "base/arena_allocator.h"
#include "base/arena_containers.h"
#include "base/arena_object.h"
#include "compiled_method.h"
#include "dex/compiler_enums.h"
#include "dex/dex_flags.h"
#include "dex/dex_types.h"
#include "dex/reg_location.h"
#include "dex/reg_storage.h"
#include "dex/quick/resource_mask.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "invoke_type.h"
#include "lazy_debug_frame_opcode_writer.h"
#include "leb128.h"
#include "primitive.h"
#include "safe_map.h"
#include "utils/array_ref.h"
#include "utils/dex_cache_arrays_layout.h"
#include "utils/stack_checks.h"

namespace art {

// Set to 1 to measure cost of suspend check.
#define NO_SUSPEND 0

#define IS_BINARY_OP (1ULL << kIsBinaryOp)
#define IS_BRANCH (1ULL << kIsBranch)
#define IS_IT (1ULL << kIsIT)
#define IS_MOVE (1ULL << kIsMoveOp)
#define IS_LOAD (1ULL << kMemLoad)
#define IS_QUAD_OP (1ULL << kIsQuadOp)
#define IS_QUIN_OP (1ULL << kIsQuinOp)
#define IS_SEXTUPLE_OP (1ULL << kIsSextupleOp)
#define IS_STORE (1ULL << kMemStore)
#define IS_TERTIARY_OP (1ULL << kIsTertiaryOp)
#define IS_UNARY_OP (1ULL << kIsUnaryOp)
#define IS_VOLATILE (1ULL << kMemVolatile)
#define NEEDS_FIXUP (1ULL << kPCRelFixup)
#define NO_OPERAND (1ULL << kNoOperand)
#define REG_DEF0 (1ULL << kRegDef0)
#define REG_DEF1 (1ULL << kRegDef1)
#define REG_DEF2 (1ULL << kRegDef2)
#define REG_DEFA (1ULL << kRegDefA)
#define REG_DEFD (1ULL << kRegDefD)
#define REG_DEF_FPCS_LIST0 (1ULL << kRegDefFPCSList0)
#define REG_DEF_FPCS_LIST2 (1ULL << kRegDefFPCSList2)
#define REG_DEF_LIST0 (1ULL << kRegDefList0)
#define REG_DEF_LIST1 (1ULL << kRegDefList1)
#define REG_DEF_LR (1ULL << kRegDefLR)
#define REG_DEF_SP (1ULL << kRegDefSP)
#define REG_USE0 (1ULL << kRegUse0)
#define REG_USE1 (1ULL << kRegUse1)
#define REG_USE2 (1ULL << kRegUse2)
#define REG_USE3 (1ULL << kRegUse3)
#define REG_USE4 (1ULL << kRegUse4)
#define REG_USEA (1ULL << kRegUseA)
#define REG_USEC (1ULL << kRegUseC)
#define REG_USED (1ULL << kRegUseD)
#define REG_USEB (1ULL << kRegUseB)
#define REG_USE_FPCS_LIST0 (1ULL << kRegUseFPCSList0)
#define REG_USE_FPCS_LIST2 (1ULL << kRegUseFPCSList2)
#define REG_USE_LIST0 (1ULL << kRegUseList0)
#define REG_USE_LIST1 (1ULL << kRegUseList1)
#define REG_USE_LR (1ULL << kRegUseLR)
#define REG_USE_PC (1ULL << kRegUsePC)
#define REG_USE_SP (1ULL << kRegUseSP)
#define SETS_CCODES (1ULL << kSetsCCodes)
#define USES_CCODES (1ULL << kUsesCCodes)
#define USE_FP_STACK (1ULL << kUseFpStack)
#define REG_USE_LO (1ULL << kUseLo)
#define REG_USE_HI (1ULL << kUseHi)
#define REG_DEF_LO (1ULL << kDefLo)
#define REG_DEF_HI (1ULL << kDefHi)
#define SCALED_OFFSET_X0 (1ULL << kMemScaledx0)
#define SCALED_OFFSET_X2 (1ULL << kMemScaledx2)
#define SCALED_OFFSET_X4 (1ULL << kMemScaledx4)

// Special load/stores
#define IS_LOADX (IS_LOAD | IS_VOLATILE)
#define IS_LOAD_OFF (IS_LOAD | SCALED_OFFSET_X0)
#define IS_LOAD_OFF2 (IS_LOAD | SCALED_OFFSET_X2)
#define IS_LOAD_OFF4 (IS_LOAD | SCALED_OFFSET_X4)

#define IS_STOREX (IS_STORE | IS_VOLATILE)
#define IS_STORE_OFF (IS_STORE | SCALED_OFFSET_X0)
#define IS_STORE_OFF2 (IS_STORE | SCALED_OFFSET_X2)
#define IS_STORE_OFF4 (IS_STORE | SCALED_OFFSET_X4)

// Common combo register usage patterns.
#define REG_DEF01 (REG_DEF0 | REG_DEF1)
#define REG_DEF012 (REG_DEF0 | REG_DEF1 | REG_DEF2)
#define REG_DEF01_USE2 (REG_DEF0 | REG_DEF1 | REG_USE2)
#define REG_DEF0_USE01 (REG_DEF0 | REG_USE01)
#define REG_DEF0_USE0 (REG_DEF0 | REG_USE0)
#define REG_DEF0_USE12 (REG_DEF0 | REG_USE12)
#define REG_DEF0_USE123 (REG_DEF0 | REG_USE123)
#define REG_DEF0_USE1 (REG_DEF0 | REG_USE1)
#define REG_DEF0_USE2 (REG_DEF0 | REG_USE2)
#define REG_DEFAD_USEAD (REG_DEFAD_USEA | REG_USED)
#define REG_DEFAD_USEA (REG_DEFA_USEA | REG_DEFD)
#define REG_DEFA_USEA (REG_DEFA | REG_USEA)
#define REG_USE012 (REG_USE01 | REG_USE2)
#define REG_USE014 (REG_USE01 | REG_USE4)
#define REG_USE01 (REG_USE0 | REG_USE1)
#define REG_USE02 (REG_USE0 | REG_USE2)
#define REG_USE12 (REG_USE1 | REG_USE2)
#define REG_USE23 (REG_USE2 | REG_USE3)
#define REG_USE123 (REG_USE1 | REG_USE2 | REG_USE3)

/*
 * Assembly is an iterative process, and usually terminates within
 * two or three passes. This should be high enough to handle bizarre
 * cases, but detect an infinite loop bug.
 */
#define MAX_ASSEMBLER_RETRIES 50

class BasicBlock;
class BitVector;
struct CallInfo;
struct CompilationUnit;
struct CompilerTemp;
struct InlineMethod;
class MIR;
struct LIR;
struct RegisterInfo;
class DexFileMethodInliner;
class MIRGraph;
class MirMethodLoweringInfo;
class MirSFieldLoweringInfo;

typedef int (*NextCallInsn)(CompilationUnit*, CallInfo*, int,
                            const MethodReference& target_method,
                            uint32_t method_idx, uintptr_t direct_code,
                            uintptr_t direct_method, InvokeType type);

typedef ArenaVector<uint8_t> CodeBuffer;
typedef uint32_t CodeOffset;  // Native code offset in bytes.

struct UseDefMasks {
  const ResourceMask* use_mask;  // Resource mask for use.
  const ResourceMask* def_mask;  // Resource mask for def.
};

struct AssemblyInfo {
  LIR* pcrel_next;  // Chain of LIR nodes needing pc relative fixups.
};

struct LIR {
  CodeOffset offset;              // Offset of this instruction.
  NarrowDexOffset dalvik_offset;  // Offset of Dalvik opcode in code units (16-bit words).
  int16_t opcode;
  LIR* next;
  LIR* prev;
  LIR* target;
  struct {
    unsigned int alias_info:17;   // For Dalvik register disambiguation.
    bool is_nop:1;                // LIR is optimized away.
    unsigned int size:4;          // Note: size of encoded instruction is in bytes.
    bool use_def_invalid:1;       // If true, masks should not be used.
    unsigned int generation:1;    // Used to track visitation state during fixup pass.
    unsigned int fixup:8;         // Fixup kind.
  } flags;
  union {
    UseDefMasks m;                // Use & Def masks used during optimization.
    AssemblyInfo a;               // Instruction info used during assembly phase.
  } u;
  int32_t operands[5];            // [0..4] = [dest, src1, src2, extra, extra2].
};

// Utility macros to traverse the LIR list.
#define NEXT_LIR(lir) (lir->next)
#define PREV_LIR(lir) (lir->prev)

// Defines for alias_info (tracks Dalvik register references).
#define DECODE_ALIAS_INFO_REG(X)        (X & 0xffff)
#define DECODE_ALIAS_INFO_WIDE_FLAG     (0x10000)
#define DECODE_ALIAS_INFO_WIDE(X)       ((X & DECODE_ALIAS_INFO_WIDE_FLAG) ? 1 : 0)
#define ENCODE_ALIAS_INFO(REG, ISWIDE)  (REG | (ISWIDE ? DECODE_ALIAS_INFO_WIDE_FLAG : 0))

#define ENCODE_REG_PAIR(low_reg, high_reg) ((low_reg & 0xff) | ((high_reg & 0xff) << 8))
#define DECODE_REG_PAIR(both_regs, low_reg, high_reg) \
  do { \
    low_reg = both_regs & 0xff; \
    high_reg = (both_regs >> 8) & 0xff; \
  } while (false)
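// Illustrative sketch of the encoding helpers above: alias_info packs a 16-bit Dalvik register
// number plus a wide flag, and a register pair packs two 8-bit register numbers (values below
// are example inputs only).
//
//   int alias = ENCODE_ALIAS_INFO(5, true);        // Dalvik v5, wide -> 0x10005.
//   int reg   = DECODE_ALIAS_INFO_REG(alias);      // 5.
//   int wide  = DECODE_ALIAS_INFO_WIDE(alias);     // 1.
//
//   int pair = ENCODE_REG_PAIR(2, 3);              // low r2, high r3 -> 0x0302.
//   int lo, hi;
//   DECODE_REG_PAIR(pair, lo, hi);                 // lo == 2, hi == 3.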
// Mask to denote sreg as the start of a 64-bit item.  Must not interfere with low 16 bits.
#define STARTING_WIDE_SREG 0x10000

class Mir2Lir {
 public:
  static constexpr bool kFailOnSizeError = true && kIsDebugBuild;
  static constexpr bool kReportSizeError = true && kIsDebugBuild;

  // TODO: If necessary, this could be made target-dependent.
  static constexpr uint16_t kSmallSwitchThreshold = 5;

  /*
   * Auxiliary information describing the location of data embedded in the Dalvik
   * byte code stream.
   */
  struct EmbeddedData {
    CodeOffset offset;      // Code offset of data block.
    const uint16_t* table;  // Original dex data.
    DexOffset vaddr;        // Dalvik offset of parent opcode.
  };

  struct FillArrayData : EmbeddedData {
    int32_t size;
  };

  struct SwitchTable : EmbeddedData {
    LIR* anchor;      // Reference instruction for relative offsets.
    MIR* switch_mir;  // The switch mir.
  };

  /* Static register use counts */
  struct RefCounts {
    int count;
    int s_reg;
  };

  /*
   * Data structure tracking the mapping between a Dalvik value (32 or 64 bits)
   * and native register storage. The primary purpose is to reuse previously
   * loaded values, if possible, and otherwise to keep the value in register
   * storage as long as possible.
   *
   * NOTE 1: wide_value refers to the width of the Dalvik value contained in
   * this register (or pair). For example, a 64-bit register containing a 32-bit
   * Dalvik value would have wide_value==false even though the storage container itself
   * is wide. Similarly, a 32-bit register containing half of a 64-bit Dalvik value
   * would have wide_value==true (and additionally would have its partner field set to the
   * other half, whose wide_value field would also be true).
   *
   * NOTE 2: In the case of a register pair, you can determine which of the partners
   * is the low half by looking at the s_reg names. The high s_reg will equal low_sreg + 1.
   *
   * NOTE 3: In the case of a 64-bit register holding a Dalvik wide value, wide_value
   * will be true and partner==self. s_reg refers to the low-order word of the Dalvik
   * value, and the s_reg of the high word is implied (s_reg + 1).
   *
   * NOTE 4: The reg and is_temp fields should always be correct. If is_temp is false no
   * other fields have meaning. [perhaps not true, wide should work for promoted regs?]
   * If is_temp==true and live==false, no other fields have meaning.
   * If is_temp==true and live==true, wide_value, partner, dirty, s_reg, def_start
   * and def_end describe the relationship between the temp register/register pair and
   * the Dalvik value[s] described by s_reg/s_reg+1.
   *
   * The fields used_storage, master_storage and storage_mask are used to track allocation
   * in light of potential aliasing. For example, consider Arm's d2, which overlaps s4 & s5.
   * d2's storage mask would be 0x00000003, the two low-order bits denoting 64 bits of
   * storage use. For s4, it would be 0x00000001; for s5, 0x00000002. These values should not
   * change once initialized. The "used_storage" field tracks current allocation status.
   * Although each record contains this field, only the field from the largest member of
   * an aliased group is used. In our case, it would be d2's. The master_storage pointer
   * of d2, s4 and s5 would all point to d2's used_storage field. Each bit in used_storage
   * represents 32 bits of storage. d2's used_storage would be initialized to 0xfffffffc.
   * Then, if we wanted to determine whether s4 could be allocated, we would "and"
   * s4's storage_mask with s4's *master_storage. If the result is zero, s4 is free;
   * to allocate: *master_storage |= storage_mask. To free: *master_storage &= ~storage_mask.
   *
   * For an X86 vector register example, storage_mask would be:
   *    0x00000001 for 32-bit view of xmm1
   *    0x00000003 for 64-bit view of xmm1
   *    0x0000000f for 128-bit view of xmm1
   *    0x000000ff for 256-bit view of ymm1   // future expansion, if needed
   *    0x0000ffff for 512-bit view of ymm1   // future expansion, if needed
   *    0xffffffff for 1024-bit view of ymm1  // future expansion, if needed
   *
   * The "liveness" of a register is handled in a similar way. The liveness_ storage is
   * held in the widest member of an aliased set. Note, though, that for a temp register to
   * be reused as live, it must both be marked live and the associated SReg() must match the
   * desired s_reg. This gets a little complicated when dealing with aliased registers. All
   * members of an aliased set will share the same liveness flags, but each will individually
   * maintain s_reg_. In this way we can know that at least one member of an
   * aliased set is live, but will only fully match on the appropriate alias view. For example,
   * if Arm d1 is live as a double and has s_reg_ set to Dalvik v8 (which also implies v9
   * because it is wide), its aliases s2 and s3 will show as live, but will have
   * s_reg_ == INVALID_SREG. An attempt to later AllocLiveReg() of v9 with a single-precision
   * view will fail because although s3's liveness bit is set, its s_reg_ will not match v9.
   * This will cause all members of the aliased set to be clobbered and AllocLiveReg() will
   * report that v9 is currently not live as a single (which is what we want).
   *
   * NOTE: the x86 usage is still somewhat in flux.  There are competing notions of how
   * to treat xmm registers:
   *     1. Treat them all as 128-bits wide, but denote how much data used via bytes field.
   *         o This more closely matches reality, but means you'd need to be able to get
   *           to the associated RegisterInfo struct to figure out how it's being used.
   *         o This is how 64-bit core registers will be used - always 64 bits, but the
   *           "bytes" field will be 4 for 32-bit usage and 8 for 64-bit usage.
   *     2. View the xmm registers based on contents.
   *         o A single in a xmm2 register would be k32BitVector, while a double in xmm2 would
   *           be a k64BitVector.
   *         o Note that the two uses above would be considered distinct registers (but with
   *           the aliasing mechanism, we could detect interference).
   *         o This is how aliased double and single float registers will be handled on
   *           Arm and MIPS.
   * Working plan is, for all targets, to follow mechanism 1 for 64-bit core registers, and
   * mechanism 2 for aliased float registers and x86 vector registers.
   */
  class RegisterInfo : public ArenaObject<kArenaAllocRegAlloc> {
   public:
    RegisterInfo(RegStorage r, const ResourceMask& mask = kEncodeAll);
    ~RegisterInfo() {}

    static const uint32_t k32SoloStorageMask = 0x00000001;
    static const uint32_t kLowSingleStorageMask = 0x00000001;
    static const uint32_t kHighSingleStorageMask = 0x00000002;
    static const uint32_t k64SoloStorageMask = 0x00000003;
    static const uint32_t k128SoloStorageMask = 0x0000000f;
    static const uint32_t k256SoloStorageMask = 0x000000ff;
    static const uint32_t k512SoloStorageMask = 0x0000ffff;
    static const uint32_t k1024SoloStorageMask = 0xffffffff;

    bool InUse() { return (storage_mask_ & master_->used_storage_) != 0; }
    void MarkInUse() { master_->used_storage_ |= storage_mask_; }
    void MarkFree() { master_->used_storage_ &= ~storage_mask_; }
    // No part of the containing storage is live in this view.
    bool IsDead() { return (master_->liveness_ & storage_mask_) == 0; }
    // Liveness of this view matches.  Note: not equivalent to !IsDead().
    bool IsLive() { return (master_->liveness_ & storage_mask_) == storage_mask_; }
    void MarkLive(int s_reg) {
      // TODO: Anything useful to assert here?
      s_reg_ = s_reg;
      master_->liveness_ |= storage_mask_;
    }
    void MarkDead() {
      if (SReg() != INVALID_SREG) {
        s_reg_ = INVALID_SREG;
        master_->liveness_ &= ~storage_mask_;
        ResetDefBody();
      }
    }
    RegStorage GetReg() { return reg_; }
    void SetReg(RegStorage reg) { reg_ = reg; }
    bool IsTemp() { return is_temp_; }
    void SetIsTemp(bool val) { is_temp_ = val; }
    bool IsWide() { return wide_value_; }
    void SetIsWide(bool val) {
      wide_value_ = val;
      if (!val) {
        // If not wide, reset partner to self.
        SetPartner(GetReg());
      }
    }
    bool IsDirty() { return dirty_; }
    void SetIsDirty(bool val) { dirty_ = val; }
    RegStorage Partner() { return partner_; }
    void SetPartner(RegStorage partner) { partner_ = partner; }
    int SReg() { return (!IsTemp() || IsLive()) ? s_reg_ : INVALID_SREG; }
    const ResourceMask& DefUseMask() { return def_use_mask_; }
    void SetDefUseMask(const ResourceMask& def_use_mask) { def_use_mask_ = def_use_mask; }
    RegisterInfo* Master() { return master_; }
    void SetMaster(RegisterInfo* master) {
      master_ = master;
      if (master != this) {
        master_->aliased_ = true;
        DCHECK(alias_chain_ == nullptr);
        alias_chain_ = master_->alias_chain_;
        master_->alias_chain_ = this;
      }
    }
    bool IsAliased() { return aliased_; }
    RegisterInfo* GetAliasChain() { return alias_chain_; }
    uint32_t StorageMask() { return storage_mask_; }
    void SetStorageMask(uint32_t storage_mask) { storage_mask_ = storage_mask; }
    LIR* DefStart() { return def_start_; }
    void SetDefStart(LIR* def_start) { def_start_ = def_start; }
    LIR* DefEnd() { return def_end_; }
    void SetDefEnd(LIR* def_end) { def_end_ = def_end; }
    void ResetDefBody() { def_start_ = def_end_ = nullptr; }
    // Find member of aliased set matching storage_used; return null if none.
    RegisterInfo* FindMatchingView(uint32_t storage_used) {
      RegisterInfo* res = Master();
      for (; res != nullptr; res = res->GetAliasChain()) {
        if (res->StorageMask() == storage_used)
          break;
      }
      return res;
    }

   private:
    RegStorage reg_;
    bool is_temp_;               // Can allocate as temp?
    bool wide_value_;            // Holds a Dalvik wide value (either itself, or part of a pair).
    bool dirty_;                 // If live, is it dirty?
    bool aliased_;               // Is this the master for other aliased RegisterInfo's?
    RegStorage partner_;         // If wide_value, other reg of pair or self if 64-bit register.
    int s_reg_;                  // Name of live value.
    ResourceMask def_use_mask_;  // Resources for this element.
    uint32_t used_storage_;      // 1 bit per 4 bytes of storage. Unused by aliases.
    uint32_t liveness_;          // 1 bit per 4 bytes of storage. Unused by aliases.
    RegisterInfo* master_;       // Pointer to controlling storage mask.
    uint32_t storage_mask_;      // Track allocation of sub-units.
    LIR* def_start_;             // Starting inst in last def sequence.
    LIR* def_end_;               // Ending inst in last def sequence.
    RegisterInfo* alias_chain_;  // Chain of aliased registers.
  };
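  // Illustrative sketch (assumed usage, echoing the d2/s4/s5 example above): allocation and
  // liveness of an aliased view are resolved through the master record, so callers go through
  // the accessors rather than touching used_storage_/liveness_ directly. rs_s4 is a
  // hypothetical RegStorage name.
  //
  //   RegisterInfo* s4_info = GetRegInfo(rs_s4);
  //   if (!s4_info->InUse()) {       // storage_mask_ checked against the master's used_storage_.
  //     s4_info->MarkInUse();        // Claims s4's 32 bits within d2's storage.
  //   }
  //   s4_info->MarkLive(s_reg);      // Record which Dalvik value now lives in this view.
  //   ...
  //   s4_info->MarkDead();           // Clears liveness, s_reg_ and the def range.
  //   s4_info->MarkFree();           // Releases the storage claim.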
  class RegisterPool : public DeletableArenaObject<kArenaAllocRegAlloc> {
   public:
    RegisterPool(Mir2Lir* m2l, ArenaAllocator* arena,
                 const ArrayRef<const RegStorage>& core_regs,
                 const ArrayRef<const RegStorage>& core64_regs,
                 const ArrayRef<const RegStorage>& sp_regs,
                 const ArrayRef<const RegStorage>& dp_regs,
                 const ArrayRef<const RegStorage>& reserved_regs,
                 const ArrayRef<const RegStorage>& reserved64_regs,
                 const ArrayRef<const RegStorage>& core_temps,
                 const ArrayRef<const RegStorage>& core64_temps,
                 const ArrayRef<const RegStorage>& sp_temps,
                 const ArrayRef<const RegStorage>& dp_temps);
    ~RegisterPool() {}
    void ResetNextTemp() {
      next_core_reg_ = 0;
      next_sp_reg_ = 0;
      next_dp_reg_ = 0;
    }
    ArenaVector<RegisterInfo*> core_regs_;
    int next_core_reg_;
    ArenaVector<RegisterInfo*> core64_regs_;
    int next_core64_reg_;
    ArenaVector<RegisterInfo*> sp_regs_;    // Single precision float.
    int next_sp_reg_;
    ArenaVector<RegisterInfo*> dp_regs_;    // Double precision float.
    int next_dp_reg_;
    ArenaVector<RegisterInfo*>* ref_regs_;  // Points to core_regs_ or core64_regs_.
    int* next_ref_reg_;

   private:
    Mir2Lir* const m2l_;
  };

  struct PromotionMap {
    RegLocationType core_location:3;
    uint8_t core_reg;
    RegLocationType fp_location:3;
    uint8_t fp_reg;
    bool first_in_pair;
  };

  //
  // Slow paths.  This object is used to generate a sequence of code that is executed in the
  // slow path.  For example, resolving a string or class is slow as it will only be executed
  // once (after that it is resolved and doesn't need to be done again).  We want slow paths
  // to be placed out-of-line, and not require a (probably mispredicted) conditional forward
  // branch over them.
  //
  // If you want to create a slow path, declare a class derived from LIRSlowPath and provide
  // the Compile() function that will be called near the end of the code generated by the
  // method.
  //
  // The basic flow for a slow path is:
  //
  //           CMP reg, #value
  //           BEQ fromfast
  //   cont:
  //           ...
  //           fast path code
  //           ...
  //           more code
  //           ...
  //           RETURN
  //
  //   fromfast:
  //           ...
  //           slow path code
  //           ...
  //           B cont
  //
  // So we need two labels and two branches.  The first branch (called fromfast) is
  // the conditional branch to the slow path code.  The second label (called cont) is used
  // as an unconditional branch target for getting back to the code after the slow path
  // has completed.
  //

  class LIRSlowPath : public ArenaObject<kArenaAllocSlowPaths> {
   public:
    LIRSlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont = nullptr)
        : m2l_(m2l), cu_(m2l->cu_),
          current_dex_pc_(m2l->current_dalvik_offset_), current_mir_(m2l->current_mir_),
          fromfast_(fromfast), cont_(cont) {
    }
    virtual ~LIRSlowPath() {}
    virtual void Compile() = 0;

    LIR* GetContinuationLabel() {
      return cont_;
    }

    LIR* GetFromFast() {
      return fromfast_;
    }

   protected:
    LIR* GenerateTargetLabel(int opcode = kPseudoTargetLabel);

    Mir2Lir* const m2l_;
    CompilationUnit* const cu_;
    const DexOffset current_dex_pc_;
    MIR* current_mir_;
    LIR* const fromfast_;
    LIR* const cont_;
  };
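  // A minimal sketch (assumed, not an existing slow path in this file) of the pattern described
  // above: derive from LIRSlowPath, emit the out-of-line code in Compile(), and queue the object
  // so HandleSlowPaths() compiles it at the end of the method. AddSlowPath(), the entrypoint
  // kQuickTestSuspend and OpUnconditionalBranch() are assumed helpers from elsewhere in this
  // class and the entrypoint list.
  //
  //   class ExampleSlowPath : public LIRSlowPath {
  //    public:
  //     ExampleSlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont)
  //         : LIRSlowPath(m2l, fromfast, cont) {}
  //     void Compile() OVERRIDE {
  //       GenerateTargetLabel();                                  // Binds the fromfast label.
  //       m2l_->CallRuntimeHelper(kQuickTestSuspend, true);       // Example runtime call.
  //       m2l_->OpUnconditionalBranch(GetContinuationLabel());    // B cont.
  //     }
  //   };
  //
  //   AddSlowPath(new (arena_) ExampleSlowPath(this, branch, cont));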
  class SuspendCheckSlowPath;
  class SpecialSuspendCheckSlowPath;

  // Helper class for changing mem_ref_type_ until the end of current scope. See mem_ref_type_.
  class ScopedMemRefType {
   public:
    ScopedMemRefType(Mir2Lir* m2l, ResourceMask::ResourceBit new_mem_ref_type)
        : m2l_(m2l),
          old_mem_ref_type_(m2l->mem_ref_type_) {
      m2l_->mem_ref_type_ = new_mem_ref_type;
    }

    ~ScopedMemRefType() {
      m2l_->mem_ref_type_ = old_mem_ref_type_;
    }

   private:
    Mir2Lir* const m2l_;
    ResourceMask::ResourceBit old_mem_ref_type_;

    DISALLOW_COPY_AND_ASSIGN(ScopedMemRefType);
  };

  virtual ~Mir2Lir() {}

  /**
   * @brief Decodes the LIR offset.
   * @return Returns the scaled offset of LIR.
   */
  virtual size_t GetInstructionOffset(LIR* lir);

  int32_t s4FromSwitchData(const void* switch_data) {
    return *reinterpret_cast<const int32_t*>(switch_data);
  }

  /*
   * TODO: this is a trace JIT vestige, and its use should be reconsidered. At the time
   * it was introduced, it was intended to be a quick best guess of type without having to
   * take the time to do type analysis. Currently, though, we have a much better idea of
   * the types of Dalvik virtual registers. Instead of using this for a best guess, why not
   * just use our knowledge of type to select the most appropriate register class?
   */
  RegisterClass RegClassBySize(OpSize size) {
    if (size == kReference) {
      return kRefReg;
    } else {
      return (size == kUnsignedHalf || size == kSignedHalf || size == kUnsignedByte ||
              size == kSignedByte) ? kCoreReg : kAnyReg;
    }
  }

  size_t CodeBufferSizeInBytes() {
    return code_buffer_.size() / sizeof(code_buffer_[0]);
  }

  static bool IsPseudoLirOp(int opcode) {
    return (opcode < 0);
  }

  /*
   * LIR operands are 32-bit integers.  Sometimes, (especially for managing
   * instructions which require PC-relative fixups), we need the operands to carry
   * pointers.  To do this, we assign these pointers an index in pointer_storage_, and
   * hold that index in the operand array.
   * TUNING: If use of these utilities becomes more common on 32-bit builds, it
   * may be worth conditionally-compiling a set of identity functions here.
   */
  template <typename T>
  uint32_t WrapPointer(const T* pointer) {
    uint32_t res = pointer_storage_.size();
    pointer_storage_.push_back(pointer);
    return res;
  }

  template <typename T>
  const T* UnwrapPointer(size_t index) {
    return reinterpret_cast<const T*>(pointer_storage_[index]);
  }
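  // Illustrative sketch (assumed operand index and table pointer) of the wrap/unwrap pattern
  // described above: store a pointer's index in an LIR operand and recover the pointer later.
  //
  //   lir->operands[2] = WrapPointer(switch_table);                           // Stash the index.
  //   ...
  //   const SwitchTable* tab = UnwrapPointer<SwitchTable>(lir->operands[2]);  // Recover it.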
  // strdup(), but allocates from the arena.
  char* ArenaStrdup(const char* str) {
    size_t len = strlen(str) + 1;
    char* res = arena_->AllocArray<char>(len, kArenaAllocMisc);
    if (res != nullptr) {
      strncpy(res, str, len);
    }
    return res;
  }

  // Shared by all targets - implemented in codegen_util.cc
  void AppendLIR(LIR* lir);
  void InsertLIRBefore(LIR* current_lir, LIR* new_lir);
  void InsertLIRAfter(LIR* current_lir, LIR* new_lir);

  /**
   * @brief Provides the maximum number of compiler temporaries that the backend can/wants
   * to place in a frame.
   * @return Returns the maximum number of compiler temporaries.
   */
  size_t GetMaxPossibleCompilerTemps() const;

  /**
   * @brief Provides the number of bytes needed in frame for spilling of compiler temporaries.
   * @return Returns the size in bytes for space needed for compiler temporary spill region.
   */
  size_t GetNumBytesForCompilerTempSpillRegion();

  DexOffset GetCurrentDexPc() const {
    return current_dalvik_offset_;
  }

  RegisterClass ShortyToRegClass(char shorty_type);
  int ComputeFrameSize();
  void Materialize();
  virtual CompiledMethod* GetCompiledMethod();
  void MarkSafepointPC(LIR* inst);
  void MarkSafepointPCAfter(LIR* after);
  void SetupResourceMasks(LIR* lir);
  void SetMemRefType(LIR* lir, bool is_load, int mem_type);
  void AnnotateDalvikRegAccess(LIR* lir, int reg_id, bool is_load, bool is64bit);
  void SetupRegMask(ResourceMask* mask, int reg);
  void ClearRegMask(ResourceMask* mask, int reg);
  void DumpLIRInsn(LIR* arg, unsigned char* base_addr);
  void EliminateLoad(LIR* lir, int reg_id);
  void DumpDependentInsnPair(LIR* check_lir, LIR* this_lir, const char* type);
  void DumpPromotionMap();
  void CodegenDump();
  LIR* RawLIR(DexOffset dalvik_offset, int opcode, int op0 = 0, int op1 = 0,
              int op2 = 0, int op3 = 0, int op4 = 0, LIR* target = nullptr);
  LIR* NewLIR0(int opcode);
  LIR* NewLIR1(int opcode, int dest);
  LIR* NewLIR2(int opcode, int dest, int src1);
  LIR* NewLIR2NoDest(int opcode, int src, int info);
  LIR* NewLIR3(int opcode, int dest, int src1, int src2);
  LIR* NewLIR4(int opcode, int dest, int src1, int src2, int info);
  LIR* NewLIR5(int opcode, int dest, int src1, int src2, int info1, int info2);
  LIR* ScanLiteralPool(LIR* data_target, int value, unsigned int delta);
  LIR* ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi);
  LIR* ScanLiteralPoolMethod(LIR* data_target, const MethodReference& method);
  LIR* ScanLiteralPoolClass(LIR* data_target, const DexFile& dex_file, uint32_t type_idx);
  LIR* AddWordData(LIR* *constant_list_p, int value);
  LIR* AddWideData(LIR* *constant_list_p, int val_lo, int val_hi);
  void DumpSparseSwitchTable(const uint16_t* table);
  void DumpPackedSwitchTable(const uint16_t* table);
  void MarkBoundary(DexOffset offset, const char* inst_str);
  void NopLIR(LIR* lir);
  void UnlinkLIR(LIR* lir);
  bool IsInexpensiveConstant(RegLocation rl_src);
  ConditionCode FlipComparisonOrder(ConditionCode before);
  ConditionCode NegateComparison(ConditionCode before);
  virtual void InstallLiteralPools();
  void InstallSwitchTables();
  void InstallFillArrayData();
  bool VerifyCatchEntries();
  void CreateMappingTables();
  void CreateNativeGcMap();
  void CreateNativeGcMapWithoutRegisterPromotion();
  int AssignLiteralOffset(CodeOffset offset);
  int AssignSwitchTablesOffset(CodeOffset offset);
  int AssignFillArrayDataOffset(CodeOffset offset);
  LIR* InsertCaseLabel(uint32_t bbid, int keyVal);

  // Handle bookkeeping to convert a wide RegLocation to a narrow RegLocation.  No code generated.
  virtual RegLocation NarrowRegLoc(RegLocation loc);

  // Shared by all targets - implemented in local_optimizations.cc
  void ConvertMemOpIntoMove(LIR* orig_lir, RegStorage dest, RegStorage src);
  void ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir);
  void ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir);
  virtual void ApplyLocalOptimizations(LIR* head_lir, LIR* tail_lir);

  // Shared by all targets - implemented in ralloc_util.cc
  int GetSRegHi(int lowSreg);
  bool LiveOut(int s_reg);
  void SimpleRegAlloc();
  void ResetRegPool();
  void CompilerInitPool(RegisterInfo* info, RegStorage* regs, int num);
  void DumpRegPool(ArenaVector<RegisterInfo*>* regs);
  void DumpCoreRegPool();
  void DumpFpRegPool();
  void DumpRegPools();
  /* Mark a temp register as dead.  Does not affect allocation state. */
  void Clobber(RegStorage reg);
  void ClobberSReg(int s_reg);
  void ClobberAliases(RegisterInfo* info, uint32_t clobber_mask);
  int SRegToPMap(int s_reg);
  void RecordCorePromotion(RegStorage reg, int s_reg);
  RegStorage AllocPreservedCoreReg(int s_reg);
  void RecordFpPromotion(RegStorage reg, int s_reg);
  RegStorage AllocPreservedFpReg(int s_reg);
  virtual RegStorage AllocPreservedSingle(int s_reg);
  virtual RegStorage AllocPreservedDouble(int s_reg);
  RegStorage AllocTempBody(ArenaVector<RegisterInfo*>& regs, int* next_temp, bool required);
  virtual RegStorage AllocTemp(bool required = true);
  virtual RegStorage AllocTempWide(bool required = true);
  virtual RegStorage AllocTempRef(bool required = true);
  virtual RegStorage AllocTempSingle(bool required = true);
  virtual RegStorage AllocTempDouble(bool required = true);
  virtual RegStorage AllocTypedTemp(bool fp_hint, int reg_class, bool required = true);
  virtual RegStorage AllocTypedTempWide(bool fp_hint, int reg_class, bool required = true);
  void FlushReg(RegStorage reg);
  void FlushRegWide(RegStorage reg);
  RegStorage AllocLiveReg(int s_reg, int reg_class, bool wide);
  RegStorage FindLiveReg(ArenaVector<RegisterInfo*>& regs, int s_reg);
  virtual void FreeTemp(RegStorage reg);
  virtual void FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free);
  virtual bool IsLive(RegStorage reg);
  virtual bool IsTemp(RegStorage reg);
  bool IsPromoted(RegStorage reg);
  bool IsDirty(RegStorage reg);
  virtual void LockTemp(RegStorage reg);
  void ResetDef(RegStorage reg);
  void NullifyRange(RegStorage reg, int s_reg);
  void MarkDef(RegLocation rl, LIR* start, LIR* finish);
  void MarkDefWide(RegLocation rl, LIR* start, LIR* finish);
  void ResetDefLoc(RegLocation rl);
  void ResetDefLocWide(RegLocation rl);
  void ResetDefTracking();
  void ClobberAllTemps();
  void FlushSpecificReg(RegisterInfo* info);
  void FlushAllRegs();
  bool RegClassMatches(int reg_class, RegStorage reg);
  void MarkLive(RegLocation loc);
  void MarkTemp(RegStorage reg);
  void UnmarkTemp(RegStorage reg);
  void MarkWide(RegStorage reg);
  void MarkNarrow(RegStorage reg);
  void MarkClean(RegLocation loc);
  void MarkDirty(RegLocation loc);
  void MarkInUse(RegStorage reg);
  bool CheckCorePoolSanity();
  virtual RegLocation UpdateLoc(RegLocation loc);
  virtual RegLocation UpdateLocWide(RegLocation loc);
  RegLocation UpdateRawLoc(RegLocation loc);

  /**
   * @brief Used to prepare a register location to receive a wide value.
   * @see EvalLoc
   * @param loc the location where the value will be stored.
   * @param reg_class Type of register needed.
   * @param update Whether the liveness information should be updated.
   * @return Returns the properly typed temporary in physical register pairs.
   */
  virtual RegLocation EvalLocWide(RegLocation loc, int reg_class, bool update);

  /**
   * @brief Used to prepare a register location to receive a value.
   * @param loc the location where the value will be stored.
   * @param reg_class Type of register needed.
   * @param update Whether the liveness information should be updated.
   * @return Returns the properly typed temporary in physical register.
   */
  virtual RegLocation EvalLoc(RegLocation loc, int reg_class, bool update);

  virtual void AnalyzeMIR(RefCounts* core_counts, MIR* mir, uint32_t weight);
  virtual void CountRefs(RefCounts* core_counts, RefCounts* fp_counts, size_t num_regs);
  void DumpCounts(const RefCounts* arr, int size, const char* msg);
  virtual void DoPromotion();
  int VRegOffset(int v_reg);
  int SRegOffset(int s_reg);
  RegLocation GetReturnWide(RegisterClass reg_class);
  RegLocation GetReturn(RegisterClass reg_class);
  RegisterInfo* GetRegInfo(RegStorage reg);

  // Shared by all targets - implemented in gen_common.cc.
  void AddIntrinsicSlowPath(CallInfo* info, LIR* branch, LIR* resume = nullptr);
  virtual bool HandleEasyDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                RegLocation rl_src, RegLocation rl_dest, int lit);
  bool HandleEasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit);
  bool HandleEasyFloatingPointDiv(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2);
  virtual void HandleSlowPaths();
  void GenBarrier();
  void GenDivZeroException();
  // c_code holds condition code that's generated from testing divisor against 0.
  void GenDivZeroCheck(ConditionCode c_code);
  // reg holds divisor.
  void GenDivZeroCheck(RegStorage reg);
  void GenArrayBoundsCheck(RegStorage index, RegStorage length);
  void GenArrayBoundsCheck(int32_t index, RegStorage length);
  LIR* GenNullCheck(RegStorage reg);
  void MarkPossibleNullPointerException(int opt_flags);
  void MarkPossibleNullPointerExceptionAfter(int opt_flags, LIR* after);
  void MarkPossibleStackOverflowException();
  void ForceImplicitNullCheck(RegStorage reg, int opt_flags);
  LIR* GenNullCheck(RegStorage m_reg, int opt_flags);
  LIR* GenExplicitNullCheck(RegStorage m_reg, int opt_flags);
  virtual void GenImplicitNullCheck(RegStorage reg, int opt_flags);
  void GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1, RegLocation rl_src2,
                           LIR* taken);
  void GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken);
  virtual void GenIntToLong(RegLocation rl_dest, RegLocation rl_src);
  virtual void GenLongToInt(RegLocation rl_dest, RegLocation rl_src);
  void GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
                       RegLocation rl_src);
  void GenNewArray(uint32_t type_idx, RegLocation rl_dest,
                   RegLocation rl_src);
  void GenFilledNewArray(CallInfo* info);
  void GenFillArrayData(MIR* mir, DexOffset table_offset, RegLocation rl_src);
  void GenSput(MIR* mir, RegLocation rl_src, OpSize size);
  // Get entrypoints are specific to types; size alone is not sufficient to safely infer the
  // entrypoint.
  void GenSget(MIR* mir, RegLocation rl_dest, OpSize size, Primitive::Type type);
  void GenIGet(MIR* mir, int opt_flags, OpSize size, Primitive::Type type,
               RegLocation rl_dest, RegLocation rl_obj);
  void GenIPut(MIR* mir, int opt_flags, OpSize size,
               RegLocation rl_src, RegLocation rl_obj);
  void GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index,
                      RegLocation rl_src);

  void GenConstClass(uint32_t type_idx, RegLocation rl_dest);
  void GenConstString(uint32_t string_idx, RegLocation rl_dest);
  void GenNewInstance(uint32_t type_idx, RegLocation rl_dest);
  void GenThrow(RegLocation rl_src);
  void GenInstanceof(uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src);
  void GenCheckCast(int opt_flags, uint32_t insn_idx, uint32_t type_idx, RegLocation rl_src);
  void GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_dest,
                    RegLocation rl_src1, RegLocation rl_src2);
  virtual void GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
                              RegLocation rl_src1, RegLocation rl_shift);
  void GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest,
                        RegLocation rl_src, int lit);
  virtual void GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
                              RegLocation rl_src1, RegLocation rl_src2, int flags);
  void GenConversionCall(QuickEntrypointEnum trampoline, RegLocation rl_dest, RegLocation rl_src,
                         RegisterClass return_reg_class);
  void GenSuspendTest(int opt_flags);
  void GenSuspendTestAndBranch(int opt_flags, LIR* target);

  // This will be overridden by x86 implementation.
  virtual void GenConstWide(RegLocation rl_dest, int64_t value);
  virtual void GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
                             RegLocation rl_src1, RegLocation rl_src2, int flags);

  // Shared by all targets - implemented in gen_invoke.cc.
  LIR* CallHelper(RegStorage r_tgt, QuickEntrypointEnum trampoline, bool safepoint_pc,
                  bool use_link = true);
  RegStorage CallHelperSetup(QuickEntrypointEnum trampoline);

  void CallRuntimeHelper(QuickEntrypointEnum trampoline, bool safepoint_pc);
  void CallRuntimeHelperImm(QuickEntrypointEnum trampoline, int arg0, bool safepoint_pc);
  void CallRuntimeHelperReg(QuickEntrypointEnum trampoline, RegStorage arg0, bool safepoint_pc);
  void CallRuntimeHelperRegLocation(QuickEntrypointEnum trampoline, RegLocation arg0,
                                    bool safepoint_pc);
  void CallRuntimeHelperImmImm(QuickEntrypointEnum trampoline, int arg0, int arg1,
                               bool safepoint_pc);
  void CallRuntimeHelperImmRegLocation(QuickEntrypointEnum trampoline, int arg0, RegLocation arg1,
                                       bool safepoint_pc);
  void CallRuntimeHelperRegLocationImm(QuickEntrypointEnum trampoline, RegLocation arg0, int arg1,
                                       bool safepoint_pc);
  void CallRuntimeHelperImmReg(QuickEntrypointEnum trampoline, int arg0, RegStorage arg1,
                               bool safepoint_pc);
  void CallRuntimeHelperRegImm(QuickEntrypointEnum trampoline, RegStorage arg0, int arg1,
                               bool safepoint_pc);
  void CallRuntimeHelperImmMethod(QuickEntrypointEnum trampoline, int arg0, bool safepoint_pc);
  void CallRuntimeHelperRegMethod(QuickEntrypointEnum trampoline, RegStorage arg0,
                                  bool safepoint_pc);
  void CallRuntimeHelperRegRegLocationMethod(QuickEntrypointEnum trampoline, RegStorage arg0,
                                             RegLocation arg1, bool safepoint_pc);
  void CallRuntimeHelperRegLocationRegLocation(QuickEntrypointEnum trampoline, RegLocation arg0,
                                               RegLocation arg1, bool safepoint_pc);
  void CallRuntimeHelperRegReg(QuickEntrypointEnum trampoline, RegStorage arg0, RegStorage arg1,
                               bool safepoint_pc);
  void CallRuntimeHelperRegRegImm(QuickEntrypointEnum trampoline, RegStorage arg0,
                                  RegStorage arg1, int arg2, bool safepoint_pc);
  void CallRuntimeHelperImmRegLocationMethod(QuickEntrypointEnum trampoline, int arg0,
                                             RegLocation arg1, bool safepoint_pc);
  void CallRuntimeHelperImmImmMethod(QuickEntrypointEnum trampoline, int arg0, int arg1,
                                     bool safepoint_pc);
  void CallRuntimeHelperImmRegLocationRegLocation(QuickEntrypointEnum trampoline, int arg0,
                                                  RegLocation arg1, RegLocation arg2,
                                                  bool safepoint_pc);
  void CallRuntimeHelperRegLocationRegLocationRegLocation(QuickEntrypointEnum trampoline,
                                                          RegLocation arg0, RegLocation arg1,
                                                          RegLocation arg2,
                                                          bool safepoint_pc);
  void CallRuntimeHelperRegLocationRegLocationRegLocationRegLocation(
      QuickEntrypointEnum trampoline, RegLocation arg0, RegLocation arg1,
      RegLocation arg2, RegLocation arg3, bool safepoint_pc);

  void GenInvoke(CallInfo* info);
  void GenInvokeNoInline(CallInfo* info);
  virtual NextCallInsn GetNextSDCallInsn() = 0;

  /*
   * @brief Generate the actual call insn based on the method info.
   * @param method_info the lowering info for the method call.
   * @return Returns the call instruction.
   */
  virtual LIR* GenCallInsn(const MirMethodLoweringInfo& method_info) = 0;

  virtual void FlushIns(RegLocation* ArgLocs, RegLocation rl_method);
  virtual int GenDalvikArgs(CallInfo* info, int call_state, LIR** pcrLabel,
                            NextCallInsn next_call_insn,
                            const MethodReference& target_method,
                            uint32_t vtable_idx,
                            uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
                            bool skip_this);
  virtual int GenDalvikArgsBulkCopy(CallInfo* info, int first, int count);
  virtual void GenDalvikArgsFlushPromoted(CallInfo* info, int start);

  /**
   * @brief Used to determine the register location of destination.
   * @details This is needed during generation of inline intrinsics because it finds the
   * destination of the return, either the physical register or the target of move-result.
   * @param info Information about the invoke.
   * @return Returns the destination location.
   */
  RegLocation InlineTarget(CallInfo* info);

  /**
   * @brief Used to determine the wide register location of destination.
   * @see InlineTarget
   * @param info Information about the invoke.
   * @return Returns the destination location.
   */
  RegLocation InlineTargetWide(CallInfo* info);

  bool GenInlinedReferenceGetReferent(CallInfo* info);
  virtual bool GenInlinedCharAt(CallInfo* info);
  bool GenInlinedStringGetCharsNoCheck(CallInfo* info);
  bool GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty);
  bool GenInlinedStringFactoryNewStringFromBytes(CallInfo* info);
  bool GenInlinedStringFactoryNewStringFromChars(CallInfo* info);
  bool GenInlinedStringFactoryNewStringFromString(CallInfo* info);
  virtual bool GenInlinedReverseBits(CallInfo* info, OpSize size);
  bool GenInlinedReverseBytes(CallInfo* info, OpSize size);
  virtual bool GenInlinedAbsInt(CallInfo* info);
  virtual bool GenInlinedAbsLong(CallInfo* info);
  virtual bool GenInlinedAbsFloat(CallInfo* info) = 0;
  virtual bool GenInlinedAbsDouble(CallInfo* info) = 0;
  bool GenInlinedFloatCvt(CallInfo* info);
  bool GenInlinedDoubleCvt(CallInfo* info);
  virtual bool GenInlinedCeil(CallInfo* info);
  virtual bool GenInlinedFloor(CallInfo* info);
  virtual bool GenInlinedRint(CallInfo* info);
  virtual bool GenInlinedRound(CallInfo* info, bool is_double);
  virtual bool GenInlinedArrayCopyCharArray(CallInfo* info);
  virtual bool GenInlinedIndexOf(CallInfo* info, bool zero_based);
  bool GenInlinedStringCompareTo(CallInfo* info);
  virtual bool GenInlinedCurrentThread(CallInfo* info);
  bool GenInlinedUnsafeGet(CallInfo* info, bool is_long, bool is_object, bool is_volatile);
  bool GenInlinedUnsafePut(CallInfo* info, bool is_long, bool is_object,
                           bool is_volatile, bool is_ordered);

  // Shared by all targets - implemented in gen_loadstore.cc.
  RegLocation LoadCurrMethod();
  void LoadCurrMethodDirect(RegStorage r_tgt);
  RegStorage LoadCurrMethodWithHint(RegStorage r_hint);
  virtual LIR* LoadConstant(RegStorage r_dest, int value);
  // Natural word size.
  LIR* LoadWordDisp(RegStorage r_base, int displacement, RegStorage r_dest) {
    return LoadBaseDisp(r_base, displacement, r_dest, kWord, kNotVolatile);
  }
  // Load 32 bits, regardless of target.
  LIR* Load32Disp(RegStorage r_base, int displacement, RegStorage r_dest) {
    return LoadBaseDisp(r_base, displacement, r_dest, k32, kNotVolatile);
  }
  // Load a reference at base + displacement and decompress into register.
  LIR* LoadRefDisp(RegStorage r_base, int displacement, RegStorage r_dest,
                   VolatileKind is_volatile) {
    return LoadBaseDisp(r_base, displacement, r_dest, kReference, is_volatile);
  }
  // Load a reference at base + index and decompress into register.
  LIR* LoadRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale) {
    return LoadBaseIndexed(r_base, r_index, r_dest, scale, kReference);
  }
  // Load Dalvik value with 32-bit memory storage. If compressed object reference, decompress.
  virtual RegLocation LoadValue(RegLocation rl_src, RegisterClass op_kind);
  // Load Dalvik value with 64-bit memory storage.
  virtual RegLocation LoadValueWide(RegLocation rl_src, RegisterClass op_kind);
  // Load Dalvik value with 32-bit memory storage. If compressed object reference, decompress.
  virtual void LoadValueDirect(RegLocation rl_src, RegStorage r_dest);
  // Load Dalvik value with 32-bit memory storage. If compressed object reference, decompress.
  virtual void LoadValueDirectFixed(RegLocation rl_src, RegStorage r_dest);
  // Load Dalvik value with 64-bit memory storage.
  virtual void LoadValueDirectWide(RegLocation rl_src, RegStorage r_dest);
  // Load Dalvik value with 64-bit memory storage.
  virtual void LoadValueDirectWideFixed(RegLocation rl_src, RegStorage r_dest);
  // Store an item of natural word size.
  LIR* StoreWordDisp(RegStorage r_base, int displacement, RegStorage r_src) {
    return StoreBaseDisp(r_base, displacement, r_src, kWord, kNotVolatile);
  }
  // Store an uncompressed reference into a compressed 32-bit container.
  LIR* StoreRefDisp(RegStorage r_base, int displacement, RegStorage r_src,
                    VolatileKind is_volatile) {
    return StoreBaseDisp(r_base, displacement, r_src, kReference, is_volatile);
  }
  // Store an uncompressed reference into a compressed 32-bit container by index.
  LIR* StoreRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale) {
    return StoreBaseIndexed(r_base, r_index, r_src, scale, kReference);
  }
  // Store 32 bits, regardless of target.
  LIR* Store32Disp(RegStorage r_base, int displacement, RegStorage r_src) {
    return StoreBaseDisp(r_base, displacement, r_src, k32, kNotVolatile);
  }

  /**
   * @brief Used to do the final store in the destination as per bytecode semantics.
   * @param rl_dest The destination dalvik register location.
   * @param rl_src The source register location. Can be either physical register or dalvik
   * register.
   */
  virtual void StoreValue(RegLocation rl_dest, RegLocation rl_src);
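  // A typical codegen sequence (illustrative sketch; OpRegRegReg is a target op helper assumed
  // to be declared elsewhere in this class): load the sources into registers, evaluate a
  // destination temp, emit the operation, then let StoreValue() finish per bytecode semantics.
  //
  //   rl_src1 = LoadValue(rl_src1, kCoreReg);
  //   rl_src2 = LoadValue(rl_src2, kCoreReg);
  //   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  //   OpRegRegReg(kOpAdd, rl_result.reg, rl_src1.reg, rl_src2.reg);
  //   StoreValue(rl_dest, rl_result);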
  /**
   * @brief Used to do the final store in a wide destination as per bytecode semantics.
   * @see StoreValue
   * @param rl_dest The destination dalvik register location.
   * @param rl_src The source register location. Can be either physical register or dalvik
   * register.
   */
  virtual void StoreValueWide(RegLocation rl_dest, RegLocation rl_src);

  /**
   * @brief Used to do the final store to a destination as per bytecode semantics.
   * @see StoreValue
   * @param rl_dest The destination dalvik register location.
   * @param rl_src The source register location. It must be kLocPhysReg.
   *
   * This is used for x86 two-operand computations, where we have computed the correct
   * register value that now needs to be properly registered. This is used to avoid an
   * extra register copy that would result if StoreValue was called.
   */
  virtual void StoreFinalValue(RegLocation rl_dest, RegLocation rl_src);

  /**
   * @brief Used to do the final store in a wide destination as per bytecode semantics.
   * @see StoreValueWide
   * @param rl_dest The destination dalvik register location.
   * @param rl_src The source register location. It must be kLocPhysReg.
   *
   * This is used for x86 two-operand computations, where we have computed the correct
   * register values that now need to be properly registered. This is used to avoid an
   * extra pair of register copies that would result if StoreValueWide was called.
   */
  virtual void StoreFinalValueWide(RegLocation rl_dest, RegLocation rl_src);

  // Shared by all targets - implemented in mir_to_lir.cc.
  void CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list);
  virtual void HandleExtendedMethodMIR(BasicBlock* bb, MIR* mir);
  bool MethodBlockCodeGen(BasicBlock* bb);
  bool SpecialMIR2LIR(const InlineMethod& special);
  virtual void MethodMIR2LIR();
  // Update LIR for verbose listings.
  void UpdateLIROffsets();

  /**
   * @brief Mark a garbage collection card. Skip if the stored value is null.
   * @param val_reg the register holding the stored value to check against null.
   * @param tgt_addr_reg the address of the object or array where the value was stored.
   * @param opt_flags the optimization flags which may indicate that the value is non-null.
   */
  void MarkGCCard(int opt_flags, RegStorage val_reg, RegStorage tgt_addr_reg);

  /*
   * @brief Load the address of the dex method into the register.
   * @param target_method The MethodReference of the method to be invoked.
   * @param type How the method will be invoked.
   * @param symbolic_reg the register that will contain the code address.
   * @note register will be passed to TargetReg to get physical register.
   */
  void LoadCodeAddress(const MethodReference& target_method, InvokeType type,
                       SpecialTargetRegister symbolic_reg);

  /*
   * @brief Load the Method* of a dex method into the register.
   * @param target_method The MethodReference of the method to be invoked.
   * @param type How the method will be invoked.
   * @param symbolic_reg the register that will contain the Method* address.
   * @note register will be passed to TargetReg to get physical register.
   */
  virtual void LoadMethodAddress(const MethodReference& target_method, InvokeType type,
                                 SpecialTargetRegister symbolic_reg);

  /*
   * @brief Load the Class* of a Dex Class type into the register.
   * @param dex_file DexFile that contains the class type.
   * @param type_idx Index of the class type in the dex file.
   * @param symbolic_reg the register that will contain the class address.
   * @note register will be passed to TargetReg to get physical register.
   */
  virtual void LoadClassType(const DexFile& dex_file, uint32_t type_idx,
                             SpecialTargetRegister symbolic_reg);

  // TODO: Support PC-relative dex cache array loads on all platforms and
  // replace CanUseOpPcRelDexCacheArrayLoad() with dex_cache_arrays_layout_.Valid().
  virtual bool CanUseOpPcRelDexCacheArrayLoad() const;

  /*
   * @brief Load an element of one of the dex cache arrays.
   * @param dex_file the dex file associated with the target dex cache.
   * @param offset the offset of the element in the fixed dex cache arrays' layout.
   * @param r_dest the register where to load the element.
   * @param wide load 64 bits if true, otherwise 32 bits.
   */
  virtual void OpPcRelDexCacheArrayLoad(const DexFile* dex_file, int offset, RegStorage r_dest,
                                        bool wide);

  // Routines that work for the generic case, but may be overridden by target.
  /*
   * @brief Compare memory to immediate, and branch if condition true.
   * @param cond The condition code that when true will branch to the target.
   * @param temp_reg A temporary register that can be used if compare to memory is not
   * supported by the architecture.
   * @param base_reg The register holding the base address.
   * @param offset The offset from the base.
   * @param check_value The immediate to compare to.
   * @param target branch target (or null)
   * @param compare output for getting LIR for comparison (or null)
   * @return Returns the branch instruction that was generated.
   */
  virtual LIR* OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
                                 int offset, int check_value, LIR* target, LIR** compare);

  // Required for target - codegen helpers.
  virtual bool SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
                                  RegLocation rl_src, RegLocation rl_dest, int lit) = 0;
  virtual bool EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) = 0;
  virtual void GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1,
                                          int32_t constant) = 0;
  virtual void GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1,
                                           int64_t constant) = 0;
  virtual LIR* CheckSuspendUsingLoad() = 0;

  virtual RegStorage LoadHelper(QuickEntrypointEnum trampoline) = 0;

  virtual LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
                            OpSize size, VolatileKind is_volatile) = 0;
  virtual LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
                               int scale, OpSize size) = 0;
  virtual LIR* LoadConstantNoClobber(RegStorage r_dest, int value) = 0;
  virtual LIR* LoadConstantWide(RegStorage r_dest, int64_t value) = 0;
  virtual LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
                             OpSize size, VolatileKind is_volatile) = 0;
  virtual LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
                                int scale, OpSize size) = 0;

  /**
   * @brief Unconditionally mark a garbage collection card.
   * @param tgt_addr_reg the address of the object or array where the value was stored.
   */
  virtual void UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) = 0;

  // Required for target - register utilities.

  bool IsSameReg(RegStorage reg1, RegStorage reg2) {
    RegisterInfo* info1 = GetRegInfo(reg1);
    RegisterInfo* info2 = GetRegInfo(reg2);
    return (info1->Master() == info2->Master() &&
            (info1->StorageMask() & info2->StorageMask()) != 0);
  }

  static constexpr bool IsWide(OpSize size) {
    return size == k64 || size == kDouble;
  }

  static constexpr bool IsRef(OpSize size) {
    return size == kReference;
  }

  /**
   * @brief Portable way of getting special registers from the backend.
   * @param reg Enumeration describing the purpose of the register.
   * @return Return the #RegStorage corresponding to the given purpose @p reg.
   * @note This function is currently allowed to return any suitable view of the registers
   *   (e.g. this could be 64-bit solo or 32-bit solo for 64-bit backends).
   */
  virtual RegStorage TargetReg(SpecialTargetRegister reg) = 0;

  /**
   * @brief Portable way of getting special registers from the backend.
   * @param reg Enumeration describing the purpose of the register.
   * @param wide_kind What kind of view of the special register is required.
   * @return Return the #RegStorage corresponding to the given purpose @p reg.
   *
   * @note For 32b system, wide (kWide) views only make sense for the argument registers and the
   *       return. In that case, this function should return a pair where the first component of
   *       the result will be the indicated special register.
   */
  virtual RegStorage TargetReg(SpecialTargetRegister reg, WideKind wide_kind) {
    if (wide_kind == kWide) {
      DCHECK((kArg0 <= reg && reg < kArg7) || (kFArg0 <= reg && reg < kFArg15) || (kRet0 == reg));
      static_assert((kArg1 == kArg0 + 1) && (kArg2 == kArg1 + 1) && (kArg3 == kArg2 + 1) &&
                    (kArg4 == kArg3 + 1) && (kArg5 == kArg4 + 1) && (kArg6 == kArg5 + 1) &&
                    (kArg7 == kArg6 + 1), "kargs range unexpected");
      static_assert((kFArg1 == kFArg0 + 1) && (kFArg2 == kFArg1 + 1) && (kFArg3 == kFArg2 + 1) &&
                    (kFArg4 == kFArg3 + 1) && (kFArg5 == kFArg4 + 1) && (kFArg6 == kFArg5 + 1) &&
                    (kFArg7 == kFArg6 + 1) && (kFArg8 == kFArg7 + 1) && (kFArg9 == kFArg8 + 1) &&
                    (kFArg10 == kFArg9 + 1) && (kFArg11 == kFArg10 + 1) &&
                    (kFArg12 == kFArg11 + 1) && (kFArg13 == kFArg12 + 1) &&
                    (kFArg14 == kFArg13 + 1) && (kFArg15 == kFArg14 + 1),
                    "kfargs range unexpected");
      static_assert(kRet1 == kRet0 + 1, "kret range unexpected");
      return RegStorage::MakeRegPair(TargetReg(reg),
                                     TargetReg(static_cast<SpecialTargetRegister>(reg + 1)));
    } else {
      return TargetReg(reg);
    }
  }

  /**
   * @brief Portable way of getting a special register for storing a pointer.
   * @see TargetReg()
   */
  virtual RegStorage TargetPtrReg(SpecialTargetRegister reg) {
    return TargetReg(reg);
  }
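  // Illustrative use (assumed caller context): requesting a wide view of an argument register
  // on a 32-bit backend yields a pair built from consecutive special registers.
  //
  //   RegStorage arg_pair = TargetReg(kArg0, kWide);     // Pair of {kArg0, kArg1} on 32-bit.
  //   RegStorage arg_solo = TargetReg(kArg0, kNotWide);  // Single view of kArg0.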
  // Get a reg storage corresponding to the wide & ref flags of the reg location.
  virtual RegStorage TargetReg(SpecialTargetRegister reg, RegLocation loc) {
    if (loc.ref) {
      return TargetReg(reg, kRef);
    } else {
      return TargetReg(reg, loc.wide ? kWide : kNotWide);
    }
  }

  void EnsureInitializedArgMappingToPhysicalReg();
  virtual RegLocation GetReturnAlt() = 0;
  virtual RegLocation GetReturnWideAlt() = 0;
  virtual RegLocation LocCReturn() = 0;
  virtual RegLocation LocCReturnRef() = 0;
  virtual RegLocation LocCReturnDouble() = 0;
  virtual RegLocation LocCReturnFloat() = 0;
  virtual RegLocation LocCReturnWide() = 0;
  virtual ResourceMask GetRegMaskCommon(const RegStorage& reg) const = 0;
  virtual void AdjustSpillMask() = 0;
  virtual void ClobberCallerSave() = 0;
  virtual void FreeCallTemps() = 0;
  virtual void LockCallTemps() = 0;
  virtual void CompilerInitializeRegAlloc() = 0;

  // Required for target - miscellaneous.
  virtual void AssembleLIR() = 0;
  virtual void DumpResourceMask(LIR* lir, const ResourceMask& mask, const char* prefix) = 0;
  virtual void SetupTargetResourceMasks(LIR* lir, uint64_t flags,
                                        ResourceMask* use_mask, ResourceMask* def_mask) = 0;
  virtual const char* GetTargetInstFmt(int opcode) = 0;
  virtual const char* GetTargetInstName(int opcode) = 0;
  virtual std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr) = 0;

  // Note: This may return kEncodeNone on architectures that do not expose a PC. The caller must
  // take care of this.
  virtual ResourceMask GetPCUseDefEncoding() const = 0;
  virtual uint64_t GetTargetInstFlags(int opcode) = 0;
  virtual size_t GetInsnSize(LIR* lir) = 0;
  virtual bool IsUnconditionalBranch(LIR* lir) = 0;

  // Get the register class for load/store of a field.
  virtual RegisterClass RegClassForFieldLoadStore(OpSize size, bool is_volatile) = 0;

  // Required for target - Dalvik-level generators.
  virtual void GenArithImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                 RegLocation rl_src1, RegLocation rl_src2, int flags) = 0;
  virtual void GenArithOpDouble(Instruction::Code opcode,
                                RegLocation rl_dest, RegLocation rl_src1,
                                RegLocation rl_src2) = 0;
  virtual void GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
                               RegLocation rl_src1, RegLocation rl_src2) = 0;
  virtual void GenCmpFP(Instruction::Code opcode, RegLocation rl_dest,
                        RegLocation rl_src1, RegLocation rl_src2) = 0;
  virtual void GenConversion(Instruction::Code opcode, RegLocation rl_dest,
                             RegLocation rl_src) = 0;
  virtual bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object) = 0;
  /**
   * @brief Used to generate code for intrinsic java\.lang\.Math methods min and max.
   * @details This is also applicable for java\.lang\.StrictMath since it is a simple algorithm
   * that applies to integers. The generated code will write the smallest or largest value
   * directly into the destination register as specified by the invoke information.
   * @param info Information about the invoke.
   * @param is_min If true, generates code that computes the minimum; otherwise computes the
   * maximum.
   * @param is_long If true, the values are longs; otherwise the values are ints.
   * @return Returns true if the code was successfully generated.
   */
  virtual bool GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) = 0;
  virtual bool GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double);

  virtual bool GenInlinedSqrt(CallInfo* info) = 0;
  virtual bool GenInlinedPeek(CallInfo* info, OpSize size) = 0;
  virtual bool GenInlinedPoke(CallInfo* info, OpSize size) = 0;
  virtual RegLocation GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi,
                                bool is_div) = 0;
  virtual RegLocation GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit,
                                   bool is_div) = 0;
  /*
   * @brief Generate an integer div or rem operation.
   * @param rl_dest Destination Location.
   * @param rl_src1 Numerator Location.
   * @param rl_src2 Divisor Location.
   * @param is_div 'true' if this is a division, 'false' for a remainder.
   * @param flags The instruction optimization flags; they can indicate whether the exception
   * check can be elided.
   */
  virtual RegLocation GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
                                RegLocation rl_src2, bool is_div, int flags) = 0;
  /*
   * @brief Generate an integer div or rem operation by a literal.
   * @param rl_dest Destination Location.
   * @param rl_src1 Numerator Location.
   * @param lit Divisor.
   * @param is_div 'true' if this is a division, 'false' for a remainder.
   */
  virtual RegLocation GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit,
                                   bool is_div) = 0;
  virtual void GenCmpLong(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) = 0;

  /**
   * @brief Used for generating code that throws ArithmeticException if both registers are zero.
   * @details This is used for generating DivideByZero checks when the divisor is held in two
   * separate registers.
   * @param reg The register holding the pair of 32-bit values.
   */
  virtual void GenDivZeroCheckWide(RegStorage reg) = 0;

  virtual void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) = 0;
  virtual void GenExitSequence() = 0;
  virtual void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double) = 0;
  virtual void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) = 0;

  /*
   * @brief Handle machine-specific extended MIR opcodes.
   * @param bb The basic block the MIR is from.
   * @param mir The MIR whose opcode is a non-standard extended MIR opcode.
   * @note The base class implementation will abort for unknown opcodes.
   */
  virtual void GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir);

  /**
   * @brief Lowers the kMirOpSelect MIR into LIR.
   * @param bb The basic block the MIR is from.
   * @param mir The MIR whose opcode is kMirOpSelect.
   */
  virtual void GenSelect(BasicBlock* bb, MIR* mir) = 0;

  /**
   * @brief Generates code to select one of the given constants depending on the given
   * condition code.
   */
  virtual void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
                                int32_t true_val, int32_t false_val, RegStorage rs_dest,
                                RegisterClass dest_reg_class) = 0;
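  /*
   * Illustrative use only (register names hypothetical): materializing the boolean result
   * of a comparison, i.e. rs_dest = (left > right) ? 1 : 0, into a core register:
   *
   *   GenSelectConst32(rs_left, rs_right, kCondGt, 1, 0, rs_dest, kCoreReg);
   */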
  /**
   * @brief Used to generate a memory barrier in an architecture-specific way.
   * @details The last generated LIR will be considered for use as the barrier. Namely,
   * if the last LIR can be updated so that it also provides the barrier semantics, it
   * will be used as the barrier. Otherwise, a new LIR that provides the required
   * semantics will be generated.
   * @param barrier_kind The kind of memory barrier to generate.
   * @return whether a new instruction was generated.
   */
  virtual bool GenMemBarrier(MemBarrierKind barrier_kind) = 0;

  virtual void GenMoveException(RegLocation rl_dest) = 0;
  virtual void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
                                             int first_bit, int second_bit) = 0;
  virtual void GenNegDouble(RegLocation rl_dest, RegLocation rl_src) = 0;
  virtual void GenNegFloat(RegLocation rl_dest, RegLocation rl_src) = 0;

  // Create code for switch statements. Will decide between short and long versions below.
  void GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
  void GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);

  // Potentially backend-specific versions of switch instructions for shorter switch statements.
  // The default implementation will create a chained compare-and-branch, as sketched below.
  virtual void GenSmallPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
  virtual void GenSmallSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src);
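  /*
   * Illustrative sketch only: for a small sparse switch with cases {1: L1, 7: L7}, the chained
   * compare-and-branch lowering conceptually emits (register and label names are hypothetical):
   *
   *   OpCmpImmBranch(kCondEq, rs_key, 1, label_L1);
   *   OpCmpImmBranch(kCondEq, rs_key, 7, label_L7);
   *   // Otherwise execution continues with the fall-through (default) case.
   */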
  // Backend-specific versions of switch instructions for longer switch statements.
  virtual void GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) = 0;
  virtual void GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) = 0;

  virtual void GenArrayGet(int opt_flags, OpSize size, RegLocation rl_array,
                           RegLocation rl_index, RegLocation rl_dest, int scale) = 0;
  virtual void GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
                           RegLocation rl_index, RegLocation rl_src, int scale,
                           bool card_mark) = 0;
  virtual void GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
                                 RegLocation rl_src1, RegLocation rl_shift, int flags) = 0;

  // Required for target - single operation generators.
  virtual LIR* OpUnconditionalBranch(LIR* target) = 0;
  virtual LIR* OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage src2, LIR* target) = 0;
  virtual LIR* OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_value,
                              LIR* target) = 0;
  virtual LIR* OpCondBranch(ConditionCode cc, LIR* target) = 0;
  virtual LIR* OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* target) = 0;
  virtual LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src) = 0;
  virtual LIR* OpIT(ConditionCode cond, const char* guide) = 0;
  virtual void OpEndIT(LIR* it) = 0;
  virtual LIR* OpMem(OpKind op, RegStorage r_base, int disp) = 0;
  virtual void OpPcRelLoad(RegStorage reg, LIR* target) = 0;
  virtual LIR* OpReg(OpKind op, RegStorage r_dest_src) = 0;
  virtual void OpRegCopy(RegStorage r_dest, RegStorage r_src) = 0;
  virtual LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) = 0;
  virtual LIR* OpRegImm(OpKind op, RegStorage r_dest_src1, int value) = 0;
  virtual LIR* OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2) = 0;

  /**
   * @brief Used to generate an LIR that does a load from mem to reg.
   * @param r_dest The destination physical register.
   * @param r_base The base physical register for the memory operand.
   * @param offset The displacement for the memory operand.
   * @param move_type Specification on the move desired (size, alignment, register kind).
   * @return Returns the generated move LIR.
   */
  virtual LIR* OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset,
                           MoveType move_type) = 0;

  /**
   * @brief Used to generate an LIR that does a store from reg to mem.
   * @param r_base The base physical register for the memory operand.
   * @param offset The displacement for the memory operand.
   * @param r_src The source physical register.
   * @param move_type Specification on the move desired (size, alignment, register kind).
   * @return Returns the generated move LIR.
   */
  virtual LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src,
                           MoveType move_type) = 0;

  /**
   * @brief Used for generating a conditional register-to-register operation.
   * @param op The opcode kind.
   * @param cc The condition code that when true will perform the opcode.
   * @param r_dest The destination physical register.
   * @param r_src The source physical register.
   * @return Returns the newly created LIR or null in case of creation failure.
   */
  virtual LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) = 0;

  virtual LIR* OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) = 0;
  virtual LIR* OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1,
                           RegStorage r_src2) = 0;
  virtual LIR* OpTestSuspend(LIR* target) = 0;
  virtual LIR* OpVldm(RegStorage r_base, int count) = 0;
  virtual LIR* OpVstm(RegStorage r_base, int count) = 0;
  virtual void OpRegCopyWide(RegStorage dest, RegStorage src) = 0;
  virtual bool InexpensiveConstantInt(int32_t value) = 0;
  virtual bool InexpensiveConstantFloat(int32_t value) = 0;
  virtual bool InexpensiveConstantLong(int64_t value) = 0;
  virtual bool InexpensiveConstantDouble(int64_t value) = 0;
  virtual bool InexpensiveConstantInt(int32_t value, Instruction::Code opcode) {
    UNUSED(opcode);
    return InexpensiveConstantInt(value);
  }

  // May be optimized by targets.
  virtual void GenMonitorEnter(int opt_flags, RegLocation rl_src);
  virtual void GenMonitorExit(int opt_flags, RegLocation rl_src);

  virtual LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) = 0;

  // Queries for backend support for vectors.
  /*
   * Return the number of bits in a vector register.
   * @return 0 if vector registers are not supported, or the
   * number of bits in the vector register if supported.
   */
  virtual int VectorRegisterSize() {
    return 0;
  }
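  /*
   * Illustrative only: a backend with 128-bit SIMD registers (e.g. x86 SSE) would typically
   * override this to report the vector width, while the default of 0 disables vector
   * optimizations for the target:
   *
   *   int VectorRegisterSize() OVERRIDE { return 128; }
   */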
  /*
   * Return the number of reservable vector registers supported.
   * @param long_or_fp true if floating-point computations will be executed, or the operations
   * will be of long type, while vector registers are reserved.
   * @return the number of vector registers that are available.
   * @note The backend should ensure that sufficient vector registers
   * are held back to generate scalar code without exhausting vector
   * registers, if scalar code also uses the vector registers.
   */
  virtual int NumReservableVectorRegisters(bool long_or_fp ATTRIBUTE_UNUSED) {
    return 0;
  }

  /**
   * @brief Buffer of DWARF's Call Frame Information opcodes.
   * @details It is used by debuggers and other tools to unwind the call stack.
   */
  dwarf::LazyDebugFrameOpCodeWriter& cfi() { return cfi_; }

 protected:
  Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);

  CompilationUnit* GetCompilationUnit() {
    return cu_;
  }

  /*
   * @brief Do these SRs overlap?
   * @param rl_op1 One RegLocation
   * @param rl_op2 The other RegLocation
   * @return 'true' if the VR pairs overlap
   *
   * Check to see if a result pair has a misaligned overlap with an operand pair. This
   * is not usual for dx to generate, but it is legal (for now). In a future rev of
   * dex, we'll want to make this case illegal.
   */
  bool PartiallyIntersects(RegLocation rl_op1, RegLocation rl_op2);

  /*
   * @brief Do these SRs intersect?
   * @param rl_op1 One RegLocation
   * @param rl_op2 The other RegLocation
   * @return 'true' if the VR pairs intersect
   *
   * Check to see if a result pair has misaligned overlap or
   * full overlap with an operand pair.
   */
  bool Intersects(RegLocation rl_op1, RegLocation rl_op2);
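  /*
   * Illustrative example only: with wide (64-bit) values occupying VR pairs, an operand pair
   * {v0, v1} and a result pair {v1, v2} have a misaligned overlap, {v0, v1} and {v0, v1} fully
   * overlap, and {v0, v1} and {v2, v3} are disjoint. For these three cases Intersects() is true
   * for the first two, while PartiallyIntersects() is true only for the first.
   */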
  /*
   * @brief Force a location (in a register) into a temporary register.
   * @param loc location of result
   * @returns the updated location
   */
  virtual RegLocation ForceTemp(RegLocation loc);

  /*
   * @brief Force a wide location (in registers) into temporary registers.
   * @param loc location of result
   * @returns the updated location
   */
  virtual RegLocation ForceTempWide(RegLocation loc);

  virtual void GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx,
                                  RegLocation rl_dest, RegLocation rl_src);

  void AddSlowPath(LIRSlowPath* slowpath);

  /*
   * @brief Generate the code for an instanceof check against a class.
   * @param needs_access_check 'true' if we must check the access.
   * @param type_known_final 'true' if the type is known to be a final class.
   * @param type_known_abstract 'true' if the type is known to be an abstract class.
   * @param use_declaring_class 'true' if the type can be loaded off the current Method*.
   * @param can_assume_type_is_in_dex_cache 'true' if the type is known to be in the cache.
   * @param type_idx Type index to use if use_declaring_class is 'false'.
   * @param rl_dest Result to be set to 0 or 1.
   * @param rl_src Object to be tested.
   */
  void GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final,
                                  bool type_known_abstract, bool use_declaring_class,
                                  bool can_assume_type_is_in_dex_cache,
                                  uint32_t type_idx, RegLocation rl_dest,
                                  RegLocation rl_src);

  /**
   * @brief Used to insert a marker that can be used to associate MIR with LIR.
   * @details Only inserts the marker if verbosity is enabled.
   * @param mir The mir that is currently being generated.
   */
  void GenPrintLabel(MIR* mir);

  /**
   * @brief Used to generate the return sequence when there is no frame.
   * @details Assumes that the return registers have already been populated.
   */
  virtual void GenSpecialExitSequence() = 0;

  /**
   * @brief Used to generate the stack frame for the suspend path of special methods.
   */
  virtual void GenSpecialEntryForSuspend() = 0;

  /**
   * @brief Used to pop the stack frame for the suspend path of special methods.
   */
  virtual void GenSpecialExitForSuspend() = 0;

  /**
   * @brief Used to generate code for special methods that are known to be
   * small enough to work in frameless mode.
   * @param bb The basic block of the first MIR.
   * @param mir The first MIR of the special method.
   * @param special Information about the special method.
   * @return Returns whether or not this was handled successfully. Returns false
   * if the caller should punt to normal Mir2Lir conversion.
   */
  virtual bool GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special);

  void ClobberBody(RegisterInfo* p);
  void SetCurrentDexPc(DexOffset dexpc) {
    current_dalvik_offset_ = dexpc;
  }

  /**
   * @brief Used to lock a register if the argument at in_position was passed that way.
   * @details Does nothing if the argument is passed via the stack.
   * @param in_position The argument number whose register to lock.
   */
  void LockArg(size_t in_position);

  /**
   * @brief Used to load a VR argument to a physical register.
   * @details The load is only done if the argument is not already in a physical register.
   * LockArg must have been previously called.
   * @param in_position The argument number to load.
   * @param reg_class The register class to load the argument into.
   * @param wide Whether the argument is 64-bit or not.
   * @return Returns the register (or register pair) for the loaded argument.
   */
  RegStorage LoadArg(size_t in_position, RegisterClass reg_class, bool wide = false);

  /**
   * @brief Used to load a VR argument directly to a specified register location.
   * @param in_position The argument number to place in a register.
   * @param rl_dest The register location where to place the argument.
   */
  void LoadArgDirect(size_t in_position, RegLocation rl_dest);

  /**
   * @brief Used to spill a register if the argument at in_position was passed that way.
   * @details Does nothing if the argument is passed via the stack.
   * @param in_position The argument number whose register to spill.
   */
  void SpillArg(size_t in_position);

  /**
   * @brief Used to unspill a register if the argument at in_position was passed that way.
   * @details Does nothing if the argument is passed via the stack.
   * @param in_position The argument number whose register to unspill.
   */
  void UnspillArg(size_t in_position);

  /**
   * @brief Generate a suspend test in a special method.
   */
  SpecialSuspendCheckSlowPath* GenSpecialSuspendTest();

  /**
   * @brief Used to generate LIR for a special getter method.
   * @param mir The mir that represents the iget.
   * @param special Information about the special getter method.
   * @return Returns whether LIR was successfully generated.
   */
  bool GenSpecialIGet(MIR* mir, const InlineMethod& special);
  /**
   * @brief Used to generate LIR for a special setter method.
   * @param mir The mir that represents the iput.
   * @param special Information about the special setter method.
   * @return Returns whether LIR was successfully generated.
   */
  bool GenSpecialIPut(MIR* mir, const InlineMethod& special);

  /**
   * @brief Used to generate LIR for a special return-args method.
   * @param mir The mir that represents the return of an argument.
   * @param special Information about the special return-args method.
   * @return Returns whether LIR was successfully generated.
   */
  bool GenSpecialIdentity(MIR* mir, const InlineMethod& special);

  /**
   * @brief Generate code to check if the result is null and, if it is, call a helper to load it.
   * @param r_result the result register.
   * @param trampoline the helper to call in the slow path.
   * @param imm the immediate passed to the helper.
   */
  void GenIfNullUseHelperImm(RegStorage r_result, QuickEntrypointEnum trampoline, int imm);

  /**
   * @brief Generate code to retrieve the Class* for another type to be used by SGET/SPUT.
   * @param field_info information about the field to be accessed.
   * @param opt_flags the optimization flags of the MIR.
   */
  RegStorage GenGetOtherTypeForSgetSput(const MirSFieldLoweringInfo& field_info, int opt_flags);

  void AddDivZeroCheckSlowPath(LIR* branch);

  // Copy arg0 and arg1 to kArg0 and kArg1 safely, possibly using
  // kArg2 as a temp.
  virtual void CopyToArgumentRegs(RegStorage arg0, RegStorage arg1);

  /**
   * @brief Load a constant into a RegLocation.
   * @param rl_dest Destination RegLocation
   * @param value Constant value
   */
  virtual void GenConst(RegLocation rl_dest, int value);

  /**
   * Returns true iff wide GPRs are just different views on the same physical register.
   */
  virtual bool WideGPRsAreAliases() const = 0;

  /**
   * Returns true iff wide FPRs are just different views on the same physical register.
   */
  virtual bool WideFPRsAreAliases() const = 0;
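  /*
   * Illustrative only: a 64-bit backend whose 32-bit and 64-bit GPR views name the same
   * physical register would typically override this as below, whereas a 32-bit backend that
   * models wide values as register pairs would return false:
   *
   *   bool WideGPRsAreAliases() const OVERRIDE { return true; }
   */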
  enum class WidenessCheck {  // private
    kIgnoreWide,
    kCheckWide,
    kCheckNotWide
  };

  enum class RefCheck {  // private
    kIgnoreRef,
    kCheckRef,
    kCheckNotRef
  };

  enum class FPCheck {  // private
    kIgnoreFP,
    kCheckFP,
    kCheckNotFP
  };

  /**
   * Check whether a reg storage seems well-formed, that is, if the reg storage is valid,
   * that it has the expected form for the flags.
   * A flag value of 0 means ignore. A flag value of -1 means false. A flag value of 1 means true.
   */
  void CheckRegStorageImpl(RegStorage rs, WidenessCheck wide, RefCheck ref, FPCheck fp, bool fail,
                           bool report) const;

  /**
   * Check whether a reg location seems well-formed, that is, if a reg storage is encoded,
   * that it has the expected size.
   */
  void CheckRegLocationImpl(RegLocation rl, bool fail, bool report) const;

  // See CheckRegStorageImpl. Will print or fail depending on kFailOnSizeError and
  // kReportSizeError.
  void CheckRegStorage(RegStorage rs, WidenessCheck wide, RefCheck ref, FPCheck fp) const;
  // See CheckRegLocationImpl.
  void CheckRegLocation(RegLocation rl) const;

  // Find the references at the beginning of a basic block (for generating GC maps).
  void InitReferenceVRegs(BasicBlock* bb, BitVector* references);

  // Update references from prev_mir to mir in the same BB. If mir is null or before
  // prev_mir, report failure (return false) and update references to the end of the BB.
  bool UpdateReferenceVRegsLocal(MIR* mir, MIR* prev_mir, BitVector* references);

  // Update references from prev_mir to mir.
  void UpdateReferenceVRegs(MIR* mir, MIR* prev_mir, BitVector* references);

  /**
   * Returns true if the frame spills the given core register.
   */
  bool CoreSpillMaskContains(int reg) {
    return (core_spill_mask_ & (1u << reg)) != 0;
  }

 public:
  // TODO: add accessors for these.
  LIR* literal_list_;         // Constants.
  LIR* method_literal_list_;  // Method literals requiring patching.
  LIR* class_literal_list_;   // Class literals requiring patching.
  LIR* code_literal_list_;    // Code literals requiring patching.
  LIR* first_fixup_;          // Doubly-linked list of LIR nodes requiring fixups.

 protected:
  ArenaAllocator* const arena_;
  CompilationUnit* const cu_;
  MIRGraph* const mir_graph_;
  ArenaVector<SwitchTable*> switch_tables_;
  ArenaVector<FillArrayData*> fill_array_data_;
  ArenaVector<RegisterInfo*> tempreg_info_;
  ArenaVector<RegisterInfo*> reginfo_map_;
  ArenaVector<const void*> pointer_storage_;
  CodeOffset data_offset_;  // Starting offset of the literal pool.
  size_t total_size_;       // Header + code size.
  LIR* block_label_list_;
  PromotionMap* promotion_map_;
  /*
   * TODO: The code generation utilities don't have a built-in
   * mechanism to propagate the original Dalvik opcode address to the
   * associated generated instructions. For the trace compiler, this wasn't
   * necessary because the interpreter handled all throws and debugging
   * requests. For now we'll handle this by placing the Dalvik offset
   * in the CompilationUnit struct before codegen for each instruction.
   * The low-level LIR creation utilities will pull it from here. Rework this.
   */
  DexOffset current_dalvik_offset_;
  MIR* current_mir_;
  size_t estimated_native_code_size_;  // Just an estimate; used to reserve code_buffer_ size.
  std::unique_ptr<RegisterPool> reg_pool_;
  /*
   * Sanity checking for the register temp tracking. The same SSA name should never be
   * associated with more than one temp register within the compilation of a single instruction.
   */
  int live_sreg_;
  CodeBuffer code_buffer_;
  // The source mapping table data (pc -> dex). More entries than in encoded_mapping_table_.
  DefaultSrcMap src_mapping_table_;
  // The encoded mapping table data (dex -> pc offset and pc offset -> dex) with a size prefix.
  ArenaVector<uint8_t> encoded_mapping_table_;
  ArenaVector<uint32_t> core_vmap_table_;
  ArenaVector<uint32_t> fp_vmap_table_;
  ArenaVector<uint8_t> native_gc_map_;
  ArenaVector<LinkerPatch> patches_;
  int num_core_spills_;
  int num_fp_spills_;
  int frame_size_;
  unsigned int core_spill_mask_;
  unsigned int fp_spill_mask_;
  LIR* first_lir_insn_;
  LIR* last_lir_insn_;

  ArenaVector<LIRSlowPath*> slow_paths_;

  // The memory reference type for new LIRs.
  // NOTE: Passing this as an explicit parameter by all functions that directly or indirectly
  // invoke RawLIR() would clutter the code and reduce the readability.
  ResourceMask::ResourceBit mem_ref_type_;
  // Each resource mask now takes 16 bytes, so having both use/def masks directly in a LIR
  // would consume 32 bytes per LIR. Instead, the LIR now holds only pointers to the masks
  // (i.e. 8 bytes on a 32-bit arch, 16 bytes on a 64-bit arch) and we use ResourceMaskCache
  // to deduplicate the masks.
  ResourceMaskCache mask_cache_;

  // Record the MIR that generated a given safepoint (null for prologue safepoints).
  ArenaVector<std::pair<LIR*, MIR*>> safepoints_;

  // The layout of the cu_->dex_file's dex cache arrays for PC-relative addressing.
  const DexCacheArraysLayout dex_cache_arrays_layout_;

  // For architectures that don't have true PC-relative addressing, we can promote
  // the PC of an instruction (or another PC-relative address such as a pointer to
  // the dex cache arrays if supported) to a register. This is indicated to the
  // register promotion by allocating a backend temp.
  CompilerTemp* pc_rel_temp_;

  // For architectures that don't have true PC-relative addressing (see pc_rel_temp_
  // above) and also have a limited range of offsets for loads, it's useful to
  // know the minimum offset into the dex cache arrays, so we calculate that as well
  // if pc_rel_temp_ isn't null.
  uint32_t dex_cache_arrays_min_offset_;

  dwarf::LazyDebugFrameOpCodeWriter cfi_;

  // ABI support
  class ShortyArg {
   public:
    explicit ShortyArg(char type) : type_(type) { }
    bool IsFP() { return type_ == 'F' || type_ == 'D'; }
    bool IsWide() { return type_ == 'J' || type_ == 'D'; }
    bool IsRef() { return type_ == 'L'; }
    char GetType() { return type_; }
   private:
    char type_;
  };

  class ShortyIterator {
   public:
    ShortyIterator(const char* shorty, bool is_static);
    bool Next();
    ShortyArg GetArg() { return ShortyArg(pending_this_ ? 'L' : *cur_); }
   private:
    const char* cur_;
    bool pending_this_;
    bool initialized_;
  };

  class InToRegStorageMapper {
   public:
    virtual RegStorage GetNextReg(ShortyArg arg) = 0;
    virtual ~InToRegStorageMapper() {}
    virtual void Reset() = 0;
  };

  class InToRegStorageMapping {
   public:
    explicit InToRegStorageMapping(ArenaAllocator* arena)
        : mapping_(arena->Adapter()),
          end_mapped_in_(0u), has_arguments_on_stack_(false), initialized_(false) {}
    void Initialize(ShortyIterator* shorty, InToRegStorageMapper* mapper);
    /**
     * @return the past-the-end index of VRs mapped to physical registers.
     * In other words, any VR starting from this index is mapped to memory.
     */
    size_t GetEndMappedIn() { return end_mapped_in_; }
    bool HasArgumentsOnStack() { return has_arguments_on_stack_; }
    RegStorage GetReg(size_t in_position);
    ShortyArg GetShorty(size_t in_position);
    bool IsInitialized() { return initialized_; }
   private:
    static constexpr char kInvalidShorty = '-';
    ArenaVector<std::pair<ShortyArg, RegStorage>> mapping_;
    size_t end_mapped_in_;
    bool has_arguments_on_stack_;
    bool initialized_;
  };
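  /*
   * Illustrative example only (mapper is a hypothetical backend InToRegStorageMapper): for a
   * non-static method with shorty "DJI" (returns a double, takes a long and an int), the
   * iterator first yields the implicit 'L' for the receiver, then 'J', then 'I'; the return
   * type is not iterated. Each argument is then assigned a register in turn:
   *
   *   ShortyIterator it("DJI", false);  // is_static == false
   *   while (it.Next()) {
   *     RegStorage reg = mapper->GetNextReg(it.GetArg());
   *     // An invalid RegStorage typically means the argument is passed on the stack.
   *   }
   */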
  // Cached mapping of method input to reg storage according to ABI.
  InToRegStorageMapping in_to_reg_storage_mapping_;
  virtual InToRegStorageMapper* GetResetedInToRegStorageMapper() = 0;

 private:
  static bool SizeMatchesTypeForEntrypoint(OpSize size, Primitive::Type type);

  friend class QuickCFITest;
};  // Class Mir2Lir

}  // namespace art

#endif  // ART_COMPILER_DEX_QUICK_MIR_TO_LIR_H_