/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "assembler_arm.h"

#include <algorithm>

#include "base/bit_utils.h"
#include "base/logging.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "offsets.h"
#include "thread.h"

namespace art {
namespace arm {

const char* kRegisterNames[] = {
  "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
  "fp", "ip", "sp", "lr", "pc"
};

const char* kConditionNames[] = {
  "EQ", "NE", "CS", "CC", "MI", "PL", "VS", "VC", "HI", "LS", "GE", "LT", "GT",
  "LE", "AL",
};

std::ostream& operator<<(std::ostream& os, const Register& rhs) {
  if (rhs >= R0 && rhs <= PC) {
    os << kRegisterNames[rhs];
  } else {
    os << "Register[" << static_cast<int>(rhs) << "]";
  }
  return os;
}


std::ostream& operator<<(std::ostream& os, const SRegister& rhs) {
  if (rhs >= S0 && rhs < kNumberOfSRegisters) {
    os << "s" << static_cast<int>(rhs);
  } else {
    os << "SRegister[" << static_cast<int>(rhs) << "]";
  }
  return os;
}


std::ostream& operator<<(std::ostream& os, const DRegister& rhs) {
  if (rhs >= D0 && rhs < kNumberOfDRegisters) {
    os << "d" << static_cast<int>(rhs);
  } else {
    os << "DRegister[" << static_cast<int>(rhs) << "]";
  }
  return os;
}

std::ostream& operator<<(std::ostream& os, const Condition& rhs) {
  if (rhs >= EQ && rhs <= AL) {
    os << kConditionNames[rhs];
  } else {
    os << "Condition[" << static_cast<int>(rhs) << "]";
  }
  return os;
}

ShifterOperand::ShifterOperand(uint32_t immed)
    : type_(kImmediate), rm_(kNoRegister), rs_(kNoRegister),
      is_rotate_(false), is_shift_(false), shift_(kNoShift), rotate_(0), immed_(immed) {
  CHECK(immed < (1u << 12) || ArmAssembler::ModifiedImmediate(immed) != kInvalidModifiedImmediate);
}

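// Returns the ARM (A32) encoding of this shifter operand: either a rotated
// 8-bit immediate (rotate:imm8), or a register optionally shifted by an
// immediate or by another register.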
uint32_t ShifterOperand::encodingArm() const {
  CHECK(is_valid());
  switch (type_) {
    case kImmediate:
      if (is_rotate_) {
        return (rotate_ << kRotateShift) | (immed_ << kImmed8Shift);
      } else {
        return immed_;
      }
    case kRegister:
      if (is_shift_) {
        uint32_t shift_type;
        switch (shift_) {
          case arm::Shift::ROR:
            shift_type = static_cast<uint32_t>(shift_);
            CHECK_NE(immed_, 0U);
            break;
          case arm::Shift::RRX:
            shift_type = static_cast<uint32_t>(arm::Shift::ROR);  // Same encoding as ROR.
            CHECK_EQ(immed_, 0U);
            break;
          default:
            shift_type = static_cast<uint32_t>(shift_);
        }
        // Shifted immediate or register.
        if (rs_ == kNoRegister) {
          // Immediate shift.
          return immed_ << kShiftImmShift |
                          shift_type << kShiftShift |
                          static_cast<uint32_t>(rm_);
        } else {
          // Register shift.
          return static_cast<uint32_t>(rs_) << kShiftRegisterShift |
              shift_type << kShiftShift | (1 << 4) |
              static_cast<uint32_t>(rm_);
        }
      } else {
        // Simple register.
        return static_cast<uint32_t>(rm_);
      }
    default:
      // Can't get here.
      LOG(FATAL) << "Invalid shifter operand for ARM";
      return 0;
  }
}

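// Returns the Thumb-2 (T32) encoding. Immediate shift counts are split across
// the instruction as imm3:imm2 together with a 2-bit shift type; Thumb-2 has
// no register-shifted-register form for these operands.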
uint32_t ShifterOperand::encodingThumb() const {
  switch (type_) {
    case kImmediate:
      return immed_;
    case kRegister:
      if (is_shift_) {
        // Shifted immediate or register.
        if (rs_ == kNoRegister) {
          // Immediate shift.
          if (shift_ == RRX) {
            DCHECK_EQ(immed_, 0u);
            // RRX is encoded as an ROR with imm 0.
            return ROR << 4 | static_cast<uint32_t>(rm_);
          } else {
            DCHECK((1 <= immed_ && immed_ <= 31) ||
                   (immed_ == 0u && shift_ == LSL) ||
                   (immed_ == 32u && (shift_ == ASR || shift_ == LSR)));
            uint32_t imm3 = (immed_ >> 2) & 7 /* 0b111 */;
            uint32_t imm2 = immed_ & 3U /* 0b11 */;

            return imm3 << 12 | imm2 << 6 | shift_ << 4 |
                static_cast<uint32_t>(rm_);
          }
        } else {
          LOG(FATAL) << "No register-shifted register instruction available in thumb";
          return 0;
        }
      } else {
        // Simple register.
        return static_cast<uint32_t>(rm_);
      }
    default:
      // Can't get here.
      LOG(FATAL) << "Invalid shifter operand for thumb";
      UNREACHABLE();
  }
}

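// ARM (A32) address encoding (addressing mode 2 or register form). Negative
// immediate offsets are encoded as positive values with the U (add/subtract)
// bit flipped.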
uint32_t Address::encodingArm() const {
  CHECK(IsAbsoluteUint<12>(offset_));
  uint32_t encoding;
  if (is_immed_offset_) {
    if (offset_ < 0) {
      encoding = (am_ ^ (1 << kUShift)) | -offset_;  // Flip U to adjust sign.
    } else {
      encoding = am_ | offset_;
    }
  } else {
    uint32_t shift = shift_;
    if (shift == RRX) {
      CHECK_EQ(offset_, 0);
      shift = ROR;
    }
    encoding = am_ | static_cast<uint32_t>(rm_) | shift << 5 | offset_ << 7 | B25;
  }
  encoding |= static_cast<uint32_t>(rn_) << kRnShift;
  return encoding;
}


uint32_t Address::encodingThumb(bool is_32bit) const {
  uint32_t encoding = 0;
  if (is_immed_offset_) {
    encoding = static_cast<uint32_t>(rn_) << 16;
    // Check for the T3/T4 encoding.
    // The addressing mode must be plain Offset to use T3.
    // Convert the ARM P|U|0|W mode bits to the Thumb2 P|U|W form:
    // ARM encoding:    |P|U|0|W|
    // Thumb2 encoding: |P|U|W|

    uint32_t am = am_;
    int32_t offset = offset_;
    if (offset < 0) {
      am ^= 1 << kUShift;
      offset = -offset;
    }
    if (offset_ < 0 || (offset >= 0 && offset < 256 &&
        am_ != Mode::Offset)) {
      // T4 encoding.
      uint32_t PUW = am >> 21;   // Move down to bottom of word.
      PUW = (PUW >> 1) | (PUW & 1);   // Bits 3, 2 and 0.
      // If P is 0 then W must be 1 (Different from ARM).
      if ((PUW & 4U /* 0b100 */) == 0) {
        PUW |= 1U /* 0b1 */;
      }
      encoding |= B11 | PUW << 8 | offset;
    } else {
      // T3 encoding (also sets op1 to 0b01).
      encoding |= B23 | offset_;
    }
  } else {
    // Register offset, possibly shifted.
    // Need to choose between encoding T1 (16 bit) or T2.
    // Only Offset mode is supported.  Shift must be LSL and the count
    // is only 2 bits.
    CHECK_EQ(shift_, LSL);
    CHECK_LE(offset_, 4);
    CHECK_EQ(am_, Offset);
    bool is_t2 = is_32bit;
    if (ArmAssembler::IsHighRegister(rn_) || ArmAssembler::IsHighRegister(rm_)) {
      is_t2 = true;
    } else if (offset_ != 0) {
      is_t2 = true;
    }
    if (is_t2) {
      encoding = static_cast<uint32_t>(rn_) << 16 | static_cast<uint32_t>(rm_) |
          offset_ << 4;
    } else {
      encoding = static_cast<uint32_t>(rn_) << 3 | static_cast<uint32_t>(rm_) << 6;
    }
  }
  return encoding;
}

// This is very similar to the ARM encoding, except the offset is a 10-bit
// byte offset, stored scaled by 4 as an 8-bit immediate.
uint32_t Address::encodingThumbLdrdStrd() const {
  DCHECK(IsImmediate());
  uint32_t encoding;
  uint32_t am = am_;
  // If P is 0 then W must be 1 (Different from ARM).
  uint32_t PU1W = am_ >> 21;   // Move down to bottom of word.
  if ((PU1W & 8U /* 0b1000 */) == 0) {
    am |= 1 << 21;      // Set W bit.
  }
  if (offset_ < 0) {
    int32_t off = -offset_;
    CHECK_LT(off, 1024);
    CHECK_ALIGNED(off, 4);
    encoding = (am ^ (1 << kUShift)) | off >> 2;  // Flip U to adjust sign.
  } else {
    CHECK_LT(offset_, 1024);
    CHECK_ALIGNED(offset_, 4);
    encoding = am | offset_ >> 2;
  }
  encoding |= static_cast<uint32_t>(rn_) << 16;
  return encoding;
}

// Encoding for ARM addressing mode 3.
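// Mode 3 splits its 8-bit immediate into two nibbles: imm4H in bits 8-11 and
// imm4L in bits 0-3, which is what the masking below reassembles.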
uint32_t Address::encoding3() const {
  const uint32_t offset_mask = (1 << 12) - 1;
  uint32_t encoding = encodingArm();
  uint32_t offset = encoding & offset_mask;
  CHECK_LT(offset, 256u);
  return (encoding & ~offset_mask) | ((offset & 0xf0) << 4) | (offset & 0xf);
}

// Encoding for vfp load/store addressing.
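// The byte offset is stored scaled by 4 as an 8-bit immediate; bit 23 (U)
// selects adding (Offset) versus subtracting (NegOffset) the offset from Rn.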
uint32_t Address::vencoding() const {
  CHECK(IsAbsoluteUint<10>(offset_));  // In the range -1020 to +1020.
  CHECK_ALIGNED(offset_, 2);  // A multiple of 4 is expected; only 2-byte alignment is checked here.

  const uint32_t offset_mask = (1 << 12) - 1;
  uint32_t encoding = encodingArm();
  uint32_t offset = encoding & offset_mask;
  CHECK((am_ == Offset) || (am_ == NegOffset));
  uint32_t vencoding_value = (encoding & (0xf << kRnShift)) | (offset >> 2);
  if (am_ == Offset) {
    vencoding_value |= 1 << 23;
  }
  return vencoding_value;
}


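// The CanHold*Offset* predicates below mirror the encodings above: ARM
// addressing mode 3 (halfword/signed byte/pair) takes an 8-bit offset, mode 2
// (word/unsigned byte) a 12-bit offset, and VFP accesses an 8-bit offset
// scaled by 4 (10 bits of range). The Thumb variants additionally require
// 4-byte alignment for VFP and word-pair offsets.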
bool Address::CanHoldLoadOffsetArm(LoadOperandType type, int offset) {
  switch (type) {
    case kLoadSignedByte:
    case kLoadSignedHalfword:
    case kLoadUnsignedHalfword:
    case kLoadWordPair:
      return IsAbsoluteUint<8>(offset);  // Addressing mode 3.
    case kLoadUnsignedByte:
    case kLoadWord:
      return IsAbsoluteUint<12>(offset);  // Addressing mode 2.
    case kLoadSWord:
    case kLoadDWord:
      return IsAbsoluteUint<10>(offset);  // VFP addressing mode.
    default:
      LOG(FATAL) << "UNREACHABLE";
      UNREACHABLE();
  }
}


bool Address::CanHoldStoreOffsetArm(StoreOperandType type, int offset) {
  switch (type) {
    case kStoreHalfword:
    case kStoreWordPair:
      return IsAbsoluteUint<8>(offset);  // Addressing mode 3.
    case kStoreByte:
    case kStoreWord:
      return IsAbsoluteUint<12>(offset);  // Addressing mode 2.
    case kStoreSWord:
    case kStoreDWord:
      return IsAbsoluteUint<10>(offset);  // VFP addressing mode.
    default:
      LOG(FATAL) << "UNREACHABLE";
      UNREACHABLE();
  }
}

bool Address::CanHoldLoadOffsetThumb(LoadOperandType type, int offset) {
  switch (type) {
    case kLoadSignedByte:
    case kLoadSignedHalfword:
    case kLoadUnsignedHalfword:
    case kLoadUnsignedByte:
    case kLoadWord:
      return IsAbsoluteUint<12>(offset);
    case kLoadSWord:
    case kLoadDWord:
      return IsAbsoluteUint<10>(offset) && (offset & 3) == 0;  // VFP addressing mode.
    case kLoadWordPair:
      return IsAbsoluteUint<10>(offset) && (offset & 3) == 0;
    default:
      LOG(FATAL) << "UNREACHABLE";
      UNREACHABLE();
  }
}


bool Address::CanHoldStoreOffsetThumb(StoreOperandType type, int offset) {
  switch (type) {
    case kStoreHalfword:
    case kStoreByte:
    case kStoreWord:
      return IsAbsoluteUint<12>(offset);
    case kStoreSWord:
    case kStoreDWord:
      return IsAbsoluteUint<10>(offset) && (offset & 3) == 0;  // VFP addressing mode.
    case kStoreWordPair:
      return IsAbsoluteUint<10>(offset) && (offset & 3) == 0;
    default:
      LOG(FATAL) << "UNREACHABLE";
      UNREACHABLE();
  }
}

void ArmAssembler::Pad(uint32_t bytes) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  for (uint32_t i = 0; i < bytes; ++i) {
    buffer_.Emit<uint8_t>(0);
  }
}

static dwarf::Reg DWARFReg(Register reg) {
  return dwarf::Reg::ArmCore(static_cast<int>(reg));
}

static dwarf::Reg DWARFReg(SRegister reg) {
  return dwarf::Reg::ArmFp(static_cast<int>(reg));
}

constexpr size_t kFramePointerSize = kArmPointerSize;

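// Builds the method frame: pushes LR and the core callee saves, then the FP
// callee saves (if any), grows the stack to frame_size, stores the Method*
// (R0) at SP + 0, and writes any entry spills into the argument slots just
// above the new frame.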
void ArmAssembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
                              const std::vector<ManagedRegister>& callee_save_regs,
                              const ManagedRegisterEntrySpills& entry_spills) {
  CHECK_EQ(buffer_.Size(), 0U);  // Nothing emitted yet.
  CHECK_ALIGNED(frame_size, kStackAlignment);
  CHECK_EQ(R0, method_reg.AsArm().AsCoreRegister());

  // Push callee saves and link register.
  RegList core_spill_mask = 1 << LR;
  uint32_t fp_spill_mask = 0;
  for (const ManagedRegister& reg : callee_save_regs) {
    if (reg.AsArm().IsCoreRegister()) {
      core_spill_mask |= 1 << reg.AsArm().AsCoreRegister();
    } else {
      fp_spill_mask |= 1 << reg.AsArm().AsSRegister();
    }
  }
  PushList(core_spill_mask);
  cfi_.AdjustCFAOffset(POPCOUNT(core_spill_mask) * kFramePointerSize);
  cfi_.RelOffsetForMany(DWARFReg(Register(0)), 0, core_spill_mask, kFramePointerSize);
  if (fp_spill_mask != 0) {
    vpushs(SRegister(CTZ(fp_spill_mask)), POPCOUNT(fp_spill_mask));
    cfi_.AdjustCFAOffset(POPCOUNT(fp_spill_mask) * kFramePointerSize);
    cfi_.RelOffsetForMany(DWARFReg(SRegister(0)), 0, fp_spill_mask, kFramePointerSize);
  }

  // Increase frame to required size.
  int pushed_values = POPCOUNT(core_spill_mask) + POPCOUNT(fp_spill_mask);
  CHECK_GT(frame_size, pushed_values * kFramePointerSize);  // Must at least have space for Method*.
  IncreaseFrameSize(frame_size - pushed_values * kFramePointerSize);  // Handles CFI as well.

  // Write out Method*.
  StoreToOffset(kStoreWord, R0, SP, 0);

  // Write out entry spills.
  int32_t offset = frame_size + kFramePointerSize;
  for (size_t i = 0; i < entry_spills.size(); ++i) {
    ArmManagedRegister reg = entry_spills.at(i).AsArm();
    if (reg.IsNoRegister()) {
      // Only increment stack offset.
      ManagedRegisterSpill spill = entry_spills.at(i);
      offset += spill.getSize();
    } else if (reg.IsCoreRegister()) {
      StoreToOffset(kStoreWord, reg.AsCoreRegister(), SP, offset);
      offset += 4;
    } else if (reg.IsSRegister()) {
      StoreSToOffset(reg.AsSRegister(), SP, offset);
      offset += 4;
    } else if (reg.IsDRegister()) {
      StoreDToOffset(reg.AsDRegister(), SP, offset);
      offset += 8;
    }
  }
}

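// Mirror image of BuildFrame: shrinks the stack back down to the callee-save
// area, pops the FP saves, then pops the core saves with PC in place of LR,
// which performs the return.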
void ArmAssembler::RemoveFrame(size_t frame_size,
                               const std::vector<ManagedRegister>& callee_save_regs) {
  CHECK_ALIGNED(frame_size, kStackAlignment);
  cfi_.RememberState();

  // Compute callee saves to pop and PC.
  RegList core_spill_mask = 1 << PC;
  uint32_t fp_spill_mask = 0;
  for (const ManagedRegister& reg : callee_save_regs) {
    if (reg.AsArm().IsCoreRegister()) {
      core_spill_mask |= 1 << reg.AsArm().AsCoreRegister();
    } else {
      fp_spill_mask |= 1 << reg.AsArm().AsSRegister();
    }
  }

  // Decrease frame to start of callee saves.
  int pop_values = POPCOUNT(core_spill_mask) + POPCOUNT(fp_spill_mask);
  CHECK_GT(frame_size, pop_values * kFramePointerSize);
  DecreaseFrameSize(frame_size - (pop_values * kFramePointerSize));  // Handles CFI as well.

  if (fp_spill_mask != 0) {
    vpops(SRegister(CTZ(fp_spill_mask)), POPCOUNT(fp_spill_mask));
    cfi_.AdjustCFAOffset(-kFramePointerSize * POPCOUNT(fp_spill_mask));
    cfi_.RestoreMany(DWARFReg(SRegister(0)), fp_spill_mask);
  }

  // Pop callee saves and PC.
  PopList(core_spill_mask);

  // The CFI should be restored for any code that follows the exit block.
  cfi_.RestoreState();
  cfi_.DefCFAOffset(frame_size);
}

void ArmAssembler::IncreaseFrameSize(size_t adjust) {
  AddConstant(SP, -adjust);
  cfi_.AdjustCFAOffset(adjust);
}

void ArmAssembler::DecreaseFrameSize(size_t adjust) {
  AddConstant(SP, adjust);
  cfi_.AdjustCFAOffset(-adjust);
}

void ArmAssembler::Store(FrameOffset dest, ManagedRegister msrc, size_t size) {
  ArmManagedRegister src = msrc.AsArm();
  if (src.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (src.IsCoreRegister()) {
    CHECK_EQ(4u, size);
    StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
  } else if (src.IsRegisterPair()) {
    CHECK_EQ(8u, size);
    StoreToOffset(kStoreWord, src.AsRegisterPairLow(), SP, dest.Int32Value());
    StoreToOffset(kStoreWord, src.AsRegisterPairHigh(),
                  SP, dest.Int32Value() + 4);
  } else if (src.IsSRegister()) {
    StoreSToOffset(src.AsSRegister(), SP, dest.Int32Value());
  } else {
    CHECK(src.IsDRegister()) << src;
    StoreDToOffset(src.AsDRegister(), SP, dest.Int32Value());
  }
}

void ArmAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
  ArmManagedRegister src = msrc.AsArm();
  CHECK(src.IsCoreRegister()) << src;
  StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
}

void ArmAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
  ArmManagedRegister src = msrc.AsArm();
  CHECK(src.IsCoreRegister()) << src;
  StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
}

void ArmAssembler::StoreSpanning(FrameOffset dest, ManagedRegister msrc,
                                 FrameOffset in_off, ManagedRegister mscratch) {
  ArmManagedRegister src = msrc.AsArm();
  ArmManagedRegister scratch = mscratch.AsArm();
  StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, in_off.Int32Value());
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4);
}

void ArmAssembler::CopyRef(FrameOffset dest, FrameOffset src,
                           ManagedRegister mscratch) {
  ArmManagedRegister scratch = mscratch.AsArm();
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
}

void ArmAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
                           bool unpoison_reference) {
  ArmManagedRegister dst = mdest.AsArm();
  CHECK(dst.IsCoreRegister() && base.AsArm().IsCoreRegister()) << dst;
  LoadFromOffset(kLoadWord, dst.AsCoreRegister(),
                 base.AsArm().AsCoreRegister(), offs.Int32Value());
  if (unpoison_reference) {
    MaybeUnpoisonHeapReference(dst.AsCoreRegister());
  }
}

void ArmAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
  ArmManagedRegister dst = mdest.AsArm();
  CHECK(dst.IsCoreRegister()) << dst;
  LoadFromOffset(kLoadWord, dst.AsCoreRegister(), SP, src.Int32Value());
}

void ArmAssembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
                              Offset offs) {
  ArmManagedRegister dst = mdest.AsArm();
  CHECK(dst.IsCoreRegister() && base.AsArm().IsCoreRegister()) << dst;
  LoadFromOffset(kLoadWord, dst.AsCoreRegister(),
                 base.AsArm().AsCoreRegister(), offs.Int32Value());
}

void ArmAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
                                         ManagedRegister mscratch) {
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadImmediate(scratch.AsCoreRegister(), imm);
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
}

void ArmAssembler::StoreImmediateToThread32(ThreadOffset<4> dest, uint32_t imm,
                                            ManagedRegister mscratch) {
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadImmediate(scratch.AsCoreRegister(), imm);
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), TR, dest.Int32Value());
}

static void EmitLoad(ArmAssembler* assembler, ManagedRegister m_dst,
                     Register src_register, int32_t src_offset, size_t size) {
  ArmManagedRegister dst = m_dst.AsArm();
  if (dst.IsNoRegister()) {
    CHECK_EQ(0u, size) << dst;
  } else if (dst.IsCoreRegister()) {
    CHECK_EQ(4u, size) << dst;
    assembler->LoadFromOffset(kLoadWord, dst.AsCoreRegister(), src_register, src_offset);
  } else if (dst.IsRegisterPair()) {
    CHECK_EQ(8u, size) << dst;
    assembler->LoadFromOffset(kLoadWord, dst.AsRegisterPairLow(), src_register, src_offset);
    assembler->LoadFromOffset(kLoadWord, dst.AsRegisterPairHigh(), src_register, src_offset + 4);
  } else if (dst.IsSRegister()) {
    assembler->LoadSFromOffset(dst.AsSRegister(), src_register, src_offset);
  } else {
    CHECK(dst.IsDRegister()) << dst;
    assembler->LoadDFromOffset(dst.AsDRegister(), src_register, src_offset);
  }
}

void ArmAssembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) {
  return EmitLoad(this, m_dst, SP, src.Int32Value(), size);
}

void ArmAssembler::LoadFromThread32(ManagedRegister m_dst, ThreadOffset<4> src, size_t size) {
  return EmitLoad(this, m_dst, TR, src.Int32Value(), size);
}

void ArmAssembler::LoadRawPtrFromThread32(ManagedRegister m_dst, ThreadOffset<4> offs) {
  ArmManagedRegister dst = m_dst.AsArm();
  CHECK(dst.IsCoreRegister()) << dst;
  LoadFromOffset(kLoadWord, dst.AsCoreRegister(), TR, offs.Int32Value());
}

void ArmAssembler::CopyRawPtrFromThread32(FrameOffset fr_offs,
                                          ThreadOffset<4> thr_offs,
                                          ManagedRegister mscratch) {
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
                 TR, thr_offs.Int32Value());
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
                SP, fr_offs.Int32Value());
}

void ArmAssembler::CopyRawPtrToThread32(ThreadOffset<4> thr_offs,
                                        FrameOffset fr_offs,
                                        ManagedRegister mscratch) {
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
                 SP, fr_offs.Int32Value());
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
                TR, thr_offs.Int32Value());
}

void ArmAssembler::StoreStackOffsetToThread32(ThreadOffset<4> thr_offs,
                                              FrameOffset fr_offs,
                                              ManagedRegister mscratch) {
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(scratch.IsCoreRegister()) << scratch;
  AddConstant(scratch.AsCoreRegister(), SP, fr_offs.Int32Value(), AL);
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
                TR, thr_offs.Int32Value());
}

void ArmAssembler::StoreStackPointerToThread32(ThreadOffset<4> thr_offs) {
  StoreToOffset(kStoreWord, SP, TR, thr_offs.Int32Value());
}

void ArmAssembler::SignExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL) << "no sign extension necessary for arm";
}

void ArmAssembler::ZeroExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL) << "no zero extension necessary for arm";
}

void ArmAssembler::Move(ManagedRegister m_dst, ManagedRegister m_src, size_t /*size*/) {
  ArmManagedRegister dst = m_dst.AsArm();
  ArmManagedRegister src = m_src.AsArm();
  if (!dst.Equals(src)) {
    if (dst.IsCoreRegister()) {
      CHECK(src.IsCoreRegister()) << src;
      mov(dst.AsCoreRegister(), ShifterOperand(src.AsCoreRegister()));
    } else if (dst.IsDRegister()) {
      CHECK(src.IsDRegister()) << src;
      vmovd(dst.AsDRegister(), src.AsDRegister());
    } else if (dst.IsSRegister()) {
      CHECK(src.IsSRegister()) << src;
      vmovs(dst.AsSRegister(), src.AsSRegister());
    } else {
      CHECK(dst.IsRegisterPair()) << dst;
      CHECK(src.IsRegisterPair()) << src;
      // Ensure that the first move doesn't clobber the input of the second.
      if (src.AsRegisterPairHigh() != dst.AsRegisterPairLow()) {
        mov(dst.AsRegisterPairLow(), ShifterOperand(src.AsRegisterPairLow()));
        mov(dst.AsRegisterPairHigh(), ShifterOperand(src.AsRegisterPairHigh()));
      } else {
        mov(dst.AsRegisterPairHigh(), ShifterOperand(src.AsRegisterPairHigh()));
        mov(dst.AsRegisterPairLow(), ShifterOperand(src.AsRegisterPairLow()));
      }
    }
  }
}

void ArmAssembler::Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) {
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(scratch.IsCoreRegister()) << scratch;
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
    StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
    StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value() + 4);
    StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4);
  }
}

void ArmAssembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
                        ManagedRegister mscratch, size_t size) {
  Register scratch = mscratch.AsArm().AsCoreRegister();
  CHECK_EQ(size, 4u);
  LoadFromOffset(kLoadWord, scratch, src_base.AsArm().AsCoreRegister(), src_offset.Int32Value());
  StoreToOffset(kStoreWord, scratch, SP, dest.Int32Value());
}

void ArmAssembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
                        ManagedRegister mscratch, size_t size) {
  Register scratch = mscratch.AsArm().AsCoreRegister();
  CHECK_EQ(size, 4u);
  LoadFromOffset(kLoadWord, scratch, SP, src.Int32Value());
  StoreToOffset(kStoreWord, scratch, dest_base.AsArm().AsCoreRegister(), dest_offset.Int32Value());
}

void ArmAssembler::Copy(FrameOffset /*dst*/, FrameOffset /*src_base*/, Offset /*src_offset*/,
                        ManagedRegister /*mscratch*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL);
}

void ArmAssembler::Copy(ManagedRegister dest, Offset dest_offset,
                        ManagedRegister src, Offset src_offset,
                        ManagedRegister mscratch, size_t size) {
  CHECK_EQ(size, 4u);
  Register scratch = mscratch.AsArm().AsCoreRegister();
  LoadFromOffset(kLoadWord, scratch, src.AsArm().AsCoreRegister(), src_offset.Int32Value());
  StoreToOffset(kStoreWord, scratch, dest.AsArm().AsCoreRegister(), dest_offset.Int32Value());
}

void ArmAssembler::Copy(FrameOffset /*dst*/, Offset /*dest_offset*/, FrameOffset /*src*/,
                        Offset /*src_offset*/, ManagedRegister /*scratch*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL);
}

void ArmAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
                                          FrameOffset handle_scope_offset,
                                          ManagedRegister min_reg, bool null_allowed) {
  ArmManagedRegister out_reg = mout_reg.AsArm();
  ArmManagedRegister in_reg = min_reg.AsArm();
  CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg;
  CHECK(out_reg.IsCoreRegister()) << out_reg;
  if (null_allowed) {
    // Null values get a handle scope entry value of 0.  Otherwise, the handle scope entry is
    // the address in the handle scope holding the reference.
    // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
    if (in_reg.IsNoRegister()) {
      LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
                     SP, handle_scope_offset.Int32Value());
      in_reg = out_reg;
    }
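    // Note: it() emits a Thumb-2 IT block so that the following instruction(s)
    // execute conditionally; in ARM mode the conditions are encoded directly
    // in each instruction, making the IT block unnecessary there.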
    cmp(in_reg.AsCoreRegister(), ShifterOperand(0));
    if (!out_reg.Equals(in_reg)) {
      it(EQ, kItElse);
      LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
    } else {
      it(NE);
    }
    AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), NE);
  } else {
    AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), AL);
  }
}

void ArmAssembler::CreateHandleScopeEntry(FrameOffset out_off,
                                          FrameOffset handle_scope_offset,
                                          ManagedRegister mscratch,
                                          bool null_allowed) {
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(scratch.IsCoreRegister()) << scratch;
  if (null_allowed) {
    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP,
                   handle_scope_offset.Int32Value());
    // Null values get a handle scope entry value of 0.  Otherwise, the handle scope entry is
    // the address in the handle scope holding the reference.
    // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
    cmp(scratch.AsCoreRegister(), ShifterOperand(0));
    it(NE);
    AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), NE);
  } else {
    AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), AL);
  }
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, out_off.Int32Value());
}

void ArmAssembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
                                                ManagedRegister min_reg) {
  ArmManagedRegister out_reg = mout_reg.AsArm();
  ArmManagedRegister in_reg = min_reg.AsArm();
  CHECK(out_reg.IsCoreRegister()) << out_reg;
  CHECK(in_reg.IsCoreRegister()) << in_reg;
  Label null_arg;
  if (!out_reg.Equals(in_reg)) {
    LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);     // TODO: why EQ?
  }
  cmp(in_reg.AsCoreRegister(), ShifterOperand(0));
  it(NE);
  LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
                 in_reg.AsCoreRegister(), 0, NE);
}

void ArmAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references.
}

void ArmAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references.
}

void ArmAssembler::Call(ManagedRegister mbase, Offset offset,
                        ManagedRegister mscratch) {
  ArmManagedRegister base = mbase.AsArm();
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(base.IsCoreRegister()) << base;
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
                 base.AsCoreRegister(), offset.Int32Value());
  blx(scratch.AsCoreRegister());
  // TODO: place reference map on call.
}

void ArmAssembler::Call(FrameOffset base, Offset offset,
                        ManagedRegister mscratch) {
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(scratch.IsCoreRegister()) << scratch;
  // Call *(*(SP + base) + offset)
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
                 SP, base.Int32Value());
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
                 scratch.AsCoreRegister(), offset.Int32Value());
  blx(scratch.AsCoreRegister());
  // TODO: place reference map on call.
}

void ArmAssembler::CallFromThread32(ThreadOffset<4> /*offset*/, ManagedRegister /*scratch*/) {
  UNIMPLEMENTED(FATAL);
}

void ArmAssembler::GetCurrentThread(ManagedRegister tr) {
  mov(tr.AsArm().AsCoreRegister(), ShifterOperand(TR));
}

void ArmAssembler::GetCurrentThread(FrameOffset offset,
                                    ManagedRegister /*scratch*/) {
  StoreToOffset(kStoreWord, TR, SP, offset.Int32Value(), AL);
}

void ArmAssembler::ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) {
  ArmManagedRegister scratch = mscratch.AsArm();
  ArmExceptionSlowPath* slow = new (GetArena()) ArmExceptionSlowPath(scratch, stack_adjust);
  buffer_.EnqueueSlowPath(slow);
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
                 TR, Thread::ExceptionOffset<4>().Int32Value());
  cmp(scratch.AsCoreRegister(), ShifterOperand(0));
  b(slow->Entry(), NE);
}

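// Out-of-line path taken when ExceptionPoll finds a pending exception: it
// unwinds any extra stack adjustment, moves the exception object (held in the
// scratch register) into R0, and calls the pDeliverException entrypoint
// through R12; that call does not return.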
void ArmExceptionSlowPath::Emit(Assembler* sasm) {
  ArmAssembler* sp_asm = down_cast<ArmAssembler*>(sasm);
#define __ sp_asm->
  __ Bind(&entry_);
  if (stack_adjust_ != 0) {  // Fix up the frame.
    __ DecreaseFrameSize(stack_adjust_);
  }
  // Pass exception object as argument.
  // Don't care about preserving R0 as this call won't return.
  __ mov(R0, ShifterOperand(scratch_.AsCoreRegister()));
  // Set up call to Thread::Current()->pDeliverException.
  __ LoadFromOffset(kLoadWord, R12, TR, QUICK_ENTRYPOINT_OFFSET(4, pDeliverException).Int32Value());
  __ blx(R12);
#undef __
}


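// Counts leading zero bits by binary search: successively test the upper 16,
// 8, 4, 2 and 1 bits and narrow into whichever half is non-zero. For example,
// LeadingZeros(0x00f00000) returns 8.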
static int LeadingZeros(uint32_t val) {
  uint32_t alt;
  int32_t n;
  int32_t count;

  count = 16;
  n = 32;
  do {
    alt = val >> count;
    if (alt != 0) {
      n = n - count;
      val = alt;
    }
    count >>= 1;
  } while (count);
  return n - val;
}


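// Computes the Thumb-2 "modified immediate" encoding of value, or returns
// kInvalidModifiedImmediate if it cannot be encoded. The encodable forms are
// the i:imm3:a:bcdefgh patterns: a plain 8-bit constant, that constant
// replicated in both halfwords, in both odd bytes, or in all four bytes, and
// an 8-bit constant (top bit set) rotated into place. For example, 0x00ab00ab
// encodes as the halfword-replicated pattern 0:001:a:bcdefgh with
// abcdefgh = 0xab.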
uint32_t ArmAssembler::ModifiedImmediate(uint32_t value) {
  int32_t z_leading;
  int32_t z_trailing;
  uint32_t b0 = value & 0xff;

  /* Note: case of value==0 must use 0:000:0:0000000 encoding */
  if (value <= 0xFF)
    return b0;  // 0:000:a:bcdefgh.
  if (value == ((b0 << 16) | b0))
    return (0x1 << 12) | b0; /* 0:001:a:bcdefgh */
  if (value == ((b0 << 24) | (b0 << 16) | (b0 << 8) | b0))
    return (0x3 << 12) | b0; /* 0:011:a:bcdefgh */
  b0 = (value >> 8) & 0xff;
  if (value == ((b0 << 24) | (b0 << 8)))
    return (0x2 << 12) | b0; /* 0:010:a:bcdefgh */
  /* Can we do it with rotation? */
  z_leading = LeadingZeros(value);
  z_trailing = 32 - LeadingZeros(~value & (value - 1));
  /* A run of eight or fewer active bits? */
  if ((z_leading + z_trailing) < 24)
    return kInvalidModifiedImmediate;  /* No - bail */
  /* Left-justify the constant, discarding msb (known to be 1). */
  value <<= z_leading + 1;
  /* Create bcdefgh. */
  value >>= 25;

  /* Put it all together. */
  uint32_t v = 8 + z_leading;

  uint32_t i = (v & 16U /* 0b10000 */) >> 4;
  uint32_t imm3 = (v >> 1) & 7U /* 0b111 */;
  uint32_t a = v & 1;
  return value | i << 26 | imm3 << 12 | a << 7;
}

void ArmAssembler::FinalizeTrackedLabels() {
  if (!tracked_labels_.empty()) {
    // This array should be sorted, as assembly is generated in linearized order. It isn't
    // technically required, but GetAdjustedPosition() used in AdjustLabelPosition() can take
    // advantage of it. So ensure that it's actually the case.
    DCHECK(std::is_sorted(
        tracked_labels_.begin(),
        tracked_labels_.end(),
        [](const Label* lhs, const Label* rhs) { return lhs->Position() < rhs->Position(); }));

    Label* last_label = nullptr;  // Track duplicates, we must not adjust twice.
    for (Label* label : tracked_labels_) {
      DCHECK_NE(label, last_label);
      AdjustLabelPosition(label);
      last_label = label;
    }
  }
}

}  // namespace arm
}  // namespace art