/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <iostream>
#include <type_traits>

#include "assembler_arm_vixl.h"
#include "base/bit_utils.h"
#include "base/bit_utils_iterator.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "heap_poisoning.h"
#include "thread.h"

using namespace vixl::aarch32;  // NOLINT(build/namespaces)

namespace art {
namespace arm {

#ifdef ___
#error "ARM Assembler macro already defined."
#else
#define ___ vixl_masm_.
#endif

// Thread register definition.
extern const vixl32::Register tr(TR);
// Marking register definition.
extern const vixl32::Register mr(MR);
void ArmVIXLAssembler::FinalizeCode() {
  vixl_masm_.FinalizeCode();
}

size_t ArmVIXLAssembler::CodeSize() const {
  return vixl_masm_.GetSizeOfCodeGenerated();
}

const uint8_t* ArmVIXLAssembler::CodeBufferBaseAddress() const {
  return vixl_masm_.GetBuffer().GetStartAddress<const uint8_t*>();
}

void ArmVIXLAssembler::FinalizeInstructions(const MemoryRegion& region) {
  // Copy the instructions from the buffer.
  MemoryRegion from(vixl_masm_.GetBuffer()->GetStartAddress<void*>(), CodeSize());
  region.CopyFrom(0, from);
}

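// With heap poisoning enabled, heap references are stored in poisoned form:
// as the arithmetic negation of the real reference. Negation is its own
// inverse, so poisoning and unpoisoning emit the same RSB instruction.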
void ArmVIXLAssembler::PoisonHeapReference(vixl::aarch32::Register reg) {
  // reg = -reg.
  ___ Rsb(reg, reg, 0);
}

void ArmVIXLAssembler::UnpoisonHeapReference(vixl::aarch32::Register reg) {
  // reg = -reg.
  ___ Rsb(reg, reg, 0);
}

void ArmVIXLAssembler::MaybePoisonHeapReference(vixl32::Register reg) {
  if (kPoisonHeapReferences) {
    PoisonHeapReference(reg);
  }
}

void ArmVIXLAssembler::MaybeUnpoisonHeapReference(vixl32::Register reg) {
  if (kPoisonHeapReferences) {
    UnpoisonHeapReference(reg);
  }
}

void ArmVIXLAssembler::GenerateMarkingRegisterCheck(vixl32::Register temp, int code) {
  // The Marking Register is only used in the Baker read barrier configuration.
  DCHECK(kEmitCompilerReadBarrier);
  DCHECK(kUseBakerReadBarrier);

  vixl32::Label mr_is_ok;

  // temp = self.tls32_.is_gc_marking
  ___ Ldr(temp, MemOperand(tr, Thread::IsGcMarkingOffset<kArmPointerSize>().Int32Value()));
  // Check that mr == self.tls32_.is_gc_marking.
  ___ Cmp(mr, temp);
  ___ B(eq, &mr_is_ok, /* is_far_target= */ false);
  ___ Bkpt(code);
  ___ Bind(&mr_is_ok);
}

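// Load a 32-bit immediate into `rd`. When `value` cannot be encoded as a
// modified immediate but its bitwise complement can, emit MVN instead of
// letting the macro assembler fall back to a longer sequence.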
void ArmVIXLAssembler::LoadImmediate(vixl32::Register rd, int32_t value) {
  // TODO(VIXL): Implement this optimization in VIXL.
  if (!ShifterOperandCanAlwaysHold(value) && ShifterOperandCanAlwaysHold(~value)) {
    ___ Mvn(rd, ~value);
  } else {
    ___ Mov(rd, value);
  }
}

bool ArmVIXLAssembler::ShifterOperandCanAlwaysHold(uint32_t immediate) {
  return vixl_masm_.IsModifiedImmediate(immediate);
}

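// Check whether `immediate` can be encoded in the flexible operand of
// `opcode`, taking into account that ADD/SUB also have a plain 12-bit
// immediate form, usable only when the instruction need not set flags.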
bool ArmVIXLAssembler::ShifterOperandCanHold(Opcode opcode,
                                             uint32_t immediate,
                                             vixl::aarch32::FlagsUpdate update_flags) {
  switch (opcode) {
    case ADD:
    case SUB:
      // Less than (or equal to) 12 bits can be done if we don't need to set condition codes.
      if (IsUint<12>(immediate) && update_flags != vixl::aarch32::SetFlags) {
        return true;
      }
      return ShifterOperandCanAlwaysHold(immediate);

    case MOV:
      // TODO: Support less than or equal to 12 bits.
      return ShifterOperandCanAlwaysHold(immediate);

    case MVN:
    default:
      return ShifterOperandCanAlwaysHold(immediate);
  }
}

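// Try to split `offset` into a part that can be added to the base register
// with a single ADD/SUB (`add_to_base`) and a residue that fits the load/store
// addressing mode (`offset_for_load_store`). The two parts use disjoint bits,
// so `add_to_base + offset_for_load_store == offset` when this returns true.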
bool ArmVIXLAssembler::CanSplitLoadStoreOffset(int32_t allowed_offset_bits,
                                               int32_t offset,
                                               /*out*/ int32_t* add_to_base,
                                               /*out*/ int32_t* offset_for_load_store) {
  int32_t other_bits = offset & ~allowed_offset_bits;
  if (ShifterOperandCanAlwaysHold(other_bits) || ShifterOperandCanAlwaysHold(-other_bits)) {
    *add_to_base = offset & ~allowed_offset_bits;
    *offset_for_load_store = offset & allowed_offset_bits;
    return true;
  }
  return false;
}

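// Fold the unencodable part of `offset` into `temp` (as `base` plus that
// part) and return the residual offset to use with `temp` as the new base.
// If the offset cannot be split, materialize it entirely in `temp` and
// return 0.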
int32_t ArmVIXLAssembler::AdjustLoadStoreOffset(int32_t allowed_offset_bits,
                                                vixl32::Register temp,
                                                vixl32::Register base,
                                                int32_t offset) {
  DCHECK_NE(offset & ~allowed_offset_bits, 0);
  int32_t add_to_base, offset_for_load;
  if (CanSplitLoadStoreOffset(allowed_offset_bits, offset, &add_to_base, &offset_for_load)) {
    ___ Add(temp, base, add_to_base);
    return offset_for_load;
  } else {
    ___ Mov(temp, offset);
    ___ Add(temp, temp, base);
    return 0;
  }
}

// TODO(VIXL): Implement this in VIXL.
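// Return a mask of the offset bits that the addressing mode for `type` can
// encode: imm12 for integer loads, imm8 scaled by 4 for VFP loads and LDRD.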
int32_t ArmVIXLAssembler::GetAllowedLoadOffsetBits(LoadOperandType type) {
  switch (type) {
    case kLoadSignedByte:
    case kLoadSignedHalfword:
    case kLoadUnsignedHalfword:
    case kLoadUnsignedByte:
    case kLoadWord:
      // We can encode imm12 offset.
      return 0xfff;
    case kLoadSWord:
    case kLoadDWord:
    case kLoadWordPair:
      // We can encode imm8:'00' offset.
      return 0xff << 2;
    default:
      LOG(FATAL) << "UNREACHABLE";
      UNREACHABLE();
  }
}

// TODO(VIXL): Implement this in VIXL.
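// Store counterpart of GetAllowedLoadOffsetBits().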
int32_t ArmVIXLAssembler::GetAllowedStoreOffsetBits(StoreOperandType type) {
  switch (type) {
    case kStoreHalfword:
    case kStoreByte:
    case kStoreWord:
      // We can encode imm12 offset.
      return 0xfff;
    case kStoreSWord:
    case kStoreDWord:
    case kStoreWordPair:
      // We can encode imm8:'00' offset.
      return 0xff << 2;
    default:
      LOG(FATAL) << "UNREACHABLE";
      UNREACHABLE();
  }
}

// TODO(VIXL): Implement this in VIXL.
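// Whether `offset` fits directly in the Thumb-2 addressing mode for `type`,
// i.e. whether the load can be emitted as a single instruction.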
static bool CanHoldLoadOffsetThumb(LoadOperandType type, int offset) {
  switch (type) {
    case kLoadSignedByte:
    case kLoadSignedHalfword:
    case kLoadUnsignedHalfword:
    case kLoadUnsignedByte:
    case kLoadWord:
      return IsAbsoluteUint<12>(offset);
    case kLoadSWord:
    case kLoadDWord:
      return IsAbsoluteUint<10>(offset) && IsAligned<4>(offset);  // VFP addressing mode.
    case kLoadWordPair:
      return IsAbsoluteUint<10>(offset) && IsAligned<4>(offset);
    default:
      LOG(FATAL) << "UNREACHABLE";
      UNREACHABLE();
  }
}

// TODO(VIXL): Implement this in VIXL.
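// Store counterpart of CanHoldLoadOffsetThumb().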
static bool CanHoldStoreOffsetThumb(StoreOperandType type, int offset) {
  switch (type) {
    case kStoreHalfword:
    case kStoreByte:
    case kStoreWord:
      return IsAbsoluteUint<12>(offset);
    case kStoreSWord:
    case kStoreDWord:
      return IsAbsoluteUint<10>(offset) && IsAligned<4>(offset);  // VFP addressing mode.
    case kStoreWordPair:
      return IsAbsoluteUint<10>(offset) && IsAligned<4>(offset);
    default:
      LOG(FATAL) << "UNREACHABLE";
      UNREACHABLE();
  }
}

// Implementation note: this method must emit at most one instruction when
// CanHoldStoreOffsetThumb(type, offset) holds.
// TODO(VIXL): Implement AdjustLoadStoreOffset logic in VIXL.
void ArmVIXLAssembler::StoreToOffset(StoreOperandType type,
                                     vixl32::Register reg,
                                     vixl32::Register base,
                                     int32_t offset) {
  vixl32::Register tmp_reg;
  UseScratchRegisterScope temps(&vixl_masm_);

  if (!CanHoldStoreOffsetThumb(type, offset)) {
    CHECK_NE(base.GetCode(), kIpCode);
    if ((reg.GetCode() != kIpCode) &&
        (!vixl_masm_.GetScratchRegisterList()->IsEmpty()) &&
        ((type != kStoreWordPair) || (reg.GetCode() + 1 != kIpCode))) {
      tmp_reg = temps.Acquire();
    } else {
      // Be careful not to use ip twice (for `reg` (or `reg` + 1 in
      // the case of a word-pair store) and `base`) to build the
      // Address object used by the store instruction(s) below.
      // Instead, save R5 on the stack (or R6 if R5 is already used by
      // `base`), use it as secondary temporary register, and restore
      // it after the store instruction has been emitted.
      tmp_reg = (base.GetCode() != 5) ? r5 : r6;
      ___ Push(tmp_reg);
      if (base.GetCode() == kSpCode) {
        // The Push above moved SP, so compensate in the offset.
        offset += kRegisterSize;
      }
    }
    // TODO: Implement indexed store (not available for STRD), inline AdjustLoadStoreOffset()
    // and in the "unsplittable" path get rid of the "add" by using the store indexed instead.
    offset = AdjustLoadStoreOffset(GetAllowedStoreOffsetBits(type), tmp_reg, base, offset);
    base = tmp_reg;
  }
  DCHECK(CanHoldStoreOffsetThumb(type, offset));
  switch (type) {
    case kStoreByte:
      ___ Strb(reg, MemOperand(base, offset));
      break;
    case kStoreHalfword:
      ___ Strh(reg, MemOperand(base, offset));
      break;
    case kStoreWord:
      ___ Str(reg, MemOperand(base, offset));
      break;
    case kStoreWordPair:
      ___ Strd(reg, vixl32::Register(reg.GetCode() + 1), MemOperand(base, offset));
      break;
    default:
      LOG(FATAL) << "UNREACHABLE";
      UNREACHABLE();
  }
  if ((tmp_reg.IsValid()) && (tmp_reg.GetCode() != kIpCode)) {
    CHECK(tmp_reg.Is(r5) || tmp_reg.Is(r6)) << tmp_reg;
    ___ Pop(tmp_reg);
  }
}

// Implementation note: this method must emit at most one instruction when
// CanHoldLoadOffsetThumb(type, offset) holds.
// TODO(VIXL): Implement AdjustLoadStoreOffset logic in VIXL.
void ArmVIXLAssembler::LoadFromOffset(LoadOperandType type,
                                      vixl32::Register dest,
                                      vixl32::Register base,
                                      int32_t offset) {
  if (!CanHoldLoadOffsetThumb(type, offset)) {
    CHECK(!base.Is(ip));
    // Inlined AdjustLoadStoreOffset() allows us to pull a few more tricks.
    int32_t allowed_offset_bits = GetAllowedLoadOffsetBits(type);
    DCHECK_NE(offset & ~allowed_offset_bits, 0);
    int32_t add_to_base, offset_for_load;
    if (CanSplitLoadStoreOffset(allowed_offset_bits, offset, &add_to_base, &offset_for_load)) {
      // Use `dest` for the adjusted base. If it is a low register, we may end
      // up using a 16-bit load.
      AddConstant(dest, base, add_to_base);
      base = dest;
      offset = offset_for_load;
    } else {
      UseScratchRegisterScope temps(&vixl_masm_);
      vixl32::Register temp = (dest.Is(base)) ? temps.Acquire() : dest;
      LoadImmediate(temp, offset);
      // TODO: Implement indexed load (not available for LDRD) and use it here to avoid the ADD.
      // Use `dest` for the adjusted base. If it is a low register, we may end
      // up using a 16-bit load.
      ___ Add(dest, dest, (dest.Is(base)) ? temp : base);
      base = dest;
      offset = 0;
    }
  }

  DCHECK(CanHoldLoadOffsetThumb(type, offset));
  switch (type) {
    case kLoadSignedByte:
      ___ Ldrsb(dest, MemOperand(base, offset));
      break;
    case kLoadUnsignedByte:
      ___ Ldrb(dest, MemOperand(base, offset));
      break;
    case kLoadSignedHalfword:
      ___ Ldrsh(dest, MemOperand(base, offset));
      break;
    case kLoadUnsignedHalfword:
      ___ Ldrh(dest, MemOperand(base, offset));
      break;
    case kLoadWord:
      CHECK(!dest.IsSP());
      ___ Ldr(dest, MemOperand(base, offset));
      break;
    case kLoadWordPair:
      ___ Ldrd(dest, vixl32::Register(dest.GetCode() + 1), MemOperand(base, offset));
      break;
    default:
      LOG(FATAL) << "UNREACHABLE";
      UNREACHABLE();
  }
}

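// VFP single- and double-precision loads and stores.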
void ArmVIXLAssembler::StoreSToOffset(vixl32::SRegister source,
                                      vixl32::Register base,
                                      int32_t offset) {
  ___ Vstr(source, MemOperand(base, offset));
}

void ArmVIXLAssembler::StoreDToOffset(vixl32::DRegister source,
                                      vixl32::Register base,
                                      int32_t offset) {
  ___ Vstr(source, MemOperand(base, offset));
}

void ArmVIXLAssembler::LoadSFromOffset(vixl32::SRegister reg,
                                       vixl32::Register base,
                                       int32_t offset) {
  ___ Vldr(reg, MemOperand(base, offset));
}

void ArmVIXLAssembler::LoadDFromOffset(vixl32::DRegister reg,
                                       vixl32::Register base,
                                       int32_t offset) {
  ___ Vldr(reg, MemOperand(base, offset));
}

// Prefer individual Str/Ldr over Add + Stm/Ldm in StoreRegisterList and
// LoadRegisterList for small register lists, where this generates less code.
static constexpr int kRegListThreshold = 4;

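// Store the registers in `regs` to consecutive stack slots starting at
// `stack_offset`. Large lists use a single STM (from a scratch base when the
// offset is non-zero); small lists use individual SP-relative STRs.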
void ArmVIXLAssembler::StoreRegisterList(RegList regs, size_t stack_offset) {
  int number_of_regs = POPCOUNT(static_cast<uint32_t>(regs));
  if (number_of_regs != 0) {
    if (number_of_regs > kRegListThreshold) {
      UseScratchRegisterScope temps(GetVIXLAssembler());
      vixl32::Register base = sp;
      if (stack_offset != 0) {
        base = temps.Acquire();
        DCHECK_EQ(regs & (1u << base.GetCode()), 0u);
        ___ Add(base, sp, Operand::From(stack_offset));
      }
      ___ Stm(base, NO_WRITE_BACK, RegisterList(regs));
    } else {
      for (uint32_t i : LowToHighBits(static_cast<uint32_t>(regs))) {
        ___ Str(vixl32::Register(i), MemOperand(sp, stack_offset));
        stack_offset += kRegSizeInBytes;
      }
    }
  }
}

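// Load counterpart of StoreRegisterList().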
void ArmVIXLAssembler::LoadRegisterList(RegList regs, size_t stack_offset) {
  int number_of_regs = POPCOUNT(static_cast<uint32_t>(regs));
  if (number_of_regs != 0) {
    if (number_of_regs > kRegListThreshold) {
      UseScratchRegisterScope temps(GetVIXLAssembler());
      vixl32::Register base = sp;
      if (stack_offset != 0) {
        base = temps.Acquire();
        ___ Add(base, sp, Operand::From(stack_offset));
      }
      ___ Ldm(base, NO_WRITE_BACK, RegisterList(regs));
    } else {
      for (uint32_t i : LowToHighBits(static_cast<uint32_t>(regs))) {
        ___ Ldr(vixl32::Register(i), MemOperand(sp, stack_offset));
        stack_offset += kRegSizeInBytes;
      }
    }
  }
}

void ArmVIXLAssembler::AddConstant(vixl32::Register rd, int32_t value) {
  AddConstant(rd, rd, value);
}

// TODO(VIXL): think about using adds which updates flags where possible.
void ArmVIXLAssembler::AddConstant(vixl32::Register rd,
                                   vixl32::Register rn,
                                   int32_t value) {
  DCHECK(vixl_masm_.OutsideITBlock());
  // TODO(VIXL): implement this optimization in VIXL.
  if (value == 0) {
    if (!rd.Is(rn)) {
      ___ Mov(rd, rn);
    }
    return;
  }
  ___ Add(rd, rn, value);
}

// Inside an IT block we must use the raw assembler; macro assembler
// instructions are not permitted there.
void ArmVIXLAssembler::AddConstantInIt(vixl32::Register rd,
                                       vixl32::Register rn,
                                       int32_t value,
                                       vixl32::Condition cond) {
  DCHECK(vixl_masm_.InITBlock());
  if (value == 0) {
    ___ mov(cond, rd, rn);
  } else {
    ___ add(cond, rd, rn, value);
  }
}

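// Compare `rn` against zero and branch to `label` if equal, using CBZ when
// the T32 encoding restrictions listed below allow it.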
void ArmVIXLMacroAssembler::CompareAndBranchIfZero(vixl32::Register rn,
                                                   vixl32::Label* label,
                                                   bool is_far_target) {
  if (!is_far_target && rn.IsLow() && !label->IsBound()) {
    // In T32, Cbz/Cbnz instructions have the following limitations:
    // - There are only 7 bits (i:imm5:'0') to encode the branch target
    //   address, so the target cannot be far.
    // - Only low registers (i.e. R0-R7) can be encoded.
    // - Only forward branches (unbound labels) are supported.
    Cbz(rn, label);
    return;
  }
  Cmp(rn, 0);
  B(eq, label, is_far_target);
}

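// Same as CompareAndBranchIfZero(), branching when `rn` is non-zero.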
void ArmVIXLMacroAssembler::CompareAndBranchIfNonZero(vixl32::Register rn,
                                                      vixl32::Label* label,
                                                      bool is_far_target) {
  if (!is_far_target && rn.IsLow() && !label->IsBound()) {
    Cbnz(rn, label);
    return;
  }
  Cmp(rn, 0);
  B(ne, label, is_far_target);
}

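// Branch overrides that try to use the 16-bit encoding of B for unbound
// forward labels (unless the caller requests a far target).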
void ArmVIXLMacroAssembler::B(vixl32::Label* label) {
  if (!label->IsBound()) {
    // Try to use a 16-bit encoding of the B instruction.
    DCHECK(OutsideITBlock());
    BPreferNear(label);
    return;
  }
  MacroAssembler::B(label);
}

void ArmVIXLMacroAssembler::B(vixl32::Condition cond, vixl32::Label* label, bool is_far_target) {
  if (!label->IsBound() && !is_far_target) {
    // Try to use a 16-bit encoding of the B instruction.
    DCHECK(OutsideITBlock());
    BPreferNear(cond, label);
    return;
  }
  MacroAssembler::B(cond, label);
}

}  // namespace arm
}  // namespace art