1 /*
2 * Copyright (C) 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "code_generator_arm64.h"
18
19 #include "arch/arm64/instruction_set_features_arm64.h"
20 #include "art_method.h"
21 #include "code_generator_utils.h"
22 #include "compiled_method.h"
23 #include "entrypoints/quick/quick_entrypoints.h"
24 #include "entrypoints/quick/quick_entrypoints_enum.h"
25 #include "gc/accounting/card_table.h"
26 #include "intrinsics.h"
27 #include "intrinsics_arm64.h"
28 #include "mirror/array-inl.h"
29 #include "mirror/class-inl.h"
30 #include "offsets.h"
31 #include "thread.h"
32 #include "utils/arm64/assembler_arm64.h"
33 #include "utils/assembler.h"
34 #include "utils/stack_checks.h"
35
36
37 using namespace vixl; // NOLINT(build/namespaces)
38
39 #ifdef __
40 #error "ARM64 Codegen VIXL macro-assembler macro already defined."
41 #endif
42
43 namespace art {
44
45 template<class MirrorType>
46 class GcRoot;
47
48 namespace arm64 {
49
50 using helpers::CPURegisterFrom;
51 using helpers::DRegisterFrom;
52 using helpers::FPRegisterFrom;
53 using helpers::HeapOperand;
54 using helpers::HeapOperandFrom;
55 using helpers::InputCPURegisterAt;
56 using helpers::InputFPRegisterAt;
57 using helpers::InputRegisterAt;
58 using helpers::InputOperandAt;
59 using helpers::Int64ConstantFrom;
60 using helpers::LocationFrom;
61 using helpers::OperandFromMemOperand;
62 using helpers::OutputCPURegister;
63 using helpers::OutputFPRegister;
64 using helpers::OutputRegister;
65 using helpers::RegisterFrom;
66 using helpers::StackOperandFrom;
67 using helpers::VIXLRegCodeFromART;
68 using helpers::WRegisterFrom;
69 using helpers::XRegisterFrom;
70 using helpers::ARM64EncodableConstantOrRegister;
71 using helpers::ArtVixlRegCodeCoherentForRegSet;
72
73 static constexpr int kCurrentMethodStackOffset = 0;
74 // The compare/jump sequence will generate about (1.5 * num_entries + 3) instructions, while the
75 // jump table version generates 7 instructions and num_entries literals. The compare/jump sequence
76 // therefore generates less code/data for a small num_entries.
77 static constexpr uint32_t kPackedSwitchCompareJumpThreshold = 7;
78
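// Map an HIR IfCondition to the equivalent ARM64 condition code for integer comparisons.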
79 inline Condition ARM64Condition(IfCondition cond) {
80 switch (cond) {
81 case kCondEQ: return eq;
82 case kCondNE: return ne;
83 case kCondLT: return lt;
84 case kCondLE: return le;
85 case kCondGT: return gt;
86 case kCondGE: return ge;
87 case kCondB: return lo;
88 case kCondBE: return ls;
89 case kCondA: return hi;
90 case kCondAE: return hs;
91 }
92 LOG(FATAL) << "Unreachable";
93 UNREACHABLE();
94 }
95
96 inline Condition ARM64FPCondition(IfCondition cond, bool gt_bias) {
97 // The ARM64 condition codes can express all the necessary branches, see the
98 // "Meaning (floating-point)" column in the table C1-1 in the ARMv8 reference manual.
99 // There is no dex instruction or HIR that would need the missing conditions
100 // "equal or unordered" or "not equal".
101 switch (cond) {
102 case kCondEQ: return eq;
103 case kCondNE: return ne /* unordered */;
104 case kCondLT: return gt_bias ? cc : lt /* unordered */;
105 case kCondLE: return gt_bias ? ls : le /* unordered */;
106 case kCondGT: return gt_bias ? hi /* unordered */ : gt;
107 case kCondGE: return gt_bias ? cs /* unordered */ : ge;
108 default:
109 LOG(FATAL) << "UNREACHABLE";
110 UNREACHABLE();
111 }
112 }
113
114 Location ARM64ReturnLocation(Primitive::Type return_type) {
115 // Note that in practice, `LocationFrom(x0)` and `LocationFrom(w0)` create the
116 // same Location object, and so do `LocationFrom(d0)` and `LocationFrom(s0)`,
117 // but we use the exact registers for clarity.
118 if (return_type == Primitive::kPrimFloat) {
119 return LocationFrom(s0);
120 } else if (return_type == Primitive::kPrimDouble) {
121 return LocationFrom(d0);
122 } else if (return_type == Primitive::kPrimLong) {
123 return LocationFrom(x0);
124 } else if (return_type == Primitive::kPrimVoid) {
125 return Location::NoLocation();
126 } else {
127 return LocationFrom(w0);
128 }
129 }
130
131 Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type return_type) {
132 return ARM64ReturnLocation(return_type);
133 }
134
135 #define __ down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler()->
136 #define QUICK_ENTRY_POINT(x) QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, x).Int32Value()
137
138 // Calculate the memory operand used to save/restore live registers.
139 static void SaveRestoreLiveRegistersHelper(CodeGenerator* codegen,
140 RegisterSet* register_set,
141 int64_t spill_offset,
142 bool is_save) {
143 DCHECK(ArtVixlRegCodeCoherentForRegSet(register_set->GetCoreRegisters(),
144 codegen->GetNumberOfCoreRegisters(),
145 register_set->GetFloatingPointRegisters(),
146 codegen->GetNumberOfFloatingPointRegisters()));
147
148 CPURegList core_list = CPURegList(CPURegister::kRegister, kXRegSize,
149 register_set->GetCoreRegisters() & (~callee_saved_core_registers.list()));
150 CPURegList fp_list = CPURegList(CPURegister::kFPRegister, kDRegSize,
151 register_set->GetFloatingPointRegisters() & (~callee_saved_fp_registers.list()));
152
153 MacroAssembler* masm = down_cast<CodeGeneratorARM64*>(codegen)->GetVIXLAssembler();
154 UseScratchRegisterScope temps(masm);
155
156 Register base = masm->StackPointer();
157 int64_t core_spill_size = core_list.TotalSizeInBytes();
158 int64_t fp_spill_size = fp_list.TotalSizeInBytes();
159 int64_t reg_size = kXRegSizeInBytes;
160 int64_t max_ls_pair_offset = spill_offset + core_spill_size + fp_spill_size - 2 * reg_size;
161 uint32_t ls_access_size = WhichPowerOf2(reg_size);
162 if (((core_list.Count() > 1) || (fp_list.Count() > 1)) &&
163 !masm->IsImmLSPair(max_ls_pair_offset, ls_access_size)) {
164     // If the offset does not fit in the instruction's immediate field, use an alternate register
165     // to compute the base address (the base address for the floating-point register spills).
166 Register new_base = temps.AcquireSameSizeAs(base);
167 __ Add(new_base, base, Operand(spill_offset + core_spill_size));
168 base = new_base;
169 spill_offset = -core_spill_size;
170 int64_t new_max_ls_pair_offset = fp_spill_size - 2 * reg_size;
171 DCHECK(masm->IsImmLSPair(spill_offset, ls_access_size));
172 DCHECK(masm->IsImmLSPair(new_max_ls_pair_offset, ls_access_size));
173 }
174
175 if (is_save) {
176 __ StoreCPURegList(core_list, MemOperand(base, spill_offset));
177 __ StoreCPURegList(fp_list, MemOperand(base, spill_offset + core_spill_size));
178 } else {
179 __ LoadCPURegList(core_list, MemOperand(base, spill_offset));
180 __ LoadCPURegList(fp_list, MemOperand(base, spill_offset + core_spill_size));
181 }
182 }
183
184 void SlowPathCodeARM64::SaveLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
185 RegisterSet* register_set = locations->GetLiveRegisters();
186 size_t stack_offset = codegen->GetFirstRegisterSlotInSlowPath();
187 for (size_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) {
188 if (!codegen->IsCoreCalleeSaveRegister(i) && register_set->ContainsCoreRegister(i)) {
189 // If the register holds an object, update the stack mask.
190 if (locations->RegisterContainsObject(i)) {
191 locations->SetStackBit(stack_offset / kVRegSize);
192 }
193 DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
194 DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
195 saved_core_stack_offsets_[i] = stack_offset;
196 stack_offset += kXRegSizeInBytes;
197 }
198 }
199
200 for (size_t i = 0, e = codegen->GetNumberOfFloatingPointRegisters(); i < e; ++i) {
201 if (!codegen->IsFloatingPointCalleeSaveRegister(i) &&
202 register_set->ContainsFloatingPointRegister(i)) {
203 DCHECK_LT(stack_offset, codegen->GetFrameSize() - codegen->FrameEntrySpillSize());
204 DCHECK_LT(i, kMaximumNumberOfExpectedRegisters);
205 saved_fpu_stack_offsets_[i] = stack_offset;
206 stack_offset += kDRegSizeInBytes;
207 }
208 }
209
210 SaveRestoreLiveRegistersHelper(codegen, register_set,
211 codegen->GetFirstRegisterSlotInSlowPath(), true /* is_save */);
212 }
213
214 void SlowPathCodeARM64::RestoreLiveRegisters(CodeGenerator* codegen, LocationSummary* locations) {
215 RegisterSet* register_set = locations->GetLiveRegisters();
216 SaveRestoreLiveRegistersHelper(codegen, register_set,
217 codegen->GetFirstRegisterSlotInSlowPath(), false /* is_save */);
218 }
219
220 class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 {
221 public:
222   explicit BoundsCheckSlowPathARM64(HBoundsCheck* instruction) : SlowPathCodeARM64(instruction) {}
223
224   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
225 LocationSummary* locations = instruction_->GetLocations();
226 CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
227
228 __ Bind(GetEntryLabel());
229 if (instruction_->CanThrowIntoCatchBlock()) {
230 // Live registers will be restored in the catch block if caught.
231 SaveLiveRegisters(codegen, instruction_->GetLocations());
232 }
233 // We're moving two locations to locations that could overlap, so we need a parallel
234 // move resolver.
235 InvokeRuntimeCallingConvention calling_convention;
236 codegen->EmitParallelMoves(
237 locations->InAt(0), LocationFrom(calling_convention.GetRegisterAt(0)), Primitive::kPrimInt,
238 locations->InAt(1), LocationFrom(calling_convention.GetRegisterAt(1)), Primitive::kPrimInt);
239 arm64_codegen->InvokeRuntime(
240 QUICK_ENTRY_POINT(pThrowArrayBounds), instruction_, instruction_->GetDexPc(), this);
241 CheckEntrypointTypes<kQuickThrowArrayBounds, void, int32_t, int32_t>();
242 }
243
244   bool IsFatal() const OVERRIDE { return true; }
245
246   const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathARM64"; }
247
248 private:
249 DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM64);
250 };
251
252 class DivZeroCheckSlowPathARM64 : public SlowPathCodeARM64 {
253 public:
254   explicit DivZeroCheckSlowPathARM64(HDivZeroCheck* instruction) : SlowPathCodeARM64(instruction) {}
255
256   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
257 CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
258 __ Bind(GetEntryLabel());
259 if (instruction_->CanThrowIntoCatchBlock()) {
260 // Live registers will be restored in the catch block if caught.
261 SaveLiveRegisters(codegen, instruction_->GetLocations());
262 }
263 arm64_codegen->InvokeRuntime(
264 QUICK_ENTRY_POINT(pThrowDivZero), instruction_, instruction_->GetDexPc(), this);
265 CheckEntrypointTypes<kQuickThrowDivZero, void, void>();
266 }
267
268   bool IsFatal() const OVERRIDE { return true; }
269
270   const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathARM64"; }
271
272 private:
273 DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARM64);
274 };
275
276 class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
277 public:
278   LoadClassSlowPathARM64(HLoadClass* cls,
279 HInstruction* at,
280 uint32_t dex_pc,
281 bool do_clinit)
282 : SlowPathCodeARM64(at), cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
283 DCHECK(at->IsLoadClass() || at->IsClinitCheck());
284 }
285
286   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
287 LocationSummary* locations = at_->GetLocations();
288 CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
289
290 __ Bind(GetEntryLabel());
291 SaveLiveRegisters(codegen, locations);
292
293 InvokeRuntimeCallingConvention calling_convention;
294 __ Mov(calling_convention.GetRegisterAt(0).W(), cls_->GetTypeIndex());
295 int32_t entry_point_offset = do_clinit_ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
296 : QUICK_ENTRY_POINT(pInitializeType);
297 arm64_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_, this);
298 if (do_clinit_) {
299 CheckEntrypointTypes<kQuickInitializeStaticStorage, void*, uint32_t>();
300 } else {
301 CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
302 }
303
304 // Move the class to the desired location.
305 Location out = locations->Out();
306 if (out.IsValid()) {
307 DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
308 Primitive::Type type = at_->GetType();
309 arm64_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
310 }
311
312 RestoreLiveRegisters(codegen, locations);
313 __ B(GetExitLabel());
314 }
315
316   const char* GetDescription() const OVERRIDE { return "LoadClassSlowPathARM64"; }
317
318 private:
319 // The class this slow path will load.
320 HLoadClass* const cls_;
321
322 // The instruction where this slow path is happening.
323 // (Might be the load class or an initialization check).
324 HInstruction* const at_;
325
326 // The dex PC of `at_`.
327 const uint32_t dex_pc_;
328
329 // Whether to initialize the class.
330 const bool do_clinit_;
331
332 DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM64);
333 };
334
335 class LoadStringSlowPathARM64 : public SlowPathCodeARM64 {
336 public:
337   explicit LoadStringSlowPathARM64(HLoadString* instruction) : SlowPathCodeARM64(instruction) {}
338
339   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
340 LocationSummary* locations = instruction_->GetLocations();
341 DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
342 CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
343
344 __ Bind(GetEntryLabel());
345 SaveLiveRegisters(codegen, locations);
346
347 InvokeRuntimeCallingConvention calling_convention;
348 const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex();
349 __ Mov(calling_convention.GetRegisterAt(0).W(), string_index);
350 arm64_codegen->InvokeRuntime(
351 QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc(), this);
352 CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
353 Primitive::Type type = instruction_->GetType();
354 arm64_codegen->MoveLocation(locations->Out(), calling_convention.GetReturnLocation(type), type);
355
356 RestoreLiveRegisters(codegen, locations);
357 __ B(GetExitLabel());
358 }
359
360   const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathARM64"; }
361
362 private:
363 DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM64);
364 };
365
366 class NullCheckSlowPathARM64 : public SlowPathCodeARM64 {
367 public:
368   explicit NullCheckSlowPathARM64(HNullCheck* instr) : SlowPathCodeARM64(instr) {}
369
370   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
371 CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
372 __ Bind(GetEntryLabel());
373 if (instruction_->CanThrowIntoCatchBlock()) {
374 // Live registers will be restored in the catch block if caught.
375 SaveLiveRegisters(codegen, instruction_->GetLocations());
376 }
377 arm64_codegen->InvokeRuntime(
378 QUICK_ENTRY_POINT(pThrowNullPointer), instruction_, instruction_->GetDexPc(), this);
379 CheckEntrypointTypes<kQuickThrowNullPointer, void, void>();
380 }
381
382   bool IsFatal() const OVERRIDE { return true; }
383
384   const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathARM64"; }
385
386 private:
387 DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM64);
388 };
389
390 class SuspendCheckSlowPathARM64 : public SlowPathCodeARM64 {
391 public:
392   SuspendCheckSlowPathARM64(HSuspendCheck* instruction, HBasicBlock* successor)
393 : SlowPathCodeARM64(instruction), successor_(successor) {}
394
395   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
396 CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
397 __ Bind(GetEntryLabel());
398 SaveLiveRegisters(codegen, instruction_->GetLocations());
399 arm64_codegen->InvokeRuntime(
400 QUICK_ENTRY_POINT(pTestSuspend), instruction_, instruction_->GetDexPc(), this);
401 CheckEntrypointTypes<kQuickTestSuspend, void, void>();
402 RestoreLiveRegisters(codegen, instruction_->GetLocations());
403 if (successor_ == nullptr) {
404 __ B(GetReturnLabel());
405 } else {
406 __ B(arm64_codegen->GetLabelOf(successor_));
407 }
408 }
409
410   vixl::Label* GetReturnLabel() {
411 DCHECK(successor_ == nullptr);
412 return &return_label_;
413 }
414
415   HBasicBlock* GetSuccessor() const {
416 return successor_;
417 }
418
419   const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathARM64"; }
420
421 private:
422 // If not null, the block to branch to after the suspend check.
423 HBasicBlock* const successor_;
424
425 // If `successor_` is null, the label to branch to after the suspend check.
426 vixl::Label return_label_;
427
428 DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathARM64);
429 };
430
431 class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
432 public:
433   TypeCheckSlowPathARM64(HInstruction* instruction, bool is_fatal)
434 : SlowPathCodeARM64(instruction), is_fatal_(is_fatal) {}
435
436   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
437 LocationSummary* locations = instruction_->GetLocations();
438 Location class_to_check = locations->InAt(1);
439 Location object_class = instruction_->IsCheckCast() ? locations->GetTemp(0)
440 : locations->Out();
441 DCHECK(instruction_->IsCheckCast()
442 || !locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
443 CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
444 uint32_t dex_pc = instruction_->GetDexPc();
445
446 __ Bind(GetEntryLabel());
447
448 if (!is_fatal_) {
449 SaveLiveRegisters(codegen, locations);
450 }
451
452 // We're moving two locations to locations that could overlap, so we need a parallel
453 // move resolver.
454 InvokeRuntimeCallingConvention calling_convention;
455 codegen->EmitParallelMoves(
456 class_to_check, LocationFrom(calling_convention.GetRegisterAt(0)), Primitive::kPrimNot,
457 object_class, LocationFrom(calling_convention.GetRegisterAt(1)), Primitive::kPrimNot);
458
459 if (instruction_->IsInstanceOf()) {
460 arm64_codegen->InvokeRuntime(
461 QUICK_ENTRY_POINT(pInstanceofNonTrivial), instruction_, dex_pc, this);
462 CheckEntrypointTypes<kQuickInstanceofNonTrivial, uint32_t,
463 const mirror::Class*, const mirror::Class*>();
464 Primitive::Type ret_type = instruction_->GetType();
465 Location ret_loc = calling_convention.GetReturnLocation(ret_type);
466 arm64_codegen->MoveLocation(locations->Out(), ret_loc, ret_type);
467 } else {
468 DCHECK(instruction_->IsCheckCast());
469 arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pCheckCast), instruction_, dex_pc, this);
470 CheckEntrypointTypes<kQuickCheckCast, void, const mirror::Class*, const mirror::Class*>();
471 }
472
473 if (!is_fatal_) {
474 RestoreLiveRegisters(codegen, locations);
475 __ B(GetExitLabel());
476 }
477 }
478
479   const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathARM64"; }
480   bool IsFatal() const { return is_fatal_; }
481
482 private:
483 const bool is_fatal_;
484
485 DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM64);
486 };
487
488 class DeoptimizationSlowPathARM64 : public SlowPathCodeARM64 {
489 public:
490   explicit DeoptimizationSlowPathARM64(HDeoptimize* instruction)
491 : SlowPathCodeARM64(instruction) {}
492
493   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
494 CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
495 __ Bind(GetEntryLabel());
496 SaveLiveRegisters(codegen, instruction_->GetLocations());
497 arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pDeoptimize),
498 instruction_,
499 instruction_->GetDexPc(),
500 this);
501 CheckEntrypointTypes<kQuickDeoptimize, void, void>();
502 }
503
504   const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathARM64"; }
505
506 private:
507 DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARM64);
508 };
509
510 class ArraySetSlowPathARM64 : public SlowPathCodeARM64 {
511 public:
512   explicit ArraySetSlowPathARM64(HInstruction* instruction) : SlowPathCodeARM64(instruction) {}
513
514   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
515 LocationSummary* locations = instruction_->GetLocations();
516 __ Bind(GetEntryLabel());
517 SaveLiveRegisters(codegen, locations);
518
519 InvokeRuntimeCallingConvention calling_convention;
520 HParallelMove parallel_move(codegen->GetGraph()->GetArena());
521 parallel_move.AddMove(
522 locations->InAt(0),
523 LocationFrom(calling_convention.GetRegisterAt(0)),
524 Primitive::kPrimNot,
525 nullptr);
526 parallel_move.AddMove(
527 locations->InAt(1),
528 LocationFrom(calling_convention.GetRegisterAt(1)),
529 Primitive::kPrimInt,
530 nullptr);
531 parallel_move.AddMove(
532 locations->InAt(2),
533 LocationFrom(calling_convention.GetRegisterAt(2)),
534 Primitive::kPrimNot,
535 nullptr);
536     codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
537
538 CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
539 arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pAputObject),
540 instruction_,
541 instruction_->GetDexPc(),
542 this);
543 CheckEntrypointTypes<kQuickAputObject, void, mirror::Array*, int32_t, mirror::Object*>();
544 RestoreLiveRegisters(codegen, locations);
545 __ B(GetExitLabel());
546 }
547
548   const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathARM64"; }
549
550 private:
551 DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathARM64);
552 };
553
554 void JumpTableARM64::EmitTable(CodeGeneratorARM64* codegen) {
555 uint32_t num_entries = switch_instr_->GetNumEntries();
556 DCHECK_GE(num_entries, kPackedSwitchCompareJumpThreshold);
557
558   // We are about to use the assembler to place literals directly. Make sure we have enough
559   // underlying code buffer space and that the jump table has been generated with the right size.
560 CodeBufferCheckScope scope(codegen->GetVIXLAssembler(), num_entries * sizeof(int32_t),
561 CodeBufferCheckScope::kCheck, CodeBufferCheckScope::kExactSize);
562
563 __ Bind(&table_start_);
564 const ArenaVector<HBasicBlock*>& successors = switch_instr_->GetBlock()->GetSuccessors();
565 for (uint32_t i = 0; i < num_entries; i++) {
566 vixl::Label* target_label = codegen->GetLabelOf(successors[i]);
567 DCHECK(target_label->IsBound());
568 ptrdiff_t jump_offset = target_label->location() - table_start_.location();
569 DCHECK_GT(jump_offset, std::numeric_limits<int32_t>::min());
570 DCHECK_LE(jump_offset, std::numeric_limits<int32_t>::max());
571 Literal<int32_t> literal(jump_offset);
572 __ place(&literal);
573 }
574 }
575
576 // Slow path marking an object during a read barrier.
577 class ReadBarrierMarkSlowPathARM64 : public SlowPathCodeARM64 {
578 public:
579   ReadBarrierMarkSlowPathARM64(HInstruction* instruction, Location out, Location obj)
580 : SlowPathCodeARM64(instruction), out_(out), obj_(obj) {
581 DCHECK(kEmitCompilerReadBarrier);
582 }
583
584   const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkSlowPathARM64"; }
585
586   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
587 LocationSummary* locations = instruction_->GetLocations();
588 Primitive::Type type = Primitive::kPrimNot;
589 DCHECK(locations->CanCall());
590 DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(out_.reg()));
591 DCHECK(instruction_->IsInstanceFieldGet() ||
592 instruction_->IsStaticFieldGet() ||
593 instruction_->IsArrayGet() ||
594 instruction_->IsLoadClass() ||
595 instruction_->IsLoadString() ||
596 instruction_->IsInstanceOf() ||
597 instruction_->IsCheckCast())
598 << "Unexpected instruction in read barrier marking slow path: "
599 << instruction_->DebugName();
600
601 __ Bind(GetEntryLabel());
602 SaveLiveRegisters(codegen, locations);
603
604 InvokeRuntimeCallingConvention calling_convention;
605 CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
606 arm64_codegen->MoveLocation(LocationFrom(calling_convention.GetRegisterAt(0)), obj_, type);
607 arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pReadBarrierMark),
608 instruction_,
609 instruction_->GetDexPc(),
610 this);
611 CheckEntrypointTypes<kQuickReadBarrierMark, mirror::Object*, mirror::Object*>();
612 arm64_codegen->MoveLocation(out_, calling_convention.GetReturnLocation(type), type);
613
614 RestoreLiveRegisters(codegen, locations);
615 __ B(GetExitLabel());
616 }
617
618 private:
619 const Location out_;
620 const Location obj_;
621
622 DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkSlowPathARM64);
623 };
624
625 // Slow path generating a read barrier for a heap reference.
626 class ReadBarrierForHeapReferenceSlowPathARM64 : public SlowPathCodeARM64 {
627 public:
628   ReadBarrierForHeapReferenceSlowPathARM64(HInstruction* instruction,
629 Location out,
630 Location ref,
631 Location obj,
632 uint32_t offset,
633 Location index)
634 : SlowPathCodeARM64(instruction),
635 out_(out),
636 ref_(ref),
637 obj_(obj),
638 offset_(offset),
639 index_(index) {
640 DCHECK(kEmitCompilerReadBarrier);
641 // If `obj` is equal to `out` or `ref`, it means the initial object
642 // has been overwritten by (or after) the heap object reference load
643 // to be instrumented, e.g.:
644 //
645     //   __ Ldr(out, HeapOperand(out, class_offset));
646 // codegen_->GenerateReadBarrierSlow(instruction, out_loc, out_loc, out_loc, offset);
647 //
648 // In that case, we have lost the information about the original
649 // object, and the emitted read barrier cannot work properly.
650 DCHECK(!obj.Equals(out)) << "obj=" << obj << " out=" << out;
651 DCHECK(!obj.Equals(ref)) << "obj=" << obj << " ref=" << ref;
652 }
653
654   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
655 CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
656 LocationSummary* locations = instruction_->GetLocations();
657 Primitive::Type type = Primitive::kPrimNot;
658 DCHECK(locations->CanCall());
659 DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(out_.reg()));
660 DCHECK(!instruction_->IsInvoke() ||
661 (instruction_->IsInvokeStaticOrDirect() &&
662 instruction_->GetLocations()->Intrinsified()))
663 << "Unexpected instruction in read barrier for heap reference slow path: "
664 << instruction_->DebugName();
665 // The read barrier instrumentation does not support the
666 // HArm64IntermediateAddress instruction yet.
667 DCHECK(!(instruction_->IsArrayGet() &&
668 instruction_->AsArrayGet()->GetArray()->IsArm64IntermediateAddress()));
669
670 __ Bind(GetEntryLabel());
671
672 SaveLiveRegisters(codegen, locations);
673
674     // We may have to change the index's value, but as `index_` is a
675     // constant member (like the other "inputs" of this slow path), we
676     // introduce a copy of it, `index`.
677 Location index = index_;
678 if (index_.IsValid()) {
679 // Handle `index_` for HArrayGet and intrinsic UnsafeGetObject.
680 if (instruction_->IsArrayGet()) {
681 // Compute the actual memory offset and store it in `index`.
682 Register index_reg = RegisterFrom(index_, Primitive::kPrimInt);
683 DCHECK(locations->GetLiveRegisters()->ContainsCoreRegister(index_.reg()));
684 if (codegen->IsCoreCalleeSaveRegister(index_.reg())) {
685 // We are about to change the value of `index_reg` (see the
686 // calls to vixl::MacroAssembler::Lsl and
687 // vixl::MacroAssembler::Mov below), but it has
688 // not been saved by the previous call to
689 // art::SlowPathCode::SaveLiveRegisters, as it is a
690 // callee-save register --
691 // art::SlowPathCode::SaveLiveRegisters does not consider
692 // callee-save registers, as it has been designed with the
693 // assumption that callee-save registers are supposed to be
694 // handled by the called function. So, as a callee-save
695 // register, `index_reg` _would_ eventually be saved onto
696 // the stack, but it would be too late: we would have
697 // changed its value earlier. Therefore, we manually save
698 // it here into another freely available register,
699 // `free_reg`, chosen of course among the caller-save
700 // registers (as a callee-save `free_reg` register would
701 // exhibit the same problem).
702 //
703 // Note we could have requested a temporary register from
704 // the register allocator instead; but we prefer not to, as
705 // this is a slow path, and we know we can find a
706 // caller-save register that is available.
707 Register free_reg = FindAvailableCallerSaveRegister(codegen);
708 __ Mov(free_reg.W(), index_reg);
709 index_reg = free_reg;
710 index = LocationFrom(index_reg);
711 } else {
712 // The initial register stored in `index_` has already been
713 // saved in the call to art::SlowPathCode::SaveLiveRegisters
714 // (as it is not a callee-save register), so we can freely
715 // use it.
716 }
717 // Shifting the index value contained in `index_reg` by the scale
718 // factor (2) cannot overflow in practice, as the runtime is
719 // unable to allocate object arrays with a size larger than
720 // 2^26 - 1 (that is, 2^28 - 4 bytes).
721 __ Lsl(index_reg, index_reg, Primitive::ComponentSizeShift(type));
722 static_assert(
723 sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
724 "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
725 __ Add(index_reg, index_reg, Operand(offset_));
726 } else {
727 DCHECK(instruction_->IsInvoke());
728 DCHECK(instruction_->GetLocations()->Intrinsified());
729 DCHECK((instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObject) ||
730 (instruction_->AsInvoke()->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile))
731 << instruction_->AsInvoke()->GetIntrinsic();
732 DCHECK_EQ(offset_, 0U);
733 DCHECK(index_.IsRegisterPair());
734         // UnsafeGet's offset location is a register pair; the low
735         // part contains the correct offset.
736 index = index_.ToLow();
737 }
738 }
739
740 // We're moving two or three locations to locations that could
741 // overlap, so we need a parallel move resolver.
742 InvokeRuntimeCallingConvention calling_convention;
743 HParallelMove parallel_move(codegen->GetGraph()->GetArena());
744 parallel_move.AddMove(ref_,
745 LocationFrom(calling_convention.GetRegisterAt(0)),
746 type,
747 nullptr);
748 parallel_move.AddMove(obj_,
749 LocationFrom(calling_convention.GetRegisterAt(1)),
750 type,
751 nullptr);
752 if (index.IsValid()) {
753 parallel_move.AddMove(index,
754 LocationFrom(calling_convention.GetRegisterAt(2)),
755 Primitive::kPrimInt,
756 nullptr);
757       codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
758 } else {
759       codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
760 arm64_codegen->MoveConstant(LocationFrom(calling_convention.GetRegisterAt(2)), offset_);
761 }
762 arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pReadBarrierSlow),
763 instruction_,
764 instruction_->GetDexPc(),
765 this);
766 CheckEntrypointTypes<
767 kQuickReadBarrierSlow, mirror::Object*, mirror::Object*, mirror::Object*, uint32_t>();
768 arm64_codegen->MoveLocation(out_, calling_convention.GetReturnLocation(type), type);
769
770 RestoreLiveRegisters(codegen, locations);
771
772 __ B(GetExitLabel());
773 }
774
775   const char* GetDescription() const OVERRIDE { return "ReadBarrierForHeapReferenceSlowPathARM64"; }
776
777 private:
778   Register FindAvailableCallerSaveRegister(CodeGenerator* codegen) {
779 size_t ref = static_cast<int>(XRegisterFrom(ref_).code());
780 size_t obj = static_cast<int>(XRegisterFrom(obj_).code());
781 for (size_t i = 0, e = codegen->GetNumberOfCoreRegisters(); i < e; ++i) {
782 if (i != ref && i != obj && !codegen->IsCoreCalleeSaveRegister(i)) {
783 return Register(VIXLRegCodeFromART(i), kXRegSize);
784 }
785 }
786 // We shall never fail to find a free caller-save register, as
787 // there are more than two core caller-save registers on ARM64
788 // (meaning it is possible to find one which is different from
789 // `ref` and `obj`).
790 DCHECK_GT(codegen->GetNumberOfCoreCallerSaveRegisters(), 2u);
791 LOG(FATAL) << "Could not find a free register";
792 UNREACHABLE();
793 }
794
795 const Location out_;
796 const Location ref_;
797 const Location obj_;
798 const uint32_t offset_;
799 // An additional location containing an index to an array.
800 // Only used for HArrayGet and the UnsafeGetObject &
801 // UnsafeGetObjectVolatile intrinsics.
802 const Location index_;
803
804 DISALLOW_COPY_AND_ASSIGN(ReadBarrierForHeapReferenceSlowPathARM64);
805 };
806
807 // Slow path generating a read barrier for a GC root.
808 class ReadBarrierForRootSlowPathARM64 : public SlowPathCodeARM64 {
809 public:
810   ReadBarrierForRootSlowPathARM64(HInstruction* instruction, Location out, Location root)
811 : SlowPathCodeARM64(instruction), out_(out), root_(root) {
812 DCHECK(kEmitCompilerReadBarrier);
813 }
814
815   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
816 LocationSummary* locations = instruction_->GetLocations();
817 Primitive::Type type = Primitive::kPrimNot;
818 DCHECK(locations->CanCall());
819 DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(out_.reg()));
820 DCHECK(instruction_->IsLoadClass() || instruction_->IsLoadString())
821 << "Unexpected instruction in read barrier for GC root slow path: "
822 << instruction_->DebugName();
823
824 __ Bind(GetEntryLabel());
825 SaveLiveRegisters(codegen, locations);
826
827 InvokeRuntimeCallingConvention calling_convention;
828 CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
829 // The argument of the ReadBarrierForRootSlow is not a managed
830 // reference (`mirror::Object*`), but a `GcRoot<mirror::Object>*`;
831 // thus we need a 64-bit move here, and we cannot use
832 //
833 // arm64_codegen->MoveLocation(
834 // LocationFrom(calling_convention.GetRegisterAt(0)),
835 // root_,
836 // type);
837 //
838 // which would emit a 32-bit move, as `type` is a (32-bit wide)
839 // reference type (`Primitive::kPrimNot`).
840 __ Mov(calling_convention.GetRegisterAt(0), XRegisterFrom(out_));
841 arm64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pReadBarrierForRootSlow),
842 instruction_,
843 instruction_->GetDexPc(),
844 this);
845 CheckEntrypointTypes<kQuickReadBarrierForRootSlow, mirror::Object*, GcRoot<mirror::Object>*>();
846 arm64_codegen->MoveLocation(out_, calling_convention.GetReturnLocation(type), type);
847
848 RestoreLiveRegisters(codegen, locations);
849 __ B(GetExitLabel());
850 }
851
852   const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathARM64"; }
853
854 private:
855 const Location out_;
856 const Location root_;
857
858 DISALLOW_COPY_AND_ASSIGN(ReadBarrierForRootSlowPathARM64);
859 };
860
861 #undef __
862
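// Pick the location of the next call argument under the ARM64 managed calling convention:
// floating-point values use the FP argument registers, other values use the core argument
// registers, and any remaining arguments are passed on the stack.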
863 Location InvokeDexCallingConventionVisitorARM64::GetNextLocation(Primitive::Type type) {
864 Location next_location;
865 if (type == Primitive::kPrimVoid) {
866 LOG(FATAL) << "Unreachable type " << type;
867 }
868
869 if (Primitive::IsFloatingPointType(type) &&
870 (float_index_ < calling_convention.GetNumberOfFpuRegisters())) {
871 next_location = LocationFrom(calling_convention.GetFpuRegisterAt(float_index_++));
872 } else if (!Primitive::IsFloatingPointType(type) &&
873 (gp_index_ < calling_convention.GetNumberOfRegisters())) {
874 next_location = LocationFrom(calling_convention.GetRegisterAt(gp_index_++));
875 } else {
876 size_t stack_offset = calling_convention.GetStackOffsetOf(stack_index_);
877 next_location = Primitive::Is64BitType(type) ? Location::DoubleStackSlot(stack_offset)
878 : Location::StackSlot(stack_offset);
879 }
880
881 // Space on the stack is reserved for all arguments.
882 stack_index_ += Primitive::Is64BitType(type) ? 2 : 1;
883 return next_location;
884 }
885
886 Location InvokeDexCallingConventionVisitorARM64::GetMethodLocation() const {
887 return LocationFrom(kArtMethodRegister);
888 }
889
890 CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph,
891 const Arm64InstructionSetFeatures& isa_features,
892 const CompilerOptions& compiler_options,
893 OptimizingCompilerStats* stats)
894 : CodeGenerator(graph,
895 kNumberOfAllocatableRegisters,
896 kNumberOfAllocatableFPRegisters,
897 kNumberOfAllocatableRegisterPairs,
898 callee_saved_core_registers.list(),
899 callee_saved_fp_registers.list(),
900 compiler_options,
901 stats),
902 block_labels_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
903 jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
904 location_builder_(graph, this),
905 instruction_visitor_(graph, this),
906 move_resolver_(graph->GetArena(), this),
907 assembler_(graph->GetArena()),
908 isa_features_(isa_features),
909 uint32_literals_(std::less<uint32_t>(),
910 graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
911 uint64_literals_(std::less<uint64_t>(),
912 graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
913 method_patches_(MethodReferenceComparator(),
914 graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
915 call_patches_(MethodReferenceComparator(),
916 graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
917 relative_call_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
918 pc_relative_dex_cache_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
919 boot_image_string_patches_(StringReferenceValueComparator(),
920 graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
921 pc_relative_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
922 boot_image_address_patches_(std::less<uint32_t>(),
923 graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
924 // Save the link register (containing the return address) to mimic Quick.
925 AddAllocatedRegister(LocationFrom(lr));
926 }
927
928 #define __ GetVIXLAssembler()->
929
930 void CodeGeneratorARM64::EmitJumpTables() {
931 for (auto&& jump_table : jump_tables_) {
932 jump_table->EmitTable(this);
933 }
934 }
935
936 void CodeGeneratorARM64::Finalize(CodeAllocator* allocator) {
937 EmitJumpTables();
938 // Ensure we emit the literal pool.
939 __ FinalizeCode();
940
941 CodeGenerator::Finalize(allocator);
942 }
943
944 void ParallelMoveResolverARM64::PrepareForEmitNativeCode() {
945 // Note: There are 6 kinds of moves:
946 // 1. constant -> GPR/FPR (non-cycle)
947 // 2. constant -> stack (non-cycle)
948 // 3. GPR/FPR -> GPR/FPR
949 // 4. GPR/FPR -> stack
950 // 5. stack -> GPR/FPR
951 // 6. stack -> stack (non-cycle)
952   // Cases 1, 2 and 6 should never be included in a dependency cycle on ARM64. For cases 3, 4 and
953   // 5, VIXL uses at most 1 GPR. VIXL has 2 GPR and 1 FPR temps, and there should be no intersecting
954   // cycles on ARM64, so we always have 1 GPR and 1 FPR VIXL temp available to resolve the
955   // dependency.
956 vixl_temps_.Open(GetVIXLAssembler());
957 }
958
959 void ParallelMoveResolverARM64::FinishEmitNativeCode() {
960 vixl_temps_.Close();
961 }
962
963 Location ParallelMoveResolverARM64::AllocateScratchLocationFor(Location::Kind kind) {
964 DCHECK(kind == Location::kRegister || kind == Location::kFpuRegister ||
965 kind == Location::kStackSlot || kind == Location::kDoubleStackSlot);
966 kind = (kind == Location::kFpuRegister) ? Location::kFpuRegister : Location::kRegister;
967 Location scratch = GetScratchLocation(kind);
968 if (!scratch.Equals(Location::NoLocation())) {
969 return scratch;
970 }
971 // Allocate from VIXL temp registers.
972 if (kind == Location::kRegister) {
973 scratch = LocationFrom(vixl_temps_.AcquireX());
974 } else {
975 DCHECK(kind == Location::kFpuRegister);
976 scratch = LocationFrom(vixl_temps_.AcquireD());
977 }
978 AddScratchLocation(scratch);
979 return scratch;
980 }
981
982 void ParallelMoveResolverARM64::FreeScratchLocation(Location loc) {
983 if (loc.IsRegister()) {
984 vixl_temps_.Release(XRegisterFrom(loc));
985 } else {
986 DCHECK(loc.IsFpuRegister());
987 vixl_temps_.Release(DRegisterFrom(loc));
988 }
989 RemoveScratchLocation(loc);
990 }
991
992 void ParallelMoveResolverARM64::EmitMove(size_t index) {
993 MoveOperands* move = moves_[index];
994 codegen_->MoveLocation(move->GetDestination(), move->GetSource(), Primitive::kPrimVoid);
995 }
996
997 void CodeGeneratorARM64::GenerateFrameEntry() {
998 MacroAssembler* masm = GetVIXLAssembler();
999 BlockPoolsScope block_pools(masm);
1000 __ Bind(&frame_entry_label_);
1001
1002 bool do_overflow_check = FrameNeedsStackCheck(GetFrameSize(), kArm64) || !IsLeafMethod();
1003 if (do_overflow_check) {
1004 UseScratchRegisterScope temps(masm);
1005 Register temp = temps.AcquireX();
1006 DCHECK(GetCompilerOptions().GetImplicitStackOverflowChecks());
1007 __ Sub(temp, sp, static_cast<int32_t>(GetStackOverflowReservedBytes(kArm64)));
1008 __ Ldr(wzr, MemOperand(temp, 0));
1009 RecordPcInfo(nullptr, 0);
1010 }
1011
1012 if (!HasEmptyFrame()) {
1013 int frame_size = GetFrameSize();
1014 // Stack layout:
1015 // sp[frame_size - 8] : lr.
1016 // ... : other preserved core registers.
1017 // ... : other preserved fp registers.
1018 // ... : reserved frame space.
1019 // sp[0] : current method.
1020 __ Str(kArtMethodRegister, MemOperand(sp, -frame_size, PreIndex));
1021 GetAssembler()->cfi().AdjustCFAOffset(frame_size);
1022 GetAssembler()->SpillRegisters(GetFramePreservedCoreRegisters(),
1023 frame_size - GetCoreSpillSize());
1024 GetAssembler()->SpillRegisters(GetFramePreservedFPRegisters(),
1025 frame_size - FrameEntrySpillSize());
1026 }
1027 }
1028
1029 void CodeGeneratorARM64::GenerateFrameExit() {
1030 BlockPoolsScope block_pools(GetVIXLAssembler());
1031 GetAssembler()->cfi().RememberState();
1032 if (!HasEmptyFrame()) {
1033 int frame_size = GetFrameSize();
1034 GetAssembler()->UnspillRegisters(GetFramePreservedFPRegisters(),
1035 frame_size - FrameEntrySpillSize());
1036 GetAssembler()->UnspillRegisters(GetFramePreservedCoreRegisters(),
1037 frame_size - GetCoreSpillSize());
1038 __ Drop(frame_size);
1039 GetAssembler()->cfi().AdjustCFAOffset(-frame_size);
1040 }
1041 __ Ret();
1042 GetAssembler()->cfi().RestoreState();
1043 GetAssembler()->cfi().DefCFAOffset(GetFrameSize());
1044 }
1045
1046 vixl::CPURegList CodeGeneratorARM64::GetFramePreservedCoreRegisters() const {
1047 DCHECK(ArtVixlRegCodeCoherentForRegSet(core_spill_mask_, GetNumberOfCoreRegisters(), 0, 0));
1048 return vixl::CPURegList(vixl::CPURegister::kRegister, vixl::kXRegSize,
1049 core_spill_mask_);
1050 }
1051
1052 vixl::CPURegList CodeGeneratorARM64::GetFramePreservedFPRegisters() const {
1053 DCHECK(ArtVixlRegCodeCoherentForRegSet(0, 0, fpu_spill_mask_,
1054 GetNumberOfFloatingPointRegisters()));
1055 return vixl::CPURegList(vixl::CPURegister::kFPRegister, vixl::kDRegSize,
1056 fpu_spill_mask_);
1057 }
1058
1059 void CodeGeneratorARM64::Bind(HBasicBlock* block) {
1060 __ Bind(GetLabelOf(block));
1061 }
1062
1063 void CodeGeneratorARM64::MoveConstant(Location location, int32_t value) {
1064 DCHECK(location.IsRegister());
1065 __ Mov(RegisterFrom(location, Primitive::kPrimInt), value);
1066 }
1067
1068 void CodeGeneratorARM64::AddLocationAsTemp(Location location, LocationSummary* locations) {
1069 if (location.IsRegister()) {
1070 locations->AddTemp(location);
1071 } else {
1072 UNIMPLEMENTED(FATAL) << "AddLocationAsTemp not implemented for location " << location;
1073 }
1074 }
1075
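// Mark the card-table entry covering `object` after a reference store so the GC rescans it.
// When `value_can_be_null` is set, the marking is skipped for a null `value`.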
1076 void CodeGeneratorARM64::MarkGCCard(Register object, Register value, bool value_can_be_null) {
1077 UseScratchRegisterScope temps(GetVIXLAssembler());
1078 Register card = temps.AcquireX();
1079 Register temp = temps.AcquireW(); // Index within the CardTable - 32bit.
1080 vixl::Label done;
1081 if (value_can_be_null) {
1082 __ Cbz(value, &done);
1083 }
1084 __ Ldr(card, MemOperand(tr, Thread::CardTableOffset<kArm64WordSize>().Int32Value()));
1085 __ Lsr(temp, object, gc::accounting::CardTable::kCardShift);
1086 __ Strb(card, MemOperand(card, temp.X()));
1087 if (value_can_be_null) {
1088 __ Bind(&done);
1089 }
1090 }
1091
1092 void CodeGeneratorARM64::SetupBlockedRegisters() const {
1093 // Blocked core registers:
1094 // lr : Runtime reserved.
1095 // tr : Runtime reserved.
1096 // xSuspend : Runtime reserved. TODO: Unblock this when the runtime stops using it.
1097 // ip1 : VIXL core temp.
1098 // ip0 : VIXL core temp.
1099 //
1100 // Blocked fp registers:
1101 // d31 : VIXL fp temp.
1102 CPURegList reserved_core_registers = vixl_reserved_core_registers;
1103 reserved_core_registers.Combine(runtime_reserved_core_registers);
1104 while (!reserved_core_registers.IsEmpty()) {
1105 blocked_core_registers_[reserved_core_registers.PopLowestIndex().code()] = true;
1106 }
1107
1108 CPURegList reserved_fp_registers = vixl_reserved_fp_registers;
1109 while (!reserved_fp_registers.IsEmpty()) {
1110 blocked_fpu_registers_[reserved_fp_registers.PopLowestIndex().code()] = true;
1111 }
1112
1113 if (GetGraph()->IsDebuggable()) {
1114 // Stubs do not save callee-save floating point registers. If the graph
1115 // is debuggable, we need to deal with these registers differently. For
1116 // now, just block them.
1117 CPURegList reserved_fp_registers_debuggable = callee_saved_fp_registers;
1118 while (!reserved_fp_registers_debuggable.IsEmpty()) {
1119 blocked_fpu_registers_[reserved_fp_registers_debuggable.PopLowestIndex().code()] = true;
1120 }
1121 }
1122 }
1123
1124 size_t CodeGeneratorARM64::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
1125 Register reg = Register(VIXLRegCodeFromART(reg_id), kXRegSize);
1126 __ Str(reg, MemOperand(sp, stack_index));
1127 return kArm64WordSize;
1128 }
1129
1130 size_t CodeGeneratorARM64::RestoreCoreRegister(size_t stack_index, uint32_t reg_id) {
1131 Register reg = Register(VIXLRegCodeFromART(reg_id), kXRegSize);
1132 __ Ldr(reg, MemOperand(sp, stack_index));
1133 return kArm64WordSize;
1134 }
1135
1136 size_t CodeGeneratorARM64::SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
1137 FPRegister reg = FPRegister(reg_id, kDRegSize);
1138 __ Str(reg, MemOperand(sp, stack_index));
1139 return kArm64WordSize;
1140 }
1141
1142 size_t CodeGeneratorARM64::RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
1143 FPRegister reg = FPRegister(reg_id, kDRegSize);
1144 __ Ldr(reg, MemOperand(sp, stack_index));
1145 return kArm64WordSize;
1146 }
1147
1148 void CodeGeneratorARM64::DumpCoreRegister(std::ostream& stream, int reg) const {
1149 stream << XRegister(reg);
1150 }
1151
1152 void CodeGeneratorARM64::DumpFloatingPointRegister(std::ostream& stream, int reg) const {
1153 stream << DRegister(reg);
1154 }
1155
1156 void CodeGeneratorARM64::MoveConstant(CPURegister destination, HConstant* constant) {
1157 if (constant->IsIntConstant()) {
1158 __ Mov(Register(destination), constant->AsIntConstant()->GetValue());
1159 } else if (constant->IsLongConstant()) {
1160 __ Mov(Register(destination), constant->AsLongConstant()->GetValue());
1161 } else if (constant->IsNullConstant()) {
1162 __ Mov(Register(destination), 0);
1163 } else if (constant->IsFloatConstant()) {
1164 __ Fmov(FPRegister(destination), constant->AsFloatConstant()->GetValue());
1165 } else {
1166 DCHECK(constant->IsDoubleConstant());
1167 __ Fmov(FPRegister(destination), constant->AsDoubleConstant()->GetValue());
1168 }
1169 }
1170
1171
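// Helper used in DCHECKs: returns whether the constant in `constant` is of a kind consistent with
// the primitive type used for the move.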
1172 static bool CoherentConstantAndType(Location constant, Primitive::Type type) {
1173 DCHECK(constant.IsConstant());
1174 HConstant* cst = constant.GetConstant();
1175 return (cst->IsIntConstant() && type == Primitive::kPrimInt) ||
1176 // Null is mapped to a core W register, which we associate with kPrimInt.
1177 (cst->IsNullConstant() && type == Primitive::kPrimInt) ||
1178 (cst->IsLongConstant() && type == Primitive::kPrimLong) ||
1179 (cst->IsFloatConstant() && type == Primitive::kPrimFloat) ||
1180 (cst->IsDoubleConstant() && type == Primitive::kPrimDouble);
1181 }
1182
1183 void CodeGeneratorARM64::MoveLocation(Location destination,
1184 Location source,
1185 Primitive::Type dst_type) {
1186 if (source.Equals(destination)) {
1187 return;
1188 }
1189
1190 // A valid move can always be inferred from the destination and source
1191 // locations. When moving from and to a register, the argument type can be
1192   // used to generate 32bit instead of 64bit moves. In debug mode we also
1193   // check the coherency of the locations and the type.
1194 bool unspecified_type = (dst_type == Primitive::kPrimVoid);
1195
1196 if (destination.IsRegister() || destination.IsFpuRegister()) {
1197 if (unspecified_type) {
1198 HConstant* src_cst = source.IsConstant() ? source.GetConstant() : nullptr;
1199 if (source.IsStackSlot() ||
1200 (src_cst != nullptr && (src_cst->IsIntConstant()
1201 || src_cst->IsFloatConstant()
1202 || src_cst->IsNullConstant()))) {
1203         // For stack slots and 32bit constants, a 32bit type is appropriate.
1204 dst_type = destination.IsRegister() ? Primitive::kPrimInt : Primitive::kPrimFloat;
1205 } else {
1206 // If the source is a double stack slot or a 64bit constant, a 64bit
1207 // type is appropriate. Else the source is a register, and since the
1208         // type has not been specified, we choose a 64bit type to force a 64bit
1209 // move.
1210 dst_type = destination.IsRegister() ? Primitive::kPrimLong : Primitive::kPrimDouble;
1211 }
1212 }
1213 DCHECK((destination.IsFpuRegister() && Primitive::IsFloatingPointType(dst_type)) ||
1214 (destination.IsRegister() && !Primitive::IsFloatingPointType(dst_type)));
1215 CPURegister dst = CPURegisterFrom(destination, dst_type);
1216 if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
1217 DCHECK(dst.Is64Bits() == source.IsDoubleStackSlot());
1218 __ Ldr(dst, StackOperandFrom(source));
1219 } else if (source.IsConstant()) {
1220 DCHECK(CoherentConstantAndType(source, dst_type));
1221 MoveConstant(dst, source.GetConstant());
1222 } else if (source.IsRegister()) {
1223 if (destination.IsRegister()) {
1224 __ Mov(Register(dst), RegisterFrom(source, dst_type));
1225 } else {
1226 DCHECK(destination.IsFpuRegister());
1227 Primitive::Type source_type = Primitive::Is64BitType(dst_type)
1228 ? Primitive::kPrimLong
1229 : Primitive::kPrimInt;
1230 __ Fmov(FPRegisterFrom(destination, dst_type), RegisterFrom(source, source_type));
1231 }
1232 } else {
1233 DCHECK(source.IsFpuRegister());
1234 if (destination.IsRegister()) {
1235 Primitive::Type source_type = Primitive::Is64BitType(dst_type)
1236 ? Primitive::kPrimDouble
1237 : Primitive::kPrimFloat;
1238 __ Fmov(RegisterFrom(destination, dst_type), FPRegisterFrom(source, source_type));
1239 } else {
1240 DCHECK(destination.IsFpuRegister());
1241 __ Fmov(FPRegister(dst), FPRegisterFrom(source, dst_type));
1242 }
1243 }
1244 } else { // The destination is not a register. It must be a stack slot.
1245 DCHECK(destination.IsStackSlot() || destination.IsDoubleStackSlot());
1246 if (source.IsRegister() || source.IsFpuRegister()) {
1247 if (unspecified_type) {
1248 if (source.IsRegister()) {
1249 dst_type = destination.IsStackSlot() ? Primitive::kPrimInt : Primitive::kPrimLong;
1250 } else {
1251 dst_type = destination.IsStackSlot() ? Primitive::kPrimFloat : Primitive::kPrimDouble;
1252 }
1253 }
1254 DCHECK((destination.IsDoubleStackSlot() == Primitive::Is64BitType(dst_type)) &&
1255 (source.IsFpuRegister() == Primitive::IsFloatingPointType(dst_type)));
1256 __ Str(CPURegisterFrom(source, dst_type), StackOperandFrom(destination));
1257 } else if (source.IsConstant()) {
1258 DCHECK(unspecified_type || CoherentConstantAndType(source, dst_type))
1259 << source << " " << dst_type;
1260 UseScratchRegisterScope temps(GetVIXLAssembler());
1261 HConstant* src_cst = source.GetConstant();
1262 CPURegister temp;
1263 if (src_cst->IsIntConstant() || src_cst->IsNullConstant()) {
1264 temp = temps.AcquireW();
1265 } else if (src_cst->IsLongConstant()) {
1266 temp = temps.AcquireX();
1267 } else if (src_cst->IsFloatConstant()) {
1268 temp = temps.AcquireS();
1269 } else {
1270 DCHECK(src_cst->IsDoubleConstant());
1271 temp = temps.AcquireD();
1272 }
1273 MoveConstant(temp, src_cst);
1274 __ Str(temp, StackOperandFrom(destination));
1275 } else {
1276 DCHECK(source.IsStackSlot() || source.IsDoubleStackSlot());
1277 DCHECK(source.IsDoubleStackSlot() == destination.IsDoubleStackSlot());
1278 UseScratchRegisterScope temps(GetVIXLAssembler());
1279 // There is generally less pressure on FP registers.
1280 FPRegister temp = destination.IsDoubleStackSlot() ? temps.AcquireD() : temps.AcquireS();
1281 __ Ldr(temp, StackOperandFrom(source));
1282 __ Str(temp, StackOperandFrom(destination));
1283 }
1284 }
1285 }
1286
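// Load a value of the given primitive type from `src` into `dst`, using the load instruction that
// matches the type's size and signedness.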
1287 void CodeGeneratorARM64::Load(Primitive::Type type,
1288 CPURegister dst,
1289 const MemOperand& src) {
1290 switch (type) {
1291 case Primitive::kPrimBoolean:
1292 __ Ldrb(Register(dst), src);
1293 break;
1294 case Primitive::kPrimByte:
1295 __ Ldrsb(Register(dst), src);
1296 break;
1297 case Primitive::kPrimShort:
1298 __ Ldrsh(Register(dst), src);
1299 break;
1300 case Primitive::kPrimChar:
1301 __ Ldrh(Register(dst), src);
1302 break;
1303 case Primitive::kPrimInt:
1304 case Primitive::kPrimNot:
1305 case Primitive::kPrimLong:
1306 case Primitive::kPrimFloat:
1307 case Primitive::kPrimDouble:
1308 DCHECK_EQ(dst.Is64Bits(), Primitive::Is64BitType(type));
1309 __ Ldr(dst, src);
1310 break;
1311 case Primitive::kPrimVoid:
1312 LOG(FATAL) << "Unreachable type " << type;
1313 }
1314 }
1315
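// Same as Load(), but with acquire semantics. The effective address is first materialized into a
// temporary register because load-acquire instructions only accept a plain register base.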
1316 void CodeGeneratorARM64::LoadAcquire(HInstruction* instruction,
1317 CPURegister dst,
1318 const MemOperand& src,
1319 bool needs_null_check) {
1320 MacroAssembler* masm = GetVIXLAssembler();
1321 BlockPoolsScope block_pools(masm);
1322 UseScratchRegisterScope temps(masm);
1323 Register temp_base = temps.AcquireX();
1324 Primitive::Type type = instruction->GetType();
1325
1326 DCHECK(!src.IsPreIndex());
1327 DCHECK(!src.IsPostIndex());
1328
1329 // TODO(vixl): Let the MacroAssembler handle MemOperand.
1330 __ Add(temp_base, src.base(), OperandFromMemOperand(src));
1331 MemOperand base = MemOperand(temp_base);
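// Acquire loads (Ldar/Ldarb/Ldarh) only accept a plain base-register address,
// which is why the effective address is materialized into temp_base above.
// Ldarb/Ldarh zero-extend, so the Sbfx below restores the sign of byte and
// short values.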
1332 switch (type) {
1333 case Primitive::kPrimBoolean:
1334 __ Ldarb(Register(dst), base);
1335 if (needs_null_check) {
1336 MaybeRecordImplicitNullCheck(instruction);
1337 }
1338 break;
1339 case Primitive::kPrimByte:
1340 __ Ldarb(Register(dst), base);
1341 if (needs_null_check) {
1342 MaybeRecordImplicitNullCheck(instruction);
1343 }
1344 __ Sbfx(Register(dst), Register(dst), 0, Primitive::ComponentSize(type) * kBitsPerByte);
1345 break;
1346 case Primitive::kPrimChar:
1347 __ Ldarh(Register(dst), base);
1348 if (needs_null_check) {
1349 MaybeRecordImplicitNullCheck(instruction);
1350 }
1351 break;
1352 case Primitive::kPrimShort:
1353 __ Ldarh(Register(dst), base);
1354 if (needs_null_check) {
1355 MaybeRecordImplicitNullCheck(instruction);
1356 }
1357 __ Sbfx(Register(dst), Register(dst), 0, Primitive::ComponentSize(type) * kBitsPerByte);
1358 break;
1359 case Primitive::kPrimInt:
1360 case Primitive::kPrimNot:
1361 case Primitive::kPrimLong:
1362 DCHECK_EQ(dst.Is64Bits(), Primitive::Is64BitType(type));
1363 __ Ldar(Register(dst), base);
1364 if (needs_null_check) {
1365 MaybeRecordImplicitNullCheck(instruction);
1366 }
1367 break;
1368 case Primitive::kPrimFloat:
1369 case Primitive::kPrimDouble: {
1370 DCHECK(dst.IsFPRegister());
1371 DCHECK_EQ(dst.Is64Bits(), Primitive::Is64BitType(type));
1372
1373 Register temp = dst.Is64Bits() ? temps.AcquireX() : temps.AcquireW();
1374 __ Ldar(temp, base);
1375 if (needs_null_check) {
1376 MaybeRecordImplicitNullCheck(instruction);
1377 }
1378 __ Fmov(FPRegister(dst), temp);
1379 break;
1380 }
1381 case Primitive::kPrimVoid:
1382 LOG(FATAL) << "Unreachable type " << type;
1383 }
1384 }
1385
1386 void CodeGeneratorARM64::Store(Primitive::Type type,
1387 CPURegister src,
1388 const MemOperand& dst) {
1389 switch (type) {
1390 case Primitive::kPrimBoolean:
1391 case Primitive::kPrimByte:
1392 __ Strb(Register(src), dst);
1393 break;
1394 case Primitive::kPrimChar:
1395 case Primitive::kPrimShort:
1396 __ Strh(Register(src), dst);
1397 break;
1398 case Primitive::kPrimInt:
1399 case Primitive::kPrimNot:
1400 case Primitive::kPrimLong:
1401 case Primitive::kPrimFloat:
1402 case Primitive::kPrimDouble:
1403 DCHECK_EQ(src.Is64Bits(), Primitive::Is64BitType(type));
1404 __ Str(src, dst);
1405 break;
1406 case Primitive::kPrimVoid:
1407 LOG(FATAL) << "Unreachable type " << type;
1408 }
1409 }
1410
1411 void CodeGeneratorARM64::StoreRelease(Primitive::Type type,
1412 CPURegister src,
1413 const MemOperand& dst) {
1414 UseScratchRegisterScope temps(GetVIXLAssembler());
1415 Register temp_base = temps.AcquireX();
1416
1417 DCHECK(!dst.IsPreIndex());
1418 DCHECK(!dst.IsPostIndex());
1419
1420 // TODO(vixl): Let the MacroAssembler handle this.
1421 Operand op = OperandFromMemOperand(dst);
1422 __ Add(temp_base, dst.base(), op);
1423 MemOperand base = MemOperand(temp_base);
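// As in LoadAcquire, release stores (Stlr/Stlrb/Stlrh) only accept a plain
// base register, so the effective address is computed into temp_base first.
// There is no FP store-release instruction, hence the Fmov through a core
// register for float and double values below.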
1424 switch (type) {
1425 case Primitive::kPrimBoolean:
1426 case Primitive::kPrimByte:
1427 __ Stlrb(Register(src), base);
1428 break;
1429 case Primitive::kPrimChar:
1430 case Primitive::kPrimShort:
1431 __ Stlrh(Register(src), base);
1432 break;
1433 case Primitive::kPrimInt:
1434 case Primitive::kPrimNot:
1435 case Primitive::kPrimLong:
1436 DCHECK_EQ(src.Is64Bits(), Primitive::Is64BitType(type));
1437 __ Stlr(Register(src), base);
1438 break;
1439 case Primitive::kPrimFloat:
1440 case Primitive::kPrimDouble: {
1441 DCHECK(src.IsFPRegister());
1442 DCHECK_EQ(src.Is64Bits(), Primitive::Is64BitType(type));
1443
1444 Register temp = src.Is64Bits() ? temps.AcquireX() : temps.AcquireW();
1445 __ Fmov(temp, FPRegister(src));
1446 __ Stlr(temp, base);
1447 break;
1448 }
1449 case Primitive::kPrimVoid:
1450 LOG(FATAL) << "Unreachable type " << type;
1451 }
1452 }
1453
1454 void CodeGeneratorARM64::InvokeRuntime(QuickEntrypointEnum entrypoint,
1455 HInstruction* instruction,
1456 uint32_t dex_pc,
1457 SlowPathCode* slow_path) {
1458 InvokeRuntime(GetThreadOffset<kArm64WordSize>(entrypoint).Int32Value(),
1459 instruction,
1460 dex_pc,
1461 slow_path);
1462 }
1463
1464 void CodeGeneratorARM64::InvokeRuntime(int32_t entry_point_offset,
1465 HInstruction* instruction,
1466 uint32_t dex_pc,
1467 SlowPathCode* slow_path) {
1468 ValidateInvokeRuntime(instruction, slow_path);
1469 BlockPoolsScope block_pools(GetVIXLAssembler());
1470 __ Ldr(lr, MemOperand(tr, entry_point_offset));
1471 __ Blr(lr);
1472 RecordPcInfo(instruction, dex_pc, slow_path);
1473 }
1474
1475 void InstructionCodeGeneratorARM64::GenerateClassInitializationCheck(SlowPathCodeARM64* slow_path,
1476 vixl::Register class_reg) {
1477 UseScratchRegisterScope temps(GetVIXLAssembler());
1478 Register temp = temps.AcquireW();
1479 size_t status_offset = mirror::Class::StatusOffset().SizeValue();
1480
1481 // Even if the initialized flag is set, we need to ensure consistent memory ordering.
1482 // TODO(vixl): Let the MacroAssembler handle MemOperand.
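// The code below performs an acquire load of the 32-bit class status and
// branches to the slow path whenever the status is below kStatusInitialized
// (not yet initialized, still initializing, or in an error state).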
1483 __ Add(temp, class_reg, status_offset);
1484 __ Ldar(temp, HeapOperand(temp));
1485 __ Cmp(temp, mirror::Class::kStatusInitialized);
1486 __ B(lt, slow_path->GetEntryLabel());
1487 __ Bind(slow_path->GetExitLabel());
1488 }
1489
1490 void CodeGeneratorARM64::GenerateMemoryBarrier(MemBarrierKind kind) {
1491 BarrierType type = BarrierAll;
1492
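// Note that kAnyAny and kAnyStore both map to a full DMB ISH: there is no
// cheaper AArch64 barrier that orders all earlier accesses against later
// stores only. BarrierReads and BarrierWrites emit DMB ISHLD and DMB ISHST
// respectively.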
1493 switch (kind) {
1494 case MemBarrierKind::kAnyAny:
1495 case MemBarrierKind::kAnyStore: {
1496 type = BarrierAll;
1497 break;
1498 }
1499 case MemBarrierKind::kLoadAny: {
1500 type = BarrierReads;
1501 break;
1502 }
1503 case MemBarrierKind::kStoreStore: {
1504 type = BarrierWrites;
1505 break;
1506 }
1507 default:
1508 LOG(FATAL) << "Unexpected memory barrier " << kind;
1509 }
1510 __ Dmb(InnerShareable, type);
1511 }
1512
1513 void InstructionCodeGeneratorARM64::GenerateSuspendCheck(HSuspendCheck* instruction,
1514 HBasicBlock* successor) {
1515 SuspendCheckSlowPathARM64* slow_path =
1516 down_cast<SuspendCheckSlowPathARM64*>(instruction->GetSlowPath());
1517 if (slow_path == nullptr) {
1518 slow_path = new (GetGraph()->GetArena()) SuspendCheckSlowPathARM64(instruction, successor);
1519 instruction->SetSlowPath(slow_path);
1520 codegen_->AddSlowPath(slow_path);
1521 if (successor != nullptr) {
1522 DCHECK(successor->IsLoopHeader());
1523 codegen_->ClearSpillSlotsFromLoopPhisInStackMap(instruction);
1524 }
1525 } else {
1526 DCHECK_EQ(slow_path->GetSuccessor(), successor);
1527 }
1528
1529 UseScratchRegisterScope temps(codegen_->GetVIXLAssembler());
1530 Register temp = temps.AcquireW();
1531
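// Load the 16-bit state-and-flags field of the current thread from TR; a
// non-zero value indicates a pending suspend or checkpoint request, which is
// serviced by the slow path.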
1532 __ Ldrh(temp, MemOperand(tr, Thread::ThreadFlagsOffset<kArm64WordSize>().SizeValue()));
1533 if (successor == nullptr) {
1534 __ Cbnz(temp, slow_path->GetEntryLabel());
1535 __ Bind(slow_path->GetReturnLabel());
1536 } else {
1537 __ Cbz(temp, codegen_->GetLabelOf(successor));
1538 __ B(slow_path->GetEntryLabel());
1539 // slow_path will return to GetLabelOf(successor).
1540 }
1541 }
1542
1543 InstructionCodeGeneratorARM64::InstructionCodeGeneratorARM64(HGraph* graph,
1544 CodeGeneratorARM64* codegen)
1545 : InstructionCodeGenerator(graph, codegen),
1546 assembler_(codegen->GetAssembler()),
1547 codegen_(codegen) {}
1548
1549 #define FOR_EACH_UNIMPLEMENTED_INSTRUCTION(M) \
1550 /* No unimplemented IR. */
1551
1552 #define UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name) name##UnimplementedInstructionBreakCode
1553
1554 enum UnimplementedInstructionBreakCode {
1555 // Using a base helps identify when we hit such breakpoints.
1556 UnimplementedInstructionBreakCodeBaseCode = 0x900,
1557 #define ENUM_UNIMPLEMENTED_INSTRUCTION(name) UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name),
1558 FOR_EACH_UNIMPLEMENTED_INSTRUCTION(ENUM_UNIMPLEMENTED_INSTRUCTION)
1559 #undef ENUM_UNIMPLEMENTED_INSTRUCTION
1560 };
1561
1562 #define DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS(name) \
1563 void InstructionCodeGeneratorARM64::Visit##name(H##name* instr ATTRIBUTE_UNUSED) { \
1564 __ Brk(UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name)); \
1565 } \
1566 void LocationsBuilderARM64::Visit##name(H##name* instr) { \
1567 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr); \
1568 locations->SetOut(Location::Any()); \
1569 }
1570 FOR_EACH_UNIMPLEMENTED_INSTRUCTION(DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS)
1571 #undef DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS
1572
1573 #undef UNIMPLEMENTED_INSTRUCTION_BREAK_CODE
1574 #undef FOR_EACH_UNIMPLEMENTED_INSTRUCTION
1575
1576 void LocationsBuilderARM64::HandleBinaryOp(HBinaryOperation* instr) {
1577 DCHECK_EQ(instr->InputCount(), 2U);
1578 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
1579 Primitive::Type type = instr->GetResultType();
1580 switch (type) {
1581 case Primitive::kPrimInt:
1582 case Primitive::kPrimLong:
1583 locations->SetInAt(0, Location::RequiresRegister());
1584 locations->SetInAt(1, ARM64EncodableConstantOrRegister(instr->InputAt(1), instr));
1585 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1586 break;
1587
1588 case Primitive::kPrimFloat:
1589 case Primitive::kPrimDouble:
1590 locations->SetInAt(0, Location::RequiresFpuRegister());
1591 locations->SetInAt(1, Location::RequiresFpuRegister());
1592 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
1593 break;
1594
1595 default:
1596 LOG(FATAL) << "Unexpected " << instr->DebugName() << " type " << type;
1597 }
1598 }
1599
1600 void LocationsBuilderARM64::HandleFieldGet(HInstruction* instruction) {
1601 DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
1602
1603 bool object_field_get_with_read_barrier =
1604 kEmitCompilerReadBarrier && (instruction->GetType() == Primitive::kPrimNot);
1605 LocationSummary* locations =
1606 new (GetGraph()->GetArena()) LocationSummary(instruction,
1607 object_field_get_with_read_barrier ?
1608 LocationSummary::kCallOnSlowPath :
1609 LocationSummary::kNoCall);
1610 locations->SetInAt(0, Location::RequiresRegister());
1611 if (Primitive::IsFloatingPointType(instruction->GetType())) {
1612 locations->SetOut(Location::RequiresFpuRegister());
1613 } else {
1614 // The output overlaps for an object field get when read barriers
1615 // are enabled: we do not want the load to overwrite the object's
1616 // location, as we need it to emit the read barrier.
1617 locations->SetOut(
1618 Location::RequiresRegister(),
1619 object_field_get_with_read_barrier ? Location::kOutputOverlap : Location::kNoOutputOverlap);
1620 }
1621 }
1622
1623 void InstructionCodeGeneratorARM64::HandleFieldGet(HInstruction* instruction,
1624 const FieldInfo& field_info) {
1625 DCHECK(instruction->IsInstanceFieldGet() || instruction->IsStaticFieldGet());
1626 LocationSummary* locations = instruction->GetLocations();
1627 Location base_loc = locations->InAt(0);
1628 Location out = locations->Out();
1629 uint32_t offset = field_info.GetFieldOffset().Uint32Value();
1630 Primitive::Type field_type = field_info.GetFieldType();
1631 BlockPoolsScope block_pools(GetVIXLAssembler());
1632 MemOperand field = HeapOperand(InputRegisterAt(instruction, 0), field_info.GetFieldOffset());
1633
1634 if (field_type == Primitive::kPrimNot && kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
1635 // Object FieldGet with Baker's read barrier case.
1636 MacroAssembler* masm = GetVIXLAssembler();
1637 UseScratchRegisterScope temps(masm);
1638 // /* HeapReference<Object> */ out = *(base + offset)
1639 Register base = RegisterFrom(base_loc, Primitive::kPrimNot);
1640 Register temp = temps.AcquireW();
1641 // Note that potential implicit null checks are handled in this
1642 // CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier call.
1643 codegen_->GenerateFieldLoadWithBakerReadBarrier(
1644 instruction,
1645 out,
1646 base,
1647 offset,
1648 temp,
1649 /* needs_null_check */ true,
1650 field_info.IsVolatile());
1651 } else {
1652 // General case.
1653 if (field_info.IsVolatile()) {
1654 // Note that a potential implicit null check is handled in this
1655 // CodeGeneratorARM64::LoadAcquire call.
1656 // NB: LoadAcquire will record the pc info if needed.
1657 codegen_->LoadAcquire(
1658 instruction, OutputCPURegister(instruction), field, /* needs_null_check */ true);
1659 } else {
1660 codegen_->Load(field_type, OutputCPURegister(instruction), field);
1661 codegen_->MaybeRecordImplicitNullCheck(instruction);
1662 }
1663 if (field_type == Primitive::kPrimNot) {
1664 // If read barriers are enabled, emit read barriers other than
1665 // Baker's using a slow path (and also unpoison the loaded
1666 // reference, if heap poisoning is enabled).
1667 codegen_->MaybeGenerateReadBarrierSlow(instruction, out, out, base_loc, offset);
1668 }
1669 }
1670 }
1671
1672 void LocationsBuilderARM64::HandleFieldSet(HInstruction* instruction) {
1673 LocationSummary* locations =
1674 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
1675 locations->SetInAt(0, Location::RequiresRegister());
1676 if (Primitive::IsFloatingPointType(instruction->InputAt(1)->GetType())) {
1677 locations->SetInAt(1, Location::RequiresFpuRegister());
1678 } else {
1679 locations->SetInAt(1, Location::RequiresRegister());
1680 }
1681 }
1682
1683 void InstructionCodeGeneratorARM64::HandleFieldSet(HInstruction* instruction,
1684 const FieldInfo& field_info,
1685 bool value_can_be_null) {
1686 DCHECK(instruction->IsInstanceFieldSet() || instruction->IsStaticFieldSet());
1687 BlockPoolsScope block_pools(GetVIXLAssembler());
1688
1689 Register obj = InputRegisterAt(instruction, 0);
1690 CPURegister value = InputCPURegisterAt(instruction, 1);
1691 CPURegister source = value;
1692 Offset offset = field_info.GetFieldOffset();
1693 Primitive::Type field_type = field_info.GetFieldType();
1694
1695 {
1696 // We use a block to end the scratch scope before the write barrier, thus
1697 // freeing the temporary registers so they can be used in `MarkGCCard`.
1698 UseScratchRegisterScope temps(GetVIXLAssembler());
1699
1700 if (kPoisonHeapReferences && field_type == Primitive::kPrimNot) {
1701 DCHECK(value.IsW());
1702 Register temp = temps.AcquireW();
1703 __ Mov(temp, value.W());
1704 GetAssembler()->PoisonHeapReference(temp.W());
1705 source = temp;
1706 }
1707
1708 if (field_info.IsVolatile()) {
1709 codegen_->StoreRelease(field_type, source, HeapOperand(obj, offset));
1710 codegen_->MaybeRecordImplicitNullCheck(instruction);
1711 } else {
1712 codegen_->Store(field_type, source, HeapOperand(obj, offset));
1713 codegen_->MaybeRecordImplicitNullCheck(instruction);
1714 }
1715 }
1716
1717 if (CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1))) {
1718 codegen_->MarkGCCard(obj, Register(value), value_can_be_null);
1719 }
1720 }
1721
1722 void InstructionCodeGeneratorARM64::HandleBinaryOp(HBinaryOperation* instr) {
1723 Primitive::Type type = instr->GetType();
1724
1725 switch (type) {
1726 case Primitive::kPrimInt:
1727 case Primitive::kPrimLong: {
1728 Register dst = OutputRegister(instr);
1729 Register lhs = InputRegisterAt(instr, 0);
1730 Operand rhs = InputOperandAt(instr, 1);
1731 if (instr->IsAdd()) {
1732 __ Add(dst, lhs, rhs);
1733 } else if (instr->IsAnd()) {
1734 __ And(dst, lhs, rhs);
1735 } else if (instr->IsOr()) {
1736 __ Orr(dst, lhs, rhs);
1737 } else if (instr->IsSub()) {
1738 __ Sub(dst, lhs, rhs);
1739 } else if (instr->IsRor()) {
1740 if (rhs.IsImmediate()) {
1741 uint32_t shift = rhs.immediate() & (lhs.SizeInBits() - 1);
1742 __ Ror(dst, lhs, shift);
1743 } else {
1744 // Ensure the shift distance is in a register of the same size as the
1745 // result. If we are rotating a long and the shift amount arrives in a W
1746 // register, no sxtw is needed before using it as an X register, because
1747 // the shift distance is taken modulo the register width (& (reg_bits - 1)).
1748 __ Ror(dst, lhs, RegisterFrom(instr->GetLocations()->InAt(1), type));
1749 }
1750 } else {
1751 DCHECK(instr->IsXor());
1752 __ Eor(dst, lhs, rhs);
1753 }
1754 break;
1755 }
1756 case Primitive::kPrimFloat:
1757 case Primitive::kPrimDouble: {
1758 FPRegister dst = OutputFPRegister(instr);
1759 FPRegister lhs = InputFPRegisterAt(instr, 0);
1760 FPRegister rhs = InputFPRegisterAt(instr, 1);
1761 if (instr->IsAdd()) {
1762 __ Fadd(dst, lhs, rhs);
1763 } else if (instr->IsSub()) {
1764 __ Fsub(dst, lhs, rhs);
1765 } else {
1766 LOG(FATAL) << "Unexpected floating-point binary operation";
1767 }
1768 break;
1769 }
1770 default:
1771 LOG(FATAL) << "Unexpected binary operation type " << type;
1772 }
1773 }
1774
1775 void LocationsBuilderARM64::HandleShift(HBinaryOperation* instr) {
1776 DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
1777
1778 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
1779 Primitive::Type type = instr->GetResultType();
1780 switch (type) {
1781 case Primitive::kPrimInt:
1782 case Primitive::kPrimLong: {
1783 locations->SetInAt(0, Location::RequiresRegister());
1784 locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1)));
1785 locations->SetOut(Location::RequiresRegister());
1786 break;
1787 }
1788 default:
1789 LOG(FATAL) << "Unexpected shift type " << type;
1790 }
1791 }
1792
1793 void InstructionCodeGeneratorARM64::HandleShift(HBinaryOperation* instr) {
1794 DCHECK(instr->IsShl() || instr->IsShr() || instr->IsUShr());
1795
1796 Primitive::Type type = instr->GetType();
1797 switch (type) {
1798 case Primitive::kPrimInt:
1799 case Primitive::kPrimLong: {
1800 Register dst = OutputRegister(instr);
1801 Register lhs = InputRegisterAt(instr, 0);
1802 Operand rhs = InputOperandAt(instr, 1);
1803 if (rhs.IsImmediate()) {
1804 uint32_t shift_value = rhs.immediate() &
1805 (type == Primitive::kPrimInt ? kMaxIntShiftDistance : kMaxLongShiftDistance);
1806 if (instr->IsShl()) {
1807 __ Lsl(dst, lhs, shift_value);
1808 } else if (instr->IsShr()) {
1809 __ Asr(dst, lhs, shift_value);
1810 } else {
1811 __ Lsr(dst, lhs, shift_value);
1812 }
1813 } else {
1814 Register rhs_reg = dst.IsX() ? rhs.reg().X() : rhs.reg().W();
1815
1816 if (instr->IsShl()) {
1817 __ Lsl(dst, lhs, rhs_reg);
1818 } else if (instr->IsShr()) {
1819 __ Asr(dst, lhs, rhs_reg);
1820 } else {
1821 __ Lsr(dst, lhs, rhs_reg);
1822 }
1823 }
1824 break;
1825 }
1826 default:
1827 LOG(FATAL) << "Unexpected shift operation type " << type;
1828 }
1829 }
1830
1831 void LocationsBuilderARM64::VisitAdd(HAdd* instruction) {
1832 HandleBinaryOp(instruction);
1833 }
1834
1835 void InstructionCodeGeneratorARM64::VisitAdd(HAdd* instruction) {
1836 HandleBinaryOp(instruction);
1837 }
1838
1839 void LocationsBuilderARM64::VisitAnd(HAnd* instruction) {
1840 HandleBinaryOp(instruction);
1841 }
1842
1843 void InstructionCodeGeneratorARM64::VisitAnd(HAnd* instruction) {
1844 HandleBinaryOp(instruction);
1845 }
1846
1847 void LocationsBuilderARM64::VisitBitwiseNegatedRight(HBitwiseNegatedRight* instr) {
1848 DCHECK(Primitive::IsIntegralType(instr->GetType())) << instr->GetType();
1849 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instr);
1850 locations->SetInAt(0, Location::RequiresRegister());
1851 // There is no immediate variant of negated bitwise instructions in AArch64.
1852 locations->SetInAt(1, Location::RequiresRegister());
1853 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1854 }
1855
1856 void InstructionCodeGeneratorARM64::VisitBitwiseNegatedRight(HBitwiseNegatedRight* instr) {
1857 Register dst = OutputRegister(instr);
1858 Register lhs = InputRegisterAt(instr, 0);
1859 Register rhs = InputRegisterAt(instr, 1);
1860
1861 switch (instr->GetOpKind()) {
1862 case HInstruction::kAnd:
1863 __ Bic(dst, lhs, rhs);
1864 break;
1865 case HInstruction::kOr:
1866 __ Orn(dst, lhs, rhs);
1867 break;
1868 case HInstruction::kXor:
1869 __ Eon(dst, lhs, rhs);
1870 break;
1871 default:
1872 LOG(FATAL) << "Unreachable";
1873 }
1874 }
1875
1876 void LocationsBuilderARM64::VisitArm64DataProcWithShifterOp(
1877 HArm64DataProcWithShifterOp* instruction) {
1878 DCHECK(instruction->GetType() == Primitive::kPrimInt ||
1879 instruction->GetType() == Primitive::kPrimLong);
1880 LocationSummary* locations =
1881 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
1882 if (instruction->GetInstrKind() == HInstruction::kNeg) {
1883 locations->SetInAt(0, Location::ConstantLocation(instruction->InputAt(0)->AsConstant()));
1884 } else {
1885 locations->SetInAt(0, Location::RequiresRegister());
1886 }
1887 locations->SetInAt(1, Location::RequiresRegister());
1888 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1889 }
1890
1891 void InstructionCodeGeneratorARM64::VisitArm64DataProcWithShifterOp(
1892 HArm64DataProcWithShifterOp* instruction) {
1893 Primitive::Type type = instruction->GetType();
1894 HInstruction::InstructionKind kind = instruction->GetInstrKind();
1895 DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
1896 Register out = OutputRegister(instruction);
1897 Register left;
1898 if (kind != HInstruction::kNeg) {
1899 left = InputRegisterAt(instruction, 0);
1900 }
1901 // If this `HArm64DataProcWithShifterOp` was created by merging a type conversion into the
1902 // shifter operand, the IR generating `right_reg` (input to the type
1903 // conversion) can have a different type from the current instruction's type,
1904 // so we manually indicate the type.
1905 Register right_reg = RegisterFrom(instruction->GetLocations()->InAt(1), type);
1906 int64_t shift_amount = instruction->GetShiftAmount() &
1907 (type == Primitive::kPrimInt ? kMaxIntShiftDistance : kMaxLongShiftDistance);
1908
1909 Operand right_operand(0);
1910
1911 HArm64DataProcWithShifterOp::OpKind op_kind = instruction->GetOpKind();
1912 if (HArm64DataProcWithShifterOp::IsExtensionOp(op_kind)) {
1913 right_operand = Operand(right_reg, helpers::ExtendFromOpKind(op_kind));
1914 } else {
1915 right_operand = Operand(right_reg, helpers::ShiftFromOpKind(op_kind), shift_amount);
1916 }
1917
1918 // Logical binary operations do not support extension operations in the
1919 // operand. Note that VIXL would still cope if such an operand were passed,
1920 // by generating the extension as a separate instruction.
1921 // `HNeg` also does not support extension. See comments in `ShifterOperandSupportsExtension()`.
1922 DCHECK(!right_operand.IsExtendedRegister() ||
1923 (kind != HInstruction::kAnd && kind != HInstruction::kOr && kind != HInstruction::kXor &&
1924 kind != HInstruction::kNeg));
1925 switch (kind) {
1926 case HInstruction::kAdd:
1927 __ Add(out, left, right_operand);
1928 break;
1929 case HInstruction::kAnd:
1930 __ And(out, left, right_operand);
1931 break;
1932 case HInstruction::kNeg:
1933 DCHECK(instruction->InputAt(0)->AsConstant()->IsArithmeticZero());
1934 __ Neg(out, right_operand);
1935 break;
1936 case HInstruction::kOr:
1937 __ Orr(out, left, right_operand);
1938 break;
1939 case HInstruction::kSub:
1940 __ Sub(out, left, right_operand);
1941 break;
1942 case HInstruction::kXor:
1943 __ Eor(out, left, right_operand);
1944 break;
1945 default:
1946 LOG(FATAL) << "Unexpected operation kind: " << kind;
1947 UNREACHABLE();
1948 }
1949 }
1950
1951 void LocationsBuilderARM64::VisitArm64IntermediateAddress(HArm64IntermediateAddress* instruction) {
1952 // The read barrier instrumentation does not support the
1953 // HArm64IntermediateAddress instruction yet.
1954 DCHECK(!kEmitCompilerReadBarrier);
1955 LocationSummary* locations =
1956 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
1957 locations->SetInAt(0, Location::RequiresRegister());
1958 locations->SetInAt(1, ARM64EncodableConstantOrRegister(instruction->GetOffset(), instruction));
1959 locations->SetOut(Location::RequiresRegister());
1960 }
1961
1962 void InstructionCodeGeneratorARM64::VisitArm64IntermediateAddress(
1963 HArm64IntermediateAddress* instruction) {
1964 // The read barrier instrumentation does not support the
1965 // HArm64IntermediateAddress instruction yet.
1966 DCHECK(!kEmitCompilerReadBarrier);
1967 __ Add(OutputRegister(instruction),
1968 InputRegisterAt(instruction, 0),
1969 Operand(InputOperandAt(instruction, 1)));
1970 }
1971
1972 void LocationsBuilderARM64::VisitMultiplyAccumulate(HMultiplyAccumulate* instr) {
1973 LocationSummary* locations =
1974 new (GetGraph()->GetArena()) LocationSummary(instr, LocationSummary::kNoCall);
1975 HInstruction* accumulator = instr->InputAt(HMultiplyAccumulate::kInputAccumulatorIndex);
1976 if (instr->GetOpKind() == HInstruction::kSub &&
1977 accumulator->IsConstant() &&
1978 accumulator->AsConstant()->IsArithmeticZero()) {
1979 // Don't allocate register for Mneg instruction.
1980 } else {
1981 locations->SetInAt(HMultiplyAccumulate::kInputAccumulatorIndex,
1982 Location::RequiresRegister());
1983 }
1984 locations->SetInAt(HMultiplyAccumulate::kInputMulLeftIndex, Location::RequiresRegister());
1985 locations->SetInAt(HMultiplyAccumulate::kInputMulRightIndex, Location::RequiresRegister());
1986 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
1987 }
1988
1989 void InstructionCodeGeneratorARM64::VisitMultiplyAccumulate(HMultiplyAccumulate* instr) {
1990 Register res = OutputRegister(instr);
1991 Register mul_left = InputRegisterAt(instr, HMultiplyAccumulate::kInputMulLeftIndex);
1992 Register mul_right = InputRegisterAt(instr, HMultiplyAccumulate::kInputMulRightIndex);
1993
1994 // Avoid emitting code that could trigger Cortex A53's erratum 835769.
1995 // This fixup should be carried out for all multiply-accumulate instructions:
1996 // madd, msub, smaddl, smsubl, umaddl and umsubl.
1997 if (instr->GetType() == Primitive::kPrimLong &&
1998 codegen_->GetInstructionSetFeatures().NeedFixCortexA53_835769()) {
1999 MacroAssembler* masm = down_cast<CodeGeneratorARM64*>(codegen_)->GetVIXLAssembler();
2000 vixl::Instruction* prev = masm->GetCursorAddress<vixl::Instruction*>() - vixl::kInstructionSize;
2001 if (prev->IsLoadOrStore()) {
2002 // Make sure we emit exactly one nop.
2003 vixl::CodeBufferCheckScope scope(masm,
2004 vixl::kInstructionSize,
2005 vixl::CodeBufferCheckScope::kCheck,
2006 vixl::CodeBufferCheckScope::kExactSize);
2007 __ nop();
2008 }
2009 }
2010
2011 if (instr->GetOpKind() == HInstruction::kAdd) {
2012 Register accumulator = InputRegisterAt(instr, HMultiplyAccumulate::kInputAccumulatorIndex);
2013 __ Madd(res, mul_left, mul_right, accumulator);
2014 } else {
2015 DCHECK(instr->GetOpKind() == HInstruction::kSub);
2016 HInstruction* accum_instr = instr->InputAt(HMultiplyAccumulate::kInputAccumulatorIndex);
2017 if (accum_instr->IsConstant() && accum_instr->AsConstant()->IsArithmeticZero()) {
2018 __ Mneg(res, mul_left, mul_right);
2019 } else {
2020 Register accumulator = InputRegisterAt(instr, HMultiplyAccumulate::kInputAccumulatorIndex);
2021 __ Msub(res, mul_left, mul_right, accumulator);
2022 }
2023 }
2024 }
2025
2026 void LocationsBuilderARM64::VisitArrayGet(HArrayGet* instruction) {
2027 bool object_array_get_with_read_barrier =
2028 kEmitCompilerReadBarrier && (instruction->GetType() == Primitive::kPrimNot);
2029 LocationSummary* locations =
2030 new (GetGraph()->GetArena()) LocationSummary(instruction,
2031 object_array_get_with_read_barrier ?
2032 LocationSummary::kCallOnSlowPath :
2033 LocationSummary::kNoCall);
2034 locations->SetInAt(0, Location::RequiresRegister());
2035 locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
2036 if (Primitive::IsFloatingPointType(instruction->GetType())) {
2037 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2038 } else {
2039 // The output overlaps in the case of an object array get with
2040 // read barriers enabled: we do not want the move to overwrite the
2041 // array's location, as we need it to emit the read barrier.
2042 locations->SetOut(
2043 Location::RequiresRegister(),
2044 object_array_get_with_read_barrier ? Location::kOutputOverlap : Location::kNoOutputOverlap);
2045 }
2046 }
2047
2048 void InstructionCodeGeneratorARM64::VisitArrayGet(HArrayGet* instruction) {
2049 Primitive::Type type = instruction->GetType();
2050 Register obj = InputRegisterAt(instruction, 0);
2051 LocationSummary* locations = instruction->GetLocations();
2052 Location index = locations->InAt(1);
2053 uint32_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(type)).Uint32Value();
2054 Location out = locations->Out();
2055
2056 MacroAssembler* masm = GetVIXLAssembler();
2057 UseScratchRegisterScope temps(masm);
2058 // Block pools between `Load` and `MaybeRecordImplicitNullCheck`.
2059 BlockPoolsScope block_pools(masm);
2060
2061 if (type == Primitive::kPrimNot && kEmitCompilerReadBarrier && kUseBakerReadBarrier) {
2062 // Object ArrayGet with Baker's read barrier case.
2063 Register temp = temps.AcquireW();
2064 // The read barrier instrumentation does not support the
2065 // HArm64IntermediateAddress instruction yet.
2066 DCHECK(!instruction->GetArray()->IsArm64IntermediateAddress());
2067 // Note that a potential implicit null check is handled in the
2068 // CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier call.
2069 codegen_->GenerateArrayLoadWithBakerReadBarrier(
2070 instruction, out, obj.W(), offset, index, temp, /* needs_null_check */ true);
2071 } else {
2072 // General case.
2073 MemOperand source = HeapOperand(obj);
2074 if (index.IsConstant()) {
2075 offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(type);
2076 source = HeapOperand(obj, offset);
2077 } else {
2078 Register temp = temps.AcquireSameSizeAs(obj);
2079 if (instruction->GetArray()->IsArm64IntermediateAddress()) {
2080 // The read barrier instrumentation does not support the
2081 // HArm64IntermediateAddress instruction yet.
2082 DCHECK(!kEmitCompilerReadBarrier);
2083 // We do not need to compute the intermediate address from the array: the
2084 // input instruction has done it already. See the comment in
2085 // `InstructionSimplifierArm64::TryExtractArrayAccessAddress()`.
2086 if (kIsDebugBuild) {
2087 HArm64IntermediateAddress* tmp = instruction->GetArray()->AsArm64IntermediateAddress();
2088 DCHECK_EQ(tmp->GetOffset()->AsIntConstant()->GetValueAsUint64(), offset);
2089 }
2090 temp = obj;
2091 } else {
2092 __ Add(temp, obj, offset);
2093 }
2094 source = HeapOperand(temp, XRegisterFrom(index), LSL, Primitive::ComponentSizeShift(type));
2095 }
2096
2097 codegen_->Load(type, OutputCPURegister(instruction), source);
2098 codegen_->MaybeRecordImplicitNullCheck(instruction);
2099
2100 if (type == Primitive::kPrimNot) {
2101 static_assert(
2102 sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
2103 "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
2104 Location obj_loc = locations->InAt(0);
2105 if (index.IsConstant()) {
2106 codegen_->MaybeGenerateReadBarrierSlow(instruction, out, out, obj_loc, offset);
2107 } else {
2108 codegen_->MaybeGenerateReadBarrierSlow(instruction, out, out, obj_loc, offset, index);
2109 }
2110 }
2111 }
2112 }
2113
2114 void LocationsBuilderARM64::VisitArrayLength(HArrayLength* instruction) {
2115 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
2116 locations->SetInAt(0, Location::RequiresRegister());
2117 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2118 }
2119
2120 void InstructionCodeGeneratorARM64::VisitArrayLength(HArrayLength* instruction) {
2121 BlockPoolsScope block_pools(GetVIXLAssembler());
2122 __ Ldr(OutputRegister(instruction),
2123 HeapOperand(InputRegisterAt(instruction, 0), mirror::Array::LengthOffset()));
2124 codegen_->MaybeRecordImplicitNullCheck(instruction);
2125 }
2126
2127 void LocationsBuilderARM64::VisitArraySet(HArraySet* instruction) {
2128 Primitive::Type value_type = instruction->GetComponentType();
2129
2130 bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
2131 bool object_array_set_with_read_barrier =
2132 kEmitCompilerReadBarrier && (value_type == Primitive::kPrimNot);
2133 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(
2134 instruction,
2135 (may_need_runtime_call_for_type_check || object_array_set_with_read_barrier) ?
2136 LocationSummary::kCallOnSlowPath :
2137 LocationSummary::kNoCall);
2138 locations->SetInAt(0, Location::RequiresRegister());
2139 locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1)));
2140 if (Primitive::IsFloatingPointType(value_type)) {
2141 locations->SetInAt(2, Location::RequiresFpuRegister());
2142 } else {
2143 locations->SetInAt(2, Location::RequiresRegister());
2144 }
2145 }
2146
2147 void InstructionCodeGeneratorARM64::VisitArraySet(HArraySet* instruction) {
2148 Primitive::Type value_type = instruction->GetComponentType();
2149 LocationSummary* locations = instruction->GetLocations();
2150 bool may_need_runtime_call_for_type_check = instruction->NeedsTypeCheck();
2151 bool needs_write_barrier =
2152 CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue());
2153
2154 Register array = InputRegisterAt(instruction, 0);
2155 CPURegister value = InputCPURegisterAt(instruction, 2);
2156 CPURegister source = value;
2157 Location index = locations->InAt(1);
2158 size_t offset = mirror::Array::DataOffset(Primitive::ComponentSize(value_type)).Uint32Value();
2159 MemOperand destination = HeapOperand(array);
2160 MacroAssembler* masm = GetVIXLAssembler();
2161 BlockPoolsScope block_pools(masm);
2162
2163 if (!needs_write_barrier) {
2164 DCHECK(!may_need_runtime_call_for_type_check);
2165 if (index.IsConstant()) {
2166 offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(value_type);
2167 destination = HeapOperand(array, offset);
2168 } else {
2169 UseScratchRegisterScope temps(masm);
2170 Register temp = temps.AcquireSameSizeAs(array);
2171 if (instruction->GetArray()->IsArm64IntermediateAddress()) {
2172 // The read barrier instrumentation does not support the
2173 // HArm64IntermediateAddress instruction yet.
2174 DCHECK(!kEmitCompilerReadBarrier);
2175 // We do not need to compute the intermediate address from the array: the
2176 // input instruction has done it already. See the comment in
2177 // `InstructionSimplifierArm64::TryExtractArrayAccessAddress()`.
2178 if (kIsDebugBuild) {
2179 HArm64IntermediateAddress* tmp = instruction->GetArray()->AsArm64IntermediateAddress();
2180 DCHECK(tmp->GetOffset()->AsIntConstant()->GetValueAsUint64() == offset);
2181 }
2182 temp = array;
2183 } else {
2184 __ Add(temp, array, offset);
2185 }
2186 destination = HeapOperand(temp,
2187 XRegisterFrom(index),
2188 LSL,
2189 Primitive::ComponentSizeShift(value_type));
2190 }
2191 codegen_->Store(value_type, value, destination);
2192 codegen_->MaybeRecordImplicitNullCheck(instruction);
2193 } else {
2194 DCHECK(needs_write_barrier);
2195 DCHECK(!instruction->GetArray()->IsArm64IntermediateAddress());
2196 vixl::Label done;
2197 SlowPathCodeARM64* slow_path = nullptr;
2198 {
2199 // We use a block to end the scratch scope before the write barrier, thus
2200 // freeing the temporary registers so they can be used in `MarkGCCard`.
2201 UseScratchRegisterScope temps(masm);
2202 Register temp = temps.AcquireSameSizeAs(array);
2203 if (index.IsConstant()) {
2204 offset += Int64ConstantFrom(index) << Primitive::ComponentSizeShift(value_type);
2205 destination = HeapOperand(array, offset);
2206 } else {
2207 destination = HeapOperand(temp,
2208 XRegisterFrom(index),
2209 LSL,
2210 Primitive::ComponentSizeShift(value_type));
2211 }
2212
2213 uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
2214 uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
2215 uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
2216
2217 if (may_need_runtime_call_for_type_check) {
2218 slow_path = new (GetGraph()->GetArena()) ArraySetSlowPathARM64(instruction);
2219 codegen_->AddSlowPath(slow_path);
2220 if (instruction->GetValueCanBeNull()) {
2221 vixl::Label non_zero;
2222 __ Cbnz(Register(value), &non_zero);
2223 if (!index.IsConstant()) {
2224 __ Add(temp, array, offset);
2225 }
2226 __ Str(wzr, destination);
2227 codegen_->MaybeRecordImplicitNullCheck(instruction);
2228 __ B(&done);
2229 __ Bind(&non_zero);
2230 }
2231
2232 if (kEmitCompilerReadBarrier) {
2233 // When read barriers are enabled, the type checking
2234 // instrumentation requires two read barriers:
2235 //
2236 // __ Mov(temp2, temp);
2237 // // /* HeapReference<Class> */ temp = temp->component_type_
2238 // __ Ldr(temp, HeapOperand(temp, component_offset));
2239 // codegen_->GenerateReadBarrierSlow(
2240 // instruction, temp_loc, temp_loc, temp2_loc, component_offset);
2241 //
2242 // // /* HeapReference<Class> */ temp2 = value->klass_
2243 // __ Ldr(temp2, HeapOperand(Register(value), class_offset));
2244 // codegen_->GenerateReadBarrierSlow(
2245 // instruction, temp2_loc, temp2_loc, value_loc, class_offset, temp_loc);
2246 //
2247 // __ Cmp(temp, temp2);
2248 //
2249 // However, the second read barrier may trash `temp`, as it
2250 // is a temporary register, and as such would not be saved
2251 // along with live registers before calling the runtime (nor
2252 // restored afterwards). So in this case, we bail out and
2253 // delegate the work to the array set slow path.
2254 //
2255 // TODO: Extend the register allocator to support a new
2256 // "(locally) live temp" location so as to avoid always
2257 // going into the slow path when read barriers are enabled.
2258 __ B(slow_path->GetEntryLabel());
2259 } else {
2260 Register temp2 = temps.AcquireSameSizeAs(array);
2261 // /* HeapReference<Class> */ temp = array->klass_
2262 __ Ldr(temp, HeapOperand(array, class_offset));
2263 codegen_->MaybeRecordImplicitNullCheck(instruction);
2264 GetAssembler()->MaybeUnpoisonHeapReference(temp);
2265
2266 // /* HeapReference<Class> */ temp = temp->component_type_
2267 __ Ldr(temp, HeapOperand(temp, component_offset));
2268 // /* HeapReference<Class> */ temp2 = value->klass_
2269 __ Ldr(temp2, HeapOperand(Register(value), class_offset));
2270 // If heap poisoning is enabled, no need to unpoison `temp`
2271 // nor `temp2`, as we are comparing two poisoned references.
2272 __ Cmp(temp, temp2);
2273
2274 if (instruction->StaticTypeOfArrayIsObjectArray()) {
2275 vixl::Label do_put;
2276 __ B(eq, &do_put);
2277 // If heap poisoning is enabled, the `temp` reference has
2278 // not been unpoisoned yet; unpoison it now.
2279 GetAssembler()->MaybeUnpoisonHeapReference(temp);
2280
2281 // /* HeapReference<Class> */ temp = temp->super_class_
2282 __ Ldr(temp, HeapOperand(temp, super_offset));
2283 // If heap poisoning is enabled, no need to unpoison
2284 // `temp`, as we are comparing against null below.
2285 __ Cbnz(temp, slow_path->GetEntryLabel());
2286 __ Bind(&do_put);
2287 } else {
2288 __ B(ne, slow_path->GetEntryLabel());
2289 }
2290 temps.Release(temp2);
2291 }
2292 }
2293
2294 if (kPoisonHeapReferences) {
2295 Register temp2 = temps.AcquireSameSizeAs(array);
2296 DCHECK(value.IsW());
2297 __ Mov(temp2, value.W());
2298 GetAssembler()->PoisonHeapReference(temp2);
2299 source = temp2;
2300 }
2301
2302 if (!index.IsConstant()) {
2303 __ Add(temp, array, offset);
2304 }
2305 __ Str(source, destination);
2306
2307 if (!may_need_runtime_call_for_type_check) {
2308 codegen_->MaybeRecordImplicitNullCheck(instruction);
2309 }
2310 }
2311
2312 codegen_->MarkGCCard(array, value.W(), instruction->GetValueCanBeNull());
2313
2314 if (done.IsLinked()) {
2315 __ Bind(&done);
2316 }
2317
2318 if (slow_path != nullptr) {
2319 __ Bind(slow_path->GetExitLabel());
2320 }
2321 }
2322 }
2323
2324 void LocationsBuilderARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
2325 LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
2326 ? LocationSummary::kCallOnSlowPath
2327 : LocationSummary::kNoCall;
2328 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
2329 locations->SetInAt(0, Location::RequiresRegister());
2330 locations->SetInAt(1, ARM64EncodableConstantOrRegister(instruction->InputAt(1), instruction));
2331 if (instruction->HasUses()) {
2332 locations->SetOut(Location::SameAsFirstInput());
2333 }
2334 }
2335
2336 void InstructionCodeGeneratorARM64::VisitBoundsCheck(HBoundsCheck* instruction) {
2337 BoundsCheckSlowPathARM64* slow_path =
2338 new (GetGraph()->GetArena()) BoundsCheckSlowPathARM64(instruction);
2339 codegen_->AddSlowPath(slow_path);
2340
2341 __ Cmp(InputRegisterAt(instruction, 0), InputOperandAt(instruction, 1));
2342 __ B(slow_path->GetEntryLabel(), hs);
2343 }
2344
2345 void LocationsBuilderARM64::VisitClinitCheck(HClinitCheck* check) {
2346 LocationSummary* locations =
2347 new (GetGraph()->GetArena()) LocationSummary(check, LocationSummary::kCallOnSlowPath);
2348 locations->SetInAt(0, Location::RequiresRegister());
2349 if (check->HasUses()) {
2350 locations->SetOut(Location::SameAsFirstInput());
2351 }
2352 }
2353
2354 void InstructionCodeGeneratorARM64::VisitClinitCheck(HClinitCheck* check) {
2355 // We assume the class is not null.
2356 SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64(
2357 check->GetLoadClass(), check, check->GetDexPc(), true);
2358 codegen_->AddSlowPath(slow_path);
2359 GenerateClassInitializationCheck(slow_path, InputRegisterAt(check, 0));
2360 }
2361
2362 static bool IsFloatingPointZeroConstant(HInstruction* inst) {
2363 return (inst->IsFloatConstant() && (inst->AsFloatConstant()->IsArithmeticZero()))
2364 || (inst->IsDoubleConstant() && (inst->AsDoubleConstant()->IsArithmeticZero()));
2365 }
2366
2367 void InstructionCodeGeneratorARM64::GenerateFcmp(HInstruction* instruction) {
2368 FPRegister lhs_reg = InputFPRegisterAt(instruction, 0);
2369 Location rhs_loc = instruction->GetLocations()->InAt(1);
2370 if (rhs_loc.IsConstant()) {
2371 // 0.0 is the only immediate that can be encoded directly in
2372 // an FCMP instruction.
2373 //
2374 // Both the JLS (section 15.20.1) and the JVMS (section 6.5)
2375 // specify that in a floating-point comparison, positive zero
2376 // and negative zero are considered equal, so we can use the
2377 // literal 0.0 for both cases here.
2378 //
2379 // Note however that some methods (Float.equals, Float.compare,
2380 // Float.compareTo, Double.equals, Double.compare,
2381 // Double.compareTo, Math.max, Math.min, StrictMath.max,
2382 // StrictMath.min) consider 0.0 to be (strictly) greater than
2383 // -0.0. So if we ever translate calls to these methods into a
2384 // HCompare instruction, we must handle the -0.0 case with
2385 // care here.
2386 DCHECK(IsFloatingPointZeroConstant(rhs_loc.GetConstant()));
2387 __ Fcmp(lhs_reg, 0.0);
2388 } else {
2389 __ Fcmp(lhs_reg, InputFPRegisterAt(instruction, 1));
2390 }
2391 }
2392
2393 void LocationsBuilderARM64::VisitCompare(HCompare* compare) {
2394 LocationSummary* locations =
2395 new (GetGraph()->GetArena()) LocationSummary(compare, LocationSummary::kNoCall);
2396 Primitive::Type in_type = compare->InputAt(0)->GetType();
2397 switch (in_type) {
2398 case Primitive::kPrimBoolean:
2399 case Primitive::kPrimByte:
2400 case Primitive::kPrimShort:
2401 case Primitive::kPrimChar:
2402 case Primitive::kPrimInt:
2403 case Primitive::kPrimLong: {
2404 locations->SetInAt(0, Location::RequiresRegister());
2405 locations->SetInAt(1, ARM64EncodableConstantOrRegister(compare->InputAt(1), compare));
2406 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2407 break;
2408 }
2409 case Primitive::kPrimFloat:
2410 case Primitive::kPrimDouble: {
2411 locations->SetInAt(0, Location::RequiresFpuRegister());
2412 locations->SetInAt(1,
2413 IsFloatingPointZeroConstant(compare->InputAt(1))
2414 ? Location::ConstantLocation(compare->InputAt(1)->AsConstant())
2415 : Location::RequiresFpuRegister());
2416 locations->SetOut(Location::RequiresRegister());
2417 break;
2418 }
2419 default:
2420 LOG(FATAL) << "Unexpected type for compare operation " << in_type;
2421 }
2422 }
2423
2424 void InstructionCodeGeneratorARM64::VisitCompare(HCompare* compare) {
2425 Primitive::Type in_type = compare->InputAt(0)->GetType();
2426
2427 // 0 if: left == right
2428 // 1 if: left > right
2429 // -1 if: left < right
2430 switch (in_type) {
2431 case Primitive::kPrimBoolean:
2432 case Primitive::kPrimByte:
2433 case Primitive::kPrimShort:
2434 case Primitive::kPrimChar:
2435 case Primitive::kPrimInt:
2436 case Primitive::kPrimLong: {
2437 Register result = OutputRegister(compare);
2438 Register left = InputRegisterAt(compare, 0);
2439 Operand right = InputOperandAt(compare, 1);
2440 __ Cmp(left, right);
2441 __ Cset(result, ne); // result == +1 if NE or 0 otherwise
2442 __ Cneg(result, result, lt); // result == -1 if LT or unchanged otherwise
2443 break;
2444 }
2445 case Primitive::kPrimFloat:
2446 case Primitive::kPrimDouble: {
2447 Register result = OutputRegister(compare);
2448 GenerateFcmp(compare);
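// For unordered (NaN) inputs Fcmp sets the C and V flags, so `ne` yields +1
// here; the biased condition below then leaves +1 for gt-bias compares and
// negates to -1 for lt-bias ones.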
2449 __ Cset(result, ne);
2450 __ Cneg(result, result, ARM64FPCondition(kCondLT, compare->IsGtBias()));
2451 break;
2452 }
2453 default:
2454 LOG(FATAL) << "Unimplemented compare type " << in_type;
2455 }
2456 }
2457
2458 void LocationsBuilderARM64::HandleCondition(HCondition* instruction) {
2459 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
2460
2461 if (Primitive::IsFloatingPointType(instruction->InputAt(0)->GetType())) {
2462 locations->SetInAt(0, Location::RequiresFpuRegister());
2463 locations->SetInAt(1,
2464 IsFloatingPointZeroConstant(instruction->InputAt(1))
2465 ? Location::ConstantLocation(instruction->InputAt(1)->AsConstant())
2466 : Location::RequiresFpuRegister());
2467 } else {
2468 // Integer cases.
2469 locations->SetInAt(0, Location::RequiresRegister());
2470 locations->SetInAt(1, ARM64EncodableConstantOrRegister(instruction->InputAt(1), instruction));
2471 }
2472
2473 if (!instruction->IsEmittedAtUseSite()) {
2474 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2475 }
2476 }
2477
2478 void InstructionCodeGeneratorARM64::HandleCondition(HCondition* instruction) {
2479 if (instruction->IsEmittedAtUseSite()) {
2480 return;
2481 }
2482
2483 LocationSummary* locations = instruction->GetLocations();
2484 Register res = RegisterFrom(locations->Out(), instruction->GetType());
2485 IfCondition if_cond = instruction->GetCondition();
2486
2487 if (Primitive::IsFloatingPointType(instruction->InputAt(0)->GetType())) {
2488 GenerateFcmp(instruction);
2489 __ Cset(res, ARM64FPCondition(if_cond, instruction->IsGtBias()));
2490 } else {
2491 // Integer cases.
2492 Register lhs = InputRegisterAt(instruction, 0);
2493 Operand rhs = InputOperandAt(instruction, 1);
2494 __ Cmp(lhs, rhs);
2495 __ Cset(res, ARM64Condition(if_cond));
2496 }
2497 }
2498
2499 #define FOR_EACH_CONDITION_INSTRUCTION(M) \
2500 M(Equal) \
2501 M(NotEqual) \
2502 M(LessThan) \
2503 M(LessThanOrEqual) \
2504 M(GreaterThan) \
2505 M(GreaterThanOrEqual) \
2506 M(Below) \
2507 M(BelowOrEqual) \
2508 M(Above) \
2509 M(AboveOrEqual)
2510 #define DEFINE_CONDITION_VISITORS(Name) \
2511 void LocationsBuilderARM64::Visit##Name(H##Name* comp) { HandleCondition(comp); } \
2512 void InstructionCodeGeneratorARM64::Visit##Name(H##Name* comp) { HandleCondition(comp); }
2513 FOR_EACH_CONDITION_INSTRUCTION(DEFINE_CONDITION_VISITORS)
2514 #undef DEFINE_CONDITION_VISITORS
2515 #undef FOR_EACH_CONDITION_INSTRUCTION
2516
2517 void InstructionCodeGeneratorARM64::DivRemOneOrMinusOne(HBinaryOperation* instruction) {
2518 DCHECK(instruction->IsDiv() || instruction->IsRem());
2519
2520 LocationSummary* locations = instruction->GetLocations();
2521 Location second = locations->InAt(1);
2522 DCHECK(second.IsConstant());
2523
2524 Register out = OutputRegister(instruction);
2525 Register dividend = InputRegisterAt(instruction, 0);
2526 int64_t imm = Int64FromConstant(second.GetConstant());
2527 DCHECK(imm == 1 || imm == -1);
2528
2529 if (instruction->IsRem()) {
2530 __ Mov(out, 0);
2531 } else {
2532 if (imm == 1) {
2533 __ Mov(out, dividend);
2534 } else {
2535 __ Neg(out, dividend);
2536 }
2537 }
2538 }
2539
2540 void InstructionCodeGeneratorARM64::DivRemByPowerOfTwo(HBinaryOperation* instruction) {
2541 DCHECK(instruction->IsDiv() || instruction->IsRem());
2542
2543 LocationSummary* locations = instruction->GetLocations();
2544 Location second = locations->InAt(1);
2545 DCHECK(second.IsConstant());
2546
2547 Register out = OutputRegister(instruction);
2548 Register dividend = InputRegisterAt(instruction, 0);
2549 int64_t imm = Int64FromConstant(second.GetConstant());
2550 uint64_t abs_imm = static_cast<uint64_t>(AbsOrMin(imm));
2551 int ctz_imm = CTZ(abs_imm);
2552
2553 UseScratchRegisterScope temps(GetVIXLAssembler());
2554 Register temp = temps.AcquireSameSizeAs(out);
2555
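// Division here rounds toward zero: for a negative dividend we first add
// (abs_imm - 1), then shift. E.g. with imm == 4, -7 becomes (-7 + 3) >> 2 == -1
// instead of the floor result -7 >> 2 == -2. The remainder path applies the
// same bias (abs_imm - 1 for negative dividends, 0 otherwise), masks with
// (abs_imm - 1) and subtracts the bias again, so the result keeps the sign of
// the dividend.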
2556 if (instruction->IsDiv()) {
2557 __ Add(temp, dividend, abs_imm - 1);
2558 __ Cmp(dividend, 0);
2559 __ Csel(out, temp, dividend, lt);
2560 if (imm > 0) {
2561 __ Asr(out, out, ctz_imm);
2562 } else {
2563 __ Neg(out, Operand(out, ASR, ctz_imm));
2564 }
2565 } else {
2566 int bits = instruction->GetResultType() == Primitive::kPrimInt ? 32 : 64;
2567 __ Asr(temp, dividend, bits - 1);
2568 __ Lsr(temp, temp, bits - ctz_imm);
2569 __ Add(out, dividend, temp);
2570 __ And(out, out, abs_imm - 1);
2571 __ Sub(out, out, temp);
2572 }
2573 }
2574
2575 void InstructionCodeGeneratorARM64::GenerateDivRemWithAnyConstant(HBinaryOperation* instruction) {
2576 DCHECK(instruction->IsDiv() || instruction->IsRem());
2577
2578 LocationSummary* locations = instruction->GetLocations();
2579 Location second = locations->InAt(1);
2580 DCHECK(second.IsConstant());
2581
2582 Register out = OutputRegister(instruction);
2583 Register dividend = InputRegisterAt(instruction, 0);
2584 int64_t imm = Int64FromConstant(second.GetConstant());
2585
2586 Primitive::Type type = instruction->GetResultType();
2587 DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
2588
2589 int64_t magic;
2590 int shift;
2591 CalculateMagicAndShiftForDivRem(imm, type == Primitive::kPrimLong /* is_long */, &magic, &shift);
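// This is the usual multiply-by-reciprocal ("magic number") division: take
// the high half of dividend * magic, correct it by +/- dividend when the
// magic constant has the opposite sign from the divisor, shift right
// arithmetically, and add 1 when the intermediate result is negative (the
// final Sub of its own sign bit). For Rem, the quotient is then folded back
// with Msub: rem = dividend - quotient * imm.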
2592
2593 UseScratchRegisterScope temps(GetVIXLAssembler());
2594 Register temp = temps.AcquireSameSizeAs(out);
2595
2596 // temp = get_high(dividend * magic)
2597 __ Mov(temp, magic);
2598 if (type == Primitive::kPrimLong) {
2599 __ Smulh(temp, dividend, temp);
2600 } else {
2601 __ Smull(temp.X(), dividend, temp);
2602 __ Lsr(temp.X(), temp.X(), 32);
2603 }
2604
2605 if (imm > 0 && magic < 0) {
2606 __ Add(temp, temp, dividend);
2607 } else if (imm < 0 && magic > 0) {
2608 __ Sub(temp, temp, dividend);
2609 }
2610
2611 if (shift != 0) {
2612 __ Asr(temp, temp, shift);
2613 }
2614
2615 if (instruction->IsDiv()) {
2616 __ Sub(out, temp, Operand(temp, ASR, type == Primitive::kPrimLong ? 63 : 31));
2617 } else {
2618 __ Sub(temp, temp, Operand(temp, ASR, type == Primitive::kPrimLong ? 63 : 31));
2619 // TODO: Strength reduction for msub.
2620 Register temp_imm = temps.AcquireSameSizeAs(out);
2621 __ Mov(temp_imm, imm);
2622 __ Msub(out, temp, temp_imm, dividend);
2623 }
2624 }
2625
2626 void InstructionCodeGeneratorARM64::GenerateDivRemIntegral(HBinaryOperation* instruction) {
2627 DCHECK(instruction->IsDiv() || instruction->IsRem());
2628 Primitive::Type type = instruction->GetResultType();
2629 DCHECK(type == Primitive::kPrimInt || type == Primitive::kPrimLong);
2630
2631 LocationSummary* locations = instruction->GetLocations();
2632 Register out = OutputRegister(instruction);
2633 Location second = locations->InAt(1);
2634
2635 if (second.IsConstant()) {
2636 int64_t imm = Int64FromConstant(second.GetConstant());
2637
2638 if (imm == 0) {
2639 // Do not generate anything. DivZeroCheck would prevent any code from being executed.
2640 } else if (imm == 1 || imm == -1) {
2641 DivRemOneOrMinusOne(instruction);
2642 } else if (IsPowerOfTwo(AbsOrMin(imm))) {
2643 DivRemByPowerOfTwo(instruction);
2644 } else {
2645 DCHECK(imm <= -2 || imm >= 2);
2646 GenerateDivRemWithAnyConstant(instruction);
2647 }
2648 } else {
2649 Register dividend = InputRegisterAt(instruction, 0);
2650 Register divisor = InputRegisterAt(instruction, 1);
2651 if (instruction->IsDiv()) {
2652 __ Sdiv(out, dividend, divisor);
2653 } else {
2654 UseScratchRegisterScope temps(GetVIXLAssembler());
2655 Register temp = temps.AcquireSameSizeAs(out);
2656 __ Sdiv(temp, dividend, divisor);
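// rem = dividend - (dividend / divisor) * divisor.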
2657 __ Msub(out, temp, divisor, dividend);
2658 }
2659 }
2660 }
2661
2662 void LocationsBuilderARM64::VisitDiv(HDiv* div) {
2663 LocationSummary* locations =
2664 new (GetGraph()->GetArena()) LocationSummary(div, LocationSummary::kNoCall);
2665 switch (div->GetResultType()) {
2666 case Primitive::kPrimInt:
2667 case Primitive::kPrimLong:
2668 locations->SetInAt(0, Location::RequiresRegister());
2669 locations->SetInAt(1, Location::RegisterOrConstant(div->InputAt(1)));
2670 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
2671 break;
2672
2673 case Primitive::kPrimFloat:
2674 case Primitive::kPrimDouble:
2675 locations->SetInAt(0, Location::RequiresFpuRegister());
2676 locations->SetInAt(1, Location::RequiresFpuRegister());
2677 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
2678 break;
2679
2680 default:
2681 LOG(FATAL) << "Unexpected div type " << div->GetResultType();
2682 }
2683 }
2684
2685 void InstructionCodeGeneratorARM64::VisitDiv(HDiv* div) {
2686 Primitive::Type type = div->GetResultType();
2687 switch (type) {
2688 case Primitive::kPrimInt:
2689 case Primitive::kPrimLong:
2690 GenerateDivRemIntegral(div);
2691 break;
2692
2693 case Primitive::kPrimFloat:
2694 case Primitive::kPrimDouble:
2695 __ Fdiv(OutputFPRegister(div), InputFPRegisterAt(div, 0), InputFPRegisterAt(div, 1));
2696 break;
2697
2698 default:
2699 LOG(FATAL) << "Unexpected div type " << type;
2700 }
2701 }
2702
2703 void LocationsBuilderARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
2704 LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
2705 ? LocationSummary::kCallOnSlowPath
2706 : LocationSummary::kNoCall;
2707 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
2708 locations->SetInAt(0, Location::RegisterOrConstant(instruction->InputAt(0)));
2709 if (instruction->HasUses()) {
2710 locations->SetOut(Location::SameAsFirstInput());
2711 }
2712 }
2713
2714 void InstructionCodeGeneratorARM64::VisitDivZeroCheck(HDivZeroCheck* instruction) {
2715 SlowPathCodeARM64* slow_path =
2716 new (GetGraph()->GetArena()) DivZeroCheckSlowPathARM64(instruction);
2717 codegen_->AddSlowPath(slow_path);
2718 Location value = instruction->GetLocations()->InAt(0);
2719
2720 Primitive::Type type = instruction->GetType();
2721
2722 if (!Primitive::IsIntegralType(type)) {
2723 LOG(FATAL) << "Unexpected type " << type << " for DivZeroCheck.";
2724 return;
2725 }
2726
2727 if (value.IsConstant()) {
2728 int64_t divisor = Int64ConstantFrom(value);
2729 if (divisor == 0) {
2730 __ B(slow_path->GetEntryLabel());
2731 } else {
2732 // A division by a non-zero constant is valid. We don't need to perform
2733 // any check, so simply fall through.
2734 }
2735 } else {
2736 __ Cbz(InputRegisterAt(instruction, 0), slow_path->GetEntryLabel());
2737 }
2738 }
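// Illustrative output (register choice invented): a non-constant divisor in
// w2 costs a single
//   cbz w2, <DivZeroCheckSlowPath entry>
// whereas a constant non-zero divisor emits nothing and a constant zero
// divisor becomes an unconditional branch to the slow path.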
2739
2740 void LocationsBuilderARM64::VisitDoubleConstant(HDoubleConstant* constant) {
2741 LocationSummary* locations =
2742 new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
2743 locations->SetOut(Location::ConstantLocation(constant));
2744 }
2745
2746 void InstructionCodeGeneratorARM64::VisitDoubleConstant(
2747 HDoubleConstant* constant ATTRIBUTE_UNUSED) {
2748 // Will be generated at use site.
2749 }
2750
2751 void LocationsBuilderARM64::VisitExit(HExit* exit) {
2752 exit->SetLocations(nullptr);
2753 }
2754
2755 void InstructionCodeGeneratorARM64::VisitExit(HExit* exit ATTRIBUTE_UNUSED) {
2756 }
2757
2758 void LocationsBuilderARM64::VisitFloatConstant(HFloatConstant* constant) {
2759 LocationSummary* locations =
2760 new (GetGraph()->GetArena()) LocationSummary(constant, LocationSummary::kNoCall);
2761 locations->SetOut(Location::ConstantLocation(constant));
2762 }
2763
2764 void InstructionCodeGeneratorARM64::VisitFloatConstant(HFloatConstant* constant ATTRIBUTE_UNUSED) {
2765 // Will be generated at use site.
2766 }
2767
2768 void InstructionCodeGeneratorARM64::HandleGoto(HInstruction* got, HBasicBlock* successor) {
2769 DCHECK(!successor->IsExitBlock());
2770 HBasicBlock* block = got->GetBlock();
2771 HInstruction* previous = got->GetPrevious();
2772 HLoopInformation* info = block->GetLoopInformation();
2773
2774 if (info != nullptr && info->IsBackEdge(*block) && info->HasSuspendCheck()) {
2775 codegen_->ClearSpillSlotsFromLoopPhisInStackMap(info->GetSuspendCheck());
2776 GenerateSuspendCheck(info->GetSuspendCheck(), successor);
2777 return;
2778 }
2779 if (block->IsEntryBlock() && (previous != nullptr) && previous->IsSuspendCheck()) {
2780 GenerateSuspendCheck(previous->AsSuspendCheck(), nullptr);
2781 }
2782 if (!codegen_->GoesToNextBlock(block, successor)) {
2783 __ B(codegen_->GetLabelOf(successor));
2784 }
2785 }
2786
2787 void LocationsBuilderARM64::VisitGoto(HGoto* got) {
2788 got->SetLocations(nullptr);
2789 }
2790
2791 void InstructionCodeGeneratorARM64::VisitGoto(HGoto* got) {
2792 HandleGoto(got, got->GetSuccessor());
2793 }
2794
2795 void LocationsBuilderARM64::VisitTryBoundary(HTryBoundary* try_boundary) {
2796 try_boundary->SetLocations(nullptr);
2797 }
2798
2799 void InstructionCodeGeneratorARM64::VisitTryBoundary(HTryBoundary* try_boundary) {
2800 HBasicBlock* successor = try_boundary->GetNormalFlowSuccessor();
2801 if (!successor->IsExitBlock()) {
2802 HandleGoto(try_boundary, successor);
2803 }
2804 }
2805
2806 void InstructionCodeGeneratorARM64::GenerateTestAndBranch(HInstruction* instruction,
2807 size_t condition_input_index,
2808 vixl::Label* true_target,
2809 vixl::Label* false_target) {
2810 // FP branching requires both targets to be explicit. If either of the targets
2811 // is nullptr (fallthrough), use and bind `fallthrough_target` instead.
2812 vixl::Label fallthrough_target;
2813 HInstruction* cond = instruction->InputAt(condition_input_index);
2814
2815 if (true_target == nullptr && false_target == nullptr) {
2816 // Nothing to do. The code always falls through.
2817 return;
2818 } else if (cond->IsIntConstant()) {
2819 // Constant condition, statically compared against "true" (integer value 1).
2820 if (cond->AsIntConstant()->IsTrue()) {
2821 if (true_target != nullptr) {
2822 __ B(true_target);
2823 }
2824 } else {
2825 DCHECK(cond->AsIntConstant()->IsFalse()) << cond->AsIntConstant()->GetValue();
2826 if (false_target != nullptr) {
2827 __ B(false_target);
2828 }
2829 }
2830 return;
2831 }
2832
2833 // The following code generates these patterns:
2834 // (1) true_target == nullptr && false_target != nullptr
2835 // - opposite condition true => branch to false_target
2836 // (2) true_target != nullptr && false_target == nullptr
2837 // - condition true => branch to true_target
2838 // (3) true_target != nullptr && false_target != nullptr
2839 // - condition true => branch to true_target
2840 // - branch to false_target
2841 if (IsBooleanValueOrMaterializedCondition(cond)) {
2842 // The condition instruction has been materialized, compare the output to 0.
2843 Location cond_val = instruction->GetLocations()->InAt(condition_input_index);
2844 DCHECK(cond_val.IsRegister());
2845 if (true_target == nullptr) {
2846 __ Cbz(InputRegisterAt(instruction, condition_input_index), false_target);
2847 } else {
2848 __ Cbnz(InputRegisterAt(instruction, condition_input_index), true_target);
2849 }
2850 } else {
2851 // The condition instruction has not been materialized, use its inputs as
2852 // the comparison and its condition as the branch condition.
2853 HCondition* condition = cond->AsCondition();
2854
2855 Primitive::Type type = condition->InputAt(0)->GetType();
2856 if (Primitive::IsFloatingPointType(type)) {
2857 GenerateFcmp(condition);
2858 if (true_target == nullptr) {
2859 IfCondition opposite_condition = condition->GetOppositeCondition();
2860 __ B(ARM64FPCondition(opposite_condition, condition->IsGtBias()), false_target);
2861 } else {
2862 __ B(ARM64FPCondition(condition->GetCondition(), condition->IsGtBias()), true_target);
2863 }
2864 } else {
2865 // Integer cases.
2866 Register lhs = InputRegisterAt(condition, 0);
2867 Operand rhs = InputOperandAt(condition, 1);
2868
2869 Condition arm64_cond;
2870 vixl::Label* non_fallthrough_target;
2871 if (true_target == nullptr) {
2872 arm64_cond = ARM64Condition(condition->GetOppositeCondition());
2873 non_fallthrough_target = false_target;
2874 } else {
2875 arm64_cond = ARM64Condition(condition->GetCondition());
2876 non_fallthrough_target = true_target;
2877 }
2878
2879 if ((arm64_cond == eq || arm64_cond == ne || arm64_cond == lt || arm64_cond == ge) &&
2880 rhs.IsImmediate() && (rhs.immediate() == 0)) {
2881 switch (arm64_cond) {
2882 case eq:
2883 __ Cbz(lhs, non_fallthrough_target);
2884 break;
2885 case ne:
2886 __ Cbnz(lhs, non_fallthrough_target);
2887 break;
2888 case lt:
2889 // Test the sign bit and branch accordingly.
2890 __ Tbnz(lhs, (lhs.IsX() ? kXRegSize : kWRegSize) - 1, non_fallthrough_target);
2891 break;
2892 case ge:
2893 // Test the sign bit and branch accordingly.
2894 __ Tbz(lhs, (lhs.IsX() ? kXRegSize : kWRegSize) - 1, non_fallthrough_target);
2895 break;
2896 default:
2897 // Without the `static_cast` the compiler emits an error for
2898 // `-Werror=sign-promo`.
2899 LOG(FATAL) << "Unexpected condition: " << static_cast<int>(arm64_cond);
2900 }
2901 } else {
2902 __ Cmp(lhs, rhs);
2903 __ B(arm64_cond, non_fallthrough_target);
2904 }
2905 }
2906 }
2907
2908 // If neither branch falls through (case 3), the conditional branch to `true_target`
2909 // was already emitted (case 2) and we need to emit a jump to `false_target`.
2910 if (true_target != nullptr && false_target != nullptr) {
2911 __ B(false_target);
2912 }
2913
2914 if (fallthrough_target.IsLinked()) {
2915 __ Bind(&fallthrough_target);
2916 }
2917 }
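// Illustrative lowering of the compare-against-zero fast paths above
// (register numbers invented): for an integer condition `x < 0` feeding an
// HIf, no CMP is emitted; the sign bit is tested directly:
//   tbnz w0, #31, <true_target>
// Likewise `x == 0` uses cbz, `x != 0` uses cbnz, and `x >= 0` uses tbz on
// the sign bit. Every other integer comparison falls back to
//   cmp  w0, <rhs>
//   b.<cond> <non-fallthrough target>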
2918
2919 void LocationsBuilderARM64::VisitIf(HIf* if_instr) {
2920 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(if_instr);
2921 if (IsBooleanValueOrMaterializedCondition(if_instr->InputAt(0))) {
2922 locations->SetInAt(0, Location::RequiresRegister());
2923 }
2924 }
2925
2926 void InstructionCodeGeneratorARM64::VisitIf(HIf* if_instr) {
2927 HBasicBlock* true_successor = if_instr->IfTrueSuccessor();
2928 HBasicBlock* false_successor = if_instr->IfFalseSuccessor();
2929 vixl::Label* true_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), true_successor) ?
2930 nullptr : codegen_->GetLabelOf(true_successor);
2931 vixl::Label* false_target = codegen_->GoesToNextBlock(if_instr->GetBlock(), false_successor) ?
2932 nullptr : codegen_->GetLabelOf(false_successor);
2933 GenerateTestAndBranch(if_instr, /* condition_input_index */ 0, true_target, false_target);
2934 }
2935
2936 void LocationsBuilderARM64::VisitDeoptimize(HDeoptimize* deoptimize) {
2937 LocationSummary* locations = new (GetGraph()->GetArena())
2938 LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
2939 if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) {
2940 locations->SetInAt(0, Location::RequiresRegister());
2941 }
2942 }
2943
2944 void InstructionCodeGeneratorARM64::VisitDeoptimize(HDeoptimize* deoptimize) {
2945 SlowPathCodeARM64* slow_path =
2946 deopt_slow_paths_.NewSlowPath<DeoptimizationSlowPathARM64>(deoptimize);
2947 GenerateTestAndBranch(deoptimize,
2948 /* condition_input_index */ 0,
2949 slow_path->GetEntryLabel(),
2950 /* false_target */ nullptr);
2951 }
2952
2953 enum SelectVariant {
2954 kCsel,
2955 kCselFalseConst,
2956 kCselTrueConst,
2957 kFcsel,
2958 };
2959
2960 static inline bool IsConditionOnFloatingPointValues(HInstruction* condition) {
2961 return condition->IsCondition() &&
2962 Primitive::IsFloatingPointType(condition->InputAt(0)->GetType());
2963 }
2964
2965 static inline bool IsRecognizedCselConstant(HInstruction* constant) {
2966 if (constant->IsConstant()) {
2967 int64_t value = Int64FromConstant(constant->AsConstant());
2968 if ((value == -1) || (value == 0) || (value == 1)) {
2969 return true;
2970 }
2971 }
2972 return false;
2973 }
2974
2975 static inline SelectVariant GetSelectVariant(HSelect* select) {
2976 if (Primitive::IsFloatingPointType(select->GetType())) {
2977 return kFcsel;
2978 } else if (IsRecognizedCselConstant(select->GetFalseValue())) {
2979 return kCselFalseConst;
2980 } else if (IsRecognizedCselConstant(select->GetTrueValue())) {
2981 return kCselTrueConst;
2982 } else {
2983 return kCsel;
2984 }
2985 }
2986
2987 static inline bool HasSwappedInputs(SelectVariant variant) {
2988 return variant == kCselTrueConst;
2989 }
2990
2991 static inline Condition GetConditionForSelect(HCondition* condition, SelectVariant variant) {
2992 IfCondition cond = HasSwappedInputs(variant) ? condition->GetOppositeCondition()
2993 : condition->GetCondition();
2994 return IsConditionOnFloatingPointValues(condition) ? ARM64FPCondition(cond, condition->IsGtBias())
2995 : ARM64Condition(cond);
2996 }
2997
2998 void LocationsBuilderARM64::VisitSelect(HSelect* select) {
2999 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(select);
3000 switch (GetSelectVariant(select)) {
3001 case kCsel:
3002 locations->SetInAt(0, Location::RequiresRegister());
3003 locations->SetInAt(1, Location::RequiresRegister());
3004 locations->SetOut(Location::RequiresRegister());
3005 break;
3006 case kCselFalseConst:
3007 locations->SetInAt(0, Location::ConstantLocation(select->InputAt(0)->AsConstant()));
3008 locations->SetInAt(1, Location::RequiresRegister());
3009 locations->SetOut(Location::RequiresRegister());
3010 break;
3011 case kCselTrueConst:
3012 locations->SetInAt(0, Location::RequiresRegister());
3013 locations->SetInAt(1, Location::ConstantLocation(select->InputAt(1)->AsConstant()));
3014 locations->SetOut(Location::RequiresRegister());
3015 break;
3016 case kFcsel:
3017 locations->SetInAt(0, Location::RequiresFpuRegister());
3018 locations->SetInAt(1, Location::RequiresFpuRegister());
3019 locations->SetOut(Location::RequiresFpuRegister());
3020 break;
3021 }
3022 if (IsBooleanValueOrMaterializedCondition(select->GetCondition())) {
3023 locations->SetInAt(2, Location::RequiresRegister());
3024 }
3025 }
3026
3027 void InstructionCodeGeneratorARM64::VisitSelect(HSelect* select) {
3028 HInstruction* cond = select->GetCondition();
3029 SelectVariant variant = GetSelectVariant(select);
3030 Condition csel_cond;
3031
3032 if (IsBooleanValueOrMaterializedCondition(cond)) {
3033 if (cond->IsCondition() && cond->GetNext() == select) {
3034 // Condition codes set from previous instruction.
3035 csel_cond = GetConditionForSelect(cond->AsCondition(), variant);
3036 } else {
3037 __ Cmp(InputRegisterAt(select, 2), 0);
3038 csel_cond = HasSwappedInputs(variant) ? eq : ne;
3039 }
3040 } else if (IsConditionOnFloatingPointValues(cond)) {
3041 GenerateFcmp(cond);
3042 csel_cond = GetConditionForSelect(cond->AsCondition(), variant);
3043 } else {
3044 __ Cmp(InputRegisterAt(cond, 0), InputOperandAt(cond, 1));
3045 csel_cond = GetConditionForSelect(cond->AsCondition(), variant);
3046 }
3047
3048 switch (variant) {
3049 case kCsel:
3050 case kCselFalseConst:
3051 __ Csel(OutputRegister(select),
3052 InputRegisterAt(select, 1),
3053 InputOperandAt(select, 0),
3054 csel_cond);
3055 break;
3056 case kCselTrueConst:
3057 __ Csel(OutputRegister(select),
3058 InputRegisterAt(select, 0),
3059 InputOperandAt(select, 1),
3060 csel_cond);
3061 break;
3062 case kFcsel:
3063 __ Fcsel(OutputFPRegister(select),
3064 InputFPRegisterAt(select, 1),
3065 InputFPRegisterAt(select, 0),
3066 csel_cond);
3067 break;
3068 }
3069 }
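// Sketch of the generated code for an integer `out = cond ? a : b` with no
// recognized constants (registers invented):
//   cmp  w2, #0            // materialized condition, unless the flags are
//                          // still live from the defining HCondition
//   csel w0, w1, w3, ne
// When the false (or true) value is -1, 0 or 1, it is passed to VIXL as an
// immediate operand, which lets the macro-assembler pick CSEL/CSINC/CSINV
// forms against the zero register; FP selects use FCSEL instead.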
3070
3071 void LocationsBuilderARM64::VisitNativeDebugInfo(HNativeDebugInfo* info) {
3072 new (GetGraph()->GetArena()) LocationSummary(info);
3073 }
3074
3075 void InstructionCodeGeneratorARM64::VisitNativeDebugInfo(HNativeDebugInfo*) {
3076 // MaybeRecordNativeDebugInfo is already called implicitly in CodeGenerator::Compile.
3077 }
3078
3079 void CodeGeneratorARM64::GenerateNop() {
3080 __ Nop();
3081 }
3082
3083 void LocationsBuilderARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
3084 HandleFieldGet(instruction);
3085 }
3086
3087 void InstructionCodeGeneratorARM64::VisitInstanceFieldGet(HInstanceFieldGet* instruction) {
3088 HandleFieldGet(instruction, instruction->GetFieldInfo());
3089 }
3090
3091 void LocationsBuilderARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
3092 HandleFieldSet(instruction);
3093 }
3094
3095 void InstructionCodeGeneratorARM64::VisitInstanceFieldSet(HInstanceFieldSet* instruction) {
3096 HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
3097 }
3098
3099 static bool TypeCheckNeedsATemporary(TypeCheckKind type_check_kind) {
3100 return kEmitCompilerReadBarrier &&
3101 (kUseBakerReadBarrier ||
3102 type_check_kind == TypeCheckKind::kAbstractClassCheck ||
3103 type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
3104 type_check_kind == TypeCheckKind::kArrayObjectCheck);
3105 }
3106
3107 void LocationsBuilderARM64::VisitInstanceOf(HInstanceOf* instruction) {
3108 LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
3109 TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
3110 switch (type_check_kind) {
3111 case TypeCheckKind::kExactCheck:
3112 case TypeCheckKind::kAbstractClassCheck:
3113 case TypeCheckKind::kClassHierarchyCheck:
3114 case TypeCheckKind::kArrayObjectCheck:
3115 call_kind =
3116 kEmitCompilerReadBarrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall;
3117 break;
3118 case TypeCheckKind::kArrayCheck:
3119 case TypeCheckKind::kUnresolvedCheck:
3120 case TypeCheckKind::kInterfaceCheck:
3121 call_kind = LocationSummary::kCallOnSlowPath;
3122 break;
3123 }
3124
3125 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
3126 locations->SetInAt(0, Location::RequiresRegister());
3127 locations->SetInAt(1, Location::RequiresRegister());
3128 // The "out" register is used as a temporary, so it overlaps with the inputs.
3129 // Note that TypeCheckSlowPathARM64 uses this register too.
3130 locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
3131 // When read barriers are enabled, we need a temporary register for
3132 // some cases.
3133 if (TypeCheckNeedsATemporary(type_check_kind)) {
3134 locations->AddTemp(Location::RequiresRegister());
3135 }
3136 }
3137
3138 void InstructionCodeGeneratorARM64::VisitInstanceOf(HInstanceOf* instruction) {
3139 TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
3140 LocationSummary* locations = instruction->GetLocations();
3141 Location obj_loc = locations->InAt(0);
3142 Register obj = InputRegisterAt(instruction, 0);
3143 Register cls = InputRegisterAt(instruction, 1);
3144 Location out_loc = locations->Out();
3145 Register out = OutputRegister(instruction);
3146 Location maybe_temp_loc = TypeCheckNeedsATemporary(type_check_kind) ?
3147 locations->GetTemp(0) :
3148 Location::NoLocation();
3149 uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
3150 uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
3151 uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
3152 uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
3153
3154 vixl::Label done, zero;
3155 SlowPathCodeARM64* slow_path = nullptr;
3156
3157 // Return 0 if `obj` is null.
3158 // Avoid null check if we know `obj` is not null.
3159 if (instruction->MustDoNullCheck()) {
3160 __ Cbz(obj, &zero);
3161 }
3162
3163 // /* HeapReference<Class> */ out = obj->klass_
3164 GenerateReferenceLoadTwoRegisters(instruction, out_loc, obj_loc, class_offset, maybe_temp_loc);
3165
3166 switch (type_check_kind) {
3167 case TypeCheckKind::kExactCheck: {
3168 __ Cmp(out, cls);
3169 __ Cset(out, eq);
3170 if (zero.IsLinked()) {
3171 __ B(&done);
3172 }
3173 break;
3174 }
3175
3176 case TypeCheckKind::kAbstractClassCheck: {
3177 // If the class is abstract, we eagerly fetch the super class of the
3178 // object to avoid doing a comparison we know will fail.
3179 vixl::Label loop, success;
3180 __ Bind(&loop);
3181 // /* HeapReference<Class> */ out = out->super_class_
3182 GenerateReferenceLoadOneRegister(instruction, out_loc, super_offset, maybe_temp_loc);
3183 // If `out` is null, we use it for the result, and jump to `done`.
3184 __ Cbz(out, &done);
3185 __ Cmp(out, cls);
3186 __ B(ne, &loop);
3187 __ Mov(out, 1);
3188 if (zero.IsLinked()) {
3189 __ B(&done);
3190 }
3191 break;
3192 }
3193
3194 case TypeCheckKind::kClassHierarchyCheck: {
3195 // Walk over the class hierarchy to find a match.
3196 vixl::Label loop, success;
3197 __ Bind(&loop);
3198 __ Cmp(out, cls);
3199 __ B(eq, &success);
3200 // /* HeapReference<Class> */ out = out->super_class_
3201 GenerateReferenceLoadOneRegister(instruction, out_loc, super_offset, maybe_temp_loc);
3202 __ Cbnz(out, &loop);
3203 // If `out` is null, we use it for the result, and jump to `done`.
3204 __ B(&done);
3205 __ Bind(&success);
3206 __ Mov(out, 1);
3207 if (zero.IsLinked()) {
3208 __ B(&done);
3209 }
3210 break;
3211 }
3212
3213 case TypeCheckKind::kArrayObjectCheck: {
3214 // Do an exact check.
3215 vixl::Label exact_check;
3216 __ Cmp(out, cls);
3217 __ B(eq, &exact_check);
3218 // Otherwise, we need to check that the object's class is a non-primitive array.
3219 // /* HeapReference<Class> */ out = out->component_type_
3220 GenerateReferenceLoadOneRegister(instruction, out_loc, component_offset, maybe_temp_loc);
3221 // If `out` is null, we use it for the result, and jump to `done`.
3222 __ Cbz(out, &done);
3223 __ Ldrh(out, HeapOperand(out, primitive_offset));
3224 static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
3225 __ Cbnz(out, &zero);
3226 __ Bind(&exact_check);
3227 __ Mov(out, 1);
3228 __ B(&done);
3229 break;
3230 }
3231
3232 case TypeCheckKind::kArrayCheck: {
3233 __ Cmp(out, cls);
3234 DCHECK(locations->OnlyCallsOnSlowPath());
3235 slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(instruction,
3236 /* is_fatal */ false);
3237 codegen_->AddSlowPath(slow_path);
3238 __ B(ne, slow_path->GetEntryLabel());
3239 __ Mov(out, 1);
3240 if (zero.IsLinked()) {
3241 __ B(&done);
3242 }
3243 break;
3244 }
3245
3246 case TypeCheckKind::kUnresolvedCheck:
3247 case TypeCheckKind::kInterfaceCheck: {
3248 // Note that we indeed only call on slow path, but we always go
3249 // into the slow path for the unresolved and interface check
3250 // cases.
3251 //
3252 // We cannot directly call the InstanceofNonTrivial runtime
3253 // entry point without resorting to a type checking slow path
3254 // here (i.e. by calling InvokeRuntime directly), as it would
3255 // require to assign fixed registers for the inputs of this
3256 // HInstanceOf instruction (following the runtime calling
3257 // convention), which might be cluttered by the potential first
3258 // read barrier emission at the beginning of this method.
3259 //
3260 // TODO: Introduce a new runtime entry point taking the object
3261 // to test (instead of its class) as argument, and let it deal
3262 // with the read barrier issues. This will let us refactor this
3263 // case of the `switch` code as it was previously (with a direct
3264 // call to the runtime not using a type checking slow path).
3265 // This should also be beneficial for the other cases above.
3266 DCHECK(locations->OnlyCallsOnSlowPath());
3267 slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(instruction,
3268 /* is_fatal */ false);
3269 codegen_->AddSlowPath(slow_path);
3270 __ B(slow_path->GetEntryLabel());
3271 if (zero.IsLinked()) {
3272 __ B(&done);
3273 }
3274 break;
3275 }
3276 }
3277
3278 if (zero.IsLinked()) {
3279 __ Bind(&zero);
3280 __ Mov(out, 0);
3281 }
3282
3283 if (done.IsLinked()) {
3284 __ Bind(&done);
3285 }
3286
3287 if (slow_path != nullptr) {
3288 __ Bind(slow_path->GetExitLabel());
3289 }
3290 }
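// For reference, the exact-check `instanceof` fast path above boils down to
// (sketch with symbolic offsets, assuming read barriers are disabled so the
// class load is a plain LDR; registers invented):
//   cbz  w0, zero                   // null object -> false
//   ldr  w1, [x0, #class_offset]    // out = obj->klass_
//   cmp  w1, w2
//   cset w1, eq
//   b    done
// zero:
//   mov  w1, #0
// done: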
3291
3292 void LocationsBuilderARM64::VisitCheckCast(HCheckCast* instruction) {
3293 LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
3294 bool throws_into_catch = instruction->CanThrowIntoCatchBlock();
3295
3296 TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
3297 switch (type_check_kind) {
3298 case TypeCheckKind::kExactCheck:
3299 case TypeCheckKind::kAbstractClassCheck:
3300 case TypeCheckKind::kClassHierarchyCheck:
3301 case TypeCheckKind::kArrayObjectCheck:
3302 call_kind = (throws_into_catch || kEmitCompilerReadBarrier) ?
3303 LocationSummary::kCallOnSlowPath :
3304 LocationSummary::kNoCall; // In fact, call on a fatal (non-returning) slow path.
3305 break;
3306 case TypeCheckKind::kArrayCheck:
3307 case TypeCheckKind::kUnresolvedCheck:
3308 case TypeCheckKind::kInterfaceCheck:
3309 call_kind = LocationSummary::kCallOnSlowPath;
3310 break;
3311 }
3312
3313 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
3314 locations->SetInAt(0, Location::RequiresRegister());
3315 locations->SetInAt(1, Location::RequiresRegister());
3316 // Note that TypeCheckSlowPathARM64 uses this "temp" register too.
3317 locations->AddTemp(Location::RequiresRegister());
3318 // When read barriers are enabled, we need an additional temporary
3319 // register for some cases.
3320 if (TypeCheckNeedsATemporary(type_check_kind)) {
3321 locations->AddTemp(Location::RequiresRegister());
3322 }
3323 }
3324
3325 void InstructionCodeGeneratorARM64::VisitCheckCast(HCheckCast* instruction) {
3326 TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
3327 LocationSummary* locations = instruction->GetLocations();
3328 Location obj_loc = locations->InAt(0);
3329 Register obj = InputRegisterAt(instruction, 0);
3330 Register cls = InputRegisterAt(instruction, 1);
3331 Location temp_loc = locations->GetTemp(0);
3332 Location maybe_temp2_loc = TypeCheckNeedsATemporary(type_check_kind) ?
3333 locations->GetTemp(1) :
3334 Location::NoLocation();
3335 Register temp = WRegisterFrom(temp_loc);
3336 uint32_t class_offset = mirror::Object::ClassOffset().Int32Value();
3337 uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value();
3338 uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value();
3339 uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value();
3340
3341 bool is_type_check_slow_path_fatal =
3342 (type_check_kind == TypeCheckKind::kExactCheck ||
3343 type_check_kind == TypeCheckKind::kAbstractClassCheck ||
3344 type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
3345 type_check_kind == TypeCheckKind::kArrayObjectCheck) &&
3346 !instruction->CanThrowIntoCatchBlock();
3347 SlowPathCodeARM64* type_check_slow_path =
3348 new (GetGraph()->GetArena()) TypeCheckSlowPathARM64(instruction,
3349 is_type_check_slow_path_fatal);
3350 codegen_->AddSlowPath(type_check_slow_path);
3351
3352 vixl::Label done;
3353 // Avoid null check if we know obj is not null.
3354 if (instruction->MustDoNullCheck()) {
3355 __ Cbz(obj, &done);
3356 }
3357
3358 // /* HeapReference<Class> */ temp = obj->klass_
3359 GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc);
3360
3361 switch (type_check_kind) {
3362 case TypeCheckKind::kExactCheck:
3363 case TypeCheckKind::kArrayCheck: {
3364 __ Cmp(temp, cls);
3365 // Jump to slow path for throwing the exception or doing a
3366 // more involved array check.
3367 __ B(ne, type_check_slow_path->GetEntryLabel());
3368 break;
3369 }
3370
3371 case TypeCheckKind::kAbstractClassCheck: {
3372 // If the class is abstract, we eagerly fetch the super class of the
3373 // object to avoid doing a comparison we know will fail.
3374 vixl::Label loop, compare_classes;
3375 __ Bind(&loop);
3376 // /* HeapReference<Class> */ temp = temp->super_class_
3377 GenerateReferenceLoadOneRegister(instruction, temp_loc, super_offset, maybe_temp2_loc);
3378
3379 // If the class reference currently in `temp` is not null, jump
3380 // to the `compare_classes` label to compare it with the checked
3381 // class.
3382 __ Cbnz(temp, &compare_classes);
3383 // Otherwise, jump to the slow path to throw the exception.
3384 //
3385 // But before, move back the object's class into `temp` before
3386 // going into the slow path, as it has been overwritten in the
3387 // meantime.
3388 // /* HeapReference<Class> */ temp = obj->klass_
3389 GenerateReferenceLoadTwoRegisters(
3390 instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc);
3391 __ B(type_check_slow_path->GetEntryLabel());
3392
3393 __ Bind(&compare_classes);
3394 __ Cmp(temp, cls);
3395 __ B(ne, &loop);
3396 break;
3397 }
3398
3399 case TypeCheckKind::kClassHierarchyCheck: {
3400 // Walk over the class hierarchy to find a match.
3401 vixl::Label loop;
3402 __ Bind(&loop);
3403 __ Cmp(temp, cls);
3404 __ B(eq, &done);
3405
3406 // /* HeapReference<Class> */ temp = temp->super_class_
3407 GenerateReferenceLoadOneRegister(instruction, temp_loc, super_offset, maybe_temp2_loc);
3408
3409 // If the class reference currently in `temp` is not null, jump
3410 // back at the beginning of the loop.
3411 __ Cbnz(temp, &loop);
3412 // Otherwise, jump to the slow path to throw the exception.
3413 //
3414 // But before, move back the object's class into `temp` before
3415 // going into the slow path, as it has been overwritten in the
3416 // meantime.
3417 // /* HeapReference<Class> */ temp = obj->klass_
3418 GenerateReferenceLoadTwoRegisters(
3419 instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc);
3420 __ B(type_check_slow_path->GetEntryLabel());
3421 break;
3422 }
3423
3424 case TypeCheckKind::kArrayObjectCheck: {
3425 // Do an exact check.
3426 vixl::Label check_non_primitive_component_type;
3427 __ Cmp(temp, cls);
3428 __ B(eq, &done);
3429
3430 // Otherwise, we need to check that the object's class is a non-primitive array.
3431 // /* HeapReference<Class> */ temp = temp->component_type_
3432 GenerateReferenceLoadOneRegister(instruction, temp_loc, component_offset, maybe_temp2_loc);
3433
3434 // If the component type is not null (i.e. the object is indeed
3435 // an array), jump to label `check_non_primitive_component_type`
3436 // to further check that this component type is not a primitive
3437 // type.
3438 __ Cbnz(temp, &check_non_primitive_component_type);
3439 // Otherwise, jump to the slow path to throw the exception.
3440 //
3441 // But before, move back the object's class into `temp` before
3442 // going into the slow path, as it has been overwritten in the
3443 // meantime.
3444 // /* HeapReference<Class> */ temp = obj->klass_
3445 GenerateReferenceLoadTwoRegisters(
3446 instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc);
3447 __ B(type_check_slow_path->GetEntryLabel());
3448
3449 __ Bind(&check_non_primitive_component_type);
3450 __ Ldrh(temp, HeapOperand(temp, primitive_offset));
3451 static_assert(Primitive::kPrimNot == 0, "Expected 0 for kPrimNot");
3452 __ Cbz(temp, &done);
3453 // Same comment as above regarding `temp` and the slow path.
3454 // /* HeapReference<Class> */ temp = obj->klass_
3455 GenerateReferenceLoadTwoRegisters(
3456 instruction, temp_loc, obj_loc, class_offset, maybe_temp2_loc);
3457 __ B(type_check_slow_path->GetEntryLabel());
3458 break;
3459 }
3460
3461 case TypeCheckKind::kUnresolvedCheck:
3462 case TypeCheckKind::kInterfaceCheck:
3463 // We always go into the type check slow path for the unresolved
3464 // and interface check cases.
3465 //
3466 // We cannot directly call the CheckCast runtime entry point
3467 // without resorting to a type checking slow path here (i.e. by
3468 // calling InvokeRuntime directly), as it would require to
3469 // assign fixed registers for the inputs of this HInstanceOf
3470 // instruction (following the runtime calling convention), which
3471 // might be cluttered by the potential first read barrier
3472 // emission at the beginning of this method.
3473 //
3474 // TODO: Introduce a new runtime entry point taking the object
3475 // to test (instead of its class) as argument, and let it deal
3476 // with the read barrier issues. This will let us refactor this
3477 // case of the `switch` code as it was previously (with a direct
3478 // call to the runtime not using a type checking slow path).
3479 // This should also be beneficial for the other cases above.
3480 __ B(type_check_slow_path->GetEntryLabel());
3481 break;
3482 }
3483 __ Bind(&done);
3484
3485 __ Bind(type_check_slow_path->GetExitLabel());
3486 }
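// The corresponding exact-check cast is just (sketch, symbolic offsets, no
// read barriers, registers invented):
//   cbz  w0, done                     // null always passes the cast
//   ldr  w16, [x0, #class_offset]     // temp = obj->klass_
//   cmp  w16, w1
//   b.ne <TypeCheckSlowPath entry>    // slow path throws ClassCastException
// done: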
3487
3488 void LocationsBuilderARM64::VisitIntConstant(HIntConstant* constant) {
3489 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
3490 locations->SetOut(Location::ConstantLocation(constant));
3491 }
3492
3493 void InstructionCodeGeneratorARM64::VisitIntConstant(HIntConstant* constant ATTRIBUTE_UNUSED) {
3494 // Will be generated at use site.
3495 }
3496
3497 void LocationsBuilderARM64::VisitNullConstant(HNullConstant* constant) {
3498 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
3499 locations->SetOut(Location::ConstantLocation(constant));
3500 }
3501
3502 void InstructionCodeGeneratorARM64::VisitNullConstant(HNullConstant* constant ATTRIBUTE_UNUSED) {
3503 // Will be generated at use site.
3504 }
3505
3506 void LocationsBuilderARM64::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
3507 // The trampoline uses the same calling convention as dex calling conventions,
3508 // except instead of loading arg0/r0 with the target Method*, arg0/r0 will contain
3509 // the method_idx.
3510 HandleInvoke(invoke);
3511 }
3512
3513 void InstructionCodeGeneratorARM64::VisitInvokeUnresolved(HInvokeUnresolved* invoke) {
3514 codegen_->GenerateInvokeUnresolvedRuntimeCall(invoke);
3515 }
3516
3517 void LocationsBuilderARM64::HandleInvoke(HInvoke* invoke) {
3518 InvokeDexCallingConventionVisitorARM64 calling_convention_visitor;
3519 CodeGenerator::CreateCommonInvokeLocationSummary(invoke, &calling_convention_visitor);
3520 }
3521
3522 void LocationsBuilderARM64::VisitInvokeInterface(HInvokeInterface* invoke) {
3523 HandleInvoke(invoke);
3524 }
3525
3526 void InstructionCodeGeneratorARM64::VisitInvokeInterface(HInvokeInterface* invoke) {
3527 // TODO: b/18116999, our IMTs can miss an IncompatibleClassChangeError.
3528 LocationSummary* locations = invoke->GetLocations();
3529 Register temp = XRegisterFrom(locations->GetTemp(0));
3530 uint32_t method_offset = mirror::Class::EmbeddedImTableEntryOffset(
3531 invoke->GetImtIndex() % mirror::Class::kImtSize, kArm64PointerSize).Uint32Value();
3532 Location receiver = locations->InAt(0);
3533 Offset class_offset = mirror::Object::ClassOffset();
3534 Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
3535
3536 // The register ip1 is required to be used for the hidden argument in
3537 // art_quick_imt_conflict_trampoline, so prevent VIXL from using it.
3538 MacroAssembler* masm = GetVIXLAssembler();
3539 UseScratchRegisterScope scratch_scope(masm);
3540 BlockPoolsScope block_pools(masm);
3541 scratch_scope.Exclude(ip1);
3542 __ Mov(ip1, invoke->GetDexMethodIndex());
3543
3544 if (receiver.IsStackSlot()) {
3545 __ Ldr(temp.W(), StackOperandFrom(receiver));
3546 // /* HeapReference<Class> */ temp = temp->klass_
3547 __ Ldr(temp.W(), HeapOperand(temp.W(), class_offset));
3548 } else {
3549 // /* HeapReference<Class> */ temp = receiver->klass_
3550 __ Ldr(temp.W(), HeapOperandFrom(receiver, class_offset));
3551 }
3552 codegen_->MaybeRecordImplicitNullCheck(invoke);
3553 // Instead of simply (possibly) unpoisoning `temp` here, we should
3554 // emit a read barrier for the previous class reference load.
3555 // However this is not required in practice, as this is an
3556 // intermediate/temporary reference and because the current
3557 // concurrent copying collector keeps the from-space memory
3558 // intact/accessible until the end of the marking phase (the
3559 // concurrent copying collector may not do so in the future).
3560 GetAssembler()->MaybeUnpoisonHeapReference(temp.W());
3561 // temp = temp->GetImtEntryAt(method_offset);
3562 __ Ldr(temp, MemOperand(temp, method_offset));
3563 // lr = temp->GetEntryPoint();
3564 __ Ldr(lr, MemOperand(temp, entry_point.Int32Value()));
3565 // lr();
3566 __ Blr(lr);
3567 DCHECK(!codegen_->IsLeafMethod());
3568 codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
3569 }
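// The interface dispatch above is, schematically (symbolic offsets, registers
// invented; ip1 is the hidden-argument register expected by
// art_quick_imt_conflict_trampoline):
//   mov ip1, #<dex_method_index>
//   ldr w16, [x1, #class_offset]        // receiver->klass_
//   ldr x16, [x16, #imt_entry_offset]   // ArtMethod* from the embedded IMT
//   ldr lr,  [x16, #entry_point_offset]
//   blr lr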
3570
3571 void LocationsBuilderARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
3572 IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena());
3573 if (intrinsic.TryDispatch(invoke)) {
3574 return;
3575 }
3576
3577 HandleInvoke(invoke);
3578 }
3579
3580 void LocationsBuilderARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
3581 // Explicit clinit checks triggered by static invokes must have been pruned by
3582 // art::PrepareForRegisterAllocation.
3583 DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
3584
3585 IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena());
3586 if (intrinsic.TryDispatch(invoke)) {
3587 return;
3588 }
3589
3590 HandleInvoke(invoke);
3591 }
3592
3593 static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorARM64* codegen) {
3594 if (invoke->GetLocations()->Intrinsified()) {
3595 IntrinsicCodeGeneratorARM64 intrinsic(codegen);
3596 intrinsic.Dispatch(invoke);
3597 return true;
3598 }
3599 return false;
3600 }
3601
3602 HInvokeStaticOrDirect::DispatchInfo CodeGeneratorARM64::GetSupportedInvokeStaticOrDirectDispatch(
3603 const HInvokeStaticOrDirect::DispatchInfo& desired_dispatch_info,
3604 MethodReference target_method ATTRIBUTE_UNUSED) {
3605 // On ARM64 we support all dispatch types.
3606 return desired_dispatch_info;
3607 }
3608
3609 void CodeGeneratorARM64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke, Location temp) {
3610 // For better instruction scheduling we load the direct code pointer before the method pointer.
3611 bool direct_code_loaded = false;
3612 switch (invoke->GetCodePtrLocation()) {
3613 case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
3614 // LR = code address from literal pool with link-time patch.
3615 __ Ldr(lr, DeduplicateMethodCodeLiteral(invoke->GetTargetMethod()));
3616 direct_code_loaded = true;
3617 break;
3618 case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
3619 // LR = invoke->GetDirectCodePtr();
3620 __ Ldr(lr, DeduplicateUint64Literal(invoke->GetDirectCodePtr()));
3621 direct_code_loaded = true;
3622 break;
3623 default:
3624 break;
3625 }
3626
3627 // Make sure that ArtMethod* is passed in kArtMethodRegister as per the calling convention.
3628 Location callee_method = temp; // For all kinds except kRecursive, callee will be in temp.
3629 switch (invoke->GetMethodLoadKind()) {
3630 case HInvokeStaticOrDirect::MethodLoadKind::kStringInit:
3631 // temp = thread->string_init_entrypoint
3632 __ Ldr(XRegisterFrom(temp), MemOperand(tr, invoke->GetStringInitOffset()));
3633 break;
3634 case HInvokeStaticOrDirect::MethodLoadKind::kRecursive:
3635 callee_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
3636 break;
3637 case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress:
3638 // Load method address from literal pool.
3639 __ Ldr(XRegisterFrom(temp), DeduplicateUint64Literal(invoke->GetMethodAddress()));
3640 break;
3641 case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddressWithFixup:
3642 // Load method address from literal pool with a link-time patch.
3643 __ Ldr(XRegisterFrom(temp),
3644 DeduplicateMethodAddressLiteral(invoke->GetTargetMethod()));
3645 break;
3646 case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative: {
3647 // Add ADRP with its PC-relative DexCache access patch.
3648 const DexFile& dex_file = *invoke->GetTargetMethod().dex_file;
3649 uint32_t element_offset = invoke->GetDexCacheArrayOffset();
3650 vixl::Label* adrp_label = NewPcRelativeDexCacheArrayPatch(dex_file, element_offset);
3651 {
3652 vixl::SingleEmissionCheckScope guard(GetVIXLAssembler());
3653 __ Bind(adrp_label);
3654 __ adrp(XRegisterFrom(temp), /* offset placeholder */ 0);
3655 }
3656 // Add LDR with its PC-relative DexCache access patch.
3657 vixl::Label* ldr_label =
3658 NewPcRelativeDexCacheArrayPatch(dex_file, element_offset, adrp_label);
3659 {
3660 vixl::SingleEmissionCheckScope guard(GetVIXLAssembler());
3661 __ Bind(ldr_label);
3662 __ ldr(XRegisterFrom(temp), MemOperand(XRegisterFrom(temp), /* offset placeholder */ 0));
3663 }
3664 break;
3665 }
3666 case HInvokeStaticOrDirect::MethodLoadKind::kDexCacheViaMethod: {
3667 Location current_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex());
3668 Register reg = XRegisterFrom(temp);
3669 Register method_reg;
3670 if (current_method.IsRegister()) {
3671 method_reg = XRegisterFrom(current_method);
3672 } else {
3673 DCHECK(invoke->GetLocations()->Intrinsified());
3674 DCHECK(!current_method.IsValid());
3675 method_reg = reg;
3676 __ Ldr(reg.X(), MemOperand(sp, kCurrentMethodStackOffset));
3677 }
3678
3679 // /* ArtMethod*[] */ temp = temp.ptr_sized_fields_->dex_cache_resolved_methods_;
3680 __ Ldr(reg.X(),
3681 MemOperand(method_reg.X(),
3682 ArtMethod::DexCacheResolvedMethodsOffset(kArm64WordSize).Int32Value()));
3683 // temp = temp[index_in_cache];
3684 // Note: Don't use invoke->GetTargetMethod() as it may point to a different dex file.
3685 uint32_t index_in_cache = invoke->GetDexMethodIndex();
3686 __ Ldr(reg.X(), MemOperand(reg.X(), GetCachePointerOffset(index_in_cache)));
3687 break;
3688 }
3689 }
3690
3691 switch (invoke->GetCodePtrLocation()) {
3692 case HInvokeStaticOrDirect::CodePtrLocation::kCallSelf:
3693 __ Bl(&frame_entry_label_);
3694 break;
3695 case HInvokeStaticOrDirect::CodePtrLocation::kCallPCRelative: {
3696 relative_call_patches_.emplace_back(invoke->GetTargetMethod());
3697 vixl::Label* label = &relative_call_patches_.back().label;
3698 vixl::SingleEmissionCheckScope guard(GetVIXLAssembler());
3699 __ Bind(label);
3700 __ bl(0); // Branch and link to itself. This will be overridden at link time.
3701 break;
3702 }
3703 case HInvokeStaticOrDirect::CodePtrLocation::kCallDirectWithFixup:
3704 case HInvokeStaticOrDirect::CodePtrLocation::kCallDirect:
3705 // LR prepared above for better instruction scheduling.
3706 DCHECK(direct_code_loaded);
3707 // lr()
3708 __ Blr(lr);
3709 break;
3710 case HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod:
3711 // LR = callee_method->entry_point_from_quick_compiled_code_;
3712 __ Ldr(lr, MemOperand(
3713 XRegisterFrom(callee_method),
3714 ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize).Int32Value()));
3715 // lr()
3716 __ Blr(lr);
3717 break;
3718 }
3719
3720 DCHECK(!IsLeafMethod());
3721 }
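// Sketch of the kDexCachePcRelative method load emitted above: the zero
// immediates are placeholders that the linker rewrites through the labels
// recorded by NewPcRelativeDexCacheArrayPatch:
//   adrp x16, #0          // patched to the page of the DexCache array element
//   ldr  x16, [x16, #0]   // patched with the element's page offset
// followed, for kCallArtMethod, by
//   ldr  lr, [x16, #entry_point_from_quick_compiled_code_offset]
//   blr  lr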
3722
3723 void CodeGeneratorARM64::GenerateVirtualCall(HInvokeVirtual* invoke, Location temp_in) {
3724 // Use the calling convention instead of the location of the receiver, as
3725 // intrinsics may have put the receiver in a different register. In the intrinsics
3726 // slow path, the arguments have been moved to the right place, so here we are
3727 // guaranteed that the receiver is the first register of the calling convention.
3728 InvokeDexCallingConvention calling_convention;
3729 Register receiver = calling_convention.GetRegisterAt(0);
3730 Register temp = XRegisterFrom(temp_in);
3731 size_t method_offset = mirror::Class::EmbeddedVTableEntryOffset(
3732 invoke->GetVTableIndex(), kArm64PointerSize).SizeValue();
3733 Offset class_offset = mirror::Object::ClassOffset();
3734 Offset entry_point = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
3735
3736 BlockPoolsScope block_pools(GetVIXLAssembler());
3737
3738 DCHECK(receiver.IsRegister());
3739 // /* HeapReference<Class> */ temp = receiver->klass_
3740 __ Ldr(temp.W(), HeapOperandFrom(LocationFrom(receiver), class_offset));
3741 MaybeRecordImplicitNullCheck(invoke);
3742 // Instead of simply (possibly) unpoisoning `temp` here, we should
3743 // emit a read barrier for the previous class reference load. However
3744 // this is not required in practice, as this is an intermediate/temporary
3745 // reference and because the current concurrent copying collector keeps
3746 // the from-space memory intact/accessible until the end of the marking
3747 // phase (though it may not do so in the future).
3748 GetAssembler()->MaybeUnpoisonHeapReference(temp.W());
3749 // temp = temp->GetMethodAt(method_offset);
3750 __ Ldr(temp, MemOperand(temp, method_offset));
3751 // lr = temp->GetEntryPoint();
3752 __ Ldr(lr, MemOperand(temp, entry_point.SizeValue()));
3753 // lr();
3754 __ Blr(lr);
3755 }
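// Virtual dispatch therefore costs three dependent loads plus the call
// (sketch, symbolic offsets, receiver assumed in the first calling-convention
// register):
//   ldr w16, [x1, #class_offset]          // receiver->klass_
//   ldr x16, [x16, #vtable_entry_offset]  // ArtMethod* from the embedded vtable
//   ldr lr,  [x16, #entry_point_offset]
//   blr lr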
3756
3757 vixl::Label* CodeGeneratorARM64::NewPcRelativeStringPatch(const DexFile& dex_file,
3758 uint32_t string_index,
3759 vixl::Label* adrp_label) {
3760 return NewPcRelativePatch(dex_file, string_index, adrp_label, &pc_relative_string_patches_);
3761 }
3762
3763 vixl::Label* CodeGeneratorARM64::NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file,
3764 uint32_t element_offset,
3765 vixl::Label* adrp_label) {
3766 return NewPcRelativePatch(dex_file, element_offset, adrp_label, &pc_relative_dex_cache_patches_);
3767 }
3768
3769 vixl::Label* CodeGeneratorARM64::NewPcRelativePatch(const DexFile& dex_file,
3770 uint32_t offset_or_index,
3771 vixl::Label* adrp_label,
3772 ArenaDeque<PcRelativePatchInfo>* patches) {
3773 // Add a patch entry and return the label.
3774 patches->emplace_back(dex_file, offset_or_index);
3775 PcRelativePatchInfo* info = &patches->back();
3776 vixl::Label* label = &info->label;
3777 // If adrp_label is null, this is the ADRP patch and needs to point to its own label.
3778 info->pc_insn_label = (adrp_label != nullptr) ? adrp_label : label;
3779 return label;
3780 }
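// Typical pairing, as used by GenerateStaticOrDirectCall and VisitLoadString
// (sketch):
//   vixl::Label* adrp_label = NewPcRelativeDexCacheArrayPatch(dex_file, offset);
//   /* Bind(adrp_label); adrp(reg, 0); */
//   vixl::Label* ldr_label =
//       NewPcRelativeDexCacheArrayPatch(dex_file, offset, adrp_label);
//   /* Bind(ldr_label); ldr(reg, MemOperand(reg, 0)); */
// Both entries share the ADRP's label as pc_insn_label, which is what
// EmitLinkerPatches hands to the linker below.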
3781
3782 vixl::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateBootImageStringLiteral(
3783 const DexFile& dex_file, uint32_t string_index) {
3784 return boot_image_string_patches_.GetOrCreate(
3785 StringReference(&dex_file, string_index),
3786 [this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u); });
3787 }
3788
3789 vixl::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateBootImageAddressLiteral(uint64_t address) {
3790 bool needs_patch = GetCompilerOptions().GetIncludePatchInformation();
3791 Uint32ToLiteralMap* map = needs_patch ? &boot_image_address_patches_ : &uint32_literals_;
3792 return DeduplicateUint32Literal(dchecked_integral_cast<uint32_t>(address), map);
3793 }
3794
3795 vixl::Literal<uint64_t>* CodeGeneratorARM64::DeduplicateDexCacheAddressLiteral(uint64_t address) {
3796 return DeduplicateUint64Literal(address);
3797 }
3798
3799 void CodeGeneratorARM64::EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) {
3800 DCHECK(linker_patches->empty());
3801 size_t size =
3802 method_patches_.size() +
3803 call_patches_.size() +
3804 relative_call_patches_.size() +
3805 pc_relative_dex_cache_patches_.size() +
3806 boot_image_string_patches_.size() +
3807 pc_relative_string_patches_.size() +
3808 boot_image_address_patches_.size();
3809 linker_patches->reserve(size);
3810 for (const auto& entry : method_patches_) {
3811 const MethodReference& target_method = entry.first;
3812 vixl::Literal<uint64_t>* literal = entry.second;
3813 linker_patches->push_back(LinkerPatch::MethodPatch(literal->offset(),
3814 target_method.dex_file,
3815 target_method.dex_method_index));
3816 }
3817 for (const auto& entry : call_patches_) {
3818 const MethodReference& target_method = entry.first;
3819 vixl::Literal<uint64_t>* literal = entry.second;
3820 linker_patches->push_back(LinkerPatch::CodePatch(literal->offset(),
3821 target_method.dex_file,
3822 target_method.dex_method_index));
3823 }
3824 for (const MethodPatchInfo<vixl::Label>& info : relative_call_patches_) {
3825 linker_patches->push_back(LinkerPatch::RelativeCodePatch(info.label.location(),
3826 info.target_method.dex_file,
3827 info.target_method.dex_method_index));
3828 }
3829 for (const PcRelativePatchInfo& info : pc_relative_dex_cache_patches_) {
3830 linker_patches->push_back(LinkerPatch::DexCacheArrayPatch(info.label.location(),
3831 &info.target_dex_file,
3832 info.pc_insn_label->location(),
3833 info.offset_or_index));
3834 }
3835 for (const auto& entry : boot_image_string_patches_) {
3836 const StringReference& target_string = entry.first;
3837 vixl::Literal<uint32_t>* literal = entry.second;
3838 linker_patches->push_back(LinkerPatch::StringPatch(literal->offset(),
3839 target_string.dex_file,
3840 target_string.string_index));
3841 }
3842 for (const PcRelativePatchInfo& info : pc_relative_string_patches_) {
3843 linker_patches->push_back(LinkerPatch::RelativeStringPatch(info.label.location(),
3844 &info.target_dex_file,
3845 info.pc_insn_label->location(),
3846 info.offset_or_index));
3847 }
3848 for (const auto& entry : boot_image_address_patches_) {
3849 DCHECK(GetCompilerOptions().GetIncludePatchInformation());
3850 vixl::Literal<uint32_t>* literal = entry.second;
3851 linker_patches->push_back(LinkerPatch::RecordPosition(literal->offset()));
3852 }
3853 }
3854
3855 vixl::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateUint32Literal(uint32_t value,
3856 Uint32ToLiteralMap* map) {
3857 return map->GetOrCreate(
3858 value,
3859 [this, value]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(value); });
3860 }
3861
3862 vixl::Literal<uint64_t>* CodeGeneratorARM64::DeduplicateUint64Literal(uint64_t value) {
3863 return uint64_literals_.GetOrCreate(
3864 value,
3865 [this, value]() { return __ CreateLiteralDestroyedWithPool<uint64_t>(value); });
3866 }
3867
3868 vixl::Literal<uint64_t>* CodeGeneratorARM64::DeduplicateMethodLiteral(
3869 MethodReference target_method,
3870 MethodToLiteralMap* map) {
3871 return map->GetOrCreate(
3872 target_method,
3873 [this]() { return __ CreateLiteralDestroyedWithPool<uint64_t>(/* placeholder */ 0u); });
3874 }
3875
3876 vixl::Literal<uint64_t>* CodeGeneratorARM64::DeduplicateMethodAddressLiteral(
3877 MethodReference target_method) {
3878 return DeduplicateMethodLiteral(target_method, &method_patches_);
3879 }
3880
3881 vixl::Literal<uint64_t>* CodeGeneratorARM64::DeduplicateMethodCodeLiteral(
3882 MethodReference target_method) {
3883 return DeduplicateMethodLiteral(target_method, &call_patches_);
3884 }
3885
3886
3887 void InstructionCodeGeneratorARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
3888 // Explicit clinit checks triggered by static invokes must have been pruned by
3889 // art::PrepareForRegisterAllocation.
3890 DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
3891
3892 if (TryGenerateIntrinsicCode(invoke, codegen_)) {
3893 return;
3894 }
3895
3896 BlockPoolsScope block_pools(GetVIXLAssembler());
3897 LocationSummary* locations = invoke->GetLocations();
3898 codegen_->GenerateStaticOrDirectCall(
3899 invoke, locations->HasTemps() ? locations->GetTemp(0) : Location::NoLocation());
3900 codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
3901 }
3902
3903 void InstructionCodeGeneratorARM64::VisitInvokeVirtual(HInvokeVirtual* invoke) {
3904 if (TryGenerateIntrinsicCode(invoke, codegen_)) {
3905 return;
3906 }
3907
3908 codegen_->GenerateVirtualCall(invoke, invoke->GetLocations()->GetTemp(0));
3909 DCHECK(!codegen_->IsLeafMethod());
3910 codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
3911 }
3912
3913 void LocationsBuilderARM64::VisitLoadClass(HLoadClass* cls) {
3914 InvokeRuntimeCallingConvention calling_convention;
3915 CodeGenerator::CreateLoadClassLocationSummary(
3916 cls,
3917 LocationFrom(calling_convention.GetRegisterAt(0)),
3918 LocationFrom(vixl::x0),
3919 /* code_generator_supports_read_barrier */ true);
3920 }
3921
3922 void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) {
3923 if (cls->NeedsAccessCheck()) {
3924 codegen_->MoveConstant(cls->GetLocations()->GetTemp(0), cls->GetTypeIndex());
3925 codegen_->InvokeRuntime(QUICK_ENTRY_POINT(pInitializeTypeAndVerifyAccess),
3926 cls,
3927 cls->GetDexPc(),
3928 nullptr);
3929 CheckEntrypointTypes<kQuickInitializeTypeAndVerifyAccess, void*, uint32_t>();
3930 return;
3931 }
3932
3933 Location out_loc = cls->GetLocations()->Out();
3934 Register out = OutputRegister(cls);
3935 Register current_method = InputRegisterAt(cls, 0);
3936 if (cls->IsReferrersClass()) {
3937 DCHECK(!cls->CanCallRuntime());
3938 DCHECK(!cls->MustGenerateClinitCheck());
3939 // /* GcRoot<mirror::Class> */ out = current_method->declaring_class_
3940 GenerateGcRootFieldLoad(
3941 cls, out_loc, current_method, ArtMethod::DeclaringClassOffset().Int32Value());
3942 } else {
3943 MemberOffset resolved_types_offset = ArtMethod::DexCacheResolvedTypesOffset(kArm64PointerSize);
3944 // /* GcRoot<mirror::Class>[] */ out =
3945 // current_method.ptr_sized_fields_->dex_cache_resolved_types_
3946 __ Ldr(out.X(), MemOperand(current_method, resolved_types_offset.Int32Value()));
3947 // /* GcRoot<mirror::Class> */ out = out[type_index]
3948 GenerateGcRootFieldLoad(
3949 cls, out_loc, out.X(), CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));
3950
3951 if (!cls->IsInDexCache() || cls->MustGenerateClinitCheck()) {
3952 DCHECK(cls->CanCallRuntime());
3953 SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64(
3954 cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
3955 codegen_->AddSlowPath(slow_path);
3956 if (!cls->IsInDexCache()) {
3957 __ Cbz(out, slow_path->GetEntryLabel());
3958 }
3959 if (cls->MustGenerateClinitCheck()) {
3960 GenerateClassInitializationCheck(slow_path, out);
3961 } else {
3962 __ Bind(slow_path->GetExitLabel());
3963 }
3964 }
3965 }
3966 }
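// Sketch of the non-referrer path above when no clinit check is needed
// (symbolic offsets, no read barriers, registers invented):
//   ldr x0, [x1, #dex_cache_resolved_types_offset]   // GcRoot<Class>[] array
//   ldr w0, [x0, #cache_offset(type_index)]          // out = array[type_index]
//   cbz w0, <LoadClassSlowPath entry>                // resolve if not cached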
3967
3968 static MemOperand GetExceptionTlsAddress() {
3969 return MemOperand(tr, Thread::ExceptionOffset<kArm64WordSize>().Int32Value());
3970 }
3971
3972 void LocationsBuilderARM64::VisitLoadException(HLoadException* load) {
3973 LocationSummary* locations =
3974 new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kNoCall);
3975 locations->SetOut(Location::RequiresRegister());
3976 }
3977
3978 void InstructionCodeGeneratorARM64::VisitLoadException(HLoadException* instruction) {
3979 __ Ldr(OutputRegister(instruction), GetExceptionTlsAddress());
3980 }
3981
3982 void LocationsBuilderARM64::VisitClearException(HClearException* clear) {
3983 new (GetGraph()->GetArena()) LocationSummary(clear, LocationSummary::kNoCall);
3984 }
3985
3986 void InstructionCodeGeneratorARM64::VisitClearException(HClearException* clear ATTRIBUTE_UNUSED) {
3987 __ Str(wzr, GetExceptionTlsAddress());
3988 }
3989
3990 HLoadString::LoadKind CodeGeneratorARM64::GetSupportedLoadStringKind(
3991 HLoadString::LoadKind desired_string_load_kind) {
3992 if (kEmitCompilerReadBarrier) {
3993 switch (desired_string_load_kind) {
3994 case HLoadString::LoadKind::kBootImageLinkTimeAddress:
3995 case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
3996 case HLoadString::LoadKind::kBootImageAddress:
3997 // TODO: Implement for read barrier.
3998 return HLoadString::LoadKind::kDexCacheViaMethod;
3999 default:
4000 break;
4001 }
4002 }
4003 switch (desired_string_load_kind) {
4004 case HLoadString::LoadKind::kBootImageLinkTimeAddress:
4005 DCHECK(!GetCompilerOptions().GetCompilePic());
4006 break;
4007 case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
4008 DCHECK(GetCompilerOptions().GetCompilePic());
4009 break;
4010 case HLoadString::LoadKind::kBootImageAddress:
4011 break;
4012 case HLoadString::LoadKind::kDexCacheAddress:
4013 DCHECK(Runtime::Current()->UseJitCompilation());
4014 break;
4015 case HLoadString::LoadKind::kDexCachePcRelative:
4016 DCHECK(!Runtime::Current()->UseJitCompilation());
4017 break;
4018 case HLoadString::LoadKind::kDexCacheViaMethod:
4019 break;
4020 }
4021 return desired_string_load_kind;
4022 }
4023
4024 void LocationsBuilderARM64::VisitLoadString(HLoadString* load) {
4025 LocationSummary::CallKind call_kind = (load->NeedsEnvironment() || kEmitCompilerReadBarrier)
4026 ? LocationSummary::kCallOnSlowPath
4027 : LocationSummary::kNoCall;
4028 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(load, call_kind);
4029 if (load->GetLoadKind() == HLoadString::LoadKind::kDexCacheViaMethod) {
4030 locations->SetInAt(0, Location::RequiresRegister());
4031 }
4032 locations->SetOut(Location::RequiresRegister());
4033 }
4034
4035 void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) {
4036 Location out_loc = load->GetLocations()->Out();
4037 Register out = OutputRegister(load);
4038
4039 switch (load->GetLoadKind()) {
4040 case HLoadString::LoadKind::kBootImageLinkTimeAddress:
4041 DCHECK(!kEmitCompilerReadBarrier);
4042 __ Ldr(out, codegen_->DeduplicateBootImageStringLiteral(load->GetDexFile(),
4043 load->GetStringIndex()));
4044 return; // No dex cache slow path.
4045 case HLoadString::LoadKind::kBootImageLinkTimePcRelative: {
4046 DCHECK(!kEmitCompilerReadBarrier);
4047 // Add ADRP with its PC-relative String patch.
4048 const DexFile& dex_file = load->GetDexFile();
4049 uint32_t string_index = load->GetStringIndex();
4050 vixl::Label* adrp_label = codegen_->NewPcRelativeStringPatch(dex_file, string_index);
4051 {
4052 vixl::SingleEmissionCheckScope guard(GetVIXLAssembler());
4053 __ Bind(adrp_label);
4054 __ adrp(out.X(), /* offset placeholder */ 0);
4055 }
4056 // Add ADD with its PC-relative String patch.
4057 vixl::Label* add_label =
4058 codegen_->NewPcRelativeStringPatch(dex_file, string_index, adrp_label);
4059 {
4060 vixl::SingleEmissionCheckScope guard(GetVIXLAssembler());
4061 __ Bind(add_label);
4062 __ add(out.X(), out.X(), Operand(/* offset placeholder */ 0));
4063 }
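// The ADRP/ADD pair above follows the standard AArch64 PC-relative addressing idiom:
// ADRP materializes the 4 KiB page of the target address and ADD supplies the low 12 bits.
// The zero immediates are only placeholders; they are fixed up when the String patches
// recorded by NewPcRelativeStringPatch() are applied at link time.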
4064 return; // No dex cache slow path.
4065 }
4066 case HLoadString::LoadKind::kBootImageAddress: {
4067 DCHECK(!kEmitCompilerReadBarrier);
4068 DCHECK(load->GetAddress() != 0u && IsUint<32>(load->GetAddress()));
4069 __ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(load->GetAddress()));
4070 return; // No dex cache slow path.
4071 }
4072 case HLoadString::LoadKind::kDexCacheAddress: {
4073 DCHECK_NE(load->GetAddress(), 0u);
4074 // LDR immediate has a 12-bit offset multiplied by the size and for 32-bit loads
4075 // that gives a 16KiB range. To try to reduce the number of literals when we load
4076 // multiple strings, simply split the dex cache address to a 16KiB aligned base
4077 // loaded from a literal and the remaining offset embedded in the load.
4078 static_assert(sizeof(GcRoot<mirror::String>) == 4u, "Expected GC root to be 4 bytes.");
4079 DCHECK_ALIGNED(load->GetAddress(), 4u);
4080 constexpr size_t offset_bits = /* encoded bits */ 12 + /* scale */ 2;
4081 uint64_t base_address = load->GetAddress() & ~MaxInt<uint64_t>(offset_bits);
4082 uint32_t offset = load->GetAddress() & MaxInt<uint64_t>(offset_bits);
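// For example (illustrative address only): with load->GetAddress() == 0x12345678 and
// offset_bits == 14, base_address == 0x12344000 (16 KiB aligned) and offset == 0x1678; the
// offset fits the scaled 12-bit LDR immediate since it is 4-byte aligned (0x1678 / 4 <= 0xFFF).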
4083 __ Ldr(out.X(), codegen_->DeduplicateDexCacheAddressLiteral(base_address));
4084 GenerateGcRootFieldLoad(load, out_loc, out.X(), offset);
4085 break;
4086 }
4087 case HLoadString::LoadKind::kDexCachePcRelative: {
4088 // Add ADRP with its PC-relative DexCache access patch.
4089 const DexFile& dex_file = load->GetDexFile();
4090 uint32_t element_offset = load->GetDexCacheElementOffset();
4091 vixl::Label* adrp_label = codegen_->NewPcRelativeDexCacheArrayPatch(dex_file, element_offset);
4092 {
4093 vixl::SingleEmissionCheckScope guard(GetVIXLAssembler());
4094 __ Bind(adrp_label);
4095 __ adrp(out.X(), /* offset placeholder */ 0);
4096 }
4097 // Add LDR with its PC-relative DexCache access patch.
4098 vixl::Label* ldr_label =
4099 codegen_->NewPcRelativeDexCacheArrayPatch(dex_file, element_offset, adrp_label);
4100 GenerateGcRootFieldLoad(load, out_loc, out.X(), /* offset placeholder */ 0, ldr_label);
4101 break;
4102 }
4103 case HLoadString::LoadKind::kDexCacheViaMethod: {
4104 Register current_method = InputRegisterAt(load, 0);
4105 // /* GcRoot<mirror::Class> */ out = current_method->declaring_class_
4106 GenerateGcRootFieldLoad(
4107 load, out_loc, current_method, ArtMethod::DeclaringClassOffset().Int32Value());
4108 // /* GcRoot<mirror::String>[] */ out = out->dex_cache_strings_
4109 __ Ldr(out.X(), HeapOperand(out, mirror::Class::DexCacheStringsOffset().Uint32Value()));
4110 // /* GcRoot<mirror::String> */ out = out[string_index]
4111 GenerateGcRootFieldLoad(
4112 load, out_loc, out.X(), CodeGenerator::GetCacheOffset(load->GetStringIndex()));
4113 break;
4114 }
4115 default:
4116 LOG(FATAL) << "Unexpected load kind: " << load->GetLoadKind();
4117 UNREACHABLE();
4118 }
4119
4120 if (!load->IsInDexCache()) {
4121 SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM64(load);
4122 codegen_->AddSlowPath(slow_path);
4123 __ Cbz(out, slow_path->GetEntryLabel());
4124 __ Bind(slow_path->GetExitLabel());
4125 }
4126 }
4127
4128 void LocationsBuilderARM64::VisitLongConstant(HLongConstant* constant) {
4129 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(constant);
4130 locations->SetOut(Location::ConstantLocation(constant));
4131 }
4132
4133 void InstructionCodeGeneratorARM64::VisitLongConstant(HLongConstant* constant ATTRIBUTE_UNUSED) {
4134 // Will be generated at use site.
4135 }
4136
4137 void LocationsBuilderARM64::VisitMonitorOperation(HMonitorOperation* instruction) {
4138 LocationSummary* locations =
4139 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
4140 InvokeRuntimeCallingConvention calling_convention;
4141 locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
4142 }
4143
4144 void InstructionCodeGeneratorARM64::VisitMonitorOperation(HMonitorOperation* instruction) {
4145 codegen_->InvokeRuntime(instruction->IsEnter()
4146 ? QUICK_ENTRY_POINT(pLockObject) : QUICK_ENTRY_POINT(pUnlockObject),
4147 instruction,
4148 instruction->GetDexPc(),
4149 nullptr);
4150 if (instruction->IsEnter()) {
4151 CheckEntrypointTypes<kQuickLockObject, void, mirror::Object*>();
4152 } else {
4153 CheckEntrypointTypes<kQuickUnlockObject, void, mirror::Object*>();
4154 }
4155 }
4156
4157 void LocationsBuilderARM64::VisitMul(HMul* mul) {
4158 LocationSummary* locations =
4159 new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
4160 switch (mul->GetResultType()) {
4161 case Primitive::kPrimInt:
4162 case Primitive::kPrimLong:
4163 locations->SetInAt(0, Location::RequiresRegister());
4164 locations->SetInAt(1, Location::RequiresRegister());
4165 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
4166 break;
4167
4168 case Primitive::kPrimFloat:
4169 case Primitive::kPrimDouble:
4170 locations->SetInAt(0, Location::RequiresFpuRegister());
4171 locations->SetInAt(1, Location::RequiresFpuRegister());
4172 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
4173 break;
4174
4175 default:
4176 LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
4177 }
4178 }
4179
4180 void InstructionCodeGeneratorARM64::VisitMul(HMul* mul) {
4181 switch (mul->GetResultType()) {
4182 case Primitive::kPrimInt:
4183 case Primitive::kPrimLong:
4184 __ Mul(OutputRegister(mul), InputRegisterAt(mul, 0), InputRegisterAt(mul, 1));
4185 break;
4186
4187 case Primitive::kPrimFloat:
4188 case Primitive::kPrimDouble:
4189 __ Fmul(OutputFPRegister(mul), InputFPRegisterAt(mul, 0), InputFPRegisterAt(mul, 1));
4190 break;
4191
4192 default:
4193 LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
4194 }
4195 }
4196
4197 void LocationsBuilderARM64::VisitNeg(HNeg* neg) {
4198 LocationSummary* locations =
4199 new (GetGraph()->GetArena()) LocationSummary(neg, LocationSummary::kNoCall);
4200 switch (neg->GetResultType()) {
4201 case Primitive::kPrimInt:
4202 case Primitive::kPrimLong:
4203 locations->SetInAt(0, ARM64EncodableConstantOrRegister(neg->InputAt(0), neg));
4204 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
4205 break;
4206
4207 case Primitive::kPrimFloat:
4208 case Primitive::kPrimDouble:
4209 locations->SetInAt(0, Location::RequiresFpuRegister());
4210 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
4211 break;
4212
4213 default:
4214 LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
4215 }
4216 }
4217
4218 void InstructionCodeGeneratorARM64::VisitNeg(HNeg* neg) {
4219 switch (neg->GetResultType()) {
4220 case Primitive::kPrimInt:
4221 case Primitive::kPrimLong:
4222 __ Neg(OutputRegister(neg), InputOperandAt(neg, 0));
4223 break;
4224
4225 case Primitive::kPrimFloat:
4226 case Primitive::kPrimDouble:
4227 __ Fneg(OutputFPRegister(neg), InputFPRegisterAt(neg, 0));
4228 break;
4229
4230 default:
4231 LOG(FATAL) << "Unexpected neg type " << neg->GetResultType();
4232 }
4233 }
4234
4235 void LocationsBuilderARM64::VisitNewArray(HNewArray* instruction) {
4236 LocationSummary* locations =
4237 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
4238 InvokeRuntimeCallingConvention calling_convention;
4239 locations->AddTemp(LocationFrom(calling_convention.GetRegisterAt(0)));
4240 locations->SetOut(LocationFrom(x0));
4241 locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(1)));
4242 locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(2)));
4243 }
4244
4245 void InstructionCodeGeneratorARM64::VisitNewArray(HNewArray* instruction) {
4246 LocationSummary* locations = instruction->GetLocations();
4247 InvokeRuntimeCallingConvention calling_convention;
4248 Register type_index = RegisterFrom(locations->GetTemp(0), Primitive::kPrimInt);
4249 DCHECK(type_index.Is(w0));
4250 __ Mov(type_index, instruction->GetTypeIndex());
4251 // Note: if heap poisoning is enabled, the entry point takes care
4252 // of poisoning the reference.
4253 codegen_->InvokeRuntime(instruction->GetEntrypoint(),
4254 instruction,
4255 instruction->GetDexPc(),
4256 nullptr);
4257 CheckEntrypointTypes<kQuickAllocArrayWithAccessCheck, void*, uint32_t, int32_t, ArtMethod*>();
4258 }
4259
4260 void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) {
4261 LocationSummary* locations =
4262 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
4263 InvokeRuntimeCallingConvention calling_convention;
4264 if (instruction->IsStringAlloc()) {
4265 locations->AddTemp(LocationFrom(kArtMethodRegister));
4266 } else {
4267 locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
4268 locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
4269 }
4270 locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
4271 }
4272
4273 void InstructionCodeGeneratorARM64::VisitNewInstance(HNewInstance* instruction) {
4274 // Note: if heap poisoning is enabled, the entry point takes care
4275 // of poisoning the reference.
4276 if (instruction->IsStringAlloc()) {
4277 // String is allocated through StringFactory. Call NewEmptyString entry point.
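// The value loaded from the thread-local pNewEmptyString slot is used as an ArtMethod*: its
// quick-compiled code entry point is then loaded and called via Blr, and RecordPcInfo below
// gives the call a stack map entry at this instruction's dex PC.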
4278 Location temp = instruction->GetLocations()->GetTemp(0);
4279 MemberOffset code_offset = ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
4280 __ Ldr(XRegisterFrom(temp), MemOperand(tr, QUICK_ENTRY_POINT(pNewEmptyString)));
4281 __ Ldr(lr, MemOperand(XRegisterFrom(temp), code_offset.Int32Value()));
4282 __ Blr(lr);
4283 codegen_->RecordPcInfo(instruction, instruction->GetDexPc());
4284 } else {
4285 codegen_->InvokeRuntime(instruction->GetEntrypoint(),
4286 instruction,
4287 instruction->GetDexPc(),
4288 nullptr);
4289 CheckEntrypointTypes<kQuickAllocObjectWithAccessCheck, void*, uint32_t, ArtMethod*>();
4290 }
4291 }
4292
4293 void LocationsBuilderARM64::VisitNot(HNot* instruction) {
4294 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
4295 locations->SetInAt(0, Location::RequiresRegister());
4296 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
4297 }
4298
4299 void InstructionCodeGeneratorARM64::VisitNot(HNot* instruction) {
4300 switch (instruction->GetResultType()) {
4301 case Primitive::kPrimInt:
4302 case Primitive::kPrimLong:
4303 __ Mvn(OutputRegister(instruction), InputOperandAt(instruction, 0));
4304 break;
4305
4306 default:
4307 LOG(FATAL) << "Unexpected type for not operation " << instruction->GetResultType();
4308 }
4309 }
4310
4311 void LocationsBuilderARM64::VisitBooleanNot(HBooleanNot* instruction) {
4312 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
4313 locations->SetInAt(0, Location::RequiresRegister());
4314 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
4315 }
4316
4317 void InstructionCodeGeneratorARM64::VisitBooleanNot(HBooleanNot* instruction) {
4318 __ Eor(OutputRegister(instruction), InputRegisterAt(instruction, 0), vixl::Operand(1));
4319 }
4320
4321 void LocationsBuilderARM64::VisitNullCheck(HNullCheck* instruction) {
4322 LocationSummary::CallKind call_kind = instruction->CanThrowIntoCatchBlock()
4323 ? LocationSummary::kCallOnSlowPath
4324 : LocationSummary::kNoCall;
4325 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction, call_kind);
4326 locations->SetInAt(0, Location::RequiresRegister());
4327 if (instruction->HasUses()) {
4328 locations->SetOut(Location::SameAsFirstInput());
4329 }
4330 }
4331
4332 void CodeGeneratorARM64::GenerateImplicitNullCheck(HNullCheck* instruction) {
4333 if (CanMoveNullCheckToUser(instruction)) {
4334 return;
4335 }
4336
4337 BlockPoolsScope block_pools(GetVIXLAssembler());
4338 Location obj = instruction->GetLocations()->InAt(0);
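// The load into wzr below is only for its side effect: dereferencing a null reference at
// offset 0 faults, and the fault handler uses the PC recorded just after to raise the
// NullPointerException at this instruction's dex PC.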
4339 __ Ldr(wzr, HeapOperandFrom(obj, Offset(0)));
4340 RecordPcInfo(instruction, instruction->GetDexPc());
4341 }
4342
4343 void CodeGeneratorARM64::GenerateExplicitNullCheck(HNullCheck* instruction) {
4344 SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) NullCheckSlowPathARM64(instruction);
4345 AddSlowPath(slow_path);
4346
4347 LocationSummary* locations = instruction->GetLocations();
4348 Location obj = locations->InAt(0);
4349
4350 __ Cbz(RegisterFrom(obj, instruction->InputAt(0)->GetType()), slow_path->GetEntryLabel());
4351 }
4352
4353 void InstructionCodeGeneratorARM64::VisitNullCheck(HNullCheck* instruction) {
4354 codegen_->GenerateNullCheck(instruction);
4355 }
4356
4357 void LocationsBuilderARM64::VisitOr(HOr* instruction) {
4358 HandleBinaryOp(instruction);
4359 }
4360
4361 void InstructionCodeGeneratorARM64::VisitOr(HOr* instruction) {
4362 HandleBinaryOp(instruction);
4363 }
4364
4365 void LocationsBuilderARM64::VisitParallelMove(HParallelMove* instruction ATTRIBUTE_UNUSED) {
4366 LOG(FATAL) << "Unreachable";
4367 }
4368
4369 void InstructionCodeGeneratorARM64::VisitParallelMove(HParallelMove* instruction) {
4370 codegen_->GetMoveResolver()->EmitNativeCode(instruction);
4371 }
4372
4373 void LocationsBuilderARM64::VisitParameterValue(HParameterValue* instruction) {
4374 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
4375 Location location = parameter_visitor_.GetNextLocation(instruction->GetType());
4376 if (location.IsStackSlot()) {
4377 location = Location::StackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
4378 } else if (location.IsDoubleStackSlot()) {
4379 location = Location::DoubleStackSlot(location.GetStackIndex() + codegen_->GetFrameSize());
4380 }
4381 locations->SetOut(location);
4382 }
4383
4384 void InstructionCodeGeneratorARM64::VisitParameterValue(
4385 HParameterValue* instruction ATTRIBUTE_UNUSED) {
4386 // Nothing to do, the parameter is already at its location.
4387 }
4388
4389 void LocationsBuilderARM64::VisitCurrentMethod(HCurrentMethod* instruction) {
4390 LocationSummary* locations =
4391 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
4392 locations->SetOut(LocationFrom(kArtMethodRegister));
4393 }
4394
4395 void InstructionCodeGeneratorARM64::VisitCurrentMethod(
4396 HCurrentMethod* instruction ATTRIBUTE_UNUSED) {
4397 // Nothing to do, the method is already at its location.
4398 }
4399
4400 void LocationsBuilderARM64::VisitPhi(HPhi* instruction) {
4401 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
4402 for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
4403 locations->SetInAt(i, Location::Any());
4404 }
4405 locations->SetOut(Location::Any());
4406 }
4407
4408 void InstructionCodeGeneratorARM64::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) {
4409 LOG(FATAL) << "Unreachable";
4410 }
4411
4412 void LocationsBuilderARM64::VisitRem(HRem* rem) {
4413 Primitive::Type type = rem->GetResultType();
4414 LocationSummary::CallKind call_kind =
4415 Primitive::IsFloatingPointType(type) ? LocationSummary::kCall : LocationSummary::kNoCall;
4416 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(rem, call_kind);
4417
4418 switch (type) {
4419 case Primitive::kPrimInt:
4420 case Primitive::kPrimLong:
4421 locations->SetInAt(0, Location::RequiresRegister());
4422 locations->SetInAt(1, Location::RegisterOrConstant(rem->InputAt(1)));
4423 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
4424 break;
4425
4426 case Primitive::kPrimFloat:
4427 case Primitive::kPrimDouble: {
4428 InvokeRuntimeCallingConvention calling_convention;
4429 locations->SetInAt(0, LocationFrom(calling_convention.GetFpuRegisterAt(0)));
4430 locations->SetInAt(1, LocationFrom(calling_convention.GetFpuRegisterAt(1)));
4431 locations->SetOut(calling_convention.GetReturnLocation(type));
4432
4433 break;
4434 }
4435
4436 default:
4437 LOG(FATAL) << "Unexpected rem type " << type;
4438 }
4439 }
4440
4441 void InstructionCodeGeneratorARM64::VisitRem(HRem* rem) {
4442 Primitive::Type type = rem->GetResultType();
4443
4444 switch (type) {
4445 case Primitive::kPrimInt:
4446 case Primitive::kPrimLong: {
4447 GenerateDivRemIntegral(rem);
4448 break;
4449 }
4450
4451 case Primitive::kPrimFloat:
4452 case Primitive::kPrimDouble: {
4453 int32_t entry_offset = (type == Primitive::kPrimFloat) ? QUICK_ENTRY_POINT(pFmodf)
4454 : QUICK_ENTRY_POINT(pFmod);
4455 codegen_->InvokeRuntime(entry_offset, rem, rem->GetDexPc(), nullptr);
4456 if (type == Primitive::kPrimFloat) {
4457 CheckEntrypointTypes<kQuickFmodf, float, float, float>();
4458 } else {
4459 CheckEntrypointTypes<kQuickFmod, double, double, double>();
4460 }
4461 break;
4462 }
4463
4464 default:
4465 LOG(FATAL) << "Unexpected rem type " << type;
4466 UNREACHABLE();
4467 }
4468 }
4469
4470 void LocationsBuilderARM64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
4471 memory_barrier->SetLocations(nullptr);
4472 }
4473
4474 void InstructionCodeGeneratorARM64::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) {
4475 codegen_->GenerateMemoryBarrier(memory_barrier->GetBarrierKind());
4476 }
4477
4478 void LocationsBuilderARM64::VisitReturn(HReturn* instruction) {
4479 LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(instruction);
4480 Primitive::Type return_type = instruction->InputAt(0)->GetType();
4481 locations->SetInAt(0, ARM64ReturnLocation(return_type));
4482 }
4483
4484 void InstructionCodeGeneratorARM64::VisitReturn(HReturn* instruction ATTRIBUTE_UNUSED) {
4485 codegen_->GenerateFrameExit();
4486 }
4487
4488 void LocationsBuilderARM64::VisitReturnVoid(HReturnVoid* instruction) {
4489 instruction->SetLocations(nullptr);
4490 }
4491
4492 void InstructionCodeGeneratorARM64::VisitReturnVoid(HReturnVoid* instruction ATTRIBUTE_UNUSED) {
4493 codegen_->GenerateFrameExit();
4494 }
4495
4496 void LocationsBuilderARM64::VisitRor(HRor* ror) {
4497 HandleBinaryOp(ror);
4498 }
4499
4500 void InstructionCodeGeneratorARM64::VisitRor(HRor* ror) {
4501 HandleBinaryOp(ror);
4502 }
4503
4504 void LocationsBuilderARM64::VisitShl(HShl* shl) {
4505 HandleShift(shl);
4506 }
4507
4508 void InstructionCodeGeneratorARM64::VisitShl(HShl* shl) {
4509 HandleShift(shl);
4510 }
4511
4512 void LocationsBuilderARM64::VisitShr(HShr* shr) {
4513 HandleShift(shr);
4514 }
4515
4516 void InstructionCodeGeneratorARM64::VisitShr(HShr* shr) {
4517 HandleShift(shr);
4518 }
4519
4520 void LocationsBuilderARM64::VisitSub(HSub* instruction) {
4521 HandleBinaryOp(instruction);
4522 }
4523
4524 void InstructionCodeGeneratorARM64::VisitSub(HSub* instruction) {
4525 HandleBinaryOp(instruction);
4526 }
4527
4528 void LocationsBuilderARM64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
4529 HandleFieldGet(instruction);
4530 }
4531
4532 void InstructionCodeGeneratorARM64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
4533 HandleFieldGet(instruction, instruction->GetFieldInfo());
4534 }
4535
4536 void LocationsBuilderARM64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
4537 HandleFieldSet(instruction);
4538 }
4539
4540 void InstructionCodeGeneratorARM64::VisitStaticFieldSet(HStaticFieldSet* instruction) {
4541 HandleFieldSet(instruction, instruction->GetFieldInfo(), instruction->GetValueCanBeNull());
4542 }
4543
4544 void LocationsBuilderARM64::VisitUnresolvedInstanceFieldGet(
4545 HUnresolvedInstanceFieldGet* instruction) {
4546 FieldAccessCallingConventionARM64 calling_convention;
4547 codegen_->CreateUnresolvedFieldLocationSummary(
4548 instruction, instruction->GetFieldType(), calling_convention);
4549 }
4550
4551 void InstructionCodeGeneratorARM64::VisitUnresolvedInstanceFieldGet(
4552 HUnresolvedInstanceFieldGet* instruction) {
4553 FieldAccessCallingConventionARM64 calling_convention;
4554 codegen_->GenerateUnresolvedFieldAccess(instruction,
4555 instruction->GetFieldType(),
4556 instruction->GetFieldIndex(),
4557 instruction->GetDexPc(),
4558 calling_convention);
4559 }
4560
4561 void LocationsBuilderARM64::VisitUnresolvedInstanceFieldSet(
4562 HUnresolvedInstanceFieldSet* instruction) {
4563 FieldAccessCallingConventionARM64 calling_convention;
4564 codegen_->CreateUnresolvedFieldLocationSummary(
4565 instruction, instruction->GetFieldType(), calling_convention);
4566 }
4567
4568 void InstructionCodeGeneratorARM64::VisitUnresolvedInstanceFieldSet(
4569 HUnresolvedInstanceFieldSet* instruction) {
4570 FieldAccessCallingConventionARM64 calling_convention;
4571 codegen_->GenerateUnresolvedFieldAccess(instruction,
4572 instruction->GetFieldType(),
4573 instruction->GetFieldIndex(),
4574 instruction->GetDexPc(),
4575 calling_convention);
4576 }
4577
4578 void LocationsBuilderARM64::VisitUnresolvedStaticFieldGet(
4579 HUnresolvedStaticFieldGet* instruction) {
4580 FieldAccessCallingConventionARM64 calling_convention;
4581 codegen_->CreateUnresolvedFieldLocationSummary(
4582 instruction, instruction->GetFieldType(), calling_convention);
4583 }
4584
4585 void InstructionCodeGeneratorARM64::VisitUnresolvedStaticFieldGet(
4586 HUnresolvedStaticFieldGet* instruction) {
4587 FieldAccessCallingConventionARM64 calling_convention;
4588 codegen_->GenerateUnresolvedFieldAccess(instruction,
4589 instruction->GetFieldType(),
4590 instruction->GetFieldIndex(),
4591 instruction->GetDexPc(),
4592 calling_convention);
4593 }
4594
4595 void LocationsBuilderARM64::VisitUnresolvedStaticFieldSet(
4596 HUnresolvedStaticFieldSet* instruction) {
4597 FieldAccessCallingConventionARM64 calling_convention;
4598 codegen_->CreateUnresolvedFieldLocationSummary(
4599 instruction, instruction->GetFieldType(), calling_convention);
4600 }
4601
4602 void InstructionCodeGeneratorARM64::VisitUnresolvedStaticFieldSet(
4603 HUnresolvedStaticFieldSet* instruction) {
4604 FieldAccessCallingConventionARM64 calling_convention;
4605 codegen_->GenerateUnresolvedFieldAccess(instruction,
4606 instruction->GetFieldType(),
4607 instruction->GetFieldIndex(),
4608 instruction->GetDexPc(),
4609 calling_convention);
4610 }
4611
4612 void LocationsBuilderARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
4613 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCallOnSlowPath);
4614 }
4615
4616 void InstructionCodeGeneratorARM64::VisitSuspendCheck(HSuspendCheck* instruction) {
4617 HBasicBlock* block = instruction->GetBlock();
4618 if (block->GetLoopInformation() != nullptr) {
4619 DCHECK(block->GetLoopInformation()->GetSuspendCheck() == instruction);
4620 // The back edge will generate the suspend check.
4621 return;
4622 }
4623 if (block->IsEntryBlock() && instruction->GetNext()->IsGoto()) {
4624 // The goto will generate the suspend check.
4625 return;
4626 }
4627 GenerateSuspendCheck(instruction, nullptr);
4628 }
4629
4630 void LocationsBuilderARM64::VisitThrow(HThrow* instruction) {
4631 LocationSummary* locations =
4632 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kCall);
4633 InvokeRuntimeCallingConvention calling_convention;
4634 locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
4635 }
4636
4637 void InstructionCodeGeneratorARM64::VisitThrow(HThrow* instruction) {
4638 codegen_->InvokeRuntime(
4639 QUICK_ENTRY_POINT(pDeliverException), instruction, instruction->GetDexPc(), nullptr);
4640 CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>();
4641 }
4642
4643 void LocationsBuilderARM64::VisitTypeConversion(HTypeConversion* conversion) {
4644 LocationSummary* locations =
4645 new (GetGraph()->GetArena()) LocationSummary(conversion, LocationSummary::kNoCall);
4646 Primitive::Type input_type = conversion->GetInputType();
4647 Primitive::Type result_type = conversion->GetResultType();
4648 DCHECK_NE(input_type, result_type);
4649 if ((input_type == Primitive::kPrimNot) || (input_type == Primitive::kPrimVoid) ||
4650 (result_type == Primitive::kPrimNot) || (result_type == Primitive::kPrimVoid)) {
4651 LOG(FATAL) << "Unexpected type conversion from " << input_type << " to " << result_type;
4652 }
4653
4654 if (Primitive::IsFloatingPointType(input_type)) {
4655 locations->SetInAt(0, Location::RequiresFpuRegister());
4656 } else {
4657 locations->SetInAt(0, Location::RequiresRegister());
4658 }
4659
4660 if (Primitive::IsFloatingPointType(result_type)) {
4661 locations->SetOut(Location::RequiresFpuRegister(), Location::kNoOutputOverlap);
4662 } else {
4663 locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
4664 }
4665 }
4666
4667 void InstructionCodeGeneratorARM64::VisitTypeConversion(HTypeConversion* conversion) {
4668 Primitive::Type result_type = conversion->GetResultType();
4669 Primitive::Type input_type = conversion->GetInputType();
4670
4671 DCHECK_NE(input_type, result_type);
4672
4673 if (Primitive::IsIntegralType(result_type) && Primitive::IsIntegralType(input_type)) {
4674 int result_size = Primitive::ComponentSize(result_type);
4675 int input_size = Primitive::ComponentSize(input_type);
4676 int min_size = std::min(result_size, input_size);
4677 Register output = OutputRegister(conversion);
4678 Register source = InputRegisterAt(conversion, 0);
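// A few concrete cases of the logic below: long->int becomes a 32-bit register move;
// long->char (and any widening from char) uses Ubfx to zero-extend 16 bits; int->byte and
// int->long use Sbfx over min(result, input) size, i.e. 8 and 32 bits respectively.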
4679 if (result_type == Primitive::kPrimInt && input_type == Primitive::kPrimLong) {
4680 // 'int' values are used directly as W registers, discarding the top
4681 // bits, so we don't need to sign-extend and can just perform a move.
4682 // We do not pass the `kDiscardForSameWReg` argument to force clearing the
4683 // top 32 bits of the target register. We theoretically could leave those
4684 // bits unchanged, but we would have to make sure that no code uses a
4685 // 32bit input value as a 64bit value assuming that the top 32 bits are
4686 // zero.
4687 __ Mov(output.W(), source.W());
4688 } else if (result_type == Primitive::kPrimChar ||
4689 (input_type == Primitive::kPrimChar && input_size < result_size)) {
4690 __ Ubfx(output,
4691 output.IsX() ? source.X() : source.W(),
4692 0, Primitive::ComponentSize(Primitive::kPrimChar) * kBitsPerByte);
4693 } else {
4694 __ Sbfx(output, output.IsX() ? source.X() : source.W(), 0, min_size * kBitsPerByte);
4695 }
4696 } else if (Primitive::IsFloatingPointType(result_type) && Primitive::IsIntegralType(input_type)) {
4697 __ Scvtf(OutputFPRegister(conversion), InputRegisterAt(conversion, 0));
4698 } else if (Primitive::IsIntegralType(result_type) && Primitive::IsFloatingPointType(input_type)) {
4699 CHECK(result_type == Primitive::kPrimInt || result_type == Primitive::kPrimLong);
4700 __ Fcvtzs(OutputRegister(conversion), InputFPRegisterAt(conversion, 0));
4701 } else if (Primitive::IsFloatingPointType(result_type) &&
4702 Primitive::IsFloatingPointType(input_type)) {
4703 __ Fcvt(OutputFPRegister(conversion), InputFPRegisterAt(conversion, 0));
4704 } else {
4705 LOG(FATAL) << "Unexpected or unimplemented type conversion from " << input_type
4706 << " to " << result_type;
4707 }
4708 }
4709
4710 void LocationsBuilderARM64::VisitUShr(HUShr* ushr) {
4711 HandleShift(ushr);
4712 }
4713
4714 void InstructionCodeGeneratorARM64::VisitUShr(HUShr* ushr) {
4715 HandleShift(ushr);
4716 }
4717
4718 void LocationsBuilderARM64::VisitXor(HXor* instruction) {
4719 HandleBinaryOp(instruction);
4720 }
4721
4722 void InstructionCodeGeneratorARM64::VisitXor(HXor* instruction) {
4723 HandleBinaryOp(instruction);
4724 }
4725
4726 void LocationsBuilderARM64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
4727 // Nothing to do, this should be removed during prepare for register allocator.
4728 LOG(FATAL) << "Unreachable";
4729 }
4730
4731 void InstructionCodeGeneratorARM64::VisitBoundType(HBoundType* instruction ATTRIBUTE_UNUSED) {
4732 // Nothing to do, this should be removed during prepare for register allocator.
4733 LOG(FATAL) << "Unreachable";
4734 }
4735
4736 // Simple implementation of packed switch - generate cascaded compare/jumps.
4737 void LocationsBuilderARM64::VisitPackedSwitch(HPackedSwitch* switch_instr) {
4738 LocationSummary* locations =
4739 new (GetGraph()->GetArena()) LocationSummary(switch_instr, LocationSummary::kNoCall);
4740 locations->SetInAt(0, Location::RequiresRegister());
4741 }
4742
4743 void InstructionCodeGeneratorARM64::VisitPackedSwitch(HPackedSwitch* switch_instr) {
4744 int32_t lower_bound = switch_instr->GetStartValue();
4745 uint32_t num_entries = switch_instr->GetNumEntries();
4746 Register value_reg = InputRegisterAt(switch_instr, 0);
4747 HBasicBlock* default_block = switch_instr->GetDefaultBlock();
4748
4749 // Roughly assume that at most 16 instructions are generated per HIR in a graph, on average.
4750 static constexpr int32_t kMaxExpectedSizePerHInstruction = 16 * vixl::kInstructionSize;
4751 // ADR has a limited range (+/- 1 MB), so we set a threshold for the number of HIRs in the graph to
4752 // make sure we don't emit a jump table if the ADR target may be out of range.
4753 // TODO: Instead of emitting all jump tables at the end of the code, we could keep track of ADR
4754 // ranges and emit the tables only as required.
4755 static constexpr int32_t kJumpTableInstructionThreshold = 1 * MB / kMaxExpectedSizePerHInstruction;
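// With 4-byte A64 instructions this evaluates to 1 MiB / (16 * 4 B) = 16384 HIRs.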
4756
4757 if (num_entries <= kPackedSwitchCompareJumpThreshold ||
4758 // Current instruction id is an upper bound of the number of HIRs in the graph.
4759 GetGraph()->GetCurrentInstructionId() > kJumpTableInstructionThreshold) {
4760 // Create a series of compare/jumps.
4761 UseScratchRegisterScope temps(codegen_->GetVIXLAssembler());
4762 Register temp = temps.AcquireW();
4763 __ Subs(temp, value_reg, Operand(lower_bound));
4764
4765 const ArenaVector<HBasicBlock*>& successors = switch_instr->GetBlock()->GetSuccessors();
4766 // Jump to successors[0] if value == lower_bound.
4767 __ B(eq, codegen_->GetLabelOf(successors[0]));
4768 int32_t last_index = 0;
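// Each loop iteration below handles two consecutive cases with a single Subs: after
// subtracting 2, 'lo' (an unsigned borrow) can only mean the value matched case
// last_index + 1, while 'eq' means it matched case last_index + 2.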
4769 for (; num_entries - last_index > 2; last_index += 2) {
4770 __ Subs(temp, temp, Operand(2));
4771 // Jump to successors[last_index + 1] if value < case_value[last_index + 2].
4772 __ B(lo, codegen_->GetLabelOf(successors[last_index + 1]));
4773 // Jump to successors[last_index + 2] if value == case_value[last_index + 2].
4774 __ B(eq, codegen_->GetLabelOf(successors[last_index + 2]));
4775 }
4776 if (num_entries - last_index == 2) {
4777 // The last missing case_value.
4778 __ Cmp(temp, Operand(1));
4779 __ B(eq, codegen_->GetLabelOf(successors[last_index + 1]));
4780 }
4781
4782 // And the default for any other value.
4783 if (!codegen_->GoesToNextBlock(switch_instr->GetBlock(), default_block)) {
4784 __ B(codegen_->GetLabelOf(default_block));
4785 }
4786 } else {
4787 JumpTableARM64* jump_table = codegen_->CreateJumpTable(switch_instr);
4788
4789 UseScratchRegisterScope temps(codegen_->GetVIXLAssembler());
4790
4791 // The instructions below should use at most one blocked register. Since there are two blocked
4792 // registers, we are free to block one.
4793 Register temp_w = temps.AcquireW();
4794 Register index;
4795 // Remove the bias.
4796 if (lower_bound != 0) {
4797 index = temp_w;
4798 __ Sub(index, value_reg, Operand(lower_bound));
4799 } else {
4800 index = value_reg;
4801 }
4802
4803 // Jump to default block if index is out of the range.
4804 __ Cmp(index, Operand(num_entries));
4805 __ B(hs, codegen_->GetLabelOf(default_block));
4806
4807 // In the current VIXL implementation, encoding the immediate value for Adr does not require
4808 // any blocked registers, so we are free to use both VIXL blocked registers to reduce
4809 // register pressure.
4810 Register table_base = temps.AcquireX();
4811 // Load jump offset from the table.
4812 __ Adr(table_base, jump_table->GetTableStartLabel());
4813 Register jump_offset = temp_w;
4814 __ Ldr(jump_offset, MemOperand(table_base, index, UXTW, 2));
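// Each table entry is a 32-bit offset relative to the table start, hence the 4-byte scaled
// load above; the offset is sign-extended when added below since entries can be negative
// when the jump table is emitted after the code it targets.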
4815
4816 // Jump to the target block by branching to table_base (PC-relative) + offset.
4817 Register target_address = table_base;
4818 __ Add(target_address, table_base, Operand(jump_offset, SXTW));
4819 __ Br(target_address);
4820 }
4821 }
4822
4823 void InstructionCodeGeneratorARM64::GenerateReferenceLoadOneRegister(HInstruction* instruction,
4824 Location out,
4825 uint32_t offset,
4826 Location maybe_temp) {
4827 Primitive::Type type = Primitive::kPrimNot;
4828 Register out_reg = RegisterFrom(out, type);
4829 if (kEmitCompilerReadBarrier) {
4830 Register temp_reg = RegisterFrom(maybe_temp, type);
4831 if (kUseBakerReadBarrier) {
4832 // Load with fast path based Baker's read barrier.
4833 // /* HeapReference<Object> */ out = *(out + offset)
4834 codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
4835 out,
4836 out_reg,
4837 offset,
4838 temp_reg,
4839 /* needs_null_check */ false,
4840 /* use_load_acquire */ false);
4841 } else {
4842 // Load with slow path based read barrier.
4843 // Save the value of `out` into `maybe_temp` before overwriting it
4844 // in the following move operation, as we will need it for the
4845 // read barrier below.
4846 __ Mov(temp_reg, out_reg);
4847 // /* HeapReference<Object> */ out = *(out + offset)
4848 __ Ldr(out_reg, HeapOperand(out_reg, offset));
4849 codegen_->GenerateReadBarrierSlow(instruction, out, out, maybe_temp, offset);
4850 }
4851 } else {
4852 // Plain load with no read barrier.
4853 // /* HeapReference<Object> */ out = *(out + offset)
4854 __ Ldr(out_reg, HeapOperand(out_reg, offset));
4855 GetAssembler()->MaybeUnpoisonHeapReference(out_reg);
4856 }
4857 }
4858
4859 void InstructionCodeGeneratorARM64::GenerateReferenceLoadTwoRegisters(HInstruction* instruction,
4860 Location out,
4861 Location obj,
4862 uint32_t offset,
4863 Location maybe_temp) {
4864 Primitive::Type type = Primitive::kPrimNot;
4865 Register out_reg = RegisterFrom(out, type);
4866 Register obj_reg = RegisterFrom(obj, type);
4867 if (kEmitCompilerReadBarrier) {
4868 if (kUseBakerReadBarrier) {
4869 // Load with fast path based Baker's read barrier.
4870 Register temp_reg = RegisterFrom(maybe_temp, type);
4871 // /* HeapReference<Object> */ out = *(obj + offset)
4872 codegen_->GenerateFieldLoadWithBakerReadBarrier(instruction,
4873 out,
4874 obj_reg,
4875 offset,
4876 temp_reg,
4877 /* needs_null_check */ false,
4878 /* use_load_acquire */ false);
4879 } else {
4880 // Load with slow path based read barrier.
4881 // /* HeapReference<Object> */ out = *(obj + offset)
4882 __ Ldr(out_reg, HeapOperand(obj_reg, offset));
4883 codegen_->GenerateReadBarrierSlow(instruction, out, out, obj, offset);
4884 }
4885 } else {
4886 // Plain load with no read barrier.
4887 // /* HeapReference<Object> */ out = *(obj + offset)
4888 __ Ldr(out_reg, HeapOperand(obj_reg, offset));
4889 GetAssembler()->MaybeUnpoisonHeapReference(out_reg);
4890 }
4891 }
4892
4893 void InstructionCodeGeneratorARM64::GenerateGcRootFieldLoad(HInstruction* instruction,
4894 Location root,
4895 vixl::Register obj,
4896 uint32_t offset,
4897 vixl::Label* fixup_label) {
4898 Register root_reg = RegisterFrom(root, Primitive::kPrimNot);
4899 if (kEmitCompilerReadBarrier) {
4900 if (kUseBakerReadBarrier) {
4901 // Fast path implementation of art::ReadBarrier::BarrierForRoot when
4902 // Baker's read barriers are used:
4903 //
4904 // root = obj.field;
4905 // if (Thread::Current()->GetIsGcMarking()) {
4906 // root = ReadBarrier::Mark(root)
4907 // }
4908
4909 // /* GcRoot<mirror::Object> */ root = *(obj + offset)
4910 if (fixup_label == nullptr) {
4911 __ Ldr(root_reg, MemOperand(obj, offset));
4912 } else {
4913 vixl::SingleEmissionCheckScope guard(GetVIXLAssembler());
4914 __ Bind(fixup_label);
4915 __ ldr(root_reg, MemOperand(obj, offset));
4916 }
4917 static_assert(
4918 sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(GcRoot<mirror::Object>),
4919 "art::mirror::CompressedReference<mirror::Object> and art::GcRoot<mirror::Object> "
4920 "have different sizes.");
4921 static_assert(sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(int32_t),
4922 "art::mirror::CompressedReference<mirror::Object> and int32_t "
4923 "have different sizes.");
4924
4925 // Slow path used to mark the GC root `root`.
4926 SlowPathCodeARM64* slow_path =
4927 new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathARM64(instruction, root, root);
4928 codegen_->AddSlowPath(slow_path);
4929
4930 MacroAssembler* masm = GetVIXLAssembler();
4931 UseScratchRegisterScope temps(masm);
4932 Register temp = temps.AcquireW();
4933 // temp = Thread::Current()->GetIsGcMarking()
4934 __ Ldr(temp, MemOperand(tr, Thread::IsGcMarkingOffset<kArm64WordSize>().Int32Value()));
4935 __ Cbnz(temp, slow_path->GetEntryLabel());
4936 __ Bind(slow_path->GetExitLabel());
4937 } else {
4938 // GC root loaded through a slow path for read barriers other
4939 // than Baker's.
4940 // /* GcRoot<mirror::Object>* */ root = obj + offset
4941 if (fixup_label == nullptr) {
4942 __ Add(root_reg.X(), obj.X(), offset);
4943 } else {
4944 vixl::SingleEmissionCheckScope guard(GetVIXLAssembler());
4945 __ Bind(fixup_label);
4946 __ add(root_reg.X(), obj.X(), offset);
4947 }
4948 // /* mirror::Object* */ root = root->Read()
4949 codegen_->GenerateReadBarrierForRootSlow(instruction, root, root);
4950 }
4951 } else {
4952 // Plain GC root load with no read barrier.
4953 // /* GcRoot<mirror::Object> */ root = *(obj + offset)
4954 if (fixup_label == nullptr) {
4955 __ Ldr(root_reg, MemOperand(obj, offset));
4956 } else {
4957 vixl::SingleEmissionCheckScope guard(GetVIXLAssembler());
4958 __ Bind(fixup_label);
4959 __ ldr(root_reg, MemOperand(obj, offset));
4960 }
4961 // Note that GC roots are not affected by heap poisoning, thus we
4962 // do not have to unpoison `root_reg` here.
4963 }
4964 }
4965
4966 void CodeGeneratorARM64::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction,
4967 Location ref,
4968 vixl::Register obj,
4969 uint32_t offset,
4970 Register temp,
4971 bool needs_null_check,
4972 bool use_load_acquire) {
4973 DCHECK(kEmitCompilerReadBarrier);
4974 DCHECK(kUseBakerReadBarrier);
4975
4976 // /* HeapReference<Object> */ ref = *(obj + offset)
4977 Location no_index = Location::NoLocation();
4978 GenerateReferenceLoadWithBakerReadBarrier(
4979 instruction, ref, obj, offset, no_index, temp, needs_null_check, use_load_acquire);
4980 }
4981
4982 void CodeGeneratorARM64::GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction,
4983 Location ref,
4984 vixl::Register obj,
4985 uint32_t data_offset,
4986 Location index,
4987 Register temp,
4988 bool needs_null_check) {
4989 DCHECK(kEmitCompilerReadBarrier);
4990 DCHECK(kUseBakerReadBarrier);
4991
4992 // Array cells are never volatile variables, therefore array loads
4993 // never use Load-Acquire instructions on ARM64.
4994 const bool use_load_acquire = false;
4995
4996 // /* HeapReference<Object> */ ref =
4997 // *(obj + data_offset + index * sizeof(HeapReference<Object>))
4998 GenerateReferenceLoadWithBakerReadBarrier(
4999 instruction, ref, obj, data_offset, index, temp, needs_null_check, use_load_acquire);
5000 }
5001
5002 void CodeGeneratorARM64::GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction,
5003 Location ref,
5004 vixl::Register obj,
5005 uint32_t offset,
5006 Location index,
5007 Register temp,
5008 bool needs_null_check,
5009 bool use_load_acquire) {
5010 DCHECK(kEmitCompilerReadBarrier);
5011 DCHECK(kUseBakerReadBarrier);
5012 // If `index` is a valid location, then we are emitting an array
5013 // load, so we shouldn't be using a Load Acquire instruction.
5014 // In other words: `index.IsValid()` => `!use_load_acquire`.
5015 DCHECK(!index.IsValid() || !use_load_acquire);
5016
5017 MacroAssembler* masm = GetVIXLAssembler();
5018 UseScratchRegisterScope temps(masm);
5019
5020 // In slow path based read barriers, the read barrier call is
5021 // inserted after the original load. However, in fast path based
5022 // Baker's read barriers, we need to perform the load of
5023 // mirror::Object::monitor_ *before* the original reference load.
5024 // This load-load ordering is required by the read barrier.
5025 // The fast path/slow path (for Baker's algorithm) should look like:
5026 //
5027 // uint32_t rb_state = Lockword(obj->monitor_).ReadBarrierState();
5028 // lfence; // Load fence or artificial data dependency to prevent load-load reordering
5029 // HeapReference<Object> ref = *src; // Original reference load.
5030 // bool is_gray = (rb_state == ReadBarrier::gray_ptr_);
5031 // if (is_gray) {
5032 // ref = ReadBarrier::Mark(ref); // Performed by runtime entrypoint slow path.
5033 // }
5034 //
5035 // Note: the original implementation in ReadBarrier::Barrier is
5036 // slightly more complex as it performs additional checks that we do
5037 // not do here for performance reasons.
5038
5039 Primitive::Type type = Primitive::kPrimNot;
5040 Register ref_reg = RegisterFrom(ref, type);
5041 DCHECK(obj.IsW());
5042 uint32_t monitor_offset = mirror::Object::MonitorOffset().Int32Value();
5043
5044 // /* int32_t */ monitor = obj->monitor_
5045 __ Ldr(temp, HeapOperand(obj, monitor_offset));
5046 if (needs_null_check) {
5047 MaybeRecordImplicitNullCheck(instruction);
5048 }
5049 // /* LockWord */ lock_word = LockWord(monitor)
5050 static_assert(sizeof(LockWord) == sizeof(int32_t),
5051 "art::LockWord and int32_t have different sizes.");
5052 // /* uint32_t */ rb_state = lock_word.ReadBarrierState()
5053 __ Lsr(temp, temp, LockWord::kReadBarrierStateShift);
5054 __ And(temp, temp, Operand(LockWord::kReadBarrierStateMask));
5055 static_assert(
5056 LockWord::kReadBarrierStateMask == ReadBarrier::rb_ptr_mask_,
5057 "art::LockWord::kReadBarrierStateMask is not equal to art::ReadBarrier::rb_ptr_mask_.");
5058
5059 // Introduce a dependency on the high bits of rb_state, which shall
5060 // be all zeroes, to prevent load-load reordering, and without using
5061 // a memory barrier (which would be more expensive).
5062 // temp2 = rb_state & ~LockWord::kReadBarrierStateMask = 0
5063 Register temp2 = temps.AcquireW();
5064 __ Bic(temp2, temp, Operand(LockWord::kReadBarrierStateMask));
5065 // obj is unchanged by this operation, but its value now depends on
5066 // temp2, which depends on temp.
5067 __ Add(obj, obj, Operand(temp2));
5068 temps.Release(temp2);
5069
5070 // The actual reference load.
5071 if (index.IsValid()) {
5072 static_assert(
5073 sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t),
5074 "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes.");
5075 // /* HeapReference<Object> */ ref =
5076 // *(obj + offset + index * sizeof(HeapReference<Object>))
5077 const size_t shift_amount = Primitive::ComponentSizeShift(type);
5078 if (index.IsConstant()) {
5079 uint32_t computed_offset = offset + (Int64ConstantFrom(index) << shift_amount);
5080 Load(type, ref_reg, HeapOperand(obj, computed_offset));
5081 } else {
5082 temp2 = temps.AcquireW();
5083 __ Add(temp2, obj, offset);
5084 Load(type, ref_reg, HeapOperand(temp2, XRegisterFrom(index), LSL, shift_amount));
5085 temps.Release(temp2);
5086 }
5087 } else {
5088 // /* HeapReference<Object> */ ref = *(obj + offset)
5089 MemOperand field = HeapOperand(obj, offset);
5090 if (use_load_acquire) {
5091 LoadAcquire(instruction, ref_reg, field, /* needs_null_check */ false);
5092 } else {
5093 Load(type, ref_reg, field);
5094 }
5095 }
5096
5097 // Object* ref = ref_addr->AsMirrorPtr()
5098 GetAssembler()->MaybeUnpoisonHeapReference(ref_reg);
5099
5100 // Slow path used to mark the object `ref` when it is gray.
5101 SlowPathCodeARM64* slow_path =
5102 new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathARM64(instruction, ref, ref);
5103 AddSlowPath(slow_path);
5104
5105 // if (rb_state == ReadBarrier::gray_ptr_)
5106 // ref = ReadBarrier::Mark(ref);
5107 __ Cmp(temp, ReadBarrier::gray_ptr_);
5108 __ B(eq, slow_path->GetEntryLabel());
5109 __ Bind(slow_path->GetExitLabel());
5110 }
5111
5112 void CodeGeneratorARM64::GenerateReadBarrierSlow(HInstruction* instruction,
5113 Location out,
5114 Location ref,
5115 Location obj,
5116 uint32_t offset,
5117 Location index) {
5118 DCHECK(kEmitCompilerReadBarrier);
5119
5120 // Insert a slow path based read barrier *after* the reference load.
5121 //
5122 // If heap poisoning is enabled, the unpoisoning of the loaded
5123 // reference will be carried out by the runtime within the slow
5124 // path.
5125 //
5126 // Note that `ref` currently does not get unpoisoned (when heap
5127 // poisoning is enabled), which is alright as the `ref` argument is
5128 // not used by the artReadBarrierSlow entry point.
5129 //
5130 // TODO: Unpoison `ref` when it is used by artReadBarrierSlow.
5131 SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena())
5132 ReadBarrierForHeapReferenceSlowPathARM64(instruction, out, ref, obj, offset, index);
5133 AddSlowPath(slow_path);
5134
5135 __ B(slow_path->GetEntryLabel());
5136 __ Bind(slow_path->GetExitLabel());
5137 }
5138
5139 void CodeGeneratorARM64::MaybeGenerateReadBarrierSlow(HInstruction* instruction,
5140 Location out,
5141 Location ref,
5142 Location obj,
5143 uint32_t offset,
5144 Location index) {
5145 if (kEmitCompilerReadBarrier) {
5146 // Baker's read barriers shall be handled by the fast path
5147 // (CodeGeneratorARM64::GenerateReferenceLoadWithBakerReadBarrier).
5148 DCHECK(!kUseBakerReadBarrier);
5149 // If heap poisoning is enabled, unpoisoning will be taken care of
5150 // by the runtime within the slow path.
5151 GenerateReadBarrierSlow(instruction, out, ref, obj, offset, index);
5152 } else if (kPoisonHeapReferences) {
5153 GetAssembler()->UnpoisonHeapReference(WRegisterFrom(out));
5154 }
5155 }
5156
5157 void CodeGeneratorARM64::GenerateReadBarrierForRootSlow(HInstruction* instruction,
5158 Location out,
5159 Location root) {
5160 DCHECK(kEmitCompilerReadBarrier);
5161
5162 // Insert a slow path based read barrier *after* the GC root load.
5163 //
5164 // Note that GC roots are not affected by heap poisoning, so we do
5165 // not need to do anything special for this here.
5166 SlowPathCodeARM64* slow_path =
5167 new (GetGraph()->GetArena()) ReadBarrierForRootSlowPathARM64(instruction, out, root);
5168 AddSlowPath(slow_path);
5169
5170 __ B(slow_path->GetEntryLabel());
5171 __ Bind(slow_path->GetExitLabel());
5172 }
5173
5174 void LocationsBuilderARM64::VisitClassTableGet(HClassTableGet* instruction) {
5175 LocationSummary* locations =
5176 new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
5177 locations->SetInAt(0, Location::RequiresRegister());
5178 locations->SetOut(Location::RequiresRegister());
5179 }
5180
5181 void InstructionCodeGeneratorARM64::VisitClassTableGet(HClassTableGet* instruction) {
5182 LocationSummary* locations = instruction->GetLocations();
5183 uint32_t method_offset = 0;
5184 if (instruction->GetTableKind() == HClassTableGet::TableKind::kVTable) {
5185 method_offset = mirror::Class::EmbeddedVTableEntryOffset(
5186 instruction->GetIndex(), kArm64PointerSize).SizeValue();
5187 } else {
5188 method_offset = mirror::Class::EmbeddedImTableEntryOffset(
5189 instruction->GetIndex() % mirror::Class::kImtSize, kArm64PointerSize).Uint32Value();
5190 }
5191 __ Ldr(XRegisterFrom(locations->Out()),
5192 MemOperand(XRegisterFrom(locations->InAt(0)), method_offset));
5193 }
5194
5195
5196
5197 #undef __
5198 #undef QUICK_ENTRY_POINT
5199
5200 } // namespace arm64
5201 } // namespace art
5202